diff options
| author | Ingo Molnar <mingo@kernel.org> | 2016-06-14 05:14:34 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2016-06-14 05:14:34 -0400 |
| commit | 3559ff9650224a4af6b777a5df786f521f66db5a (patch) | |
| tree | ff1d9ec0e27ab4b452f1d8b12485d21102900597 | |
| parent | 70e0d117f2502f19517be03a64b3c513f31b3cdb (diff) | |
| parent | db06d759d6cf903aeda8c107fd3abd366dd80200 (diff) | |
Merge branch 'linus' into perf/core, to pick up fixes before merging new changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
317 files changed, 2751 insertions, 1897 deletions
diff --git a/Documentation/devicetree/bindings/hwmon/ina2xx.txt b/Documentation/devicetree/bindings/hwmon/ina2xx.txt index 9bcd5e87830d..02af0d94e921 100644 --- a/Documentation/devicetree/bindings/hwmon/ina2xx.txt +++ b/Documentation/devicetree/bindings/hwmon/ina2xx.txt | |||
| @@ -7,6 +7,7 @@ Required properties: | |||
| 7 | - "ti,ina220" for ina220 | 7 | - "ti,ina220" for ina220 |
| 8 | - "ti,ina226" for ina226 | 8 | - "ti,ina226" for ina226 |
| 9 | - "ti,ina230" for ina230 | 9 | - "ti,ina230" for ina230 |
| 10 | - "ti,ina231" for ina231 | ||
| 10 | - reg: I2C address | 11 | - reg: I2C address |
| 11 | 12 | ||
| 12 | Optional properties: | 13 | Optional properties: |
diff --git a/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt b/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt index bfeabb843941..71191ff0e781 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-arb-gpio-challenge.txt | |||
| @@ -44,8 +44,8 @@ Required properties: | |||
| 44 | - our-claim-gpio: The GPIO that we use to claim the bus. | 44 | - our-claim-gpio: The GPIO that we use to claim the bus. |
| 45 | - their-claim-gpios: The GPIOs that the other sides use to claim the bus. | 45 | - their-claim-gpios: The GPIOs that the other sides use to claim the bus. |
| 46 | Note that some implementations may only support a single other master. | 46 | Note that some implementations may only support a single other master. |
| 47 | - Standard I2C mux properties. See mux.txt in this directory. | 47 | - Standard I2C mux properties. See i2c-mux.txt in this directory. |
| 48 | - Single I2C child bus node at reg 0. See mux.txt in this directory. | 48 | - Single I2C child bus node at reg 0. See i2c-mux.txt in this directory. |
| 49 | 49 | ||
| 50 | Optional properties: | 50 | Optional properties: |
| 51 | - slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us. | 51 | - slew-delay-us: microseconds to wait for a GPIO to go high. Default is 10 us. |
diff --git a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt index 6078aefe7ed4..7ce23ac61308 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-demux-pinctrl.txt | |||
| @@ -27,7 +27,8 @@ Required properties: | |||
| 27 | - i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C | 27 | - i2c-bus-name: The name of this bus. Also needed as pinctrl-name for the I2C |
| 28 | parents. | 28 | parents. |
| 29 | 29 | ||
| 30 | Furthermore, I2C mux properties and child nodes. See mux.txt in this directory. | 30 | Furthermore, I2C mux properties and child nodes. See i2c-mux.txt in this |
| 31 | directory. | ||
| 31 | 32 | ||
| 32 | Example: | 33 | Example: |
| 33 | 34 | ||
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt index 66709a825541..21da3ecbb370 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt | |||
| @@ -22,8 +22,8 @@ Required properties: | |||
| 22 | - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side | 22 | - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side |
| 23 | port is connected to. | 23 | port is connected to. |
| 24 | - mux-gpios: list of gpios used to control the muxer | 24 | - mux-gpios: list of gpios used to control the muxer |
| 25 | * Standard I2C mux properties. See mux.txt in this directory. | 25 | * Standard I2C mux properties. See i2c-mux.txt in this directory. |
| 26 | * I2C child bus nodes. See mux.txt in this directory. | 26 | * I2C child bus nodes. See i2c-mux.txt in this directory. |
| 27 | 27 | ||
| 28 | Optional properties: | 28 | Optional properties: |
| 29 | - idle-state: value to set the muxer to when idle. When no value is | 29 | - idle-state: value to set the muxer to when idle. When no value is |
| @@ -33,7 +33,7 @@ For each i2c child node, an I2C child bus will be created. They will | |||
| 33 | be numbered based on their order in the device tree. | 33 | be numbered based on their order in the device tree. |
| 34 | 34 | ||
| 35 | Whenever an access is made to a device on a child bus, the value set | 35 | Whenever an access is made to a device on a child bus, the value set |
| 36 | in the revelant node's reg property will be output using the list of | 36 | in the relevant node's reg property will be output using the list of |
| 37 | GPIOs, the first in the list holding the least-significant value. | 37 | GPIOs, the first in the list holding the least-significant value. |
| 38 | 38 | ||
| 39 | If an idle state is defined, using the idle-state (optional) property, | 39 | If an idle state is defined, using the idle-state (optional) property, |
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt index ae8af1694e95..33119a98e144 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt | |||
| @@ -28,9 +28,9 @@ Also required are: | |||
| 28 | * Standard pinctrl properties that specify the pin mux state for each child | 28 | * Standard pinctrl properties that specify the pin mux state for each child |
| 29 | bus. See ../pinctrl/pinctrl-bindings.txt. | 29 | bus. See ../pinctrl/pinctrl-bindings.txt. |
| 30 | 30 | ||
| 31 | * Standard I2C mux properties. See mux.txt in this directory. | 31 | * Standard I2C mux properties. See i2c-mux.txt in this directory. |
| 32 | 32 | ||
| 33 | * I2C child bus nodes. See mux.txt in this directory. | 33 | * I2C child bus nodes. See i2c-mux.txt in this directory. |
| 34 | 34 | ||
| 35 | For each named state defined in the pinctrl-names property, an I2C child bus | 35 | For each named state defined in the pinctrl-names property, an I2C child bus |
| 36 | will be created. I2C child bus numbers are assigned based on the index into | 36 | will be created. I2C child bus numbers are assigned based on the index into |
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt index 688783fbe696..de00d7fc450b 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mux-reg.txt | |||
| @@ -7,8 +7,8 @@ Required properties: | |||
| 7 | - compatible: i2c-mux-reg | 7 | - compatible: i2c-mux-reg |
| 8 | - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side | 8 | - i2c-parent: The phandle of the I2C bus that this multiplexer's master-side |
| 9 | port is connected to. | 9 | port is connected to. |
| 10 | * Standard I2C mux properties. See mux.txt in this directory. | 10 | * Standard I2C mux properties. See i2c-mux.txt in this directory. |
| 11 | * I2C child bus nodes. See mux.txt in this directory. | 11 | * I2C child bus nodes. See i2c-mux.txt in this directory. |
| 12 | 12 | ||
| 13 | Optional properties: | 13 | Optional properties: |
| 14 | - reg: this pair of <offset size> specifies the register to control the mux. | 14 | - reg: this pair of <offset size> specifies the register to control the mux. |
| @@ -24,7 +24,7 @@ Optional properties: | |||
| 24 | given, it defaults to the last value used. | 24 | given, it defaults to the last value used. |
| 25 | 25 | ||
| 26 | Whenever an access is made to a device on a child bus, the value set | 26 | Whenever an access is made to a device on a child bus, the value set |
| 27 | in the revelant node's reg property will be output to the register. | 27 | in the relevant node's reg property will be output to the register. |
| 28 | 28 | ||
| 29 | If an idle state is defined, using the idle-state (optional) property, | 29 | If an idle state is defined, using the idle-state (optional) property, |
| 30 | whenever an access is not being made to a device on a child bus, the | 30 | whenever an access is not being made to a device on a child bus, the |
diff --git a/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt index 14aa6cf58201..6a9a63cb0543 100644 --- a/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt +++ b/Documentation/devicetree/bindings/net/marvell-bt-sd8xxx.txt | |||
| @@ -13,10 +13,10 @@ Optional properties: | |||
| 13 | initialization. This is an array of 28 values(u8). | 13 | initialization. This is an array of 28 values(u8). |
| 14 | 14 | ||
| 15 | - marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip. | 15 | - marvell,wakeup-pin: It represents wakeup pin number of the bluetooth chip. |
| 16 | firmware will use the pin to wakeup host system. | 16 | firmware will use the pin to wakeup host system (u16). |
| 17 | - marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host | 17 | - marvell,wakeup-gap-ms: wakeup gap represents wakeup latency of the host |
| 18 | platform. The value will be configured to firmware. This | 18 | platform. The value will be configured to firmware. This |
| 19 | is needed to work chip's sleep feature as expected. | 19 | is needed to work chip's sleep feature as expected (u16). |
| 20 | - interrupt-parent: phandle of the parent interrupt controller | 20 | - interrupt-parent: phandle of the parent interrupt controller |
| 21 | - interrupts : interrupt pin number to the cpu. Driver will request an irq based | 21 | - interrupts : interrupt pin number to the cpu. Driver will request an irq based |
| 22 | on this interrupt number. During system suspend, the irq will be | 22 | on this interrupt number. During system suspend, the irq will be |
| @@ -50,7 +50,7 @@ calibration data is also available in below example. | |||
| 50 | 0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02 | 50 | 0x37 0x01 0x1c 0x00 0xff 0xff 0xff 0xff 0x01 0x7f 0x04 0x02 |
| 51 | 0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00 | 51 | 0x00 0x00 0xba 0xce 0xc0 0xc6 0x2d 0x00 0x00 0x00 0x00 0x00 |
| 52 | 0x00 0x00 0xf0 0x00>; | 52 | 0x00 0x00 0xf0 0x00>; |
| 53 | marvell,wakeup-pin = <0x0d>; | 53 | marvell,wakeup-pin = /bits/ 16 <0x0d>; |
| 54 | marvell,wakeup-gap-ms = <0x64>; | 54 | marvell,wakeup-gap-ms = /bits/ 16 <0x64>; |
| 55 | }; | 55 | }; |
| 56 | }; | 56 | }; |
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt index a7440bcd67ff..2c2500df0dce 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/Documentation/devicetree/bindings/vendor-prefixes.txt | |||
| @@ -255,6 +255,7 @@ synology Synology, Inc. | |||
| 255 | SUNW Sun Microsystems, Inc | 255 | SUNW Sun Microsystems, Inc |
| 256 | tbs TBS Technologies | 256 | tbs TBS Technologies |
| 257 | tcl Toby Churchill Ltd. | 257 | tcl Toby Churchill Ltd. |
| 258 | technexion TechNexion | ||
| 258 | technologic Technologic Systems | 259 | technologic Technologic Systems |
| 259 | thine THine Electronics, Inc. | 260 | thine THine Electronics, Inc. |
| 260 | ti Texas Instruments | 261 | ti Texas Instruments |
| @@ -269,6 +270,7 @@ tronsmart Tronsmart | |||
| 269 | truly Truly Semiconductors Limited | 270 | truly Truly Semiconductors Limited |
| 270 | tyan Tyan Computer Corporation | 271 | tyan Tyan Computer Corporation |
| 271 | upisemi uPI Semiconductor Corp. | 272 | upisemi uPI Semiconductor Corp. |
| 273 | uniwest United Western Technologies Corp (UniWest) | ||
| 272 | urt United Radiant Technology Corporation | 274 | urt United Radiant Technology Corporation |
| 273 | usi Universal Scientific Industrial Co., Ltd. | 275 | usi Universal Scientific Industrial Co., Ltd. |
| 274 | v3 V3 Semiconductor | 276 | v3 V3 Semiconductor |
diff --git a/MAINTAINERS b/MAINTAINERS index 66de4da2d244..16700e4fcc4a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -8009,6 +8009,7 @@ Q: http://patchwork.kernel.org/project/linux-wireless/list/ | |||
| 8009 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git | 8009 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git |
| 8010 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git | 8010 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git |
| 8011 | S: Maintained | 8011 | S: Maintained |
| 8012 | F: Documentation/devicetree/bindings/net/wireless/ | ||
| 8012 | F: drivers/net/wireless/ | 8013 | F: drivers/net/wireless/ |
| 8013 | 8014 | ||
| 8014 | NETXEN (1/10) GbE SUPPORT | 8015 | NETXEN (1/10) GbE SUPPORT |
| @@ -8406,10 +8407,9 @@ F: drivers/i2c/busses/i2c-ocores.c | |||
| 8406 | OPEN FIRMWARE AND FLATTENED DEVICE TREE | 8407 | OPEN FIRMWARE AND FLATTENED DEVICE TREE |
| 8407 | M: Rob Herring <robh+dt@kernel.org> | 8408 | M: Rob Herring <robh+dt@kernel.org> |
| 8408 | M: Frank Rowand <frowand.list@gmail.com> | 8409 | M: Frank Rowand <frowand.list@gmail.com> |
| 8409 | M: Grant Likely <grant.likely@linaro.org> | ||
| 8410 | L: devicetree@vger.kernel.org | 8410 | L: devicetree@vger.kernel.org |
| 8411 | W: http://www.devicetree.org/ | 8411 | W: http://www.devicetree.org/ |
| 8412 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git | 8412 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git |
| 8413 | S: Maintained | 8413 | S: Maintained |
| 8414 | F: drivers/of/ | 8414 | F: drivers/of/ |
| 8415 | F: include/linux/of*.h | 8415 | F: include/linux/of*.h |
| @@ -8417,12 +8417,10 @@ F: scripts/dtc/ | |||
| 8417 | 8417 | ||
| 8418 | OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS | 8418 | OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS |
| 8419 | M: Rob Herring <robh+dt@kernel.org> | 8419 | M: Rob Herring <robh+dt@kernel.org> |
| 8420 | M: Pawel Moll <pawel.moll@arm.com> | ||
| 8421 | M: Mark Rutland <mark.rutland@arm.com> | 8420 | M: Mark Rutland <mark.rutland@arm.com> |
| 8422 | M: Ian Campbell <ijc+devicetree@hellion.org.uk> | ||
| 8423 | M: Kumar Gala <galak@codeaurora.org> | ||
| 8424 | L: devicetree@vger.kernel.org | 8421 | L: devicetree@vger.kernel.org |
| 8425 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git | 8422 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git |
| 8423 | Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/ | ||
| 8426 | S: Maintained | 8424 | S: Maintained |
| 8427 | F: Documentation/devicetree/ | 8425 | F: Documentation/devicetree/ |
| 8428 | F: arch/*/boot/dts/ | 8426 | F: arch/*/boot/dts/ |
| @@ -1,7 +1,7 @@ | |||
| 1 | VERSION = 4 | 1 | VERSION = 4 |
| 2 | PATCHLEVEL = 7 | 2 | PATCHLEVEL = 7 |
| 3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
| 4 | EXTRAVERSION = -rc2 | 4 | EXTRAVERSION = -rc3 |
| 5 | NAME = Psychotic Stoned Sheep | 5 | NAME = Psychotic Stoned Sheep |
| 6 | 6 | ||
| 7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 0dcbacfdea4b..0d3e59f56974 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig | |||
| @@ -61,7 +61,7 @@ config RWSEM_GENERIC_SPINLOCK | |||
| 61 | def_bool y | 61 | def_bool y |
| 62 | 62 | ||
| 63 | config ARCH_DISCONTIGMEM_ENABLE | 63 | config ARCH_DISCONTIGMEM_ENABLE |
| 64 | def_bool y | 64 | def_bool n |
| 65 | 65 | ||
| 66 | config ARCH_FLATMEM_ENABLE | 66 | config ARCH_FLATMEM_ENABLE |
| 67 | def_bool y | 67 | def_bool y |
| @@ -186,9 +186,6 @@ if SMP | |||
| 186 | config ARC_HAS_COH_CACHES | 186 | config ARC_HAS_COH_CACHES |
| 187 | def_bool n | 187 | def_bool n |
| 188 | 188 | ||
| 189 | config ARC_HAS_REENTRANT_IRQ_LV2 | ||
| 190 | def_bool n | ||
| 191 | |||
| 192 | config ARC_MCIP | 189 | config ARC_MCIP |
| 193 | bool "ARConnect Multicore IP (MCIP) Support " | 190 | bool "ARConnect Multicore IP (MCIP) Support " |
| 194 | depends on ISA_ARCV2 | 191 | depends on ISA_ARCV2 |
| @@ -366,25 +363,10 @@ config NODES_SHIFT | |||
| 366 | if ISA_ARCOMPACT | 363 | if ISA_ARCOMPACT |
| 367 | 364 | ||
| 368 | config ARC_COMPACT_IRQ_LEVELS | 365 | config ARC_COMPACT_IRQ_LEVELS |
| 369 | bool "ARCompact IRQ Priorities: High(2)/Low(1)" | 366 | bool "Setup Timer IRQ as high Priority" |
| 370 | default n | 367 | default n |
| 371 | # Timer HAS to be high priority, for any other high priority config | ||
| 372 | select ARC_IRQ3_LV2 | ||
| 373 | # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy | 368 | # if SMP, LV2 enabled ONLY if ARC implementation has LV2 re-entrancy |
| 374 | depends on !SMP || ARC_HAS_REENTRANT_IRQ_LV2 | 369 | depends on !SMP |
| 375 | |||
| 376 | if ARC_COMPACT_IRQ_LEVELS | ||
| 377 | |||
| 378 | config ARC_IRQ3_LV2 | ||
| 379 | bool | ||
| 380 | |||
| 381 | config ARC_IRQ5_LV2 | ||
| 382 | bool | ||
| 383 | |||
| 384 | config ARC_IRQ6_LV2 | ||
| 385 | bool | ||
| 386 | |||
| 387 | endif #ARC_COMPACT_IRQ_LEVELS | ||
| 388 | 370 | ||
| 389 | config ARC_FPU_SAVE_RESTORE | 371 | config ARC_FPU_SAVE_RESTORE |
| 390 | bool "Enable FPU state persistence across context switch" | 372 | bool "Enable FPU state persistence across context switch" |
| @@ -407,11 +389,6 @@ config ARC_HAS_LLSC | |||
| 407 | default y | 389 | default y |
| 408 | depends on !ARC_CANT_LLSC | 390 | depends on !ARC_CANT_LLSC |
| 409 | 391 | ||
| 410 | config ARC_STAR_9000923308 | ||
| 411 | bool "Workaround for llock/scond livelock" | ||
| 412 | default n | ||
| 413 | depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC | ||
| 414 | |||
| 415 | config ARC_HAS_SWAPE | 392 | config ARC_HAS_SWAPE |
| 416 | bool "Insn: SWAPE (endian-swap)" | 393 | bool "Insn: SWAPE (endian-swap)" |
| 417 | default y | 394 | default y |
| @@ -471,7 +448,7 @@ config LINUX_LINK_BASE | |||
| 471 | 448 | ||
| 472 | config HIGHMEM | 449 | config HIGHMEM |
| 473 | bool "High Memory Support" | 450 | bool "High Memory Support" |
| 474 | select DISCONTIGMEM | 451 | select ARCH_DISCONTIGMEM_ENABLE |
| 475 | help | 452 | help |
| 476 | With ARC 2G:2G address split, only upper 2G is directly addressable by | 453 | With ARC 2G:2G address split, only upper 2G is directly addressable by |
| 477 | kernel. Enable this to potentially allow access to rest of 2G and PAE | 454 | kernel. Enable this to potentially allow access to rest of 2G and PAE |
diff --git a/arch/arc/Makefile b/arch/arc/Makefile index 02fabef2891c..d4df6be66d58 100644 --- a/arch/arc/Makefile +++ b/arch/arc/Makefile | |||
| @@ -127,7 +127,7 @@ libs-y += arch/arc/lib/ $(LIBGCC) | |||
| 127 | 127 | ||
| 128 | boot := arch/arc/boot | 128 | boot := arch/arc/boot |
| 129 | 129 | ||
| 130 | #default target for make without any arguements. | 130 | #default target for make without any arguments. |
| 131 | KBUILD_IMAGE := bootpImage | 131 | KBUILD_IMAGE := bootpImage |
| 132 | 132 | ||
| 133 | all: $(KBUILD_IMAGE) | 133 | all: $(KBUILD_IMAGE) |
diff --git a/arch/arc/boot/dts/abilis_tb100.dtsi b/arch/arc/boot/dts/abilis_tb100.dtsi index 3942634f805a..02410b211433 100644 --- a/arch/arc/boot/dts/abilis_tb100.dtsi +++ b/arch/arc/boot/dts/abilis_tb100.dtsi | |||
| @@ -23,8 +23,6 @@ | |||
| 23 | 23 | ||
| 24 | 24 | ||
| 25 | / { | 25 | / { |
| 26 | clock-frequency = <500000000>; /* 500 MHZ */ | ||
| 27 | |||
| 28 | soc100 { | 26 | soc100 { |
| 29 | bus-frequency = <166666666>; | 27 | bus-frequency = <166666666>; |
| 30 | 28 | ||
diff --git a/arch/arc/boot/dts/abilis_tb101.dtsi b/arch/arc/boot/dts/abilis_tb101.dtsi index b0467229a5c4..f9e7686044eb 100644 --- a/arch/arc/boot/dts/abilis_tb101.dtsi +++ b/arch/arc/boot/dts/abilis_tb101.dtsi | |||
| @@ -23,8 +23,6 @@ | |||
| 23 | 23 | ||
| 24 | 24 | ||
| 25 | / { | 25 | / { |
| 26 | clock-frequency = <500000000>; /* 500 MHZ */ | ||
| 27 | |||
| 28 | soc100 { | 26 | soc100 { |
| 29 | bus-frequency = <166666666>; | 27 | bus-frequency = <166666666>; |
| 30 | 28 | ||
diff --git a/arch/arc/boot/dts/axc001.dtsi b/arch/arc/boot/dts/axc001.dtsi index 3e02f152edcb..6ae2c476ad82 100644 --- a/arch/arc/boot/dts/axc001.dtsi +++ b/arch/arc/boot/dts/axc001.dtsi | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | 15 | ||
| 16 | / { | 16 | / { |
| 17 | compatible = "snps,arc"; | 17 | compatible = "snps,arc"; |
| 18 | clock-frequency = <750000000>; /* 750 MHZ */ | ||
| 19 | #address-cells = <1>; | 18 | #address-cells = <1>; |
| 20 | #size-cells = <1>; | 19 | #size-cells = <1>; |
| 21 | 20 | ||
diff --git a/arch/arc/boot/dts/axc003.dtsi b/arch/arc/boot/dts/axc003.dtsi index 378e455a94c4..14df46f141bf 100644 --- a/arch/arc/boot/dts/axc003.dtsi +++ b/arch/arc/boot/dts/axc003.dtsi | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | 14 | ||
| 15 | / { | 15 | / { |
| 16 | compatible = "snps,arc"; | 16 | compatible = "snps,arc"; |
| 17 | clock-frequency = <90000000>; | ||
| 18 | #address-cells = <1>; | 17 | #address-cells = <1>; |
| 19 | #size-cells = <1>; | 18 | #size-cells = <1>; |
| 20 | 19 | ||
diff --git a/arch/arc/boot/dts/axc003_idu.dtsi b/arch/arc/boot/dts/axc003_idu.dtsi index 64c94b2860ab..3d6cfa32bf51 100644 --- a/arch/arc/boot/dts/axc003_idu.dtsi +++ b/arch/arc/boot/dts/axc003_idu.dtsi | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | 14 | ||
| 15 | / { | 15 | / { |
| 16 | compatible = "snps,arc"; | 16 | compatible = "snps,arc"; |
| 17 | clock-frequency = <90000000>; | ||
| 18 | #address-cells = <1>; | 17 | #address-cells = <1>; |
| 19 | #size-cells = <1>; | 18 | #size-cells = <1>; |
| 20 | 19 | ||
diff --git a/arch/arc/boot/dts/eznps.dts b/arch/arc/boot/dts/eznps.dts index b89f6c3eb352..1e0d225791c1 100644 --- a/arch/arc/boot/dts/eznps.dts +++ b/arch/arc/boot/dts/eznps.dts | |||
| @@ -18,7 +18,6 @@ | |||
| 18 | 18 | ||
| 19 | / { | 19 | / { |
| 20 | compatible = "ezchip,arc-nps"; | 20 | compatible = "ezchip,arc-nps"; |
| 21 | clock-frequency = <83333333>; /* 83.333333 MHZ */ | ||
| 22 | #address-cells = <1>; | 21 | #address-cells = <1>; |
| 23 | #size-cells = <1>; | 22 | #size-cells = <1>; |
| 24 | interrupt-parent = <&intc>; | 23 | interrupt-parent = <&intc>; |
diff --git a/arch/arc/boot/dts/nsim_700.dts b/arch/arc/boot/dts/nsim_700.dts index 5d5e373e0ebc..63970513e4ae 100644 --- a/arch/arc/boot/dts/nsim_700.dts +++ b/arch/arc/boot/dts/nsim_700.dts | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | 11 | ||
| 12 | / { | 12 | / { |
| 13 | compatible = "snps,nsim"; | 13 | compatible = "snps,nsim"; |
| 14 | clock-frequency = <80000000>; /* 80 MHZ */ | ||
| 15 | #address-cells = <1>; | 14 | #address-cells = <1>; |
| 16 | #size-cells = <1>; | 15 | #size-cells = <1>; |
| 17 | interrupt-parent = <&core_intc>; | 16 | interrupt-parent = <&core_intc>; |
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts index b5b060adce8a..763d66c883da 100644 --- a/arch/arc/boot/dts/nsimosci.dts +++ b/arch/arc/boot/dts/nsimosci.dts | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | 11 | ||
| 12 | / { | 12 | / { |
| 13 | compatible = "snps,nsimosci"; | 13 | compatible = "snps,nsimosci"; |
| 14 | clock-frequency = <20000000>; /* 20 MHZ */ | ||
| 15 | #address-cells = <1>; | 14 | #address-cells = <1>; |
| 16 | #size-cells = <1>; | 15 | #size-cells = <1>; |
| 17 | interrupt-parent = <&core_intc>; | 16 | interrupt-parent = <&core_intc>; |
diff --git a/arch/arc/boot/dts/nsimosci_hs.dts b/arch/arc/boot/dts/nsimosci_hs.dts index 325e73090a18..4eb97c584b18 100644 --- a/arch/arc/boot/dts/nsimosci_hs.dts +++ b/arch/arc/boot/dts/nsimosci_hs.dts | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | 11 | ||
| 12 | / { | 12 | / { |
| 13 | compatible = "snps,nsimosci_hs"; | 13 | compatible = "snps,nsimosci_hs"; |
| 14 | clock-frequency = <20000000>; /* 20 MHZ */ | ||
| 15 | #address-cells = <1>; | 14 | #address-cells = <1>; |
| 16 | #size-cells = <1>; | 15 | #size-cells = <1>; |
| 17 | interrupt-parent = <&core_intc>; | 16 | interrupt-parent = <&core_intc>; |
diff --git a/arch/arc/boot/dts/nsimosci_hs_idu.dts b/arch/arc/boot/dts/nsimosci_hs_idu.dts index ee03d7126581..853f897eb2a3 100644 --- a/arch/arc/boot/dts/nsimosci_hs_idu.dts +++ b/arch/arc/boot/dts/nsimosci_hs_idu.dts | |||
| @@ -11,7 +11,6 @@ | |||
| 11 | 11 | ||
| 12 | / { | 12 | / { |
| 13 | compatible = "snps,nsimosci_hs"; | 13 | compatible = "snps,nsimosci_hs"; |
| 14 | clock-frequency = <5000000>; /* 5 MHZ */ | ||
| 15 | #address-cells = <1>; | 14 | #address-cells = <1>; |
| 16 | #size-cells = <1>; | 15 | #size-cells = <1>; |
| 17 | interrupt-parent = <&core_intc>; | 16 | interrupt-parent = <&core_intc>; |
diff --git a/arch/arc/boot/dts/skeleton.dtsi b/arch/arc/boot/dts/skeleton.dtsi index 3a10cc633e2b..65808fe0a290 100644 --- a/arch/arc/boot/dts/skeleton.dtsi +++ b/arch/arc/boot/dts/skeleton.dtsi | |||
| @@ -13,7 +13,6 @@ | |||
| 13 | 13 | ||
| 14 | / { | 14 | / { |
| 15 | compatible = "snps,arc"; | 15 | compatible = "snps,arc"; |
| 16 | clock-frequency = <80000000>; /* 80 MHZ */ | ||
| 17 | #address-cells = <1>; | 16 | #address-cells = <1>; |
| 18 | #size-cells = <1>; | 17 | #size-cells = <1>; |
| 19 | chosen { }; | 18 | chosen { }; |
diff --git a/arch/arc/boot/dts/skeleton_hs.dtsi b/arch/arc/boot/dts/skeleton_hs.dtsi index 71fd308a9298..2dfe8037dfbb 100644 --- a/arch/arc/boot/dts/skeleton_hs.dtsi +++ b/arch/arc/boot/dts/skeleton_hs.dtsi | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | 8 | ||
| 9 | / { | 9 | / { |
| 10 | compatible = "snps,arc"; | 10 | compatible = "snps,arc"; |
| 11 | clock-frequency = <80000000>; /* 80 MHZ */ | ||
| 12 | #address-cells = <1>; | 11 | #address-cells = <1>; |
| 13 | #size-cells = <1>; | 12 | #size-cells = <1>; |
| 14 | chosen { }; | 13 | chosen { }; |
diff --git a/arch/arc/boot/dts/skeleton_hs_idu.dtsi b/arch/arc/boot/dts/skeleton_hs_idu.dtsi index d1cb25a66989..4c11079f3565 100644 --- a/arch/arc/boot/dts/skeleton_hs_idu.dtsi +++ b/arch/arc/boot/dts/skeleton_hs_idu.dtsi | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | 8 | ||
| 9 | / { | 9 | / { |
| 10 | compatible = "snps,arc"; | 10 | compatible = "snps,arc"; |
| 11 | clock-frequency = <80000000>; /* 80 MHZ */ | ||
| 12 | #address-cells = <1>; | 11 | #address-cells = <1>; |
| 13 | #size-cells = <1>; | 12 | #size-cells = <1>; |
| 14 | chosen { }; | 13 | chosen { }; |
diff --git a/arch/arc/boot/dts/vdk_axc003.dtsi b/arch/arc/boot/dts/vdk_axc003.dtsi index ad4ee43bd2ac..0fd6ba985b16 100644 --- a/arch/arc/boot/dts/vdk_axc003.dtsi +++ b/arch/arc/boot/dts/vdk_axc003.dtsi | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | 14 | ||
| 15 | / { | 15 | / { |
| 16 | compatible = "snps,arc"; | 16 | compatible = "snps,arc"; |
| 17 | clock-frequency = <50000000>; | ||
| 18 | #address-cells = <1>; | 17 | #address-cells = <1>; |
| 19 | #size-cells = <1>; | 18 | #size-cells = <1>; |
| 20 | 19 | ||
diff --git a/arch/arc/boot/dts/vdk_axc003_idu.dtsi b/arch/arc/boot/dts/vdk_axc003_idu.dtsi index a3cb6263c581..82214cd7ba0c 100644 --- a/arch/arc/boot/dts/vdk_axc003_idu.dtsi +++ b/arch/arc/boot/dts/vdk_axc003_idu.dtsi | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | 15 | ||
| 16 | / { | 16 | / { |
| 17 | compatible = "snps,arc"; | 17 | compatible = "snps,arc"; |
| 18 | clock-frequency = <50000000>; | ||
| 19 | #address-cells = <1>; | 18 | #address-cells = <1>; |
| 20 | #size-cells = <1>; | 19 | #size-cells = <1>; |
| 21 | 20 | ||
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h index 5f3dcbbc0cc9..dd683995bc9d 100644 --- a/arch/arc/include/asm/atomic.h +++ b/arch/arc/include/asm/atomic.h | |||
| @@ -25,50 +25,17 @@ | |||
| 25 | 25 | ||
| 26 | #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) | 26 | #define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) |
| 27 | 27 | ||
| 28 | #ifdef CONFIG_ARC_STAR_9000923308 | ||
| 29 | |||
| 30 | #define SCOND_FAIL_RETRY_VAR_DEF \ | ||
| 31 | unsigned int delay = 1, tmp; \ | ||
| 32 | |||
| 33 | #define SCOND_FAIL_RETRY_ASM \ | ||
| 34 | " bz 4f \n" \ | ||
| 35 | " ; --- scond fail delay --- \n" \ | ||
| 36 | " mov %[tmp], %[delay] \n" /* tmp = delay */ \ | ||
| 37 | "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ | ||
| 38 | " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ | ||
| 39 | " rol %[delay], %[delay] \n" /* delay *= 2 */ \ | ||
| 40 | " b 1b \n" /* start over */ \ | ||
| 41 | "4: ; --- success --- \n" \ | ||
| 42 | |||
| 43 | #define SCOND_FAIL_RETRY_VARS \ | ||
| 44 | ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \ | ||
| 45 | |||
| 46 | #else /* !CONFIG_ARC_STAR_9000923308 */ | ||
| 47 | |||
| 48 | #define SCOND_FAIL_RETRY_VAR_DEF | ||
| 49 | |||
| 50 | #define SCOND_FAIL_RETRY_ASM \ | ||
| 51 | " bnz 1b \n" \ | ||
| 52 | |||
| 53 | #define SCOND_FAIL_RETRY_VARS | ||
| 54 | |||
| 55 | #endif | ||
| 56 | |||
| 57 | #define ATOMIC_OP(op, c_op, asm_op) \ | 28 | #define ATOMIC_OP(op, c_op, asm_op) \ |
| 58 | static inline void atomic_##op(int i, atomic_t *v) \ | 29 | static inline void atomic_##op(int i, atomic_t *v) \ |
| 59 | { \ | 30 | { \ |
| 60 | unsigned int val; \ | 31 | unsigned int val; \ |
| 61 | SCOND_FAIL_RETRY_VAR_DEF \ | ||
| 62 | \ | 32 | \ |
| 63 | __asm__ __volatile__( \ | 33 | __asm__ __volatile__( \ |
| 64 | "1: llock %[val], [%[ctr]] \n" \ | 34 | "1: llock %[val], [%[ctr]] \n" \ |
| 65 | " " #asm_op " %[val], %[val], %[i] \n" \ | 35 | " " #asm_op " %[val], %[val], %[i] \n" \ |
| 66 | " scond %[val], [%[ctr]] \n" \ | 36 | " scond %[val], [%[ctr]] \n" \ |
| 67 | " \n" \ | 37 | " bnz 1b \n" \ |
| 68 | SCOND_FAIL_RETRY_ASM \ | ||
| 69 | \ | ||
| 70 | : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \ | 38 | : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \ |
| 71 | SCOND_FAIL_RETRY_VARS \ | ||
| 72 | : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \ | 39 | : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \ |
| 73 | [i] "ir" (i) \ | 40 | [i] "ir" (i) \ |
| 74 | : "cc"); \ | 41 | : "cc"); \ |
| @@ -77,8 +44,7 @@ static inline void atomic_##op(int i, atomic_t *v) \ | |||
| 77 | #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ | 44 | #define ATOMIC_OP_RETURN(op, c_op, asm_op) \ |
| 78 | static inline int atomic_##op##_return(int i, atomic_t *v) \ | 45 | static inline int atomic_##op##_return(int i, atomic_t *v) \ |
| 79 | { \ | 46 | { \ |
| 80 | unsigned int val; \ | 47 | unsigned int val; \ |
| 81 | SCOND_FAIL_RETRY_VAR_DEF \ | ||
| 82 | \ | 48 | \ |
| 83 | /* \ | 49 | /* \ |
| 84 | * Explicit full memory barrier needed before/after as \ | 50 | * Explicit full memory barrier needed before/after as \ |
| @@ -90,11 +56,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \ | |||
| 90 | "1: llock %[val], [%[ctr]] \n" \ | 56 | "1: llock %[val], [%[ctr]] \n" \ |
| 91 | " " #asm_op " %[val], %[val], %[i] \n" \ | 57 | " " #asm_op " %[val], %[val], %[i] \n" \ |
| 92 | " scond %[val], [%[ctr]] \n" \ | 58 | " scond %[val], [%[ctr]] \n" \ |
| 93 | " \n" \ | 59 | " bnz 1b \n" \ |
| 94 | SCOND_FAIL_RETRY_ASM \ | ||
| 95 | \ | ||
| 96 | : [val] "=&r" (val) \ | 60 | : [val] "=&r" (val) \ |
| 97 | SCOND_FAIL_RETRY_VARS \ | ||
| 98 | : [ctr] "r" (&v->counter), \ | 61 | : [ctr] "r" (&v->counter), \ |
| 99 | [i] "ir" (i) \ | 62 | [i] "ir" (i) \ |
| 100 | : "cc"); \ | 63 | : "cc"); \ |
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h index e0e1faf03c50..14c310f2e0b1 100644 --- a/arch/arc/include/asm/entry-compact.h +++ b/arch/arc/include/asm/entry-compact.h | |||
| @@ -76,8 +76,8 @@ | |||
| 76 | * We need to be a bit more cautious here. What if a kernel bug in | 76 | * We need to be a bit more cautious here. What if a kernel bug in |
| 77 | * L1 ISR, caused SP to go whaco (some small value which looks like | 77 | * L1 ISR, caused SP to go whaco (some small value which looks like |
| 78 | * USER stk) and then we take L2 ISR. | 78 | * USER stk) and then we take L2 ISR. |
| 79 | * Above brlo alone would treat it as a valid L1-L2 sceanrio | 79 | * Above brlo alone would treat it as a valid L1-L2 scenario |
| 80 | * instead of shouting alound | 80 | * instead of shouting around |
| 81 | * The only feasible way is to make sure this L2 happened in | 81 | * The only feasible way is to make sure this L2 happened in |
| 82 | * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in | 82 | * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in |
| 83 | * L1 ISR before it switches stack | 83 | * L1 ISR before it switches stack |
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h index 1fd467ef658f..b0b87f2447f5 100644 --- a/arch/arc/include/asm/mmu_context.h +++ b/arch/arc/include/asm/mmu_context.h | |||
| @@ -83,7 +83,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm) | |||
| 83 | local_flush_tlb_all(); | 83 | local_flush_tlb_all(); |
| 84 | 84 | ||
| 85 | /* | 85 | /* |
| 86 | * Above checke for rollover of 8 bit ASID in 32 bit container. | 86 | * Above check for rollover of 8 bit ASID in 32 bit container. |
| 87 | * If the container itself wrapped around, set it to a non zero | 87 | * If the container itself wrapped around, set it to a non zero |
| 88 | * "generation" to distinguish from no context | 88 | * "generation" to distinguish from no context |
| 89 | */ | 89 | */ |
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index 034bbdc0ff61..858f98ef7f1b 100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h | |||
| @@ -47,7 +47,7 @@ | |||
| 47 | * Page Tables are purely for Linux VM's consumption and the bits below are | 47 | * Page Tables are purely for Linux VM's consumption and the bits below are |
| 48 | * suited to that (uniqueness). Hence some are not implemented in the TLB and | 48 | * suited to that (uniqueness). Hence some are not implemented in the TLB and |
| 49 | * some have different value in TLB. | 49 | * some have different value in TLB. |
| 50 | * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible becoz they live in | 50 | * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live in |
| 51 | * seperate PD0 and PD1, which combined forms a translation entry) | 51 | * seperate PD0 and PD1, which combined forms a translation entry) |
| 52 | * while for PTE perspective, they are 8 and 9 respectively | 52 | * while for PTE perspective, they are 8 and 9 respectively |
| 53 | * with MMU v3: Most bits (except SHARED) represent the exact hardware pos | 53 | * with MMU v3: Most bits (except SHARED) represent the exact hardware pos |
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h index f9048994b22f..16b630fbeb6a 100644 --- a/arch/arc/include/asm/processor.h +++ b/arch/arc/include/asm/processor.h | |||
| @@ -78,7 +78,7 @@ struct task_struct; | |||
| 78 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) | 78 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) |
| 79 | 79 | ||
| 80 | /* | 80 | /* |
| 81 | * Where abouts of Task's sp, fp, blink when it was last seen in kernel mode. | 81 | * Where about of Task's sp, fp, blink when it was last seen in kernel mode. |
| 82 | * Look in process.c for details of kernel stack layout | 82 | * Look in process.c for details of kernel stack layout |
| 83 | */ | 83 | */ |
| 84 | #define TSK_K_ESP(tsk) (tsk->thread.ksp) | 84 | #define TSK_K_ESP(tsk) (tsk->thread.ksp) |
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h index 991380438d6b..89fdd1b0a76e 100644 --- a/arch/arc/include/asm/smp.h +++ b/arch/arc/include/asm/smp.h | |||
| @@ -86,7 +86,7 @@ static inline const char *arc_platform_smp_cpuinfo(void) | |||
| 86 | * (1) These insn were introduced only in 4.10 release. So for older released | 86 | * (1) These insn were introduced only in 4.10 release. So for older released |
| 87 | * support needed. | 87 | * support needed. |
| 88 | * | 88 | * |
| 89 | * (2) In a SMP setup, the LLOCK/SCOND atomiticity across CPUs needs to be | 89 | * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be |
| 90 | * gaurantted by the platform (not something which core handles). | 90 | * gaurantted by the platform (not something which core handles). |
| 91 | * Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ | 91 | * Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ |
| 92 | * disabling for atomicity. | 92 | * disabling for atomicity. |
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h index 800e7c430ca5..cded4a9b5438 100644 --- a/arch/arc/include/asm/spinlock.h +++ b/arch/arc/include/asm/spinlock.h | |||
| @@ -20,11 +20,6 @@ | |||
| 20 | 20 | ||
| 21 | #ifdef CONFIG_ARC_HAS_LLSC | 21 | #ifdef CONFIG_ARC_HAS_LLSC |
| 22 | 22 | ||
| 23 | /* | ||
| 24 | * A normal LLOCK/SCOND based system, w/o need for livelock workaround | ||
| 25 | */ | ||
| 26 | #ifndef CONFIG_ARC_STAR_9000923308 | ||
| 27 | |||
| 28 | static inline void arch_spin_lock(arch_spinlock_t *lock) | 23 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
| 29 | { | 24 | { |
| 30 | unsigned int val; | 25 | unsigned int val; |
| @@ -238,293 +233,6 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) | |||
| 238 | smp_mb(); | 233 | smp_mb(); |
| 239 | } | 234 | } |
| 240 | 235 | ||
| 241 | #else /* CONFIG_ARC_STAR_9000923308 */ | ||
| 242 | |||
| 243 | /* | ||
| 244 | * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping | ||
| 245 | * coherency transactions in the SCU. The exclusive line state keeps rotating | ||
| 246 | * among contenting cores leading to a never ending cycle. So break the cycle | ||
| 247 | * by deferring the retry of failed exclusive access (SCOND). The actual delay | ||
| 248 | * needed is function of number of contending cores as well as the unrelated | ||
| 249 | * coherency traffic from other cores. To keep the code simple, start off with | ||
| 250 | * small delay of 1 which would suffice most cases and in case of contention | ||
| 251 | * double the delay. Eventually the delay is sufficient such that the coherency | ||
| 252 | * pipeline is drained, thus a subsequent exclusive access would succeed. | ||
| 253 | */ | ||
| 254 | |||
| 255 | #define SCOND_FAIL_RETRY_VAR_DEF \ | ||
| 256 | unsigned int delay, tmp; \ | ||
| 257 | |||
| 258 | #define SCOND_FAIL_RETRY_ASM \ | ||
| 259 | " ; --- scond fail delay --- \n" \ | ||
| 260 | " mov %[tmp], %[delay] \n" /* tmp = delay */ \ | ||
| 261 | "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \ | ||
| 262 | " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \ | ||
| 263 | " rol %[delay], %[delay] \n" /* delay *= 2 */ \ | ||
| 264 | " b 1b \n" /* start over */ \ | ||
| 265 | " \n" \ | ||
| 266 | "4: ; --- done --- \n" \ | ||
| 267 | |||
| 268 | #define SCOND_FAIL_RETRY_VARS \ | ||
| 269 | ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \ | ||
| 270 | |||
| 271 | static inline void arch_spin_lock(arch_spinlock_t *lock) | ||
| 272 | { | ||
| 273 | unsigned int val; | ||
| 274 | SCOND_FAIL_RETRY_VAR_DEF; | ||
| 275 | |||
| 276 | smp_mb(); | ||
| 277 | |||
| 278 | __asm__ __volatile__( | ||
| 279 | "0: mov %[delay], 1 \n" | ||
| 280 | "1: llock %[val], [%[slock]] \n" | ||
| 281 | " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */ | ||
| 282 | " scond %[LOCKED], [%[slock]] \n" /* acquire */ | ||
| 283 | " bz 4f \n" /* done */ | ||
| 284 | " \n" | ||
| 285 | SCOND_FAIL_RETRY_ASM | ||
| 286 | |||
| 287 | : [val] "=&r" (val) | ||
| 288 | SCOND_FAIL_RETRY_VARS | ||
| 289 | : [slock] "r" (&(lock->slock)), | ||
| 290 | [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) | ||
| 291 | : "memory", "cc"); | ||
| 292 | |||
| 293 | smp_mb(); | ||
| 294 | } | ||
| 295 | |||
| 296 | /* 1 - lock taken successfully */ | ||
| 297 | static inline int arch_spin_trylock(arch_spinlock_t *lock) | ||
| 298 | { | ||
| 299 | unsigned int val, got_it = 0; | ||
| 300 | SCOND_FAIL_RETRY_VAR_DEF; | ||
| 301 | |||
| 302 | smp_mb(); | ||
| 303 | |||
| 304 | __asm__ __volatile__( | ||
| 305 | "0: mov %[delay], 1 \n" | ||
| 306 | "1: llock %[val], [%[slock]] \n" | ||
| 307 | " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */ | ||
| 308 | " scond %[LOCKED], [%[slock]] \n" /* acquire */ | ||
| 309 | " bz.d 4f \n" | ||
| 310 | " mov.z %[got_it], 1 \n" /* got it */ | ||
| 311 | " \n" | ||
| 312 | SCOND_FAIL_RETRY_ASM | ||
| 313 | |||
| 314 | : [val] "=&r" (val), | ||
| 315 | [got_it] "+&r" (got_it) | ||
| 316 | SCOND_FAIL_RETRY_VARS | ||
| 317 | : [slock] "r" (&(lock->slock)), | ||
| 318 | [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__) | ||
| 319 | : "memory", "cc"); | ||
| 320 | |||
| 321 | smp_mb(); | ||
| 322 | |||
| 323 | return got_it; | ||
| 324 | } | ||
| 325 | |||
| 326 | static inline void arch_spin_unlock(arch_spinlock_t *lock) | ||
| 327 | { | ||
| 328 | smp_mb(); | ||
| 329 | |||
| 330 | lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__; | ||
| 331 | |||
| 332 | smp_mb(); | ||
| 333 | } | ||
| 334 | |||
| 335 | /* | ||
| 336 | * Read-write spinlocks, allowing multiple readers but only one writer. | ||
| 337 | * Unfair locking as Writers could be starved indefinitely by Reader(s) | ||
| 338 | */ | ||
| 339 | |||
| 340 | static inline void arch_read_lock(arch_rwlock_t *rw) | ||
| 341 | { | ||
| 342 | unsigned int val; | ||
| 343 | SCOND_FAIL_RETRY_VAR_DEF; | ||
| 344 | |||
| 345 | smp_mb(); | ||
| 346 | |||
| 347 | /* | ||
| 348 | * zero means writer holds the lock exclusively, deny Reader. | ||
| 349 | * Otherwise grant lock to first/subseq reader | ||
| 350 | * | ||
| 351 | * if (rw->counter > 0) { | ||
| 352 | * rw->counter--; | ||
| 353 | * ret = 1; | ||
| 354 | * } | ||
| 355 | */ | ||
| 356 | |||
| 357 | __asm__ __volatile__( | ||
| 358 | "0: mov %[delay], 1 \n" | ||
| 359 | "1: llock %[val], [%[rwlock]] \n" | ||
| 360 | " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */ | ||
| 361 | " sub %[val], %[val], 1 \n" /* reader lock */ | ||
| 362 | " scond %[val], [%[rwlock]] \n" | ||
| 363 | " bz 4f \n" /* done */ | ||
| 364 | " \n" | ||
| 365 | SCOND_FAIL_RETRY_ASM | ||
| 366 | |||
| 367 | : [val] "=&r" (val) | ||
| 368 | SCOND_FAIL_RETRY_VARS | ||
| 369 | : [rwlock] "r" (&(rw->counter)), | ||
| 370 | [WR_LOCKED] "ir" (0) | ||
| 371 | : "memory", "cc"); | ||
| 372 | |||
| 373 | smp_mb(); | ||
| 374 | } | ||
| 375 | |||
| 376 | /* 1 - lock taken successfully */ | ||
| 377 | static inline int arch_read_trylock(arch_rwlock_t *rw) | ||
| 378 | { | ||
| 379 | unsigned int val, got_it = 0; | ||
| 380 | SCOND_FAIL_RETRY_VAR_DEF; | ||
| 381 | |||
| 382 | smp_mb(); | ||
| 383 | |||
| 384 | __asm__ __volatile__( | ||
| 385 | "0: mov %[delay], 1 \n" | ||
| 386 | "1: llock %[val], [%[rwlock]] \n" | ||
| 387 | " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */ | ||
| 388 | " sub %[val], %[val], 1 \n" /* counter-- */ | ||
| 389 | " scond %[val], [%[rwlock]] \n" | ||
| 390 | " bz.d 4f \n" | ||
| 391 | " mov.z %[got_it], 1 \n" /* got it */ | ||
| 392 | " \n" | ||
| 393 | SCOND_FAIL_RETRY_ASM | ||
| 394 | |||
| 395 | : [val] "=&r" (val), | ||
| 396 | [got_it] "+&r" (got_it) | ||
| 397 | SCOND_FAIL_RETRY_VARS | ||
| 398 | : [rwlock] "r" (&(rw->counter)), | ||
| 399 | [WR_LOCKED] "ir" (0) | ||
| 400 | : "memory", "cc"); | ||
| 401 | |||
| 402 | smp_mb(); | ||
| 403 | |||
| 404 | return got_it; | ||
| 405 | } | ||
| 406 | |||
| 407 | static inline void arch_write_lock(arch_rwlock_t *rw) | ||
| 408 | { | ||
| 409 | unsigned int val; | ||
| 410 | SCOND_FAIL_RETRY_VAR_DEF; | ||
| 411 | |||
| 412 | smp_mb(); | ||
| 413 | |||
| 414 | /* | ||
| 415 | * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__), | ||
| 416 | * deny writer. Otherwise if unlocked grant to writer | ||
| 417 | * Hence the claim that Linux rwlocks are unfair to writers. | ||
| 418 | * (can be starved for an indefinite time by readers). | ||
| 419 | * | ||
| 420 | * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) { | ||
| 421 | * rw->counter = 0; | ||
| 422 | * ret = 1; | ||
| 423 | * } | ||
| 424 | */ | ||
| 425 | |||
| 426 | __asm__ __volatile__( | ||
| 427 | "0: mov %[delay], 1 \n" | ||
| 428 | "1: llock %[val], [%[rwlock]] \n" | ||
| 429 | " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */ | ||
| 430 | " mov %[val], %[WR_LOCKED] \n" | ||
| 431 | " scond %[val], [%[rwlock]] \n" | ||
| 432 | " bz 4f \n" | ||
| 433 | " \n" | ||
| 434 | SCOND_FAIL_RETRY_ASM | ||
| 435 | |||
| 436 | : [val] "=&r" (val) | ||
| 437 | SCOND_FAIL_RETRY_VARS | ||
| 438 | : [rwlock] "r" (&(rw->counter)), | ||
| 439 | [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), | ||
| 440 | [WR_LOCKED] "ir" (0) | ||
| 441 | : "memory", "cc"); | ||
| 442 | |||
| 443 | smp_mb(); | ||
| 444 | } | ||
| 445 | |||
| 446 | /* 1 - lock taken successfully */ | ||
| 447 | static inline int arch_write_trylock(arch_rwlock_t *rw) | ||
| 448 | { | ||
| 449 | unsigned int val, got_it = 0; | ||
| 450 | SCOND_FAIL_RETRY_VAR_DEF; | ||
| 451 | |||
| 452 | smp_mb(); | ||
| 453 | |||
| 454 | __asm__ __volatile__( | ||
| 455 | "0: mov %[delay], 1 \n" | ||
| 456 | "1: llock %[val], [%[rwlock]] \n" | ||
| 457 | " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */ | ||
| 458 | " mov %[val], %[WR_LOCKED] \n" | ||
| 459 | " scond %[val], [%[rwlock]] \n" | ||
| 460 | " bz.d 4f \n" | ||
| 461 | " mov.z %[got_it], 1 \n" /* got it */ | ||
| 462 | " \n" | ||
| 463 | SCOND_FAIL_RETRY_ASM | ||
| 464 | |||
| 465 | : [val] "=&r" (val), | ||
| 466 | [got_it] "+&r" (got_it) | ||
| 467 | SCOND_FAIL_RETRY_VARS | ||
| 468 | : [rwlock] "r" (&(rw->counter)), | ||
| 469 | [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__), | ||
| 470 | [WR_LOCKED] "ir" (0) | ||
| 471 | : "memory", "cc"); | ||
| 472 | |||
| 473 | smp_mb(); | ||
| 474 | |||
| 475 | return got_it; | ||
| 476 | } | ||
| 477 | |||
| 478 | static inline void arch_read_unlock(arch_rwlock_t *rw) | ||
| 479 | { | ||
| 480 | unsigned int val; | ||
| 481 | |||
| 482 | smp_mb(); | ||
| 483 | |||
| 484 | /* | ||
| 485 | * rw->counter++; | ||
| 486 | */ | ||
| 487 | __asm__ __volatile__( | ||
| 488 | "1: llock %[val], [%[rwlock]] \n" | ||
| 489 | " add %[val], %[val], 1 \n" | ||
| 490 | " scond %[val], [%[rwlock]] \n" | ||
| 491 | " bnz 1b \n" | ||
| 492 | " \n" | ||
| 493 | : [val] "=&r" (val) | ||
| 494 | : [rwlock] "r" (&(rw->counter)) | ||
| 495 | : "memory", "cc"); | ||
| 496 | |||
| 497 | smp_mb(); | ||
| 498 | } | ||
| 499 | |||
| 500 | static inline void arch_write_unlock(arch_rwlock_t *rw) | ||
| 501 | { | ||
| 502 | unsigned int val; | ||
| 503 | |||
| 504 | smp_mb(); | ||
| 505 | |||
| 506 | /* | ||
| 507 | * rw->counter = __ARCH_RW_LOCK_UNLOCKED__; | ||
| 508 | */ | ||
| 509 | __asm__ __volatile__( | ||
| 510 | "1: llock %[val], [%[rwlock]] \n" | ||
| 511 | " scond %[UNLOCKED], [%[rwlock]]\n" | ||
| 512 | " bnz 1b \n" | ||
| 513 | " \n" | ||
| 514 | : [val] "=&r" (val) | ||
| 515 | : [rwlock] "r" (&(rw->counter)), | ||
| 516 | [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__) | ||
| 517 | : "memory", "cc"); | ||
| 518 | |||
| 519 | smp_mb(); | ||
| 520 | } | ||
| 521 | |||
| 522 | #undef SCOND_FAIL_RETRY_VAR_DEF | ||
| 523 | #undef SCOND_FAIL_RETRY_ASM | ||
| 524 | #undef SCOND_FAIL_RETRY_VARS | ||
| 525 | |||
| 526 | #endif /* CONFIG_ARC_STAR_9000923308 */ | ||
| 527 | |||
| 528 | #else /* !CONFIG_ARC_HAS_LLSC */ | 236 | #else /* !CONFIG_ARC_HAS_LLSC */ |
| 529 | 237 | ||
| 530 | static inline void arch_spin_lock(arch_spinlock_t *lock) | 238 | static inline void arch_spin_lock(arch_spinlock_t *lock) |
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h index 3af67455659a..2d79e527fa50 100644 --- a/arch/arc/include/asm/thread_info.h +++ b/arch/arc/include/asm/thread_info.h | |||
| @@ -103,7 +103,7 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void) | |||
| 103 | 103 | ||
| 104 | /* | 104 | /* |
| 105 | * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it. | 105 | * _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it. |
| 106 | * SYSCALL_TRACE is anways seperately/unconditionally tested right after a | 106 | * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a |
| 107 | * syscall, so all that reamins to be tested is _TIF_WORK_MASK | 107 | * syscall, so all that reamins to be tested is _TIF_WORK_MASK |
| 108 | */ | 108 | */ |
| 109 | 109 | ||
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index d1da6032b715..a78d5670884f 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) | 32 | #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) |
| 33 | 33 | ||
| 34 | /* | 34 | /* |
| 35 | * Algorthmically, for __user_ok() we want do: | 35 | * Algorithmically, for __user_ok() we want do: |
| 36 | * (start < TASK_SIZE) && (start+len < TASK_SIZE) | 36 | * (start < TASK_SIZE) && (start+len < TASK_SIZE) |
| 37 | * where TASK_SIZE could either be retrieved from thread_info->addr_limit or | 37 | * where TASK_SIZE could either be retrieved from thread_info->addr_limit or |
| 38 | * emitted directly in code. | 38 | * emitted directly in code. |
diff --git a/arch/arc/include/uapi/asm/swab.h b/arch/arc/include/uapi/asm/swab.h index 095599a73195..71f3918b0fc3 100644 --- a/arch/arc/include/uapi/asm/swab.h +++ b/arch/arc/include/uapi/asm/swab.h | |||
| @@ -74,7 +74,7 @@ | |||
| 74 | __tmp ^ __in; \ | 74 | __tmp ^ __in; \ |
| 75 | }) | 75 | }) |
| 76 | 76 | ||
| 77 | #elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bwap instruction */ | 77 | #elif (ARC_BSWAP_TYPE == 2) /* Custom single cycle bswap instruction */ |
| 78 | 78 | ||
| 79 | #define __arch_swab32(x) \ | 79 | #define __arch_swab32(x) \ |
| 80 | ({ \ | 80 | ({ \ |
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S index 0cb0abaa0479..98812c1248df 100644 --- a/arch/arc/kernel/entry-compact.S +++ b/arch/arc/kernel/entry-compact.S | |||
| @@ -91,27 +91,13 @@ VECTOR mem_service ; 0x8, Mem exception (0x1) | |||
| 91 | VECTOR instr_service ; 0x10, Instrn Error (0x2) | 91 | VECTOR instr_service ; 0x10, Instrn Error (0x2) |
| 92 | 92 | ||
| 93 | ; ******************** Device ISRs ********************** | 93 | ; ******************** Device ISRs ********************** |
| 94 | #ifdef CONFIG_ARC_IRQ3_LV2 | 94 | #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS |
| 95 | VECTOR handle_interrupt_level2 | ||
| 96 | #else | ||
| 97 | VECTOR handle_interrupt_level1 | ||
| 98 | #endif | ||
| 99 | |||
| 100 | VECTOR handle_interrupt_level1 | ||
| 101 | |||
| 102 | #ifdef CONFIG_ARC_IRQ5_LV2 | ||
| 103 | VECTOR handle_interrupt_level2 | ||
| 104 | #else | ||
| 105 | VECTOR handle_interrupt_level1 | ||
| 106 | #endif | ||
| 107 | |||
| 108 | #ifdef CONFIG_ARC_IRQ6_LV2 | ||
| 109 | VECTOR handle_interrupt_level2 | 95 | VECTOR handle_interrupt_level2 |
| 110 | #else | 96 | #else |
| 111 | VECTOR handle_interrupt_level1 | 97 | VECTOR handle_interrupt_level1 |
| 112 | #endif | 98 | #endif |
| 113 | 99 | ||
| 114 | .rept 25 | 100 | .rept 28 |
| 115 | VECTOR handle_interrupt_level1 ; Other devices | 101 | VECTOR handle_interrupt_level1 ; Other devices |
| 116 | .endr | 102 | .endr |
| 117 | 103 | ||
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c index c5cceca36118..ce9deb953ca9 100644 --- a/arch/arc/kernel/intc-compact.c +++ b/arch/arc/kernel/intc-compact.c | |||
| @@ -28,10 +28,8 @@ void arc_init_IRQ(void) | |||
| 28 | { | 28 | { |
| 29 | int level_mask = 0; | 29 | int level_mask = 0; |
| 30 | 30 | ||
| 31 | /* setup any high priority Interrupts (Level2 in ARCompact jargon) */ | 31 | /* Is timer high priority Interrupt (Level2 in ARCompact jargon) */ |
| 32 | level_mask |= IS_ENABLED(CONFIG_ARC_IRQ3_LV2) << 3; | 32 | level_mask |= IS_ENABLED(CONFIG_ARC_COMPACT_IRQ_LEVELS) << TIMER0_IRQ; |
| 33 | level_mask |= IS_ENABLED(CONFIG_ARC_IRQ5_LV2) << 5; | ||
| 34 | level_mask |= IS_ENABLED(CONFIG_ARC_IRQ6_LV2) << 6; | ||
| 35 | 33 | ||
| 36 | /* | 34 | /* |
| 37 | * Write to register, even if no LV2 IRQs configured to reset it | 35 | * Write to register, even if no LV2 IRQs configured to reset it |
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 6fd48021324b..08f03d9b5b3e 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c | |||
| @@ -108,7 +108,7 @@ static void arc_perf_event_update(struct perf_event *event, | |||
| 108 | int64_t delta = new_raw_count - prev_raw_count; | 108 | int64_t delta = new_raw_count - prev_raw_count; |
| 109 | 109 | ||
| 110 | /* | 110 | /* |
| 111 | * We don't afaraid of hwc->prev_count changing beneath our feet | 111 | * We aren't afraid of hwc->prev_count changing beneath our feet |
| 112 | * because there's no way for us to re-enter this function anytime. | 112 | * because there's no way for us to re-enter this function anytime. |
| 113 | */ | 113 | */ |
| 114 | local64_set(&hwc->prev_count, new_raw_count); | 114 | local64_set(&hwc->prev_count, new_raw_count); |
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index f63b8bfefb0c..2ee7a4d758a8 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c | |||
| @@ -392,7 +392,7 @@ void __init setup_arch(char **cmdline_p) | |||
| 392 | /* | 392 | /* |
| 393 | * If we are here, it is established that @uboot_arg didn't | 393 | * If we are here, it is established that @uboot_arg didn't |
| 394 | * point to DT blob. Instead if u-boot says it is cmdline, | 394 | * point to DT blob. Instead if u-boot says it is cmdline, |
| 395 | * Appent to embedded DT cmdline. | 395 | * append to embedded DT cmdline. |
| 396 | * setup_machine_fdt() would have populated @boot_command_line | 396 | * setup_machine_fdt() would have populated @boot_command_line |
| 397 | */ | 397 | */ |
| 398 | if (uboot_tag == 1) { | 398 | if (uboot_tag == 1) { |
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 004b7f0bc76c..6cb3736b6b83 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c | |||
| @@ -34,7 +34,7 @@ | |||
| 34 | * -ViXS were still seeing crashes when using insmod to load drivers. | 34 | * -ViXS were still seeing crashes when using insmod to load drivers. |
| 35 | * It turned out that the code to change Execute permssions for TLB entries | 35 | * It turned out that the code to change Execute permssions for TLB entries |
| 36 | * of user was not guarded for interrupts (mod_tlb_permission) | 36 | * of user was not guarded for interrupts (mod_tlb_permission) |
| 37 | * This was cauing TLB entries to be overwritten on unrelated indexes | 37 | * This was causing TLB entries to be overwritten on unrelated indexes |
| 38 | * | 38 | * |
| 39 | * Vineetg: July 15th 2008: Bug #94183 | 39 | * Vineetg: July 15th 2008: Bug #94183 |
| 40 | * -Exception happens in Delay slot of a JMP, and before user space resumes, | 40 | * -Exception happens in Delay slot of a JMP, and before user space resumes, |
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c index a6f91e88ce36..934150e7ac48 100644 --- a/arch/arc/kernel/troubleshoot.c +++ b/arch/arc/kernel/troubleshoot.c | |||
| @@ -276,7 +276,7 @@ static int tlb_stats_open(struct inode *inode, struct file *file) | |||
| 276 | return 0; | 276 | return 0; |
| 277 | } | 277 | } |
| 278 | 278 | ||
| 279 | /* called on user read(): display the couters */ | 279 | /* called on user read(): display the counters */ |
| 280 | static ssize_t tlb_stats_output(struct file *file, /* file descriptor */ | 280 | static ssize_t tlb_stats_output(struct file *file, /* file descriptor */ |
| 281 | char __user *user_buf, /* user buffer */ | 281 | char __user *user_buf, /* user buffer */ |
| 282 | size_t len, /* length of buffer */ | 282 | size_t len, /* length of buffer */ |
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c index 9e5eddbb856f..5a294b2c3cb3 100644 --- a/arch/arc/mm/cache.c +++ b/arch/arc/mm/cache.c | |||
| @@ -215,7 +215,7 @@ slc_chk: | |||
| 215 | * ------------------ | 215 | * ------------------ |
| 216 | * This ver of MMU supports variable page sizes (1k-16k): although Linux will | 216 | * This ver of MMU supports variable page sizes (1k-16k): although Linux will |
| 217 | * only support 8k (default), 16k and 4k. | 217 | * only support 8k (default), 16k and 4k. |
| 218 | * However from hardware perspective, smaller page sizes aggrevate aliasing | 218 | * However from hardware perspective, smaller page sizes aggravate aliasing |
| 219 | * meaning more vaddr bits needed to disambiguate the cache-line-op ; | 219 | * meaning more vaddr bits needed to disambiguate the cache-line-op ; |
| 220 | * the existing scheme of piggybacking won't work for certain configurations. | 220 | * the existing scheme of piggybacking won't work for certain configurations. |
| 221 | * Two new registers IC_PTAG and DC_PTAG inttoduced. | 221 | * Two new registers IC_PTAG and DC_PTAG inttoduced. |
| @@ -302,7 +302,7 @@ void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr, | |||
| 302 | 302 | ||
| 303 | /* | 303 | /* |
| 304 | * This is technically for MMU v4, using the MMU v3 programming model | 304 | * This is technically for MMU v4, using the MMU v3 programming model |
| 305 | * Special work for HS38 aliasing I-cache configuratino with PAE40 | 305 | * Special work for HS38 aliasing I-cache configuration with PAE40 |
| 306 | * - upper 8 bits of paddr need to be written into PTAG_HI | 306 | * - upper 8 bits of paddr need to be written into PTAG_HI |
| 307 | * - (and needs to be written before the lower 32 bits) | 307 | * - (and needs to be written before the lower 32 bits) |
| 308 | * Note that PTAG_HI is hoisted outside the line loop | 308 | * Note that PTAG_HI is hoisted outside the line loop |
| @@ -936,7 +936,7 @@ void arc_cache_init(void) | |||
| 936 | ic->ver, CONFIG_ARC_MMU_VER); | 936 | ic->ver, CONFIG_ARC_MMU_VER); |
| 937 | 937 | ||
| 938 | /* | 938 | /* |
| 939 | * In MMU v4 (HS38x) the alising icache config uses IVIL/PTAG | 939 | * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG |
| 940 | * pair to provide vaddr/paddr respectively, just as in MMU v3 | 940 | * pair to provide vaddr/paddr respectively, just as in MMU v3 |
| 941 | */ | 941 | */ |
| 942 | if (is_isa_arcv2() && ic->alias) | 942 | if (is_isa_arcv2() && ic->alias) |
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c index 8c8e36fa5659..73d7e4c75b7d 100644 --- a/arch/arc/mm/dma.c +++ b/arch/arc/mm/dma.c | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | * DMA Coherent API Notes | 10 | * DMA Coherent API Notes |
| 11 | * | 11 | * |
| 12 | * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is | 12 | * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is |
| 13 | * implemented by accessintg it using a kernel virtual address, with | 13 | * implemented by accessing it using a kernel virtual address, with |
| 14 | * Cache bit off in the TLB entry. | 14 | * Cache bit off in the TLB entry. |
| 15 | * | 15 | * |
| 16 | * The default DMA address == Phy address which is 0x8000_0000 based. | 16 | * The default DMA address == Phy address which is 0x8000_0000 based. |
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 5954881a35ac..ba3fc12bd272 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
| @@ -109,7 +109,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, | |||
| 109 | * PTE_RDONLY is cleared by default in the asm below, so set it in | 109 | * PTE_RDONLY is cleared by default in the asm below, so set it in |
| 110 | * back if necessary (read-only or clean PTE). | 110 | * back if necessary (read-only or clean PTE). |
| 111 | */ | 111 | */ |
| 112 | if (!pte_write(entry) || !dirty) | 112 | if (!pte_write(entry) || !pte_sw_dirty(entry)) |
| 113 | pte_val(entry) |= PTE_RDONLY; | 113 | pte_val(entry) |= PTE_RDONLY; |
| 114 | 114 | ||
| 115 | /* | 115 | /* |
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h index 0c12a3bfe2ab..069369f6414b 100644 --- a/arch/powerpc/include/asm/nohash/64/pgalloc.h +++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h | |||
| @@ -172,7 +172,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, | |||
| 172 | 172 | ||
| 173 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) | 173 | static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) |
| 174 | { | 174 | { |
| 175 | pte_fragment_fre((unsigned long *)pte, 1); | 175 | pte_fragment_free((unsigned long *)pte, 1); |
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) | 178 | static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index ccd2037c797f..6ee4b72cda42 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
| @@ -719,7 +719,7 @@ unsigned char ibm_architecture_vec[] = { | |||
| 719 | * must match by the macro below. Update the definition if | 719 | * must match by the macro below. Update the definition if |
| 720 | * the structure layout changes. | 720 | * the structure layout changes. |
| 721 | */ | 721 | */ |
| 722 | #define IBM_ARCH_VEC_NRCORES_OFFSET 125 | 722 | #define IBM_ARCH_VEC_NRCORES_OFFSET 133 |
| 723 | W(NR_CPUS), /* number of cores supported */ | 723 | W(NR_CPUS), /* number of cores supported */ |
| 724 | 0, | 724 | 0, |
| 725 | 0, | 725 | 0, |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 30a03c03fe73..060b140f03c6 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
| @@ -377,7 +377,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset, | |||
| 377 | 377 | ||
| 378 | #else | 378 | #else |
| 379 | BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != | 379 | BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != |
| 380 | offsetof(struct thread_fp_state, fpr[32][0])); | 380 | offsetof(struct thread_fp_state, fpr[32])); |
| 381 | 381 | ||
| 382 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 382 | return user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
| 383 | &target->thread.fp_state, 0, -1); | 383 | &target->thread.fp_state, 0, -1); |
| @@ -405,7 +405,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, | |||
| 405 | return 0; | 405 | return 0; |
| 406 | #else | 406 | #else |
| 407 | BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != | 407 | BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) != |
| 408 | offsetof(struct thread_fp_state, fpr[32][0])); | 408 | offsetof(struct thread_fp_state, fpr[32])); |
| 409 | 409 | ||
| 410 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, | 410 | return user_regset_copyin(&pos, &count, &kbuf, &ubuf, |
| 411 | &target->thread.fp_state, 0, -1); | 411 | &target->thread.fp_state, 0, -1); |
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index d873f6507f72..40e05e7f43de 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c | |||
| @@ -550,7 +550,11 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot, | |||
| 550 | } | 550 | } |
| 551 | } | 551 | } |
| 552 | /* This works for all page sizes, and for 256M and 1T segments */ | 552 | /* This works for all page sizes, and for 256M and 1T segments */ |
| 553 | *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT; | 553 | if (cpu_has_feature(CPU_FTR_ARCH_300)) |
| 554 | *ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT; | ||
| 555 | else | ||
| 556 | *ssize = hpte_v >> HPTE_V_SSIZE_SHIFT; | ||
| 557 | |||
| 554 | shift = mmu_psize_defs[size].shift; | 558 | shift = mmu_psize_defs[size].shift; |
| 555 | 559 | ||
| 556 | avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm); | 560 | avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm); |
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index 0fdaf93a3e09..54efba2fd66e 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c | |||
| @@ -117,7 +117,7 @@ static inline void _tlbie_va(unsigned long va, unsigned long pid, | |||
| 117 | */ | 117 | */ |
| 118 | void radix__local_flush_tlb_mm(struct mm_struct *mm) | 118 | void radix__local_flush_tlb_mm(struct mm_struct *mm) |
| 119 | { | 119 | { |
| 120 | unsigned int pid; | 120 | unsigned long pid; |
| 121 | 121 | ||
| 122 | preempt_disable(); | 122 | preempt_disable(); |
| 123 | pid = mm->context.id; | 123 | pid = mm->context.id; |
| @@ -130,7 +130,7 @@ EXPORT_SYMBOL(radix__local_flush_tlb_mm); | |||
| 130 | void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, | 130 | void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, |
| 131 | unsigned long ap, int nid) | 131 | unsigned long ap, int nid) |
| 132 | { | 132 | { |
| 133 | unsigned int pid; | 133 | unsigned long pid; |
| 134 | 134 | ||
| 135 | preempt_disable(); | 135 | preempt_disable(); |
| 136 | pid = mm ? mm->context.id : 0; | 136 | pid = mm ? mm->context.id : 0; |
| @@ -160,7 +160,7 @@ static int mm_is_core_local(struct mm_struct *mm) | |||
| 160 | 160 | ||
| 161 | void radix__flush_tlb_mm(struct mm_struct *mm) | 161 | void radix__flush_tlb_mm(struct mm_struct *mm) |
| 162 | { | 162 | { |
| 163 | unsigned int pid; | 163 | unsigned long pid; |
| 164 | 164 | ||
| 165 | preempt_disable(); | 165 | preempt_disable(); |
| 166 | pid = mm->context.id; | 166 | pid = mm->context.id; |
| @@ -185,7 +185,7 @@ EXPORT_SYMBOL(radix__flush_tlb_mm); | |||
| 185 | void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, | 185 | void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, |
| 186 | unsigned long ap, int nid) | 186 | unsigned long ap, int nid) |
| 187 | { | 187 | { |
| 188 | unsigned int pid; | 188 | unsigned long pid; |
| 189 | 189 | ||
| 190 | preempt_disable(); | 190 | preempt_disable(); |
| 191 | pid = mm ? mm->context.id : 0; | 191 | pid = mm ? mm->context.id : 0; |
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index b7dfc1359d01..3e8865b187de 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c | |||
| @@ -927,7 +927,7 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, | |||
| 927 | dn = pci_device_to_OF_node(dev); | 927 | dn = pci_device_to_OF_node(dev); |
| 928 | pdn = PCI_DN(dn); | 928 | pdn = PCI_DN(dn); |
| 929 | buid = pdn->phb->buid; | 929 | buid = pdn->phb->buid; |
| 930 | cfg_addr = (pdn->busno << 8) | pdn->devfn; | 930 | cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8)); |
| 931 | 931 | ||
| 932 | ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, | 932 | ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, |
| 933 | cfg_addr, BUID_HI(buid), BUID_LO(buid)); | 933 | cfg_addr, BUID_HI(buid), BUID_LO(buid)); |
| @@ -956,7 +956,7 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail, | |||
| 956 | dn = pci_device_to_OF_node(dev); | 956 | dn = pci_device_to_OF_node(dev); |
| 957 | pdn = PCI_DN(dn); | 957 | pdn = PCI_DN(dn); |
| 958 | buid = pdn->phb->buid; | 958 | buid = pdn->phb->buid; |
| 959 | cfg_addr = (pdn->busno << 8) | pdn->devfn; | 959 | cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8)); |
| 960 | 960 | ||
| 961 | do { | 961 | do { |
| 962 | /* extra outputs are LIOBN and dma-addr (hi, lo) */ | 962 | /* extra outputs are LIOBN and dma-addr (hi, lo) */ |
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 7336e55c248c..824e54086e07 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c | |||
| @@ -2948,27 +2948,10 @@ static struct intel_uncore_type bdx_uncore_cbox = { | |||
| 2948 | .format_group = &hswep_uncore_cbox_format_group, | 2948 | .format_group = &hswep_uncore_cbox_format_group, |
| 2949 | }; | 2949 | }; |
| 2950 | 2950 | ||
| 2951 | static struct intel_uncore_type bdx_uncore_sbox = { | ||
| 2952 | .name = "sbox", | ||
| 2953 | .num_counters = 4, | ||
| 2954 | .num_boxes = 4, | ||
| 2955 | .perf_ctr_bits = 48, | ||
| 2956 | .event_ctl = HSWEP_S0_MSR_PMON_CTL0, | ||
| 2957 | .perf_ctr = HSWEP_S0_MSR_PMON_CTR0, | ||
| 2958 | .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, | ||
| 2959 | .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, | ||
| 2960 | .msr_offset = HSWEP_SBOX_MSR_OFFSET, | ||
| 2961 | .ops = &hswep_uncore_sbox_msr_ops, | ||
| 2962 | .format_group = &hswep_uncore_sbox_format_group, | ||
| 2963 | }; | ||
| 2964 | |||
| 2965 | #define BDX_MSR_UNCORE_SBOX 3 | ||
| 2966 | |||
| 2967 | static struct intel_uncore_type *bdx_msr_uncores[] = { | 2951 | static struct intel_uncore_type *bdx_msr_uncores[] = { |
| 2968 | &bdx_uncore_ubox, | 2952 | &bdx_uncore_ubox, |
| 2969 | &bdx_uncore_cbox, | 2953 | &bdx_uncore_cbox, |
| 2970 | &hswep_uncore_pcu, | 2954 | &hswep_uncore_pcu, |
| 2971 | &bdx_uncore_sbox, | ||
| 2972 | NULL, | 2955 | NULL, |
| 2973 | }; | 2956 | }; |
| 2974 | 2957 | ||
| @@ -2977,10 +2960,6 @@ void bdx_uncore_cpu_init(void) | |||
| 2977 | if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) | 2960 | if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) |
| 2978 | bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; | 2961 | bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; |
| 2979 | uncore_msr_uncores = bdx_msr_uncores; | 2962 | uncore_msr_uncores = bdx_msr_uncores; |
| 2980 | |||
| 2981 | /* BDX-DE doesn't have SBOX */ | ||
| 2982 | if (boot_cpu_data.x86_model == 86) | ||
| 2983 | uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; | ||
| 2984 | } | 2963 | } |
| 2985 | 2964 | ||
| 2986 | static struct intel_uncore_type bdx_uncore_ha = { | 2965 | static struct intel_uncore_type bdx_uncore_ha = { |
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 84e33ff5a6d5..446702ed99dc 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
| @@ -2588,8 +2588,8 @@ static struct resource * __init ioapic_setup_resources(void) | |||
| 2588 | res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 2588 | res[num].flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
| 2589 | snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); | 2589 | snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); |
| 2590 | mem += IOAPIC_RESOURCE_NAME_SIZE; | 2590 | mem += IOAPIC_RESOURCE_NAME_SIZE; |
| 2591 | ioapics[i].iomem_res = &res[num]; | ||
| 2591 | num++; | 2592 | num++; |
| 2592 | ioapics[i].iomem_res = res; | ||
| 2593 | } | 2593 | } |
| 2594 | 2594 | ||
| 2595 | ioapic_resources = res; | 2595 | ioapic_resources = res; |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index c343a54bed39..f5c69d8974e1 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
| @@ -674,14 +674,14 @@ static void init_amd_bd(struct cpuinfo_x86 *c) | |||
| 674 | u64 value; | 674 | u64 value; |
| 675 | 675 | ||
| 676 | /* re-enable TopologyExtensions if switched off by BIOS */ | 676 | /* re-enable TopologyExtensions if switched off by BIOS */ |
| 677 | if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && | 677 | if ((c->x86_model >= 0x10) && (c->x86_model <= 0x6f) && |
| 678 | !cpu_has(c, X86_FEATURE_TOPOEXT)) { | 678 | !cpu_has(c, X86_FEATURE_TOPOEXT)) { |
| 679 | 679 | ||
| 680 | if (msr_set_bit(0xc0011005, 54) > 0) { | 680 | if (msr_set_bit(0xc0011005, 54) > 0) { |
| 681 | rdmsrl(0xc0011005, value); | 681 | rdmsrl(0xc0011005, value); |
| 682 | if (value & BIT_64(54)) { | 682 | if (value & BIT_64(54)) { |
| 683 | set_cpu_cap(c, X86_FEATURE_TOPOEXT); | 683 | set_cpu_cap(c, X86_FEATURE_TOPOEXT); |
| 684 | pr_info(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); | 684 | pr_info_once(FW_INFO "CPU: Re-enabling disabled Topology Extensions Support.\n"); |
| 685 | } | 685 | } |
| 686 | } | 686 | } |
| 687 | } | 687 | } |
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index d1590486204a..00f03d82e69a 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
| @@ -96,6 +96,12 @@ static inline void cond_local_irq_disable(struct pt_regs *regs) | |||
| 96 | local_irq_disable(); | 96 | local_irq_disable(); |
| 97 | } | 97 | } |
| 98 | 98 | ||
| 99 | /* | ||
| 100 | * In IST context, we explicitly disable preemption. This serves two | ||
| 101 | * purposes: it makes it much less likely that we would accidentally | ||
| 102 | * schedule in IST context and it will force a warning if we somehow | ||
| 103 | * manage to schedule by accident. | ||
| 104 | */ | ||
| 99 | void ist_enter(struct pt_regs *regs) | 105 | void ist_enter(struct pt_regs *regs) |
| 100 | { | 106 | { |
| 101 | if (user_mode(regs)) { | 107 | if (user_mode(regs)) { |
| @@ -110,13 +116,7 @@ void ist_enter(struct pt_regs *regs) | |||
| 110 | rcu_nmi_enter(); | 116 | rcu_nmi_enter(); |
| 111 | } | 117 | } |
| 112 | 118 | ||
| 113 | /* | 119 | preempt_disable(); |
| 114 | * We are atomic because we're on the IST stack; or we're on | ||
| 115 | * x86_32, in which case we still shouldn't schedule; or we're | ||
| 116 | * on x86_64 and entered from user mode, in which case we're | ||
| 117 | * still atomic unless ist_begin_non_atomic is called. | ||
| 118 | */ | ||
| 119 | preempt_count_add(HARDIRQ_OFFSET); | ||
| 120 | 120 | ||
| 121 | /* This code is a bit fragile. Test it. */ | 121 | /* This code is a bit fragile. Test it. */ |
| 122 | RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work"); | 122 | RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work"); |
| @@ -124,7 +124,7 @@ void ist_enter(struct pt_regs *regs) | |||
| 124 | 124 | ||
| 125 | void ist_exit(struct pt_regs *regs) | 125 | void ist_exit(struct pt_regs *regs) |
| 126 | { | 126 | { |
| 127 | preempt_count_sub(HARDIRQ_OFFSET); | 127 | preempt_enable_no_resched(); |
| 128 | 128 | ||
| 129 | if (!user_mode(regs)) | 129 | if (!user_mode(regs)) |
| 130 | rcu_nmi_exit(); | 130 | rcu_nmi_exit(); |
| @@ -155,7 +155,7 @@ void ist_begin_non_atomic(struct pt_regs *regs) | |||
| 155 | BUG_ON((unsigned long)(current_top_of_stack() - | 155 | BUG_ON((unsigned long)(current_top_of_stack() - |
| 156 | current_stack_pointer()) >= THREAD_SIZE); | 156 | current_stack_pointer()) >= THREAD_SIZE); |
| 157 | 157 | ||
| 158 | preempt_count_sub(HARDIRQ_OFFSET); | 158 | preempt_enable_no_resched(); |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | /** | 161 | /** |
| @@ -165,7 +165,7 @@ void ist_begin_non_atomic(struct pt_regs *regs) | |||
| 165 | */ | 165 | */ |
| 166 | void ist_end_non_atomic(void) | 166 | void ist_end_non_atomic(void) |
| 167 | { | 167 | { |
| 168 | preempt_count_add(HARDIRQ_OFFSET); | 168 | preempt_disable(); |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | static nokprobe_inline int | 171 | static nokprobe_inline int |
diff --git a/block/blk-lib.c b/block/blk-lib.c index 23d7f301a196..9e29dc351695 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c | |||
| @@ -113,6 +113,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | |||
| 113 | ret = submit_bio_wait(type, bio); | 113 | ret = submit_bio_wait(type, bio); |
| 114 | if (ret == -EOPNOTSUPP) | 114 | if (ret == -EOPNOTSUPP) |
| 115 | ret = 0; | 115 | ret = 0; |
| 116 | bio_put(bio); | ||
| 116 | } | 117 | } |
| 117 | blk_finish_plug(&plug); | 118 | blk_finish_plug(&plug); |
| 118 | 119 | ||
| @@ -165,8 +166,10 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector, | |||
| 165 | } | 166 | } |
| 166 | } | 167 | } |
| 167 | 168 | ||
| 168 | if (bio) | 169 | if (bio) { |
| 169 | ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio); | 170 | ret = submit_bio_wait(REQ_WRITE | REQ_WRITE_SAME, bio); |
| 171 | bio_put(bio); | ||
| 172 | } | ||
| 170 | return ret != -EOPNOTSUPP ? ret : 0; | 173 | return ret != -EOPNOTSUPP ? ret : 0; |
| 171 | } | 174 | } |
| 172 | EXPORT_SYMBOL(blkdev_issue_write_same); | 175 | EXPORT_SYMBOL(blkdev_issue_write_same); |
| @@ -206,8 +209,11 @@ static int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | |||
| 206 | } | 209 | } |
| 207 | } | 210 | } |
| 208 | 211 | ||
| 209 | if (bio) | 212 | if (bio) { |
| 210 | return submit_bio_wait(WRITE, bio); | 213 | ret = submit_bio_wait(WRITE, bio); |
| 214 | bio_put(bio); | ||
| 215 | return ret; | ||
| 216 | } | ||
| 211 | return 0; | 217 | return 0; |
| 212 | } | 218 | } |
| 213 | 219 | ||
diff --git a/block/blk-mq.c b/block/blk-mq.c index 29cbc1b5fbdb..f9b9049b1284 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -1262,12 +1262,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1262 | 1262 | ||
| 1263 | blk_queue_split(q, &bio, q->bio_split); | 1263 | blk_queue_split(q, &bio, q->bio_split); |
| 1264 | 1264 | ||
| 1265 | if (!is_flush_fua && !blk_queue_nomerges(q)) { | 1265 | if (!is_flush_fua && !blk_queue_nomerges(q) && |
| 1266 | if (blk_attempt_plug_merge(q, bio, &request_count, | 1266 | blk_attempt_plug_merge(q, bio, &request_count, &same_queue_rq)) |
| 1267 | &same_queue_rq)) | 1267 | return BLK_QC_T_NONE; |
| 1268 | return BLK_QC_T_NONE; | ||
| 1269 | } else | ||
| 1270 | request_count = blk_plug_queued_count(q); | ||
| 1271 | 1268 | ||
| 1272 | rq = blk_mq_map_request(q, bio, &data); | 1269 | rq = blk_mq_map_request(q, bio, &data); |
| 1273 | if (unlikely(!rq)) | 1270 | if (unlikely(!rq)) |
| @@ -1358,9 +1355,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio) | |||
| 1358 | 1355 | ||
| 1359 | blk_queue_split(q, &bio, q->bio_split); | 1356 | blk_queue_split(q, &bio, q->bio_split); |
| 1360 | 1357 | ||
| 1361 | if (!is_flush_fua && !blk_queue_nomerges(q) && | 1358 | if (!is_flush_fua && !blk_queue_nomerges(q)) { |
| 1362 | blk_attempt_plug_merge(q, bio, &request_count, NULL)) | 1359 | if (blk_attempt_plug_merge(q, bio, &request_count, NULL)) |
| 1363 | return BLK_QC_T_NONE; | 1360 | return BLK_QC_T_NONE; |
| 1361 | } else | ||
| 1362 | request_count = blk_plug_queued_count(q); | ||
| 1364 | 1363 | ||
| 1365 | rq = blk_mq_map_request(q, bio, &data); | 1364 | rq = blk_mq_map_request(q, bio, &data); |
| 1366 | if (unlikely(!rq)) | 1365 | if (unlikely(!rq)) |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 31e8da648fff..262ca31b86d9 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
| @@ -1051,7 +1051,7 @@ static int __init acpi_bus_init(void) | |||
| 1051 | * Maybe EC region is required at bus_scan/acpi_get_devices. So it | 1051 | * Maybe EC region is required at bus_scan/acpi_get_devices. So it |
| 1052 | * is necessary to enable it as early as possible. | 1052 | * is necessary to enable it as early as possible. |
| 1053 | */ | 1053 | */ |
| 1054 | acpi_boot_ec_enable(); | 1054 | acpi_ec_dsdt_probe(); |
| 1055 | 1055 | ||
| 1056 | printk(KERN_INFO PREFIX "Interpreter enabled\n"); | 1056 | printk(KERN_INFO PREFIX "Interpreter enabled\n"); |
| 1057 | 1057 | ||
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 0e70181f150c..73c76d646064 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
| @@ -1446,10 +1446,30 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context) | |||
| 1446 | return AE_OK; | 1446 | return AE_OK; |
| 1447 | } | 1447 | } |
| 1448 | 1448 | ||
| 1449 | int __init acpi_boot_ec_enable(void) | 1449 | static const struct acpi_device_id ec_device_ids[] = { |
| 1450 | {"PNP0C09", 0}, | ||
| 1451 | {"", 0}, | ||
| 1452 | }; | ||
| 1453 | |||
| 1454 | int __init acpi_ec_dsdt_probe(void) | ||
| 1450 | { | 1455 | { |
| 1451 | if (!boot_ec) | 1456 | acpi_status status; |
| 1457 | |||
| 1458 | if (boot_ec) | ||
| 1452 | return 0; | 1459 | return 0; |
| 1460 | |||
| 1461 | /* | ||
| 1462 | * Finding EC from DSDT if there is no ECDT EC available. When this | ||
| 1463 | * function is invoked, ACPI tables have been fully loaded, we can | ||
| 1464 | * walk namespace now. | ||
| 1465 | */ | ||
| 1466 | boot_ec = make_acpi_ec(); | ||
| 1467 | if (!boot_ec) | ||
| 1468 | return -ENOMEM; | ||
| 1469 | status = acpi_get_devices(ec_device_ids[0].id, | ||
| 1470 | ec_parse_device, boot_ec, NULL); | ||
| 1471 | if (ACPI_FAILURE(status) || !boot_ec->handle) | ||
| 1472 | return -ENODEV; | ||
| 1453 | if (!ec_install_handlers(boot_ec)) { | 1473 | if (!ec_install_handlers(boot_ec)) { |
| 1454 | first_ec = boot_ec; | 1474 | first_ec = boot_ec; |
| 1455 | return 0; | 1475 | return 0; |
| @@ -1457,11 +1477,6 @@ int __init acpi_boot_ec_enable(void) | |||
| 1457 | return -EFAULT; | 1477 | return -EFAULT; |
| 1458 | } | 1478 | } |
| 1459 | 1479 | ||
| 1460 | static const struct acpi_device_id ec_device_ids[] = { | ||
| 1461 | {"PNP0C09", 0}, | ||
| 1462 | {"", 0}, | ||
| 1463 | }; | ||
| 1464 | |||
| 1465 | #if 0 | 1480 | #if 0 |
| 1466 | /* | 1481 | /* |
| 1467 | * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not | 1482 | * Some EC firmware variations refuses to respond QR_EC when SCI_EVT is not |
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index 9bb0773d39bf..27cc7feabfe4 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h | |||
| @@ -181,7 +181,7 @@ typedef int (*acpi_ec_query_func) (void *data); | |||
| 181 | 181 | ||
| 182 | int acpi_ec_init(void); | 182 | int acpi_ec_init(void); |
| 183 | int acpi_ec_ecdt_probe(void); | 183 | int acpi_ec_ecdt_probe(void); |
| 184 | int acpi_boot_ec_enable(void); | 184 | int acpi_ec_dsdt_probe(void); |
| 185 | void acpi_ec_block_transactions(void); | 185 | void acpi_ec_block_transactions(void); |
| 186 | void acpi_ec_unblock_transactions(void); | 186 | void acpi_ec_unblock_transactions(void); |
| 187 | void acpi_ec_unblock_transactions_early(void); | 187 | void acpi_ec_unblock_transactions_early(void); |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 31e73a7a40f2..6a48ed41963f 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
| @@ -941,7 +941,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd) | |||
| 941 | debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize); | 941 | debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize); |
| 942 | debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout); | 942 | debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout); |
| 943 | debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize); | 943 | debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize); |
| 944 | debugfs_create_file("flags", 0444, dir, &nbd, &nbd_dbg_flags_ops); | 944 | debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops); |
| 945 | 945 | ||
| 946 | return 0; | 946 | return 0; |
| 947 | } | 947 | } |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index ca13df854639..2e6d1e9c3345 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
| @@ -874,8 +874,12 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 874 | const struct blk_mq_queue_data *qd) | 874 | const struct blk_mq_queue_data *qd) |
| 875 | { | 875 | { |
| 876 | unsigned long flags; | 876 | unsigned long flags; |
| 877 | struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data; | 877 | int qid = hctx->queue_num; |
| 878 | struct blkfront_info *info = hctx->queue->queuedata; | ||
| 879 | struct blkfront_ring_info *rinfo = NULL; | ||
| 878 | 880 | ||
| 881 | BUG_ON(info->nr_rings <= qid); | ||
| 882 | rinfo = &info->rinfo[qid]; | ||
| 879 | blk_mq_start_request(qd->rq); | 883 | blk_mq_start_request(qd->rq); |
| 880 | spin_lock_irqsave(&rinfo->ring_lock, flags); | 884 | spin_lock_irqsave(&rinfo->ring_lock, flags); |
| 881 | if (RING_FULL(&rinfo->ring)) | 885 | if (RING_FULL(&rinfo->ring)) |
| @@ -901,20 +905,9 @@ out_busy: | |||
| 901 | return BLK_MQ_RQ_QUEUE_BUSY; | 905 | return BLK_MQ_RQ_QUEUE_BUSY; |
| 902 | } | 906 | } |
| 903 | 907 | ||
| 904 | static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, | ||
| 905 | unsigned int index) | ||
| 906 | { | ||
| 907 | struct blkfront_info *info = (struct blkfront_info *)data; | ||
| 908 | |||
| 909 | BUG_ON(info->nr_rings <= index); | ||
| 910 | hctx->driver_data = &info->rinfo[index]; | ||
| 911 | return 0; | ||
| 912 | } | ||
| 913 | |||
| 914 | static struct blk_mq_ops blkfront_mq_ops = { | 908 | static struct blk_mq_ops blkfront_mq_ops = { |
| 915 | .queue_rq = blkif_queue_rq, | 909 | .queue_rq = blkif_queue_rq, |
| 916 | .map_queue = blk_mq_map_queue, | 910 | .map_queue = blk_mq_map_queue, |
| 917 | .init_hctx = blk_mq_init_hctx, | ||
| 918 | }; | 911 | }; |
| 919 | 912 | ||
| 920 | static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, | 913 | static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, |
| @@ -950,6 +943,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size, | |||
| 950 | return PTR_ERR(rq); | 943 | return PTR_ERR(rq); |
| 951 | } | 944 | } |
| 952 | 945 | ||
| 946 | rq->queuedata = info; | ||
| 953 | queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); | 947 | queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); |
| 954 | 948 | ||
| 955 | if (info->feature_discard) { | 949 | if (info->feature_discard) { |
| @@ -2149,6 +2143,8 @@ static int blkfront_resume(struct xenbus_device *dev) | |||
| 2149 | return err; | 2143 | return err; |
| 2150 | 2144 | ||
| 2151 | err = talk_to_blkback(dev, info); | 2145 | err = talk_to_blkback(dev, info); |
| 2146 | if (!err) | ||
| 2147 | blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings); | ||
| 2152 | 2148 | ||
| 2153 | /* | 2149 | /* |
| 2154 | * We have to wait for the backend to switch to | 2150 | * We have to wait for the backend to switch to |
| @@ -2485,10 +2481,23 @@ static void blkback_changed(struct xenbus_device *dev, | |||
| 2485 | break; | 2481 | break; |
| 2486 | 2482 | ||
| 2487 | case XenbusStateConnected: | 2483 | case XenbusStateConnected: |
| 2488 | if (dev->state != XenbusStateInitialised) { | 2484 | /* |
| 2485 | * talk_to_blkback sets state to XenbusStateInitialised | ||
| 2486 | * and blkfront_connect sets it to XenbusStateConnected | ||
| 2487 | * (if connection went OK). | ||
| 2488 | * | ||
| 2489 | * If the backend (or toolstack) decides to poke at backend | ||
| 2490 | * state (and re-trigger the watch by setting the state repeatedly | ||
| 2491 | * to XenbusStateConnected (4)) we need to deal with this. | ||
| 2492 | * This is allowed as this is used to communicate to the guest | ||
| 2493 | * that the size of disk has changed! | ||
| 2494 | */ | ||
| 2495 | if ((dev->state != XenbusStateInitialised) && | ||
| 2496 | (dev->state != XenbusStateConnected)) { | ||
| 2489 | if (talk_to_blkback(dev, info)) | 2497 | if (talk_to_blkback(dev, info)) |
| 2490 | break; | 2498 | break; |
| 2491 | } | 2499 | } |
| 2500 | |||
| 2492 | blkfront_connect(info); | 2501 | blkfront_connect(info); |
| 2493 | break; | 2502 | break; |
| 2494 | 2503 | ||
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 0d159b513469..ee367e9b7d2e 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -1460,6 +1460,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 1460 | 1460 | ||
| 1461 | intel_pstate_clear_update_util_hook(policy->cpu); | 1461 | intel_pstate_clear_update_util_hook(policy->cpu); |
| 1462 | 1462 | ||
| 1463 | pr_debug("set_policy cpuinfo.max %u policy->max %u\n", | ||
| 1464 | policy->cpuinfo.max_freq, policy->max); | ||
| 1465 | |||
| 1463 | cpu = all_cpu_data[0]; | 1466 | cpu = all_cpu_data[0]; |
| 1464 | if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && | 1467 | if (cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate && |
| 1465 | policy->max < policy->cpuinfo.max_freq && | 1468 | policy->max < policy->cpuinfo.max_freq && |
| @@ -1495,13 +1498,13 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 1495 | limits->max_sysfs_pct); | 1498 | limits->max_sysfs_pct); |
| 1496 | limits->max_perf_pct = max(limits->min_policy_pct, | 1499 | limits->max_perf_pct = max(limits->min_policy_pct, |
| 1497 | limits->max_perf_pct); | 1500 | limits->max_perf_pct); |
| 1498 | limits->max_perf = round_up(limits->max_perf, FRAC_BITS); | ||
| 1499 | 1501 | ||
| 1500 | /* Make sure min_perf_pct <= max_perf_pct */ | 1502 | /* Make sure min_perf_pct <= max_perf_pct */ |
| 1501 | limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); | 1503 | limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); |
| 1502 | 1504 | ||
| 1503 | limits->min_perf = div_fp(limits->min_perf_pct, 100); | 1505 | limits->min_perf = div_fp(limits->min_perf_pct, 100); |
| 1504 | limits->max_perf = div_fp(limits->max_perf_pct, 100); | 1506 | limits->max_perf = div_fp(limits->max_perf_pct, 100); |
| 1507 | limits->max_perf = round_up(limits->max_perf, FRAC_BITS); | ||
| 1505 | 1508 | ||
| 1506 | out: | 1509 | out: |
| 1507 | intel_pstate_set_update_util_hook(policy->cpu); | 1510 | intel_pstate_set_update_util_hook(policy->cpu); |
| @@ -1558,8 +1561,11 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) | |||
| 1558 | 1561 | ||
| 1559 | /* cpuinfo and default policy values */ | 1562 | /* cpuinfo and default policy values */ |
| 1560 | policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; | 1563 | policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; |
| 1561 | policy->cpuinfo.max_freq = | 1564 | update_turbo_state(); |
| 1562 | cpu->pstate.turbo_pstate * cpu->pstate.scaling; | 1565 | policy->cpuinfo.max_freq = limits->turbo_disabled ? |
| 1566 | cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; | ||
| 1567 | policy->cpuinfo.max_freq *= cpu->pstate.scaling; | ||
| 1568 | |||
| 1563 | intel_pstate_init_acpi_perf_limits(policy); | 1569 | intel_pstate_init_acpi_perf_limits(policy); |
| 1564 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 1570 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
| 1565 | cpumask_set_cpu(policy->cpu, policy->cpus); | 1571 | cpumask_set_cpu(policy->cpu, policy->cpus); |
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c index a850cbc48d8d..c49d50e68aee 100644 --- a/drivers/firmware/efi/arm-init.c +++ b/drivers/firmware/efi/arm-init.c | |||
| @@ -174,6 +174,7 @@ static __init void reserve_regions(void) | |||
| 174 | { | 174 | { |
| 175 | efi_memory_desc_t *md; | 175 | efi_memory_desc_t *md; |
| 176 | u64 paddr, npages, size; | 176 | u64 paddr, npages, size; |
| 177 | int resv; | ||
| 177 | 178 | ||
| 178 | if (efi_enabled(EFI_DBG)) | 179 | if (efi_enabled(EFI_DBG)) |
| 179 | pr_info("Processing EFI memory map:\n"); | 180 | pr_info("Processing EFI memory map:\n"); |
| @@ -190,12 +191,14 @@ static __init void reserve_regions(void) | |||
| 190 | paddr = md->phys_addr; | 191 | paddr = md->phys_addr; |
| 191 | npages = md->num_pages; | 192 | npages = md->num_pages; |
| 192 | 193 | ||
| 194 | resv = is_reserve_region(md); | ||
| 193 | if (efi_enabled(EFI_DBG)) { | 195 | if (efi_enabled(EFI_DBG)) { |
| 194 | char buf[64]; | 196 | char buf[64]; |
| 195 | 197 | ||
| 196 | pr_info(" 0x%012llx-0x%012llx %s", | 198 | pr_info(" 0x%012llx-0x%012llx %s%s\n", |
| 197 | paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, | 199 | paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, |
| 198 | efi_md_typeattr_format(buf, sizeof(buf), md)); | 200 | efi_md_typeattr_format(buf, sizeof(buf), md), |
| 201 | resv ? "*" : ""); | ||
| 199 | } | 202 | } |
| 200 | 203 | ||
| 201 | memrange_efi_to_native(&paddr, &npages); | 204 | memrange_efi_to_native(&paddr, &npages); |
| @@ -204,14 +207,9 @@ static __init void reserve_regions(void) | |||
| 204 | if (is_normal_ram(md)) | 207 | if (is_normal_ram(md)) |
| 205 | early_init_dt_add_memory_arch(paddr, size); | 208 | early_init_dt_add_memory_arch(paddr, size); |
| 206 | 209 | ||
| 207 | if (is_reserve_region(md)) { | 210 | if (resv) |
| 208 | memblock_mark_nomap(paddr, size); | 211 | memblock_mark_nomap(paddr, size); |
| 209 | if (efi_enabled(EFI_DBG)) | ||
| 210 | pr_cont("*"); | ||
| 211 | } | ||
| 212 | 212 | ||
| 213 | if (efi_enabled(EFI_DBG)) | ||
| 214 | pr_cont("\n"); | ||
| 215 | } | 213 | } |
| 216 | 214 | ||
| 217 | set_bit(EFI_MEMMAP, &efi.flags); | 215 | set_bit(EFI_MEMMAP, &efi.flags); |
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 48da857f4774..a116609b1914 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
| @@ -33,6 +33,7 @@ config ARCH_REQUIRE_GPIOLIB | |||
| 33 | 33 | ||
| 34 | menuconfig GPIOLIB | 34 | menuconfig GPIOLIB |
| 35 | bool "GPIO Support" | 35 | bool "GPIO Support" |
| 36 | select ANON_INODES | ||
| 36 | help | 37 | help |
| 37 | This enables GPIO support through the generic GPIO library. | 38 | This enables GPIO support through the generic GPIO library. |
| 38 | You only need to enable this, if you also want to enable | 39 | You only need to enable this, if you also want to enable |
diff --git a/drivers/gpio/gpio-104-dio-48e.c b/drivers/gpio/gpio-104-dio-48e.c index 1a647c07be67..fcf776971ca9 100644 --- a/drivers/gpio/gpio-104-dio-48e.c +++ b/drivers/gpio/gpio-104-dio-48e.c | |||
| @@ -75,7 +75,7 @@ static int dio48e_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | |||
| 75 | { | 75 | { |
| 76 | struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); | 76 | struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); |
| 77 | const unsigned io_port = offset / 8; | 77 | const unsigned io_port = offset / 8; |
| 78 | const unsigned control_port = io_port / 2; | 78 | const unsigned int control_port = io_port / 3; |
| 79 | const unsigned control_addr = dio48egpio->base + 3 + control_port*4; | 79 | const unsigned control_addr = dio48egpio->base + 3 + control_port*4; |
| 80 | unsigned long flags; | 80 | unsigned long flags; |
| 81 | unsigned control; | 81 | unsigned control; |
| @@ -115,7 +115,7 @@ static int dio48e_gpio_direction_output(struct gpio_chip *chip, unsigned offset, | |||
| 115 | { | 115 | { |
| 116 | struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); | 116 | struct dio48e_gpio *const dio48egpio = gpiochip_get_data(chip); |
| 117 | const unsigned io_port = offset / 8; | 117 | const unsigned io_port = offset / 8; |
| 118 | const unsigned control_port = io_port / 2; | 118 | const unsigned int control_port = io_port / 3; |
| 119 | const unsigned mask = BIT(offset % 8); | 119 | const unsigned mask = BIT(offset % 8); |
| 120 | const unsigned control_addr = dio48egpio->base + 3 + control_port*4; | 120 | const unsigned control_addr = dio48egpio->base + 3 + control_port*4; |
| 121 | const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port; | 121 | const unsigned out_port = (io_port > 2) ? io_port + 1 : io_port; |
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c index 9aabc48ff5de..953e4b829e32 100644 --- a/drivers/gpio/gpio-bcm-kona.c +++ b/drivers/gpio/gpio-bcm-kona.c | |||
| @@ -547,11 +547,11 @@ static void bcm_kona_gpio_reset(struct bcm_kona_gpio *kona_gpio) | |||
| 547 | /* disable interrupts and clear status */ | 547 | /* disable interrupts and clear status */ |
| 548 | for (i = 0; i < kona_gpio->num_bank; i++) { | 548 | for (i = 0; i < kona_gpio->num_bank; i++) { |
| 549 | /* Unlock the entire bank first */ | 549 | /* Unlock the entire bank first */ |
| 550 | bcm_kona_gpio_write_lock_regs(kona_gpio, i, UNLOCK_CODE); | 550 | bcm_kona_gpio_write_lock_regs(reg_base, i, UNLOCK_CODE); |
| 551 | writel(0xffffffff, reg_base + GPIO_INT_MASK(i)); | 551 | writel(0xffffffff, reg_base + GPIO_INT_MASK(i)); |
| 552 | writel(0xffffffff, reg_base + GPIO_INT_STATUS(i)); | 552 | writel(0xffffffff, reg_base + GPIO_INT_STATUS(i)); |
| 553 | /* Now re-lock the bank */ | 553 | /* Now re-lock the bank */ |
| 554 | bcm_kona_gpio_write_lock_regs(kona_gpio, i, LOCK_CODE); | 554 | bcm_kona_gpio_write_lock_regs(reg_base, i, LOCK_CODE); |
| 555 | } | 555 | } |
| 556 | } | 556 | } |
| 557 | 557 | ||
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c index 75c6355b018d..e72794e463aa 100644 --- a/drivers/gpio/gpio-zynq.c +++ b/drivers/gpio/gpio-zynq.c | |||
| @@ -709,7 +709,13 @@ static int zynq_gpio_probe(struct platform_device *pdev) | |||
| 709 | dev_err(&pdev->dev, "input clock not found.\n"); | 709 | dev_err(&pdev->dev, "input clock not found.\n"); |
| 710 | return PTR_ERR(gpio->clk); | 710 | return PTR_ERR(gpio->clk); |
| 711 | } | 711 | } |
| 712 | ret = clk_prepare_enable(gpio->clk); | ||
| 713 | if (ret) { | ||
| 714 | dev_err(&pdev->dev, "Unable to enable clock.\n"); | ||
| 715 | return ret; | ||
| 716 | } | ||
| 712 | 717 | ||
| 718 | pm_runtime_set_active(&pdev->dev); | ||
| 713 | pm_runtime_enable(&pdev->dev); | 719 | pm_runtime_enable(&pdev->dev); |
| 714 | ret = pm_runtime_get_sync(&pdev->dev); | 720 | ret = pm_runtime_get_sync(&pdev->dev); |
| 715 | if (ret < 0) | 721 | if (ret < 0) |
| @@ -747,6 +753,7 @@ err_pm_put: | |||
| 747 | pm_runtime_put(&pdev->dev); | 753 | pm_runtime_put(&pdev->dev); |
| 748 | err_pm_dis: | 754 | err_pm_dis: |
| 749 | pm_runtime_disable(&pdev->dev); | 755 | pm_runtime_disable(&pdev->dev); |
| 756 | clk_disable_unprepare(gpio->clk); | ||
| 750 | 757 | ||
| 751 | return ret; | 758 | return ret; |
| 752 | } | 759 | } |
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index d22dcc38179d..4aabddb38b59 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
| 17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 18 | #include <linux/io.h> | 18 | #include <linux/io.h> |
| 19 | #include <linux/io-mapping.h> | ||
| 19 | #include <linux/gpio/consumer.h> | 20 | #include <linux/gpio/consumer.h> |
| 20 | #include <linux/of.h> | 21 | #include <linux/of.h> |
| 21 | #include <linux/of_address.h> | 22 | #include <linux/of_address.h> |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 24f60d28f0c0..58d822d7e8da 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -449,7 +449,6 @@ static void gpiodevice_release(struct device *dev) | |||
| 449 | { | 449 | { |
| 450 | struct gpio_device *gdev = dev_get_drvdata(dev); | 450 | struct gpio_device *gdev = dev_get_drvdata(dev); |
| 451 | 451 | ||
| 452 | cdev_del(&gdev->chrdev); | ||
| 453 | list_del(&gdev->list); | 452 | list_del(&gdev->list); |
| 454 | ida_simple_remove(&gpio_ida, gdev->id); | 453 | ida_simple_remove(&gpio_ida, gdev->id); |
| 455 | kfree(gdev->label); | 454 | kfree(gdev->label); |
| @@ -482,7 +481,6 @@ static int gpiochip_setup_dev(struct gpio_device *gdev) | |||
| 482 | 481 | ||
| 483 | /* From this point, the .release() function cleans up gpio_device */ | 482 | /* From this point, the .release() function cleans up gpio_device */ |
| 484 | gdev->dev.release = gpiodevice_release; | 483 | gdev->dev.release = gpiodevice_release; |
| 485 | get_device(&gdev->dev); | ||
| 486 | pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n", | 484 | pr_debug("%s: registered GPIOs %d to %d on device: %s (%s)\n", |
| 487 | __func__, gdev->base, gdev->base + gdev->ngpio - 1, | 485 | __func__, gdev->base, gdev->base + gdev->ngpio - 1, |
| 488 | dev_name(&gdev->dev), gdev->chip->label ? : "generic"); | 486 | dev_name(&gdev->dev), gdev->chip->label ? : "generic"); |
| @@ -770,6 +768,8 @@ void gpiochip_remove(struct gpio_chip *chip) | |||
| 770 | * be removed, else it will be dangling until the last user is | 768 | * be removed, else it will be dangling until the last user is |
| 771 | * gone. | 769 | * gone. |
| 772 | */ | 770 | */ |
| 771 | cdev_del(&gdev->chrdev); | ||
| 772 | device_del(&gdev->dev); | ||
| 773 | put_device(&gdev->dev); | 773 | put_device(&gdev->dev); |
| 774 | } | 774 | } |
| 775 | EXPORT_SYMBOL_GPL(gpiochip_remove); | 775 | EXPORT_SYMBOL_GPL(gpiochip_remove); |
| @@ -869,7 +869,7 @@ struct gpio_chip *gpiochip_find(void *data, | |||
| 869 | 869 | ||
| 870 | spin_lock_irqsave(&gpio_lock, flags); | 870 | spin_lock_irqsave(&gpio_lock, flags); |
| 871 | list_for_each_entry(gdev, &gpio_devices, list) | 871 | list_for_each_entry(gdev, &gpio_devices, list) |
| 872 | if (match(gdev->chip, data)) | 872 | if (gdev->chip && match(gdev->chip, data)) |
| 873 | break; | 873 | break; |
| 874 | 874 | ||
| 875 | /* No match? */ | 875 | /* No match? */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 992f00b65be4..01c36b8d6222 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
| @@ -799,6 +799,7 @@ struct amdgpu_ring { | |||
| 799 | unsigned cond_exe_offs; | 799 | unsigned cond_exe_offs; |
| 800 | u64 cond_exe_gpu_addr; | 800 | u64 cond_exe_gpu_addr; |
| 801 | volatile u32 *cond_exe_cpu_addr; | 801 | volatile u32 *cond_exe_cpu_addr; |
| 802 | int vmid; | ||
| 802 | }; | 803 | }; |
| 803 | 804 | ||
| 804 | /* | 805 | /* |
| @@ -936,7 +937,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, | |||
| 936 | unsigned vm_id, uint64_t pd_addr, | 937 | unsigned vm_id, uint64_t pd_addr, |
| 937 | uint32_t gds_base, uint32_t gds_size, | 938 | uint32_t gds_base, uint32_t gds_size, |
| 938 | uint32_t gws_base, uint32_t gws_size, | 939 | uint32_t gws_base, uint32_t gws_size, |
| 939 | uint32_t oa_base, uint32_t oa_size); | 940 | uint32_t oa_base, uint32_t oa_size, |
| 941 | bool vmid_switch); | ||
| 940 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); | 942 | void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); |
| 941 | uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); | 943 | uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); |
| 942 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, | 944 | int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c index 199f76baf22c..8943099eb135 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | |||
| @@ -696,6 +696,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type) | |||
| 696 | return result; | 696 | return result; |
| 697 | } | 697 | } |
| 698 | 698 | ||
| 699 | static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type) | ||
| 700 | { | ||
| 701 | CGS_FUNC_ADEV; | ||
| 702 | if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) { | ||
| 703 | release_firmware(adev->pm.fw); | ||
| 704 | return 0; | ||
| 705 | } | ||
| 706 | /* cannot release other firmware because they are not created by cgs */ | ||
| 707 | return -EINVAL; | ||
| 708 | } | ||
| 709 | |||
| 699 | static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, | 710 | static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device, |
| 700 | enum cgs_ucode_id type, | 711 | enum cgs_ucode_id type, |
| 701 | struct cgs_firmware_info *info) | 712 | struct cgs_firmware_info *info) |
| @@ -1125,6 +1136,7 @@ static const struct cgs_ops amdgpu_cgs_ops = { | |||
| 1125 | amdgpu_cgs_pm_query_clock_limits, | 1136 | amdgpu_cgs_pm_query_clock_limits, |
| 1126 | amdgpu_cgs_set_camera_voltages, | 1137 | amdgpu_cgs_set_camera_voltages, |
| 1127 | amdgpu_cgs_get_firmware_info, | 1138 | amdgpu_cgs_get_firmware_info, |
| 1139 | amdgpu_cgs_rel_firmware, | ||
| 1128 | amdgpu_cgs_set_powergating_state, | 1140 | amdgpu_cgs_set_powergating_state, |
| 1129 | amdgpu_cgs_set_clockgating_state, | 1141 | amdgpu_cgs_set_clockgating_state, |
| 1130 | amdgpu_cgs_get_active_displays_info, | 1142 | amdgpu_cgs_get_active_displays_info, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index bb8b149786d7..964f31404f17 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -827,8 +827,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg) | |||
| 827 | */ | 827 | */ |
| 828 | static void amdgpu_atombios_fini(struct amdgpu_device *adev) | 828 | static void amdgpu_atombios_fini(struct amdgpu_device *adev) |
| 829 | { | 829 | { |
| 830 | if (adev->mode_info.atom_context) | 830 | if (adev->mode_info.atom_context) { |
| 831 | kfree(adev->mode_info.atom_context->scratch); | 831 | kfree(adev->mode_info.atom_context->scratch); |
| 832 | kfree(adev->mode_info.atom_context->iio); | ||
| 833 | } | ||
| 832 | kfree(adev->mode_info.atom_context); | 834 | kfree(adev->mode_info.atom_context); |
| 833 | adev->mode_info.atom_context = NULL; | 835 | adev->mode_info.atom_context = NULL; |
| 834 | kfree(adev->mode_info.atom_card_info); | 836 | kfree(adev->mode_info.atom_card_info); |
| @@ -1325,6 +1327,11 @@ static int amdgpu_fini(struct amdgpu_device *adev) | |||
| 1325 | adev->ip_block_status[i].valid = false; | 1327 | adev->ip_block_status[i].valid = false; |
| 1326 | } | 1328 | } |
| 1327 | 1329 | ||
| 1330 | for (i = adev->num_ip_blocks - 1; i >= 0; i--) { | ||
| 1331 | if (adev->ip_blocks[i].funcs->late_fini) | ||
| 1332 | adev->ip_blocks[i].funcs->late_fini((void *)adev); | ||
| 1333 | } | ||
| 1334 | |||
| 1328 | return 0; | 1335 | return 0; |
| 1329 | } | 1336 | } |
| 1330 | 1337 | ||
| @@ -1513,8 +1520,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, | |||
| 1513 | amdgpu_atombios_has_gpu_virtualization_table(adev); | 1520 | amdgpu_atombios_has_gpu_virtualization_table(adev); |
| 1514 | 1521 | ||
| 1515 | /* Post card if necessary */ | 1522 | /* Post card if necessary */ |
| 1516 | if (!amdgpu_card_posted(adev) || | 1523 | if (!amdgpu_card_posted(adev)) { |
| 1517 | adev->virtualization.supports_sr_iov) { | ||
| 1518 | if (!adev->bios) { | 1524 | if (!adev->bios) { |
| 1519 | dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); | 1525 | dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); |
| 1520 | return -EINVAL; | 1526 | return -EINVAL; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 34e35423b78e..7a0b1e50f293 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | |||
| @@ -122,6 +122,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 122 | bool skip_preamble, need_ctx_switch; | 122 | bool skip_preamble, need_ctx_switch; |
| 123 | unsigned patch_offset = ~0; | 123 | unsigned patch_offset = ~0; |
| 124 | struct amdgpu_vm *vm; | 124 | struct amdgpu_vm *vm; |
| 125 | int vmid = 0, old_vmid = ring->vmid; | ||
| 125 | struct fence *hwf; | 126 | struct fence *hwf; |
| 126 | uint64_t ctx; | 127 | uint64_t ctx; |
| 127 | 128 | ||
| @@ -135,9 +136,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 135 | if (job) { | 136 | if (job) { |
| 136 | vm = job->vm; | 137 | vm = job->vm; |
| 137 | ctx = job->ctx; | 138 | ctx = job->ctx; |
| 139 | vmid = job->vm_id; | ||
| 138 | } else { | 140 | } else { |
| 139 | vm = NULL; | 141 | vm = NULL; |
| 140 | ctx = 0; | 142 | ctx = 0; |
| 143 | vmid = 0; | ||
| 141 | } | 144 | } |
| 142 | 145 | ||
| 143 | if (!ring->ready) { | 146 | if (!ring->ready) { |
| @@ -163,7 +166,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 163 | r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr, | 166 | r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr, |
| 164 | job->gds_base, job->gds_size, | 167 | job->gds_base, job->gds_size, |
| 165 | job->gws_base, job->gws_size, | 168 | job->gws_base, job->gws_size, |
| 166 | job->oa_base, job->oa_size); | 169 | job->oa_base, job->oa_size, |
| 170 | (ring->current_ctx == ctx) && (old_vmid != vmid)); | ||
| 167 | if (r) { | 171 | if (r) { |
| 168 | amdgpu_ring_undo(ring); | 172 | amdgpu_ring_undo(ring); |
| 169 | return r; | 173 | return r; |
| @@ -180,7 +184,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 180 | need_ctx_switch = ring->current_ctx != ctx; | 184 | need_ctx_switch = ring->current_ctx != ctx; |
| 181 | for (i = 0; i < num_ibs; ++i) { | 185 | for (i = 0; i < num_ibs; ++i) { |
| 182 | ib = &ibs[i]; | 186 | ib = &ibs[i]; |
| 183 | |||
| 184 | /* drop preamble IBs if we don't have a context switch */ | 187 | /* drop preamble IBs if we don't have a context switch */ |
| 185 | if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble) | 188 | if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble) |
| 186 | continue; | 189 | continue; |
| @@ -188,6 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 188 | amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0, | 191 | amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0, |
| 189 | need_ctx_switch); | 192 | need_ctx_switch); |
| 190 | need_ctx_switch = false; | 193 | need_ctx_switch = false; |
| 194 | ring->vmid = vmid; | ||
| 191 | } | 195 | } |
| 192 | 196 | ||
| 193 | if (ring->funcs->emit_hdp_invalidate) | 197 | if (ring->funcs->emit_hdp_invalidate) |
| @@ -198,6 +202,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, | |||
| 198 | dev_err(adev->dev, "failed to emit fence (%d)\n", r); | 202 | dev_err(adev->dev, "failed to emit fence (%d)\n", r); |
| 199 | if (job && job->vm_id) | 203 | if (job && job->vm_id) |
| 200 | amdgpu_vm_reset_id(adev, job->vm_id); | 204 | amdgpu_vm_reset_id(adev, job->vm_id); |
| 205 | ring->vmid = old_vmid; | ||
| 201 | amdgpu_ring_undo(ring); | 206 | amdgpu_ring_undo(ring); |
| 202 | return r; | 207 | return r; |
| 203 | } | 208 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 6bd961fb43dc..82256558e0f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c | |||
| @@ -183,13 +183,6 @@ static int amdgpu_pp_sw_fini(void *handle) | |||
| 183 | if (ret) | 183 | if (ret) |
| 184 | return ret; | 184 | return ret; |
| 185 | 185 | ||
| 186 | #ifdef CONFIG_DRM_AMD_POWERPLAY | ||
| 187 | if (adev->pp_enabled) { | ||
| 188 | amdgpu_pm_sysfs_fini(adev); | ||
| 189 | amd_powerplay_fini(adev->powerplay.pp_handle); | ||
| 190 | } | ||
| 191 | #endif | ||
| 192 | |||
| 193 | return ret; | 186 | return ret; |
| 194 | } | 187 | } |
| 195 | 188 | ||
| @@ -223,6 +216,22 @@ static int amdgpu_pp_hw_fini(void *handle) | |||
| 223 | return ret; | 216 | return ret; |
| 224 | } | 217 | } |
| 225 | 218 | ||
| 219 | static void amdgpu_pp_late_fini(void *handle) | ||
| 220 | { | ||
| 221 | #ifdef CONFIG_DRM_AMD_POWERPLAY | ||
| 222 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 223 | |||
| 224 | if (adev->pp_enabled) { | ||
| 225 | amdgpu_pm_sysfs_fini(adev); | ||
| 226 | amd_powerplay_fini(adev->powerplay.pp_handle); | ||
| 227 | } | ||
| 228 | |||
| 229 | if (adev->powerplay.ip_funcs->late_fini) | ||
| 230 | adev->powerplay.ip_funcs->late_fini( | ||
| 231 | adev->powerplay.pp_handle); | ||
| 232 | #endif | ||
| 233 | } | ||
| 234 | |||
| 226 | static int amdgpu_pp_suspend(void *handle) | 235 | static int amdgpu_pp_suspend(void *handle) |
| 227 | { | 236 | { |
| 228 | int ret = 0; | 237 | int ret = 0; |
| @@ -311,6 +320,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = { | |||
| 311 | .sw_fini = amdgpu_pp_sw_fini, | 320 | .sw_fini = amdgpu_pp_sw_fini, |
| 312 | .hw_init = amdgpu_pp_hw_init, | 321 | .hw_init = amdgpu_pp_hw_init, |
| 313 | .hw_fini = amdgpu_pp_hw_fini, | 322 | .hw_fini = amdgpu_pp_hw_fini, |
| 323 | .late_fini = amdgpu_pp_late_fini, | ||
| 314 | .suspend = amdgpu_pp_suspend, | 324 | .suspend = amdgpu_pp_suspend, |
| 315 | .resume = amdgpu_pp_resume, | 325 | .resume = amdgpu_pp_resume, |
| 316 | .is_idle = amdgpu_pp_is_idle, | 326 | .is_idle = amdgpu_pp_is_idle, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 3b02272db678..870f9494252c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | |||
| @@ -343,6 +343,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) | |||
| 343 | ring->ring = NULL; | 343 | ring->ring = NULL; |
| 344 | ring->ring_obj = NULL; | 344 | ring->ring_obj = NULL; |
| 345 | 345 | ||
| 346 | amdgpu_wb_free(ring->adev, ring->cond_exe_offs); | ||
| 346 | amdgpu_wb_free(ring->adev, ring->fence_offs); | 347 | amdgpu_wb_free(ring->adev, ring->fence_offs); |
| 347 | amdgpu_wb_free(ring->adev, ring->rptr_offs); | 348 | amdgpu_wb_free(ring->adev, ring->rptr_offs); |
| 348 | amdgpu_wb_free(ring->adev, ring->wptr_offs); | 349 | amdgpu_wb_free(ring->adev, ring->wptr_offs); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index 8bf84efafb04..48618ee324eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c | |||
| @@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, | |||
| 115 | return r; | 115 | return r; |
| 116 | } | 116 | } |
| 117 | r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); | 117 | r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); |
| 118 | memset(sa_manager->cpu_ptr, 0, sa_manager->size); | ||
| 118 | amdgpu_bo_unreserve(sa_manager->bo); | 119 | amdgpu_bo_unreserve(sa_manager->bo); |
| 119 | return r; | 120 | return r; |
| 120 | } | 121 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 01abfc21b4a2..e19520c4b4b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | |||
| @@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) | |||
| 253 | { | 253 | { |
| 254 | int r; | 254 | int r; |
| 255 | 255 | ||
| 256 | if (adev->uvd.vcpu_bo == NULL) | 256 | kfree(adev->uvd.saved_bo); |
| 257 | return 0; | ||
| 258 | 257 | ||
| 259 | amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); | 258 | amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); |
| 260 | 259 | ||
| 261 | r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); | 260 | if (adev->uvd.vcpu_bo) { |
| 262 | if (!r) { | 261 | r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); |
| 263 | amdgpu_bo_kunmap(adev->uvd.vcpu_bo); | 262 | if (!r) { |
| 264 | amdgpu_bo_unpin(adev->uvd.vcpu_bo); | 263 | amdgpu_bo_kunmap(adev->uvd.vcpu_bo); |
| 265 | amdgpu_bo_unreserve(adev->uvd.vcpu_bo); | 264 | amdgpu_bo_unpin(adev->uvd.vcpu_bo); |
| 266 | } | 265 | amdgpu_bo_unreserve(adev->uvd.vcpu_bo); |
| 266 | } | ||
| 267 | 267 | ||
| 268 | amdgpu_bo_unref(&adev->uvd.vcpu_bo); | 268 | amdgpu_bo_unref(&adev->uvd.vcpu_bo); |
| 269 | } | ||
| 269 | 270 | ||
| 270 | amdgpu_ring_fini(&adev->uvd.ring); | 271 | amdgpu_ring_fini(&adev->uvd.ring); |
| 271 | 272 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9f36ed30ba11..62a4c127620f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
| @@ -298,7 +298,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, | |||
| 298 | unsigned vm_id, uint64_t pd_addr, | 298 | unsigned vm_id, uint64_t pd_addr, |
| 299 | uint32_t gds_base, uint32_t gds_size, | 299 | uint32_t gds_base, uint32_t gds_size, |
| 300 | uint32_t gws_base, uint32_t gws_size, | 300 | uint32_t gws_base, uint32_t gws_size, |
| 301 | uint32_t oa_base, uint32_t oa_size) | 301 | uint32_t oa_base, uint32_t oa_size, |
| 302 | bool vmid_switch) | ||
| 302 | { | 303 | { |
| 303 | struct amdgpu_device *adev = ring->adev; | 304 | struct amdgpu_device *adev = ring->adev; |
| 304 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; | 305 | struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; |
| @@ -312,8 +313,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, | |||
| 312 | int r; | 313 | int r; |
| 313 | 314 | ||
| 314 | if (ring->funcs->emit_pipeline_sync && ( | 315 | if (ring->funcs->emit_pipeline_sync && ( |
| 315 | pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || | 316 | pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch)) |
| 316 | ring->type == AMDGPU_RING_TYPE_COMPUTE)) | ||
| 317 | amdgpu_ring_emit_pipeline_sync(ring); | 317 | amdgpu_ring_emit_pipeline_sync(ring); |
| 318 | 318 | ||
| 319 | if (ring->funcs->emit_vm_flush && | 319 | if (ring->funcs->emit_vm_flush && |
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index ea407db1fbcf..5ec1f1e9c983 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c | |||
| @@ -6221,6 +6221,9 @@ static int ci_dpm_sw_fini(void *handle) | |||
| 6221 | ci_dpm_fini(adev); | 6221 | ci_dpm_fini(adev); |
| 6222 | mutex_unlock(&adev->pm.mutex); | 6222 | mutex_unlock(&adev->pm.mutex); |
| 6223 | 6223 | ||
| 6224 | release_firmware(adev->pm.fw); | ||
| 6225 | adev->pm.fw = NULL; | ||
| 6226 | |||
| 6224 | return 0; | 6227 | return 0; |
| 6225 | } | 6228 | } |
| 6226 | 6229 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 518dca43b133..9dc4e24e31e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c | |||
| @@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin"); | |||
| 66 | 66 | ||
| 67 | u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); | 67 | u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); |
| 68 | 68 | ||
| 69 | |||
| 70 | static void cik_sdma_free_microcode(struct amdgpu_device *adev) | ||
| 71 | { | ||
| 72 | int i; | ||
| 73 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 74 | release_firmware(adev->sdma.instance[i].fw); | ||
| 75 | adev->sdma.instance[i].fw = NULL; | ||
| 76 | } | ||
| 77 | } | ||
| 78 | |||
| 69 | /* | 79 | /* |
| 70 | * sDMA - System DMA | 80 | * sDMA - System DMA |
| 71 | * Starting with CIK, the GPU has new asynchronous | 81 | * Starting with CIK, the GPU has new asynchronous |
| @@ -419,6 +429,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) | |||
| 419 | /* Initialize the ring buffer's read and write pointers */ | 429 | /* Initialize the ring buffer's read and write pointers */ |
| 420 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | 430 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); |
| 421 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | 431 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); |
| 432 | WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); | ||
| 433 | WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); | ||
| 422 | 434 | ||
| 423 | /* set the wb address whether it's enabled or not */ | 435 | /* set the wb address whether it's enabled or not */ |
| 424 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | 436 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], |
| @@ -446,7 +458,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) | |||
| 446 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 458 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
| 447 | 459 | ||
| 448 | ring->ready = true; | 460 | ring->ready = true; |
| 461 | } | ||
| 462 | |||
| 463 | cik_sdma_enable(adev, true); | ||
| 449 | 464 | ||
| 465 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 466 | ring = &adev->sdma.instance[i].ring; | ||
| 450 | r = amdgpu_ring_test_ring(ring); | 467 | r = amdgpu_ring_test_ring(ring); |
| 451 | if (r) { | 468 | if (r) { |
| 452 | ring->ready = false; | 469 | ring->ready = false; |
| @@ -529,8 +546,8 @@ static int cik_sdma_start(struct amdgpu_device *adev) | |||
| 529 | if (r) | 546 | if (r) |
| 530 | return r; | 547 | return r; |
| 531 | 548 | ||
| 532 | /* unhalt the MEs */ | 549 | /* halt the engine before programing */ |
| 533 | cik_sdma_enable(adev, true); | 550 | cik_sdma_enable(adev, false); |
| 534 | 551 | ||
| 535 | /* start the gfx rings and rlc compute queues */ | 552 | /* start the gfx rings and rlc compute queues */ |
| 536 | r = cik_sdma_gfx_resume(adev); | 553 | r = cik_sdma_gfx_resume(adev); |
| @@ -998,6 +1015,7 @@ static int cik_sdma_sw_fini(void *handle) | |||
| 998 | for (i = 0; i < adev->sdma.num_instances; i++) | 1015 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 999 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | 1016 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
| 1000 | 1017 | ||
| 1018 | cik_sdma_free_microcode(adev); | ||
| 1001 | return 0; | 1019 | return 0; |
| 1002 | } | 1020 | } |
| 1003 | 1021 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c index 245cabf06575..ed03b75175d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c | |||
| @@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle) | |||
| 72 | 72 | ||
| 73 | static int fiji_dpm_sw_fini(void *handle) | 73 | static int fiji_dpm_sw_fini(void *handle) |
| 74 | { | 74 | { |
| 75 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 76 | |||
| 77 | release_firmware(adev->pm.fw); | ||
| 78 | adev->pm.fw = NULL; | ||
| 79 | |||
| 75 | return 0; | 80 | return 0; |
| 76 | } | 81 | } |
| 77 | 82 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 7f18a53ab53a..8c6ad1e72f02 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | |||
| @@ -991,6 +991,22 @@ out: | |||
| 991 | return err; | 991 | return err; |
| 992 | } | 992 | } |
| 993 | 993 | ||
| 994 | static void gfx_v7_0_free_microcode(struct amdgpu_device *adev) | ||
| 995 | { | ||
| 996 | release_firmware(adev->gfx.pfp_fw); | ||
| 997 | adev->gfx.pfp_fw = NULL; | ||
| 998 | release_firmware(adev->gfx.me_fw); | ||
| 999 | adev->gfx.me_fw = NULL; | ||
| 1000 | release_firmware(adev->gfx.ce_fw); | ||
| 1001 | adev->gfx.ce_fw = NULL; | ||
| 1002 | release_firmware(adev->gfx.mec_fw); | ||
| 1003 | adev->gfx.mec_fw = NULL; | ||
| 1004 | release_firmware(adev->gfx.mec2_fw); | ||
| 1005 | adev->gfx.mec2_fw = NULL; | ||
| 1006 | release_firmware(adev->gfx.rlc_fw); | ||
| 1007 | adev->gfx.rlc_fw = NULL; | ||
| 1008 | } | ||
| 1009 | |||
| 994 | /** | 1010 | /** |
| 995 | * gfx_v7_0_tiling_mode_table_init - init the hw tiling table | 1011 | * gfx_v7_0_tiling_mode_table_init - init the hw tiling table |
| 996 | * | 1012 | * |
| @@ -4489,6 +4505,7 @@ static int gfx_v7_0_sw_fini(void *handle) | |||
| 4489 | gfx_v7_0_cp_compute_fini(adev); | 4505 | gfx_v7_0_cp_compute_fini(adev); |
| 4490 | gfx_v7_0_rlc_fini(adev); | 4506 | gfx_v7_0_rlc_fini(adev); |
| 4491 | gfx_v7_0_mec_fini(adev); | 4507 | gfx_v7_0_mec_fini(adev); |
| 4508 | gfx_v7_0_free_microcode(adev); | ||
| 4492 | 4509 | ||
| 4493 | return 0; | 4510 | return 0; |
| 4494 | } | 4511 | } |
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index f19bab68fd83..9f6f8669edc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | |||
| @@ -836,6 +836,26 @@ err1: | |||
| 836 | return r; | 836 | return r; |
| 837 | } | 837 | } |
| 838 | 838 | ||
| 839 | |||
| 840 | static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) { | ||
| 841 | release_firmware(adev->gfx.pfp_fw); | ||
| 842 | adev->gfx.pfp_fw = NULL; | ||
| 843 | release_firmware(adev->gfx.me_fw); | ||
| 844 | adev->gfx.me_fw = NULL; | ||
| 845 | release_firmware(adev->gfx.ce_fw); | ||
| 846 | adev->gfx.ce_fw = NULL; | ||
| 847 | release_firmware(adev->gfx.rlc_fw); | ||
| 848 | adev->gfx.rlc_fw = NULL; | ||
| 849 | release_firmware(adev->gfx.mec_fw); | ||
| 850 | adev->gfx.mec_fw = NULL; | ||
| 851 | if ((adev->asic_type != CHIP_STONEY) && | ||
| 852 | (adev->asic_type != CHIP_TOPAZ)) | ||
| 853 | release_firmware(adev->gfx.mec2_fw); | ||
| 854 | adev->gfx.mec2_fw = NULL; | ||
| 855 | |||
| 856 | kfree(adev->gfx.rlc.register_list_format); | ||
| 857 | } | ||
| 858 | |||
| 839 | static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) | 859 | static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) |
| 840 | { | 860 | { |
| 841 | const char *chip_name; | 861 | const char *chip_name; |
| @@ -1983,7 +2003,7 @@ static int gfx_v8_0_sw_fini(void *handle) | |||
| 1983 | 2003 | ||
| 1984 | gfx_v8_0_rlc_fini(adev); | 2004 | gfx_v8_0_rlc_fini(adev); |
| 1985 | 2005 | ||
| 1986 | kfree(adev->gfx.rlc.register_list_format); | 2006 | gfx_v8_0_free_microcode(adev); |
| 1987 | 2007 | ||
| 1988 | return 0; | 2008 | return 0; |
| 1989 | } | 2009 | } |
| @@ -3974,11 +3994,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) | |||
| 3974 | amdgpu_ring_write(ring, 0x3a00161a); | 3994 | amdgpu_ring_write(ring, 0x3a00161a); |
| 3975 | amdgpu_ring_write(ring, 0x0000002e); | 3995 | amdgpu_ring_write(ring, 0x0000002e); |
| 3976 | break; | 3996 | break; |
| 3977 | case CHIP_TOPAZ: | ||
| 3978 | case CHIP_CARRIZO: | 3997 | case CHIP_CARRIZO: |
| 3979 | amdgpu_ring_write(ring, 0x00000002); | 3998 | amdgpu_ring_write(ring, 0x00000002); |
| 3980 | amdgpu_ring_write(ring, 0x00000000); | 3999 | amdgpu_ring_write(ring, 0x00000000); |
| 3981 | break; | 4000 | break; |
| 4001 | case CHIP_TOPAZ: | ||
| 4002 | amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ? | ||
| 4003 | 0x00000000 : 0x00000002); | ||
| 4004 | amdgpu_ring_write(ring, 0x00000000); | ||
| 4005 | break; | ||
| 3982 | case CHIP_STONEY: | 4006 | case CHIP_STONEY: |
| 3983 | amdgpu_ring_write(ring, 0x00000000); | 4007 | amdgpu_ring_write(ring, 0x00000000); |
| 3984 | amdgpu_ring_write(ring, 0x00000000); | 4008 | amdgpu_ring_write(ring, 0x00000000); |
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c index 460bc8ad37e6..825ccd63f2dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c | |||
| @@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle) | |||
| 72 | 72 | ||
| 73 | static int iceland_dpm_sw_fini(void *handle) | 73 | static int iceland_dpm_sw_fini(void *handle) |
| 74 | { | 74 | { |
| 75 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 76 | |||
| 77 | release_firmware(adev->pm.fw); | ||
| 78 | adev->pm.fw = NULL; | ||
| 79 | |||
| 75 | return 0; | 80 | return 0; |
| 76 | } | 81 | } |
| 77 | 82 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index f4c3130d3fdb..b556bd0a8797 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | |||
| @@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) | |||
| 105 | } | 105 | } |
| 106 | } | 106 | } |
| 107 | 107 | ||
| 108 | static void sdma_v2_4_free_microcode(struct amdgpu_device *adev) | ||
| 109 | { | ||
| 110 | int i; | ||
| 111 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 112 | release_firmware(adev->sdma.instance[i].fw); | ||
| 113 | adev->sdma.instance[i].fw = NULL; | ||
| 114 | } | ||
| 115 | } | ||
| 116 | |||
| 108 | /** | 117 | /** |
| 109 | * sdma_v2_4_init_microcode - load ucode images from disk | 118 | * sdma_v2_4_init_microcode - load ucode images from disk |
| 110 | * | 119 | * |
| @@ -461,6 +470,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) | |||
| 461 | /* Initialize the ring buffer's read and write pointers */ | 470 | /* Initialize the ring buffer's read and write pointers */ |
| 462 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | 471 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); |
| 463 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | 472 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); |
| 473 | WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); | ||
| 474 | WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); | ||
| 464 | 475 | ||
| 465 | /* set the wb address whether it's enabled or not */ | 476 | /* set the wb address whether it's enabled or not */ |
| 466 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | 477 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], |
| @@ -489,7 +500,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) | |||
| 489 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 500 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
| 490 | 501 | ||
| 491 | ring->ready = true; | 502 | ring->ready = true; |
| 503 | } | ||
| 492 | 504 | ||
| 505 | sdma_v2_4_enable(adev, true); | ||
| 506 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 507 | ring = &adev->sdma.instance[i].ring; | ||
| 493 | r = amdgpu_ring_test_ring(ring); | 508 | r = amdgpu_ring_test_ring(ring); |
| 494 | if (r) { | 509 | if (r) { |
| 495 | ring->ready = false; | 510 | ring->ready = false; |
| @@ -580,8 +595,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev) | |||
| 580 | return -EINVAL; | 595 | return -EINVAL; |
| 581 | } | 596 | } |
| 582 | 597 | ||
| 583 | /* unhalt the MEs */ | 598 | /* halt the engine before programing */ |
| 584 | sdma_v2_4_enable(adev, true); | 599 | sdma_v2_4_enable(adev, false); |
| 585 | 600 | ||
| 586 | /* start the gfx rings and rlc compute queues */ | 601 | /* start the gfx rings and rlc compute queues */ |
| 587 | r = sdma_v2_4_gfx_resume(adev); | 602 | r = sdma_v2_4_gfx_resume(adev); |
| @@ -1012,6 +1027,7 @@ static int sdma_v2_4_sw_fini(void *handle) | |||
| 1012 | for (i = 0; i < adev->sdma.num_instances; i++) | 1027 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 1013 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | 1028 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
| 1014 | 1029 | ||
| 1030 | sdma_v2_4_free_microcode(adev); | ||
| 1015 | return 0; | 1031 | return 0; |
| 1016 | } | 1032 | } |
| 1017 | 1033 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 31d99b0010f7..532ea88da66a 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | |||
| @@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) | |||
| 236 | } | 236 | } |
| 237 | } | 237 | } |
| 238 | 238 | ||
| 239 | static void sdma_v3_0_free_microcode(struct amdgpu_device *adev) | ||
| 240 | { | ||
| 241 | int i; | ||
| 242 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 243 | release_firmware(adev->sdma.instance[i].fw); | ||
| 244 | adev->sdma.instance[i].fw = NULL; | ||
| 245 | } | ||
| 246 | } | ||
| 247 | |||
| 239 | /** | 248 | /** |
| 240 | * sdma_v3_0_init_microcode - load ucode images from disk | 249 | * sdma_v3_0_init_microcode - load ucode images from disk |
| 241 | * | 250 | * |
| @@ -672,6 +681,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) | |||
| 672 | /* Initialize the ring buffer's read and write pointers */ | 681 | /* Initialize the ring buffer's read and write pointers */ |
| 673 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); | 682 | WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); |
| 674 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); | 683 | WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); |
| 684 | WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); | ||
| 685 | WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); | ||
| 675 | 686 | ||
| 676 | /* set the wb address whether it's enabled or not */ | 687 | /* set the wb address whether it's enabled or not */ |
| 677 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], | 688 | WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], |
| @@ -711,7 +722,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) | |||
| 711 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); | 722 | WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); |
| 712 | 723 | ||
| 713 | ring->ready = true; | 724 | ring->ready = true; |
| 725 | } | ||
| 726 | |||
| 727 | /* unhalt the MEs */ | ||
| 728 | sdma_v3_0_enable(adev, true); | ||
| 729 | /* enable sdma ring preemption */ | ||
| 730 | sdma_v3_0_ctx_switch_enable(adev, true); | ||
| 714 | 731 | ||
| 732 | for (i = 0; i < adev->sdma.num_instances; i++) { | ||
| 733 | ring = &adev->sdma.instance[i].ring; | ||
| 715 | r = amdgpu_ring_test_ring(ring); | 734 | r = amdgpu_ring_test_ring(ring); |
| 716 | if (r) { | 735 | if (r) { |
| 717 | ring->ready = false; | 736 | ring->ready = false; |
| @@ -804,10 +823,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) | |||
| 804 | } | 823 | } |
| 805 | } | 824 | } |
| 806 | 825 | ||
| 807 | /* unhalt the MEs */ | 826 | /* disble sdma engine before programing it */ |
| 808 | sdma_v3_0_enable(adev, true); | 827 | sdma_v3_0_ctx_switch_enable(adev, false); |
| 809 | /* enable sdma ring preemption */ | 828 | sdma_v3_0_enable(adev, false); |
| 810 | sdma_v3_0_ctx_switch_enable(adev, true); | ||
| 811 | 829 | ||
| 812 | /* start the gfx rings and rlc compute queues */ | 830 | /* start the gfx rings and rlc compute queues */ |
| 813 | r = sdma_v3_0_gfx_resume(adev); | 831 | r = sdma_v3_0_gfx_resume(adev); |
| @@ -1247,6 +1265,7 @@ static int sdma_v3_0_sw_fini(void *handle) | |||
| 1247 | for (i = 0; i < adev->sdma.num_instances; i++) | 1265 | for (i = 0; i < adev->sdma.num_instances; i++) |
| 1248 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); | 1266 | amdgpu_ring_fini(&adev->sdma.instance[i].ring); |
| 1249 | 1267 | ||
| 1268 | sdma_v3_0_free_microcode(adev); | ||
| 1250 | return 0; | 1269 | return 0; |
| 1251 | } | 1270 | } |
| 1252 | 1271 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c index b7615cefcac4..f06f6f4dc3a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c | |||
| @@ -71,6 +71,11 @@ static int tonga_dpm_sw_init(void *handle) | |||
| 71 | 71 | ||
| 72 | static int tonga_dpm_sw_fini(void *handle) | 72 | static int tonga_dpm_sw_fini(void *handle) |
| 73 | { | 73 | { |
| 74 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | ||
| 75 | |||
| 76 | release_firmware(adev->pm.fw); | ||
| 77 | adev->pm.fw = NULL; | ||
| 78 | |||
| 74 | return 0; | 79 | return 0; |
| 75 | } | 80 | } |
| 76 | 81 | ||
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 6080951d539d..afce1edbe250 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h | |||
| @@ -157,6 +157,7 @@ struct amd_ip_funcs { | |||
| 157 | int (*hw_init)(void *handle); | 157 | int (*hw_init)(void *handle); |
| 158 | /* tears down the hw state */ | 158 | /* tears down the hw state */ |
| 159 | int (*hw_fini)(void *handle); | 159 | int (*hw_fini)(void *handle); |
| 160 | void (*late_fini)(void *handle); | ||
| 160 | /* handles IP specific hw/sw changes for suspend */ | 161 | /* handles IP specific hw/sw changes for suspend */ |
| 161 | int (*suspend)(void *handle); | 162 | int (*suspend)(void *handle); |
| 162 | /* handles IP specific hw/sw changes for resume */ | 163 | /* handles IP specific hw/sw changes for resume */ |
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index a461e155a160..7464daf89ca1 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h | |||
| @@ -581,6 +581,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device, | |||
| 581 | enum cgs_ucode_id type, | 581 | enum cgs_ucode_id type, |
| 582 | struct cgs_firmware_info *info); | 582 | struct cgs_firmware_info *info); |
| 583 | 583 | ||
| 584 | typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device, | ||
| 585 | enum cgs_ucode_id type); | ||
| 586 | |||
| 584 | typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, | 587 | typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, |
| 585 | enum amd_ip_block_type block_type, | 588 | enum amd_ip_block_type block_type, |
| 586 | enum amd_powergating_state state); | 589 | enum amd_powergating_state state); |
| @@ -645,6 +648,7 @@ struct cgs_ops { | |||
| 645 | cgs_set_camera_voltages_t set_camera_voltages; | 648 | cgs_set_camera_voltages_t set_camera_voltages; |
| 646 | /* Firmware Info */ | 649 | /* Firmware Info */ |
| 647 | cgs_get_firmware_info get_firmware_info; | 650 | cgs_get_firmware_info get_firmware_info; |
| 651 | cgs_rel_firmware rel_firmware; | ||
| 648 | /* cg pg interface*/ | 652 | /* cg pg interface*/ |
| 649 | cgs_set_powergating_state set_powergating_state; | 653 | cgs_set_powergating_state set_powergating_state; |
| 650 | cgs_set_clockgating_state set_clockgating_state; | 654 | cgs_set_clockgating_state set_clockgating_state; |
| @@ -738,6 +742,8 @@ struct cgs_device | |||
| 738 | CGS_CALL(set_camera_voltages,dev,mask,voltages) | 742 | CGS_CALL(set_camera_voltages,dev,mask,voltages) |
| 739 | #define cgs_get_firmware_info(dev, type, info) \ | 743 | #define cgs_get_firmware_info(dev, type, info) \ |
| 740 | CGS_CALL(get_firmware_info, dev, type, info) | 744 | CGS_CALL(get_firmware_info, dev, type, info) |
| 745 | #define cgs_rel_firmware(dev, type) \ | ||
| 746 | CGS_CALL(rel_firmware, dev, type) | ||
| 741 | #define cgs_set_powergating_state(dev, block_type, state) \ | 747 | #define cgs_set_powergating_state(dev, block_type, state) \ |
| 742 | CGS_CALL(set_powergating_state, dev, block_type, state) | 748 | CGS_CALL(set_powergating_state, dev, block_type, state) |
| 743 | #define cgs_set_clockgating_state(dev, block_type, state) \ | 749 | #define cgs_set_clockgating_state(dev, block_type, state) \ |
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 8e345bfddb69..e629f8a9fe93 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c | |||
| @@ -73,11 +73,14 @@ static int pp_sw_init(void *handle) | |||
| 73 | 73 | ||
| 74 | ret = hwmgr->hwmgr_func->backend_init(hwmgr); | 74 | ret = hwmgr->hwmgr_func->backend_init(hwmgr); |
| 75 | if (ret) | 75 | if (ret) |
| 76 | goto err; | 76 | goto err1; |
| 77 | 77 | ||
| 78 | pr_info("amdgpu: powerplay initialized\n"); | 78 | pr_info("amdgpu: powerplay initialized\n"); |
| 79 | 79 | ||
| 80 | return 0; | 80 | return 0; |
| 81 | err1: | ||
| 82 | if (hwmgr->pptable_func->pptable_fini) | ||
| 83 | hwmgr->pptable_func->pptable_fini(hwmgr); | ||
| 81 | err: | 84 | err: |
| 82 | pr_err("amdgpu: powerplay initialization failed\n"); | 85 | pr_err("amdgpu: powerplay initialization failed\n"); |
| 83 | return ret; | 86 | return ret; |
| @@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle) | |||
| 100 | if (hwmgr->hwmgr_func->backend_fini != NULL) | 103 | if (hwmgr->hwmgr_func->backend_fini != NULL) |
| 101 | ret = hwmgr->hwmgr_func->backend_fini(hwmgr); | 104 | ret = hwmgr->hwmgr_func->backend_fini(hwmgr); |
| 102 | 105 | ||
| 106 | if (hwmgr->pptable_func->pptable_fini) | ||
| 107 | hwmgr->pptable_func->pptable_fini(hwmgr); | ||
| 108 | |||
| 103 | return ret; | 109 | return ret; |
| 104 | } | 110 | } |
| 105 | 111 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c index 46410e3c7349..fb88e4e5d625 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c | |||
| @@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr) | |||
| 58 | pem_unregister_interrupts(eventmgr); | 58 | pem_unregister_interrupts(eventmgr); |
| 59 | 59 | ||
| 60 | pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); | 60 | pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); |
| 61 | |||
| 62 | if (eventmgr != NULL) | ||
| 63 | kfree(eventmgr); | ||
| 64 | } | 61 | } |
| 65 | 62 | ||
| 66 | int eventmgr_init(struct pp_instance *handle) | 63 | int eventmgr_init(struct pp_instance *handle) |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 24a16e49b571..586f73276226 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | |||
| @@ -1830,7 +1830,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci) | |||
| 1830 | 1830 | ||
| 1831 | PP_ASSERT_WITH_CODE(false, | 1831 | PP_ASSERT_WITH_CODE(false, |
| 1832 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", | 1832 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", |
| 1833 | return vddci_table->entries[i].value); | 1833 | return vddci_table->entries[i-1].value); |
| 1834 | } | 1834 | } |
| 1835 | 1835 | ||
| 1836 | static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, | 1836 | static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 1c48917da3cf..20f20e075588 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | |||
| @@ -93,6 +93,13 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr) | |||
| 93 | if (hwmgr == NULL || hwmgr->ps == NULL) | 93 | if (hwmgr == NULL || hwmgr->ps == NULL) |
| 94 | return -EINVAL; | 94 | return -EINVAL; |
| 95 | 95 | ||
| 96 | /* do hwmgr finish*/ | ||
| 97 | kfree(hwmgr->backend); | ||
| 98 | |||
| 99 | kfree(hwmgr->start_thermal_controller.function_list); | ||
| 100 | |||
| 101 | kfree(hwmgr->set_temperature_range.function_list); | ||
| 102 | |||
| 96 | kfree(hwmgr->ps); | 103 | kfree(hwmgr->ps); |
| 97 | kfree(hwmgr); | 104 | kfree(hwmgr); |
| 98 | return 0; | 105 | return 0; |
| @@ -462,7 +469,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u | |||
| 462 | 469 | ||
| 463 | PP_ASSERT_WITH_CODE(false, | 470 | PP_ASSERT_WITH_CODE(false, |
| 464 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", | 471 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", |
| 465 | return vddci_table->entries[i].value); | 472 | return vddci_table->entries[i-1].value); |
| 466 | } | 473 | } |
| 467 | 474 | ||
| 468 | int phm_find_boot_level(void *table, | 475 | int phm_find_boot_level(void *table, |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c index 0b99ab3ba0c5..ae96f14b827c 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c | |||
| @@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) | |||
| 286 | 286 | ||
| 287 | if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, | 287 | if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, |
| 288 | (uint8_t *)&data->power_tune_table, | 288 | (uint8_t *)&data->power_tune_table, |
| 289 | sizeof(struct SMU74_Discrete_PmFuses), data->sram_end)) | 289 | (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end)) |
| 290 | PP_ASSERT_WITH_CODE(false, | 290 | PP_ASSERT_WITH_CODE(false, |
| 291 | "Attempt to download PmFuseTable Failed!", | 291 | "Attempt to download PmFuseTable Failed!", |
| 292 | return -EINVAL); | 292 | return -EINVAL); |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 16fed487973b..d27e8c40602a 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | |||
| @@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) | |||
| 2847 | } | 2847 | } |
| 2848 | } | 2848 | } |
| 2849 | 2849 | ||
| 2850 | /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. */ | ||
| 2851 | for (i = 0; i < allowed_vdd_sclk_table->count; i++) { | ||
| 2852 | data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc; | ||
| 2853 | /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */ | ||
| 2854 | /* param1 is for corresponding std voltage */ | ||
| 2855 | data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; | ||
| 2856 | } | ||
| 2857 | data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; | ||
| 2858 | |||
| 2859 | if (NULL != allowed_vdd_mclk_table) { | ||
| 2860 | /* Initialize Vddci DPM table based on allow Mclk values */ | ||
| 2861 | for (i = 0; i < allowed_vdd_mclk_table->count; i++) { | ||
| 2862 | data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci; | ||
| 2863 | data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1; | ||
| 2864 | data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd; | ||
| 2865 | data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; | ||
| 2866 | } | ||
| 2867 | data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count; | ||
| 2868 | data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; | ||
| 2869 | } | ||
| 2870 | |||
| 2871 | /* setup PCIE gen speed levels*/ | 2850 | /* setup PCIE gen speed levels*/ |
| 2872 | tonga_setup_default_pcie_tables(hwmgr); | 2851 | tonga_setup_default_pcie_tables(hwmgr); |
| 2873 | 2852 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c index 10e3630ee39d..296ec7ef6d45 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c | |||
| @@ -1040,48 +1040,44 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) | |||
| 1040 | struct phm_ppt_v1_information *pp_table_information = | 1040 | struct phm_ppt_v1_information *pp_table_information = |
| 1041 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | 1041 | (struct phm_ppt_v1_information *)(hwmgr->pptable); |
| 1042 | 1042 | ||
| 1043 | if (NULL != hwmgr->soft_pp_table) { | 1043 | if (NULL != hwmgr->soft_pp_table) |
| 1044 | kfree(hwmgr->soft_pp_table); | ||
| 1045 | hwmgr->soft_pp_table = NULL; | 1044 | hwmgr->soft_pp_table = NULL; |
| 1046 | } | ||
| 1047 | 1045 | ||
| 1048 | if (NULL != pp_table_information->vdd_dep_on_sclk) | 1046 | kfree(pp_table_information->vdd_dep_on_sclk); |
| 1049 | pp_table_information->vdd_dep_on_sclk = NULL; | 1047 | pp_table_information->vdd_dep_on_sclk = NULL; |
| 1050 | 1048 | ||
| 1051 | if (NULL != pp_table_information->vdd_dep_on_mclk) | 1049 | kfree(pp_table_information->vdd_dep_on_mclk); |
| 1052 | pp_table_information->vdd_dep_on_mclk = NULL; | 1050 | pp_table_information->vdd_dep_on_mclk = NULL; |
| 1053 | 1051 | ||
| 1054 | if (NULL != pp_table_information->valid_mclk_values) | 1052 | kfree(pp_table_information->valid_mclk_values); |
| 1055 | pp_table_information->valid_mclk_values = NULL; | 1053 | pp_table_information->valid_mclk_values = NULL; |
| 1056 | 1054 | ||
| 1057 | if (NULL != pp_table_information->valid_sclk_values) | 1055 | kfree(pp_table_information->valid_sclk_values); |
| 1058 | pp_table_information->valid_sclk_values = NULL; | 1056 | pp_table_information->valid_sclk_values = NULL; |
| 1059 | 1057 | ||
| 1060 | if (NULL != pp_table_information->vddc_lookup_table) | 1058 | kfree(pp_table_information->vddc_lookup_table); |
| 1061 | pp_table_information->vddc_lookup_table = NULL; | 1059 | pp_table_information->vddc_lookup_table = NULL; |
| 1062 | 1060 | ||
| 1063 | if (NULL != pp_table_information->vddgfx_lookup_table) | 1061 | kfree(pp_table_information->vddgfx_lookup_table); |
| 1064 | pp_table_information->vddgfx_lookup_table = NULL; | 1062 | pp_table_information->vddgfx_lookup_table = NULL; |
| 1065 | 1063 | ||
| 1066 | if (NULL != pp_table_information->mm_dep_table) | 1064 | kfree(pp_table_information->mm_dep_table); |
| 1067 | pp_table_information->mm_dep_table = NULL; | 1065 | pp_table_information->mm_dep_table = NULL; |
| 1068 | 1066 | ||
| 1069 | if (NULL != pp_table_information->cac_dtp_table) | 1067 | kfree(pp_table_information->cac_dtp_table); |
| 1070 | pp_table_information->cac_dtp_table = NULL; | 1068 | pp_table_information->cac_dtp_table = NULL; |
| 1071 | 1069 | ||
| 1072 | if (NULL != hwmgr->dyn_state.cac_dtp_table) | 1070 | kfree(hwmgr->dyn_state.cac_dtp_table); |
| 1073 | hwmgr->dyn_state.cac_dtp_table = NULL; | 1071 | hwmgr->dyn_state.cac_dtp_table = NULL; |
| 1074 | 1072 | ||
| 1075 | if (NULL != pp_table_information->ppm_parameter_table) | 1073 | kfree(pp_table_information->ppm_parameter_table); |
| 1076 | pp_table_information->ppm_parameter_table = NULL; | 1074 | pp_table_information->ppm_parameter_table = NULL; |
| 1077 | 1075 | ||
| 1078 | if (NULL != pp_table_information->pcie_table) | 1076 | kfree(pp_table_information->pcie_table); |
| 1079 | pp_table_information->pcie_table = NULL; | 1077 | pp_table_information->pcie_table = NULL; |
| 1080 | 1078 | ||
| 1081 | if (NULL != hwmgr->pptable) { | 1079 | kfree(hwmgr->pptable); |
| 1082 | kfree(hwmgr->pptable); | 1080 | hwmgr->pptable = NULL; |
| 1083 | hwmgr->pptable = NULL; | ||
| 1084 | } | ||
| 1085 | 1081 | ||
| 1086 | return result; | 1082 | return result; |
| 1087 | } | 1083 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 673a75c74e18..8e52a2e82db5 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c | |||
| @@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) | |||
| 1006 | 1006 | ||
| 1007 | static int fiji_smu_fini(struct pp_smumgr *smumgr) | 1007 | static int fiji_smu_fini(struct pp_smumgr *smumgr) |
| 1008 | { | 1008 | { |
| 1009 | struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); | ||
| 1010 | |||
| 1011 | smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); | ||
| 1012 | |||
| 1009 | if (smumgr->backend) { | 1013 | if (smumgr->backend) { |
| 1010 | kfree(smumgr->backend); | 1014 | kfree(smumgr->backend); |
| 1011 | smumgr->backend = NULL; | 1015 | smumgr->backend = NULL; |
| 1012 | } | 1016 | } |
| 1017 | |||
| 1018 | cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); | ||
| 1013 | return 0; | 1019 | return 0; |
| 1014 | } | 1020 | } |
| 1015 | 1021 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index de618ead9db8..043b6ac09d5f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | |||
| @@ -469,6 +469,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr) | |||
| 469 | kfree(smumgr->backend); | 469 | kfree(smumgr->backend); |
| 470 | smumgr->backend = NULL; | 470 | smumgr->backend = NULL; |
| 471 | } | 471 | } |
| 472 | cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); | ||
| 472 | return 0; | 473 | return 0; |
| 473 | } | 474 | } |
| 474 | 475 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index c483baf6b4fb..0728c1e3d97a 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c | |||
| @@ -81,6 +81,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) | |||
| 81 | 81 | ||
| 82 | int smum_fini(struct pp_smumgr *smumgr) | 82 | int smum_fini(struct pp_smumgr *smumgr) |
| 83 | { | 83 | { |
| 84 | kfree(smumgr->device); | ||
| 84 | kfree(smumgr); | 85 | kfree(smumgr); |
| 85 | return 0; | 86 | return 0; |
| 86 | } | 87 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 32820b680d88..b22722eabafc 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c | |||
| @@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, | |||
| 328 | 328 | ||
| 329 | static int tonga_smu_fini(struct pp_smumgr *smumgr) | 329 | static int tonga_smu_fini(struct pp_smumgr *smumgr) |
| 330 | { | 330 | { |
| 331 | struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend); | ||
| 332 | |||
| 333 | smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle); | ||
| 334 | smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); | ||
| 335 | |||
| 331 | if (smumgr->backend != NULL) { | 336 | if (smumgr->backend != NULL) { |
| 332 | kfree(smumgr->backend); | 337 | kfree(smumgr->backend); |
| 333 | smumgr->backend = NULL; | 338 | smumgr->backend = NULL; |
| 334 | } | 339 | } |
| 340 | |||
| 341 | cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); | ||
| 335 | return 0; | 342 | return 0; |
| 336 | } | 343 | } |
| 337 | 344 | ||
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 0ec1ad961e0d..dc723f7ead7d 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | |||
| @@ -42,9 +42,10 @@ static const struct regmap_config fsl_dcu_regmap_config = { | |||
| 42 | .reg_bits = 32, | 42 | .reg_bits = 32, |
| 43 | .reg_stride = 4, | 43 | .reg_stride = 4, |
| 44 | .val_bits = 32, | 44 | .val_bits = 32, |
| 45 | .cache_type = REGCACHE_RBTREE, | 45 | .cache_type = REGCACHE_FLAT, |
| 46 | 46 | ||
| 47 | .volatile_reg = fsl_dcu_drm_is_volatile_reg, | 47 | .volatile_reg = fsl_dcu_drm_is_volatile_reg, |
| 48 | .max_register = 0x11fc, | ||
| 48 | }; | 49 | }; |
| 49 | 50 | ||
| 50 | static int fsl_dcu_drm_irq_init(struct drm_device *dev) | 51 | static int fsl_dcu_drm_irq_init(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index fbe304ee6c80..2aec27dbb5bb 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c | |||
| @@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, | |||
| 408 | } | 408 | } |
| 409 | 409 | ||
| 410 | adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); | 410 | adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); |
| 411 | if (!adreno_gpu->memptrs) { | 411 | if (IS_ERR(adreno_gpu->memptrs)) { |
| 412 | dev_err(drm->dev, "could not vmap memptrs\n"); | 412 | dev_err(drm->dev, "could not vmap memptrs\n"); |
| 413 | return -ENOMEM; | 413 | return -ENOMEM; |
| 414 | } | 414 | } |
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index d9759bf3482e..c6cf837c5193 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c | |||
| @@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, | |||
| 159 | dev->mode_config.fb_base = paddr; | 159 | dev->mode_config.fb_base = paddr; |
| 160 | 160 | ||
| 161 | fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); | 161 | fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); |
| 162 | if (IS_ERR(fbi->screen_base)) { | ||
| 163 | ret = PTR_ERR(fbi->screen_base); | ||
| 164 | goto fail_unlock; | ||
| 165 | } | ||
| 162 | fbi->screen_size = fbdev->bo->size; | 166 | fbi->screen_size = fbdev->bo->size; |
| 163 | fbi->fix.smem_start = paddr; | 167 | fbi->fix.smem_start = paddr; |
| 164 | fbi->fix.smem_len = fbdev->bo->size; | 168 | fbi->fix.smem_len = fbdev->bo->size; |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 7daf4054dd2b..69836f5685b1 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj) | |||
| 398 | return ERR_CAST(pages); | 398 | return ERR_CAST(pages); |
| 399 | msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, | 399 | msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, |
| 400 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); | 400 | VM_MAP, pgprot_writecombine(PAGE_KERNEL)); |
| 401 | if (msm_obj->vaddr == NULL) | ||
| 402 | return ERR_PTR(-ENOMEM); | ||
| 401 | } | 403 | } |
| 402 | return msm_obj->vaddr; | 404 | return msm_obj->vaddr; |
| 403 | } | 405 | } |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index b89ca5174863..eb4bb8b2f3a5 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
| @@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, | |||
| 40 | 40 | ||
| 41 | submit->dev = dev; | 41 | submit->dev = dev; |
| 42 | submit->gpu = gpu; | 42 | submit->gpu = gpu; |
| 43 | submit->fence = NULL; | ||
| 43 | submit->pid = get_pid(task_pid(current)); | 44 | submit->pid = get_pid(task_pid(current)); |
| 44 | 45 | ||
| 45 | /* initially, until copy_from_user() and bo lookup succeeds: */ | 46 | /* initially, until copy_from_user() and bo lookup succeeds: */ |
| 46 | submit->nr_bos = 0; | 47 | submit->nr_bos = 0; |
| 47 | submit->nr_cmds = 0; | 48 | submit->nr_cmds = 0; |
| 48 | 49 | ||
| 50 | INIT_LIST_HEAD(&submit->node); | ||
| 49 | INIT_LIST_HEAD(&submit->bo_list); | 51 | INIT_LIST_HEAD(&submit->bo_list); |
| 50 | ww_acquire_init(&submit->ticket, &reservation_ww_class); | 52 | ww_acquire_init(&submit->ticket, &reservation_ww_class); |
| 51 | 53 | ||
| @@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, | |||
| 75 | void __user *userptr = | 77 | void __user *userptr = |
| 76 | u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); | 78 | u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); |
| 77 | 79 | ||
| 80 | /* make sure we don't have garbage flags, in case we hit | ||
| 81 | * error path before flags is initialized: | ||
| 82 | */ | ||
| 83 | submit->bos[i].flags = 0; | ||
| 84 | |||
| 78 | ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); | 85 | ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); |
| 79 | if (ret) { | 86 | if (ret) { |
| 80 | ret = -EFAULT; | 87 | ret = -EFAULT; |
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index b48f73ac6389..0857710c2ff2 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c | |||
| @@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) | |||
| 312 | struct msm_gem_object *obj = submit->bos[idx].obj; | 312 | struct msm_gem_object *obj = submit->bos[idx].obj; |
| 313 | const char *buf = msm_gem_vaddr_locked(&obj->base); | 313 | const char *buf = msm_gem_vaddr_locked(&obj->base); |
| 314 | 314 | ||
| 315 | if (IS_ERR(buf)) | ||
| 316 | continue; | ||
| 317 | |||
| 315 | buf += iova - submit->bos[idx].iova; | 318 | buf += iova - submit->bos[idx].iova; |
| 316 | 319 | ||
| 317 | rd_write_section(rd, RD_GPUADDR, | 320 | rd_write_section(rd, RD_GPUADDR, |
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 1f14b908b221..42f5359cf988 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c | |||
| @@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) | |||
| 40 | } | 40 | } |
| 41 | 41 | ||
| 42 | ring->start = msm_gem_vaddr_locked(ring->bo); | 42 | ring->start = msm_gem_vaddr_locked(ring->bo); |
| 43 | if (IS_ERR(ring->start)) { | ||
| 44 | ret = PTR_ERR(ring->start); | ||
| 45 | goto fail; | ||
| 46 | } | ||
| 43 | ring->end = ring->start + (size / 4); | 47 | ring->end = ring->start + (size / 4); |
| 44 | ring->cur = ring->start; | 48 | ring->cur = ring->start; |
| 45 | 49 | ||
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index c612dc1f1eb4..126a85cc81bc 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h | |||
| @@ -16,9 +16,9 @@ enum nvkm_devidx { | |||
| 16 | NVKM_SUBDEV_MC, | 16 | NVKM_SUBDEV_MC, |
| 17 | NVKM_SUBDEV_BUS, | 17 | NVKM_SUBDEV_BUS, |
| 18 | NVKM_SUBDEV_TIMER, | 18 | NVKM_SUBDEV_TIMER, |
| 19 | NVKM_SUBDEV_INSTMEM, | ||
| 19 | NVKM_SUBDEV_FB, | 20 | NVKM_SUBDEV_FB, |
| 20 | NVKM_SUBDEV_LTC, | 21 | NVKM_SUBDEV_LTC, |
| 21 | NVKM_SUBDEV_INSTMEM, | ||
| 22 | NVKM_SUBDEV_MMU, | 22 | NVKM_SUBDEV_MMU, |
| 23 | NVKM_SUBDEV_BAR, | 23 | NVKM_SUBDEV_BAR, |
| 24 | NVKM_SUBDEV_PMU, | 24 | NVKM_SUBDEV_PMU, |
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h index db10c11f0595..c5a6ebd5a478 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h | |||
| @@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask, | |||
| 25 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *); | 25 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *); |
| 26 | 26 | ||
| 27 | struct nvbios_ocfg { | 27 | struct nvbios_ocfg { |
| 28 | u16 match; | 28 | u8 proto; |
| 29 | u8 flags; | ||
| 29 | u16 clkcmp[2]; | 30 | u16 clkcmp[2]; |
| 30 | }; | 31 | }; |
| 31 | 32 | ||
| @@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx, | |||
| 33 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); | 34 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len); |
| 34 | u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx, | 35 | u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx, |
| 35 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); | 36 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); |
| 36 | u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type, | 37 | u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags, |
| 37 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); | 38 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); |
| 38 | u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz); | 39 | u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz); |
| 39 | #endif | 40 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 57aaf98a26f9..300ea03be8f0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -552,6 +552,7 @@ nouveau_fbcon_init(struct drm_device *dev) | |||
| 552 | if (ret) | 552 | if (ret) |
| 553 | goto fini; | 553 | goto fini; |
| 554 | 554 | ||
| 555 | fbcon->helper.fbdev->pixmap.buf_align = 4; | ||
| 555 | return 0; | 556 | return 0; |
| 556 | 557 | ||
| 557 | fini: | 558 | fini: |
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 0f3e4bb411cc..7d9248b8c664 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c | |||
| @@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 82 | uint32_t fg; | 82 | uint32_t fg; |
| 83 | uint32_t bg; | 83 | uint32_t bg; |
| 84 | uint32_t dsize; | 84 | uint32_t dsize; |
| 85 | uint32_t width; | ||
| 86 | uint32_t *data = (uint32_t *)image->data; | 85 | uint32_t *data = (uint32_t *)image->data; |
| 87 | int ret; | 86 | int ret; |
| 88 | 87 | ||
| @@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 93 | if (ret) | 92 | if (ret) |
| 94 | return ret; | 93 | return ret; |
| 95 | 94 | ||
| 96 | width = ALIGN(image->width, 8); | ||
| 97 | dsize = ALIGN(width * image->height, 32) >> 5; | ||
| 98 | |||
| 99 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 95 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 100 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 96 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
| 101 | fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; | 97 | fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; |
| @@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 111 | ((image->dx + image->width) & 0xffff)); | 107 | ((image->dx + image->width) & 0xffff)); |
| 112 | OUT_RING(chan, bg); | 108 | OUT_RING(chan, bg); |
| 113 | OUT_RING(chan, fg); | 109 | OUT_RING(chan, fg); |
| 114 | OUT_RING(chan, (image->height << 16) | width); | 110 | OUT_RING(chan, (image->height << 16) | image->width); |
| 115 | OUT_RING(chan, (image->height << 16) | image->width); | 111 | OUT_RING(chan, (image->height << 16) | image->width); |
| 116 | OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); | 112 | OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); |
| 117 | 113 | ||
| 114 | dsize = ALIGN(image->width * image->height, 32) >> 5; | ||
| 118 | while (dsize) { | 115 | while (dsize) { |
| 119 | int iter_len = dsize > 128 ? 128 : dsize; | 116 | int iter_len = dsize > 128 ? 128 : dsize; |
| 120 | 117 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 33d9ee0fac40..1aeb698e9707 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c | |||
| @@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 95 | struct nouveau_fbdev *nfbdev = info->par; | 95 | struct nouveau_fbdev *nfbdev = info->par; |
| 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); | 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); |
| 97 | struct nouveau_channel *chan = drm->channel; | 97 | struct nouveau_channel *chan = drm->channel; |
| 98 | uint32_t width, dwords, *data = (uint32_t *)image->data; | 98 | uint32_t dwords, *data = (uint32_t *)image->data; |
| 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); | 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); |
| 100 | uint32_t *palette = info->pseudo_palette; | 100 | uint32_t *palette = info->pseudo_palette; |
| 101 | int ret; | 101 | int ret; |
| @@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 107 | if (ret) | 107 | if (ret) |
| 108 | return ret; | 108 | return ret; |
| 109 | 109 | ||
| 110 | width = ALIGN(image->width, 32); | ||
| 111 | dwords = (width * image->height) >> 5; | ||
| 112 | |||
| 113 | BEGIN_NV04(chan, NvSub2D, 0x0814, 2); | 110 | BEGIN_NV04(chan, NvSub2D, 0x0814, 2); |
| 114 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 111 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 115 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 112 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
| @@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 128 | OUT_RING(chan, 0); | 125 | OUT_RING(chan, 0); |
| 129 | OUT_RING(chan, image->dy); | 126 | OUT_RING(chan, image->dy); |
| 130 | 127 | ||
| 128 | dwords = ALIGN(image->width * image->height, 32) >> 5; | ||
| 131 | while (dwords) { | 129 | while (dwords) { |
| 132 | int push = dwords > 2047 ? 2047 : dwords; | 130 | int push = dwords > 2047 ? 2047 : dwords; |
| 133 | 131 | ||
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index a0913359ac05..839f4c8c1805 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c | |||
| @@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 95 | struct nouveau_fbdev *nfbdev = info->par; | 95 | struct nouveau_fbdev *nfbdev = info->par; |
| 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); | 96 | struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); |
| 97 | struct nouveau_channel *chan = drm->channel; | 97 | struct nouveau_channel *chan = drm->channel; |
| 98 | uint32_t width, dwords, *data = (uint32_t *)image->data; | 98 | uint32_t dwords, *data = (uint32_t *)image->data; |
| 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); | 99 | uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); |
| 100 | uint32_t *palette = info->pseudo_palette; | 100 | uint32_t *palette = info->pseudo_palette; |
| 101 | int ret; | 101 | int ret; |
| @@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 107 | if (ret) | 107 | if (ret) |
| 108 | return ret; | 108 | return ret; |
| 109 | 109 | ||
| 110 | width = ALIGN(image->width, 32); | ||
| 111 | dwords = (width * image->height) >> 5; | ||
| 112 | |||
| 113 | BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); | 110 | BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); |
| 114 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || | 111 | if (info->fix.visual == FB_VISUAL_TRUECOLOR || |
| 115 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { | 112 | info->fix.visual == FB_VISUAL_DIRECTCOLOR) { |
| @@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) | |||
| 128 | OUT_RING (chan, 0); | 125 | OUT_RING (chan, 0); |
| 129 | OUT_RING (chan, image->dy); | 126 | OUT_RING (chan, image->dy); |
| 130 | 127 | ||
| 128 | dwords = ALIGN(image->width * image->height, 32) >> 5; | ||
| 131 | while (dwords) { | 129 | while (dwords) { |
| 132 | int push = dwords > 2047 ? 2047 : dwords; | 130 | int push = dwords > 2047 ? 2047 : dwords; |
| 133 | 131 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild index a74c5dd27dc0..e2a64ed14b22 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild | |||
| @@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o | |||
| 18 | nvkm-y += nvkm/engine/disp/sornv50.o | 18 | nvkm-y += nvkm/engine/disp/sornv50.o |
| 19 | nvkm-y += nvkm/engine/disp/sorg94.o | 19 | nvkm-y += nvkm/engine/disp/sorg94.o |
| 20 | nvkm-y += nvkm/engine/disp/sorgf119.o | 20 | nvkm-y += nvkm/engine/disp/sorgf119.o |
| 21 | nvkm-y += nvkm/engine/disp/sorgm107.o | ||
| 21 | nvkm-y += nvkm/engine/disp/sorgm200.o | 22 | nvkm-y += nvkm/engine/disp/sorgm200.o |
| 22 | nvkm-y += nvkm/engine/disp/dport.o | 23 | nvkm-y += nvkm/engine/disp/dport.o |
| 23 | 24 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c index f0314664349c..5dd34382f55a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c | |||
| @@ -76,6 +76,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl, | |||
| 76 | mask |= 0x0001 << or; | 76 | mask |= 0x0001 << or; |
| 77 | mask |= 0x0100 << head; | 77 | mask |= 0x0100 << head; |
| 78 | 78 | ||
| 79 | |||
| 79 | list_for_each_entry(outp, &disp->base.outp, head) { | 80 | list_for_each_entry(outp, &disp->base.outp, head) { |
| 80 | if ((outp->info.hasht & 0xff) == type && | 81 | if ((outp->info.hasht & 0xff) == type && |
| 81 | (outp->info.hashm & mask) == mask) { | 82 | (outp->info.hashm & mask) == mask) { |
| @@ -155,25 +156,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) | |||
| 155 | if (!outp) | 156 | if (!outp) |
| 156 | return NULL; | 157 | return NULL; |
| 157 | 158 | ||
| 159 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 158 | switch (outp->info.type) { | 160 | switch (outp->info.type) { |
| 159 | case DCB_OUTPUT_TMDS: | 161 | case DCB_OUTPUT_TMDS: |
| 160 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 161 | if (*conf == 5) | 162 | if (*conf == 5) |
| 162 | *conf |= 0x0100; | 163 | *conf |= 0x0100; |
| 163 | break; | 164 | break; |
| 164 | case DCB_OUTPUT_LVDS: | 165 | case DCB_OUTPUT_LVDS: |
| 165 | *conf = disp->sor.lvdsconf; | 166 | *conf |= disp->sor.lvdsconf; |
| 166 | break; | ||
| 167 | case DCB_OUTPUT_DP: | ||
| 168 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 169 | break; | 167 | break; |
| 170 | case DCB_OUTPUT_ANALOG: | ||
| 171 | default: | 168 | default: |
| 172 | *conf = 0x00ff; | ||
| 173 | break; | 169 | break; |
| 174 | } | 170 | } |
| 175 | 171 | ||
| 176 | data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); | 172 | data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8, |
| 173 | &ver, &hdr, &cnt, &len, &info2); | ||
| 177 | if (data && id < 0xff) { | 174 | if (data && id < 0xff) { |
| 178 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); | 175 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); |
| 179 | if (data) { | 176 | if (data) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c index b6944142d616..f4b9cf8574be 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c | |||
| @@ -36,7 +36,7 @@ gm107_disp = { | |||
| 36 | .outp.internal.crt = nv50_dac_output_new, | 36 | .outp.internal.crt = nv50_dac_output_new, |
| 37 | .outp.internal.tmds = nv50_sor_output_new, | 37 | .outp.internal.tmds = nv50_sor_output_new, |
| 38 | .outp.internal.lvds = nv50_sor_output_new, | 38 | .outp.internal.lvds = nv50_sor_output_new, |
| 39 | .outp.internal.dp = gf119_sor_dp_new, | 39 | .outp.internal.dp = gm107_sor_dp_new, |
| 40 | .dac.nr = 3, | 40 | .dac.nr = 3, |
| 41 | .dac.power = nv50_dac_power, | 41 | .dac.power = nv50_dac_power, |
| 42 | .dac.sense = nv50_dac_sense, | 42 | .dac.sense = nv50_dac_sense, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index 4226d2153b9c..fcb1b0c46d64 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | |||
| @@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) | |||
| 387 | if (!outp) | 387 | if (!outp) |
| 388 | return NULL; | 388 | return NULL; |
| 389 | 389 | ||
| 390 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 390 | if (outp->info.location == 0) { | 391 | if (outp->info.location == 0) { |
| 391 | switch (outp->info.type) { | 392 | switch (outp->info.type) { |
| 392 | case DCB_OUTPUT_TMDS: | 393 | case DCB_OUTPUT_TMDS: |
| 393 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 394 | if (*conf == 5) | 394 | if (*conf == 5) |
| 395 | *conf |= 0x0100; | 395 | *conf |= 0x0100; |
| 396 | break; | 396 | break; |
| 397 | case DCB_OUTPUT_LVDS: | 397 | case DCB_OUTPUT_LVDS: |
| 398 | *conf = disp->sor.lvdsconf; | 398 | *conf |= disp->sor.lvdsconf; |
| 399 | break; | 399 | break; |
| 400 | case DCB_OUTPUT_DP: | ||
| 401 | *conf = (ctrl & 0x00000f00) >> 8; | ||
| 402 | break; | ||
| 403 | case DCB_OUTPUT_ANALOG: | ||
| 404 | default: | 400 | default: |
| 405 | *conf = 0x00ff; | ||
| 406 | break; | 401 | break; |
| 407 | } | 402 | } |
| 408 | } else { | 403 | } else { |
| @@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) | |||
| 410 | pclk = pclk / 2; | 405 | pclk = pclk / 2; |
| 411 | } | 406 | } |
| 412 | 407 | ||
| 413 | data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); | 408 | data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8, |
| 409 | &ver, &hdr, &cnt, &len, &info2); | ||
| 414 | if (data && id < 0xff) { | 410 | if (data && id < 0xff) { |
| 415 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); | 411 | data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); |
| 416 | if (data) { | 412 | if (data) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h index e9067ba4e179..4e983f6d7032 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h | |||
| @@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int); | |||
| 62 | int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, | 62 | int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, |
| 63 | struct nvkm_output **); | 63 | struct nvkm_output **); |
| 64 | int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool); | 64 | int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool); |
| 65 | int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int); | ||
| 65 | 66 | ||
| 66 | int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, | 67 | int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, |
| 67 | struct nvkm_output **); | 68 | struct nvkm_output **); |
| 69 | int gm107_sor_dp_pattern(struct nvkm_output_dp *, int); | ||
| 70 | |||
| 71 | int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, | ||
| 72 | struct nvkm_output **); | ||
| 68 | #endif | 73 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c index b4b41b135643..22706c0a54b5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c | |||
| @@ -40,8 +40,7 @@ static int | |||
| 40 | gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) | 40 | gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) |
| 41 | { | 41 | { |
| 42 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | 42 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; |
| 43 | const u32 loff = gf119_sor_loff(outp); | 43 | nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern); |
| 44 | nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); | ||
| 45 | return 0; | 44 | return 0; |
| 46 | } | 45 | } |
| 47 | 46 | ||
| @@ -64,7 +63,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef) | |||
| 64 | return 0; | 63 | return 0; |
| 65 | } | 64 | } |
| 66 | 65 | ||
| 67 | static int | 66 | int |
| 68 | gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp, | 67 | gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp, |
| 69 | int ln, int vs, int pe, int pc) | 68 | int ln, int vs, int pe, int pc) |
| 70 | { | 69 | { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c new file mode 100644 index 000000000000..37790b2617c5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c | |||
| @@ -0,0 +1,53 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2016 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs <bskeggs@redhat.com> | ||
| 23 | */ | ||
| 24 | #include "nv50.h" | ||
| 25 | #include "outpdp.h" | ||
| 26 | |||
| 27 | int | ||
| 28 | gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) | ||
| 29 | { | ||
| 30 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | ||
| 31 | const u32 soff = outp->base.or * 0x800; | ||
| 32 | const u32 data = 0x01010101 * pattern; | ||
| 33 | if (outp->base.info.sorconf.link & 1) | ||
| 34 | nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data); | ||
| 35 | else | ||
| 36 | nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data); | ||
| 37 | return 0; | ||
| 38 | } | ||
| 39 | |||
| 40 | static const struct nvkm_output_dp_func | ||
| 41 | gm107_sor_dp_func = { | ||
| 42 | .pattern = gm107_sor_dp_pattern, | ||
| 43 | .lnk_pwr = g94_sor_dp_lnk_pwr, | ||
| 44 | .lnk_ctl = gf119_sor_dp_lnk_ctl, | ||
| 45 | .drv_ctl = gf119_sor_dp_drv_ctl, | ||
| 46 | }; | ||
| 47 | |||
| 48 | int | ||
| 49 | gm107_sor_dp_new(struct nvkm_disp *disp, int index, | ||
| 50 | struct dcb_output *dcbE, struct nvkm_output **poutp) | ||
| 51 | { | ||
| 52 | return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp); | ||
| 53 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c index 2cfbef9c344f..c44fa7ea672a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c | |||
| @@ -57,19 +57,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane) | |||
| 57 | } | 57 | } |
| 58 | 58 | ||
| 59 | static int | 59 | static int |
| 60 | gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) | ||
| 61 | { | ||
| 62 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | ||
| 63 | const u32 soff = gm200_sor_soff(outp); | ||
| 64 | const u32 data = 0x01010101 * pattern; | ||
| 65 | if (outp->base.info.sorconf.link & 1) | ||
| 66 | nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data); | ||
| 67 | else | ||
| 68 | nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data); | ||
| 69 | return 0; | ||
| 70 | } | ||
| 71 | |||
| 72 | static int | ||
| 73 | gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) | 60 | gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) |
| 74 | { | 61 | { |
| 75 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; | 62 | struct nvkm_device *device = outp->base.disp->engine.subdev.device; |
| @@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp, | |||
| 129 | 116 | ||
| 130 | static const struct nvkm_output_dp_func | 117 | static const struct nvkm_output_dp_func |
| 131 | gm200_sor_dp_func = { | 118 | gm200_sor_dp_func = { |
| 132 | .pattern = gm200_sor_dp_pattern, | 119 | .pattern = gm107_sor_dp_pattern, |
| 133 | .lnk_pwr = gm200_sor_dp_lnk_pwr, | 120 | .lnk_pwr = gm200_sor_dp_lnk_pwr, |
| 134 | .lnk_ctl = gf119_sor_dp_lnk_ctl, | 121 | .lnk_ctl = gf119_sor_dp_lnk_ctl, |
| 135 | .drv_ctl = gm200_sor_dp_drv_ctl, | 122 | .drv_ctl = gm200_sor_dp_drv_ctl, |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 9513badb8220..ae9ab5b1ab97 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | |||
| @@ -949,22 +949,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc) | |||
| 949 | } | 949 | } |
| 950 | 950 | ||
| 951 | static const struct nvkm_enum gf100_mp_warp_error[] = { | 951 | static const struct nvkm_enum gf100_mp_warp_error[] = { |
| 952 | { 0x00, "NO_ERROR" }, | 952 | { 0x01, "STACK_ERROR" }, |
| 953 | { 0x01, "STACK_MISMATCH" }, | 953 | { 0x02, "API_STACK_ERROR" }, |
| 954 | { 0x03, "RET_EMPTY_STACK_ERROR" }, | ||
| 955 | { 0x04, "PC_WRAP" }, | ||
| 954 | { 0x05, "MISALIGNED_PC" }, | 956 | { 0x05, "MISALIGNED_PC" }, |
| 955 | { 0x08, "MISALIGNED_GPR" }, | 957 | { 0x06, "PC_OVERFLOW" }, |
| 956 | { 0x09, "INVALID_OPCODE" }, | 958 | { 0x07, "MISALIGNED_IMMC_ADDR" }, |
| 957 | { 0x0d, "GPR_OUT_OF_BOUNDS" }, | 959 | { 0x08, "MISALIGNED_REG" }, |
| 958 | { 0x0e, "MEM_OUT_OF_BOUNDS" }, | 960 | { 0x09, "ILLEGAL_INSTR_ENCODING" }, |
| 959 | { 0x0f, "UNALIGNED_MEM_ACCESS" }, | 961 | { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" }, |
| 962 | { 0x0b, "ILLEGAL_INSTR_PARAM" }, | ||
| 963 | { 0x0c, "INVALID_CONST_ADDR" }, | ||
| 964 | { 0x0d, "OOR_REG" }, | ||
| 965 | { 0x0e, "OOR_ADDR" }, | ||
| 966 | { 0x0f, "MISALIGNED_ADDR" }, | ||
| 960 | { 0x10, "INVALID_ADDR_SPACE" }, | 967 | { 0x10, "INVALID_ADDR_SPACE" }, |
| 961 | { 0x11, "INVALID_PARAM" }, | 968 | { 0x11, "ILLEGAL_INSTR_PARAM2" }, |
| 969 | { 0x12, "INVALID_CONST_ADDR_LDC" }, | ||
| 970 | { 0x13, "GEOMETRY_SM_ERROR" }, | ||
| 971 | { 0x14, "DIVERGENT" }, | ||
| 972 | { 0x15, "WARP_EXIT" }, | ||
| 962 | {} | 973 | {} |
| 963 | }; | 974 | }; |
| 964 | 975 | ||
| 965 | static const struct nvkm_bitfield gf100_mp_global_error[] = { | 976 | static const struct nvkm_bitfield gf100_mp_global_error[] = { |
| 977 | { 0x00000001, "SM_TO_SM_FAULT" }, | ||
| 978 | { 0x00000002, "L1_ERROR" }, | ||
| 966 | { 0x00000004, "MULTIPLE_WARP_ERRORS" }, | 979 | { 0x00000004, "MULTIPLE_WARP_ERRORS" }, |
| 967 | { 0x00000008, "OUT_OF_STACK_SPACE" }, | 980 | { 0x00000008, "PHYSICAL_STACK_OVERFLOW" }, |
| 981 | { 0x00000010, "BPT_INT" }, | ||
| 982 | { 0x00000020, "BPT_PAUSE" }, | ||
| 983 | { 0x00000040, "SINGLE_STEP_COMPLETE" }, | ||
| 984 | { 0x20000000, "ECC_SEC_ERROR" }, | ||
| 985 | { 0x40000000, "ECC_DED_ERROR" }, | ||
| 986 | { 0x80000000, "TIMEOUT" }, | ||
| 968 | {} | 987 | {} |
| 969 | }; | 988 | }; |
| 970 | 989 | ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c index a5e92135cd77..9efb1b48cd54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c | |||
| @@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, | |||
| 141 | { | 141 | { |
| 142 | u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); | 142 | u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); |
| 143 | if (data) { | 143 | if (data) { |
| 144 | info->match = nvbios_rd16(bios, data + 0x00); | 144 | info->proto = nvbios_rd08(bios, data + 0x00); |
| 145 | info->flags = nvbios_rd16(bios, data + 0x01); | ||
| 145 | info->clkcmp[0] = nvbios_rd16(bios, data + 0x02); | 146 | info->clkcmp[0] = nvbios_rd16(bios, data + 0x02); |
| 146 | info->clkcmp[1] = nvbios_rd16(bios, data + 0x04); | 147 | info->clkcmp[1] = nvbios_rd16(bios, data + 0x04); |
| 147 | } | 148 | } |
| @@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, | |||
| 149 | } | 150 | } |
| 150 | 151 | ||
| 151 | u16 | 152 | u16 |
| 152 | nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type, | 153 | nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags, |
| 153 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info) | 154 | u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info) |
| 154 | { | 155 | { |
| 155 | u16 data, idx = 0; | 156 | u16 data, idx = 0; |
| 156 | while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { | 157 | while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { |
| 157 | if (info->match == type) | 158 | if ((info->proto == proto || info->proto == 0xff) && |
| 159 | (info->flags == flags)) | ||
| 158 | break; | 160 | break; |
| 159 | } | 161 | } |
| 160 | return data; | 162 | return data; |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c index e292f5679418..389fb13a1998 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c | |||
| @@ -69,11 +69,11 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth) | |||
| 69 | } | 69 | } |
| 70 | 70 | ||
| 71 | static void | 71 | static void |
| 72 | gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s) | 72 | gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s) |
| 73 | { | 73 | { |
| 74 | struct nvkm_subdev *subdev = <c->subdev; | 74 | struct nvkm_subdev *subdev = <c->subdev; |
| 75 | struct nvkm_device *device = subdev->device; | 75 | struct nvkm_device *device = subdev->device; |
| 76 | u32 base = 0x140000 + (c * 0x2000) + (s * 0x200); | 76 | u32 base = 0x140400 + (c * 0x2000) + (s * 0x200); |
| 77 | u32 stat = nvkm_rd32(device, base + 0x00c); | 77 | u32 stat = nvkm_rd32(device, base + 0x00c); |
| 78 | 78 | ||
| 79 | if (stat) { | 79 | if (stat) { |
| @@ -92,7 +92,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc) | |||
| 92 | while (mask) { | 92 | while (mask) { |
| 93 | u32 s, c = __ffs(mask); | 93 | u32 s, c = __ffs(mask); |
| 94 | for (s = 0; s < ltc->lts_nr; s++) | 94 | for (s = 0; s < ltc->lts_nr; s++) |
| 95 | gm107_ltc_lts_isr(ltc, c, s); | 95 | gm107_ltc_intr_lts(ltc, c, s); |
| 96 | mask &= ~(1 << c); | 96 | mask &= ~(1 << c); |
| 97 | } | 97 | } |
| 98 | } | 98 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c index 2a29bfd5125a..e18e0dc19ec8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c | |||
| @@ -46,7 +46,7 @@ static const struct nvkm_ltc_func | |||
| 46 | gm200_ltc = { | 46 | gm200_ltc = { |
| 47 | .oneinit = gm200_ltc_oneinit, | 47 | .oneinit = gm200_ltc_oneinit, |
| 48 | .init = gm200_ltc_init, | 48 | .init = gm200_ltc_init, |
| 49 | .intr = gm107_ltc_intr, /*XXX: not validated */ | 49 | .intr = gm107_ltc_intr, |
| 50 | .cbc_clear = gm107_ltc_cbc_clear, | 50 | .cbc_clear = gm107_ltc_cbc_clear, |
| 51 | .cbc_wait = gm107_ltc_cbc_wait, | 51 | .cbc_wait = gm107_ltc_cbc_wait, |
| 52 | .zbc = 16, | 52 | .zbc = 16, |
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 9ed8272e54ae..56c43f355ce3 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c | |||
| @@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) | |||
| 1167 | { | 1167 | { |
| 1168 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); | 1168 | struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); |
| 1169 | struct regulator *vdds_dsi; | 1169 | struct regulator *vdds_dsi; |
| 1170 | int r; | ||
| 1171 | 1170 | ||
| 1172 | if (dsi->vdds_dsi_reg != NULL) | 1171 | if (dsi->vdds_dsi_reg != NULL) |
| 1173 | return 0; | 1172 | return 0; |
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index e129245eb8a9..9255c0e1e4a7 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c | |||
| @@ -120,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) | |||
| 120 | 120 | ||
| 121 | static int hdmi_init_regulator(void) | 121 | static int hdmi_init_regulator(void) |
| 122 | { | 122 | { |
| 123 | int r; | ||
| 124 | struct regulator *reg; | 123 | struct regulator *reg; |
| 125 | 124 | ||
| 126 | if (hdmi.vdda_reg != NULL) | 125 | if (hdmi.vdda_reg != NULL) |
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 904d0754ad78..0f18b76c7906 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c | |||
| @@ -456,14 +456,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 456 | 456 | ||
| 457 | WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); | 457 | WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); |
| 458 | 458 | ||
| 459 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | ||
| 460 | vc4_state->mm.start); | ||
| 461 | |||
| 462 | if (debug_dump_regs) { | ||
| 463 | DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); | ||
| 464 | vc4_hvs_dump_state(dev); | ||
| 465 | } | ||
| 466 | |||
| 467 | if (crtc->state->event) { | 459 | if (crtc->state->event) { |
| 468 | unsigned long flags; | 460 | unsigned long flags; |
| 469 | 461 | ||
| @@ -473,8 +465,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 473 | 465 | ||
| 474 | spin_lock_irqsave(&dev->event_lock, flags); | 466 | spin_lock_irqsave(&dev->event_lock, flags); |
| 475 | vc4_crtc->event = crtc->state->event; | 467 | vc4_crtc->event = crtc->state->event; |
| 476 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 477 | crtc->state->event = NULL; | 468 | crtc->state->event = NULL; |
| 469 | |||
| 470 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | ||
| 471 | vc4_state->mm.start); | ||
| 472 | |||
| 473 | spin_unlock_irqrestore(&dev->event_lock, flags); | ||
| 474 | } else { | ||
| 475 | HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), | ||
| 476 | vc4_state->mm.start); | ||
| 477 | } | ||
| 478 | |||
| 479 | if (debug_dump_regs) { | ||
| 480 | DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); | ||
| 481 | vc4_hvs_dump_state(dev); | ||
| 478 | } | 482 | } |
| 479 | } | 483 | } |
| 480 | 484 | ||
| @@ -500,12 +504,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) | |||
| 500 | { | 504 | { |
| 501 | struct drm_crtc *crtc = &vc4_crtc->base; | 505 | struct drm_crtc *crtc = &vc4_crtc->base; |
| 502 | struct drm_device *dev = crtc->dev; | 506 | struct drm_device *dev = crtc->dev; |
| 507 | struct vc4_dev *vc4 = to_vc4_dev(dev); | ||
| 508 | struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); | ||
| 509 | u32 chan = vc4_crtc->channel; | ||
| 503 | unsigned long flags; | 510 | unsigned long flags; |
| 504 | 511 | ||
| 505 | spin_lock_irqsave(&dev->event_lock, flags); | 512 | spin_lock_irqsave(&dev->event_lock, flags); |
| 506 | if (vc4_crtc->event) { | 513 | if (vc4_crtc->event && |
| 514 | (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) { | ||
| 507 | drm_crtc_send_vblank_event(crtc, vc4_crtc->event); | 515 | drm_crtc_send_vblank_event(crtc, vc4_crtc->event); |
| 508 | vc4_crtc->event = NULL; | 516 | vc4_crtc->event = NULL; |
| 517 | drm_crtc_vblank_put(crtc); | ||
| 509 | } | 518 | } |
| 510 | spin_unlock_irqrestore(&dev->event_lock, flags); | 519 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 511 | } | 520 | } |
| @@ -556,6 +565,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) | |||
| 556 | spin_unlock_irqrestore(&dev->event_lock, flags); | 565 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 557 | } | 566 | } |
| 558 | 567 | ||
| 568 | drm_crtc_vblank_put(crtc); | ||
| 559 | drm_framebuffer_unreference(flip_state->fb); | 569 | drm_framebuffer_unreference(flip_state->fb); |
| 560 | kfree(flip_state); | 570 | kfree(flip_state); |
| 561 | 571 | ||
| @@ -598,6 +608,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, | |||
| 598 | return ret; | 608 | return ret; |
| 599 | } | 609 | } |
| 600 | 610 | ||
| 611 | WARN_ON(drm_crtc_vblank_get(crtc) != 0); | ||
| 612 | |||
| 601 | /* Immediately update the plane's legacy fb pointer, so that later | 613 | /* Immediately update the plane's legacy fb pointer, so that later |
| 602 | * modeset prep sees the state that will be present when the semaphore | 614 | * modeset prep sees the state that will be present when the semaphore |
| 603 | * is released. | 615 | * is released. |
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 3446ece21b4a..250ed7e3754c 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c | |||
| @@ -66,12 +66,12 @@ static const struct file_operations vc4_drm_fops = { | |||
| 66 | }; | 66 | }; |
| 67 | 67 | ||
| 68 | static const struct drm_ioctl_desc vc4_drm_ioctls[] = { | 68 | static const struct drm_ioctl_desc vc4_drm_ioctls[] = { |
| 69 | DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0), | 69 | DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW), |
| 70 | DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0), | 70 | DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW), |
| 71 | DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0), | 71 | DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW), |
| 72 | DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), | 72 | DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW), |
| 73 | DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), | 73 | DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW), |
| 74 | DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), | 74 | DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW), |
| 75 | DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, | 75 | DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, |
| 76 | DRM_ROOT_ONLY), | 76 | DRM_ROOT_ONLY), |
| 77 | }; | 77 | }; |
| @@ -91,7 +91,7 @@ static struct drm_driver vc4_drm_driver = { | |||
| 91 | 91 | ||
| 92 | .enable_vblank = vc4_enable_vblank, | 92 | .enable_vblank = vc4_enable_vblank, |
| 93 | .disable_vblank = vc4_disable_vblank, | 93 | .disable_vblank = vc4_disable_vblank, |
| 94 | .get_vblank_counter = drm_vblank_count, | 94 | .get_vblank_counter = drm_vblank_no_hw_counter, |
| 95 | 95 | ||
| 96 | #if defined(CONFIG_DEBUG_FS) | 96 | #if defined(CONFIG_DEBUG_FS) |
| 97 | .debugfs_init = vc4_debugfs_init, | 97 | .debugfs_init = vc4_debugfs_init, |
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index cb37751bc99f..861a623bc185 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c | |||
| @@ -117,10 +117,18 @@ static int vc4_atomic_commit(struct drm_device *dev, | |||
| 117 | return -ENOMEM; | 117 | return -ENOMEM; |
| 118 | 118 | ||
| 119 | /* Make sure that any outstanding modesets have finished. */ | 119 | /* Make sure that any outstanding modesets have finished. */ |
| 120 | ret = down_interruptible(&vc4->async_modeset); | 120 | if (nonblock) { |
| 121 | if (ret) { | 121 | ret = down_trylock(&vc4->async_modeset); |
| 122 | kfree(c); | 122 | if (ret) { |
| 123 | return ret; | 123 | kfree(c); |
| 124 | return -EBUSY; | ||
| 125 | } | ||
| 126 | } else { | ||
| 127 | ret = down_interruptible(&vc4->async_modeset); | ||
| 128 | if (ret) { | ||
| 129 | kfree(c); | ||
| 130 | return ret; | ||
| 131 | } | ||
| 124 | } | 132 | } |
| 125 | 133 | ||
| 126 | ret = drm_atomic_helper_prepare_planes(dev, state); | 134 | ret = drm_atomic_helper_prepare_planes(dev, state); |
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 6163b95c5411..f99eece4cc97 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h | |||
| @@ -341,6 +341,10 @@ | |||
| 341 | #define SCALER_DISPLACT0 0x00000030 | 341 | #define SCALER_DISPLACT0 0x00000030 |
| 342 | #define SCALER_DISPLACT1 0x00000034 | 342 | #define SCALER_DISPLACT1 0x00000034 |
| 343 | #define SCALER_DISPLACT2 0x00000038 | 343 | #define SCALER_DISPLACT2 0x00000038 |
| 344 | #define SCALER_DISPLACTX(x) (SCALER_DISPLACT0 + \ | ||
| 345 | (x) * (SCALER_DISPLACT1 - \ | ||
| 346 | SCALER_DISPLACT0)) | ||
| 347 | |||
| 344 | #define SCALER_DISPCTRL0 0x00000040 | 348 | #define SCALER_DISPCTRL0 0x00000040 |
| 345 | # define SCALER_DISPCTRLX_ENABLE BIT(31) | 349 | # define SCALER_DISPCTRLX_ENABLE BIT(31) |
| 346 | # define SCALER_DISPCTRLX_RESET BIT(30) | 350 | # define SCALER_DISPCTRLX_RESET BIT(30) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 6de283c8fa3e..f0374f9b56ca 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
| 29 | #include <linux/module.h> | 29 | #include <linux/module.h> |
| 30 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
| 31 | #include <linux/frame.h> | ||
| 31 | #include <asm/hypervisor.h> | 32 | #include <asm/hypervisor.h> |
| 32 | #include "drmP.h" | 33 | #include "drmP.h" |
| 33 | #include "vmwgfx_msg.h" | 34 | #include "vmwgfx_msg.h" |
| @@ -194,7 +195,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) | |||
| 194 | 195 | ||
| 195 | return -EINVAL; | 196 | return -EINVAL; |
| 196 | } | 197 | } |
| 197 | 198 | STACK_FRAME_NON_STANDARD(vmw_send_msg); | |
| 198 | 199 | ||
| 199 | 200 | ||
| 200 | /** | 201 | /** |
| @@ -304,6 +305,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, | |||
| 304 | 305 | ||
| 305 | return 0; | 306 | return 0; |
| 306 | } | 307 | } |
| 308 | STACK_FRAME_NON_STANDARD(vmw_recv_msg); | ||
| 307 | 309 | ||
| 308 | 310 | ||
| 309 | /** | 311 | /** |
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c index eb97a9241d17..15aa49d082c4 100644 --- a/drivers/hwmon/fam15h_power.c +++ b/drivers/hwmon/fam15h_power.c | |||
| @@ -172,9 +172,9 @@ static void do_read_registers_on_cu(void *_data) | |||
| 172 | */ | 172 | */ |
| 173 | static int read_registers(struct fam15h_power_data *data) | 173 | static int read_registers(struct fam15h_power_data *data) |
| 174 | { | 174 | { |
| 175 | int this_cpu, ret, cpu; | ||
| 176 | int core, this_core; | 175 | int core, this_core; |
| 177 | cpumask_var_t mask; | 176 | cpumask_var_t mask; |
| 177 | int ret, cpu; | ||
| 178 | 178 | ||
| 179 | ret = zalloc_cpumask_var(&mask, GFP_KERNEL); | 179 | ret = zalloc_cpumask_var(&mask, GFP_KERNEL); |
| 180 | if (!ret) | 180 | if (!ret) |
| @@ -183,7 +183,6 @@ static int read_registers(struct fam15h_power_data *data) | |||
| 183 | memset(data->cu_on, 0, sizeof(int) * MAX_CUS); | 183 | memset(data->cu_on, 0, sizeof(int) * MAX_CUS); |
| 184 | 184 | ||
| 185 | get_online_cpus(); | 185 | get_online_cpus(); |
| 186 | this_cpu = smp_processor_id(); | ||
| 187 | 186 | ||
| 188 | /* | 187 | /* |
| 189 | * Choose the first online core of each compute unit, and then | 188 | * Choose the first online core of each compute unit, and then |
| @@ -205,12 +204,9 @@ static int read_registers(struct fam15h_power_data *data) | |||
| 205 | cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask); | 204 | cpumask_set_cpu(cpumask_any(topology_sibling_cpumask(cpu)), mask); |
| 206 | } | 205 | } |
| 207 | 206 | ||
| 208 | if (cpumask_test_cpu(this_cpu, mask)) | 207 | on_each_cpu_mask(mask, do_read_registers_on_cu, data, true); |
| 209 | do_read_registers_on_cu(data); | ||
| 210 | 208 | ||
| 211 | smp_call_function_many(mask, do_read_registers_on_cu, data, true); | ||
| 212 | put_online_cpus(); | 209 | put_online_cpus(); |
| 213 | |||
| 214 | free_cpumask_var(mask); | 210 | free_cpumask_var(mask); |
| 215 | 211 | ||
| 216 | return 0; | 212 | return 0; |
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c index c9ff08dbe10c..e30a5939dc0d 100644 --- a/drivers/hwmon/lm90.c +++ b/drivers/hwmon/lm90.c | |||
| @@ -375,7 +375,7 @@ struct lm90_data { | |||
| 375 | int kind; | 375 | int kind; |
| 376 | u32 flags; | 376 | u32 flags; |
| 377 | 377 | ||
| 378 | int update_interval; /* in milliseconds */ | 378 | unsigned int update_interval; /* in milliseconds */ |
| 379 | 379 | ||
| 380 | u8 config_orig; /* Original configuration register value */ | 380 | u8 config_orig; /* Original configuration register value */ |
| 381 | u8 convrate_orig; /* Original conversion rate register value */ | 381 | u8 convrate_orig; /* Original conversion rate register value */ |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 64b1208bca5e..4a60ad214747 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
| @@ -245,6 +245,13 @@ struct i801_priv { | |||
| 245 | struct platform_device *mux_pdev; | 245 | struct platform_device *mux_pdev; |
| 246 | #endif | 246 | #endif |
| 247 | struct platform_device *tco_pdev; | 247 | struct platform_device *tco_pdev; |
| 248 | |||
| 249 | /* | ||
| 250 | * If set to true the host controller registers are reserved for | ||
| 251 | * ACPI AML use. Protected by acpi_lock. | ||
| 252 | */ | ||
| 253 | bool acpi_reserved; | ||
| 254 | struct mutex acpi_lock; | ||
| 248 | }; | 255 | }; |
| 249 | 256 | ||
| 250 | #define FEATURE_SMBUS_PEC (1 << 0) | 257 | #define FEATURE_SMBUS_PEC (1 << 0) |
| @@ -718,6 +725,12 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, | |||
| 718 | int ret = 0, xact = 0; | 725 | int ret = 0, xact = 0; |
| 719 | struct i801_priv *priv = i2c_get_adapdata(adap); | 726 | struct i801_priv *priv = i2c_get_adapdata(adap); |
| 720 | 727 | ||
| 728 | mutex_lock(&priv->acpi_lock); | ||
| 729 | if (priv->acpi_reserved) { | ||
| 730 | mutex_unlock(&priv->acpi_lock); | ||
| 731 | return -EBUSY; | ||
| 732 | } | ||
| 733 | |||
| 721 | pm_runtime_get_sync(&priv->pci_dev->dev); | 734 | pm_runtime_get_sync(&priv->pci_dev->dev); |
| 722 | 735 | ||
| 723 | hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) | 736 | hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC) |
| @@ -820,6 +833,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr, | |||
| 820 | out: | 833 | out: |
| 821 | pm_runtime_mark_last_busy(&priv->pci_dev->dev); | 834 | pm_runtime_mark_last_busy(&priv->pci_dev->dev); |
| 822 | pm_runtime_put_autosuspend(&priv->pci_dev->dev); | 835 | pm_runtime_put_autosuspend(&priv->pci_dev->dev); |
| 836 | mutex_unlock(&priv->acpi_lock); | ||
| 823 | return ret; | 837 | return ret; |
| 824 | } | 838 | } |
| 825 | 839 | ||
| @@ -1257,6 +1271,83 @@ static void i801_add_tco(struct i801_priv *priv) | |||
| 1257 | priv->tco_pdev = pdev; | 1271 | priv->tco_pdev = pdev; |
| 1258 | } | 1272 | } |
| 1259 | 1273 | ||
| 1274 | #ifdef CONFIG_ACPI | ||
| 1275 | static acpi_status | ||
| 1276 | i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits, | ||
| 1277 | u64 *value, void *handler_context, void *region_context) | ||
| 1278 | { | ||
| 1279 | struct i801_priv *priv = handler_context; | ||
| 1280 | struct pci_dev *pdev = priv->pci_dev; | ||
| 1281 | acpi_status status; | ||
| 1282 | |||
| 1283 | /* | ||
| 1284 | * Once BIOS AML code touches the OpRegion we warn and inhibit any | ||
| 1285 | * further access from the driver itself. This device is now owned | ||
| 1286 | * by the system firmware. | ||
| 1287 | */ | ||
| 1288 | mutex_lock(&priv->acpi_lock); | ||
| 1289 | |||
| 1290 | if (!priv->acpi_reserved) { | ||
| 1291 | priv->acpi_reserved = true; | ||
| 1292 | |||
| 1293 | dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n"); | ||
| 1294 | dev_warn(&pdev->dev, "Driver SMBus register access inhibited\n"); | ||
| 1295 | |||
| 1296 | /* | ||
| 1297 | * BIOS is accessing the host controller so prevent it from | ||
| 1298 | * suspending automatically from now on. | ||
| 1299 | */ | ||
| 1300 | pm_runtime_get_sync(&pdev->dev); | ||
| 1301 | } | ||
| 1302 | |||
| 1303 | if ((function & ACPI_IO_MASK) == ACPI_READ) | ||
| 1304 | status = acpi_os_read_port(address, (u32 *)value, bits); | ||
| 1305 | else | ||
| 1306 | status = acpi_os_write_port(address, (u32)*value, bits); | ||
| 1307 | |||
| 1308 | mutex_unlock(&priv->acpi_lock); | ||
| 1309 | |||
| 1310 | return status; | ||
| 1311 | } | ||
| 1312 | |||
| 1313 | static int i801_acpi_probe(struct i801_priv *priv) | ||
| 1314 | { | ||
| 1315 | struct acpi_device *adev; | ||
| 1316 | acpi_status status; | ||
| 1317 | |||
| 1318 | adev = ACPI_COMPANION(&priv->pci_dev->dev); | ||
| 1319 | if (adev) { | ||
| 1320 | status = acpi_install_address_space_handler(adev->handle, | ||
| 1321 | ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler, | ||
| 1322 | NULL, priv); | ||
| 1323 | if (ACPI_SUCCESS(status)) | ||
| 1324 | return 0; | ||
| 1325 | } | ||
| 1326 | |||
| 1327 | return acpi_check_resource_conflict(&priv->pci_dev->resource[SMBBAR]); | ||
| 1328 | } | ||
| 1329 | |||
| 1330 | static void i801_acpi_remove(struct i801_priv *priv) | ||
| 1331 | { | ||
| 1332 | struct acpi_device *adev; | ||
| 1333 | |||
| 1334 | adev = ACPI_COMPANION(&priv->pci_dev->dev); | ||
| 1335 | if (!adev) | ||
| 1336 | return; | ||
| 1337 | |||
| 1338 | acpi_remove_address_space_handler(adev->handle, | ||
| 1339 | ACPI_ADR_SPACE_SYSTEM_IO, i801_acpi_io_handler); | ||
| 1340 | |||
| 1341 | mutex_lock(&priv->acpi_lock); | ||
| 1342 | if (priv->acpi_reserved) | ||
| 1343 | pm_runtime_put(&priv->pci_dev->dev); | ||
| 1344 | mutex_unlock(&priv->acpi_lock); | ||
| 1345 | } | ||
| 1346 | #else | ||
| 1347 | static inline int i801_acpi_probe(struct i801_priv *priv) { return 0; } | ||
| 1348 | static inline void i801_acpi_remove(struct i801_priv *priv) { } | ||
| 1349 | #endif | ||
| 1350 | |||
| 1260 | static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | 1351 | static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) |
| 1261 | { | 1352 | { |
| 1262 | unsigned char temp; | 1353 | unsigned char temp; |
| @@ -1274,6 +1365,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1274 | priv->adapter.dev.parent = &dev->dev; | 1365 | priv->adapter.dev.parent = &dev->dev; |
| 1275 | ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev)); | 1366 | ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&dev->dev)); |
| 1276 | priv->adapter.retries = 3; | 1367 | priv->adapter.retries = 3; |
| 1368 | mutex_init(&priv->acpi_lock); | ||
| 1277 | 1369 | ||
| 1278 | priv->pci_dev = dev; | 1370 | priv->pci_dev = dev; |
| 1279 | switch (dev->device) { | 1371 | switch (dev->device) { |
| @@ -1336,10 +1428,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1336 | return -ENODEV; | 1428 | return -ENODEV; |
| 1337 | } | 1429 | } |
| 1338 | 1430 | ||
| 1339 | err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); | 1431 | if (i801_acpi_probe(priv)) |
| 1340 | if (err) { | ||
| 1341 | return -ENODEV; | 1432 | return -ENODEV; |
| 1342 | } | ||
| 1343 | 1433 | ||
| 1344 | err = pcim_iomap_regions(dev, 1 << SMBBAR, | 1434 | err = pcim_iomap_regions(dev, 1 << SMBBAR, |
| 1345 | dev_driver_string(&dev->dev)); | 1435 | dev_driver_string(&dev->dev)); |
| @@ -1348,6 +1438,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1348 | "Failed to request SMBus region 0x%lx-0x%Lx\n", | 1438 | "Failed to request SMBus region 0x%lx-0x%Lx\n", |
| 1349 | priv->smba, | 1439 | priv->smba, |
| 1350 | (unsigned long long)pci_resource_end(dev, SMBBAR)); | 1440 | (unsigned long long)pci_resource_end(dev, SMBBAR)); |
| 1441 | i801_acpi_remove(priv); | ||
| 1351 | return err; | 1442 | return err; |
| 1352 | } | 1443 | } |
| 1353 | 1444 | ||
| @@ -1412,6 +1503,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1412 | err = i2c_add_adapter(&priv->adapter); | 1503 | err = i2c_add_adapter(&priv->adapter); |
| 1413 | if (err) { | 1504 | if (err) { |
| 1414 | dev_err(&dev->dev, "Failed to add SMBus adapter\n"); | 1505 | dev_err(&dev->dev, "Failed to add SMBus adapter\n"); |
| 1506 | i801_acpi_remove(priv); | ||
| 1415 | return err; | 1507 | return err; |
| 1416 | } | 1508 | } |
| 1417 | 1509 | ||
| @@ -1438,6 +1530,7 @@ static void i801_remove(struct pci_dev *dev) | |||
| 1438 | 1530 | ||
| 1439 | i801_del_mux(priv); | 1531 | i801_del_mux(priv); |
| 1440 | i2c_del_adapter(&priv->adapter); | 1532 | i2c_del_adapter(&priv->adapter); |
| 1533 | i801_acpi_remove(priv); | ||
| 1441 | pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); | 1534 | pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg); |
| 1442 | 1535 | ||
| 1443 | platform_device_unregister(priv->tco_pdev); | 1536 | platform_device_unregister(priv->tco_pdev); |
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c index aa5f01efd826..30ae35146723 100644 --- a/drivers/i2c/busses/i2c-octeon.c +++ b/drivers/i2c/busses/i2c-octeon.c | |||
| @@ -934,8 +934,15 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, | |||
| 934 | return result; | 934 | return result; |
| 935 | 935 | ||
| 936 | for (i = 0; i < length; i++) { | 936 | for (i = 0; i < length; i++) { |
| 937 | /* for the last byte TWSI_CTL_AAK must not be set */ | 937 | /* |
| 938 | if (i + 1 == length) | 938 | * For the last byte to receive TWSI_CTL_AAK must not be set. |
| 939 | * | ||
| 940 | * A special case is I2C_M_RECV_LEN where we don't know the | ||
| 941 | * additional length yet. If recv_len is set we assume we're | ||
| 942 | * not reading the final byte and therefore need to set | ||
| 943 | * TWSI_CTL_AAK. | ||
| 944 | */ | ||
| 945 | if ((i + 1 == length) && !(recv_len && i == 0)) | ||
| 939 | final_read = true; | 946 | final_read = true; |
| 940 | 947 | ||
| 941 | /* clear iflg to allow next event */ | 948 | /* clear iflg to allow next event */ |
| @@ -950,12 +957,8 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, | |||
| 950 | 957 | ||
| 951 | data[i] = octeon_i2c_data_read(i2c); | 958 | data[i] = octeon_i2c_data_read(i2c); |
| 952 | if (recv_len && i == 0) { | 959 | if (recv_len && i == 0) { |
| 953 | if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) { | 960 | if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) |
| 954 | dev_err(i2c->dev, | ||
| 955 | "%s: read len > I2C_SMBUS_BLOCK_MAX %d\n", | ||
| 956 | __func__, data[i]); | ||
| 957 | return -EPROTO; | 961 | return -EPROTO; |
| 958 | } | ||
| 959 | length += data[i]; | 962 | length += data[i]; |
| 960 | } | 963 | } |
| 961 | 964 | ||
diff --git a/drivers/i2c/muxes/i2c-mux-reg.c b/drivers/i2c/muxes/i2c-mux-reg.c index 6773cadf7c9f..26e7c5187a58 100644 --- a/drivers/i2c/muxes/i2c-mux-reg.c +++ b/drivers/i2c/muxes/i2c-mux-reg.c | |||
| @@ -260,6 +260,7 @@ static struct platform_driver i2c_mux_reg_driver = { | |||
| 260 | .remove = i2c_mux_reg_remove, | 260 | .remove = i2c_mux_reg_remove, |
| 261 | .driver = { | 261 | .driver = { |
| 262 | .name = "i2c-mux-reg", | 262 | .name = "i2c-mux-reg", |
| 263 | .of_match_table = of_match_ptr(i2c_mux_reg_of_match), | ||
| 263 | }, | 264 | }, |
| 264 | }; | 265 | }; |
| 265 | 266 | ||
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index c2e257d97eff..040966775f40 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c | |||
| @@ -178,6 +178,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port, | |||
| 178 | { | 178 | { |
| 179 | int ret = 0; | 179 | int ret = 0; |
| 180 | struct net_device *old_net_dev; | 180 | struct net_device *old_net_dev; |
| 181 | enum ib_gid_type old_gid_type; | ||
| 181 | 182 | ||
| 182 | /* in rdma_cap_roce_gid_table, this funciton should be protected by a | 183 | /* in rdma_cap_roce_gid_table, this funciton should be protected by a |
| 183 | * sleep-able lock. | 184 | * sleep-able lock. |
| @@ -199,6 +200,7 @@ static int write_gid(struct ib_device *ib_dev, u8 port, | |||
| 199 | } | 200 | } |
| 200 | 201 | ||
| 201 | old_net_dev = table->data_vec[ix].attr.ndev; | 202 | old_net_dev = table->data_vec[ix].attr.ndev; |
| 203 | old_gid_type = table->data_vec[ix].attr.gid_type; | ||
| 202 | if (old_net_dev && old_net_dev != attr->ndev) | 204 | if (old_net_dev && old_net_dev != attr->ndev) |
| 203 | dev_put(old_net_dev); | 205 | dev_put(old_net_dev); |
| 204 | /* if modify_gid failed, just delete the old gid */ | 206 | /* if modify_gid failed, just delete the old gid */ |
| @@ -207,10 +209,14 @@ static int write_gid(struct ib_device *ib_dev, u8 port, | |||
| 207 | attr = &zattr; | 209 | attr = &zattr; |
| 208 | table->data_vec[ix].context = NULL; | 210 | table->data_vec[ix].context = NULL; |
| 209 | } | 211 | } |
| 210 | if (default_gid) | 212 | |
| 211 | table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT; | ||
| 212 | memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid)); | 213 | memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid)); |
| 213 | memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr)); | 214 | memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr)); |
| 215 | if (default_gid) { | ||
| 216 | table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT; | ||
| 217 | if (action == GID_TABLE_WRITE_ACTION_DEL) | ||
| 218 | table->data_vec[ix].attr.gid_type = old_gid_type; | ||
| 219 | } | ||
| 214 | if (table->data_vec[ix].attr.ndev && | 220 | if (table->data_vec[ix].attr.ndev && |
| 215 | table->data_vec[ix].attr.ndev != old_net_dev) | 221 | table->data_vec[ix].attr.ndev != old_net_dev) |
| 216 | dev_hold(table->data_vec[ix].attr.ndev); | 222 | dev_hold(table->data_vec[ix].attr.ndev); |
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 1d92e091e22e..c99525512b34 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
| @@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id) | |||
| 3452 | work->cm_event.event = IB_CM_USER_ESTABLISHED; | 3452 | work->cm_event.event = IB_CM_USER_ESTABLISHED; |
| 3453 | 3453 | ||
| 3454 | /* Check if the device started its remove_one */ | 3454 | /* Check if the device started its remove_one */ |
| 3455 | spin_lock_irq(&cm.lock); | 3455 | spin_lock_irqsave(&cm.lock, flags); |
| 3456 | if (!cm_dev->going_down) { | 3456 | if (!cm_dev->going_down) { |
| 3457 | queue_delayed_work(cm.wq, &work->work, 0); | 3457 | queue_delayed_work(cm.wq, &work->work, 0); |
| 3458 | } else { | 3458 | } else { |
| 3459 | kfree(work); | 3459 | kfree(work); |
| 3460 | ret = -ENODEV; | 3460 | ret = -ENODEV; |
| 3461 | } | 3461 | } |
| 3462 | spin_unlock_irq(&cm.lock); | 3462 | spin_unlock_irqrestore(&cm.lock, flags); |
| 3463 | 3463 | ||
| 3464 | out: | 3464 | out: |
| 3465 | return ret; | 3465 | return ret; |
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 5516fb070344..5c155fa91eec 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
| @@ -661,6 +661,9 @@ int ib_query_port(struct ib_device *device, | |||
| 661 | if (err || port_attr->subnet_prefix) | 661 | if (err || port_attr->subnet_prefix) |
| 662 | return err; | 662 | return err; |
| 663 | 663 | ||
| 664 | if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND) | ||
| 665 | return 0; | ||
| 666 | |||
| 664 | err = ib_query_gid(device, port_num, 0, &gid, NULL); | 667 | err = ib_query_gid(device, port_num, 0, &gid, NULL); |
| 665 | if (err) | 668 | if (err) |
| 666 | return err; | 669 | return err; |
| @@ -1024,7 +1027,8 @@ static int __init ib_core_init(void) | |||
| 1024 | goto err_mad; | 1027 | goto err_mad; |
| 1025 | } | 1028 | } |
| 1026 | 1029 | ||
| 1027 | if (ib_add_ibnl_clients()) { | 1030 | ret = ib_add_ibnl_clients(); |
| 1031 | if (ret) { | ||
| 1028 | pr_warn("Couldn't register ibnl clients\n"); | 1032 | pr_warn("Couldn't register ibnl clients\n"); |
| 1029 | goto err_sa; | 1033 | goto err_sa; |
| 1030 | } | 1034 | } |
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index 43e3fa27102b..1c41b95cefec 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c | |||
| @@ -506,7 +506,7 @@ int iwpm_add_and_query_mapping_cb(struct sk_buff *skb, | |||
| 506 | if (!nlmsg_request) { | 506 | if (!nlmsg_request) { |
| 507 | pr_info("%s: Could not find a matching request (seq = %u)\n", | 507 | pr_info("%s: Could not find a matching request (seq = %u)\n", |
| 508 | __func__, msg_seq); | 508 | __func__, msg_seq); |
| 509 | return -EINVAL; | 509 | return -EINVAL; |
| 510 | } | 510 | } |
| 511 | pm_msg = nlmsg_request->req_buffer; | 511 | pm_msg = nlmsg_request->req_buffer; |
| 512 | local_sockaddr = (struct sockaddr_storage *) | 512 | local_sockaddr = (struct sockaddr_storage *) |
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 82fb511112da..2d49228f28b2 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
| @@ -1638,9 +1638,9 @@ static void remove_mad_reg_req(struct ib_mad_agent_private *agent_priv) | |||
| 1638 | /* Now, check to see if there are any methods still in use */ | 1638 | /* Now, check to see if there are any methods still in use */ |
| 1639 | if (!check_method_table(method)) { | 1639 | if (!check_method_table(method)) { |
| 1640 | /* If not, release management method table */ | 1640 | /* If not, release management method table */ |
| 1641 | kfree(method); | 1641 | kfree(method); |
| 1642 | class->method_table[mgmt_class] = NULL; | 1642 | class->method_table[mgmt_class] = NULL; |
| 1643 | /* Any management classes left ? */ | 1643 | /* Any management classes left ? */ |
| 1644 | if (!check_class_table(class)) { | 1644 | if (!check_class_table(class)) { |
| 1645 | /* If not, release management class table */ | 1645 | /* If not, release management class table */ |
| 1646 | kfree(class); | 1646 | kfree(class); |
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 5e573bb18660..a5793c8f1590 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c | |||
| @@ -889,9 +889,9 @@ static struct attribute *alloc_hsa_lifespan(char *name, u8 port_num) | |||
| 889 | static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | 889 | static void setup_hw_stats(struct ib_device *device, struct ib_port *port, |
| 890 | u8 port_num) | 890 | u8 port_num) |
| 891 | { | 891 | { |
| 892 | struct attribute_group *hsag = NULL; | 892 | struct attribute_group *hsag; |
| 893 | struct rdma_hw_stats *stats; | 893 | struct rdma_hw_stats *stats; |
| 894 | int i = 0, ret; | 894 | int i, ret; |
| 895 | 895 | ||
| 896 | stats = device->alloc_hw_stats(device, port_num); | 896 | stats = device->alloc_hw_stats(device, port_num); |
| 897 | 897 | ||
| @@ -899,19 +899,22 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | |||
| 899 | return; | 899 | return; |
| 900 | 900 | ||
| 901 | if (!stats->names || stats->num_counters <= 0) | 901 | if (!stats->names || stats->num_counters <= 0) |
| 902 | goto err; | 902 | goto err_free_stats; |
| 903 | 903 | ||
| 904 | /* | ||
| 905 | * Two extra attribue elements here, one for the lifespan entry and | ||
| 906 | * one to NULL terminate the list for the sysfs core code | ||
| 907 | */ | ||
| 904 | hsag = kzalloc(sizeof(*hsag) + | 908 | hsag = kzalloc(sizeof(*hsag) + |
| 905 | // 1 extra for the lifespan config entry | 909 | sizeof(void *) * (stats->num_counters + 2), |
| 906 | sizeof(void *) * (stats->num_counters + 1), | ||
| 907 | GFP_KERNEL); | 910 | GFP_KERNEL); |
| 908 | if (!hsag) | 911 | if (!hsag) |
| 909 | return; | 912 | goto err_free_stats; |
| 910 | 913 | ||
| 911 | ret = device->get_hw_stats(device, stats, port_num, | 914 | ret = device->get_hw_stats(device, stats, port_num, |
| 912 | stats->num_counters); | 915 | stats->num_counters); |
| 913 | if (ret != stats->num_counters) | 916 | if (ret != stats->num_counters) |
| 914 | goto err; | 917 | goto err_free_hsag; |
| 915 | 918 | ||
| 916 | stats->timestamp = jiffies; | 919 | stats->timestamp = jiffies; |
| 917 | 920 | ||
| @@ -922,10 +925,13 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | |||
| 922 | hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]); | 925 | hsag->attrs[i] = alloc_hsa(i, port_num, stats->names[i]); |
| 923 | if (!hsag->attrs[i]) | 926 | if (!hsag->attrs[i]) |
| 924 | goto err; | 927 | goto err; |
| 928 | sysfs_attr_init(hsag->attrs[i]); | ||
| 925 | } | 929 | } |
| 926 | 930 | ||
| 927 | /* treat an error here as non-fatal */ | 931 | /* treat an error here as non-fatal */ |
| 928 | hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num); | 932 | hsag->attrs[i] = alloc_hsa_lifespan("lifespan", port_num); |
| 933 | if (hsag->attrs[i]) | ||
| 934 | sysfs_attr_init(hsag->attrs[i]); | ||
| 929 | 935 | ||
| 930 | if (port) { | 936 | if (port) { |
| 931 | struct kobject *kobj = &port->kobj; | 937 | struct kobject *kobj = &port->kobj; |
| @@ -946,10 +952,12 @@ static void setup_hw_stats(struct ib_device *device, struct ib_port *port, | |||
| 946 | return; | 952 | return; |
| 947 | 953 | ||
| 948 | err: | 954 | err: |
| 949 | kfree(stats); | ||
| 950 | for (; i >= 0; i--) | 955 | for (; i >= 0; i--) |
| 951 | kfree(hsag->attrs[i]); | 956 | kfree(hsag->attrs[i]); |
| 957 | err_free_hsag: | ||
| 952 | kfree(hsag); | 958 | kfree(hsag); |
| 959 | err_free_stats: | ||
| 960 | kfree(stats); | ||
| 953 | return; | 961 | return; |
| 954 | } | 962 | } |
| 955 | 963 | ||
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index 6e7050ab9e16..14d7eeb09be6 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c | |||
| @@ -300,16 +300,15 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 300 | const struct cpumask *node_mask, | 300 | const struct cpumask *node_mask, |
| 301 | *proc_mask = tsk_cpus_allowed(current); | 301 | *proc_mask = tsk_cpus_allowed(current); |
| 302 | struct cpu_mask_set *set = &dd->affinity->proc; | 302 | struct cpu_mask_set *set = &dd->affinity->proc; |
| 303 | char buf[1024]; | ||
| 304 | 303 | ||
| 305 | /* | 304 | /* |
| 306 | * check whether process/context affinity has already | 305 | * check whether process/context affinity has already |
| 307 | * been set | 306 | * been set |
| 308 | */ | 307 | */ |
| 309 | if (cpumask_weight(proc_mask) == 1) { | 308 | if (cpumask_weight(proc_mask) == 1) { |
| 310 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); | 309 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl", |
| 311 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %s", | 310 | current->pid, current->comm, |
| 312 | current->pid, current->comm, buf); | 311 | cpumask_pr_args(proc_mask)); |
| 313 | /* | 312 | /* |
| 314 | * Mark the pre-set CPU as used. This is atomic so we don't | 313 | * Mark the pre-set CPU as used. This is atomic so we don't |
| 315 | * need the lock | 314 | * need the lock |
| @@ -318,9 +317,9 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 318 | cpumask_set_cpu(cpu, &set->used); | 317 | cpumask_set_cpu(cpu, &set->used); |
| 319 | goto done; | 318 | goto done; |
| 320 | } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { | 319 | } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) { |
| 321 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(proc_mask)); | 320 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl", |
| 322 | hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %s", | 321 | current->pid, current->comm, |
| 323 | current->pid, current->comm, buf); | 322 | cpumask_pr_args(proc_mask)); |
| 324 | goto done; | 323 | goto done; |
| 325 | } | 324 | } |
| 326 | 325 | ||
| @@ -356,8 +355,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 356 | cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ? | 355 | cpumask_or(intrs, intrs, (dd->affinity->rcv_intr.gen ? |
| 357 | &dd->affinity->rcv_intr.mask : | 356 | &dd->affinity->rcv_intr.mask : |
| 358 | &dd->affinity->rcv_intr.used)); | 357 | &dd->affinity->rcv_intr.used)); |
| 359 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(intrs)); | 358 | hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl", |
| 360 | hfi1_cdbg(PROC, "CPUs used by interrupts: %s", buf); | 359 | cpumask_pr_args(intrs)); |
| 361 | 360 | ||
| 362 | /* | 361 | /* |
| 363 | * If we don't have a NUMA node requested, preference is towards | 362 | * If we don't have a NUMA node requested, preference is towards |
| @@ -366,18 +365,16 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 366 | if (node == -1) | 365 | if (node == -1) |
| 367 | node = dd->node; | 366 | node = dd->node; |
| 368 | node_mask = cpumask_of_node(node); | 367 | node_mask = cpumask_of_node(node); |
| 369 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(node_mask)); | 368 | hfi1_cdbg(PROC, "device on NUMA %u, CPUs %*pbl", node, |
| 370 | hfi1_cdbg(PROC, "device on NUMA %u, CPUs %s", node, buf); | 369 | cpumask_pr_args(node_mask)); |
| 371 | 370 | ||
| 372 | /* diff will hold all unused cpus */ | 371 | /* diff will hold all unused cpus */ |
| 373 | cpumask_andnot(diff, &set->mask, &set->used); | 372 | cpumask_andnot(diff, &set->mask, &set->used); |
| 374 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(diff)); | 373 | hfi1_cdbg(PROC, "unused CPUs (all) %*pbl", cpumask_pr_args(diff)); |
| 375 | hfi1_cdbg(PROC, "unused CPUs (all) %s", buf); | ||
| 376 | 374 | ||
| 377 | /* get cpumask of available CPUs on preferred NUMA */ | 375 | /* get cpumask of available CPUs on preferred NUMA */ |
| 378 | cpumask_and(mask, diff, node_mask); | 376 | cpumask_and(mask, diff, node_mask); |
| 379 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); | 377 | hfi1_cdbg(PROC, "available cpus on NUMA %*pbl", cpumask_pr_args(mask)); |
| 380 | hfi1_cdbg(PROC, "available cpus on NUMA %s", buf); | ||
| 381 | 378 | ||
| 382 | /* | 379 | /* |
| 383 | * At first, we don't want to place processes on the same | 380 | * At first, we don't want to place processes on the same |
| @@ -395,8 +392,8 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node) | |||
| 395 | cpumask_andnot(diff, &set->mask, &set->used); | 392 | cpumask_andnot(diff, &set->mask, &set->used); |
| 396 | cpumask_andnot(mask, diff, node_mask); | 393 | cpumask_andnot(mask, diff, node_mask); |
| 397 | } | 394 | } |
| 398 | scnprintf(buf, 1024, "%*pbl", cpumask_pr_args(mask)); | 395 | hfi1_cdbg(PROC, "possible CPUs for process %*pbl", |
| 399 | hfi1_cdbg(PROC, "possible CPUs for process %s", buf); | 396 | cpumask_pr_args(mask)); |
| 400 | 397 | ||
| 401 | cpu = cpumask_first(mask); | 398 | cpu = cpumask_first(mask); |
| 402 | if (cpu >= nr_cpu_ids) /* empty */ | 399 | if (cpu >= nr_cpu_ids) /* empty */ |
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 3b876da745a1..81619fbb5842 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
| @@ -7832,8 +7832,8 @@ static void handle_dcc_err(struct hfi1_devdata *dd, u32 unused, u64 reg) | |||
| 7832 | * save first 2 flits in the packet that caused | 7832 | * save first 2 flits in the packet that caused |
| 7833 | * the error | 7833 | * the error |
| 7834 | */ | 7834 | */ |
| 7835 | dd->err_info_rcvport.packet_flit1 = hdr0; | 7835 | dd->err_info_rcvport.packet_flit1 = hdr0; |
| 7836 | dd->err_info_rcvport.packet_flit2 = hdr1; | 7836 | dd->err_info_rcvport.packet_flit2 = hdr1; |
| 7837 | } | 7837 | } |
| 7838 | switch (info) { | 7838 | switch (info) { |
| 7839 | case 1: | 7839 | case 1: |
| @@ -11906,7 +11906,7 @@ static void update_synth_timer(unsigned long opaque) | |||
| 11906 | hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); | 11906 | hfi1_cdbg(CNTR, "[%d] No update necessary", dd->unit); |
| 11907 | } | 11907 | } |
| 11908 | 11908 | ||
| 11909 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); | 11909 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); |
| 11910 | } | 11910 | } |
| 11911 | 11911 | ||
| 11912 | #define C_MAX_NAME 13 /* 12 chars + one for /0 */ | 11912 | #define C_MAX_NAME 13 /* 12 chars + one for /0 */ |
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 5cc492e5776d..0d28a5a40fae 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c | |||
| @@ -1337,7 +1337,7 @@ static void cleanup_device_data(struct hfi1_devdata *dd) | |||
| 1337 | dma_free_coherent(&dd->pcidev->dev, sizeof(u64), | 1337 | dma_free_coherent(&dd->pcidev->dev, sizeof(u64), |
| 1338 | (void *)dd->rcvhdrtail_dummy_kvaddr, | 1338 | (void *)dd->rcvhdrtail_dummy_kvaddr, |
| 1339 | dd->rcvhdrtail_dummy_physaddr); | 1339 | dd->rcvhdrtail_dummy_physaddr); |
| 1340 | dd->rcvhdrtail_dummy_kvaddr = NULL; | 1340 | dd->rcvhdrtail_dummy_kvaddr = NULL; |
| 1341 | } | 1341 | } |
| 1342 | 1342 | ||
| 1343 | for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) { | 1343 | for (ctxt = 0; tmp && ctxt < dd->num_rcv_contexts; ctxt++) { |
diff --git a/drivers/infiniband/hw/hfi1/trace.c b/drivers/infiniband/hw/hfi1/trace.c index 79b2952c0dfb..4cfb13771897 100644 --- a/drivers/infiniband/hw/hfi1/trace.c +++ b/drivers/infiniband/hw/hfi1/trace.c | |||
| @@ -214,19 +214,6 @@ const char *print_u32_array( | |||
| 214 | return ret; | 214 | return ret; |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | const char *print_u64_array( | ||
| 218 | struct trace_seq *p, | ||
| 219 | u64 *arr, int len) | ||
| 220 | { | ||
| 221 | int i; | ||
| 222 | const char *ret = trace_seq_buffer_ptr(p); | ||
| 223 | |||
| 224 | for (i = 0; i < len; i++) | ||
| 225 | trace_seq_printf(p, "%s0x%016llx", i == 0 ? "" : " ", arr[i]); | ||
| 226 | trace_seq_putc(p, 0); | ||
| 227 | return ret; | ||
| 228 | } | ||
| 229 | |||
| 230 | __hfi1_trace_fn(PKT); | 217 | __hfi1_trace_fn(PKT); |
| 231 | __hfi1_trace_fn(PROC); | 218 | __hfi1_trace_fn(PROC); |
| 232 | __hfi1_trace_fn(SDMA); | 219 | __hfi1_trace_fn(SDMA); |
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 29f4795f866c..47ffd273ecbd 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c | |||
| @@ -183,7 +183,7 @@ struct user_sdma_iovec { | |||
| 183 | struct sdma_mmu_node *node; | 183 | struct sdma_mmu_node *node; |
| 184 | }; | 184 | }; |
| 185 | 185 | ||
| 186 | #define SDMA_CACHE_NODE_EVICT BIT(0) | 186 | #define SDMA_CACHE_NODE_EVICT 0 |
| 187 | 187 | ||
| 188 | struct sdma_mmu_node { | 188 | struct sdma_mmu_node { |
| 189 | struct mmu_rb_node rb; | 189 | struct mmu_rb_node rb; |
| @@ -1355,11 +1355,11 @@ static int set_txreq_header(struct user_sdma_request *req, | |||
| 1355 | */ | 1355 | */ |
| 1356 | SDMA_DBG(req, "TID offset %ubytes %uunits om%u", | 1356 | SDMA_DBG(req, "TID offset %ubytes %uunits om%u", |
| 1357 | req->tidoffset, req->tidoffset / req->omfactor, | 1357 | req->tidoffset, req->tidoffset / req->omfactor, |
| 1358 | !!(req->omfactor - KDETH_OM_SMALL)); | 1358 | req->omfactor != KDETH_OM_SMALL); |
| 1359 | KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET, | 1359 | KDETH_SET(hdr->kdeth.ver_tid_offset, OFFSET, |
| 1360 | req->tidoffset / req->omfactor); | 1360 | req->tidoffset / req->omfactor); |
| 1361 | KDETH_SET(hdr->kdeth.ver_tid_offset, OM, | 1361 | KDETH_SET(hdr->kdeth.ver_tid_offset, OM, |
| 1362 | !!(req->omfactor - KDETH_OM_SMALL)); | 1362 | req->omfactor != KDETH_OM_SMALL); |
| 1363 | } | 1363 | } |
| 1364 | done: | 1364 | done: |
| 1365 | trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, | 1365 | trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt, |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index b01ef6eee6e8..0eb09e104542 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
| @@ -505,9 +505,9 @@ static int mlx4_ib_query_device(struct ib_device *ibdev, | |||
| 505 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; | 505 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; |
| 506 | else | 506 | else |
| 507 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; | 507 | props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; |
| 508 | if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) | ||
| 509 | props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; | ||
| 510 | } | 508 | } |
| 509 | if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) | ||
| 510 | props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; | ||
| 511 | 511 | ||
| 512 | props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; | 512 | props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM; |
| 513 | 513 | ||
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index dabcc65bd65e..9c0e67bd2ba7 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c | |||
| @@ -822,7 +822,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, | |||
| 822 | int eqn; | 822 | int eqn; |
| 823 | int err; | 823 | int err; |
| 824 | 824 | ||
| 825 | if (entries < 0) | 825 | if (entries < 0 || |
| 826 | (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))) | ||
| 826 | return ERR_PTR(-EINVAL); | 827 | return ERR_PTR(-EINVAL); |
| 827 | 828 | ||
| 828 | if (check_cq_create_flags(attr->flags)) | 829 | if (check_cq_create_flags(attr->flags)) |
| @@ -1168,11 +1169,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) | |||
| 1168 | return -ENOSYS; | 1169 | return -ENOSYS; |
| 1169 | } | 1170 | } |
| 1170 | 1171 | ||
| 1171 | if (entries < 1) | 1172 | if (entries < 1 || |
| 1173 | entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) { | ||
| 1174 | mlx5_ib_warn(dev, "wrong entries number %d, max %d\n", | ||
| 1175 | entries, | ||
| 1176 | 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)); | ||
| 1172 | return -EINVAL; | 1177 | return -EINVAL; |
| 1178 | } | ||
| 1173 | 1179 | ||
| 1174 | entries = roundup_pow_of_two(entries + 1); | 1180 | entries = roundup_pow_of_two(entries + 1); |
| 1175 | if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) | 1181 | if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) |
| 1176 | return -EINVAL; | 1182 | return -EINVAL; |
| 1177 | 1183 | ||
| 1178 | if (entries == ibcq->cqe + 1) | 1184 | if (entries == ibcq->cqe + 1) |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index c72797cd9e4f..b48ad85315dc 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
| @@ -524,6 +524,9 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, | |||
| 524 | MLX5_CAP_ETH(dev->mdev, scatter_fcs)) | 524 | MLX5_CAP_ETH(dev->mdev, scatter_fcs)) |
| 525 | props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS; | 525 | props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS; |
| 526 | 526 | ||
| 527 | if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) | ||
| 528 | props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING; | ||
| 529 | |||
| 527 | props->vendor_part_id = mdev->pdev->device; | 530 | props->vendor_part_id = mdev->pdev->device; |
| 528 | props->hw_ver = mdev->pdev->revision; | 531 | props->hw_ver = mdev->pdev->revision; |
| 529 | 532 | ||
| @@ -915,7 +918,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
| 915 | num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; | 918 | num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE; |
| 916 | gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; | 919 | gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE; |
| 917 | resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); | 920 | resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); |
| 918 | resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); | 921 | if (mlx5_core_is_pf(dev->mdev) && MLX5_CAP_GEN(dev->mdev, bf)) |
| 922 | resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); | ||
| 919 | resp.cache_line_size = L1_CACHE_BYTES; | 923 | resp.cache_line_size = L1_CACHE_BYTES; |
| 920 | resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); | 924 | resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); |
| 921 | resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); | 925 | resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); |
| @@ -988,7 +992,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev, | |||
| 988 | if (field_avail(typeof(resp), cqe_version, udata->outlen)) | 992 | if (field_avail(typeof(resp), cqe_version, udata->outlen)) |
| 989 | resp.response_length += sizeof(resp.cqe_version); | 993 | resp.response_length += sizeof(resp.cqe_version); |
| 990 | 994 | ||
| 991 | if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { | 995 | /* |
| 996 | * We don't want to expose information from the PCI bar that is located | ||
| 997 | * after 4096 bytes, so if the arch only supports larger pages, let's | ||
| 998 | * pretend we don't support reading the HCA's core clock. This is also | ||
| 999 | * forced by mmap function. | ||
| 1000 | */ | ||
| 1001 | if (PAGE_SIZE <= 4096 && | ||
| 1002 | field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) { | ||
| 992 | resp.comp_mask |= | 1003 | resp.comp_mask |= |
| 993 | MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; | 1004 | MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; |
| 994 | resp.hca_core_clock_offset = | 1005 | resp.hca_core_clock_offset = |
| @@ -1798,7 +1809,7 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr, | |||
| 1798 | { | 1809 | { |
| 1799 | struct mlx5_ib_dev *dev = | 1810 | struct mlx5_ib_dev *dev = |
| 1800 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); | 1811 | container_of(device, struct mlx5_ib_dev, ib_dev.dev); |
| 1801 | return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev), | 1812 | return sprintf(buf, "%d.%d.%04d\n", fw_rev_maj(dev->mdev), |
| 1802 | fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); | 1813 | fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev)); |
| 1803 | } | 1814 | } |
| 1804 | 1815 | ||
| @@ -1866,14 +1877,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, | |||
| 1866 | break; | 1877 | break; |
| 1867 | 1878 | ||
| 1868 | case MLX5_DEV_EVENT_PORT_DOWN: | 1879 | case MLX5_DEV_EVENT_PORT_DOWN: |
| 1880 | case MLX5_DEV_EVENT_PORT_INITIALIZED: | ||
| 1869 | ibev.event = IB_EVENT_PORT_ERR; | 1881 | ibev.event = IB_EVENT_PORT_ERR; |
| 1870 | port = (u8)param; | 1882 | port = (u8)param; |
| 1871 | break; | 1883 | break; |
| 1872 | 1884 | ||
| 1873 | case MLX5_DEV_EVENT_PORT_INITIALIZED: | ||
| 1874 | /* not used by ULPs */ | ||
| 1875 | return; | ||
| 1876 | |||
| 1877 | case MLX5_DEV_EVENT_LID_CHANGE: | 1885 | case MLX5_DEV_EVENT_LID_CHANGE: |
| 1878 | ibev.event = IB_EVENT_LID_CHANGE; | 1886 | ibev.event = IB_EVENT_LID_CHANGE; |
| 1879 | port = (u8)param; | 1887 | port = (u8)param; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 504117657d41..ce434228a5ea 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
| @@ -235,6 +235,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, | |||
| 235 | qp->rq.max_gs = 0; | 235 | qp->rq.max_gs = 0; |
| 236 | qp->rq.wqe_cnt = 0; | 236 | qp->rq.wqe_cnt = 0; |
| 237 | qp->rq.wqe_shift = 0; | 237 | qp->rq.wqe_shift = 0; |
| 238 | cap->max_recv_wr = 0; | ||
| 239 | cap->max_recv_sge = 0; | ||
| 238 | } else { | 240 | } else { |
| 239 | if (ucmd) { | 241 | if (ucmd) { |
| 240 | qp->rq.wqe_cnt = ucmd->rq_wqe_count; | 242 | qp->rq.wqe_cnt = ucmd->rq_wqe_count; |
| @@ -1851,13 +1853,15 @@ static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev, | |||
| 1851 | static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | 1853 | static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, |
| 1852 | const struct ib_ah_attr *ah, | 1854 | const struct ib_ah_attr *ah, |
| 1853 | struct mlx5_qp_path *path, u8 port, int attr_mask, | 1855 | struct mlx5_qp_path *path, u8 port, int attr_mask, |
| 1854 | u32 path_flags, const struct ib_qp_attr *attr) | 1856 | u32 path_flags, const struct ib_qp_attr *attr, |
| 1857 | bool alt) | ||
| 1855 | { | 1858 | { |
| 1856 | enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port); | 1859 | enum rdma_link_layer ll = rdma_port_get_link_layer(&dev->ib_dev, port); |
| 1857 | int err; | 1860 | int err; |
| 1858 | 1861 | ||
| 1859 | if (attr_mask & IB_QP_PKEY_INDEX) | 1862 | if (attr_mask & IB_QP_PKEY_INDEX) |
| 1860 | path->pkey_index = attr->pkey_index; | 1863 | path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index : |
| 1864 | attr->pkey_index); | ||
| 1861 | 1865 | ||
| 1862 | if (ah->ah_flags & IB_AH_GRH) { | 1866 | if (ah->ah_flags & IB_AH_GRH) { |
| 1863 | if (ah->grh.sgid_index >= | 1867 | if (ah->grh.sgid_index >= |
| @@ -1877,9 +1881,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 1877 | ah->grh.sgid_index); | 1881 | ah->grh.sgid_index); |
| 1878 | path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4; | 1882 | path->dci_cfi_prio_sl = (ah->sl & 0x7) << 4; |
| 1879 | } else { | 1883 | } else { |
| 1880 | path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; | 1884 | path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0; |
| 1881 | path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : | 1885 | path->fl_free_ar |= |
| 1882 | 0; | 1886 | (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0; |
| 1883 | path->rlid = cpu_to_be16(ah->dlid); | 1887 | path->rlid = cpu_to_be16(ah->dlid); |
| 1884 | path->grh_mlid = ah->src_path_bits & 0x7f; | 1888 | path->grh_mlid = ah->src_path_bits & 0x7f; |
| 1885 | if (ah->ah_flags & IB_AH_GRH) | 1889 | if (ah->ah_flags & IB_AH_GRH) |
| @@ -1903,7 +1907,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 1903 | path->port = port; | 1907 | path->port = port; |
| 1904 | 1908 | ||
| 1905 | if (attr_mask & IB_QP_TIMEOUT) | 1909 | if (attr_mask & IB_QP_TIMEOUT) |
| 1906 | path->ackto_lt = attr->timeout << 3; | 1910 | path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3; |
| 1907 | 1911 | ||
| 1908 | if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) | 1912 | if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) |
| 1909 | return modify_raw_packet_eth_prio(dev->mdev, | 1913 | return modify_raw_packet_eth_prio(dev->mdev, |
| @@ -2264,7 +2268,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
| 2264 | context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); | 2268 | context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num); |
| 2265 | 2269 | ||
| 2266 | if (attr_mask & IB_QP_PKEY_INDEX) | 2270 | if (attr_mask & IB_QP_PKEY_INDEX) |
| 2267 | context->pri_path.pkey_index = attr->pkey_index; | 2271 | context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index); |
| 2268 | 2272 | ||
| 2269 | /* todo implement counter_index functionality */ | 2273 | /* todo implement counter_index functionality */ |
| 2270 | 2274 | ||
| @@ -2277,7 +2281,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
| 2277 | if (attr_mask & IB_QP_AV) { | 2281 | if (attr_mask & IB_QP_AV) { |
| 2278 | err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path, | 2282 | err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path, |
| 2279 | attr_mask & IB_QP_PORT ? attr->port_num : qp->port, | 2283 | attr_mask & IB_QP_PORT ? attr->port_num : qp->port, |
| 2280 | attr_mask, 0, attr); | 2284 | attr_mask, 0, attr, false); |
| 2281 | if (err) | 2285 | if (err) |
| 2282 | goto out; | 2286 | goto out; |
| 2283 | } | 2287 | } |
| @@ -2288,7 +2292,9 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp, | |||
| 2288 | if (attr_mask & IB_QP_ALT_PATH) { | 2292 | if (attr_mask & IB_QP_ALT_PATH) { |
| 2289 | err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, | 2293 | err = mlx5_set_path(dev, qp, &attr->alt_ah_attr, |
| 2290 | &context->alt_path, | 2294 | &context->alt_path, |
| 2291 | attr->alt_port_num, attr_mask, 0, attr); | 2295 | attr->alt_port_num, |
| 2296 | attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT, | ||
| 2297 | 0, attr, true); | ||
| 2292 | if (err) | 2298 | if (err) |
| 2293 | goto out; | 2299 | goto out; |
| 2294 | } | 2300 | } |
| @@ -4013,11 +4019,12 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, | |||
| 4013 | if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { | 4019 | if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) { |
| 4014 | to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); | 4020 | to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path); |
| 4015 | to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); | 4021 | to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path); |
| 4016 | qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f; | 4022 | qp_attr->alt_pkey_index = |
| 4023 | be16_to_cpu(context->alt_path.pkey_index); | ||
| 4017 | qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; | 4024 | qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num; |
| 4018 | } | 4025 | } |
| 4019 | 4026 | ||
| 4020 | qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f; | 4027 | qp_attr->pkey_index = be16_to_cpu(context->pri_path.pkey_index); |
| 4021 | qp_attr->port_num = context->pri_path.port; | 4028 | qp_attr->port_num = context->pri_path.port; |
| 4022 | 4029 | ||
| 4023 | /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ | 4030 | /* qp_attr->en_sqd_async_notify is only applicable in modify qp */ |
| @@ -4079,17 +4086,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, | |||
| 4079 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; | 4086 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; |
| 4080 | 4087 | ||
| 4081 | if (!ibqp->uobject) { | 4088 | if (!ibqp->uobject) { |
| 4082 | qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; | 4089 | qp_attr->cap.max_send_wr = qp->sq.max_post; |
| 4083 | qp_attr->cap.max_send_sge = qp->sq.max_gs; | 4090 | qp_attr->cap.max_send_sge = qp->sq.max_gs; |
| 4091 | qp_init_attr->qp_context = ibqp->qp_context; | ||
| 4084 | } else { | 4092 | } else { |
| 4085 | qp_attr->cap.max_send_wr = 0; | 4093 | qp_attr->cap.max_send_wr = 0; |
| 4086 | qp_attr->cap.max_send_sge = 0; | 4094 | qp_attr->cap.max_send_sge = 0; |
| 4087 | } | 4095 | } |
| 4088 | 4096 | ||
| 4089 | /* We don't support inline sends for kernel QPs (yet), and we | 4097 | qp_init_attr->qp_type = ibqp->qp_type; |
| 4090 | * don't know what userspace's value should be. | 4098 | qp_init_attr->recv_cq = ibqp->recv_cq; |
| 4091 | */ | 4099 | qp_init_attr->send_cq = ibqp->send_cq; |
| 4092 | qp_attr->cap.max_inline_data = 0; | 4100 | qp_init_attr->srq = ibqp->srq; |
| 4101 | qp_attr->cap.max_inline_data = qp->max_inline_data; | ||
| 4093 | 4102 | ||
| 4094 | qp_init_attr->cap = qp_attr->cap; | 4103 | qp_init_attr->cap = qp_attr->cap; |
| 4095 | 4104 | ||
diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 7209fbc03ccb..a0b6ebee4d8a 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c | |||
| @@ -36,7 +36,6 @@ | |||
| 36 | #include <linux/dma-mapping.h> | 36 | #include <linux/dma-mapping.h> |
| 37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
| 38 | #include <linux/hugetlb.h> | 38 | #include <linux/hugetlb.h> |
| 39 | #include <linux/dma-attrs.h> | ||
| 40 | #include <linux/iommu.h> | 39 | #include <linux/iommu.h> |
| 41 | #include <linux/workqueue.h> | 40 | #include <linux/workqueue.h> |
| 42 | #include <linux/list.h> | 41 | #include <linux/list.h> |
| @@ -112,10 +111,6 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, | |||
| 112 | int i; | 111 | int i; |
| 113 | int flags; | 112 | int flags; |
| 114 | dma_addr_t pa; | 113 | dma_addr_t pa; |
| 115 | DEFINE_DMA_ATTRS(attrs); | ||
| 116 | |||
| 117 | if (dmasync) | ||
| 118 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); | ||
| 119 | 114 | ||
| 120 | if (!can_do_mlock()) | 115 | if (!can_do_mlock()) |
| 121 | return -EPERM; | 116 | return -EPERM; |
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 5fa4d4d81ee0..7de5134bec85 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c | |||
| @@ -502,6 +502,12 @@ static void rvt_remove_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp) | |||
| 502 | */ | 502 | */ |
| 503 | static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, | 503 | static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, |
| 504 | enum ib_qp_type type) | 504 | enum ib_qp_type type) |
| 505 | __releases(&qp->s_lock) | ||
| 506 | __releases(&qp->s_hlock) | ||
| 507 | __releases(&qp->r_lock) | ||
| 508 | __acquires(&qp->r_lock) | ||
| 509 | __acquires(&qp->s_hlock) | ||
| 510 | __acquires(&qp->s_lock) | ||
| 505 | { | 511 | { |
| 506 | if (qp->state != IB_QPS_RESET) { | 512 | if (qp->state != IB_QPS_RESET) { |
| 507 | qp->state = IB_QPS_RESET; | 513 | qp->state = IB_QPS_RESET; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index bab7db6fa9ab..4f7d9b48df64 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
| @@ -94,6 +94,7 @@ enum { | |||
| 94 | IPOIB_NEIGH_TBL_FLUSH = 12, | 94 | IPOIB_NEIGH_TBL_FLUSH = 12, |
| 95 | IPOIB_FLAG_DEV_ADDR_SET = 13, | 95 | IPOIB_FLAG_DEV_ADDR_SET = 13, |
| 96 | IPOIB_FLAG_DEV_ADDR_CTRL = 14, | 96 | IPOIB_FLAG_DEV_ADDR_CTRL = 14, |
| 97 | IPOIB_FLAG_GOING_DOWN = 15, | ||
| 97 | 98 | ||
| 98 | IPOIB_MAX_BACKOFF_SECONDS = 16, | 99 | IPOIB_MAX_BACKOFF_SECONDS = 16, |
| 99 | 100 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index b2f42835d76d..951d9abcca8b 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
| @@ -1486,6 +1486,10 @@ static ssize_t set_mode(struct device *d, struct device_attribute *attr, | |||
| 1486 | { | 1486 | { |
| 1487 | struct net_device *dev = to_net_dev(d); | 1487 | struct net_device *dev = to_net_dev(d); |
| 1488 | int ret; | 1488 | int ret; |
| 1489 | struct ipoib_dev_priv *priv = netdev_priv(dev); | ||
| 1490 | |||
| 1491 | if (test_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags)) | ||
| 1492 | return -EPERM; | ||
| 1489 | 1493 | ||
| 1490 | if (!rtnl_trylock()) | 1494 | if (!rtnl_trylock()) |
| 1491 | return restart_syscall(); | 1495 | return restart_syscall(); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 45c40a17d6a6..dc6d241b9406 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
| @@ -1015,7 +1015,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) | |||
| 1015 | if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL)) | 1015 | if (ib_query_gid(priv->ca, priv->port, 0, &gid0, NULL)) |
| 1016 | return false; | 1016 | return false; |
| 1017 | 1017 | ||
| 1018 | netif_addr_lock(priv->dev); | 1018 | netif_addr_lock_bh(priv->dev); |
| 1019 | 1019 | ||
| 1020 | /* The subnet prefix may have changed, update it now so we won't have | 1020 | /* The subnet prefix may have changed, update it now so we won't have |
| 1021 | * to do it later | 1021 | * to do it later |
| @@ -1026,12 +1026,12 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) | |||
| 1026 | 1026 | ||
| 1027 | search_gid.global.interface_id = priv->local_gid.global.interface_id; | 1027 | search_gid.global.interface_id = priv->local_gid.global.interface_id; |
| 1028 | 1028 | ||
| 1029 | netif_addr_unlock(priv->dev); | 1029 | netif_addr_unlock_bh(priv->dev); |
| 1030 | 1030 | ||
| 1031 | err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB, | 1031 | err = ib_find_gid(priv->ca, &search_gid, IB_GID_TYPE_IB, |
| 1032 | priv->dev, &port, &index); | 1032 | priv->dev, &port, &index); |
| 1033 | 1033 | ||
| 1034 | netif_addr_lock(priv->dev); | 1034 | netif_addr_lock_bh(priv->dev); |
| 1035 | 1035 | ||
| 1036 | if (search_gid.global.interface_id != | 1036 | if (search_gid.global.interface_id != |
| 1037 | priv->local_gid.global.interface_id) | 1037 | priv->local_gid.global.interface_id) |
| @@ -1092,7 +1092,7 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) | |||
| 1092 | } | 1092 | } |
| 1093 | 1093 | ||
| 1094 | out: | 1094 | out: |
| 1095 | netif_addr_unlock(priv->dev); | 1095 | netif_addr_unlock_bh(priv->dev); |
| 1096 | 1096 | ||
| 1097 | return ret; | 1097 | return ret; |
| 1098 | } | 1098 | } |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 2d7c16346648..5f58c41ef787 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
| @@ -1206,7 +1206,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr) | |||
| 1206 | neigh = NULL; | 1206 | neigh = NULL; |
| 1207 | goto out_unlock; | 1207 | goto out_unlock; |
| 1208 | } | 1208 | } |
| 1209 | neigh->alive = jiffies; | 1209 | |
| 1210 | if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)) | ||
| 1211 | neigh->alive = jiffies; | ||
| 1210 | goto out_unlock; | 1212 | goto out_unlock; |
| 1211 | } | 1213 | } |
| 1212 | } | 1214 | } |
| @@ -1851,7 +1853,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) | |||
| 1851 | struct ipoib_dev_priv *child_priv; | 1853 | struct ipoib_dev_priv *child_priv; |
| 1852 | struct net_device *netdev = priv->dev; | 1854 | struct net_device *netdev = priv->dev; |
| 1853 | 1855 | ||
| 1854 | netif_addr_lock(netdev); | 1856 | netif_addr_lock_bh(netdev); |
| 1855 | 1857 | ||
| 1856 | memcpy(&priv->local_gid.global.interface_id, | 1858 | memcpy(&priv->local_gid.global.interface_id, |
| 1857 | &gid->global.interface_id, | 1859 | &gid->global.interface_id, |
| @@ -1859,7 +1861,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) | |||
| 1859 | memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); | 1861 | memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); |
| 1860 | clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); | 1862 | clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); |
| 1861 | 1863 | ||
| 1862 | netif_addr_unlock(netdev); | 1864 | netif_addr_unlock_bh(netdev); |
| 1863 | 1865 | ||
| 1864 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { | 1866 | if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) { |
| 1865 | down_read(&priv->vlan_rwsem); | 1867 | down_read(&priv->vlan_rwsem); |
| @@ -1875,7 +1877,7 @@ static int ipoib_check_lladdr(struct net_device *dev, | |||
| 1875 | union ib_gid *gid = (union ib_gid *)(ss->__data + 4); | 1877 | union ib_gid *gid = (union ib_gid *)(ss->__data + 4); |
| 1876 | int ret = 0; | 1878 | int ret = 0; |
| 1877 | 1879 | ||
| 1878 | netif_addr_lock(dev); | 1880 | netif_addr_lock_bh(dev); |
| 1879 | 1881 | ||
| 1880 | /* Make sure the QPN, reserved and subnet prefix match the current | 1882 | /* Make sure the QPN, reserved and subnet prefix match the current |
| 1881 | * lladdr, it also makes sure the lladdr is unicast. | 1883 | * lladdr, it also makes sure the lladdr is unicast. |
| @@ -1885,7 +1887,7 @@ static int ipoib_check_lladdr(struct net_device *dev, | |||
| 1885 | gid->global.interface_id == 0) | 1887 | gid->global.interface_id == 0) |
| 1886 | ret = -EINVAL; | 1888 | ret = -EINVAL; |
| 1887 | 1889 | ||
| 1888 | netif_addr_unlock(dev); | 1890 | netif_addr_unlock_bh(dev); |
| 1889 | 1891 | ||
| 1890 | return ret; | 1892 | return ret; |
| 1891 | } | 1893 | } |
| @@ -2141,6 +2143,9 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data) | |||
| 2141 | ib_unregister_event_handler(&priv->event_handler); | 2143 | ib_unregister_event_handler(&priv->event_handler); |
| 2142 | flush_workqueue(ipoib_workqueue); | 2144 | flush_workqueue(ipoib_workqueue); |
| 2143 | 2145 | ||
| 2146 | /* mark interface in the middle of destruction */ | ||
| 2147 | set_bit(IPOIB_FLAG_GOING_DOWN, &priv->flags); | ||
| 2148 | |||
| 2144 | rtnl_lock(); | 2149 | rtnl_lock(); |
| 2145 | dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); | 2150 | dev_change_flags(priv->dev, priv->dev->flags & ~IFF_UP); |
| 2146 | rtnl_unlock(); | 2151 | rtnl_unlock(); |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 82fbc9442608..d3394b6add24 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c | |||
| @@ -582,13 +582,13 @@ void ipoib_mcast_join_task(struct work_struct *work) | |||
| 582 | return; | 582 | return; |
| 583 | } | 583 | } |
| 584 | priv->local_lid = port_attr.lid; | 584 | priv->local_lid = port_attr.lid; |
| 585 | netif_addr_lock(dev); | 585 | netif_addr_lock_bh(dev); |
| 586 | 586 | ||
| 587 | if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { | 587 | if (!test_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags)) { |
| 588 | netif_addr_unlock(dev); | 588 | netif_addr_unlock_bh(dev); |
| 589 | return; | 589 | return; |
| 590 | } | 590 | } |
| 591 | netif_addr_unlock(dev); | 591 | netif_addr_unlock_bh(dev); |
| 592 | 592 | ||
| 593 | spin_lock_irq(&priv->lock); | 593 | spin_lock_irq(&priv->lock); |
| 594 | if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) | 594 | if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c index 64a35595eab8..a2f9f29c6ab5 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c | |||
| @@ -131,6 +131,9 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey) | |||
| 131 | 131 | ||
| 132 | ppriv = netdev_priv(pdev); | 132 | ppriv = netdev_priv(pdev); |
| 133 | 133 | ||
| 134 | if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags)) | ||
| 135 | return -EPERM; | ||
| 136 | |||
| 134 | snprintf(intf_name, sizeof intf_name, "%s.%04x", | 137 | snprintf(intf_name, sizeof intf_name, "%s.%04x", |
| 135 | ppriv->dev->name, pkey); | 138 | ppriv->dev->name, pkey); |
| 136 | priv = ipoib_intf_alloc(intf_name); | 139 | priv = ipoib_intf_alloc(intf_name); |
| @@ -183,6 +186,9 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey) | |||
| 183 | 186 | ||
| 184 | ppriv = netdev_priv(pdev); | 187 | ppriv = netdev_priv(pdev); |
| 185 | 188 | ||
| 189 | if (test_bit(IPOIB_FLAG_GOING_DOWN, &ppriv->flags)) | ||
| 190 | return -EPERM; | ||
| 191 | |||
| 186 | if (!rtnl_trylock()) | 192 | if (!rtnl_trylock()) |
| 187 | return restart_syscall(); | 193 | return restart_syscall(); |
| 188 | 194 | ||
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 646de170ec12..3322ed750172 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
| @@ -1457,7 +1457,6 @@ static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch, | |||
| 1457 | { | 1457 | { |
| 1458 | unsigned int sg_offset = 0; | 1458 | unsigned int sg_offset = 0; |
| 1459 | 1459 | ||
| 1460 | state->desc = req->indirect_desc; | ||
| 1461 | state->fr.next = req->fr_list; | 1460 | state->fr.next = req->fr_list; |
| 1462 | state->fr.end = req->fr_list + ch->target->mr_per_cmd; | 1461 | state->fr.end = req->fr_list + ch->target->mr_per_cmd; |
| 1463 | state->sg = scat; | 1462 | state->sg = scat; |
| @@ -1489,7 +1488,6 @@ static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch, | |||
| 1489 | struct scatterlist *sg; | 1488 | struct scatterlist *sg; |
| 1490 | int i; | 1489 | int i; |
| 1491 | 1490 | ||
| 1492 | state->desc = req->indirect_desc; | ||
| 1493 | for_each_sg(scat, sg, count, i) { | 1491 | for_each_sg(scat, sg, count, i) { |
| 1494 | srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), | 1492 | srp_map_desc(state, ib_sg_dma_address(dev->dev, sg), |
| 1495 | ib_sg_dma_len(dev->dev, sg), | 1493 | ib_sg_dma_len(dev->dev, sg), |
| @@ -1655,6 +1653,7 @@ static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch, | |||
| 1655 | target->indirect_size, DMA_TO_DEVICE); | 1653 | target->indirect_size, DMA_TO_DEVICE); |
| 1656 | 1654 | ||
| 1657 | memset(&state, 0, sizeof(state)); | 1655 | memset(&state, 0, sizeof(state)); |
| 1656 | state.desc = req->indirect_desc; | ||
| 1658 | if (dev->use_fast_reg) | 1657 | if (dev->use_fast_reg) |
| 1659 | ret = srp_map_sg_fr(&state, ch, req, scat, count); | 1658 | ret = srp_map_sg_fr(&state, ch, req, scat, count); |
| 1660 | else if (dev->use_fmr) | 1659 | else if (dev->use_fmr) |
| @@ -3526,7 +3525,7 @@ static void srp_add_one(struct ib_device *device) | |||
| 3526 | int mr_page_shift, p; | 3525 | int mr_page_shift, p; |
| 3527 | u64 max_pages_per_mr; | 3526 | u64 max_pages_per_mr; |
| 3528 | 3527 | ||
| 3529 | srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL); | 3528 | srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL); |
| 3530 | if (!srp_dev) | 3529 | if (!srp_dev) |
| 3531 | return; | 3530 | return; |
| 3532 | 3531 | ||
| @@ -3586,8 +3585,6 @@ static void srp_add_one(struct ib_device *device) | |||
| 3586 | IB_ACCESS_REMOTE_WRITE); | 3585 | IB_ACCESS_REMOTE_WRITE); |
| 3587 | if (IS_ERR(srp_dev->global_mr)) | 3586 | if (IS_ERR(srp_dev->global_mr)) |
| 3588 | goto err_pd; | 3587 | goto err_pd; |
| 3589 | } else { | ||
| 3590 | srp_dev->global_mr = NULL; | ||
| 3591 | } | 3588 | } |
| 3592 | 3589 | ||
| 3593 | for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { | 3590 | for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { |
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c index d7723ce772b3..c04bc6afb965 100644 --- a/drivers/media/usb/uvc/uvc_v4l2.c +++ b/drivers/media/usb/uvc/uvc_v4l2.c | |||
| @@ -1274,8 +1274,6 @@ struct uvc_xu_control_mapping32 { | |||
| 1274 | static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, | 1274 | static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, |
| 1275 | const struct uvc_xu_control_mapping32 __user *up) | 1275 | const struct uvc_xu_control_mapping32 __user *up) |
| 1276 | { | 1276 | { |
| 1277 | struct uvc_menu_info __user *umenus; | ||
| 1278 | struct uvc_menu_info __user *kmenus; | ||
| 1279 | compat_caddr_t p; | 1277 | compat_caddr_t p; |
| 1280 | 1278 | ||
| 1281 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || | 1279 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
| @@ -1292,17 +1290,7 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, | |||
| 1292 | 1290 | ||
| 1293 | if (__get_user(p, &up->menu_info)) | 1291 | if (__get_user(p, &up->menu_info)) |
| 1294 | return -EFAULT; | 1292 | return -EFAULT; |
| 1295 | umenus = compat_ptr(p); | 1293 | kp->menu_info = compat_ptr(p); |
| 1296 | if (!access_ok(VERIFY_READ, umenus, kp->menu_count * sizeof(*umenus))) | ||
| 1297 | return -EFAULT; | ||
| 1298 | |||
| 1299 | kmenus = compat_alloc_user_space(kp->menu_count * sizeof(*kmenus)); | ||
| 1300 | if (kmenus == NULL) | ||
| 1301 | return -EFAULT; | ||
| 1302 | kp->menu_info = kmenus; | ||
| 1303 | |||
| 1304 | if (copy_in_user(kmenus, umenus, kp->menu_count * sizeof(*umenus))) | ||
| 1305 | return -EFAULT; | ||
| 1306 | 1294 | ||
| 1307 | return 0; | 1295 | return 0; |
| 1308 | } | 1296 | } |
| @@ -1310,10 +1298,6 @@ static int uvc_v4l2_get_xu_mapping(struct uvc_xu_control_mapping *kp, | |||
| 1310 | static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp, | 1298 | static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp, |
| 1311 | struct uvc_xu_control_mapping32 __user *up) | 1299 | struct uvc_xu_control_mapping32 __user *up) |
| 1312 | { | 1300 | { |
| 1313 | struct uvc_menu_info __user *umenus; | ||
| 1314 | struct uvc_menu_info __user *kmenus = kp->menu_info; | ||
| 1315 | compat_caddr_t p; | ||
| 1316 | |||
| 1317 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || | 1301 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
| 1318 | __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) || | 1302 | __copy_to_user(up, kp, offsetof(typeof(*up), menu_info)) || |
| 1319 | __put_user(kp->menu_count, &up->menu_count)) | 1303 | __put_user(kp->menu_count, &up->menu_count)) |
| @@ -1322,16 +1306,6 @@ static int uvc_v4l2_put_xu_mapping(const struct uvc_xu_control_mapping *kp, | |||
| 1322 | if (__clear_user(up->reserved, sizeof(up->reserved))) | 1306 | if (__clear_user(up->reserved, sizeof(up->reserved))) |
| 1323 | return -EFAULT; | 1307 | return -EFAULT; |
| 1324 | 1308 | ||
| 1325 | if (kp->menu_count == 0) | ||
| 1326 | return 0; | ||
| 1327 | |||
| 1328 | if (get_user(p, &up->menu_info)) | ||
| 1329 | return -EFAULT; | ||
| 1330 | umenus = compat_ptr(p); | ||
| 1331 | |||
| 1332 | if (copy_in_user(umenus, kmenus, kp->menu_count * sizeof(*umenus))) | ||
| 1333 | return -EFAULT; | ||
| 1334 | |||
| 1335 | return 0; | 1309 | return 0; |
| 1336 | } | 1310 | } |
| 1337 | 1311 | ||
| @@ -1346,8 +1320,6 @@ struct uvc_xu_control_query32 { | |||
| 1346 | static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, | 1320 | static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, |
| 1347 | const struct uvc_xu_control_query32 __user *up) | 1321 | const struct uvc_xu_control_query32 __user *up) |
| 1348 | { | 1322 | { |
| 1349 | u8 __user *udata; | ||
| 1350 | u8 __user *kdata; | ||
| 1351 | compat_caddr_t p; | 1323 | compat_caddr_t p; |
| 1352 | 1324 | ||
| 1353 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || | 1325 | if (!access_ok(VERIFY_READ, up, sizeof(*up)) || |
| @@ -1361,17 +1333,7 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, | |||
| 1361 | 1333 | ||
| 1362 | if (__get_user(p, &up->data)) | 1334 | if (__get_user(p, &up->data)) |
| 1363 | return -EFAULT; | 1335 | return -EFAULT; |
| 1364 | udata = compat_ptr(p); | 1336 | kp->data = compat_ptr(p); |
| 1365 | if (!access_ok(VERIFY_READ, udata, kp->size)) | ||
| 1366 | return -EFAULT; | ||
| 1367 | |||
| 1368 | kdata = compat_alloc_user_space(kp->size); | ||
| 1369 | if (kdata == NULL) | ||
| 1370 | return -EFAULT; | ||
| 1371 | kp->data = kdata; | ||
| 1372 | |||
| 1373 | if (copy_in_user(kdata, udata, kp->size)) | ||
| 1374 | return -EFAULT; | ||
| 1375 | 1337 | ||
| 1376 | return 0; | 1338 | return 0; |
| 1377 | } | 1339 | } |
| @@ -1379,26 +1341,10 @@ static int uvc_v4l2_get_xu_query(struct uvc_xu_control_query *kp, | |||
| 1379 | static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp, | 1341 | static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp, |
| 1380 | struct uvc_xu_control_query32 __user *up) | 1342 | struct uvc_xu_control_query32 __user *up) |
| 1381 | { | 1343 | { |
| 1382 | u8 __user *udata; | ||
| 1383 | u8 __user *kdata = kp->data; | ||
| 1384 | compat_caddr_t p; | ||
| 1385 | |||
| 1386 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || | 1344 | if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) || |
| 1387 | __copy_to_user(up, kp, offsetof(typeof(*up), data))) | 1345 | __copy_to_user(up, kp, offsetof(typeof(*up), data))) |
| 1388 | return -EFAULT; | 1346 | return -EFAULT; |
| 1389 | 1347 | ||
| 1390 | if (kp->size == 0) | ||
| 1391 | return 0; | ||
| 1392 | |||
| 1393 | if (get_user(p, &up->data)) | ||
| 1394 | return -EFAULT; | ||
| 1395 | udata = compat_ptr(p); | ||
| 1396 | if (!access_ok(VERIFY_READ, udata, kp->size)) | ||
| 1397 | return -EFAULT; | ||
| 1398 | |||
| 1399 | if (copy_in_user(udata, kdata, kp->size)) | ||
| 1400 | return -EFAULT; | ||
| 1401 | |||
| 1402 | return 0; | 1348 | return 0; |
| 1403 | } | 1349 | } |
| 1404 | 1350 | ||
| @@ -1408,47 +1354,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp, | |||
| 1408 | static long uvc_v4l2_compat_ioctl32(struct file *file, | 1354 | static long uvc_v4l2_compat_ioctl32(struct file *file, |
| 1409 | unsigned int cmd, unsigned long arg) | 1355 | unsigned int cmd, unsigned long arg) |
| 1410 | { | 1356 | { |
| 1357 | struct uvc_fh *handle = file->private_data; | ||
| 1411 | union { | 1358 | union { |
| 1412 | struct uvc_xu_control_mapping xmap; | 1359 | struct uvc_xu_control_mapping xmap; |
| 1413 | struct uvc_xu_control_query xqry; | 1360 | struct uvc_xu_control_query xqry; |
| 1414 | } karg; | 1361 | } karg; |
| 1415 | void __user *up = compat_ptr(arg); | 1362 | void __user *up = compat_ptr(arg); |
| 1416 | mm_segment_t old_fs; | ||
| 1417 | long ret; | 1363 | long ret; |
| 1418 | 1364 | ||
| 1419 | switch (cmd) { | 1365 | switch (cmd) { |
| 1420 | case UVCIOC_CTRL_MAP32: | 1366 | case UVCIOC_CTRL_MAP32: |
| 1421 | cmd = UVCIOC_CTRL_MAP; | ||
| 1422 | ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up); | 1367 | ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up); |
| 1368 | if (ret) | ||
| 1369 | return ret; | ||
| 1370 | ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap); | ||
| 1371 | if (ret) | ||
| 1372 | return ret; | ||
| 1373 | ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up); | ||
| 1374 | if (ret) | ||
| 1375 | return ret; | ||
| 1376 | |||
| 1423 | break; | 1377 | break; |
| 1424 | 1378 | ||
| 1425 | case UVCIOC_CTRL_QUERY32: | 1379 | case UVCIOC_CTRL_QUERY32: |
| 1426 | cmd = UVCIOC_CTRL_QUERY; | ||
| 1427 | ret = uvc_v4l2_get_xu_query(&karg.xqry, up); | 1380 | ret = uvc_v4l2_get_xu_query(&karg.xqry, up); |
| 1381 | if (ret) | ||
| 1382 | return ret; | ||
| 1383 | ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry); | ||
| 1384 | if (ret) | ||
| 1385 | return ret; | ||
| 1386 | ret = uvc_v4l2_put_xu_query(&karg.xqry, up); | ||
| 1387 | if (ret) | ||
| 1388 | return ret; | ||
| 1428 | break; | 1389 | break; |
| 1429 | 1390 | ||
| 1430 | default: | 1391 | default: |
| 1431 | return -ENOIOCTLCMD; | 1392 | return -ENOIOCTLCMD; |
| 1432 | } | 1393 | } |
| 1433 | 1394 | ||
| 1434 | old_fs = get_fs(); | ||
| 1435 | set_fs(KERNEL_DS); | ||
| 1436 | ret = video_ioctl2(file, cmd, (unsigned long)&karg); | ||
| 1437 | set_fs(old_fs); | ||
| 1438 | |||
| 1439 | if (ret < 0) | ||
| 1440 | return ret; | ||
| 1441 | |||
| 1442 | switch (cmd) { | ||
| 1443 | case UVCIOC_CTRL_MAP: | ||
| 1444 | ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up); | ||
| 1445 | break; | ||
| 1446 | |||
| 1447 | case UVCIOC_CTRL_QUERY: | ||
| 1448 | ret = uvc_v4l2_put_xu_query(&karg.xqry, up); | ||
| 1449 | break; | ||
| 1450 | } | ||
| 1451 | |||
| 1452 | return ret; | 1395 | return ret; |
| 1453 | } | 1396 | } |
| 1454 | #endif | 1397 | #endif |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index c5fe915870ad..a59d55e25d5f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
| @@ -12895,52 +12895,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add) | |||
| 12895 | return rc; | 12895 | return rc; |
| 12896 | } | 12896 | } |
| 12897 | 12897 | ||
| 12898 | int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) | 12898 | static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp) |
| 12899 | { | 12899 | { |
| 12900 | struct bnx2x_vlan_entry *vlan; | 12900 | struct bnx2x_vlan_entry *vlan; |
| 12901 | int rc = 0; | 12901 | int rc = 0; |
| 12902 | 12902 | ||
| 12903 | if (!bp->vlan_cnt) { | 12903 | /* Configure all non-configured entries */ |
| 12904 | DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n"); | ||
| 12905 | return 0; | ||
| 12906 | } | ||
| 12907 | |||
| 12908 | list_for_each_entry(vlan, &bp->vlan_reg, link) { | 12904 | list_for_each_entry(vlan, &bp->vlan_reg, link) { |
| 12909 | /* Prepare for cleanup in case of errors */ | 12905 | if (vlan->hw) |
| 12910 | if (rc) { | ||
| 12911 | vlan->hw = false; | ||
| 12912 | continue; | ||
| 12913 | } | ||
| 12914 | |||
| 12915 | if (!vlan->hw) | ||
| 12916 | continue; | 12906 | continue; |
| 12917 | 12907 | ||
| 12918 | DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid); | 12908 | if (bp->vlan_cnt >= bp->vlan_credit) |
| 12909 | return -ENOBUFS; | ||
| 12919 | 12910 | ||
| 12920 | rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); | 12911 | rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); |
| 12921 | if (rc) { | 12912 | if (rc) { |
| 12922 | BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid); | 12913 | BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid); |
| 12923 | vlan->hw = false; | 12914 | return rc; |
| 12924 | rc = -EINVAL; | ||
| 12925 | continue; | ||
| 12926 | } | 12915 | } |
| 12916 | |||
| 12917 | DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid); | ||
| 12918 | vlan->hw = true; | ||
| 12919 | bp->vlan_cnt++; | ||
| 12927 | } | 12920 | } |
| 12928 | 12921 | ||
| 12929 | return rc; | 12922 | return 0; |
| 12923 | } | ||
| 12924 | |||
| 12925 | static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode) | ||
| 12926 | { | ||
| 12927 | bool need_accept_any_vlan; | ||
| 12928 | |||
| 12929 | need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp); | ||
| 12930 | |||
| 12931 | if (bp->accept_any_vlan != need_accept_any_vlan) { | ||
| 12932 | bp->accept_any_vlan = need_accept_any_vlan; | ||
| 12933 | DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n", | ||
| 12934 | bp->accept_any_vlan ? "raised" : "cleared"); | ||
| 12935 | if (set_rx_mode) { | ||
| 12936 | if (IS_PF(bp)) | ||
| 12937 | bnx2x_set_rx_mode_inner(bp); | ||
| 12938 | else | ||
| 12939 | bnx2x_vfpf_storm_rx_mode(bp); | ||
| 12940 | } | ||
| 12941 | } | ||
| 12942 | } | ||
| 12943 | |||
| 12944 | int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp) | ||
| 12945 | { | ||
| 12946 | struct bnx2x_vlan_entry *vlan; | ||
| 12947 | |||
| 12948 | /* The hw forgot all entries after reload */ | ||
| 12949 | list_for_each_entry(vlan, &bp->vlan_reg, link) | ||
| 12950 | vlan->hw = false; | ||
| 12951 | bp->vlan_cnt = 0; | ||
| 12952 | |||
| 12953 | /* Don't set rx mode here. Our caller will do it. */ | ||
| 12954 | bnx2x_vlan_configure(bp, false); | ||
| 12955 | |||
| 12956 | return 0; | ||
| 12930 | } | 12957 | } |
| 12931 | 12958 | ||
| 12932 | static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) | 12959 | static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) |
| 12933 | { | 12960 | { |
| 12934 | struct bnx2x *bp = netdev_priv(dev); | 12961 | struct bnx2x *bp = netdev_priv(dev); |
| 12935 | struct bnx2x_vlan_entry *vlan; | 12962 | struct bnx2x_vlan_entry *vlan; |
| 12936 | bool hw = false; | ||
| 12937 | int rc = 0; | ||
| 12938 | |||
| 12939 | if (!netif_running(bp->dev)) { | ||
| 12940 | DP(NETIF_MSG_IFUP, | ||
| 12941 | "Ignoring VLAN configuration the interface is down\n"); | ||
| 12942 | return -EFAULT; | ||
| 12943 | } | ||
| 12944 | 12963 | ||
| 12945 | DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); | 12964 | DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid); |
| 12946 | 12965 | ||
| @@ -12948,93 +12967,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) | |||
| 12948 | if (!vlan) | 12967 | if (!vlan) |
| 12949 | return -ENOMEM; | 12968 | return -ENOMEM; |
| 12950 | 12969 | ||
| 12951 | bp->vlan_cnt++; | ||
| 12952 | if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) { | ||
| 12953 | DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n"); | ||
| 12954 | bp->accept_any_vlan = true; | ||
| 12955 | if (IS_PF(bp)) | ||
| 12956 | bnx2x_set_rx_mode_inner(bp); | ||
| 12957 | else | ||
| 12958 | bnx2x_vfpf_storm_rx_mode(bp); | ||
| 12959 | } else if (bp->vlan_cnt <= bp->vlan_credit) { | ||
| 12960 | rc = __bnx2x_vlan_configure_vid(bp, vid, true); | ||
| 12961 | hw = true; | ||
| 12962 | } | ||
| 12963 | |||
| 12964 | vlan->vid = vid; | 12970 | vlan->vid = vid; |
| 12965 | vlan->hw = hw; | 12971 | vlan->hw = false; |
| 12972 | list_add_tail(&vlan->link, &bp->vlan_reg); | ||
| 12966 | 12973 | ||
| 12967 | if (!rc) { | 12974 | if (netif_running(dev)) |
| 12968 | list_add(&vlan->link, &bp->vlan_reg); | 12975 | bnx2x_vlan_configure(bp, true); |
| 12969 | } else { | ||
| 12970 | bp->vlan_cnt--; | ||
| 12971 | kfree(vlan); | ||
| 12972 | } | ||
| 12973 | |||
| 12974 | DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc); | ||
| 12975 | 12976 | ||
| 12976 | return rc; | 12977 | return 0; |
| 12977 | } | 12978 | } |
| 12978 | 12979 | ||
| 12979 | static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) | 12980 | static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) |
| 12980 | { | 12981 | { |
| 12981 | struct bnx2x *bp = netdev_priv(dev); | 12982 | struct bnx2x *bp = netdev_priv(dev); |
| 12982 | struct bnx2x_vlan_entry *vlan; | 12983 | struct bnx2x_vlan_entry *vlan; |
| 12984 | bool found = false; | ||
| 12983 | int rc = 0; | 12985 | int rc = 0; |
| 12984 | 12986 | ||
| 12985 | if (!netif_running(bp->dev)) { | ||
| 12986 | DP(NETIF_MSG_IFUP, | ||
| 12987 | "Ignoring VLAN configuration the interface is down\n"); | ||
| 12988 | return -EFAULT; | ||
| 12989 | } | ||
| 12990 | |||
| 12991 | DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); | 12987 | DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid); |
| 12992 | 12988 | ||
| 12993 | if (!bp->vlan_cnt) { | ||
| 12994 | BNX2X_ERR("Unable to kill VLAN %d\n", vid); | ||
| 12995 | return -EINVAL; | ||
| 12996 | } | ||
| 12997 | |||
| 12998 | list_for_each_entry(vlan, &bp->vlan_reg, link) | 12989 | list_for_each_entry(vlan, &bp->vlan_reg, link) |
| 12999 | if (vlan->vid == vid) | 12990 | if (vlan->vid == vid) { |
| 12991 | found = true; | ||
| 13000 | break; | 12992 | break; |
| 12993 | } | ||
| 13001 | 12994 | ||
| 13002 | if (vlan->vid != vid) { | 12995 | if (!found) { |
| 13003 | BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); | 12996 | BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid); |
| 13004 | return -EINVAL; | 12997 | return -EINVAL; |
| 13005 | } | 12998 | } |
| 13006 | 12999 | ||
| 13007 | if (vlan->hw) | 13000 | if (netif_running(dev) && vlan->hw) { |
| 13008 | rc = __bnx2x_vlan_configure_vid(bp, vid, false); | 13001 | rc = __bnx2x_vlan_configure_vid(bp, vid, false); |
| 13002 | DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid); | ||
| 13003 | bp->vlan_cnt--; | ||
| 13004 | } | ||
| 13009 | 13005 | ||
| 13010 | list_del(&vlan->link); | 13006 | list_del(&vlan->link); |
| 13011 | kfree(vlan); | 13007 | kfree(vlan); |
| 13012 | 13008 | ||
| 13013 | bp->vlan_cnt--; | 13009 | if (netif_running(dev)) |
| 13014 | 13010 | bnx2x_vlan_configure(bp, true); | |
| 13015 | if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) { | ||
| 13016 | /* Configure all non-configured entries */ | ||
| 13017 | list_for_each_entry(vlan, &bp->vlan_reg, link) { | ||
| 13018 | if (vlan->hw) | ||
| 13019 | continue; | ||
| 13020 | |||
| 13021 | rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true); | ||
| 13022 | if (rc) { | ||
| 13023 | BNX2X_ERR("Unable to config VLAN %d\n", | ||
| 13024 | vlan->vid); | ||
| 13025 | continue; | ||
| 13026 | } | ||
| 13027 | DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", | ||
| 13028 | vlan->vid); | ||
| 13029 | vlan->hw = true; | ||
| 13030 | } | ||
| 13031 | DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n"); | ||
| 13032 | bp->accept_any_vlan = false; | ||
| 13033 | if (IS_PF(bp)) | ||
| 13034 | bnx2x_set_rx_mode_inner(bp); | ||
| 13035 | else | ||
| 13036 | bnx2x_vfpf_storm_rx_mode(bp); | ||
| 13037 | } | ||
| 13038 | 13011 | ||
| 13039 | DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); | 13012 | DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc); |
| 13040 | 13013 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 72a2efff8e49..c777cde85ce4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
| @@ -286,7 +286,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 286 | cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); | 286 | cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); |
| 287 | txr->tx_prod = prod; | 287 | txr->tx_prod = prod; |
| 288 | 288 | ||
| 289 | tx_buf->is_push = 1; | ||
| 289 | netdev_tx_sent_queue(txq, skb->len); | 290 | netdev_tx_sent_queue(txq, skb->len); |
| 291 | wmb(); /* Sync is_push and byte queue before pushing data */ | ||
| 290 | 292 | ||
| 291 | push_len = (length + sizeof(*tx_push) + 7) / 8; | 293 | push_len = (length + sizeof(*tx_push) + 7) / 8; |
| 292 | if (push_len > 16) { | 294 | if (push_len > 16) { |
| @@ -298,7 +300,6 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 298 | push_len); | 300 | push_len); |
| 299 | } | 301 | } |
| 300 | 302 | ||
| 301 | tx_buf->is_push = 1; | ||
| 302 | goto tx_done; | 303 | goto tx_done; |
| 303 | } | 304 | } |
| 304 | 305 | ||
| @@ -1112,19 +1113,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, | |||
| 1112 | if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) | 1113 | if (tpa_info->hash_type != PKT_HASH_TYPE_NONE) |
| 1113 | skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); | 1114 | skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type); |
| 1114 | 1115 | ||
| 1115 | if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) { | 1116 | if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && |
| 1116 | netdev_features_t features = skb->dev->features; | 1117 | (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
| 1117 | u16 vlan_proto = tpa_info->metadata >> | 1118 | u16 vlan_proto = tpa_info->metadata >> |
| 1118 | RX_CMP_FLAGS2_METADATA_TPID_SFT; | 1119 | RX_CMP_FLAGS2_METADATA_TPID_SFT; |
| 1120 | u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; | ||
| 1119 | 1121 | ||
| 1120 | if (((features & NETIF_F_HW_VLAN_CTAG_RX) && | 1122 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); |
| 1121 | vlan_proto == ETH_P_8021Q) || | ||
| 1122 | ((features & NETIF_F_HW_VLAN_STAG_RX) && | ||
| 1123 | vlan_proto == ETH_P_8021AD)) { | ||
| 1124 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), | ||
| 1125 | tpa_info->metadata & | ||
| 1126 | RX_CMP_FLAGS2_METADATA_VID_MASK); | ||
| 1127 | } | ||
| 1128 | } | 1123 | } |
| 1129 | 1124 | ||
| 1130 | skb_checksum_none_assert(skb); | 1125 | skb_checksum_none_assert(skb); |
| @@ -1277,19 +1272,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons, | |||
| 1277 | 1272 | ||
| 1278 | skb->protocol = eth_type_trans(skb, dev); | 1273 | skb->protocol = eth_type_trans(skb, dev); |
| 1279 | 1274 | ||
| 1280 | if (rxcmp1->rx_cmp_flags2 & | 1275 | if ((rxcmp1->rx_cmp_flags2 & |
| 1281 | cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) { | 1276 | cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && |
| 1282 | netdev_features_t features = skb->dev->features; | 1277 | (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { |
| 1283 | u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); | 1278 | u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); |
| 1279 | u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; | ||
| 1284 | u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; | 1280 | u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; |
| 1285 | 1281 | ||
| 1286 | if (((features & NETIF_F_HW_VLAN_CTAG_RX) && | 1282 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); |
| 1287 | vlan_proto == ETH_P_8021Q) || | ||
| 1288 | ((features & NETIF_F_HW_VLAN_STAG_RX) && | ||
| 1289 | vlan_proto == ETH_P_8021AD)) | ||
| 1290 | __vlan_hwaccel_put_tag(skb, htons(vlan_proto), | ||
| 1291 | meta_data & | ||
| 1292 | RX_CMP_FLAGS2_METADATA_VID_MASK); | ||
| 1293 | } | 1283 | } |
| 1294 | 1284 | ||
| 1295 | skb_checksum_none_assert(skb); | 1285 | skb_checksum_none_assert(skb); |
| @@ -5466,6 +5456,20 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev, | |||
| 5466 | 5456 | ||
| 5467 | if (!bnxt_rfs_capable(bp)) | 5457 | if (!bnxt_rfs_capable(bp)) |
| 5468 | features &= ~NETIF_F_NTUPLE; | 5458 | features &= ~NETIF_F_NTUPLE; |
| 5459 | |||
| 5460 | /* Both CTAG and STAG VLAN accelaration on the RX side have to be | ||
| 5461 | * turned on or off together. | ||
| 5462 | */ | ||
| 5463 | if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) != | ||
| 5464 | (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) { | ||
| 5465 | if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) | ||
| 5466 | features &= ~(NETIF_F_HW_VLAN_CTAG_RX | | ||
| 5467 | NETIF_F_HW_VLAN_STAG_RX); | ||
| 5468 | else | ||
| 5469 | features |= NETIF_F_HW_VLAN_CTAG_RX | | ||
| 5470 | NETIF_F_HW_VLAN_STAG_RX; | ||
| 5471 | } | ||
| 5472 | |||
| 5469 | return features; | 5473 | return features; |
| 5470 | } | 5474 | } |
| 5471 | 5475 | ||
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index a2cdfc1261dc..50812a1d67bd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | |||
| @@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN | |||
| 144 | CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ | 144 | CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */ |
| 145 | CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ | 145 | CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */ |
| 146 | CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ | 146 | CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */ |
| 147 | CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */ | ||
| 147 | CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ | 148 | CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */ |
| 148 | CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ | 149 | CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */ |
| 149 | CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ | 150 | CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */ |
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index 41b010645100..4edb98c3c6c7 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c | |||
| @@ -1195,7 +1195,7 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 1195 | priv->mdio = mdiobus_alloc(); | 1195 | priv->mdio = mdiobus_alloc(); |
| 1196 | if (!priv->mdio) { | 1196 | if (!priv->mdio) { |
| 1197 | ret = -ENOMEM; | 1197 | ret = -ENOMEM; |
| 1198 | goto free; | 1198 | goto free2; |
| 1199 | } | 1199 | } |
| 1200 | 1200 | ||
| 1201 | priv->mdio->name = "ethoc-mdio"; | 1201 | priv->mdio->name = "ethoc-mdio"; |
| @@ -1208,7 +1208,7 @@ static int ethoc_probe(struct platform_device *pdev) | |||
| 1208 | ret = mdiobus_register(priv->mdio); | 1208 | ret = mdiobus_register(priv->mdio); |
| 1209 | if (ret) { | 1209 | if (ret) { |
| 1210 | dev_err(&netdev->dev, "failed to register MDIO bus\n"); | 1210 | dev_err(&netdev->dev, "failed to register MDIO bus\n"); |
| 1211 | goto free; | 1211 | goto free2; |
| 1212 | } | 1212 | } |
| 1213 | 1213 | ||
| 1214 | ret = ethoc_mdio_probe(netdev); | 1214 | ret = ethoc_mdio_probe(netdev); |
| @@ -1241,9 +1241,10 @@ error2: | |||
| 1241 | error: | 1241 | error: |
| 1242 | mdiobus_unregister(priv->mdio); | 1242 | mdiobus_unregister(priv->mdio); |
| 1243 | mdiobus_free(priv->mdio); | 1243 | mdiobus_free(priv->mdio); |
| 1244 | free: | 1244 | free2: |
| 1245 | if (priv->clk) | 1245 | if (priv->clk) |
| 1246 | clk_disable_unprepare(priv->clk); | 1246 | clk_disable_unprepare(priv->clk); |
| 1247 | free: | ||
| 1247 | free_netdev(netdev); | 1248 | free_netdev(netdev); |
| 1248 | out: | 1249 | out: |
| 1249 | return ret; | 1250 | return ret; |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 3c0255e98535..fea0f330ddbd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
| @@ -2416,24 +2416,24 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) | |||
| 2416 | return -EOPNOTSUPP; | 2416 | return -EOPNOTSUPP; |
| 2417 | 2417 | ||
| 2418 | if (ec->rx_max_coalesced_frames > 255) { | 2418 | if (ec->rx_max_coalesced_frames > 255) { |
| 2419 | pr_err("Rx coalesced frames exceed hardware limiation"); | 2419 | pr_err("Rx coalesced frames exceed hardware limitation\n"); |
| 2420 | return -EINVAL; | 2420 | return -EINVAL; |
| 2421 | } | 2421 | } |
| 2422 | 2422 | ||
| 2423 | if (ec->tx_max_coalesced_frames > 255) { | 2423 | if (ec->tx_max_coalesced_frames > 255) { |
| 2424 | pr_err("Tx coalesced frame exceed hardware limiation"); | 2424 | pr_err("Tx coalesced frame exceed hardware limitation\n"); |
| 2425 | return -EINVAL; | 2425 | return -EINVAL; |
| 2426 | } | 2426 | } |
| 2427 | 2427 | ||
| 2428 | cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); | 2428 | cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr); |
| 2429 | if (cycle > 0xFFFF) { | 2429 | if (cycle > 0xFFFF) { |
| 2430 | pr_err("Rx coalesed usec exceeed hardware limiation"); | 2430 | pr_err("Rx coalesced usec exceed hardware limitation\n"); |
| 2431 | return -EINVAL; | 2431 | return -EINVAL; |
| 2432 | } | 2432 | } |
| 2433 | 2433 | ||
| 2434 | cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); | 2434 | cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr); |
| 2435 | if (cycle > 0xFFFF) { | 2435 | if (cycle > 0xFFFF) { |
| 2436 | pr_err("Rx coalesed usec exceeed hardware limiation"); | 2436 | pr_err("Rx coalesced usec exceed hardware limitation\n"); |
| 2437 | return -EINVAL; | 2437 | return -EINVAL; |
| 2438 | } | 2438 | } |
| 2439 | 2439 | ||
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 7615e0668acb..2e6785b6e8be 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c | |||
| @@ -2440,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2440 | tx_queue->tx_ring_size); | 2440 | tx_queue->tx_ring_size); |
| 2441 | 2441 | ||
| 2442 | if (likely(!nr_frags)) { | 2442 | if (likely(!nr_frags)) { |
| 2443 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | 2443 | if (likely(!do_tstamp)) |
| 2444 | lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); | ||
| 2444 | } else { | 2445 | } else { |
| 2445 | u32 lstatus_start = lstatus; | 2446 | u32 lstatus_start = lstatus; |
| 2446 | 2447 | ||
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c984462fad2a..4763252bbf85 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c | |||
| @@ -133,6 +133,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) | |||
| 133 | static void mtk_phy_link_adjust(struct net_device *dev) | 133 | static void mtk_phy_link_adjust(struct net_device *dev) |
| 134 | { | 134 | { |
| 135 | struct mtk_mac *mac = netdev_priv(dev); | 135 | struct mtk_mac *mac = netdev_priv(dev); |
| 136 | u16 lcl_adv = 0, rmt_adv = 0; | ||
| 137 | u8 flowctrl; | ||
| 136 | u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | | 138 | u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG | |
| 137 | MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | | 139 | MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | |
| 138 | MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | | 140 | MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | |
| @@ -150,11 +152,30 @@ static void mtk_phy_link_adjust(struct net_device *dev) | |||
| 150 | if (mac->phy_dev->link) | 152 | if (mac->phy_dev->link) |
| 151 | mcr |= MAC_MCR_FORCE_LINK; | 153 | mcr |= MAC_MCR_FORCE_LINK; |
| 152 | 154 | ||
| 153 | if (mac->phy_dev->duplex) | 155 | if (mac->phy_dev->duplex) { |
| 154 | mcr |= MAC_MCR_FORCE_DPX; | 156 | mcr |= MAC_MCR_FORCE_DPX; |
| 155 | 157 | ||
| 156 | if (mac->phy_dev->pause) | 158 | if (mac->phy_dev->pause) |
| 157 | mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC; | 159 | rmt_adv = LPA_PAUSE_CAP; |
| 160 | if (mac->phy_dev->asym_pause) | ||
| 161 | rmt_adv |= LPA_PAUSE_ASYM; | ||
| 162 | |||
| 163 | if (mac->phy_dev->advertising & ADVERTISED_Pause) | ||
| 164 | lcl_adv |= ADVERTISE_PAUSE_CAP; | ||
| 165 | if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause) | ||
| 166 | lcl_adv |= ADVERTISE_PAUSE_ASYM; | ||
| 167 | |||
| 168 | flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); | ||
| 169 | |||
| 170 | if (flowctrl & FLOW_CTRL_TX) | ||
| 171 | mcr |= MAC_MCR_FORCE_TX_FC; | ||
| 172 | if (flowctrl & FLOW_CTRL_RX) | ||
| 173 | mcr |= MAC_MCR_FORCE_RX_FC; | ||
| 174 | |||
| 175 | netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n", | ||
| 176 | flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled", | ||
| 177 | flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled"); | ||
| 178 | } | ||
| 158 | 179 | ||
| 159 | mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); | 180 | mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); |
| 160 | 181 | ||
| @@ -208,10 +229,16 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
| 208 | u32 val, ge_mode; | 229 | u32 val, ge_mode; |
| 209 | 230 | ||
| 210 | np = of_parse_phandle(mac->of_node, "phy-handle", 0); | 231 | np = of_parse_phandle(mac->of_node, "phy-handle", 0); |
| 232 | if (!np && of_phy_is_fixed_link(mac->of_node)) | ||
| 233 | if (!of_phy_register_fixed_link(mac->of_node)) | ||
| 234 | np = of_node_get(mac->of_node); | ||
| 211 | if (!np) | 235 | if (!np) |
| 212 | return -ENODEV; | 236 | return -ENODEV; |
| 213 | 237 | ||
| 214 | switch (of_get_phy_mode(np)) { | 238 | switch (of_get_phy_mode(np)) { |
| 239 | case PHY_INTERFACE_MODE_RGMII_TXID: | ||
| 240 | case PHY_INTERFACE_MODE_RGMII_RXID: | ||
| 241 | case PHY_INTERFACE_MODE_RGMII_ID: | ||
| 215 | case PHY_INTERFACE_MODE_RGMII: | 242 | case PHY_INTERFACE_MODE_RGMII: |
| 216 | ge_mode = 0; | 243 | ge_mode = 0; |
| 217 | break; | 244 | break; |
| @@ -236,7 +263,8 @@ static int mtk_phy_connect(struct mtk_mac *mac) | |||
| 236 | mac->phy_dev->autoneg = AUTONEG_ENABLE; | 263 | mac->phy_dev->autoneg = AUTONEG_ENABLE; |
| 237 | mac->phy_dev->speed = 0; | 264 | mac->phy_dev->speed = 0; |
| 238 | mac->phy_dev->duplex = 0; | 265 | mac->phy_dev->duplex = 0; |
| 239 | mac->phy_dev->supported &= PHY_BASIC_FEATURES; | 266 | mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | |
| 267 | SUPPORTED_Asym_Pause; | ||
| 240 | mac->phy_dev->advertising = mac->phy_dev->supported | | 268 | mac->phy_dev->advertising = mac->phy_dev->supported | |
| 241 | ADVERTISED_Autoneg; | 269 | ADVERTISED_Autoneg; |
| 242 | phy_start_aneg(mac->phy_dev); | 270 | phy_start_aneg(mac->phy_dev); |
| @@ -280,7 +308,7 @@ static int mtk_mdio_init(struct mtk_eth *eth) | |||
| 280 | return 0; | 308 | return 0; |
| 281 | 309 | ||
| 282 | err_free_bus: | 310 | err_free_bus: |
| 283 | kfree(eth->mii_bus); | 311 | mdiobus_free(eth->mii_bus); |
| 284 | 312 | ||
| 285 | err_put_node: | 313 | err_put_node: |
| 286 | of_node_put(mii_np); | 314 | of_node_put(mii_np); |
| @@ -295,7 +323,7 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth) | |||
| 295 | 323 | ||
| 296 | mdiobus_unregister(eth->mii_bus); | 324 | mdiobus_unregister(eth->mii_bus); |
| 297 | of_node_put(eth->mii_bus->dev.of_node); | 325 | of_node_put(eth->mii_bus->dev.of_node); |
| 298 | kfree(eth->mii_bus); | 326 | mdiobus_free(eth->mii_bus); |
| 299 | } | 327 | } |
| 300 | 328 | ||
| 301 | static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) | 329 | static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index fd4392999eee..f5c8d5db25a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -3192,10 +3192,7 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv) | |||
| 3192 | flush_workqueue(priv->wq); | 3192 | flush_workqueue(priv->wq); |
| 3193 | if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { | 3193 | if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) { |
| 3194 | netif_device_detach(netdev); | 3194 | netif_device_detach(netdev); |
| 3195 | mutex_lock(&priv->state_lock); | 3195 | mlx5e_close(netdev); |
| 3196 | if (test_bit(MLX5E_STATE_OPENED, &priv->state)) | ||
| 3197 | mlx5e_close_locked(netdev); | ||
| 3198 | mutex_unlock(&priv->state_lock); | ||
| 3199 | } else { | 3196 | } else { |
| 3200 | unregister_netdev(netdev); | 3197 | unregister_netdev(netdev); |
| 3201 | } | 3198 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 229ab16fb8d3..b000ddc29553 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
| @@ -317,7 +317,8 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) | |||
| 317 | while ((sq->pc & wq->sz_m1) > sq->edge) | 317 | while ((sq->pc & wq->sz_m1) > sq->edge) |
| 318 | mlx5e_send_nop(sq, false); | 318 | mlx5e_send_nop(sq, false); |
| 319 | 319 | ||
| 320 | sq->bf_budget = bf ? sq->bf_budget - 1 : 0; | 320 | if (bf) |
| 321 | sq->bf_budget--; | ||
| 321 | 322 | ||
| 322 | sq->stats.packets++; | 323 | sq->stats.packets++; |
| 323 | sq->stats.bytes += num_bytes; | 324 | sq->stats.bytes += num_bytes; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index b84a6918a700..aebbd6ccb9fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
| @@ -383,7 +383,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule, | |||
| 383 | match_v, | 383 | match_v, |
| 384 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, | 384 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, |
| 385 | 0, &dest); | 385 | 0, &dest); |
| 386 | if (IS_ERR_OR_NULL(flow_rule)) { | 386 | if (IS_ERR(flow_rule)) { |
| 387 | pr_warn( | 387 | pr_warn( |
| 388 | "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", | 388 | "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", |
| 389 | dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); | 389 | dmac_v, dmac_c, vport, PTR_ERR(flow_rule)); |
| @@ -457,7 +457,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 457 | 457 | ||
| 458 | table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); | 458 | table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)); |
| 459 | fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0); | 459 | fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0); |
| 460 | if (IS_ERR_OR_NULL(fdb)) { | 460 | if (IS_ERR(fdb)) { |
| 461 | err = PTR_ERR(fdb); | 461 | err = PTR_ERR(fdb); |
| 462 | esw_warn(dev, "Failed to create FDB Table err %d\n", err); | 462 | esw_warn(dev, "Failed to create FDB Table err %d\n", err); |
| 463 | goto out; | 463 | goto out; |
| @@ -474,7 +474,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 474 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); | 474 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3); |
| 475 | eth_broadcast_addr(dmac); | 475 | eth_broadcast_addr(dmac); |
| 476 | g = mlx5_create_flow_group(fdb, flow_group_in); | 476 | g = mlx5_create_flow_group(fdb, flow_group_in); |
| 477 | if (IS_ERR_OR_NULL(g)) { | 477 | if (IS_ERR(g)) { |
| 478 | err = PTR_ERR(g); | 478 | err = PTR_ERR(g); |
| 479 | esw_warn(dev, "Failed to create flow group err(%d)\n", err); | 479 | esw_warn(dev, "Failed to create flow group err(%d)\n", err); |
| 480 | goto out; | 480 | goto out; |
| @@ -489,7 +489,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 489 | eth_zero_addr(dmac); | 489 | eth_zero_addr(dmac); |
| 490 | dmac[0] = 0x01; | 490 | dmac[0] = 0x01; |
| 491 | g = mlx5_create_flow_group(fdb, flow_group_in); | 491 | g = mlx5_create_flow_group(fdb, flow_group_in); |
| 492 | if (IS_ERR_OR_NULL(g)) { | 492 | if (IS_ERR(g)) { |
| 493 | err = PTR_ERR(g); | 493 | err = PTR_ERR(g); |
| 494 | esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); | 494 | esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err); |
| 495 | goto out; | 495 | goto out; |
| @@ -506,7 +506,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports) | |||
| 506 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); | 506 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1); |
| 507 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); | 507 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1); |
| 508 | g = mlx5_create_flow_group(fdb, flow_group_in); | 508 | g = mlx5_create_flow_group(fdb, flow_group_in); |
| 509 | if (IS_ERR_OR_NULL(g)) { | 509 | if (IS_ERR(g)) { |
| 510 | err = PTR_ERR(g); | 510 | err = PTR_ERR(g); |
| 511 | esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); | 511 | esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err); |
| 512 | goto out; | 512 | goto out; |
| @@ -529,7 +529,7 @@ out: | |||
| 529 | } | 529 | } |
| 530 | } | 530 | } |
| 531 | 531 | ||
| 532 | kfree(flow_group_in); | 532 | kvfree(flow_group_in); |
| 533 | return err; | 533 | return err; |
| 534 | } | 534 | } |
| 535 | 535 | ||
| @@ -651,6 +651,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw, | |||
| 651 | esw_fdb_set_vport_rule(esw, | 651 | esw_fdb_set_vport_rule(esw, |
| 652 | mac, | 652 | mac, |
| 653 | vport_idx); | 653 | vport_idx); |
| 654 | iter_vaddr->mc_promisc = true; | ||
| 654 | break; | 655 | break; |
| 655 | case MLX5_ACTION_DEL: | 656 | case MLX5_ACTION_DEL: |
| 656 | if (!iter_vaddr) | 657 | if (!iter_vaddr) |
| @@ -1060,7 +1061,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1060 | return; | 1061 | return; |
| 1061 | 1062 | ||
| 1062 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); | 1063 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); |
| 1063 | if (IS_ERR_OR_NULL(acl)) { | 1064 | if (IS_ERR(acl)) { |
| 1064 | err = PTR_ERR(acl); | 1065 | err = PTR_ERR(acl); |
| 1065 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n", | 1066 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n", |
| 1066 | vport->vport, err); | 1067 | vport->vport, err); |
| @@ -1075,7 +1076,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1075 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); | 1076 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); |
| 1076 | 1077 | ||
| 1077 | vlan_grp = mlx5_create_flow_group(acl, flow_group_in); | 1078 | vlan_grp = mlx5_create_flow_group(acl, flow_group_in); |
| 1078 | if (IS_ERR_OR_NULL(vlan_grp)) { | 1079 | if (IS_ERR(vlan_grp)) { |
| 1079 | err = PTR_ERR(vlan_grp); | 1080 | err = PTR_ERR(vlan_grp); |
| 1080 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n", | 1081 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n", |
| 1081 | vport->vport, err); | 1082 | vport->vport, err); |
| @@ -1086,7 +1087,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1086 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); | 1087 | MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1); |
| 1087 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); | 1088 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); |
| 1088 | drop_grp = mlx5_create_flow_group(acl, flow_group_in); | 1089 | drop_grp = mlx5_create_flow_group(acl, flow_group_in); |
| 1089 | if (IS_ERR_OR_NULL(drop_grp)) { | 1090 | if (IS_ERR(drop_grp)) { |
| 1090 | err = PTR_ERR(drop_grp); | 1091 | err = PTR_ERR(drop_grp); |
| 1091 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n", | 1092 | esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n", |
| 1092 | vport->vport, err); | 1093 | vport->vport, err); |
| @@ -1097,7 +1098,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw, | |||
| 1097 | vport->egress.drop_grp = drop_grp; | 1098 | vport->egress.drop_grp = drop_grp; |
| 1098 | vport->egress.allowed_vlans_grp = vlan_grp; | 1099 | vport->egress.allowed_vlans_grp = vlan_grp; |
| 1099 | out: | 1100 | out: |
| 1100 | kfree(flow_group_in); | 1101 | kvfree(flow_group_in); |
| 1101 | if (err && !IS_ERR_OR_NULL(vlan_grp)) | 1102 | if (err && !IS_ERR_OR_NULL(vlan_grp)) |
| 1102 | mlx5_destroy_flow_group(vlan_grp); | 1103 | mlx5_destroy_flow_group(vlan_grp); |
| 1103 | if (err && !IS_ERR_OR_NULL(acl)) | 1104 | if (err && !IS_ERR_OR_NULL(acl)) |
| @@ -1174,7 +1175,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1174 | return; | 1175 | return; |
| 1175 | 1176 | ||
| 1176 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); | 1177 | acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport); |
| 1177 | if (IS_ERR_OR_NULL(acl)) { | 1178 | if (IS_ERR(acl)) { |
| 1178 | err = PTR_ERR(acl); | 1179 | err = PTR_ERR(acl); |
| 1179 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", | 1180 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n", |
| 1180 | vport->vport, err); | 1181 | vport->vport, err); |
| @@ -1192,7 +1193,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1192 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); | 1193 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); |
| 1193 | 1194 | ||
| 1194 | g = mlx5_create_flow_group(acl, flow_group_in); | 1195 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1195 | if (IS_ERR_OR_NULL(g)) { | 1196 | if (IS_ERR(g)) { |
| 1196 | err = PTR_ERR(g); | 1197 | err = PTR_ERR(g); |
| 1197 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", | 1198 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n", |
| 1198 | vport->vport, err); | 1199 | vport->vport, err); |
| @@ -1207,7 +1208,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1207 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); | 1208 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1); |
| 1208 | 1209 | ||
| 1209 | g = mlx5_create_flow_group(acl, flow_group_in); | 1210 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1210 | if (IS_ERR_OR_NULL(g)) { | 1211 | if (IS_ERR(g)) { |
| 1211 | err = PTR_ERR(g); | 1212 | err = PTR_ERR(g); |
| 1212 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", | 1213 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n", |
| 1213 | vport->vport, err); | 1214 | vport->vport, err); |
| @@ -1223,7 +1224,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1223 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); | 1224 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2); |
| 1224 | 1225 | ||
| 1225 | g = mlx5_create_flow_group(acl, flow_group_in); | 1226 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1226 | if (IS_ERR_OR_NULL(g)) { | 1227 | if (IS_ERR(g)) { |
| 1227 | err = PTR_ERR(g); | 1228 | err = PTR_ERR(g); |
| 1228 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", | 1229 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n", |
| 1229 | vport->vport, err); | 1230 | vport->vport, err); |
| @@ -1236,7 +1237,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw, | |||
| 1236 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); | 1237 | MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3); |
| 1237 | 1238 | ||
| 1238 | g = mlx5_create_flow_group(acl, flow_group_in); | 1239 | g = mlx5_create_flow_group(acl, flow_group_in); |
| 1239 | if (IS_ERR_OR_NULL(g)) { | 1240 | if (IS_ERR(g)) { |
| 1240 | err = PTR_ERR(g); | 1241 | err = PTR_ERR(g); |
| 1241 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", | 1242 | esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n", |
| 1242 | vport->vport, err); | 1243 | vport->vport, err); |
| @@ -1259,7 +1260,7 @@ out: | |||
| 1259 | mlx5_destroy_flow_table(vport->ingress.acl); | 1260 | mlx5_destroy_flow_table(vport->ingress.acl); |
| 1260 | } | 1261 | } |
| 1261 | 1262 | ||
| 1262 | kfree(flow_group_in); | 1263 | kvfree(flow_group_in); |
| 1263 | } | 1264 | } |
| 1264 | 1265 | ||
| 1265 | static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, | 1266 | static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw, |
| @@ -1363,7 +1364,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, | |||
| 1363 | match_v, | 1364 | match_v, |
| 1364 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, | 1365 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, |
| 1365 | 0, NULL); | 1366 | 0, NULL); |
| 1366 | if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) { | 1367 | if (IS_ERR(vport->ingress.allow_rule)) { |
| 1367 | err = PTR_ERR(vport->ingress.allow_rule); | 1368 | err = PTR_ERR(vport->ingress.allow_rule); |
| 1368 | pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", | 1369 | pr_warn("vport[%d] configure ingress allow rule, err(%d)\n", |
| 1369 | vport->vport, err); | 1370 | vport->vport, err); |
| @@ -1380,7 +1381,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, | |||
| 1380 | match_v, | 1381 | match_v, |
| 1381 | MLX5_FLOW_CONTEXT_ACTION_DROP, | 1382 | MLX5_FLOW_CONTEXT_ACTION_DROP, |
| 1382 | 0, NULL); | 1383 | 0, NULL); |
| 1383 | if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) { | 1384 | if (IS_ERR(vport->ingress.drop_rule)) { |
| 1384 | err = PTR_ERR(vport->ingress.drop_rule); | 1385 | err = PTR_ERR(vport->ingress.drop_rule); |
| 1385 | pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", | 1386 | pr_warn("vport[%d] configure ingress drop rule, err(%d)\n", |
| 1386 | vport->vport, err); | 1387 | vport->vport, err); |
| @@ -1439,7 +1440,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, | |||
| 1439 | match_v, | 1440 | match_v, |
| 1440 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, | 1441 | MLX5_FLOW_CONTEXT_ACTION_ALLOW, |
| 1441 | 0, NULL); | 1442 | 0, NULL); |
| 1442 | if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) { | 1443 | if (IS_ERR(vport->egress.allowed_vlan)) { |
| 1443 | err = PTR_ERR(vport->egress.allowed_vlan); | 1444 | err = PTR_ERR(vport->egress.allowed_vlan); |
| 1444 | pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", | 1445 | pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n", |
| 1445 | vport->vport, err); | 1446 | vport->vport, err); |
| @@ -1457,7 +1458,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw, | |||
| 1457 | match_v, | 1458 | match_v, |
| 1458 | MLX5_FLOW_CONTEXT_ACTION_DROP, | 1459 | MLX5_FLOW_CONTEXT_ACTION_DROP, |
| 1459 | 0, NULL); | 1460 | 0, NULL); |
| 1460 | if (IS_ERR_OR_NULL(vport->egress.drop_rule)) { | 1461 | if (IS_ERR(vport->egress.drop_rule)) { |
| 1461 | err = PTR_ERR(vport->egress.drop_rule); | 1462 | err = PTR_ERR(vport->egress.drop_rule); |
| 1462 | pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", | 1463 | pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n", |
| 1463 | vport->vport, err); | 1464 | vport->vport, err); |
| @@ -1491,14 +1492,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num, | |||
| 1491 | 1492 | ||
| 1492 | /* Sync with current vport context */ | 1493 | /* Sync with current vport context */ |
| 1493 | vport->enabled_events = enable_events; | 1494 | vport->enabled_events = enable_events; |
| 1494 | esw_vport_change_handle_locked(vport); | ||
| 1495 | |||
| 1496 | vport->enabled = true; | 1495 | vport->enabled = true; |
| 1497 | 1496 | ||
| 1498 | /* only PF is trusted by default */ | 1497 | /* only PF is trusted by default */ |
| 1499 | vport->trusted = (vport_num) ? false : true; | 1498 | vport->trusted = (vport_num) ? false : true; |
| 1500 | 1499 | esw_vport_change_handle_locked(vport); | |
| 1501 | arm_vport_context_events_cmd(esw->dev, vport_num, enable_events); | ||
| 1502 | 1500 | ||
| 1503 | esw->enabled_vports++; | 1501 | esw->enabled_vports++; |
| 1504 | esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); | 1502 | esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num); |
| @@ -1728,11 +1726,24 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) | |||
| 1728 | (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) | 1726 | (esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev)) |
| 1729 | #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) | 1727 | #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) |
| 1730 | 1728 | ||
| 1729 | static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN]) | ||
| 1730 | { | ||
| 1731 | ((u8 *)node_guid)[7] = mac[0]; | ||
| 1732 | ((u8 *)node_guid)[6] = mac[1]; | ||
| 1733 | ((u8 *)node_guid)[5] = mac[2]; | ||
| 1734 | ((u8 *)node_guid)[4] = 0xff; | ||
| 1735 | ((u8 *)node_guid)[3] = 0xfe; | ||
| 1736 | ((u8 *)node_guid)[2] = mac[3]; | ||
| 1737 | ((u8 *)node_guid)[1] = mac[4]; | ||
| 1738 | ((u8 *)node_guid)[0] = mac[5]; | ||
| 1739 | } | ||
| 1740 | |||
| 1731 | int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | 1741 | int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, |
| 1732 | int vport, u8 mac[ETH_ALEN]) | 1742 | int vport, u8 mac[ETH_ALEN]) |
| 1733 | { | 1743 | { |
| 1734 | int err = 0; | ||
| 1735 | struct mlx5_vport *evport; | 1744 | struct mlx5_vport *evport; |
| 1745 | u64 node_guid; | ||
| 1746 | int err = 0; | ||
| 1736 | 1747 | ||
| 1737 | if (!ESW_ALLOWED(esw)) | 1748 | if (!ESW_ALLOWED(esw)) |
| 1738 | return -EPERM; | 1749 | return -EPERM; |
| @@ -1756,11 +1767,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | |||
| 1756 | return err; | 1767 | return err; |
| 1757 | } | 1768 | } |
| 1758 | 1769 | ||
| 1770 | node_guid_gen_from_mac(&node_guid, mac); | ||
| 1771 | err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid); | ||
| 1772 | if (err) | ||
| 1773 | mlx5_core_warn(esw->dev, | ||
| 1774 | "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n", | ||
| 1775 | vport, err); | ||
| 1776 | |||
| 1759 | mutex_lock(&esw->state_lock); | 1777 | mutex_lock(&esw->state_lock); |
| 1760 | if (evport->enabled) | 1778 | if (evport->enabled) |
| 1761 | err = esw_vport_ingress_config(esw, evport); | 1779 | err = esw_vport_ingress_config(esw, evport); |
| 1762 | mutex_unlock(&esw->state_lock); | 1780 | mutex_unlock(&esw->state_lock); |
| 1763 | |||
| 1764 | return err; | 1781 | return err; |
| 1765 | } | 1782 | } |
| 1766 | 1783 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 8b5f0b2c0d5c..e912a3d2505e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
| @@ -1292,8 +1292,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft) | |||
| 1292 | ft->id); | 1292 | ft->id); |
| 1293 | return err; | 1293 | return err; |
| 1294 | } | 1294 | } |
| 1295 | root->root_ft = new_root_ft; | ||
| 1296 | } | 1295 | } |
| 1296 | root->root_ft = new_root_ft; | ||
| 1297 | return 0; | 1297 | return 0; |
| 1298 | } | 1298 | } |
| 1299 | 1299 | ||
| @@ -1767,6 +1767,9 @@ static void cleanup_root_ns(struct mlx5_core_dev *dev) | |||
| 1767 | 1767 | ||
| 1768 | void mlx5_cleanup_fs(struct mlx5_core_dev *dev) | 1768 | void mlx5_cleanup_fs(struct mlx5_core_dev *dev) |
| 1769 | { | 1769 | { |
| 1770 | if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) | ||
| 1771 | return; | ||
| 1772 | |||
| 1770 | cleanup_root_ns(dev); | 1773 | cleanup_root_ns(dev); |
| 1771 | cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); | 1774 | cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns); |
| 1772 | cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); | 1775 | cleanup_single_prio_root_ns(dev, dev->priv.esw_egress_root_ns); |
| @@ -1828,29 +1831,36 @@ int mlx5_init_fs(struct mlx5_core_dev *dev) | |||
| 1828 | { | 1831 | { |
| 1829 | int err = 0; | 1832 | int err = 0; |
| 1830 | 1833 | ||
| 1834 | if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) | ||
| 1835 | return 0; | ||
| 1836 | |||
| 1831 | err = mlx5_init_fc_stats(dev); | 1837 | err = mlx5_init_fc_stats(dev); |
| 1832 | if (err) | 1838 | if (err) |
| 1833 | return err; | 1839 | return err; |
| 1834 | 1840 | ||
| 1835 | if (MLX5_CAP_GEN(dev, nic_flow_table)) { | 1841 | if (MLX5_CAP_GEN(dev, nic_flow_table) && |
| 1842 | MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) { | ||
| 1836 | err = init_root_ns(dev); | 1843 | err = init_root_ns(dev); |
| 1837 | if (err) | 1844 | if (err) |
| 1838 | goto err; | 1845 | goto err; |
| 1839 | } | 1846 | } |
| 1847 | |||
| 1840 | if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { | 1848 | if (MLX5_CAP_GEN(dev, eswitch_flow_table)) { |
| 1841 | err = init_fdb_root_ns(dev); | 1849 | if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) { |
| 1842 | if (err) | 1850 | err = init_fdb_root_ns(dev); |
| 1843 | goto err; | 1851 | if (err) |
| 1844 | } | 1852 | goto err; |
| 1845 | if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { | 1853 | } |
| 1846 | err = init_egress_acl_root_ns(dev); | 1854 | if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) { |
| 1847 | if (err) | 1855 | err = init_egress_acl_root_ns(dev); |
| 1848 | goto err; | 1856 | if (err) |
| 1849 | } | 1857 | goto err; |
| 1850 | if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { | 1858 | } |
| 1851 | err = init_ingress_acl_root_ns(dev); | 1859 | if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) { |
| 1852 | if (err) | 1860 | err = init_ingress_acl_root_ns(dev); |
| 1853 | goto err; | 1861 | if (err) |
| 1862 | goto err; | ||
| 1863 | } | ||
| 1854 | } | 1864 | } |
| 1855 | 1865 | ||
| 1856 | return 0; | 1866 | return 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index b720a274220d..b82d65802d96 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c | |||
| @@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn) | |||
| 418 | if (out.hdr.status) | 418 | if (out.hdr.status) |
| 419 | err = mlx5_cmd_status_to_err(&out.hdr); | 419 | err = mlx5_cmd_status_to_err(&out.hdr); |
| 420 | else | 420 | else |
| 421 | *xrcdn = be32_to_cpu(out.xrcdn); | 421 | *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff; |
| 422 | 422 | ||
| 423 | return err; | 423 | return err; |
| 424 | } | 424 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index b69dadcfb897..daf44cd4c566 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c | |||
| @@ -508,6 +508,44 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid) | |||
| 508 | } | 508 | } |
| 509 | EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); | 509 | EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); |
| 510 | 510 | ||
| 511 | int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, | ||
| 512 | u32 vport, u64 node_guid) | ||
| 513 | { | ||
| 514 | int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in); | ||
| 515 | void *nic_vport_context; | ||
| 516 | u8 *guid; | ||
| 517 | void *in; | ||
| 518 | int err; | ||
| 519 | |||
| 520 | if (!vport) | ||
| 521 | return -EINVAL; | ||
| 522 | if (!MLX5_CAP_GEN(mdev, vport_group_manager)) | ||
| 523 | return -EACCES; | ||
| 524 | if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify)) | ||
| 525 | return -ENOTSUPP; | ||
| 526 | |||
| 527 | in = mlx5_vzalloc(inlen); | ||
| 528 | if (!in) | ||
| 529 | return -ENOMEM; | ||
| 530 | |||
| 531 | MLX5_SET(modify_nic_vport_context_in, in, | ||
| 532 | field_select.node_guid, 1); | ||
| 533 | MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); | ||
| 534 | MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport); | ||
| 535 | |||
| 536 | nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in, | ||
| 537 | in, nic_vport_context); | ||
| 538 | guid = MLX5_ADDR_OF(nic_vport_context, nic_vport_context, | ||
| 539 | node_guid); | ||
| 540 | MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid); | ||
| 541 | |||
| 542 | err = mlx5_modify_nic_vport_context(mdev, in, inlen); | ||
| 543 | |||
| 544 | kvfree(in); | ||
| 545 | |||
| 546 | return err; | ||
| 547 | } | ||
| 548 | |||
| 511 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, | 549 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, |
| 512 | u16 *qkey_viol_cntr) | 550 | u16 *qkey_viol_cntr) |
| 513 | { | 551 | { |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 4a7273771028..6f9e3ddff4a8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
| @@ -247,15 +247,23 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) | |||
| 247 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); | 247 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); |
| 248 | } | 248 | } |
| 249 | 249 | ||
| 250 | static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) | 250 | static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
| 251 | u8 swid) | ||
| 251 | { | 252 | { |
| 252 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | ||
| 253 | char pspa_pl[MLXSW_REG_PSPA_LEN]; | 253 | char pspa_pl[MLXSW_REG_PSPA_LEN]; |
| 254 | 254 | ||
| 255 | mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port); | 255 | mlxsw_reg_pspa_pack(pspa_pl, swid, local_port); |
| 256 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); | 256 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl); |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid) | ||
| 260 | { | ||
| 261 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | ||
| 262 | |||
| 263 | return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port, | ||
| 264 | swid); | ||
| 265 | } | ||
| 266 | |||
| 259 | static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, | 267 | static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, |
| 260 | bool enable) | 268 | bool enable) |
| 261 | { | 269 | { |
| @@ -305,9 +313,9 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port) | |||
| 305 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); | 313 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl); |
| 306 | } | 314 | } |
| 307 | 315 | ||
| 308 | static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, | 316 | static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, |
| 309 | u8 local_port, u8 *p_module, | 317 | u8 local_port, u8 *p_module, |
| 310 | u8 *p_width, u8 *p_lane) | 318 | u8 *p_width, u8 *p_lane) |
| 311 | { | 319 | { |
| 312 | char pmlp_pl[MLXSW_REG_PMLP_LEN]; | 320 | char pmlp_pl[MLXSW_REG_PMLP_LEN]; |
| 313 | int err; | 321 | int err; |
| @@ -322,16 +330,6 @@ static int __mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, | |||
| 322 | return 0; | 330 | return 0; |
| 323 | } | 331 | } |
| 324 | 332 | ||
| 325 | static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, | ||
| 326 | u8 local_port, u8 *p_module, | ||
| 327 | u8 *p_width) | ||
| 328 | { | ||
| 329 | u8 lane; | ||
| 330 | |||
| 331 | return __mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, p_module, | ||
| 332 | p_width, &lane); | ||
| 333 | } | ||
| 334 | |||
| 335 | static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, | 333 | static int mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
| 336 | u8 module, u8 width, u8 lane) | 334 | u8 module, u8 width, u8 lane) |
| 337 | { | 335 | { |
| @@ -949,17 +947,11 @@ static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name, | |||
| 949 | size_t len) | 947 | size_t len) |
| 950 | { | 948 | { |
| 951 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 949 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
| 952 | u8 module, width, lane; | 950 | u8 module = mlxsw_sp_port->mapping.module; |
| 951 | u8 width = mlxsw_sp_port->mapping.width; | ||
| 952 | u8 lane = mlxsw_sp_port->mapping.lane; | ||
| 953 | int err; | 953 | int err; |
| 954 | 954 | ||
| 955 | err = __mlxsw_sp_port_module_info_get(mlxsw_sp_port->mlxsw_sp, | ||
| 956 | mlxsw_sp_port->local_port, | ||
| 957 | &module, &width, &lane); | ||
| 958 | if (err) { | ||
| 959 | netdev_err(dev, "Failed to retrieve module information\n"); | ||
| 960 | return err; | ||
| 961 | } | ||
| 962 | |||
| 963 | if (!mlxsw_sp_port->split) | 955 | if (!mlxsw_sp_port->split) |
| 964 | err = snprintf(name, len, "p%d", module + 1); | 956 | err = snprintf(name, len, "p%d", module + 1); |
| 965 | else | 957 | else |
| @@ -1681,8 +1673,8 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port) | |||
| 1681 | return 0; | 1673 | return 0; |
| 1682 | } | 1674 | } |
| 1683 | 1675 | ||
| 1684 | static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | 1676 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, |
| 1685 | bool split, u8 module, u8 width) | 1677 | bool split, u8 module, u8 width, u8 lane) |
| 1686 | { | 1678 | { |
| 1687 | struct mlxsw_sp_port *mlxsw_sp_port; | 1679 | struct mlxsw_sp_port *mlxsw_sp_port; |
| 1688 | struct net_device *dev; | 1680 | struct net_device *dev; |
| @@ -1697,6 +1689,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | |||
| 1697 | mlxsw_sp_port->mlxsw_sp = mlxsw_sp; | 1689 | mlxsw_sp_port->mlxsw_sp = mlxsw_sp; |
| 1698 | mlxsw_sp_port->local_port = local_port; | 1690 | mlxsw_sp_port->local_port = local_port; |
| 1699 | mlxsw_sp_port->split = split; | 1691 | mlxsw_sp_port->split = split; |
| 1692 | mlxsw_sp_port->mapping.module = module; | ||
| 1693 | mlxsw_sp_port->mapping.width = width; | ||
| 1694 | mlxsw_sp_port->mapping.lane = lane; | ||
| 1700 | bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); | 1695 | bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE); |
| 1701 | mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); | 1696 | mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL); |
| 1702 | if (!mlxsw_sp_port->active_vlans) { | 1697 | if (!mlxsw_sp_port->active_vlans) { |
| @@ -1839,28 +1834,6 @@ err_port_active_vlans_alloc: | |||
| 1839 | return err; | 1834 | return err; |
| 1840 | } | 1835 | } |
| 1841 | 1836 | ||
| 1842 | static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, | ||
| 1843 | bool split, u8 module, u8 width, u8 lane) | ||
| 1844 | { | ||
| 1845 | int err; | ||
| 1846 | |||
| 1847 | err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, | ||
| 1848 | lane); | ||
| 1849 | if (err) | ||
| 1850 | return err; | ||
| 1851 | |||
| 1852 | err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module, | ||
| 1853 | width); | ||
| 1854 | if (err) | ||
| 1855 | goto err_port_create; | ||
| 1856 | |||
| 1857 | return 0; | ||
| 1858 | |||
| 1859 | err_port_create: | ||
| 1860 | mlxsw_sp_port_module_unmap(mlxsw_sp, local_port); | ||
| 1861 | return err; | ||
| 1862 | } | ||
| 1863 | |||
| 1864 | static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) | 1837 | static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port) |
| 1865 | { | 1838 | { |
| 1866 | struct net_device *dev = mlxsw_sp_port->dev; | 1839 | struct net_device *dev = mlxsw_sp_port->dev; |
| @@ -1909,8 +1882,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp) | |||
| 1909 | 1882 | ||
| 1910 | static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) | 1883 | static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) |
| 1911 | { | 1884 | { |
| 1885 | u8 module, width, lane; | ||
| 1912 | size_t alloc_size; | 1886 | size_t alloc_size; |
| 1913 | u8 module, width; | ||
| 1914 | int i; | 1887 | int i; |
| 1915 | int err; | 1888 | int err; |
| 1916 | 1889 | ||
| @@ -1921,13 +1894,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) | |||
| 1921 | 1894 | ||
| 1922 | for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { | 1895 | for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) { |
| 1923 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, | 1896 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module, |
| 1924 | &width); | 1897 | &width, &lane); |
| 1925 | if (err) | 1898 | if (err) |
| 1926 | goto err_port_module_info_get; | 1899 | goto err_port_module_info_get; |
| 1927 | if (!width) | 1900 | if (!width) |
| 1928 | continue; | 1901 | continue; |
| 1929 | mlxsw_sp->port_to_module[i] = module; | 1902 | mlxsw_sp->port_to_module[i] = module; |
| 1930 | err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width); | 1903 | err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width, |
| 1904 | lane); | ||
| 1931 | if (err) | 1905 | if (err) |
| 1932 | goto err_port_create; | 1906 | goto err_port_create; |
| 1933 | } | 1907 | } |
| @@ -1948,12 +1922,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port) | |||
| 1948 | return local_port - offset; | 1922 | return local_port - offset; |
| 1949 | } | 1923 | } |
| 1950 | 1924 | ||
| 1925 | static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port, | ||
| 1926 | u8 module, unsigned int count) | ||
| 1927 | { | ||
| 1928 | u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; | ||
| 1929 | int err, i; | ||
| 1930 | |||
| 1931 | for (i = 0; i < count; i++) { | ||
| 1932 | err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module, | ||
| 1933 | width, i * width); | ||
| 1934 | if (err) | ||
| 1935 | goto err_port_module_map; | ||
| 1936 | } | ||
| 1937 | |||
| 1938 | for (i = 0; i < count; i++) { | ||
| 1939 | err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0); | ||
| 1940 | if (err) | ||
| 1941 | goto err_port_swid_set; | ||
| 1942 | } | ||
| 1943 | |||
| 1944 | for (i = 0; i < count; i++) { | ||
| 1945 | err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, | ||
| 1946 | module, width, i * width); | ||
| 1947 | if (err) | ||
| 1948 | goto err_port_create; | ||
| 1949 | } | ||
| 1950 | |||
| 1951 | return 0; | ||
| 1952 | |||
| 1953 | err_port_create: | ||
| 1954 | for (i--; i >= 0; i--) | ||
| 1955 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | ||
| 1956 | i = count; | ||
| 1957 | err_port_swid_set: | ||
| 1958 | for (i--; i >= 0; i--) | ||
| 1959 | __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, | ||
| 1960 | MLXSW_PORT_SWID_DISABLED_PORT); | ||
| 1961 | i = count; | ||
| 1962 | err_port_module_map: | ||
| 1963 | for (i--; i >= 0; i--) | ||
| 1964 | mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i); | ||
| 1965 | return err; | ||
| 1966 | } | ||
| 1967 | |||
| 1968 | static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp, | ||
| 1969 | u8 base_port, unsigned int count) | ||
| 1970 | { | ||
| 1971 | u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH; | ||
| 1972 | int i; | ||
| 1973 | |||
| 1974 | /* Split by four means we need to re-create two ports, otherwise | ||
| 1975 | * only one. | ||
| 1976 | */ | ||
| 1977 | count = count / 2; | ||
| 1978 | |||
| 1979 | for (i = 0; i < count; i++) { | ||
| 1980 | local_port = base_port + i * 2; | ||
| 1981 | module = mlxsw_sp->port_to_module[local_port]; | ||
| 1982 | |||
| 1983 | mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width, | ||
| 1984 | 0); | ||
| 1985 | } | ||
| 1986 | |||
| 1987 | for (i = 0; i < count; i++) | ||
| 1988 | __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0); | ||
| 1989 | |||
| 1990 | for (i = 0; i < count; i++) { | ||
| 1991 | local_port = base_port + i * 2; | ||
| 1992 | module = mlxsw_sp->port_to_module[local_port]; | ||
| 1993 | |||
| 1994 | mlxsw_sp_port_create(mlxsw_sp, local_port, false, module, | ||
| 1995 | width, 0); | ||
| 1996 | } | ||
| 1997 | } | ||
| 1998 | |||
| 1951 | static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, | 1999 | static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, |
| 1952 | unsigned int count) | 2000 | unsigned int count) |
| 1953 | { | 2001 | { |
| 1954 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); | 2002 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); |
| 1955 | struct mlxsw_sp_port *mlxsw_sp_port; | 2003 | struct mlxsw_sp_port *mlxsw_sp_port; |
| 1956 | u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count; | ||
| 1957 | u8 module, cur_width, base_port; | 2004 | u8 module, cur_width, base_port; |
| 1958 | int i; | 2005 | int i; |
| 1959 | int err; | 2006 | int err; |
| @@ -1965,18 +2012,14 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, | |||
| 1965 | return -EINVAL; | 2012 | return -EINVAL; |
| 1966 | } | 2013 | } |
| 1967 | 2014 | ||
| 2015 | module = mlxsw_sp_port->mapping.module; | ||
| 2016 | cur_width = mlxsw_sp_port->mapping.width; | ||
| 2017 | |||
| 1968 | if (count != 2 && count != 4) { | 2018 | if (count != 2 && count != 4) { |
| 1969 | netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); | 2019 | netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n"); |
| 1970 | return -EINVAL; | 2020 | return -EINVAL; |
| 1971 | } | 2021 | } |
| 1972 | 2022 | ||
| 1973 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, | ||
| 1974 | &cur_width); | ||
| 1975 | if (err) { | ||
| 1976 | netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); | ||
| 1977 | return err; | ||
| 1978 | } | ||
| 1979 | |||
| 1980 | if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { | 2023 | if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) { |
| 1981 | netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); | 2024 | netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n"); |
| 1982 | return -EINVAL; | 2025 | return -EINVAL; |
| @@ -2001,25 +2044,16 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, | |||
| 2001 | for (i = 0; i < count; i++) | 2044 | for (i = 0; i < count; i++) |
| 2002 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | 2045 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); |
| 2003 | 2046 | ||
| 2004 | for (i = 0; i < count; i++) { | 2047 | err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count); |
| 2005 | err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true, | 2048 | if (err) { |
| 2006 | module, width, i * width); | 2049 | dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n"); |
| 2007 | if (err) { | 2050 | goto err_port_split_create; |
| 2008 | dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n"); | ||
| 2009 | goto err_port_create; | ||
| 2010 | } | ||
| 2011 | } | 2051 | } |
| 2012 | 2052 | ||
| 2013 | return 0; | 2053 | return 0; |
| 2014 | 2054 | ||
| 2015 | err_port_create: | 2055 | err_port_split_create: |
| 2016 | for (i--; i >= 0; i--) | 2056 | mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); |
| 2017 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | ||
| 2018 | for (i = 0; i < count / 2; i++) { | ||
| 2019 | module = mlxsw_sp->port_to_module[base_port + i * 2]; | ||
| 2020 | mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, | ||
| 2021 | module, MLXSW_PORT_MODULE_MAX_WIDTH, 0); | ||
| 2022 | } | ||
| 2023 | return err; | 2057 | return err; |
| 2024 | } | 2058 | } |
| 2025 | 2059 | ||
| @@ -2027,10 +2061,9 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) | |||
| 2027 | { | 2061 | { |
| 2028 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); | 2062 | struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); |
| 2029 | struct mlxsw_sp_port *mlxsw_sp_port; | 2063 | struct mlxsw_sp_port *mlxsw_sp_port; |
| 2030 | u8 module, cur_width, base_port; | 2064 | u8 cur_width, base_port; |
| 2031 | unsigned int count; | 2065 | unsigned int count; |
| 2032 | int i; | 2066 | int i; |
| 2033 | int err; | ||
| 2034 | 2067 | ||
| 2035 | mlxsw_sp_port = mlxsw_sp->ports[local_port]; | 2068 | mlxsw_sp_port = mlxsw_sp->ports[local_port]; |
| 2036 | if (!mlxsw_sp_port) { | 2069 | if (!mlxsw_sp_port) { |
| @@ -2044,12 +2077,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) | |||
| 2044 | return -EINVAL; | 2077 | return -EINVAL; |
| 2045 | } | 2078 | } |
| 2046 | 2079 | ||
| 2047 | err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module, | 2080 | cur_width = mlxsw_sp_port->mapping.width; |
| 2048 | &cur_width); | ||
| 2049 | if (err) { | ||
| 2050 | netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n"); | ||
| 2051 | return err; | ||
| 2052 | } | ||
| 2053 | count = cur_width == 1 ? 4 : 2; | 2081 | count = cur_width == 1 ? 4 : 2; |
| 2054 | 2082 | ||
| 2055 | base_port = mlxsw_sp_cluster_base_port_get(local_port); | 2083 | base_port = mlxsw_sp_cluster_base_port_get(local_port); |
| @@ -2061,14 +2089,7 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port) | |||
| 2061 | for (i = 0; i < count; i++) | 2089 | for (i = 0; i < count; i++) |
| 2062 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); | 2090 | mlxsw_sp_port_remove(mlxsw_sp, base_port + i); |
| 2063 | 2091 | ||
| 2064 | for (i = 0; i < count / 2; i++) { | 2092 | mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count); |
| 2065 | module = mlxsw_sp->port_to_module[base_port + i * 2]; | ||
| 2066 | err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false, | ||
| 2067 | module, MLXSW_PORT_MODULE_MAX_WIDTH, | ||
| 2068 | 0); | ||
| 2069 | if (err) | ||
| 2070 | dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n"); | ||
| 2071 | } | ||
| 2072 | 2093 | ||
| 2073 | return 0; | 2094 | return 0; |
| 2074 | } | 2095 | } |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index e2c022d3e2f3..13b30eaa13d4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
| @@ -229,6 +229,11 @@ struct mlxsw_sp_port { | |||
| 229 | struct ieee_maxrate *maxrate; | 229 | struct ieee_maxrate *maxrate; |
| 230 | struct ieee_pfc *pfc; | 230 | struct ieee_pfc *pfc; |
| 231 | } dcb; | 231 | } dcb; |
| 232 | struct { | ||
| 233 | u8 module; | ||
| 234 | u8 width; | ||
| 235 | u8 lane; | ||
| 236 | } mapping; | ||
| 232 | /* 802.1Q bridge VLANs */ | 237 | /* 802.1Q bridge VLANs */ |
| 233 | unsigned long *active_vlans; | 238 | unsigned long *active_vlans; |
| 234 | unsigned long *untagged_vlans; | 239 | unsigned long *untagged_vlans; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 753064679bde..61cc6869fa65 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
| @@ -1105,6 +1105,39 @@ static int qed_get_port_type(u32 media_type) | |||
| 1105 | return port_type; | 1105 | return port_type; |
| 1106 | } | 1106 | } |
| 1107 | 1107 | ||
| 1108 | static int qed_get_link_data(struct qed_hwfn *hwfn, | ||
| 1109 | struct qed_mcp_link_params *params, | ||
| 1110 | struct qed_mcp_link_state *link, | ||
| 1111 | struct qed_mcp_link_capabilities *link_caps) | ||
| 1112 | { | ||
| 1113 | void *p; | ||
| 1114 | |||
| 1115 | if (!IS_PF(hwfn->cdev)) { | ||
| 1116 | qed_vf_get_link_params(hwfn, params); | ||
| 1117 | qed_vf_get_link_state(hwfn, link); | ||
| 1118 | qed_vf_get_link_caps(hwfn, link_caps); | ||
| 1119 | |||
| 1120 | return 0; | ||
| 1121 | } | ||
| 1122 | |||
| 1123 | p = qed_mcp_get_link_params(hwfn); | ||
| 1124 | if (!p) | ||
| 1125 | return -ENXIO; | ||
| 1126 | memcpy(params, p, sizeof(*params)); | ||
| 1127 | |||
| 1128 | p = qed_mcp_get_link_state(hwfn); | ||
| 1129 | if (!p) | ||
| 1130 | return -ENXIO; | ||
| 1131 | memcpy(link, p, sizeof(*link)); | ||
| 1132 | |||
| 1133 | p = qed_mcp_get_link_capabilities(hwfn); | ||
| 1134 | if (!p) | ||
| 1135 | return -ENXIO; | ||
| 1136 | memcpy(link_caps, p, sizeof(*link_caps)); | ||
| 1137 | |||
| 1138 | return 0; | ||
| 1139 | } | ||
| 1140 | |||
| 1108 | static void qed_fill_link(struct qed_hwfn *hwfn, | 1141 | static void qed_fill_link(struct qed_hwfn *hwfn, |
| 1109 | struct qed_link_output *if_link) | 1142 | struct qed_link_output *if_link) |
| 1110 | { | 1143 | { |
| @@ -1116,15 +1149,9 @@ static void qed_fill_link(struct qed_hwfn *hwfn, | |||
| 1116 | memset(if_link, 0, sizeof(*if_link)); | 1149 | memset(if_link, 0, sizeof(*if_link)); |
| 1117 | 1150 | ||
| 1118 | /* Prepare source inputs */ | 1151 | /* Prepare source inputs */ |
| 1119 | if (IS_PF(hwfn->cdev)) { | 1152 | if (qed_get_link_data(hwfn, ¶ms, &link, &link_caps)) { |
| 1120 | memcpy(¶ms, qed_mcp_get_link_params(hwfn), sizeof(params)); | 1153 | dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n"); |
| 1121 | memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link)); | 1154 | return; |
| 1122 | memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn), | ||
| 1123 | sizeof(link_caps)); | ||
| 1124 | } else { | ||
| 1125 | qed_vf_get_link_params(hwfn, ¶ms); | ||
| 1126 | qed_vf_get_link_state(hwfn, &link); | ||
| 1127 | qed_vf_get_link_caps(hwfn, &link_caps); | ||
| 1128 | } | 1155 | } |
| 1129 | 1156 | ||
| 1130 | /* Set the link parameters to pass to protocol driver */ | 1157 | /* Set the link parameters to pass to protocol driver */ |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index c8667c65e685..c90b2b6ad969 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h | |||
| @@ -12,11 +12,13 @@ | |||
| 12 | #include "qed_vf.h" | 12 | #include "qed_vf.h" |
| 13 | #define QED_VF_ARRAY_LENGTH (3) | 13 | #define QED_VF_ARRAY_LENGTH (3) |
| 14 | 14 | ||
| 15 | #ifdef CONFIG_QED_SRIOV | ||
| 15 | #define IS_VF(cdev) ((cdev)->b_is_vf) | 16 | #define IS_VF(cdev) ((cdev)->b_is_vf) |
| 16 | #define IS_PF(cdev) (!((cdev)->b_is_vf)) | 17 | #define IS_PF(cdev) (!((cdev)->b_is_vf)) |
| 17 | #ifdef CONFIG_QED_SRIOV | ||
| 18 | #define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info)) | 18 | #define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info)) |
| 19 | #else | 19 | #else |
| 20 | #define IS_VF(cdev) (0) | ||
| 21 | #define IS_PF(cdev) (1) | ||
| 20 | #define IS_PF_SRIOV(p_hwfn) (0) | 22 | #define IS_PF_SRIOV(p_hwfn) (0) |
| 21 | #endif | 23 | #endif |
| 22 | #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) | 24 | #define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 5d00d1404bfc..5733d1888223 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
| @@ -87,7 +87,9 @@ static const struct pci_device_id qede_pci_tbl[] = { | |||
| 87 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, | 87 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF}, |
| 88 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, | 88 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF}, |
| 89 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, | 89 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF}, |
| 90 | #ifdef CONFIG_QED_SRIOV | ||
| 90 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, | 91 | {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF}, |
| 92 | #endif | ||
| 91 | { 0 } | 93 | { 0 } |
| 92 | }; | 94 | }; |
| 93 | 95 | ||
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c index 7f295c4d7b80..2a9228a6e4a0 100644 --- a/drivers/net/ethernet/sfc/mcdi_port.c +++ b/drivers/net/ethernet/sfc/mcdi_port.c | |||
| @@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap) | |||
| 189 | 189 | ||
| 190 | case MC_CMD_MEDIA_XFP: | 190 | case MC_CMD_MEDIA_XFP: |
| 191 | case MC_CMD_MEDIA_SFP_PLUS: | 191 | case MC_CMD_MEDIA_SFP_PLUS: |
| 192 | result |= SUPPORTED_FIBRE; | ||
| 193 | break; | ||
| 194 | |||
| 195 | case MC_CMD_MEDIA_QSFP_PLUS: | 192 | case MC_CMD_MEDIA_QSFP_PLUS: |
| 196 | result |= SUPPORTED_FIBRE; | 193 | result |= SUPPORTED_FIBRE; |
| 194 | if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) | ||
| 195 | result |= SUPPORTED_1000baseT_Full; | ||
| 196 | if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) | ||
| 197 | result |= SUPPORTED_10000baseT_Full; | ||
| 197 | if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) | 198 | if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) |
| 198 | result |= SUPPORTED_40000baseCR4_Full; | 199 | result |= SUPPORTED_40000baseCR4_Full; |
| 199 | break; | 200 | break; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index 4f7283d05588..44da877d2483 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | |||
| @@ -156,7 +156,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw, | |||
| 156 | struct netdev_hw_addr *ha; | 156 | struct netdev_hw_addr *ha; |
| 157 | 157 | ||
| 158 | netdev_for_each_uc_addr(ha, dev) { | 158 | netdev_for_each_uc_addr(ha, dev) { |
| 159 | dwmac4_set_umac_addr(ioaddr, ha->addr, reg); | 159 | dwmac4_set_umac_addr(hw, ha->addr, reg); |
| 160 | reg++; | 160 | reg++; |
| 161 | } | 161 | } |
| 162 | } | 162 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index eac45d0c75e2..a473c182c91d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
| @@ -3450,8 +3450,6 @@ int stmmac_resume(struct device *dev) | |||
| 3450 | if (!netif_running(ndev)) | 3450 | if (!netif_running(ndev)) |
| 3451 | return 0; | 3451 | return 0; |
| 3452 | 3452 | ||
| 3453 | spin_lock_irqsave(&priv->lock, flags); | ||
| 3454 | |||
| 3455 | /* Power Down bit, into the PM register, is cleared | 3453 | /* Power Down bit, into the PM register, is cleared |
| 3456 | * automatically as soon as a magic packet or a Wake-up frame | 3454 | * automatically as soon as a magic packet or a Wake-up frame |
| 3457 | * is received. Anyway, it's better to manually clear | 3455 | * is received. Anyway, it's better to manually clear |
| @@ -3459,7 +3457,9 @@ int stmmac_resume(struct device *dev) | |||
| 3459 | * from another devices (e.g. serial console). | 3457 | * from another devices (e.g. serial console). |
| 3460 | */ | 3458 | */ |
| 3461 | if (device_may_wakeup(priv->device)) { | 3459 | if (device_may_wakeup(priv->device)) { |
| 3460 | spin_lock_irqsave(&priv->lock, flags); | ||
| 3462 | priv->hw->mac->pmt(priv->hw, 0); | 3461 | priv->hw->mac->pmt(priv->hw, 0); |
| 3462 | spin_unlock_irqrestore(&priv->lock, flags); | ||
| 3463 | priv->irq_wake = 0; | 3463 | priv->irq_wake = 0; |
| 3464 | } else { | 3464 | } else { |
| 3465 | pinctrl_pm_select_default_state(priv->device); | 3465 | pinctrl_pm_select_default_state(priv->device); |
| @@ -3473,6 +3473,8 @@ int stmmac_resume(struct device *dev) | |||
| 3473 | 3473 | ||
| 3474 | netif_device_attach(ndev); | 3474 | netif_device_attach(ndev); |
| 3475 | 3475 | ||
| 3476 | spin_lock_irqsave(&priv->lock, flags); | ||
| 3477 | |||
| 3476 | priv->cur_rx = 0; | 3478 | priv->cur_rx = 0; |
| 3477 | priv->dirty_rx = 0; | 3479 | priv->dirty_rx = 0; |
| 3478 | priv->dirty_tx = 0; | 3480 | priv->dirty_tx = 0; |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 4b08a2f52b3e..e6bb0ecb12c7 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
| @@ -1339,7 +1339,7 @@ static int cpsw_ndo_open(struct net_device *ndev) | |||
| 1339 | if (priv->coal_intvl != 0) { | 1339 | if (priv->coal_intvl != 0) { |
| 1340 | struct ethtool_coalesce coal; | 1340 | struct ethtool_coalesce coal; |
| 1341 | 1341 | ||
| 1342 | coal.rx_coalesce_usecs = (priv->coal_intvl << 4); | 1342 | coal.rx_coalesce_usecs = priv->coal_intvl; |
| 1343 | cpsw_set_coalesce(ndev, &coal); | 1343 | cpsw_set_coalesce(ndev, &coal); |
| 1344 | } | 1344 | } |
| 1345 | 1345 | ||
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index db8022ae415b..08885bc8d6db 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
| @@ -1369,7 +1369,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, | |||
| 1369 | rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; | 1369 | rcdlro = (struct Vmxnet3_RxCompDescExt *)rcd; |
| 1370 | 1370 | ||
| 1371 | segCnt = rcdlro->segCnt; | 1371 | segCnt = rcdlro->segCnt; |
| 1372 | BUG_ON(segCnt <= 1); | 1372 | WARN_ON_ONCE(segCnt == 0); |
| 1373 | mss = rcdlro->mss; | 1373 | mss = rcdlro->mss; |
| 1374 | if (unlikely(segCnt <= 1)) | 1374 | if (unlikely(segCnt <= 1)) |
| 1375 | segCnt = 0; | 1375 | segCnt = 0; |
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index c4825392d64b..3d2b64e63408 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
| @@ -69,10 +69,10 @@ | |||
| 69 | /* | 69 | /* |
| 70 | * Version numbers | 70 | * Version numbers |
| 71 | */ | 71 | */ |
| 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.7.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.4.8.0-k" |
| 73 | 73 | ||
| 74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ | 74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ |
| 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040700 | 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01040800 |
| 76 | 76 | ||
| 77 | #if defined(CONFIG_PCI_MSI) | 77 | #if defined(CONFIG_PCI_MSI) |
| 78 | /* RSS only makes sense if MSI-X is supported. */ | 78 | /* RSS only makes sense if MSI-X is supported. */ |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index d0631b6cfd53..62f475e31077 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
| @@ -2540,12 +2540,14 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, | |||
| 2540 | const u8 *mac, struct station_info *sinfo) | 2540 | const u8 *mac, struct station_info *sinfo) |
| 2541 | { | 2541 | { |
| 2542 | struct brcmf_if *ifp = netdev_priv(ndev); | 2542 | struct brcmf_if *ifp = netdev_priv(ndev); |
| 2543 | struct brcmf_scb_val_le scb_val; | ||
| 2543 | s32 err = 0; | 2544 | s32 err = 0; |
| 2544 | struct brcmf_sta_info_le sta_info_le; | 2545 | struct brcmf_sta_info_le sta_info_le; |
| 2545 | u32 sta_flags; | 2546 | u32 sta_flags; |
| 2546 | u32 is_tdls_peer; | 2547 | u32 is_tdls_peer; |
| 2547 | s32 total_rssi; | 2548 | s32 total_rssi; |
| 2548 | s32 count_rssi; | 2549 | s32 count_rssi; |
| 2550 | int rssi; | ||
| 2549 | u32 i; | 2551 | u32 i; |
| 2550 | 2552 | ||
| 2551 | brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac); | 2553 | brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac); |
| @@ -2629,6 +2631,20 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, | |||
| 2629 | sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); | 2631 | sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); |
| 2630 | total_rssi /= count_rssi; | 2632 | total_rssi /= count_rssi; |
| 2631 | sinfo->signal = total_rssi; | 2633 | sinfo->signal = total_rssi; |
| 2634 | } else if (test_bit(BRCMF_VIF_STATUS_CONNECTED, | ||
| 2635 | &ifp->vif->sme_state)) { | ||
| 2636 | memset(&scb_val, 0, sizeof(scb_val)); | ||
| 2637 | err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_RSSI, | ||
| 2638 | &scb_val, sizeof(scb_val)); | ||
| 2639 | if (err) { | ||
| 2640 | brcmf_err("Could not get rssi (%d)\n", err); | ||
| 2641 | goto done; | ||
| 2642 | } else { | ||
| 2643 | rssi = le32_to_cpu(scb_val.val); | ||
| 2644 | sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL); | ||
| 2645 | sinfo->signal = rssi; | ||
| 2646 | brcmf_dbg(CONN, "RSSI %d dBm\n", rssi); | ||
| 2647 | } | ||
| 2632 | } | 2648 | } |
| 2633 | } | 2649 | } |
| 2634 | done: | 2650 | done: |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c index 68f1ce02f4bf..2b9a2bc429d6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c | |||
| @@ -1157,6 +1157,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf) | |||
| 1157 | brcmu_pkt_buf_free_skb(skb); | 1157 | brcmu_pkt_buf_free_skb(skb); |
| 1158 | return; | 1158 | return; |
| 1159 | } | 1159 | } |
| 1160 | |||
| 1161 | skb->protocol = eth_type_trans(skb, ifp->ndev); | ||
| 1160 | brcmf_netif_rx(ifp, skb); | 1162 | brcmf_netif_rx(ifp, skb); |
| 1161 | } | 1163 | } |
| 1162 | 1164 | ||
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 9ed0ed1bf514..4dd5adcdd29b 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
| @@ -2776,6 +2776,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, | |||
| 2776 | if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || | 2776 | if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || |
| 2777 | !info->attrs[HWSIM_ATTR_FLAGS] || | 2777 | !info->attrs[HWSIM_ATTR_FLAGS] || |
| 2778 | !info->attrs[HWSIM_ATTR_COOKIE] || | 2778 | !info->attrs[HWSIM_ATTR_COOKIE] || |
| 2779 | !info->attrs[HWSIM_ATTR_SIGNAL] || | ||
| 2779 | !info->attrs[HWSIM_ATTR_TX_INFO]) | 2780 | !info->attrs[HWSIM_ATTR_TX_INFO]) |
| 2780 | goto out; | 2781 | goto out; |
| 2781 | 2782 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 0f48048b8654..3a0faa8fe9d4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c | |||
| @@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m); | |||
| 54 | void rtl_addr_delay(u32 addr) | 54 | void rtl_addr_delay(u32 addr) |
| 55 | { | 55 | { |
| 56 | if (addr == 0xfe) | 56 | if (addr == 0xfe) |
| 57 | msleep(50); | 57 | mdelay(50); |
| 58 | else if (addr == 0xfd) | 58 | else if (addr == 0xfd) |
| 59 | msleep(5); | 59 | msleep(5); |
| 60 | else if (addr == 0xfc) | 60 | else if (addr == 0xfc) |
| @@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr, | |||
| 75 | rtl_addr_delay(addr); | 75 | rtl_addr_delay(addr); |
| 76 | } else { | 76 | } else { |
| 77 | rtl_set_rfreg(hw, rfpath, addr, mask, data); | 77 | rtl_set_rfreg(hw, rfpath, addr, mask, data); |
| 78 | usleep_range(1, 2); | 78 | udelay(1); |
| 79 | } | 79 | } |
| 80 | } | 80 | } |
| 81 | EXPORT_SYMBOL(rtl_rfreg_delay); | 81 | EXPORT_SYMBOL(rtl_rfreg_delay); |
| @@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data) | |||
| 86 | rtl_addr_delay(addr); | 86 | rtl_addr_delay(addr); |
| 87 | } else { | 87 | } else { |
| 88 | rtl_set_bbreg(hw, addr, MASKDWORD, data); | 88 | rtl_set_bbreg(hw, addr, MASKDWORD, data); |
| 89 | usleep_range(1, 2); | 89 | udelay(1); |
| 90 | } | 90 | } |
| 91 | } | 91 | } |
| 92 | EXPORT_SYMBOL(rtl_bb_delay); | 92 | EXPORT_SYMBOL(rtl_bb_delay); |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 78dca3193ca4..befac5b19490 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -1679,9 +1679,14 @@ static int nvme_pci_enable(struct nvme_dev *dev) | |||
| 1679 | 1679 | ||
| 1680 | static void nvme_dev_unmap(struct nvme_dev *dev) | 1680 | static void nvme_dev_unmap(struct nvme_dev *dev) |
| 1681 | { | 1681 | { |
| 1682 | struct pci_dev *pdev = to_pci_dev(dev->dev); | ||
| 1683 | int bars; | ||
| 1684 | |||
| 1682 | if (dev->bar) | 1685 | if (dev->bar) |
| 1683 | iounmap(dev->bar); | 1686 | iounmap(dev->bar); |
| 1684 | pci_release_regions(to_pci_dev(dev->dev)); | 1687 | |
| 1688 | bars = pci_select_bars(pdev, IORESOURCE_MEM); | ||
| 1689 | pci_release_selected_regions(pdev, bars); | ||
| 1685 | } | 1690 | } |
| 1686 | 1691 | ||
| 1687 | static void nvme_pci_disable(struct nvme_dev *dev) | 1692 | static void nvme_pci_disable(struct nvme_dev *dev) |
| @@ -1924,7 +1929,7 @@ static int nvme_dev_map(struct nvme_dev *dev) | |||
| 1924 | 1929 | ||
| 1925 | return 0; | 1930 | return 0; |
| 1926 | release: | 1931 | release: |
| 1927 | pci_release_regions(pdev); | 1932 | pci_release_selected_regions(pdev, bars); |
| 1928 | return -ENODEV; | 1933 | return -ENODEV; |
| 1929 | } | 1934 | } |
| 1930 | 1935 | ||
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 14f2f8c7c260..33daffc4392c 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c | |||
| @@ -395,7 +395,7 @@ static int unflatten_dt_nodes(const void *blob, | |||
| 395 | struct device_node **nodepp) | 395 | struct device_node **nodepp) |
| 396 | { | 396 | { |
| 397 | struct device_node *root; | 397 | struct device_node *root; |
| 398 | int offset = 0, depth = 0; | 398 | int offset = 0, depth = 0, initial_depth = 0; |
| 399 | #define FDT_MAX_DEPTH 64 | 399 | #define FDT_MAX_DEPTH 64 |
| 400 | unsigned int fpsizes[FDT_MAX_DEPTH]; | 400 | unsigned int fpsizes[FDT_MAX_DEPTH]; |
| 401 | struct device_node *nps[FDT_MAX_DEPTH]; | 401 | struct device_node *nps[FDT_MAX_DEPTH]; |
| @@ -405,11 +405,22 @@ static int unflatten_dt_nodes(const void *blob, | |||
| 405 | if (nodepp) | 405 | if (nodepp) |
| 406 | *nodepp = NULL; | 406 | *nodepp = NULL; |
| 407 | 407 | ||
| 408 | /* | ||
| 409 | * We're unflattening device sub-tree if @dad is valid. There are | ||
| 410 | * possibly multiple nodes in the first level of depth. We need | ||
| 411 | * set @depth to 1 to make fdt_next_node() happy as it bails | ||
| 412 | * immediately when negative @depth is found. Otherwise, the device | ||
| 413 | * nodes except the first one won't be unflattened successfully. | ||
| 414 | */ | ||
| 415 | if (dad) | ||
| 416 | depth = initial_depth = 1; | ||
| 417 | |||
| 408 | root = dad; | 418 | root = dad; |
| 409 | fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0; | 419 | fpsizes[depth] = dad ? strlen(of_node_full_name(dad)) : 0; |
| 410 | nps[depth] = dad; | 420 | nps[depth] = dad; |
| 421 | |||
| 411 | for (offset = 0; | 422 | for (offset = 0; |
| 412 | offset >= 0 && depth >= 0; | 423 | offset >= 0 && depth >= initial_depth; |
| 413 | offset = fdt_next_node(blob, offset, &depth)) { | 424 | offset = fdt_next_node(blob, offset, &depth)) { |
| 414 | if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) | 425 | if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH)) |
| 415 | continue; | 426 | continue; |
diff --git a/drivers/of/irq.c b/drivers/of/irq.c index e7bfc175b8e1..6ec743faabe8 100644 --- a/drivers/of/irq.c +++ b/drivers/of/irq.c | |||
| @@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r) | |||
| 386 | EXPORT_SYMBOL_GPL(of_irq_to_resource); | 386 | EXPORT_SYMBOL_GPL(of_irq_to_resource); |
| 387 | 387 | ||
| 388 | /** | 388 | /** |
| 389 | * of_irq_get - Decode a node's IRQ and return it as a Linux irq number | 389 | * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number |
| 390 | * @dev: pointer to device tree node | 390 | * @dev: pointer to device tree node |
| 391 | * @index: zero-based index of the irq | 391 | * @index: zero-based index of the IRQ |
| 392 | * | ||
| 393 | * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain | ||
| 394 | * is not yet created. | ||
| 395 | * | 392 | * |
| 393 | * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or | ||
| 394 | * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case | ||
| 395 | * of any other failure. | ||
| 396 | */ | 396 | */ |
| 397 | int of_irq_get(struct device_node *dev, int index) | 397 | int of_irq_get(struct device_node *dev, int index) |
| 398 | { | 398 | { |
| @@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index) | |||
| 413 | EXPORT_SYMBOL_GPL(of_irq_get); | 413 | EXPORT_SYMBOL_GPL(of_irq_get); |
| 414 | 414 | ||
| 415 | /** | 415 | /** |
| 416 | * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number | 416 | * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number |
| 417 | * @dev: pointer to device tree node | 417 | * @dev: pointer to device tree node |
| 418 | * @name: irq name | 418 | * @name: IRQ name |
| 419 | * | 419 | * |
| 420 | * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain | 420 | * Returns Linux IRQ number on success, or 0 on the IRQ mapping failure, or |
| 421 | * is not yet created, or error code in case of any other failure. | 421 | * -EPROBE_DEFER if the IRQ domain is not yet created, or error code in case |
| 422 | * of any other failure. | ||
| 422 | */ | 423 | */ |
| 423 | int of_irq_get_byname(struct device_node *dev, const char *name) | 424 | int of_irq_get_byname(struct device_node *dev, const char *name) |
| 424 | { | 425 | { |
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index ed01c0172e4a..216648233874 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c | |||
| @@ -127,8 +127,15 @@ static int __init __reserved_mem_alloc_size(unsigned long node, | |||
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | /* Need adjust the alignment to satisfy the CMA requirement */ | 129 | /* Need adjust the alignment to satisfy the CMA requirement */ |
| 130 | if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool")) | 130 | if (IS_ENABLED(CONFIG_CMA) |
| 131 | align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order)); | 131 | && of_flat_dt_is_compatible(node, "shared-dma-pool") |
| 132 | && of_get_flat_dt_prop(node, "reusable", NULL) | ||
| 133 | && !of_get_flat_dt_prop(node, "no-map", NULL)) { | ||
| 134 | unsigned long order = | ||
| 135 | max_t(unsigned long, MAX_ORDER - 1, pageblock_order); | ||
| 136 | |||
| 137 | align = max(align, (phys_addr_t)PAGE_SIZE << order); | ||
| 138 | } | ||
| 132 | 139 | ||
| 133 | prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); | 140 | prop = of_get_flat_dt_prop(node, "alloc-ranges", &len); |
| 134 | if (prop) { | 141 | if (prop) { |
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 56a17ec5b5ef..526bf23dcb49 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c | |||
| @@ -140,6 +140,19 @@ static const struct regulator_ops rpm_smps_ldo_ops = { | |||
| 140 | .enable = rpm_reg_enable, | 140 | .enable = rpm_reg_enable, |
| 141 | .disable = rpm_reg_disable, | 141 | .disable = rpm_reg_disable, |
| 142 | .is_enabled = rpm_reg_is_enabled, | 142 | .is_enabled = rpm_reg_is_enabled, |
| 143 | .list_voltage = regulator_list_voltage_linear_range, | ||
| 144 | |||
| 145 | .get_voltage = rpm_reg_get_voltage, | ||
| 146 | .set_voltage = rpm_reg_set_voltage, | ||
| 147 | |||
| 148 | .set_load = rpm_reg_set_load, | ||
| 149 | }; | ||
| 150 | |||
| 151 | static const struct regulator_ops rpm_smps_ldo_ops_fixed = { | ||
| 152 | .enable = rpm_reg_enable, | ||
| 153 | .disable = rpm_reg_disable, | ||
| 154 | .is_enabled = rpm_reg_is_enabled, | ||
| 155 | .list_voltage = regulator_list_voltage_linear_range, | ||
| 143 | 156 | ||
| 144 | .get_voltage = rpm_reg_get_voltage, | 157 | .get_voltage = rpm_reg_get_voltage, |
| 145 | .set_voltage = rpm_reg_set_voltage, | 158 | .set_voltage = rpm_reg_set_voltage, |
| @@ -247,7 +260,7 @@ static const struct regulator_desc pm8941_nldo = { | |||
| 247 | static const struct regulator_desc pm8941_lnldo = { | 260 | static const struct regulator_desc pm8941_lnldo = { |
| 248 | .fixed_uV = 1740000, | 261 | .fixed_uV = 1740000, |
| 249 | .n_voltages = 1, | 262 | .n_voltages = 1, |
| 250 | .ops = &rpm_smps_ldo_ops, | 263 | .ops = &rpm_smps_ldo_ops_fixed, |
| 251 | }; | 264 | }; |
| 252 | 265 | ||
| 253 | static const struct regulator_desc pm8941_switch = { | 266 | static const struct regulator_desc pm8941_switch = { |
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c index 572816e30095..c139890c1514 100644 --- a/drivers/regulator/tps51632-regulator.c +++ b/drivers/regulator/tps51632-regulator.c | |||
| @@ -94,11 +94,14 @@ static int tps51632_dcdc_set_ramp_delay(struct regulator_dev *rdev, | |||
| 94 | int ramp_delay) | 94 | int ramp_delay) |
| 95 | { | 95 | { |
| 96 | struct tps51632_chip *tps = rdev_get_drvdata(rdev); | 96 | struct tps51632_chip *tps = rdev_get_drvdata(rdev); |
| 97 | int bit = ramp_delay/6000; | 97 | int bit; |
| 98 | int ret; | 98 | int ret; |
| 99 | 99 | ||
| 100 | if (bit) | 100 | if (ramp_delay == 0) |
| 101 | bit--; | 101 | bit = 0; |
| 102 | else | ||
| 103 | bit = DIV_ROUND_UP(ramp_delay, 6000) - 1; | ||
| 104 | |||
| 102 | ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit)); | 105 | ret = regmap_write(tps->regmap, TPS51632_SLEW_REGS, BIT(bit)); |
| 103 | if (ret < 0) | 106 | if (ret < 0) |
| 104 | dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret); | 107 | dev_err(tps->dev, "SLEW reg write failed, err %d\n", ret); |
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 3408578b08d6..ff41c310c900 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
| @@ -230,6 +230,7 @@ static struct { | |||
| 230 | {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, | 230 | {"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, |
| 231 | {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, | 231 | {"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC}, |
| 232 | {"Promise", "", NULL, BLIST_SPARSELUN}, | 232 | {"Promise", "", NULL, BLIST_SPARSELUN}, |
| 233 | {"QEMU", "QEMU CD-ROM", NULL, BLIST_SKIP_VPD_PAGES}, | ||
| 233 | {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, | 234 | {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024}, |
| 234 | {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, | 235 | {"SYNOLOGY", "iSCSI Storage", NULL, BLIST_MAX_1024}, |
| 235 | {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, | 236 | {"QUANTUM", "XP34301", "1071", BLIST_NOTQ}, |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index f459dff30512..60bff78e9ead 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -2867,10 +2867,10 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
| 2867 | if (sdkp->opt_xfer_blocks && | 2867 | if (sdkp->opt_xfer_blocks && |
| 2868 | sdkp->opt_xfer_blocks <= dev_max && | 2868 | sdkp->opt_xfer_blocks <= dev_max && |
| 2869 | sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && | 2869 | sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS && |
| 2870 | sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE) | 2870 | logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) { |
| 2871 | rw_max = q->limits.io_opt = | 2871 | q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); |
| 2872 | sdkp->opt_xfer_blocks * sdp->sector_size; | 2872 | rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks); |
| 2873 | else | 2873 | } else |
| 2874 | rw_max = BLK_DEF_MAX_SECTORS; | 2874 | rw_max = BLK_DEF_MAX_SECTORS; |
| 2875 | 2875 | ||
| 2876 | /* Combine with controller limits */ | 2876 | /* Combine with controller limits */ |
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 654630bb7d0e..765a6f1ac1b7 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h | |||
| @@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo | |||
| 151 | return blocks << (ilog2(sdev->sector_size) - 9); | 151 | return blocks << (ilog2(sdev->sector_size) - 9); |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks) | ||
| 155 | { | ||
| 156 | return blocks * sdev->sector_size; | ||
| 157 | } | ||
| 158 | |||
| 154 | /* | 159 | /* |
| 155 | * A DIF-capable target device can be formatted with different | 160 | * A DIF-capable target device can be formatted with different |
| 156 | * protection schemes. Currently 0 through 3 are defined: | 161 | * protection schemes. Currently 0 through 3 are defined: |
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 6ceac4f2d4b2..5b4b47ed948b 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
| @@ -857,14 +857,6 @@ __cpufreq_cooling_register(struct device_node *np, | |||
| 857 | goto free_power_table; | 857 | goto free_power_table; |
| 858 | } | 858 | } |
| 859 | 859 | ||
| 860 | snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", | ||
| 861 | cpufreq_dev->id); | ||
| 862 | |||
| 863 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, | ||
| 864 | &cpufreq_cooling_ops); | ||
| 865 | if (IS_ERR(cool_dev)) | ||
| 866 | goto remove_idr; | ||
| 867 | |||
| 868 | /* Fill freq-table in descending order of frequencies */ | 860 | /* Fill freq-table in descending order of frequencies */ |
| 869 | for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { | 861 | for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { |
| 870 | freq = find_next_max(table, freq); | 862 | freq = find_next_max(table, freq); |
| @@ -877,6 +869,14 @@ __cpufreq_cooling_register(struct device_node *np, | |||
| 877 | pr_debug("%s: freq:%u KHz\n", __func__, freq); | 869 | pr_debug("%s: freq:%u KHz\n", __func__, freq); |
| 878 | } | 870 | } |
| 879 | 871 | ||
| 872 | snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", | ||
| 873 | cpufreq_dev->id); | ||
| 874 | |||
| 875 | cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, | ||
| 876 | &cpufreq_cooling_ops); | ||
| 877 | if (IS_ERR(cool_dev)) | ||
| 878 | goto remove_idr; | ||
| 879 | |||
| 880 | cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; | 880 | cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; |
| 881 | cpufreq_dev->cool_dev = cool_dev; | 881 | cpufreq_dev->cool_dev = cool_dev; |
| 882 | 882 | ||
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 427c36b430a6..46025688f1d0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
| @@ -1373,7 +1373,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, | |||
| 1373 | 1373 | ||
| 1374 | if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) { | 1374 | if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) { |
| 1375 | BUG_ON(tm->slot != 0); | 1375 | BUG_ON(tm->slot != 0); |
| 1376 | eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start); | 1376 | eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start, |
| 1377 | eb->len); | ||
| 1377 | if (!eb_rewin) { | 1378 | if (!eb_rewin) { |
| 1378 | btrfs_tree_read_unlock_blocking(eb); | 1379 | btrfs_tree_read_unlock_blocking(eb); |
| 1379 | free_extent_buffer(eb); | 1380 | free_extent_buffer(eb); |
| @@ -1454,7 +1455,8 @@ get_old_root(struct btrfs_root *root, u64 time_seq) | |||
| 1454 | } else if (old_root) { | 1455 | } else if (old_root) { |
| 1455 | btrfs_tree_read_unlock(eb_root); | 1456 | btrfs_tree_read_unlock(eb_root); |
| 1456 | free_extent_buffer(eb_root); | 1457 | free_extent_buffer(eb_root); |
| 1457 | eb = alloc_dummy_extent_buffer(root->fs_info, logical); | 1458 | eb = alloc_dummy_extent_buffer(root->fs_info, logical, |
| 1459 | root->nodesize); | ||
| 1458 | } else { | 1460 | } else { |
| 1459 | btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK); | 1461 | btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK); |
| 1460 | eb = btrfs_clone_extent_buffer(eb_root); | 1462 | eb = btrfs_clone_extent_buffer(eb_root); |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 6628fca9f4ed..1142127f6e5e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
| @@ -1147,7 +1147,8 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, | |||
| 1147 | u64 bytenr) | 1147 | u64 bytenr) |
| 1148 | { | 1148 | { |
| 1149 | if (btrfs_test_is_dummy_root(root)) | 1149 | if (btrfs_test_is_dummy_root(root)) |
| 1150 | return alloc_test_extent_buffer(root->fs_info, bytenr); | 1150 | return alloc_test_extent_buffer(root->fs_info, bytenr, |
| 1151 | root->nodesize); | ||
| 1151 | return alloc_extent_buffer(root->fs_info, bytenr); | 1152 | return alloc_extent_buffer(root->fs_info, bytenr); |
| 1152 | } | 1153 | } |
| 1153 | 1154 | ||
| @@ -1314,14 +1315,16 @@ static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info, | |||
| 1314 | 1315 | ||
| 1315 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | 1316 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
| 1316 | /* Should only be used by the testing infrastructure */ | 1317 | /* Should only be used by the testing infrastructure */ |
| 1317 | struct btrfs_root *btrfs_alloc_dummy_root(void) | 1318 | struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize) |
| 1318 | { | 1319 | { |
| 1319 | struct btrfs_root *root; | 1320 | struct btrfs_root *root; |
| 1320 | 1321 | ||
| 1321 | root = btrfs_alloc_root(NULL, GFP_KERNEL); | 1322 | root = btrfs_alloc_root(NULL, GFP_KERNEL); |
| 1322 | if (!root) | 1323 | if (!root) |
| 1323 | return ERR_PTR(-ENOMEM); | 1324 | return ERR_PTR(-ENOMEM); |
| 1324 | __setup_root(4096, 4096, 4096, root, NULL, 1); | 1325 | /* We don't use the stripesize in selftest, set it as sectorsize */ |
| 1326 | __setup_root(nodesize, sectorsize, sectorsize, root, NULL, | ||
| 1327 | BTRFS_ROOT_TREE_OBJECTID); | ||
| 1325 | set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state); | 1328 | set_bit(BTRFS_ROOT_DUMMY_ROOT, &root->state); |
| 1326 | root->alloc_bytenr = 0; | 1329 | root->alloc_bytenr = 0; |
| 1327 | 1330 | ||
| @@ -4130,6 +4133,17 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, | |||
| 4130 | * Hint to catch really bogus numbers, bitflips or so, more exact checks are | 4133 | * Hint to catch really bogus numbers, bitflips or so, more exact checks are |
| 4131 | * done later | 4134 | * done later |
| 4132 | */ | 4135 | */ |
| 4136 | if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) { | ||
| 4137 | btrfs_err(fs_info, "bytes_used is too small %llu", | ||
| 4138 | btrfs_super_bytes_used(sb)); | ||
| 4139 | ret = -EINVAL; | ||
| 4140 | } | ||
| 4141 | if (!is_power_of_2(btrfs_super_stripesize(sb)) || | ||
| 4142 | btrfs_super_stripesize(sb) != sectorsize) { | ||
| 4143 | btrfs_err(fs_info, "invalid stripesize %u", | ||
| 4144 | btrfs_super_stripesize(sb)); | ||
| 4145 | ret = -EINVAL; | ||
| 4146 | } | ||
| 4133 | if (btrfs_super_num_devices(sb) > (1UL << 31)) | 4147 | if (btrfs_super_num_devices(sb) > (1UL << 31)) |
| 4134 | printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n", | 4148 | printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n", |
| 4135 | btrfs_super_num_devices(sb)); | 4149 | btrfs_super_num_devices(sb)); |
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 8e79d0070bcf..acba821499a9 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h | |||
| @@ -90,7 +90,7 @@ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, | |||
| 90 | void btrfs_free_fs_root(struct btrfs_root *root); | 90 | void btrfs_free_fs_root(struct btrfs_root *root); |
| 91 | 91 | ||
| 92 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | 92 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
| 93 | struct btrfs_root *btrfs_alloc_dummy_root(void); | 93 | struct btrfs_root *btrfs_alloc_dummy_root(u32 sectorsize, u32 nodesize); |
| 94 | #endif | 94 | #endif |
| 95 | 95 | ||
| 96 | /* | 96 | /* |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 6e953de83f08..a3412d68ad37 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
| @@ -4728,16 +4728,16 @@ err: | |||
| 4728 | } | 4728 | } |
| 4729 | 4729 | ||
| 4730 | struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, | 4730 | struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, |
| 4731 | u64 start) | 4731 | u64 start, u32 nodesize) |
| 4732 | { | 4732 | { |
| 4733 | unsigned long len; | 4733 | unsigned long len; |
| 4734 | 4734 | ||
| 4735 | if (!fs_info) { | 4735 | if (!fs_info) { |
| 4736 | /* | 4736 | /* |
| 4737 | * Called only from tests that don't always have a fs_info | 4737 | * Called only from tests that don't always have a fs_info |
| 4738 | * available, but we know that nodesize is 4096 | 4738 | * available |
| 4739 | */ | 4739 | */ |
| 4740 | len = 4096; | 4740 | len = nodesize; |
| 4741 | } else { | 4741 | } else { |
| 4742 | len = fs_info->tree_root->nodesize; | 4742 | len = fs_info->tree_root->nodesize; |
| 4743 | } | 4743 | } |
| @@ -4833,7 +4833,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, | |||
| 4833 | 4833 | ||
| 4834 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS | 4834 | #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS |
| 4835 | struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, | 4835 | struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, |
| 4836 | u64 start) | 4836 | u64 start, u32 nodesize) |
| 4837 | { | 4837 | { |
| 4838 | struct extent_buffer *eb, *exists = NULL; | 4838 | struct extent_buffer *eb, *exists = NULL; |
| 4839 | int ret; | 4839 | int ret; |
| @@ -4841,7 +4841,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, | |||
| 4841 | eb = find_extent_buffer(fs_info, start); | 4841 | eb = find_extent_buffer(fs_info, start); |
| 4842 | if (eb) | 4842 | if (eb) |
| 4843 | return eb; | 4843 | return eb; |
| 4844 | eb = alloc_dummy_extent_buffer(fs_info, start); | 4844 | eb = alloc_dummy_extent_buffer(fs_info, start, nodesize); |
| 4845 | if (!eb) | 4845 | if (!eb) |
| 4846 | return NULL; | 4846 | return NULL; |
| 4847 | eb->fs_info = fs_info; | 4847 | eb->fs_info = fs_info; |
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 1baf19c9b79d..c0c1c4fef6ce 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
| @@ -348,7 +348,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, | |||
| 348 | struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, | 348 | struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, |
| 349 | u64 start, unsigned long len); | 349 | u64 start, unsigned long len); |
| 350 | struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, | 350 | struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, |
| 351 | u64 start); | 351 | u64 start, u32 nodesize); |
| 352 | struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src); | 352 | struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src); |
| 353 | struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, | 353 | struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, |
| 354 | u64 start); | 354 | u64 start); |
| @@ -468,5 +468,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode, | |||
| 468 | u64 *end, u64 max_bytes); | 468 | u64 *end, u64 max_bytes); |
| 469 | #endif | 469 | #endif |
| 470 | struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, | 470 | struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, |
| 471 | u64 start); | 471 | u64 start, u32 nodesize); |
| 472 | #endif | 472 | #endif |
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index c6dc1183f542..69d270f6602c 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
| @@ -29,7 +29,7 @@ | |||
| 29 | #include "inode-map.h" | 29 | #include "inode-map.h" |
| 30 | #include "volumes.h" | 30 | #include "volumes.h" |
| 31 | 31 | ||
| 32 | #define BITS_PER_BITMAP (PAGE_SIZE * 8) | 32 | #define BITS_PER_BITMAP (PAGE_SIZE * 8UL) |
| 33 | #define MAX_CACHE_BYTES_PER_GIG SZ_32K | 33 | #define MAX_CACHE_BYTES_PER_GIG SZ_32K |
| 34 | 34 | ||
| 35 | struct btrfs_trim_range { | 35 | struct btrfs_trim_range { |
| @@ -1415,11 +1415,11 @@ static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl, | |||
| 1415 | u64 offset) | 1415 | u64 offset) |
| 1416 | { | 1416 | { |
| 1417 | u64 bitmap_start; | 1417 | u64 bitmap_start; |
| 1418 | u32 bytes_per_bitmap; | 1418 | u64 bytes_per_bitmap; |
| 1419 | 1419 | ||
| 1420 | bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; | 1420 | bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; |
| 1421 | bitmap_start = offset - ctl->start; | 1421 | bitmap_start = offset - ctl->start; |
| 1422 | bitmap_start = div_u64(bitmap_start, bytes_per_bitmap); | 1422 | bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); |
| 1423 | bitmap_start *= bytes_per_bitmap; | 1423 | bitmap_start *= bytes_per_bitmap; |
| 1424 | bitmap_start += ctl->start; | 1424 | bitmap_start += ctl->start; |
| 1425 | 1425 | ||
| @@ -1638,10 +1638,10 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) | |||
| 1638 | u64 bitmap_bytes; | 1638 | u64 bitmap_bytes; |
| 1639 | u64 extent_bytes; | 1639 | u64 extent_bytes; |
| 1640 | u64 size = block_group->key.offset; | 1640 | u64 size = block_group->key.offset; |
| 1641 | u32 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; | 1641 | u64 bytes_per_bg = BITS_PER_BITMAP * ctl->unit; |
| 1642 | u32 max_bitmaps = div_u64(size + bytes_per_bg - 1, bytes_per_bg); | 1642 | u64 max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); |
| 1643 | 1643 | ||
| 1644 | max_bitmaps = max_t(u32, max_bitmaps, 1); | 1644 | max_bitmaps = max_t(u64, max_bitmaps, 1); |
| 1645 | 1645 | ||
| 1646 | ASSERT(ctl->total_bitmaps <= max_bitmaps); | 1646 | ASSERT(ctl->total_bitmaps <= max_bitmaps); |
| 1647 | 1647 | ||
| @@ -1660,7 +1660,7 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) | |||
| 1660 | * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as | 1660 | * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as |
| 1661 | * we add more bitmaps. | 1661 | * we add more bitmaps. |
| 1662 | */ | 1662 | */ |
| 1663 | bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_SIZE; | 1663 | bitmap_bytes = (ctl->total_bitmaps + 1) * ctl->unit; |
| 1664 | 1664 | ||
| 1665 | if (bitmap_bytes >= max_bytes) { | 1665 | if (bitmap_bytes >= max_bytes) { |
| 1666 | ctl->extents_thresh = 0; | 1666 | ctl->extents_thresh = 0; |
| @@ -3662,7 +3662,7 @@ have_info: | |||
| 3662 | if (tmp->offset + tmp->bytes < offset) | 3662 | if (tmp->offset + tmp->bytes < offset) |
| 3663 | break; | 3663 | break; |
| 3664 | if (offset + bytes < tmp->offset) { | 3664 | if (offset + bytes < tmp->offset) { |
| 3665 | n = rb_prev(&info->offset_index); | 3665 | n = rb_prev(&tmp->offset_index); |
| 3666 | continue; | 3666 | continue; |
| 3667 | } | 3667 | } |
| 3668 | info = tmp; | 3668 | info = tmp; |
| @@ -3676,7 +3676,7 @@ have_info: | |||
| 3676 | if (offset + bytes < tmp->offset) | 3676 | if (offset + bytes < tmp->offset) |
| 3677 | break; | 3677 | break; |
| 3678 | if (tmp->offset + tmp->bytes < offset) { | 3678 | if (tmp->offset + tmp->bytes < offset) { |
| 3679 | n = rb_next(&info->offset_index); | 3679 | n = rb_next(&tmp->offset_index); |
| 3680 | continue; | 3680 | continue; |
| 3681 | } | 3681 | } |
| 3682 | info = tmp; | 3682 | info = tmp; |
diff --git a/fs/btrfs/hash.c b/fs/btrfs/hash.c index aae520b2aee5..a97fdc156a03 100644 --- a/fs/btrfs/hash.c +++ b/fs/btrfs/hash.c | |||
| @@ -24,6 +24,11 @@ int __init btrfs_hash_init(void) | |||
| 24 | return PTR_ERR_OR_ZERO(tfm); | 24 | return PTR_ERR_OR_ZERO(tfm); |
| 25 | } | 25 | } |
| 26 | 26 | ||
| 27 | const char* btrfs_crc32c_impl(void) | ||
| 28 | { | ||
| 29 | return crypto_tfm_alg_driver_name(crypto_shash_tfm(tfm)); | ||
| 30 | } | ||
| 31 | |||
| 27 | void btrfs_hash_exit(void) | 32 | void btrfs_hash_exit(void) |
| 28 | { | 33 | { |
| 29 | crypto_free_shash(tfm); | 34 | crypto_free_shash(tfm); |
diff --git a/fs/btrfs/hash.h b/fs/btrfs/hash.h index 118a2316e5d3..c3a2ec554361 100644 --- a/fs/btrfs/hash.h +++ b/fs/btrfs/hash.h | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | int __init btrfs_hash_init(void); | 22 | int __init btrfs_hash_init(void); |
| 23 | 23 | ||
| 24 | void btrfs_hash_exit(void); | 24 | void btrfs_hash_exit(void); |
| 25 | const char* btrfs_crc32c_impl(void); | ||
| 25 | 26 | ||
| 26 | u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length); | 27 | u32 btrfs_crc32c(u32 crc, const void *address, unsigned int length); |
| 27 | 28 | ||
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 4e59a91a11e0..4339b6613f19 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
| @@ -2303,7 +2303,7 @@ static void btrfs_interface_exit(void) | |||
| 2303 | 2303 | ||
| 2304 | static void btrfs_print_mod_info(void) | 2304 | static void btrfs_print_mod_info(void) |
| 2305 | { | 2305 | { |
| 2306 | printk(KERN_INFO "Btrfs loaded" | 2306 | printk(KERN_INFO "Btrfs loaded, crc32c=%s" |
| 2307 | #ifdef CONFIG_BTRFS_DEBUG | 2307 | #ifdef CONFIG_BTRFS_DEBUG |
| 2308 | ", debug=on" | 2308 | ", debug=on" |
| 2309 | #endif | 2309 | #endif |
| @@ -2313,33 +2313,48 @@ static void btrfs_print_mod_info(void) | |||
| 2313 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY | 2313 | #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY |
| 2314 | ", integrity-checker=on" | 2314 | ", integrity-checker=on" |
| 2315 | #endif | 2315 | #endif |
| 2316 | "\n"); | 2316 | "\n", |
| 2317 | btrfs_crc32c_impl()); | ||
| 2317 | } | 2318 | } |
| 2318 | 2319 | ||
| 2319 | static int btrfs_run_sanity_tests(void) | 2320 | static int btrfs_run_sanity_tests(void) |
| 2320 | { | 2321 | { |
| 2321 | int ret; | 2322 | int ret, i; |
| 2322 | 2323 | u32 sectorsize, nodesize; | |
| 2324 | u32 test_sectorsize[] = { | ||
| 2325 | PAGE_SIZE, | ||
| 2326 | }; | ||
| 2323 | ret = btrfs_init_test_fs(); | 2327 | ret = btrfs_init_test_fs(); |
| 2324 | if (ret) | 2328 | if (ret) |
| 2325 | return ret; | 2329 | return ret; |
| 2326 | 2330 | for (i = 0; i < ARRAY_SIZE(test_sectorsize); i++) { | |
| 2327 | ret = btrfs_test_free_space_cache(); | 2331 | sectorsize = test_sectorsize[i]; |
| 2328 | if (ret) | 2332 | for (nodesize = sectorsize; |
| 2329 | goto out; | 2333 | nodesize <= BTRFS_MAX_METADATA_BLOCKSIZE; |
| 2330 | ret = btrfs_test_extent_buffer_operations(); | 2334 | nodesize <<= 1) { |
| 2331 | if (ret) | 2335 | pr_info("BTRFS: selftest: sectorsize: %u nodesize: %u\n", |
| 2332 | goto out; | 2336 | sectorsize, nodesize); |
| 2333 | ret = btrfs_test_extent_io(); | 2337 | ret = btrfs_test_free_space_cache(sectorsize, nodesize); |
| 2334 | if (ret) | 2338 | if (ret) |
| 2335 | goto out; | 2339 | goto out; |
| 2336 | ret = btrfs_test_inodes(); | 2340 | ret = btrfs_test_extent_buffer_operations(sectorsize, |
| 2337 | if (ret) | 2341 | nodesize); |
| 2338 | goto out; | 2342 | if (ret) |
| 2339 | ret = btrfs_test_qgroups(); | 2343 | goto out; |
| 2340 | if (ret) | 2344 | ret = btrfs_test_extent_io(sectorsize, nodesize); |
| 2341 | goto out; | 2345 | if (ret) |
| 2342 | ret = btrfs_test_free_space_tree(); | 2346 | goto out; |
| 2347 | ret = btrfs_test_inodes(sectorsize, nodesize); | ||
| 2348 | if (ret) | ||
| 2349 | goto out; | ||
| 2350 | ret = btrfs_test_qgroups(sectorsize, nodesize); | ||
| 2351 | if (ret) | ||
| 2352 | goto out; | ||
| 2353 | ret = btrfs_test_free_space_tree(sectorsize, nodesize); | ||
| 2354 | if (ret) | ||
| 2355 | goto out; | ||
| 2356 | } | ||
| 2357 | } | ||
| 2343 | out: | 2358 | out: |
| 2344 | btrfs_destroy_test_fs(); | 2359 | btrfs_destroy_test_fs(); |
| 2345 | return ret; | 2360 | return ret; |
diff --git a/fs/btrfs/tests/btrfs-tests.c b/fs/btrfs/tests/btrfs-tests.c index f54bf450bad3..10eb249ef891 100644 --- a/fs/btrfs/tests/btrfs-tests.c +++ b/fs/btrfs/tests/btrfs-tests.c | |||
| @@ -175,7 +175,7 @@ void btrfs_free_dummy_root(struct btrfs_root *root) | |||
| 175 | } | 175 | } |
| 176 | 176 | ||
| 177 | struct btrfs_block_group_cache * | 177 | struct btrfs_block_group_cache * |
| 178 | btrfs_alloc_dummy_block_group(unsigned long length) | 178 | btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize) |
| 179 | { | 179 | { |
| 180 | struct btrfs_block_group_cache *cache; | 180 | struct btrfs_block_group_cache *cache; |
| 181 | 181 | ||
| @@ -192,8 +192,8 @@ btrfs_alloc_dummy_block_group(unsigned long length) | |||
| 192 | cache->key.objectid = 0; | 192 | cache->key.objectid = 0; |
| 193 | cache->key.offset = length; | 193 | cache->key.offset = length; |
| 194 | cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; | 194 | cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY; |
| 195 | cache->sectorsize = 4096; | 195 | cache->sectorsize = sectorsize; |
| 196 | cache->full_stripe_len = 4096; | 196 | cache->full_stripe_len = sectorsize; |
| 197 | 197 | ||
| 198 | INIT_LIST_HEAD(&cache->list); | 198 | INIT_LIST_HEAD(&cache->list); |
| 199 | INIT_LIST_HEAD(&cache->cluster_list); | 199 | INIT_LIST_HEAD(&cache->cluster_list); |
diff --git a/fs/btrfs/tests/btrfs-tests.h b/fs/btrfs/tests/btrfs-tests.h index 054b8c73c951..66fb6b701eb7 100644 --- a/fs/btrfs/tests/btrfs-tests.h +++ b/fs/btrfs/tests/btrfs-tests.h | |||
| @@ -26,27 +26,28 @@ | |||
| 26 | struct btrfs_root; | 26 | struct btrfs_root; |
| 27 | struct btrfs_trans_handle; | 27 | struct btrfs_trans_handle; |
| 28 | 28 | ||
| 29 | int btrfs_test_free_space_cache(void); | 29 | int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize); |
| 30 | int btrfs_test_extent_buffer_operations(void); | 30 | int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize); |
| 31 | int btrfs_test_extent_io(void); | 31 | int btrfs_test_extent_io(u32 sectorsize, u32 nodesize); |
| 32 | int btrfs_test_inodes(void); | 32 | int btrfs_test_inodes(u32 sectorsize, u32 nodesize); |
| 33 | int btrfs_test_qgroups(void); | 33 | int btrfs_test_qgroups(u32 sectorsize, u32 nodesize); |
| 34 | int btrfs_test_free_space_tree(void); | 34 | int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize); |
| 35 | int btrfs_init_test_fs(void); | 35 | int btrfs_init_test_fs(void); |
| 36 | void btrfs_destroy_test_fs(void); | 36 | void btrfs_destroy_test_fs(void); |
| 37 | struct inode *btrfs_new_test_inode(void); | 37 | struct inode *btrfs_new_test_inode(void); |
| 38 | struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void); | 38 | struct btrfs_fs_info *btrfs_alloc_dummy_fs_info(void); |
| 39 | void btrfs_free_dummy_root(struct btrfs_root *root); | 39 | void btrfs_free_dummy_root(struct btrfs_root *root); |
| 40 | struct btrfs_block_group_cache * | 40 | struct btrfs_block_group_cache * |
| 41 | btrfs_alloc_dummy_block_group(unsigned long length); | 41 | btrfs_alloc_dummy_block_group(unsigned long length, u32 sectorsize); |
| 42 | void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache); | 42 | void btrfs_free_dummy_block_group(struct btrfs_block_group_cache *cache); |
| 43 | void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans); | 43 | void btrfs_init_dummy_trans(struct btrfs_trans_handle *trans); |
| 44 | #else | 44 | #else |
| 45 | static inline int btrfs_test_free_space_cache(void) | 45 | static inline int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize) |
| 46 | { | 46 | { |
| 47 | return 0; | 47 | return 0; |
| 48 | } | 48 | } |
| 49 | static inline int btrfs_test_extent_buffer_operations(void) | 49 | static inline int btrfs_test_extent_buffer_operations(u32 sectorsize, |
| 50 | u32 nodesize) | ||
| 50 | { | 51 | { |
| 51 | return 0; | 52 | return 0; |
| 52 | } | 53 | } |
| @@ -57,19 +58,19 @@ static inline int btrfs_init_test_fs(void) | |||
| 57 | static inline void btrfs_destroy_test_fs(void) | 58 | static inline void btrfs_destroy_test_fs(void) |
| 58 | { | 59 | { |
| 59 | } | 60 | } |
| 60 | static inline int btrfs_test_extent_io(void) | 61 | static inline int btrfs_test_extent_io(u32 sectorsize, u32 nodesize) |
| 61 | { | 62 | { |
| 62 | return 0; | 63 | return 0; |
| 63 | } | 64 | } |
| 64 | static inline int btrfs_test_inodes(void) | 65 | static inline int btrfs_test_inodes(u32 sectorsize, u32 nodesize) |
| 65 | { | 66 | { |
| 66 | return 0; | 67 | return 0; |
| 67 | } | 68 | } |
| 68 | static inline int btrfs_test_qgroups(void) | 69 | static inline int btrfs_test_qgroups(u32 sectorsize, u32 nodesize) |
| 69 | { | 70 | { |
| 70 | return 0; | 71 | return 0; |
| 71 | } | 72 | } |
| 72 | static inline int btrfs_test_free_space_tree(void) | 73 | static inline int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize) |
| 73 | { | 74 | { |
| 74 | return 0; | 75 | return 0; |
| 75 | } | 76 | } |
diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c index f51963a8f929..4f8cbd1ec5ee 100644 --- a/fs/btrfs/tests/extent-buffer-tests.c +++ b/fs/btrfs/tests/extent-buffer-tests.c | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | #include "../extent_io.h" | 22 | #include "../extent_io.h" |
| 23 | #include "../disk-io.h" | 23 | #include "../disk-io.h" |
| 24 | 24 | ||
| 25 | static int test_btrfs_split_item(void) | 25 | static int test_btrfs_split_item(u32 sectorsize, u32 nodesize) |
| 26 | { | 26 | { |
| 27 | struct btrfs_path *path; | 27 | struct btrfs_path *path; |
| 28 | struct btrfs_root *root; | 28 | struct btrfs_root *root; |
| @@ -40,7 +40,7 @@ static int test_btrfs_split_item(void) | |||
| 40 | 40 | ||
| 41 | test_msg("Running btrfs_split_item tests\n"); | 41 | test_msg("Running btrfs_split_item tests\n"); |
| 42 | 42 | ||
| 43 | root = btrfs_alloc_dummy_root(); | 43 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 44 | if (IS_ERR(root)) { | 44 | if (IS_ERR(root)) { |
| 45 | test_msg("Could not allocate root\n"); | 45 | test_msg("Could not allocate root\n"); |
| 46 | return PTR_ERR(root); | 46 | return PTR_ERR(root); |
| @@ -53,7 +53,8 @@ static int test_btrfs_split_item(void) | |||
| 53 | return -ENOMEM; | 53 | return -ENOMEM; |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096); | 56 | path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, nodesize, |
| 57 | nodesize); | ||
| 57 | if (!eb) { | 58 | if (!eb) { |
| 58 | test_msg("Could not allocate dummy buffer\n"); | 59 | test_msg("Could not allocate dummy buffer\n"); |
| 59 | ret = -ENOMEM; | 60 | ret = -ENOMEM; |
| @@ -222,8 +223,8 @@ out: | |||
| 222 | return ret; | 223 | return ret; |
| 223 | } | 224 | } |
| 224 | 225 | ||
| 225 | int btrfs_test_extent_buffer_operations(void) | 226 | int btrfs_test_extent_buffer_operations(u32 sectorsize, u32 nodesize) |
| 226 | { | 227 | { |
| 227 | test_msg("Running extent buffer operation tests"); | 228 | test_msg("Running extent buffer operation tests\n"); |
| 228 | return test_btrfs_split_item(); | 229 | return test_btrfs_split_item(sectorsize, nodesize); |
| 229 | } | 230 | } |
diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index 55724607f79b..d19ab0317283 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
| 22 | #include <linux/sizes.h> | 22 | #include <linux/sizes.h> |
| 23 | #include "btrfs-tests.h" | 23 | #include "btrfs-tests.h" |
| 24 | #include "../ctree.h" | ||
| 24 | #include "../extent_io.h" | 25 | #include "../extent_io.h" |
| 25 | 26 | ||
| 26 | #define PROCESS_UNLOCK (1 << 0) | 27 | #define PROCESS_UNLOCK (1 << 0) |
| @@ -65,7 +66,7 @@ static noinline int process_page_range(struct inode *inode, u64 start, u64 end, | |||
| 65 | return count; | 66 | return count; |
| 66 | } | 67 | } |
| 67 | 68 | ||
| 68 | static int test_find_delalloc(void) | 69 | static int test_find_delalloc(u32 sectorsize) |
| 69 | { | 70 | { |
| 70 | struct inode *inode; | 71 | struct inode *inode; |
| 71 | struct extent_io_tree tmp; | 72 | struct extent_io_tree tmp; |
| @@ -113,7 +114,7 @@ static int test_find_delalloc(void) | |||
| 113 | * |--- delalloc ---| | 114 | * |--- delalloc ---| |
| 114 | * |--- search ---| | 115 | * |--- search ---| |
| 115 | */ | 116 | */ |
| 116 | set_extent_delalloc(&tmp, 0, 4095, NULL); | 117 | set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL); |
| 117 | start = 0; | 118 | start = 0; |
| 118 | end = 0; | 119 | end = 0; |
| 119 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, | 120 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, |
| @@ -122,9 +123,9 @@ static int test_find_delalloc(void) | |||
| 122 | test_msg("Should have found at least one delalloc\n"); | 123 | test_msg("Should have found at least one delalloc\n"); |
| 123 | goto out_bits; | 124 | goto out_bits; |
| 124 | } | 125 | } |
| 125 | if (start != 0 || end != 4095) { | 126 | if (start != 0 || end != (sectorsize - 1)) { |
| 126 | test_msg("Expected start 0 end 4095, got start %Lu end %Lu\n", | 127 | test_msg("Expected start 0 end %u, got start %llu end %llu\n", |
| 127 | start, end); | 128 | sectorsize - 1, start, end); |
| 128 | goto out_bits; | 129 | goto out_bits; |
| 129 | } | 130 | } |
| 130 | unlock_extent(&tmp, start, end); | 131 | unlock_extent(&tmp, start, end); |
| @@ -144,7 +145,7 @@ static int test_find_delalloc(void) | |||
| 144 | test_msg("Couldn't find the locked page\n"); | 145 | test_msg("Couldn't find the locked page\n"); |
| 145 | goto out_bits; | 146 | goto out_bits; |
| 146 | } | 147 | } |
| 147 | set_extent_delalloc(&tmp, 4096, max_bytes - 1, NULL); | 148 | set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL); |
| 148 | start = test_start; | 149 | start = test_start; |
| 149 | end = 0; | 150 | end = 0; |
| 150 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, | 151 | found = find_lock_delalloc_range(inode, &tmp, locked_page, &start, |
| @@ -172,7 +173,7 @@ static int test_find_delalloc(void) | |||
| 172 | * |--- delalloc ---| | 173 | * |--- delalloc ---| |
| 173 | * |--- search ---| | 174 | * |--- search ---| |
| 174 | */ | 175 | */ |
| 175 | test_start = max_bytes + 4096; | 176 | test_start = max_bytes + sectorsize; |
| 176 | locked_page = find_lock_page(inode->i_mapping, test_start >> | 177 | locked_page = find_lock_page(inode->i_mapping, test_start >> |
| 177 | PAGE_SHIFT); | 178 | PAGE_SHIFT); |
| 178 | if (!locked_page) { | 179 | if (!locked_page) { |
| @@ -272,6 +273,16 @@ out: | |||
| 272 | return ret; | 273 | return ret; |
| 273 | } | 274 | } |
| 274 | 275 | ||
| 276 | /** | ||
| 277 | * test_bit_in_byte - Determine whether a bit is set in a byte | ||
| 278 | * @nr: bit number to test | ||
| 279 | * @addr: Address to start counting from | ||
| 280 | */ | ||
| 281 | static inline int test_bit_in_byte(int nr, const u8 *addr) | ||
| 282 | { | ||
| 283 | return 1UL & (addr[nr / BITS_PER_BYTE] >> (nr & (BITS_PER_BYTE - 1))); | ||
| 284 | } | ||
| 285 | |||
| 275 | static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, | 286 | static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, |
| 276 | unsigned long len) | 287 | unsigned long len) |
| 277 | { | 288 | { |
| @@ -298,25 +309,29 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, | |||
| 298 | return -EINVAL; | 309 | return -EINVAL; |
| 299 | } | 310 | } |
| 300 | 311 | ||
| 301 | bitmap_set(bitmap, (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, | 312 | /* Straddling pages test */ |
| 302 | sizeof(long) * BITS_PER_BYTE); | 313 | if (len > PAGE_SIZE) { |
| 303 | extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0, | 314 | bitmap_set(bitmap, |
| 304 | sizeof(long) * BITS_PER_BYTE); | 315 | (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, |
| 305 | if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { | 316 | sizeof(long) * BITS_PER_BYTE); |
| 306 | test_msg("Setting straddling pages failed\n"); | 317 | extent_buffer_bitmap_set(eb, PAGE_SIZE - sizeof(long) / 2, 0, |
| 307 | return -EINVAL; | 318 | sizeof(long) * BITS_PER_BYTE); |
| 308 | } | 319 | if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { |
| 320 | test_msg("Setting straddling pages failed\n"); | ||
| 321 | return -EINVAL; | ||
| 322 | } | ||
| 309 | 323 | ||
| 310 | bitmap_set(bitmap, 0, len * BITS_PER_BYTE); | 324 | bitmap_set(bitmap, 0, len * BITS_PER_BYTE); |
| 311 | bitmap_clear(bitmap, | 325 | bitmap_clear(bitmap, |
| 312 | (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, | 326 | (PAGE_SIZE - sizeof(long) / 2) * BITS_PER_BYTE, |
| 313 | sizeof(long) * BITS_PER_BYTE); | 327 | sizeof(long) * BITS_PER_BYTE); |
| 314 | extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE); | 328 | extent_buffer_bitmap_set(eb, 0, 0, len * BITS_PER_BYTE); |
| 315 | extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0, | 329 | extent_buffer_bitmap_clear(eb, PAGE_SIZE - sizeof(long) / 2, 0, |
| 316 | sizeof(long) * BITS_PER_BYTE); | 330 | sizeof(long) * BITS_PER_BYTE); |
| 317 | if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { | 331 | if (memcmp_extent_buffer(eb, bitmap, 0, len) != 0) { |
| 318 | test_msg("Clearing straddling pages failed\n"); | 332 | test_msg("Clearing straddling pages failed\n"); |
| 319 | return -EINVAL; | 333 | return -EINVAL; |
| 334 | } | ||
| 320 | } | 335 | } |
| 321 | 336 | ||
| 322 | /* | 337 | /* |
| @@ -333,7 +348,7 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, | |||
| 333 | for (i = 0; i < len * BITS_PER_BYTE; i++) { | 348 | for (i = 0; i < len * BITS_PER_BYTE; i++) { |
| 334 | int bit, bit1; | 349 | int bit, bit1; |
| 335 | 350 | ||
| 336 | bit = !!test_bit(i, bitmap); | 351 | bit = !!test_bit_in_byte(i, (u8 *)bitmap); |
| 337 | bit1 = !!extent_buffer_test_bit(eb, 0, i); | 352 | bit1 = !!extent_buffer_test_bit(eb, 0, i); |
| 338 | if (bit1 != bit) { | 353 | if (bit1 != bit) { |
| 339 | test_msg("Testing bit pattern failed\n"); | 354 | test_msg("Testing bit pattern failed\n"); |
| @@ -351,15 +366,22 @@ static int __test_eb_bitmaps(unsigned long *bitmap, struct extent_buffer *eb, | |||
| 351 | return 0; | 366 | return 0; |
| 352 | } | 367 | } |
| 353 | 368 | ||
| 354 | static int test_eb_bitmaps(void) | 369 | static int test_eb_bitmaps(u32 sectorsize, u32 nodesize) |
| 355 | { | 370 | { |
| 356 | unsigned long len = PAGE_SIZE * 4; | 371 | unsigned long len; |
| 357 | unsigned long *bitmap; | 372 | unsigned long *bitmap; |
| 358 | struct extent_buffer *eb; | 373 | struct extent_buffer *eb; |
| 359 | int ret; | 374 | int ret; |
| 360 | 375 | ||
| 361 | test_msg("Running extent buffer bitmap tests\n"); | 376 | test_msg("Running extent buffer bitmap tests\n"); |
| 362 | 377 | ||
| 378 | /* | ||
| 379 | * In ppc64, sectorsize can be 64K, thus 4 * 64K will be larger than | ||
| 380 | * BTRFS_MAX_METADATA_BLOCKSIZE. | ||
| 381 | */ | ||
| 382 | len = (sectorsize < BTRFS_MAX_METADATA_BLOCKSIZE) | ||
| 383 | ? sectorsize * 4 : sectorsize; | ||
| 384 | |||
| 363 | bitmap = kmalloc(len, GFP_KERNEL); | 385 | bitmap = kmalloc(len, GFP_KERNEL); |
| 364 | if (!bitmap) { | 386 | if (!bitmap) { |
| 365 | test_msg("Couldn't allocate test bitmap\n"); | 387 | test_msg("Couldn't allocate test bitmap\n"); |
| @@ -379,7 +401,7 @@ static int test_eb_bitmaps(void) | |||
| 379 | 401 | ||
| 380 | /* Do it over again with an extent buffer which isn't page-aligned. */ | 402 | /* Do it over again with an extent buffer which isn't page-aligned. */ |
| 381 | free_extent_buffer(eb); | 403 | free_extent_buffer(eb); |
| 382 | eb = __alloc_dummy_extent_buffer(NULL, PAGE_SIZE / 2, len); | 404 | eb = __alloc_dummy_extent_buffer(NULL, nodesize / 2, len); |
| 383 | if (!eb) { | 405 | if (!eb) { |
| 384 | test_msg("Couldn't allocate test extent buffer\n"); | 406 | test_msg("Couldn't allocate test extent buffer\n"); |
| 385 | kfree(bitmap); | 407 | kfree(bitmap); |
| @@ -393,17 +415,17 @@ out: | |||
| 393 | return ret; | 415 | return ret; |
| 394 | } | 416 | } |
| 395 | 417 | ||
| 396 | int btrfs_test_extent_io(void) | 418 | int btrfs_test_extent_io(u32 sectorsize, u32 nodesize) |
| 397 | { | 419 | { |
| 398 | int ret; | 420 | int ret; |
| 399 | 421 | ||
| 400 | test_msg("Running extent I/O tests\n"); | 422 | test_msg("Running extent I/O tests\n"); |
| 401 | 423 | ||
| 402 | ret = test_find_delalloc(); | 424 | ret = test_find_delalloc(sectorsize); |
| 403 | if (ret) | 425 | if (ret) |
| 404 | goto out; | 426 | goto out; |
| 405 | 427 | ||
| 406 | ret = test_eb_bitmaps(); | 428 | ret = test_eb_bitmaps(sectorsize, nodesize); |
| 407 | out: | 429 | out: |
| 408 | test_msg("Extent I/O tests finished\n"); | 430 | test_msg("Extent I/O tests finished\n"); |
| 409 | return ret; | 431 | return ret; |
diff --git a/fs/btrfs/tests/free-space-tests.c b/fs/btrfs/tests/free-space-tests.c index 0eeb8f3d6b67..3956bb2ff84c 100644 --- a/fs/btrfs/tests/free-space-tests.c +++ b/fs/btrfs/tests/free-space-tests.c | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | #include "../disk-io.h" | 22 | #include "../disk-io.h" |
| 23 | #include "../free-space-cache.h" | 23 | #include "../free-space-cache.h" |
| 24 | 24 | ||
| 25 | #define BITS_PER_BITMAP (PAGE_SIZE * 8) | 25 | #define BITS_PER_BITMAP (PAGE_SIZE * 8UL) |
| 26 | 26 | ||
| 27 | /* | 27 | /* |
| 28 | * This test just does basic sanity checking, making sure we can add an extent | 28 | * This test just does basic sanity checking, making sure we can add an extent |
| @@ -99,7 +99,8 @@ static int test_extents(struct btrfs_block_group_cache *cache) | |||
| 99 | return 0; | 99 | return 0; |
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | static int test_bitmaps(struct btrfs_block_group_cache *cache) | 102 | static int test_bitmaps(struct btrfs_block_group_cache *cache, |
| 103 | u32 sectorsize) | ||
| 103 | { | 104 | { |
| 104 | u64 next_bitmap_offset; | 105 | u64 next_bitmap_offset; |
| 105 | int ret; | 106 | int ret; |
| @@ -139,7 +140,7 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache) | |||
| 139 | * The first bitmap we have starts at offset 0 so the next one is just | 140 | * The first bitmap we have starts at offset 0 so the next one is just |
| 140 | * at the end of the first bitmap. | 141 | * at the end of the first bitmap. |
| 141 | */ | 142 | */ |
| 142 | next_bitmap_offset = (u64)(BITS_PER_BITMAP * 4096); | 143 | next_bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize); |
| 143 | 144 | ||
| 144 | /* Test a bit straddling two bitmaps */ | 145 | /* Test a bit straddling two bitmaps */ |
| 145 | ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M, | 146 | ret = test_add_free_space_entry(cache, next_bitmap_offset - SZ_2M, |
| @@ -167,9 +168,10 @@ static int test_bitmaps(struct btrfs_block_group_cache *cache) | |||
| 167 | } | 168 | } |
| 168 | 169 | ||
| 169 | /* This is the high grade jackassery */ | 170 | /* This is the high grade jackassery */ |
| 170 | static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache) | 171 | static int test_bitmaps_and_extents(struct btrfs_block_group_cache *cache, |
| 172 | u32 sectorsize) | ||
| 171 | { | 173 | { |
| 172 | u64 bitmap_offset = (u64)(BITS_PER_BITMAP * 4096); | 174 | u64 bitmap_offset = (u64)(BITS_PER_BITMAP * sectorsize); |
| 173 | int ret; | 175 | int ret; |
| 174 | 176 | ||
| 175 | test_msg("Running bitmap and extent tests\n"); | 177 | test_msg("Running bitmap and extent tests\n"); |
| @@ -401,7 +403,8 @@ static int check_cache_empty(struct btrfs_block_group_cache *cache) | |||
| 401 | * requests. | 403 | * requests. |
| 402 | */ | 404 | */ |
| 403 | static int | 405 | static int |
| 404 | test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | 406 | test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache, |
| 407 | u32 sectorsize) | ||
| 405 | { | 408 | { |
| 406 | int ret; | 409 | int ret; |
| 407 | u64 offset; | 410 | u64 offset; |
| @@ -539,7 +542,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 539 | * The goal is to test that the bitmap entry space stealing doesn't | 542 | * The goal is to test that the bitmap entry space stealing doesn't |
| 540 | * steal this space region. | 543 | * steal this space region. |
| 541 | */ | 544 | */ |
| 542 | ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, 4096); | 545 | ret = btrfs_add_free_space(cache, SZ_128M + SZ_16M, sectorsize); |
| 543 | if (ret) { | 546 | if (ret) { |
| 544 | test_msg("Error adding free space: %d\n", ret); | 547 | test_msg("Error adding free space: %d\n", ret); |
| 545 | return ret; | 548 | return ret; |
| @@ -597,8 +600,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 597 | return -ENOENT; | 600 | return -ENOENT; |
| 598 | } | 601 | } |
| 599 | 602 | ||
| 600 | if (cache->free_space_ctl->free_space != (SZ_1M + 4096)) { | 603 | if (cache->free_space_ctl->free_space != (SZ_1M + sectorsize)) { |
| 601 | test_msg("Cache free space is not 1Mb + 4Kb\n"); | 604 | test_msg("Cache free space is not 1Mb + %u\n", sectorsize); |
| 602 | return -EINVAL; | 605 | return -EINVAL; |
| 603 | } | 606 | } |
| 604 | 607 | ||
| @@ -611,22 +614,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 611 | return -EINVAL; | 614 | return -EINVAL; |
| 612 | } | 615 | } |
| 613 | 616 | ||
| 614 | /* All that remains is a 4Kb free space region in a bitmap. Confirm. */ | 617 | /* |
| 618 | * All that remains is a sectorsize free space region in a bitmap. | ||
| 619 | * Confirm. | ||
| 620 | */ | ||
| 615 | ret = check_num_extents_and_bitmaps(cache, 1, 1); | 621 | ret = check_num_extents_and_bitmaps(cache, 1, 1); |
| 616 | if (ret) | 622 | if (ret) |
| 617 | return ret; | 623 | return ret; |
| 618 | 624 | ||
| 619 | if (cache->free_space_ctl->free_space != 4096) { | 625 | if (cache->free_space_ctl->free_space != sectorsize) { |
| 620 | test_msg("Cache free space is not 4Kb\n"); | 626 | test_msg("Cache free space is not %u\n", sectorsize); |
| 621 | return -EINVAL; | 627 | return -EINVAL; |
| 622 | } | 628 | } |
| 623 | 629 | ||
| 624 | offset = btrfs_find_space_for_alloc(cache, | 630 | offset = btrfs_find_space_for_alloc(cache, |
| 625 | 0, 4096, 0, | 631 | 0, sectorsize, 0, |
| 626 | &max_extent_size); | 632 | &max_extent_size); |
| 627 | if (offset != (SZ_128M + SZ_16M)) { | 633 | if (offset != (SZ_128M + SZ_16M)) { |
| 628 | test_msg("Failed to allocate 4Kb from space cache, returned offset is: %llu\n", | 634 | test_msg("Failed to allocate %u, returned offset : %llu\n", |
| 629 | offset); | 635 | sectorsize, offset); |
| 630 | return -EINVAL; | 636 | return -EINVAL; |
| 631 | } | 637 | } |
| 632 | 638 | ||
| @@ -733,7 +739,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 733 | * The goal is to test that the bitmap entry space stealing doesn't | 739 | * The goal is to test that the bitmap entry space stealing doesn't |
| 734 | * steal this space region. | 740 | * steal this space region. |
| 735 | */ | 741 | */ |
| 736 | ret = btrfs_add_free_space(cache, SZ_32M, 8192); | 742 | ret = btrfs_add_free_space(cache, SZ_32M, 2 * sectorsize); |
| 737 | if (ret) { | 743 | if (ret) { |
| 738 | test_msg("Error adding free space: %d\n", ret); | 744 | test_msg("Error adding free space: %d\n", ret); |
| 739 | return ret; | 745 | return ret; |
| @@ -757,7 +763,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 757 | 763 | ||
| 758 | /* | 764 | /* |
| 759 | * Confirm that our extent entry didn't stole all free space from the | 765 | * Confirm that our extent entry didn't stole all free space from the |
| 760 | * bitmap, because of the small 8Kb free space region. | 766 | * bitmap, because of the small 2 * sectorsize free space region. |
| 761 | */ | 767 | */ |
| 762 | ret = check_num_extents_and_bitmaps(cache, 2, 1); | 768 | ret = check_num_extents_and_bitmaps(cache, 2, 1); |
| 763 | if (ret) | 769 | if (ret) |
| @@ -783,8 +789,8 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 783 | return -ENOENT; | 789 | return -ENOENT; |
| 784 | } | 790 | } |
| 785 | 791 | ||
| 786 | if (cache->free_space_ctl->free_space != (SZ_1M + 8192)) { | 792 | if (cache->free_space_ctl->free_space != (SZ_1M + 2 * sectorsize)) { |
| 787 | test_msg("Cache free space is not 1Mb + 8Kb\n"); | 793 | test_msg("Cache free space is not 1Mb + %u\n", 2 * sectorsize); |
| 788 | return -EINVAL; | 794 | return -EINVAL; |
| 789 | } | 795 | } |
| 790 | 796 | ||
| @@ -796,21 +802,25 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 796 | return -EINVAL; | 802 | return -EINVAL; |
| 797 | } | 803 | } |
| 798 | 804 | ||
| 799 | /* All that remains is a 8Kb free space region in a bitmap. Confirm. */ | 805 | /* |
| 806 | * All that remains is 2 * sectorsize free space region | ||
| 807 | * in a bitmap. Confirm. | ||
| 808 | */ | ||
| 800 | ret = check_num_extents_and_bitmaps(cache, 1, 1); | 809 | ret = check_num_extents_and_bitmaps(cache, 1, 1); |
| 801 | if (ret) | 810 | if (ret) |
| 802 | return ret; | 811 | return ret; |
| 803 | 812 | ||
| 804 | if (cache->free_space_ctl->free_space != 8192) { | 813 | if (cache->free_space_ctl->free_space != 2 * sectorsize) { |
| 805 | test_msg("Cache free space is not 8Kb\n"); | 814 | test_msg("Cache free space is not %u\n", 2 * sectorsize); |
| 806 | return -EINVAL; | 815 | return -EINVAL; |
| 807 | } | 816 | } |
| 808 | 817 | ||
| 809 | offset = btrfs_find_space_for_alloc(cache, | 818 | offset = btrfs_find_space_for_alloc(cache, |
| 810 | 0, 8192, 0, | 819 | 0, 2 * sectorsize, 0, |
| 811 | &max_extent_size); | 820 | &max_extent_size); |
| 812 | if (offset != SZ_32M) { | 821 | if (offset != SZ_32M) { |
| 813 | test_msg("Failed to allocate 8Kb from space cache, returned offset is: %llu\n", | 822 | test_msg("Failed to allocate %u, offset: %llu\n", |
| 823 | 2 * sectorsize, | ||
| 814 | offset); | 824 | offset); |
| 815 | return -EINVAL; | 825 | return -EINVAL; |
| 816 | } | 826 | } |
| @@ -825,7 +835,7 @@ test_steal_space_from_bitmap_to_extent(struct btrfs_block_group_cache *cache) | |||
| 825 | return 0; | 835 | return 0; |
| 826 | } | 836 | } |
| 827 | 837 | ||
| 828 | int btrfs_test_free_space_cache(void) | 838 | int btrfs_test_free_space_cache(u32 sectorsize, u32 nodesize) |
| 829 | { | 839 | { |
| 830 | struct btrfs_block_group_cache *cache; | 840 | struct btrfs_block_group_cache *cache; |
| 831 | struct btrfs_root *root = NULL; | 841 | struct btrfs_root *root = NULL; |
| @@ -833,13 +843,19 @@ int btrfs_test_free_space_cache(void) | |||
| 833 | 843 | ||
| 834 | test_msg("Running btrfs free space cache tests\n"); | 844 | test_msg("Running btrfs free space cache tests\n"); |
| 835 | 845 | ||
| 836 | cache = btrfs_alloc_dummy_block_group(1024 * 1024 * 1024); | 846 | /* |
| 847 | * For ppc64 (with 64k page size), bytes per bitmap might be | ||
| 848 | * larger than 1G. To make bitmap test available in ppc64, | ||
| 849 | * alloc dummy block group whose size cross bitmaps. | ||
| 850 | */ | ||
| 851 | cache = btrfs_alloc_dummy_block_group(BITS_PER_BITMAP * sectorsize | ||
| 852 | + PAGE_SIZE, sectorsize); | ||
| 837 | if (!cache) { | 853 | if (!cache) { |
| 838 | test_msg("Couldn't run the tests\n"); | 854 | test_msg("Couldn't run the tests\n"); |
| 839 | return 0; | 855 | return 0; |
| 840 | } | 856 | } |
| 841 | 857 | ||
| 842 | root = btrfs_alloc_dummy_root(); | 858 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 843 | if (IS_ERR(root)) { | 859 | if (IS_ERR(root)) { |
| 844 | ret = PTR_ERR(root); | 860 | ret = PTR_ERR(root); |
| 845 | goto out; | 861 | goto out; |
| @@ -855,14 +871,14 @@ int btrfs_test_free_space_cache(void) | |||
| 855 | ret = test_extents(cache); | 871 | ret = test_extents(cache); |
| 856 | if (ret) | 872 | if (ret) |
| 857 | goto out; | 873 | goto out; |
| 858 | ret = test_bitmaps(cache); | 874 | ret = test_bitmaps(cache, sectorsize); |
| 859 | if (ret) | 875 | if (ret) |
| 860 | goto out; | 876 | goto out; |
| 861 | ret = test_bitmaps_and_extents(cache); | 877 | ret = test_bitmaps_and_extents(cache, sectorsize); |
| 862 | if (ret) | 878 | if (ret) |
| 863 | goto out; | 879 | goto out; |
| 864 | 880 | ||
| 865 | ret = test_steal_space_from_bitmap_to_extent(cache); | 881 | ret = test_steal_space_from_bitmap_to_extent(cache, sectorsize); |
| 866 | out: | 882 | out: |
| 867 | btrfs_free_dummy_block_group(cache); | 883 | btrfs_free_dummy_block_group(cache); |
| 868 | btrfs_free_dummy_root(root); | 884 | btrfs_free_dummy_root(root); |
diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c index 7cea4462acd5..aac507085ab0 100644 --- a/fs/btrfs/tests/free-space-tree-tests.c +++ b/fs/btrfs/tests/free-space-tree-tests.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #include <linux/types.h> | ||
| 19 | #include "btrfs-tests.h" | 20 | #include "btrfs-tests.h" |
| 20 | #include "../ctree.h" | 21 | #include "../ctree.h" |
| 21 | #include "../disk-io.h" | 22 | #include "../disk-io.h" |
| @@ -30,7 +31,7 @@ struct free_space_extent { | |||
| 30 | * The test cases align their operations to this in order to hit some of the | 31 | * The test cases align their operations to this in order to hit some of the |
| 31 | * edge cases in the bitmap code. | 32 | * edge cases in the bitmap code. |
| 32 | */ | 33 | */ |
| 33 | #define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * 4096) | 34 | #define BITMAP_RANGE (BTRFS_FREE_SPACE_BITMAP_BITS * PAGE_SIZE) |
| 34 | 35 | ||
| 35 | static int __check_free_space_extents(struct btrfs_trans_handle *trans, | 36 | static int __check_free_space_extents(struct btrfs_trans_handle *trans, |
| 36 | struct btrfs_fs_info *fs_info, | 37 | struct btrfs_fs_info *fs_info, |
| @@ -439,7 +440,8 @@ typedef int (*test_func_t)(struct btrfs_trans_handle *, | |||
| 439 | struct btrfs_block_group_cache *, | 440 | struct btrfs_block_group_cache *, |
| 440 | struct btrfs_path *); | 441 | struct btrfs_path *); |
| 441 | 442 | ||
| 442 | static int run_test(test_func_t test_func, int bitmaps) | 443 | static int run_test(test_func_t test_func, int bitmaps, |
| 444 | u32 sectorsize, u32 nodesize) | ||
| 443 | { | 445 | { |
| 444 | struct btrfs_root *root = NULL; | 446 | struct btrfs_root *root = NULL; |
| 445 | struct btrfs_block_group_cache *cache = NULL; | 447 | struct btrfs_block_group_cache *cache = NULL; |
| @@ -447,7 +449,7 @@ static int run_test(test_func_t test_func, int bitmaps) | |||
| 447 | struct btrfs_path *path = NULL; | 449 | struct btrfs_path *path = NULL; |
| 448 | int ret; | 450 | int ret; |
| 449 | 451 | ||
| 450 | root = btrfs_alloc_dummy_root(); | 452 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 451 | if (IS_ERR(root)) { | 453 | if (IS_ERR(root)) { |
| 452 | test_msg("Couldn't allocate dummy root\n"); | 454 | test_msg("Couldn't allocate dummy root\n"); |
| 453 | ret = PTR_ERR(root); | 455 | ret = PTR_ERR(root); |
| @@ -466,7 +468,8 @@ static int run_test(test_func_t test_func, int bitmaps) | |||
| 466 | root->fs_info->free_space_root = root; | 468 | root->fs_info->free_space_root = root; |
| 467 | root->fs_info->tree_root = root; | 469 | root->fs_info->tree_root = root; |
| 468 | 470 | ||
| 469 | root->node = alloc_test_extent_buffer(root->fs_info, 4096); | 471 | root->node = alloc_test_extent_buffer(root->fs_info, |
| 472 | nodesize, nodesize); | ||
| 470 | if (!root->node) { | 473 | if (!root->node) { |
| 471 | test_msg("Couldn't allocate dummy buffer\n"); | 474 | test_msg("Couldn't allocate dummy buffer\n"); |
| 472 | ret = -ENOMEM; | 475 | ret = -ENOMEM; |
| @@ -474,9 +477,9 @@ static int run_test(test_func_t test_func, int bitmaps) | |||
| 474 | } | 477 | } |
| 475 | btrfs_set_header_level(root->node, 0); | 478 | btrfs_set_header_level(root->node, 0); |
| 476 | btrfs_set_header_nritems(root->node, 0); | 479 | btrfs_set_header_nritems(root->node, 0); |
| 477 | root->alloc_bytenr += 8192; | 480 | root->alloc_bytenr += 2 * nodesize; |
| 478 | 481 | ||
| 479 | cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE); | 482 | cache = btrfs_alloc_dummy_block_group(8 * BITMAP_RANGE, sectorsize); |
| 480 | if (!cache) { | 483 | if (!cache) { |
| 481 | test_msg("Couldn't allocate dummy block group cache\n"); | 484 | test_msg("Couldn't allocate dummy block group cache\n"); |
| 482 | ret = -ENOMEM; | 485 | ret = -ENOMEM; |
| @@ -534,17 +537,18 @@ out: | |||
| 534 | return ret; | 537 | return ret; |
| 535 | } | 538 | } |
| 536 | 539 | ||
| 537 | static int run_test_both_formats(test_func_t test_func) | 540 | static int run_test_both_formats(test_func_t test_func, |
| 541 | u32 sectorsize, u32 nodesize) | ||
| 538 | { | 542 | { |
| 539 | int ret; | 543 | int ret; |
| 540 | 544 | ||
| 541 | ret = run_test(test_func, 0); | 545 | ret = run_test(test_func, 0, sectorsize, nodesize); |
| 542 | if (ret) | 546 | if (ret) |
| 543 | return ret; | 547 | return ret; |
| 544 | return run_test(test_func, 1); | 548 | return run_test(test_func, 1, sectorsize, nodesize); |
| 545 | } | 549 | } |
| 546 | 550 | ||
| 547 | int btrfs_test_free_space_tree(void) | 551 | int btrfs_test_free_space_tree(u32 sectorsize, u32 nodesize) |
| 548 | { | 552 | { |
| 549 | test_func_t tests[] = { | 553 | test_func_t tests[] = { |
| 550 | test_empty_block_group, | 554 | test_empty_block_group, |
| @@ -561,9 +565,11 @@ int btrfs_test_free_space_tree(void) | |||
| 561 | 565 | ||
| 562 | test_msg("Running free space tree tests\n"); | 566 | test_msg("Running free space tree tests\n"); |
| 563 | for (i = 0; i < ARRAY_SIZE(tests); i++) { | 567 | for (i = 0; i < ARRAY_SIZE(tests); i++) { |
| 564 | int ret = run_test_both_formats(tests[i]); | 568 | int ret = run_test_both_formats(tests[i], sectorsize, |
| 569 | nodesize); | ||
| 565 | if (ret) { | 570 | if (ret) { |
| 566 | test_msg("%pf failed\n", tests[i]); | 571 | test_msg("%pf : sectorsize %u failed\n", |
| 572 | tests[i], sectorsize); | ||
| 567 | return ret; | 573 | return ret; |
| 568 | } | 574 | } |
| 569 | } | 575 | } |
diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 8a25fe8b7c45..29648c0a39f1 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #include <linux/types.h> | ||
| 19 | #include "btrfs-tests.h" | 20 | #include "btrfs-tests.h" |
| 20 | #include "../ctree.h" | 21 | #include "../ctree.h" |
| 21 | #include "../btrfs_inode.h" | 22 | #include "../btrfs_inode.h" |
| @@ -86,19 +87,19 @@ static void insert_inode_item_key(struct btrfs_root *root) | |||
| 86 | * diagram of how the extents will look though this may not be possible we still | 87 | * diagram of how the extents will look though this may not be possible we still |
| 87 | * want to make sure everything acts normally (the last number is not inclusive) | 88 | * want to make sure everything acts normally (the last number is not inclusive) |
| 88 | * | 89 | * |
| 89 | * [0 - 5][5 - 6][6 - 10][10 - 4096][ 4096 - 8192 ][8192 - 12288] | 90 | * [0 - 5][5 - 6][ 6 - 4096 ][ 4096 - 4100][4100 - 8195][8195 - 12291] |
| 90 | * [hole ][inline][ hole ][ regular ][regular1 split][ hole ] | 91 | * [hole ][inline][hole but no extent][ hole ][ regular ][regular1 split] |
| 91 | * | 92 | * |
| 92 | * [ 12288 - 20480][20480 - 24576][ 24576 - 28672 ][28672 - 36864][36864 - 45056] | 93 | * [12291 - 16387][16387 - 24579][24579 - 28675][ 28675 - 32771][32771 - 36867 ] |
| 93 | * [regular1 split][ prealloc1 ][prealloc1 written][ prealloc1 ][ compressed ] | 94 | * [ hole ][regular1 split][ prealloc ][ prealloc1 ][prealloc1 written] |
| 94 | * | 95 | * |
| 95 | * [45056 - 49152][49152-53248][53248-61440][61440-65536][ 65536+81920 ] | 96 | * [36867 - 45059][45059 - 53251][53251 - 57347][57347 - 61443][61443- 69635] |
| 96 | * [ compressed1 ][ regular ][compressed1][ regular ][ hole but no extent] | 97 | * [ prealloc1 ][ compressed ][ compressed1 ][ regular ][ compressed1] |
| 97 | * | 98 | * |
| 98 | * [81920-86016] | 99 | * [69635-73731][ 73731 - 86019 ][86019-90115] |
| 99 | * [ regular ] | 100 | * [ regular ][ hole but no extent][ regular ] |
| 100 | */ | 101 | */ |
| 101 | static void setup_file_extents(struct btrfs_root *root) | 102 | static void setup_file_extents(struct btrfs_root *root, u32 sectorsize) |
| 102 | { | 103 | { |
| 103 | int slot = 0; | 104 | int slot = 0; |
| 104 | u64 disk_bytenr = SZ_1M; | 105 | u64 disk_bytenr = SZ_1M; |
| @@ -119,7 +120,7 @@ static void setup_file_extents(struct btrfs_root *root) | |||
| 119 | insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0, | 120 | insert_extent(root, offset, 1, 1, 0, 0, 0, BTRFS_FILE_EXTENT_INLINE, 0, |
| 120 | slot); | 121 | slot); |
| 121 | slot++; | 122 | slot++; |
| 122 | offset = 4096; | 123 | offset = sectorsize; |
| 123 | 124 | ||
| 124 | /* Now another hole */ | 125 | /* Now another hole */ |
| 125 | insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0, | 126 | insert_extent(root, offset, 4, 4, 0, 0, 0, BTRFS_FILE_EXTENT_REG, 0, |
| @@ -128,99 +129,106 @@ static void setup_file_extents(struct btrfs_root *root) | |||
| 128 | offset += 4; | 129 | offset += 4; |
| 129 | 130 | ||
| 130 | /* Now for a regular extent */ | 131 | /* Now for a regular extent */ |
| 131 | insert_extent(root, offset, 4095, 4095, 0, disk_bytenr, 4096, | 132 | insert_extent(root, offset, sectorsize - 1, sectorsize - 1, 0, |
| 132 | BTRFS_FILE_EXTENT_REG, 0, slot); | 133 | disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); |
| 133 | slot++; | 134 | slot++; |
| 134 | disk_bytenr += 4096; | 135 | disk_bytenr += sectorsize; |
| 135 | offset += 4095; | 136 | offset += sectorsize - 1; |
| 136 | 137 | ||
| 137 | /* | 138 | /* |
| 138 | * Now for 3 extents that were split from a hole punch so we test | 139 | * Now for 3 extents that were split from a hole punch so we test |
| 139 | * offsets properly. | 140 | * offsets properly. |
| 140 | */ | 141 | */ |
| 141 | insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384, | 142 | insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr, |
| 142 | BTRFS_FILE_EXTENT_REG, 0, slot); | 143 | 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); |
| 143 | slot++; | 144 | slot++; |
| 144 | offset += 4096; | 145 | offset += sectorsize; |
| 145 | insert_extent(root, offset, 4096, 4096, 0, 0, 0, BTRFS_FILE_EXTENT_REG, | 146 | insert_extent(root, offset, sectorsize, sectorsize, 0, 0, 0, |
| 146 | 0, slot); | 147 | BTRFS_FILE_EXTENT_REG, 0, slot); |
| 147 | slot++; | 148 | slot++; |
| 148 | offset += 4096; | 149 | offset += sectorsize; |
| 149 | insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384, | 150 | insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize, |
| 151 | 2 * sectorsize, disk_bytenr, 4 * sectorsize, | ||
| 150 | BTRFS_FILE_EXTENT_REG, 0, slot); | 152 | BTRFS_FILE_EXTENT_REG, 0, slot); |
| 151 | slot++; | 153 | slot++; |
| 152 | offset += 8192; | 154 | offset += 2 * sectorsize; |
| 153 | disk_bytenr += 16384; | 155 | disk_bytenr += 4 * sectorsize; |
| 154 | 156 | ||
| 155 | /* Now for a unwritten prealloc extent */ | 157 | /* Now for a unwritten prealloc extent */ |
| 156 | insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096, | 158 | insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr, |
| 157 | BTRFS_FILE_EXTENT_PREALLOC, 0, slot); | 159 | sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot); |
| 158 | slot++; | 160 | slot++; |
| 159 | offset += 4096; | 161 | offset += sectorsize; |
| 160 | 162 | ||
| 161 | /* | 163 | /* |
| 162 | * We want to jack up disk_bytenr a little more so the em stuff doesn't | 164 | * We want to jack up disk_bytenr a little more so the em stuff doesn't |
| 163 | * merge our records. | 165 | * merge our records. |
| 164 | */ | 166 | */ |
| 165 | disk_bytenr += 8192; | 167 | disk_bytenr += 2 * sectorsize; |
| 166 | 168 | ||
| 167 | /* | 169 | /* |
| 168 | * Now for a partially written prealloc extent, basically the same as | 170 | * Now for a partially written prealloc extent, basically the same as |
| 169 | * the hole punch example above. Ram_bytes never changes when you mark | 171 | * the hole punch example above. Ram_bytes never changes when you mark |
| 170 | * extents written btw. | 172 | * extents written btw. |
| 171 | */ | 173 | */ |
| 172 | insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 16384, | 174 | insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr, |
| 173 | BTRFS_FILE_EXTENT_PREALLOC, 0, slot); | 175 | 4 * sectorsize, BTRFS_FILE_EXTENT_PREALLOC, 0, slot); |
| 174 | slot++; | 176 | slot++; |
| 175 | offset += 4096; | 177 | offset += sectorsize; |
| 176 | insert_extent(root, offset, 4096, 16384, 4096, disk_bytenr, 16384, | 178 | insert_extent(root, offset, sectorsize, 4 * sectorsize, sectorsize, |
| 177 | BTRFS_FILE_EXTENT_REG, 0, slot); | 179 | disk_bytenr, 4 * sectorsize, BTRFS_FILE_EXTENT_REG, 0, |
| 180 | slot); | ||
| 178 | slot++; | 181 | slot++; |
| 179 | offset += 4096; | 182 | offset += sectorsize; |
| 180 | insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 16384, | 183 | insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize, |
| 184 | 2 * sectorsize, disk_bytenr, 4 * sectorsize, | ||
| 181 | BTRFS_FILE_EXTENT_PREALLOC, 0, slot); | 185 | BTRFS_FILE_EXTENT_PREALLOC, 0, slot); |
| 182 | slot++; | 186 | slot++; |
| 183 | offset += 8192; | 187 | offset += 2 * sectorsize; |
| 184 | disk_bytenr += 16384; | 188 | disk_bytenr += 4 * sectorsize; |
| 185 | 189 | ||
| 186 | /* Now a normal compressed extent */ | 190 | /* Now a normal compressed extent */ |
| 187 | insert_extent(root, offset, 8192, 8192, 0, disk_bytenr, 4096, | 191 | insert_extent(root, offset, 2 * sectorsize, 2 * sectorsize, 0, |
| 188 | BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); | 192 | disk_bytenr, sectorsize, BTRFS_FILE_EXTENT_REG, |
| 193 | BTRFS_COMPRESS_ZLIB, slot); | ||
| 189 | slot++; | 194 | slot++; |
| 190 | offset += 8192; | 195 | offset += 2 * sectorsize; |
| 191 | /* No merges */ | 196 | /* No merges */ |
| 192 | disk_bytenr += 8192; | 197 | disk_bytenr += 2 * sectorsize; |
| 193 | 198 | ||
| 194 | /* Now a split compressed extent */ | 199 | /* Now a split compressed extent */ |
| 195 | insert_extent(root, offset, 4096, 16384, 0, disk_bytenr, 4096, | 200 | insert_extent(root, offset, sectorsize, 4 * sectorsize, 0, disk_bytenr, |
| 196 | BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); | 201 | sectorsize, BTRFS_FILE_EXTENT_REG, |
| 202 | BTRFS_COMPRESS_ZLIB, slot); | ||
| 197 | slot++; | 203 | slot++; |
| 198 | offset += 4096; | 204 | offset += sectorsize; |
| 199 | insert_extent(root, offset, 4096, 4096, 0, disk_bytenr + 4096, 4096, | 205 | insert_extent(root, offset, sectorsize, sectorsize, 0, |
| 206 | disk_bytenr + sectorsize, sectorsize, | ||
| 200 | BTRFS_FILE_EXTENT_REG, 0, slot); | 207 | BTRFS_FILE_EXTENT_REG, 0, slot); |
| 201 | slot++; | 208 | slot++; |
| 202 | offset += 4096; | 209 | offset += sectorsize; |
| 203 | insert_extent(root, offset, 8192, 16384, 8192, disk_bytenr, 4096, | 210 | insert_extent(root, offset, 2 * sectorsize, 4 * sectorsize, |
| 211 | 2 * sectorsize, disk_bytenr, sectorsize, | ||
| 204 | BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); | 212 | BTRFS_FILE_EXTENT_REG, BTRFS_COMPRESS_ZLIB, slot); |
| 205 | slot++; | 213 | slot++; |
| 206 | offset += 8192; | 214 | offset += 2 * sectorsize; |
| 207 | disk_bytenr += 8192; | 215 | disk_bytenr += 2 * sectorsize; |
| 208 | 216 | ||
| 209 | /* Now extents that have a hole but no hole extent */ | 217 | /* Now extents that have a hole but no hole extent */ |
| 210 | insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096, | 218 | insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr, |
| 211 | BTRFS_FILE_EXTENT_REG, 0, slot); | 219 | sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); |
| 212 | slot++; | 220 | slot++; |
| 213 | offset += 16384; | 221 | offset += 4 * sectorsize; |
| 214 | disk_bytenr += 4096; | 222 | disk_bytenr += sectorsize; |
| 215 | insert_extent(root, offset, 4096, 4096, 0, disk_bytenr, 4096, | 223 | insert_extent(root, offset, sectorsize, sectorsize, 0, disk_bytenr, |
| 216 | BTRFS_FILE_EXTENT_REG, 0, slot); | 224 | sectorsize, BTRFS_FILE_EXTENT_REG, 0, slot); |
| 217 | } | 225 | } |
| 218 | 226 | ||
| 219 | static unsigned long prealloc_only = 0; | 227 | static unsigned long prealloc_only = 0; |
| 220 | static unsigned long compressed_only = 0; | 228 | static unsigned long compressed_only = 0; |
| 221 | static unsigned long vacancy_only = 0; | 229 | static unsigned long vacancy_only = 0; |
| 222 | 230 | ||
| 223 | static noinline int test_btrfs_get_extent(void) | 231 | static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) |
| 224 | { | 232 | { |
| 225 | struct inode *inode = NULL; | 233 | struct inode *inode = NULL; |
| 226 | struct btrfs_root *root = NULL; | 234 | struct btrfs_root *root = NULL; |
| @@ -240,7 +248,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 240 | BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; | 248 | BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; |
| 241 | BTRFS_I(inode)->location.offset = 0; | 249 | BTRFS_I(inode)->location.offset = 0; |
| 242 | 250 | ||
| 243 | root = btrfs_alloc_dummy_root(); | 251 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 244 | if (IS_ERR(root)) { | 252 | if (IS_ERR(root)) { |
| 245 | test_msg("Couldn't allocate root\n"); | 253 | test_msg("Couldn't allocate root\n"); |
| 246 | goto out; | 254 | goto out; |
| @@ -256,7 +264,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 256 | goto out; | 264 | goto out; |
| 257 | } | 265 | } |
| 258 | 266 | ||
| 259 | root->node = alloc_dummy_extent_buffer(NULL, 4096); | 267 | root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize); |
| 260 | if (!root->node) { | 268 | if (!root->node) { |
| 261 | test_msg("Couldn't allocate dummy buffer\n"); | 269 | test_msg("Couldn't allocate dummy buffer\n"); |
| 262 | goto out; | 270 | goto out; |
| @@ -273,7 +281,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 273 | 281 | ||
| 274 | /* First with no extents */ | 282 | /* First with no extents */ |
| 275 | BTRFS_I(inode)->root = root; | 283 | BTRFS_I(inode)->root = root; |
| 276 | em = btrfs_get_extent(inode, NULL, 0, 0, 4096, 0); | 284 | em = btrfs_get_extent(inode, NULL, 0, 0, sectorsize, 0); |
| 277 | if (IS_ERR(em)) { | 285 | if (IS_ERR(em)) { |
| 278 | em = NULL; | 286 | em = NULL; |
| 279 | test_msg("Got an error when we shouldn't have\n"); | 287 | test_msg("Got an error when we shouldn't have\n"); |
| @@ -295,7 +303,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 295 | * setup_file_extents, so if you change anything there you need to | 303 | * setup_file_extents, so if you change anything there you need to |
| 296 | * update the comment and update the expected values below. | 304 | * update the comment and update the expected values below. |
| 297 | */ | 305 | */ |
| 298 | setup_file_extents(root); | 306 | setup_file_extents(root, sectorsize); |
| 299 | 307 | ||
| 300 | em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0); | 308 | em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0); |
| 301 | if (IS_ERR(em)) { | 309 | if (IS_ERR(em)) { |
| @@ -318,7 +326,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 318 | offset = em->start + em->len; | 326 | offset = em->start + em->len; |
| 319 | free_extent_map(em); | 327 | free_extent_map(em); |
| 320 | 328 | ||
| 321 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 329 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 322 | if (IS_ERR(em)) { | 330 | if (IS_ERR(em)) { |
| 323 | test_msg("Got an error when we shouldn't have\n"); | 331 | test_msg("Got an error when we shouldn't have\n"); |
| 324 | goto out; | 332 | goto out; |
| @@ -327,7 +335,8 @@ static noinline int test_btrfs_get_extent(void) | |||
| 327 | test_msg("Expected an inline, got %llu\n", em->block_start); | 335 | test_msg("Expected an inline, got %llu\n", em->block_start); |
| 328 | goto out; | 336 | goto out; |
| 329 | } | 337 | } |
| 330 | if (em->start != offset || em->len != 4091) { | 338 | |
| 339 | if (em->start != offset || em->len != (sectorsize - 5)) { | ||
| 331 | test_msg("Unexpected extent wanted start %llu len 1, got start " | 340 | test_msg("Unexpected extent wanted start %llu len 1, got start " |
| 332 | "%llu len %llu\n", offset, em->start, em->len); | 341 | "%llu len %llu\n", offset, em->start, em->len); |
| 333 | goto out; | 342 | goto out; |
| @@ -344,7 +353,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 344 | offset = em->start + em->len; | 353 | offset = em->start + em->len; |
| 345 | free_extent_map(em); | 354 | free_extent_map(em); |
| 346 | 355 | ||
| 347 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 356 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 348 | if (IS_ERR(em)) { | 357 | if (IS_ERR(em)) { |
| 349 | test_msg("Got an error when we shouldn't have\n"); | 358 | test_msg("Got an error when we shouldn't have\n"); |
| 350 | goto out; | 359 | goto out; |
| @@ -366,7 +375,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 366 | free_extent_map(em); | 375 | free_extent_map(em); |
| 367 | 376 | ||
| 368 | /* Regular extent */ | 377 | /* Regular extent */ |
| 369 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 378 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 370 | if (IS_ERR(em)) { | 379 | if (IS_ERR(em)) { |
| 371 | test_msg("Got an error when we shouldn't have\n"); | 380 | test_msg("Got an error when we shouldn't have\n"); |
| 372 | goto out; | 381 | goto out; |
| @@ -375,7 +384,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 375 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 384 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 376 | goto out; | 385 | goto out; |
| 377 | } | 386 | } |
| 378 | if (em->start != offset || em->len != 4095) { | 387 | if (em->start != offset || em->len != sectorsize - 1) { |
| 379 | test_msg("Unexpected extent wanted start %llu len 4095, got " | 388 | test_msg("Unexpected extent wanted start %llu len 4095, got " |
| 380 | "start %llu len %llu\n", offset, em->start, em->len); | 389 | "start %llu len %llu\n", offset, em->start, em->len); |
| 381 | goto out; | 390 | goto out; |
| @@ -393,7 +402,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 393 | free_extent_map(em); | 402 | free_extent_map(em); |
| 394 | 403 | ||
| 395 | /* The next 3 are split extents */ | 404 | /* The next 3 are split extents */ |
| 396 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 405 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 397 | if (IS_ERR(em)) { | 406 | if (IS_ERR(em)) { |
| 398 | test_msg("Got an error when we shouldn't have\n"); | 407 | test_msg("Got an error when we shouldn't have\n"); |
| 399 | goto out; | 408 | goto out; |
| @@ -402,9 +411,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 402 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 411 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 403 | goto out; | 412 | goto out; |
| 404 | } | 413 | } |
| 405 | if (em->start != offset || em->len != 4096) { | 414 | if (em->start != offset || em->len != sectorsize) { |
| 406 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 415 | test_msg("Unexpected extent start %llu len %u, " |
| 407 | "start %llu len %llu\n", offset, em->start, em->len); | 416 | "got start %llu len %llu\n", |
| 417 | offset, sectorsize, em->start, em->len); | ||
| 408 | goto out; | 418 | goto out; |
| 409 | } | 419 | } |
| 410 | if (em->flags != 0) { | 420 | if (em->flags != 0) { |
| @@ -421,7 +431,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 421 | offset = em->start + em->len; | 431 | offset = em->start + em->len; |
| 422 | free_extent_map(em); | 432 | free_extent_map(em); |
| 423 | 433 | ||
| 424 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 434 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 425 | if (IS_ERR(em)) { | 435 | if (IS_ERR(em)) { |
| 426 | test_msg("Got an error when we shouldn't have\n"); | 436 | test_msg("Got an error when we shouldn't have\n"); |
| 427 | goto out; | 437 | goto out; |
| @@ -430,9 +440,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 430 | test_msg("Expected a hole, got %llu\n", em->block_start); | 440 | test_msg("Expected a hole, got %llu\n", em->block_start); |
| 431 | goto out; | 441 | goto out; |
| 432 | } | 442 | } |
| 433 | if (em->start != offset || em->len != 4096) { | 443 | if (em->start != offset || em->len != sectorsize) { |
| 434 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 444 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 435 | "start %llu len %llu\n", offset, em->start, em->len); | 445 | "got start %llu len %llu\n", |
| 446 | offset, sectorsize, em->start, em->len); | ||
| 436 | goto out; | 447 | goto out; |
| 437 | } | 448 | } |
| 438 | if (em->flags != 0) { | 449 | if (em->flags != 0) { |
| @@ -442,7 +453,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 442 | offset = em->start + em->len; | 453 | offset = em->start + em->len; |
| 443 | free_extent_map(em); | 454 | free_extent_map(em); |
| 444 | 455 | ||
| 445 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 456 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 446 | if (IS_ERR(em)) { | 457 | if (IS_ERR(em)) { |
| 447 | test_msg("Got an error when we shouldn't have\n"); | 458 | test_msg("Got an error when we shouldn't have\n"); |
| 448 | goto out; | 459 | goto out; |
| @@ -451,9 +462,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 451 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 462 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 452 | goto out; | 463 | goto out; |
| 453 | } | 464 | } |
| 454 | if (em->start != offset || em->len != 8192) { | 465 | if (em->start != offset || em->len != 2 * sectorsize) { |
| 455 | test_msg("Unexpected extent wanted start %llu len 8192, got " | 466 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 456 | "start %llu len %llu\n", offset, em->start, em->len); | 467 | "got start %llu len %llu\n", |
| 468 | offset, 2 * sectorsize, em->start, em->len); | ||
| 457 | goto out; | 469 | goto out; |
| 458 | } | 470 | } |
| 459 | if (em->flags != 0) { | 471 | if (em->flags != 0) { |
| @@ -475,7 +487,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 475 | free_extent_map(em); | 487 | free_extent_map(em); |
| 476 | 488 | ||
| 477 | /* Prealloc extent */ | 489 | /* Prealloc extent */ |
| 478 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 490 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 479 | if (IS_ERR(em)) { | 491 | if (IS_ERR(em)) { |
| 480 | test_msg("Got an error when we shouldn't have\n"); | 492 | test_msg("Got an error when we shouldn't have\n"); |
| 481 | goto out; | 493 | goto out; |
| @@ -484,9 +496,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 484 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 496 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 485 | goto out; | 497 | goto out; |
| 486 | } | 498 | } |
| 487 | if (em->start != offset || em->len != 4096) { | 499 | if (em->start != offset || em->len != sectorsize) { |
| 488 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 500 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 489 | "start %llu len %llu\n", offset, em->start, em->len); | 501 | "got start %llu len %llu\n", |
| 502 | offset, sectorsize, em->start, em->len); | ||
| 490 | goto out; | 503 | goto out; |
| 491 | } | 504 | } |
| 492 | if (em->flags != prealloc_only) { | 505 | if (em->flags != prealloc_only) { |
| @@ -503,7 +516,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 503 | free_extent_map(em); | 516 | free_extent_map(em); |
| 504 | 517 | ||
| 505 | /* The next 3 are a half written prealloc extent */ | 518 | /* The next 3 are a half written prealloc extent */ |
| 506 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 519 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 507 | if (IS_ERR(em)) { | 520 | if (IS_ERR(em)) { |
| 508 | test_msg("Got an error when we shouldn't have\n"); | 521 | test_msg("Got an error when we shouldn't have\n"); |
| 509 | goto out; | 522 | goto out; |
| @@ -512,9 +525,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 512 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 525 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 513 | goto out; | 526 | goto out; |
| 514 | } | 527 | } |
| 515 | if (em->start != offset || em->len != 4096) { | 528 | if (em->start != offset || em->len != sectorsize) { |
| 516 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 529 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 517 | "start %llu len %llu\n", offset, em->start, em->len); | 530 | "got start %llu len %llu\n", |
| 531 | offset, sectorsize, em->start, em->len); | ||
| 518 | goto out; | 532 | goto out; |
| 519 | } | 533 | } |
| 520 | if (em->flags != prealloc_only) { | 534 | if (em->flags != prealloc_only) { |
| @@ -532,7 +546,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 532 | offset = em->start + em->len; | 546 | offset = em->start + em->len; |
| 533 | free_extent_map(em); | 547 | free_extent_map(em); |
| 534 | 548 | ||
| 535 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 549 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 536 | if (IS_ERR(em)) { | 550 | if (IS_ERR(em)) { |
| 537 | test_msg("Got an error when we shouldn't have\n"); | 551 | test_msg("Got an error when we shouldn't have\n"); |
| 538 | goto out; | 552 | goto out; |
| @@ -541,9 +555,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 541 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 555 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 542 | goto out; | 556 | goto out; |
| 543 | } | 557 | } |
| 544 | if (em->start != offset || em->len != 4096) { | 558 | if (em->start != offset || em->len != sectorsize) { |
| 545 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 559 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 546 | "start %llu len %llu\n", offset, em->start, em->len); | 560 | "got start %llu len %llu\n", |
| 561 | offset, sectorsize, em->start, em->len); | ||
| 547 | goto out; | 562 | goto out; |
| 548 | } | 563 | } |
| 549 | if (em->flags != 0) { | 564 | if (em->flags != 0) { |
| @@ -564,7 +579,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 564 | offset = em->start + em->len; | 579 | offset = em->start + em->len; |
| 565 | free_extent_map(em); | 580 | free_extent_map(em); |
| 566 | 581 | ||
| 567 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 582 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 568 | if (IS_ERR(em)) { | 583 | if (IS_ERR(em)) { |
| 569 | test_msg("Got an error when we shouldn't have\n"); | 584 | test_msg("Got an error when we shouldn't have\n"); |
| 570 | goto out; | 585 | goto out; |
| @@ -573,9 +588,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 573 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 588 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 574 | goto out; | 589 | goto out; |
| 575 | } | 590 | } |
| 576 | if (em->start != offset || em->len != 8192) { | 591 | if (em->start != offset || em->len != 2 * sectorsize) { |
| 577 | test_msg("Unexpected extent wanted start %llu len 8192, got " | 592 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 578 | "start %llu len %llu\n", offset, em->start, em->len); | 593 | "got start %llu len %llu\n", |
| 594 | offset, 2 * sectorsize, em->start, em->len); | ||
| 579 | goto out; | 595 | goto out; |
| 580 | } | 596 | } |
| 581 | if (em->flags != prealloc_only) { | 597 | if (em->flags != prealloc_only) { |
| @@ -598,7 +614,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 598 | free_extent_map(em); | 614 | free_extent_map(em); |
| 599 | 615 | ||
| 600 | /* Now for the compressed extent */ | 616 | /* Now for the compressed extent */ |
| 601 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 617 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 602 | if (IS_ERR(em)) { | 618 | if (IS_ERR(em)) { |
| 603 | test_msg("Got an error when we shouldn't have\n"); | 619 | test_msg("Got an error when we shouldn't have\n"); |
| 604 | goto out; | 620 | goto out; |
| @@ -607,9 +623,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 607 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 623 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 608 | goto out; | 624 | goto out; |
| 609 | } | 625 | } |
| 610 | if (em->start != offset || em->len != 8192) { | 626 | if (em->start != offset || em->len != 2 * sectorsize) { |
| 611 | test_msg("Unexpected extent wanted start %llu len 8192, got " | 627 | test_msg("Unexpected extent wanted start %llu len %u," |
| 612 | "start %llu len %llu\n", offset, em->start, em->len); | 628 | "got start %llu len %llu\n", |
| 629 | offset, 2 * sectorsize, em->start, em->len); | ||
| 613 | goto out; | 630 | goto out; |
| 614 | } | 631 | } |
| 615 | if (em->flags != compressed_only) { | 632 | if (em->flags != compressed_only) { |
| @@ -631,7 +648,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 631 | free_extent_map(em); | 648 | free_extent_map(em); |
| 632 | 649 | ||
| 633 | /* Split compressed extent */ | 650 | /* Split compressed extent */ |
| 634 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 651 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 635 | if (IS_ERR(em)) { | 652 | if (IS_ERR(em)) { |
| 636 | test_msg("Got an error when we shouldn't have\n"); | 653 | test_msg("Got an error when we shouldn't have\n"); |
| 637 | goto out; | 654 | goto out; |
| @@ -640,9 +657,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 640 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 657 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 641 | goto out; | 658 | goto out; |
| 642 | } | 659 | } |
| 643 | if (em->start != offset || em->len != 4096) { | 660 | if (em->start != offset || em->len != sectorsize) { |
| 644 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 661 | test_msg("Unexpected extent wanted start %llu len %u," |
| 645 | "start %llu len %llu\n", offset, em->start, em->len); | 662 | "got start %llu len %llu\n", |
| 663 | offset, sectorsize, em->start, em->len); | ||
| 646 | goto out; | 664 | goto out; |
| 647 | } | 665 | } |
| 648 | if (em->flags != compressed_only) { | 666 | if (em->flags != compressed_only) { |
| @@ -665,7 +683,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 665 | offset = em->start + em->len; | 683 | offset = em->start + em->len; |
| 666 | free_extent_map(em); | 684 | free_extent_map(em); |
| 667 | 685 | ||
| 668 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 686 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 669 | if (IS_ERR(em)) { | 687 | if (IS_ERR(em)) { |
| 670 | test_msg("Got an error when we shouldn't have\n"); | 688 | test_msg("Got an error when we shouldn't have\n"); |
| 671 | goto out; | 689 | goto out; |
| @@ -674,9 +692,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 674 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 692 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 675 | goto out; | 693 | goto out; |
| 676 | } | 694 | } |
| 677 | if (em->start != offset || em->len != 4096) { | 695 | if (em->start != offset || em->len != sectorsize) { |
| 678 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 696 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 679 | "start %llu len %llu\n", offset, em->start, em->len); | 697 | "got start %llu len %llu\n", |
| 698 | offset, sectorsize, em->start, em->len); | ||
| 680 | goto out; | 699 | goto out; |
| 681 | } | 700 | } |
| 682 | if (em->flags != 0) { | 701 | if (em->flags != 0) { |
| @@ -691,7 +710,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 691 | offset = em->start + em->len; | 710 | offset = em->start + em->len; |
| 692 | free_extent_map(em); | 711 | free_extent_map(em); |
| 693 | 712 | ||
| 694 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 713 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 695 | if (IS_ERR(em)) { | 714 | if (IS_ERR(em)) { |
| 696 | test_msg("Got an error when we shouldn't have\n"); | 715 | test_msg("Got an error when we shouldn't have\n"); |
| 697 | goto out; | 716 | goto out; |
| @@ -701,9 +720,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 701 | disk_bytenr, em->block_start); | 720 | disk_bytenr, em->block_start); |
| 702 | goto out; | 721 | goto out; |
| 703 | } | 722 | } |
| 704 | if (em->start != offset || em->len != 8192) { | 723 | if (em->start != offset || em->len != 2 * sectorsize) { |
| 705 | test_msg("Unexpected extent wanted start %llu len 8192, got " | 724 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 706 | "start %llu len %llu\n", offset, em->start, em->len); | 725 | "got start %llu len %llu\n", |
| 726 | offset, 2 * sectorsize, em->start, em->len); | ||
| 707 | goto out; | 727 | goto out; |
| 708 | } | 728 | } |
| 709 | if (em->flags != compressed_only) { | 729 | if (em->flags != compressed_only) { |
| @@ -725,7 +745,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 725 | free_extent_map(em); | 745 | free_extent_map(em); |
| 726 | 746 | ||
| 727 | /* A hole between regular extents but no hole extent */ | 747 | /* A hole between regular extents but no hole extent */ |
| 728 | em = btrfs_get_extent(inode, NULL, 0, offset + 6, 4096, 0); | 748 | em = btrfs_get_extent(inode, NULL, 0, offset + 6, sectorsize, 0); |
| 729 | if (IS_ERR(em)) { | 749 | if (IS_ERR(em)) { |
| 730 | test_msg("Got an error when we shouldn't have\n"); | 750 | test_msg("Got an error when we shouldn't have\n"); |
| 731 | goto out; | 751 | goto out; |
| @@ -734,9 +754,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 734 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 754 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 735 | goto out; | 755 | goto out; |
| 736 | } | 756 | } |
| 737 | if (em->start != offset || em->len != 4096) { | 757 | if (em->start != offset || em->len != sectorsize) { |
| 738 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 758 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 739 | "start %llu len %llu\n", offset, em->start, em->len); | 759 | "got start %llu len %llu\n", |
| 760 | offset, sectorsize, em->start, em->len); | ||
| 740 | goto out; | 761 | goto out; |
| 741 | } | 762 | } |
| 742 | if (em->flags != 0) { | 763 | if (em->flags != 0) { |
| @@ -765,9 +786,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 765 | * length of the actual hole, if this changes we'll have to change this | 786 | * length of the actual hole, if this changes we'll have to change this |
| 766 | * test. | 787 | * test. |
| 767 | */ | 788 | */ |
| 768 | if (em->start != offset || em->len != 12288) { | 789 | if (em->start != offset || em->len != 3 * sectorsize) { |
| 769 | test_msg("Unexpected extent wanted start %llu len 12288, got " | 790 | test_msg("Unexpected extent wanted start %llu len %u, " |
| 770 | "start %llu len %llu\n", offset, em->start, em->len); | 791 | "got start %llu len %llu\n", |
| 792 | offset, 3 * sectorsize, em->start, em->len); | ||
| 771 | goto out; | 793 | goto out; |
| 772 | } | 794 | } |
| 773 | if (em->flags != vacancy_only) { | 795 | if (em->flags != vacancy_only) { |
| @@ -783,7 +805,7 @@ static noinline int test_btrfs_get_extent(void) | |||
| 783 | offset = em->start + em->len; | 805 | offset = em->start + em->len; |
| 784 | free_extent_map(em); | 806 | free_extent_map(em); |
| 785 | 807 | ||
| 786 | em = btrfs_get_extent(inode, NULL, 0, offset, 4096, 0); | 808 | em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); |
| 787 | if (IS_ERR(em)) { | 809 | if (IS_ERR(em)) { |
| 788 | test_msg("Got an error when we shouldn't have\n"); | 810 | test_msg("Got an error when we shouldn't have\n"); |
| 789 | goto out; | 811 | goto out; |
| @@ -792,9 +814,10 @@ static noinline int test_btrfs_get_extent(void) | |||
| 792 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 814 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 793 | goto out; | 815 | goto out; |
| 794 | } | 816 | } |
| 795 | if (em->start != offset || em->len != 4096) { | 817 | if (em->start != offset || em->len != sectorsize) { |
| 796 | test_msg("Unexpected extent wanted start %llu len 4096, got " | 818 | test_msg("Unexpected extent wanted start %llu len %u," |
| 797 | "start %llu len %llu\n", offset, em->start, em->len); | 819 | "got start %llu len %llu\n", |
| 820 | offset, sectorsize, em->start, em->len); | ||
| 798 | goto out; | 821 | goto out; |
| 799 | } | 822 | } |
| 800 | if (em->flags != 0) { | 823 | if (em->flags != 0) { |
| @@ -815,7 +838,7 @@ out: | |||
| 815 | return ret; | 838 | return ret; |
| 816 | } | 839 | } |
| 817 | 840 | ||
| 818 | static int test_hole_first(void) | 841 | static int test_hole_first(u32 sectorsize, u32 nodesize) |
| 819 | { | 842 | { |
| 820 | struct inode *inode = NULL; | 843 | struct inode *inode = NULL; |
| 821 | struct btrfs_root *root = NULL; | 844 | struct btrfs_root *root = NULL; |
| @@ -832,7 +855,7 @@ static int test_hole_first(void) | |||
| 832 | BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; | 855 | BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID; |
| 833 | BTRFS_I(inode)->location.offset = 0; | 856 | BTRFS_I(inode)->location.offset = 0; |
| 834 | 857 | ||
| 835 | root = btrfs_alloc_dummy_root(); | 858 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 836 | if (IS_ERR(root)) { | 859 | if (IS_ERR(root)) { |
| 837 | test_msg("Couldn't allocate root\n"); | 860 | test_msg("Couldn't allocate root\n"); |
| 838 | goto out; | 861 | goto out; |
| @@ -844,7 +867,7 @@ static int test_hole_first(void) | |||
| 844 | goto out; | 867 | goto out; |
| 845 | } | 868 | } |
| 846 | 869 | ||
| 847 | root->node = alloc_dummy_extent_buffer(NULL, 4096); | 870 | root->node = alloc_dummy_extent_buffer(NULL, nodesize, nodesize); |
| 848 | if (!root->node) { | 871 | if (!root->node) { |
| 849 | test_msg("Couldn't allocate dummy buffer\n"); | 872 | test_msg("Couldn't allocate dummy buffer\n"); |
| 850 | goto out; | 873 | goto out; |
| @@ -861,9 +884,9 @@ static int test_hole_first(void) | |||
| 861 | * btrfs_get_extent. | 884 | * btrfs_get_extent. |
| 862 | */ | 885 | */ |
| 863 | insert_inode_item_key(root); | 886 | insert_inode_item_key(root); |
| 864 | insert_extent(root, 4096, 4096, 4096, 0, 4096, 4096, | 887 | insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize, |
| 865 | BTRFS_FILE_EXTENT_REG, 0, 1); | 888 | sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1); |
| 866 | em = btrfs_get_extent(inode, NULL, 0, 0, 8192, 0); | 889 | em = btrfs_get_extent(inode, NULL, 0, 0, 2 * sectorsize, 0); |
| 867 | if (IS_ERR(em)) { | 890 | if (IS_ERR(em)) { |
| 868 | test_msg("Got an error when we shouldn't have\n"); | 891 | test_msg("Got an error when we shouldn't have\n"); |
| 869 | goto out; | 892 | goto out; |
| @@ -872,9 +895,10 @@ static int test_hole_first(void) | |||
| 872 | test_msg("Expected a hole, got %llu\n", em->block_start); | 895 | test_msg("Expected a hole, got %llu\n", em->block_start); |
| 873 | goto out; | 896 | goto out; |
| 874 | } | 897 | } |
| 875 | if (em->start != 0 || em->len != 4096) { | 898 | if (em->start != 0 || em->len != sectorsize) { |
| 876 | test_msg("Unexpected extent wanted start 0 len 4096, got start " | 899 | test_msg("Unexpected extent wanted start 0 len %u, " |
| 877 | "%llu len %llu\n", em->start, em->len); | 900 | "got start %llu len %llu\n", |
| 901 | sectorsize, em->start, em->len); | ||
| 878 | goto out; | 902 | goto out; |
| 879 | } | 903 | } |
| 880 | if (em->flags != vacancy_only) { | 904 | if (em->flags != vacancy_only) { |
| @@ -884,18 +908,19 @@ static int test_hole_first(void) | |||
| 884 | } | 908 | } |
| 885 | free_extent_map(em); | 909 | free_extent_map(em); |
| 886 | 910 | ||
| 887 | em = btrfs_get_extent(inode, NULL, 0, 4096, 8192, 0); | 911 | em = btrfs_get_extent(inode, NULL, 0, sectorsize, 2 * sectorsize, 0); |
| 888 | if (IS_ERR(em)) { | 912 | if (IS_ERR(em)) { |
| 889 | test_msg("Got an error when we shouldn't have\n"); | 913 | test_msg("Got an error when we shouldn't have\n"); |
| 890 | goto out; | 914 | goto out; |
| 891 | } | 915 | } |
| 892 | if (em->block_start != 4096) { | 916 | if (em->block_start != sectorsize) { |
| 893 | test_msg("Expected a real extent, got %llu\n", em->block_start); | 917 | test_msg("Expected a real extent, got %llu\n", em->block_start); |
| 894 | goto out; | 918 | goto out; |
| 895 | } | 919 | } |
| 896 | if (em->start != 4096 || em->len != 4096) { | 920 | if (em->start != sectorsize || em->len != sectorsize) { |
| 897 | test_msg("Unexpected extent wanted start 4096 len 4096, got " | 921 | test_msg("Unexpected extent wanted start %u len %u, " |
| 898 | "start %llu len %llu\n", em->start, em->len); | 922 | "got start %llu len %llu\n", |
| 923 | sectorsize, sectorsize, em->start, em->len); | ||
| 899 | goto out; | 924 | goto out; |
| 900 | } | 925 | } |
| 901 | if (em->flags != 0) { | 926 | if (em->flags != 0) { |
| @@ -912,7 +937,7 @@ out: | |||
| 912 | return ret; | 937 | return ret; |
| 913 | } | 938 | } |
| 914 | 939 | ||
| 915 | static int test_extent_accounting(void) | 940 | static int test_extent_accounting(u32 sectorsize, u32 nodesize) |
| 916 | { | 941 | { |
| 917 | struct inode *inode = NULL; | 942 | struct inode *inode = NULL; |
| 918 | struct btrfs_root *root = NULL; | 943 | struct btrfs_root *root = NULL; |
| @@ -924,7 +949,7 @@ static int test_extent_accounting(void) | |||
| 924 | return ret; | 949 | return ret; |
| 925 | } | 950 | } |
| 926 | 951 | ||
| 927 | root = btrfs_alloc_dummy_root(); | 952 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 928 | if (IS_ERR(root)) { | 953 | if (IS_ERR(root)) { |
| 929 | test_msg("Couldn't allocate root\n"); | 954 | test_msg("Couldn't allocate root\n"); |
| 930 | goto out; | 955 | goto out; |
| @@ -954,10 +979,11 @@ static int test_extent_accounting(void) | |||
| 954 | goto out; | 979 | goto out; |
| 955 | } | 980 | } |
| 956 | 981 | ||
| 957 | /* [BTRFS_MAX_EXTENT_SIZE][4k] */ | 982 | /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */ |
| 958 | BTRFS_I(inode)->outstanding_extents++; | 983 | BTRFS_I(inode)->outstanding_extents++; |
| 959 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE, | 984 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE, |
| 960 | BTRFS_MAX_EXTENT_SIZE + 4095, NULL); | 985 | BTRFS_MAX_EXTENT_SIZE + sectorsize - 1, |
| 986 | NULL); | ||
| 961 | if (ret) { | 987 | if (ret) { |
| 962 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | 988 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); |
| 963 | goto out; | 989 | goto out; |
| @@ -969,10 +995,10 @@ static int test_extent_accounting(void) | |||
| 969 | goto out; | 995 | goto out; |
| 970 | } | 996 | } |
| 971 | 997 | ||
| 972 | /* [BTRFS_MAX_EXTENT_SIZE/2][4K HOLE][the rest] */ | 998 | /* [BTRFS_MAX_EXTENT_SIZE/2][sectorsize HOLE][the rest] */ |
| 973 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, | 999 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, |
| 974 | BTRFS_MAX_EXTENT_SIZE >> 1, | 1000 | BTRFS_MAX_EXTENT_SIZE >> 1, |
| 975 | (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, | 1001 | (BTRFS_MAX_EXTENT_SIZE >> 1) + sectorsize - 1, |
| 976 | EXTENT_DELALLOC | EXTENT_DIRTY | | 1002 | EXTENT_DELALLOC | EXTENT_DIRTY | |
| 977 | EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0, | 1003 | EXTENT_UPTODATE | EXTENT_DO_ACCOUNTING, 0, 0, |
| 978 | NULL, GFP_KERNEL); | 1004 | NULL, GFP_KERNEL); |
| @@ -987,10 +1013,11 @@ static int test_extent_accounting(void) | |||
| 987 | goto out; | 1013 | goto out; |
| 988 | } | 1014 | } |
| 989 | 1015 | ||
| 990 | /* [BTRFS_MAX_EXTENT_SIZE][4K] */ | 1016 | /* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */ |
| 991 | BTRFS_I(inode)->outstanding_extents++; | 1017 | BTRFS_I(inode)->outstanding_extents++; |
| 992 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1, | 1018 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1, |
| 993 | (BTRFS_MAX_EXTENT_SIZE >> 1) + 4095, | 1019 | (BTRFS_MAX_EXTENT_SIZE >> 1) |
| 1020 | + sectorsize - 1, | ||
| 994 | NULL); | 1021 | NULL); |
| 995 | if (ret) { | 1022 | if (ret) { |
| 996 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | 1023 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); |
| @@ -1004,16 +1031,17 @@ static int test_extent_accounting(void) | |||
| 1004 | } | 1031 | } |
| 1005 | 1032 | ||
| 1006 | /* | 1033 | /* |
| 1007 | * [BTRFS_MAX_EXTENT_SIZE+4K][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4K] | 1034 | * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize HOLE][BTRFS_MAX_EXTENT_SIZE+sectorsize] |
| 1008 | * | 1035 | * |
| 1009 | * I'm artificially adding 2 to outstanding_extents because in the | 1036 | * I'm artificially adding 2 to outstanding_extents because in the |
| 1010 | * buffered IO case we'd add things up as we go, but I don't feel like | 1037 | * buffered IO case we'd add things up as we go, but I don't feel like |
| 1011 | * doing that here, this isn't the interesting case we want to test. | 1038 | * doing that here, this isn't the interesting case we want to test. |
| 1012 | */ | 1039 | */ |
| 1013 | BTRFS_I(inode)->outstanding_extents += 2; | 1040 | BTRFS_I(inode)->outstanding_extents += 2; |
| 1014 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE + 8192, | 1041 | ret = btrfs_set_extent_delalloc(inode, |
| 1015 | (BTRFS_MAX_EXTENT_SIZE << 1) + 12287, | 1042 | BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize, |
| 1016 | NULL); | 1043 | (BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1, |
| 1044 | NULL); | ||
| 1017 | if (ret) { | 1045 | if (ret) { |
| 1018 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | 1046 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); |
| 1019 | goto out; | 1047 | goto out; |
| @@ -1025,10 +1053,13 @@ static int test_extent_accounting(void) | |||
| 1025 | goto out; | 1053 | goto out; |
| 1026 | } | 1054 | } |
| 1027 | 1055 | ||
| 1028 | /* [BTRFS_MAX_EXTENT_SIZE+4k][4k][BTRFS_MAX_EXTENT_SIZE+4k] */ | 1056 | /* |
| 1057 | * [BTRFS_MAX_EXTENT_SIZE+sectorsize][sectorsize][BTRFS_MAX_EXTENT_SIZE+sectorsize] | ||
| 1058 | */ | ||
| 1029 | BTRFS_I(inode)->outstanding_extents++; | 1059 | BTRFS_I(inode)->outstanding_extents++; |
| 1030 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, | 1060 | ret = btrfs_set_extent_delalloc(inode, |
| 1031 | BTRFS_MAX_EXTENT_SIZE+8191, NULL); | 1061 | BTRFS_MAX_EXTENT_SIZE + sectorsize, |
| 1062 | BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL); | ||
| 1032 | if (ret) { | 1063 | if (ret) { |
| 1033 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | 1064 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); |
| 1034 | goto out; | 1065 | goto out; |
| @@ -1042,8 +1073,8 @@ static int test_extent_accounting(void) | |||
| 1042 | 1073 | ||
| 1043 | /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */ | 1074 | /* [BTRFS_MAX_EXTENT_SIZE+4k][4K HOLE][BTRFS_MAX_EXTENT_SIZE+4k] */ |
| 1044 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, | 1075 | ret = clear_extent_bit(&BTRFS_I(inode)->io_tree, |
| 1045 | BTRFS_MAX_EXTENT_SIZE+4096, | 1076 | BTRFS_MAX_EXTENT_SIZE + sectorsize, |
| 1046 | BTRFS_MAX_EXTENT_SIZE+8191, | 1077 | BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, |
| 1047 | EXTENT_DIRTY | EXTENT_DELALLOC | | 1078 | EXTENT_DIRTY | EXTENT_DELALLOC | |
| 1048 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, | 1079 | EXTENT_DO_ACCOUNTING | EXTENT_UPTODATE, 0, 0, |
| 1049 | NULL, GFP_KERNEL); | 1080 | NULL, GFP_KERNEL); |
| @@ -1063,8 +1094,9 @@ static int test_extent_accounting(void) | |||
| 1063 | * might fail and I'd rather satisfy my paranoia at this point. | 1094 | * might fail and I'd rather satisfy my paranoia at this point. |
| 1064 | */ | 1095 | */ |
| 1065 | BTRFS_I(inode)->outstanding_extents++; | 1096 | BTRFS_I(inode)->outstanding_extents++; |
| 1066 | ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE+4096, | 1097 | ret = btrfs_set_extent_delalloc(inode, |
| 1067 | BTRFS_MAX_EXTENT_SIZE+8191, NULL); | 1098 | BTRFS_MAX_EXTENT_SIZE + sectorsize, |
| 1099 | BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL); | ||
| 1068 | if (ret) { | 1100 | if (ret) { |
| 1069 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); | 1101 | test_msg("btrfs_set_extent_delalloc returned %d\n", ret); |
| 1070 | goto out; | 1102 | goto out; |
| @@ -1103,7 +1135,7 @@ out: | |||
| 1103 | return ret; | 1135 | return ret; |
| 1104 | } | 1136 | } |
| 1105 | 1137 | ||
| 1106 | int btrfs_test_inodes(void) | 1138 | int btrfs_test_inodes(u32 sectorsize, u32 nodesize) |
| 1107 | { | 1139 | { |
| 1108 | int ret; | 1140 | int ret; |
| 1109 | 1141 | ||
| @@ -1112,13 +1144,13 @@ int btrfs_test_inodes(void) | |||
| 1112 | set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only); | 1144 | set_bit(EXTENT_FLAG_PREALLOC, &prealloc_only); |
| 1113 | 1145 | ||
| 1114 | test_msg("Running btrfs_get_extent tests\n"); | 1146 | test_msg("Running btrfs_get_extent tests\n"); |
| 1115 | ret = test_btrfs_get_extent(); | 1147 | ret = test_btrfs_get_extent(sectorsize, nodesize); |
| 1116 | if (ret) | 1148 | if (ret) |
| 1117 | return ret; | 1149 | return ret; |
| 1118 | test_msg("Running hole first btrfs_get_extent test\n"); | 1150 | test_msg("Running hole first btrfs_get_extent test\n"); |
| 1119 | ret = test_hole_first(); | 1151 | ret = test_hole_first(sectorsize, nodesize); |
| 1120 | if (ret) | 1152 | if (ret) |
| 1121 | return ret; | 1153 | return ret; |
| 1122 | test_msg("Running outstanding_extents tests\n"); | 1154 | test_msg("Running outstanding_extents tests\n"); |
| 1123 | return test_extent_accounting(); | 1155 | return test_extent_accounting(sectorsize, nodesize); |
| 1124 | } | 1156 | } |
diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index 8aa4ded31326..57a12c0d680b 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #include <linux/types.h> | ||
| 19 | #include "btrfs-tests.h" | 20 | #include "btrfs-tests.h" |
| 20 | #include "../ctree.h" | 21 | #include "../ctree.h" |
| 21 | #include "../transaction.h" | 22 | #include "../transaction.h" |
| @@ -216,7 +217,8 @@ static int remove_extent_ref(struct btrfs_root *root, u64 bytenr, | |||
| 216 | return ret; | 217 | return ret; |
| 217 | } | 218 | } |
| 218 | 219 | ||
| 219 | static int test_no_shared_qgroup(struct btrfs_root *root) | 220 | static int test_no_shared_qgroup(struct btrfs_root *root, |
| 221 | u32 sectorsize, u32 nodesize) | ||
| 220 | { | 222 | { |
| 221 | struct btrfs_trans_handle trans; | 223 | struct btrfs_trans_handle trans; |
| 222 | struct btrfs_fs_info *fs_info = root->fs_info; | 224 | struct btrfs_fs_info *fs_info = root->fs_info; |
| @@ -227,7 +229,7 @@ static int test_no_shared_qgroup(struct btrfs_root *root) | |||
| 227 | btrfs_init_dummy_trans(&trans); | 229 | btrfs_init_dummy_trans(&trans); |
| 228 | 230 | ||
| 229 | test_msg("Qgroup basic add\n"); | 231 | test_msg("Qgroup basic add\n"); |
| 230 | ret = btrfs_create_qgroup(NULL, fs_info, 5); | 232 | ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FS_TREE_OBJECTID); |
| 231 | if (ret) { | 233 | if (ret) { |
| 232 | test_msg("Couldn't create a qgroup %d\n", ret); | 234 | test_msg("Couldn't create a qgroup %d\n", ret); |
| 233 | return ret; | 235 | return ret; |
| @@ -238,18 +240,19 @@ static int test_no_shared_qgroup(struct btrfs_root *root) | |||
| 238 | * we can only call btrfs_qgroup_account_extent() directly to test | 240 | * we can only call btrfs_qgroup_account_extent() directly to test |
| 239 | * quota. | 241 | * quota. |
| 240 | */ | 242 | */ |
| 241 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); | 243 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots); |
| 242 | if (ret) { | 244 | if (ret) { |
| 243 | ulist_free(old_roots); | 245 | ulist_free(old_roots); |
| 244 | test_msg("Couldn't find old roots: %d\n", ret); | 246 | test_msg("Couldn't find old roots: %d\n", ret); |
| 245 | return ret; | 247 | return ret; |
| 246 | } | 248 | } |
| 247 | 249 | ||
| 248 | ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5); | 250 | ret = insert_normal_tree_ref(root, nodesize, nodesize, 0, |
| 251 | BTRFS_FS_TREE_OBJECTID); | ||
| 249 | if (ret) | 252 | if (ret) |
| 250 | return ret; | 253 | return ret; |
| 251 | 254 | ||
| 252 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); | 255 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots); |
| 253 | if (ret) { | 256 | if (ret) { |
| 254 | ulist_free(old_roots); | 257 | ulist_free(old_roots); |
| 255 | ulist_free(new_roots); | 258 | ulist_free(new_roots); |
| @@ -257,32 +260,33 @@ static int test_no_shared_qgroup(struct btrfs_root *root) | |||
| 257 | return ret; | 260 | return ret; |
| 258 | } | 261 | } |
| 259 | 262 | ||
| 260 | ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, | 263 | ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize, |
| 261 | old_roots, new_roots); | 264 | nodesize, old_roots, new_roots); |
| 262 | if (ret) { | 265 | if (ret) { |
| 263 | test_msg("Couldn't account space for a qgroup %d\n", ret); | 266 | test_msg("Couldn't account space for a qgroup %d\n", ret); |
| 264 | return ret; | 267 | return ret; |
| 265 | } | 268 | } |
| 266 | 269 | ||
| 267 | if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) { | 270 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, |
| 271 | nodesize, nodesize)) { | ||
| 268 | test_msg("Qgroup counts didn't match expected values\n"); | 272 | test_msg("Qgroup counts didn't match expected values\n"); |
| 269 | return -EINVAL; | 273 | return -EINVAL; |
| 270 | } | 274 | } |
| 271 | old_roots = NULL; | 275 | old_roots = NULL; |
| 272 | new_roots = NULL; | 276 | new_roots = NULL; |
| 273 | 277 | ||
| 274 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); | 278 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots); |
| 275 | if (ret) { | 279 | if (ret) { |
| 276 | ulist_free(old_roots); | 280 | ulist_free(old_roots); |
| 277 | test_msg("Couldn't find old roots: %d\n", ret); | 281 | test_msg("Couldn't find old roots: %d\n", ret); |
| 278 | return ret; | 282 | return ret; |
| 279 | } | 283 | } |
| 280 | 284 | ||
| 281 | ret = remove_extent_item(root, 4096, 4096); | 285 | ret = remove_extent_item(root, nodesize, nodesize); |
| 282 | if (ret) | 286 | if (ret) |
| 283 | return -EINVAL; | 287 | return -EINVAL; |
| 284 | 288 | ||
| 285 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); | 289 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots); |
| 286 | if (ret) { | 290 | if (ret) { |
| 287 | ulist_free(old_roots); | 291 | ulist_free(old_roots); |
| 288 | ulist_free(new_roots); | 292 | ulist_free(new_roots); |
| @@ -290,14 +294,14 @@ static int test_no_shared_qgroup(struct btrfs_root *root) | |||
| 290 | return ret; | 294 | return ret; |
| 291 | } | 295 | } |
| 292 | 296 | ||
| 293 | ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, | 297 | ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize, |
| 294 | old_roots, new_roots); | 298 | nodesize, old_roots, new_roots); |
| 295 | if (ret) { | 299 | if (ret) { |
| 296 | test_msg("Couldn't account space for a qgroup %d\n", ret); | 300 | test_msg("Couldn't account space for a qgroup %d\n", ret); |
| 297 | return -EINVAL; | 301 | return -EINVAL; |
| 298 | } | 302 | } |
| 299 | 303 | ||
| 300 | if (btrfs_verify_qgroup_counts(fs_info, 5, 0, 0)) { | 304 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, 0, 0)) { |
| 301 | test_msg("Qgroup counts didn't match expected values\n"); | 305 | test_msg("Qgroup counts didn't match expected values\n"); |
| 302 | return -EINVAL; | 306 | return -EINVAL; |
| 303 | } | 307 | } |
| @@ -310,7 +314,8 @@ static int test_no_shared_qgroup(struct btrfs_root *root) | |||
| 310 | * right, also remove one of the roots and make sure the exclusive count is | 314 | * right, also remove one of the roots and make sure the exclusive count is |
| 311 | * adjusted properly. | 315 | * adjusted properly. |
| 312 | */ | 316 | */ |
| 313 | static int test_multiple_refs(struct btrfs_root *root) | 317 | static int test_multiple_refs(struct btrfs_root *root, |
| 318 | u32 sectorsize, u32 nodesize) | ||
| 314 | { | 319 | { |
| 315 | struct btrfs_trans_handle trans; | 320 | struct btrfs_trans_handle trans; |
| 316 | struct btrfs_fs_info *fs_info = root->fs_info; | 321 | struct btrfs_fs_info *fs_info = root->fs_info; |
| @@ -322,25 +327,29 @@ static int test_multiple_refs(struct btrfs_root *root) | |||
| 322 | 327 | ||
| 323 | test_msg("Qgroup multiple refs test\n"); | 328 | test_msg("Qgroup multiple refs test\n"); |
| 324 | 329 | ||
| 325 | /* We have 5 created already from the previous test */ | 330 | /* |
| 326 | ret = btrfs_create_qgroup(NULL, fs_info, 256); | 331 | * We have BTRFS_FS_TREE_OBJECTID created already from the |
| 332 | * previous test. | ||
| 333 | */ | ||
| 334 | ret = btrfs_create_qgroup(NULL, fs_info, BTRFS_FIRST_FREE_OBJECTID); | ||
| 327 | if (ret) { | 335 | if (ret) { |
| 328 | test_msg("Couldn't create a qgroup %d\n", ret); | 336 | test_msg("Couldn't create a qgroup %d\n", ret); |
| 329 | return ret; | 337 | return ret; |
| 330 | } | 338 | } |
| 331 | 339 | ||
| 332 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); | 340 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots); |
| 333 | if (ret) { | 341 | if (ret) { |
| 334 | ulist_free(old_roots); | 342 | ulist_free(old_roots); |
| 335 | test_msg("Couldn't find old roots: %d\n", ret); | 343 | test_msg("Couldn't find old roots: %d\n", ret); |
| 336 | return ret; | 344 | return ret; |
| 337 | } | 345 | } |
| 338 | 346 | ||
| 339 | ret = insert_normal_tree_ref(root, 4096, 4096, 0, 5); | 347 | ret = insert_normal_tree_ref(root, nodesize, nodesize, 0, |
| 348 | BTRFS_FS_TREE_OBJECTID); | ||
| 340 | if (ret) | 349 | if (ret) |
| 341 | return ret; | 350 | return ret; |
| 342 | 351 | ||
| 343 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); | 352 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots); |
| 344 | if (ret) { | 353 | if (ret) { |
| 345 | ulist_free(old_roots); | 354 | ulist_free(old_roots); |
| 346 | ulist_free(new_roots); | 355 | ulist_free(new_roots); |
| @@ -348,30 +357,32 @@ static int test_multiple_refs(struct btrfs_root *root) | |||
| 348 | return ret; | 357 | return ret; |
| 349 | } | 358 | } |
| 350 | 359 | ||
| 351 | ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, | 360 | ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize, |
| 352 | old_roots, new_roots); | 361 | nodesize, old_roots, new_roots); |
| 353 | if (ret) { | 362 | if (ret) { |
| 354 | test_msg("Couldn't account space for a qgroup %d\n", ret); | 363 | test_msg("Couldn't account space for a qgroup %d\n", ret); |
| 355 | return ret; | 364 | return ret; |
| 356 | } | 365 | } |
| 357 | 366 | ||
| 358 | if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) { | 367 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, |
| 368 | nodesize, nodesize)) { | ||
| 359 | test_msg("Qgroup counts didn't match expected values\n"); | 369 | test_msg("Qgroup counts didn't match expected values\n"); |
| 360 | return -EINVAL; | 370 | return -EINVAL; |
| 361 | } | 371 | } |
| 362 | 372 | ||
| 363 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); | 373 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots); |
| 364 | if (ret) { | 374 | if (ret) { |
| 365 | ulist_free(old_roots); | 375 | ulist_free(old_roots); |
| 366 | test_msg("Couldn't find old roots: %d\n", ret); | 376 | test_msg("Couldn't find old roots: %d\n", ret); |
| 367 | return ret; | 377 | return ret; |
| 368 | } | 378 | } |
| 369 | 379 | ||
| 370 | ret = add_tree_ref(root, 4096, 4096, 0, 256); | 380 | ret = add_tree_ref(root, nodesize, nodesize, 0, |
| 381 | BTRFS_FIRST_FREE_OBJECTID); | ||
| 371 | if (ret) | 382 | if (ret) |
| 372 | return ret; | 383 | return ret; |
| 373 | 384 | ||
| 374 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); | 385 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots); |
| 375 | if (ret) { | 386 | if (ret) { |
| 376 | ulist_free(old_roots); | 387 | ulist_free(old_roots); |
| 377 | ulist_free(new_roots); | 388 | ulist_free(new_roots); |
| @@ -379,35 +390,38 @@ static int test_multiple_refs(struct btrfs_root *root) | |||
| 379 | return ret; | 390 | return ret; |
| 380 | } | 391 | } |
| 381 | 392 | ||
| 382 | ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, | 393 | ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize, |
| 383 | old_roots, new_roots); | 394 | nodesize, old_roots, new_roots); |
| 384 | if (ret) { | 395 | if (ret) { |
| 385 | test_msg("Couldn't account space for a qgroup %d\n", ret); | 396 | test_msg("Couldn't account space for a qgroup %d\n", ret); |
| 386 | return ret; | 397 | return ret; |
| 387 | } | 398 | } |
| 388 | 399 | ||
| 389 | if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 0)) { | 400 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, |
| 401 | nodesize, 0)) { | ||
| 390 | test_msg("Qgroup counts didn't match expected values\n"); | 402 | test_msg("Qgroup counts didn't match expected values\n"); |
| 391 | return -EINVAL; | 403 | return -EINVAL; |
| 392 | } | 404 | } |
| 393 | 405 | ||
| 394 | if (btrfs_verify_qgroup_counts(fs_info, 256, 4096, 0)) { | 406 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID, |
| 407 | nodesize, 0)) { | ||
| 395 | test_msg("Qgroup counts didn't match expected values\n"); | 408 | test_msg("Qgroup counts didn't match expected values\n"); |
| 396 | return -EINVAL; | 409 | return -EINVAL; |
| 397 | } | 410 | } |
| 398 | 411 | ||
| 399 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &old_roots); | 412 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &old_roots); |
| 400 | if (ret) { | 413 | if (ret) { |
| 401 | ulist_free(old_roots); | 414 | ulist_free(old_roots); |
| 402 | test_msg("Couldn't find old roots: %d\n", ret); | 415 | test_msg("Couldn't find old roots: %d\n", ret); |
| 403 | return ret; | 416 | return ret; |
| 404 | } | 417 | } |
| 405 | 418 | ||
| 406 | ret = remove_extent_ref(root, 4096, 4096, 0, 256); | 419 | ret = remove_extent_ref(root, nodesize, nodesize, 0, |
| 420 | BTRFS_FIRST_FREE_OBJECTID); | ||
| 407 | if (ret) | 421 | if (ret) |
| 408 | return ret; | 422 | return ret; |
| 409 | 423 | ||
| 410 | ret = btrfs_find_all_roots(&trans, fs_info, 4096, 0, &new_roots); | 424 | ret = btrfs_find_all_roots(&trans, fs_info, nodesize, 0, &new_roots); |
| 411 | if (ret) { | 425 | if (ret) { |
| 412 | ulist_free(old_roots); | 426 | ulist_free(old_roots); |
| 413 | ulist_free(new_roots); | 427 | ulist_free(new_roots); |
| @@ -415,19 +429,21 @@ static int test_multiple_refs(struct btrfs_root *root) | |||
| 415 | return ret; | 429 | return ret; |
| 416 | } | 430 | } |
| 417 | 431 | ||
| 418 | ret = btrfs_qgroup_account_extent(&trans, fs_info, 4096, 4096, | 432 | ret = btrfs_qgroup_account_extent(&trans, fs_info, nodesize, |
| 419 | old_roots, new_roots); | 433 | nodesize, old_roots, new_roots); |
| 420 | if (ret) { | 434 | if (ret) { |
| 421 | test_msg("Couldn't account space for a qgroup %d\n", ret); | 435 | test_msg("Couldn't account space for a qgroup %d\n", ret); |
| 422 | return ret; | 436 | return ret; |
| 423 | } | 437 | } |
| 424 | 438 | ||
| 425 | if (btrfs_verify_qgroup_counts(fs_info, 256, 0, 0)) { | 439 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FIRST_FREE_OBJECTID, |
| 440 | 0, 0)) { | ||
| 426 | test_msg("Qgroup counts didn't match expected values\n"); | 441 | test_msg("Qgroup counts didn't match expected values\n"); |
| 427 | return -EINVAL; | 442 | return -EINVAL; |
| 428 | } | 443 | } |
| 429 | 444 | ||
| 430 | if (btrfs_verify_qgroup_counts(fs_info, 5, 4096, 4096)) { | 445 | if (btrfs_verify_qgroup_counts(fs_info, BTRFS_FS_TREE_OBJECTID, |
| 446 | nodesize, nodesize)) { | ||
| 431 | test_msg("Qgroup counts didn't match expected values\n"); | 447 | test_msg("Qgroup counts didn't match expected values\n"); |
| 432 | return -EINVAL; | 448 | return -EINVAL; |
| 433 | } | 449 | } |
| @@ -435,13 +451,13 @@ static int test_multiple_refs(struct btrfs_root *root) | |||
| 435 | return 0; | 451 | return 0; |
| 436 | } | 452 | } |
| 437 | 453 | ||
| 438 | int btrfs_test_qgroups(void) | 454 | int btrfs_test_qgroups(u32 sectorsize, u32 nodesize) |
| 439 | { | 455 | { |
| 440 | struct btrfs_root *root; | 456 | struct btrfs_root *root; |
| 441 | struct btrfs_root *tmp_root; | 457 | struct btrfs_root *tmp_root; |
| 442 | int ret = 0; | 458 | int ret = 0; |
| 443 | 459 | ||
| 444 | root = btrfs_alloc_dummy_root(); | 460 | root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 445 | if (IS_ERR(root)) { | 461 | if (IS_ERR(root)) { |
| 446 | test_msg("Couldn't allocate root\n"); | 462 | test_msg("Couldn't allocate root\n"); |
| 447 | return PTR_ERR(root); | 463 | return PTR_ERR(root); |
| @@ -468,7 +484,8 @@ int btrfs_test_qgroups(void) | |||
| 468 | * Can't use bytenr 0, some things freak out | 484 | * Can't use bytenr 0, some things freak out |
| 469 | * *cough*backref walking code*cough* | 485 | * *cough*backref walking code*cough* |
| 470 | */ | 486 | */ |
| 471 | root->node = alloc_test_extent_buffer(root->fs_info, 4096); | 487 | root->node = alloc_test_extent_buffer(root->fs_info, nodesize, |
| 488 | nodesize); | ||
| 472 | if (!root->node) { | 489 | if (!root->node) { |
| 473 | test_msg("Couldn't allocate dummy buffer\n"); | 490 | test_msg("Couldn't allocate dummy buffer\n"); |
| 474 | ret = -ENOMEM; | 491 | ret = -ENOMEM; |
| @@ -476,16 +493,16 @@ int btrfs_test_qgroups(void) | |||
| 476 | } | 493 | } |
| 477 | btrfs_set_header_level(root->node, 0); | 494 | btrfs_set_header_level(root->node, 0); |
| 478 | btrfs_set_header_nritems(root->node, 0); | 495 | btrfs_set_header_nritems(root->node, 0); |
| 479 | root->alloc_bytenr += 8192; | 496 | root->alloc_bytenr += 2 * nodesize; |
| 480 | 497 | ||
| 481 | tmp_root = btrfs_alloc_dummy_root(); | 498 | tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 482 | if (IS_ERR(tmp_root)) { | 499 | if (IS_ERR(tmp_root)) { |
| 483 | test_msg("Couldn't allocate a fs root\n"); | 500 | test_msg("Couldn't allocate a fs root\n"); |
| 484 | ret = PTR_ERR(tmp_root); | 501 | ret = PTR_ERR(tmp_root); |
| 485 | goto out; | 502 | goto out; |
| 486 | } | 503 | } |
| 487 | 504 | ||
| 488 | tmp_root->root_key.objectid = 5; | 505 | tmp_root->root_key.objectid = BTRFS_FS_TREE_OBJECTID; |
| 489 | root->fs_info->fs_root = tmp_root; | 506 | root->fs_info->fs_root = tmp_root; |
| 490 | ret = btrfs_insert_fs_root(root->fs_info, tmp_root); | 507 | ret = btrfs_insert_fs_root(root->fs_info, tmp_root); |
| 491 | if (ret) { | 508 | if (ret) { |
| @@ -493,14 +510,14 @@ int btrfs_test_qgroups(void) | |||
| 493 | goto out; | 510 | goto out; |
| 494 | } | 511 | } |
| 495 | 512 | ||
| 496 | tmp_root = btrfs_alloc_dummy_root(); | 513 | tmp_root = btrfs_alloc_dummy_root(sectorsize, nodesize); |
| 497 | if (IS_ERR(tmp_root)) { | 514 | if (IS_ERR(tmp_root)) { |
| 498 | test_msg("Couldn't allocate a fs root\n"); | 515 | test_msg("Couldn't allocate a fs root\n"); |
| 499 | ret = PTR_ERR(tmp_root); | 516 | ret = PTR_ERR(tmp_root); |
| 500 | goto out; | 517 | goto out; |
| 501 | } | 518 | } |
| 502 | 519 | ||
| 503 | tmp_root->root_key.objectid = 256; | 520 | tmp_root->root_key.objectid = BTRFS_FIRST_FREE_OBJECTID; |
| 504 | ret = btrfs_insert_fs_root(root->fs_info, tmp_root); | 521 | ret = btrfs_insert_fs_root(root->fs_info, tmp_root); |
| 505 | if (ret) { | 522 | if (ret) { |
| 506 | test_msg("Couldn't insert fs root %d\n", ret); | 523 | test_msg("Couldn't insert fs root %d\n", ret); |
| @@ -508,10 +525,10 @@ int btrfs_test_qgroups(void) | |||
| 508 | } | 525 | } |
| 509 | 526 | ||
| 510 | test_msg("Running qgroup tests\n"); | 527 | test_msg("Running qgroup tests\n"); |
| 511 | ret = test_no_shared_qgroup(root); | 528 | ret = test_no_shared_qgroup(root, sectorsize, nodesize); |
| 512 | if (ret) | 529 | if (ret) |
| 513 | goto out; | 530 | goto out; |
| 514 | ret = test_multiple_refs(root); | 531 | ret = test_multiple_refs(root, sectorsize, nodesize); |
| 515 | out: | 532 | out: |
| 516 | btrfs_free_dummy_root(root); | 533 | btrfs_free_dummy_root(root); |
| 517 | return ret; | 534 | return ret; |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index da9e0036a864..548faaa9e169 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
| @@ -4241,6 +4241,7 @@ int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info) | |||
| 4241 | if (IS_ERR(uuid_root)) { | 4241 | if (IS_ERR(uuid_root)) { |
| 4242 | ret = PTR_ERR(uuid_root); | 4242 | ret = PTR_ERR(uuid_root); |
| 4243 | btrfs_abort_transaction(trans, tree_root, ret); | 4243 | btrfs_abort_transaction(trans, tree_root, ret); |
| 4244 | btrfs_end_transaction(trans, tree_root); | ||
| 4244 | return ret; | 4245 | return ret; |
| 4245 | } | 4246 | } |
| 4246 | 4247 | ||
| @@ -6258,27 +6259,23 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, | |||
| 6258 | return dev; | 6259 | return dev; |
| 6259 | } | 6260 | } |
| 6260 | 6261 | ||
| 6261 | static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, | 6262 | /* Return -EIO if any error, otherwise return 0. */ |
| 6262 | struct extent_buffer *leaf, | 6263 | static int btrfs_check_chunk_valid(struct btrfs_root *root, |
| 6263 | struct btrfs_chunk *chunk) | 6264 | struct extent_buffer *leaf, |
| 6265 | struct btrfs_chunk *chunk, u64 logical) | ||
| 6264 | { | 6266 | { |
| 6265 | struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; | ||
| 6266 | struct map_lookup *map; | ||
| 6267 | struct extent_map *em; | ||
| 6268 | u64 logical; | ||
| 6269 | u64 length; | 6267 | u64 length; |
| 6270 | u64 stripe_len; | 6268 | u64 stripe_len; |
| 6271 | u64 devid; | 6269 | u16 num_stripes; |
| 6272 | u8 uuid[BTRFS_UUID_SIZE]; | 6270 | u16 sub_stripes; |
| 6273 | int num_stripes; | 6271 | u64 type; |
| 6274 | int ret; | ||
| 6275 | int i; | ||
| 6276 | 6272 | ||
| 6277 | logical = key->offset; | ||
| 6278 | length = btrfs_chunk_length(leaf, chunk); | 6273 | length = btrfs_chunk_length(leaf, chunk); |
| 6279 | stripe_len = btrfs_chunk_stripe_len(leaf, chunk); | 6274 | stripe_len = btrfs_chunk_stripe_len(leaf, chunk); |
| 6280 | num_stripes = btrfs_chunk_num_stripes(leaf, chunk); | 6275 | num_stripes = btrfs_chunk_num_stripes(leaf, chunk); |
| 6281 | /* Validation check */ | 6276 | sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk); |
| 6277 | type = btrfs_chunk_type(leaf, chunk); | ||
| 6278 | |||
| 6282 | if (!num_stripes) { | 6279 | if (!num_stripes) { |
| 6283 | btrfs_err(root->fs_info, "invalid chunk num_stripes: %u", | 6280 | btrfs_err(root->fs_info, "invalid chunk num_stripes: %u", |
| 6284 | num_stripes); | 6281 | num_stripes); |
| @@ -6289,6 +6286,11 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, | |||
| 6289 | "invalid chunk logical %llu", logical); | 6286 | "invalid chunk logical %llu", logical); |
| 6290 | return -EIO; | 6287 | return -EIO; |
| 6291 | } | 6288 | } |
| 6289 | if (btrfs_chunk_sector_size(leaf, chunk) != root->sectorsize) { | ||
| 6290 | btrfs_err(root->fs_info, "invalid chunk sectorsize %u", | ||
| 6291 | btrfs_chunk_sector_size(leaf, chunk)); | ||
| 6292 | return -EIO; | ||
| 6293 | } | ||
| 6292 | if (!length || !IS_ALIGNED(length, root->sectorsize)) { | 6294 | if (!length || !IS_ALIGNED(length, root->sectorsize)) { |
| 6293 | btrfs_err(root->fs_info, | 6295 | btrfs_err(root->fs_info, |
| 6294 | "invalid chunk length %llu", length); | 6296 | "invalid chunk length %llu", length); |
| @@ -6300,13 +6302,54 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, | |||
| 6300 | return -EIO; | 6302 | return -EIO; |
| 6301 | } | 6303 | } |
| 6302 | if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) & | 6304 | if (~(BTRFS_BLOCK_GROUP_TYPE_MASK | BTRFS_BLOCK_GROUP_PROFILE_MASK) & |
| 6303 | btrfs_chunk_type(leaf, chunk)) { | 6305 | type) { |
| 6304 | btrfs_err(root->fs_info, "unrecognized chunk type: %llu", | 6306 | btrfs_err(root->fs_info, "unrecognized chunk type: %llu", |
| 6305 | ~(BTRFS_BLOCK_GROUP_TYPE_MASK | | 6307 | ~(BTRFS_BLOCK_GROUP_TYPE_MASK | |
| 6306 | BTRFS_BLOCK_GROUP_PROFILE_MASK) & | 6308 | BTRFS_BLOCK_GROUP_PROFILE_MASK) & |
| 6307 | btrfs_chunk_type(leaf, chunk)); | 6309 | btrfs_chunk_type(leaf, chunk)); |
| 6308 | return -EIO; | 6310 | return -EIO; |
| 6309 | } | 6311 | } |
| 6312 | if ((type & BTRFS_BLOCK_GROUP_RAID10 && sub_stripes != 2) || | ||
| 6313 | (type & BTRFS_BLOCK_GROUP_RAID1 && num_stripes < 1) || | ||
| 6314 | (type & BTRFS_BLOCK_GROUP_RAID5 && num_stripes < 2) || | ||
| 6315 | (type & BTRFS_BLOCK_GROUP_RAID6 && num_stripes < 3) || | ||
| 6316 | (type & BTRFS_BLOCK_GROUP_DUP && num_stripes > 2) || | ||
| 6317 | ((type & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 && | ||
| 6318 | num_stripes != 1)) { | ||
| 6319 | btrfs_err(root->fs_info, | ||
| 6320 | "invalid num_stripes:sub_stripes %u:%u for profile %llu", | ||
| 6321 | num_stripes, sub_stripes, | ||
| 6322 | type & BTRFS_BLOCK_GROUP_PROFILE_MASK); | ||
| 6323 | return -EIO; | ||
| 6324 | } | ||
| 6325 | |||
| 6326 | return 0; | ||
| 6327 | } | ||
| 6328 | |||
| 6329 | static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, | ||
| 6330 | struct extent_buffer *leaf, | ||
| 6331 | struct btrfs_chunk *chunk) | ||
| 6332 | { | ||
| 6333 | struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; | ||
| 6334 | struct map_lookup *map; | ||
| 6335 | struct extent_map *em; | ||
| 6336 | u64 logical; | ||
| 6337 | u64 length; | ||
| 6338 | u64 stripe_len; | ||
| 6339 | u64 devid; | ||
| 6340 | u8 uuid[BTRFS_UUID_SIZE]; | ||
| 6341 | int num_stripes; | ||
| 6342 | int ret; | ||
| 6343 | int i; | ||
| 6344 | |||
| 6345 | logical = key->offset; | ||
| 6346 | length = btrfs_chunk_length(leaf, chunk); | ||
| 6347 | stripe_len = btrfs_chunk_stripe_len(leaf, chunk); | ||
| 6348 | num_stripes = btrfs_chunk_num_stripes(leaf, chunk); | ||
| 6349 | |||
| 6350 | ret = btrfs_check_chunk_valid(root, leaf, chunk, logical); | ||
| 6351 | if (ret) | ||
| 6352 | return ret; | ||
| 6310 | 6353 | ||
| 6311 | read_lock(&map_tree->map_tree.lock); | 6354 | read_lock(&map_tree->map_tree.lock); |
| 6312 | em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); | 6355 | em = lookup_extent_mapping(&map_tree->map_tree, logical, 1); |
| @@ -6554,6 +6597,7 @@ int btrfs_read_sys_array(struct btrfs_root *root) | |||
| 6554 | u32 array_size; | 6597 | u32 array_size; |
| 6555 | u32 len = 0; | 6598 | u32 len = 0; |
| 6556 | u32 cur_offset; | 6599 | u32 cur_offset; |
| 6600 | u64 type; | ||
| 6557 | struct btrfs_key key; | 6601 | struct btrfs_key key; |
| 6558 | 6602 | ||
| 6559 | ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize); | 6603 | ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize); |
| @@ -6620,6 +6664,15 @@ int btrfs_read_sys_array(struct btrfs_root *root) | |||
| 6620 | break; | 6664 | break; |
| 6621 | } | 6665 | } |
| 6622 | 6666 | ||
| 6667 | type = btrfs_chunk_type(sb, chunk); | ||
| 6668 | if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) { | ||
| 6669 | btrfs_err(root->fs_info, | ||
| 6670 | "invalid chunk type %llu in sys_array at offset %u", | ||
| 6671 | type, cur_offset); | ||
| 6672 | ret = -EIO; | ||
| 6673 | break; | ||
| 6674 | } | ||
| 6675 | |||
| 6623 | len = btrfs_chunk_item_size(num_stripes); | 6676 | len = btrfs_chunk_item_size(num_stripes); |
| 6624 | if (cur_offset + len > array_size) | 6677 | if (cur_offset + len > array_size) |
| 6625 | goto out_short_read; | 6678 | goto out_short_read; |
| @@ -6638,12 +6691,14 @@ int btrfs_read_sys_array(struct btrfs_root *root) | |||
| 6638 | sb_array_offset += len; | 6691 | sb_array_offset += len; |
| 6639 | cur_offset += len; | 6692 | cur_offset += len; |
| 6640 | } | 6693 | } |
| 6694 | clear_extent_buffer_uptodate(sb); | ||
| 6641 | free_extent_buffer_stale(sb); | 6695 | free_extent_buffer_stale(sb); |
| 6642 | return ret; | 6696 | return ret; |
| 6643 | 6697 | ||
| 6644 | out_short_read: | 6698 | out_short_read: |
| 6645 | printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n", | 6699 | printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n", |
| 6646 | len, cur_offset); | 6700 | len, cur_offset); |
| 6701 | clear_extent_buffer_uptodate(sb); | ||
| 6647 | free_extent_buffer_stale(sb); | 6702 | free_extent_buffer_stale(sb); |
| 6648 | return -EIO; | 6703 | return -EIO; |
| 6649 | } | 6704 | } |
| @@ -6656,6 +6711,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root) | |||
| 6656 | struct btrfs_key found_key; | 6711 | struct btrfs_key found_key; |
| 6657 | int ret; | 6712 | int ret; |
| 6658 | int slot; | 6713 | int slot; |
| 6714 | u64 total_dev = 0; | ||
| 6659 | 6715 | ||
| 6660 | root = root->fs_info->chunk_root; | 6716 | root = root->fs_info->chunk_root; |
| 6661 | 6717 | ||
| @@ -6697,6 +6753,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root) | |||
| 6697 | ret = read_one_dev(root, leaf, dev_item); | 6753 | ret = read_one_dev(root, leaf, dev_item); |
| 6698 | if (ret) | 6754 | if (ret) |
| 6699 | goto error; | 6755 | goto error; |
| 6756 | total_dev++; | ||
| 6700 | } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { | 6757 | } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) { |
| 6701 | struct btrfs_chunk *chunk; | 6758 | struct btrfs_chunk *chunk; |
| 6702 | chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); | 6759 | chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk); |
| @@ -6706,6 +6763,28 @@ int btrfs_read_chunk_tree(struct btrfs_root *root) | |||
| 6706 | } | 6763 | } |
| 6707 | path->slots[0]++; | 6764 | path->slots[0]++; |
| 6708 | } | 6765 | } |
| 6766 | |||
| 6767 | /* | ||
| 6768 | * After loading chunk tree, we've got all device information, | ||
| 6769 | * do another round of validation checks. | ||
| 6770 | */ | ||
| 6771 | if (total_dev != root->fs_info->fs_devices->total_devices) { | ||
| 6772 | btrfs_err(root->fs_info, | ||
| 6773 | "super_num_devices %llu mismatch with num_devices %llu found here", | ||
| 6774 | btrfs_super_num_devices(root->fs_info->super_copy), | ||
| 6775 | total_dev); | ||
| 6776 | ret = -EINVAL; | ||
| 6777 | goto error; | ||
| 6778 | } | ||
| 6779 | if (btrfs_super_total_bytes(root->fs_info->super_copy) < | ||
| 6780 | root->fs_info->fs_devices->total_rw_bytes) { | ||
| 6781 | btrfs_err(root->fs_info, | ||
| 6782 | "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu", | ||
| 6783 | btrfs_super_total_bytes(root->fs_info->super_copy), | ||
| 6784 | root->fs_info->fs_devices->total_rw_bytes); | ||
| 6785 | ret = -EINVAL; | ||
| 6786 | goto error; | ||
| 6787 | } | ||
| 6709 | ret = 0; | 6788 | ret = 0; |
| 6710 | error: | 6789 | error: |
| 6711 | unlock_chunks(root); | 6790 | unlock_chunks(root); |
diff --git a/fs/ecryptfs/kthread.c b/fs/ecryptfs/kthread.c index 866bb18efefe..e818f5ac7a26 100644 --- a/fs/ecryptfs/kthread.c +++ b/fs/ecryptfs/kthread.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
| 26 | #include <linux/wait.h> | 26 | #include <linux/wait.h> |
| 27 | #include <linux/mount.h> | 27 | #include <linux/mount.h> |
| 28 | #include <linux/file.h> | ||
| 28 | #include "ecryptfs_kernel.h" | 29 | #include "ecryptfs_kernel.h" |
| 29 | 30 | ||
| 30 | struct ecryptfs_open_req { | 31 | struct ecryptfs_open_req { |
| @@ -147,7 +148,7 @@ int ecryptfs_privileged_open(struct file **lower_file, | |||
| 147 | flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR; | 148 | flags |= IS_RDONLY(d_inode(lower_dentry)) ? O_RDONLY : O_RDWR; |
| 148 | (*lower_file) = dentry_open(&req.path, flags, cred); | 149 | (*lower_file) = dentry_open(&req.path, flags, cred); |
| 149 | if (!IS_ERR(*lower_file)) | 150 | if (!IS_ERR(*lower_file)) |
| 150 | goto out; | 151 | goto have_file; |
| 151 | if ((flags & O_ACCMODE) == O_RDONLY) { | 152 | if ((flags & O_ACCMODE) == O_RDONLY) { |
| 152 | rc = PTR_ERR((*lower_file)); | 153 | rc = PTR_ERR((*lower_file)); |
| 153 | goto out; | 154 | goto out; |
| @@ -165,8 +166,16 @@ int ecryptfs_privileged_open(struct file **lower_file, | |||
| 165 | mutex_unlock(&ecryptfs_kthread_ctl.mux); | 166 | mutex_unlock(&ecryptfs_kthread_ctl.mux); |
| 166 | wake_up(&ecryptfs_kthread_ctl.wait); | 167 | wake_up(&ecryptfs_kthread_ctl.wait); |
| 167 | wait_for_completion(&req.done); | 168 | wait_for_completion(&req.done); |
| 168 | if (IS_ERR(*lower_file)) | 169 | if (IS_ERR(*lower_file)) { |
| 169 | rc = PTR_ERR(*lower_file); | 170 | rc = PTR_ERR(*lower_file); |
| 171 | goto out; | ||
| 172 | } | ||
| 173 | have_file: | ||
| 174 | if ((*lower_file)->f_op->mmap == NULL) { | ||
| 175 | fput(*lower_file); | ||
| 176 | *lower_file = NULL; | ||
| 177 | rc = -EMEDIUMTYPE; | ||
| 178 | } | ||
| 170 | out: | 179 | out: |
| 171 | return rc; | 180 | return rc; |
| 172 | } | 181 | } |
diff --git a/fs/proc/root.c b/fs/proc/root.c index 55bc7d6c8aac..06702783bf40 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c | |||
| @@ -121,6 +121,13 @@ static struct dentry *proc_mount(struct file_system_type *fs_type, | |||
| 121 | if (IS_ERR(sb)) | 121 | if (IS_ERR(sb)) |
| 122 | return ERR_CAST(sb); | 122 | return ERR_CAST(sb); |
| 123 | 123 | ||
| 124 | /* | ||
| 125 | * procfs isn't actually a stacking filesystem; however, there is | ||
| 126 | * too much magic going on inside it to permit stacking things on | ||
| 127 | * top of it | ||
| 128 | */ | ||
| 129 | sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; | ||
| 130 | |||
| 124 | if (!proc_parse_options(options, ns)) { | 131 | if (!proc_parse_options(options, ns)) { |
| 125 | deactivate_locked_super(sb); | 132 | deactivate_locked_super(sb); |
| 126 | return ERR_PTR(-EINVAL); | 133 | return ERR_PTR(-EINVAL); |
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 6bd05700d8c9..05f05f17a7c2 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h | |||
| @@ -22,37 +22,33 @@ | |||
| 22 | #include <asm-generic/qspinlock_types.h> | 22 | #include <asm-generic/qspinlock_types.h> |
| 23 | 23 | ||
| 24 | /** | 24 | /** |
| 25 | * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock | ||
| 26 | * @lock : Pointer to queued spinlock structure | ||
| 27 | * | ||
| 28 | * There is a very slight possibility of live-lock if the lockers keep coming | ||
| 29 | * and the waiter is just unfortunate enough to not see any unlock state. | ||
| 30 | */ | ||
| 31 | #ifndef queued_spin_unlock_wait | ||
| 32 | extern void queued_spin_unlock_wait(struct qspinlock *lock); | ||
| 33 | #endif | ||
| 34 | |||
| 35 | /** | ||
| 25 | * queued_spin_is_locked - is the spinlock locked? | 36 | * queued_spin_is_locked - is the spinlock locked? |
| 26 | * @lock: Pointer to queued spinlock structure | 37 | * @lock: Pointer to queued spinlock structure |
| 27 | * Return: 1 if it is locked, 0 otherwise | 38 | * Return: 1 if it is locked, 0 otherwise |
| 28 | */ | 39 | */ |
| 40 | #ifndef queued_spin_is_locked | ||
| 29 | static __always_inline int queued_spin_is_locked(struct qspinlock *lock) | 41 | static __always_inline int queued_spin_is_locked(struct qspinlock *lock) |
| 30 | { | 42 | { |
| 31 | /* | 43 | /* |
| 32 | * queued_spin_lock_slowpath() can ACQUIRE the lock before | 44 | * See queued_spin_unlock_wait(). |
| 33 | * issuing the unordered store that sets _Q_LOCKED_VAL. | ||
| 34 | * | ||
| 35 | * See both smp_cond_acquire() sites for more detail. | ||
| 36 | * | ||
| 37 | * This however means that in code like: | ||
| 38 | * | ||
| 39 | * spin_lock(A) spin_lock(B) | ||
| 40 | * spin_unlock_wait(B) spin_is_locked(A) | ||
| 41 | * do_something() do_something() | ||
| 42 | * | ||
| 43 | * Both CPUs can end up running do_something() because the store | ||
| 44 | * setting _Q_LOCKED_VAL will pass through the loads in | ||
| 45 | * spin_unlock_wait() and/or spin_is_locked(). | ||
| 46 | * | 45 | * |
| 47 | * Avoid this by issuing a full memory barrier between the spin_lock() | 46 | * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL |
| 48 | * and the loads in spin_unlock_wait() and spin_is_locked(). | 47 | * isn't immediately observable. |
| 49 | * | ||
| 50 | * Note that regular mutual exclusion doesn't care about this | ||
| 51 | * delayed store. | ||
| 52 | */ | 48 | */ |
| 53 | smp_mb(); | 49 | return atomic_read(&lock->val); |
| 54 | return atomic_read(&lock->val) & _Q_LOCKED_MASK; | ||
| 55 | } | 50 | } |
| 51 | #endif | ||
| 56 | 52 | ||
| 57 | /** | 53 | /** |
| 58 | * queued_spin_value_unlocked - is the spinlock structure unlocked? | 54 | * queued_spin_value_unlocked - is the spinlock structure unlocked? |
| @@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock) | |||
| 122 | } | 118 | } |
| 123 | #endif | 119 | #endif |
| 124 | 120 | ||
| 125 | /** | ||
| 126 | * queued_spin_unlock_wait - wait until current lock holder releases the lock | ||
| 127 | * @lock : Pointer to queued spinlock structure | ||
| 128 | * | ||
| 129 | * There is a very slight possibility of live-lock if the lockers keep coming | ||
| 130 | * and the waiter is just unfortunate enough to not see any unlock state. | ||
| 131 | */ | ||
| 132 | static inline void queued_spin_unlock_wait(struct qspinlock *lock) | ||
| 133 | { | ||
| 134 | /* See queued_spin_is_locked() */ | ||
| 135 | smp_mb(); | ||
| 136 | while (atomic_read(&lock->val) & _Q_LOCKED_MASK) | ||
| 137 | cpu_relax(); | ||
| 138 | } | ||
| 139 | |||
| 140 | #ifndef virt_spin_lock | 121 | #ifndef virt_spin_lock |
| 141 | static __always_inline bool virt_spin_lock(struct qspinlock *lock) | 122 | static __always_inline bool virt_spin_lock(struct qspinlock *lock) |
| 142 | { | 123 | { |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 786ad32631a6..07b83d32f66c 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
| @@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev); | |||
| 152 | extern int cpuidle_play_dead(void); | 152 | extern int cpuidle_play_dead(void); |
| 153 | 153 | ||
| 154 | extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); | 154 | extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); |
| 155 | static inline struct cpuidle_device *cpuidle_get_device(void) | ||
| 156 | {return __this_cpu_read(cpuidle_devices); } | ||
| 155 | #else | 157 | #else |
| 156 | static inline void disable_cpuidle(void) { } | 158 | static inline void disable_cpuidle(void) { } |
| 157 | static inline bool cpuidle_not_available(struct cpuidle_driver *drv, | 159 | static inline bool cpuidle_not_available(struct cpuidle_driver *drv, |
| @@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } | |||
| 187 | static inline int cpuidle_play_dead(void) {return -ENODEV; } | 189 | static inline int cpuidle_play_dead(void) {return -ENODEV; } |
| 188 | static inline struct cpuidle_driver *cpuidle_get_cpu_driver( | 190 | static inline struct cpuidle_driver *cpuidle_get_cpu_driver( |
| 189 | struct cpuidle_device *dev) {return NULL; } | 191 | struct cpuidle_device *dev) {return NULL; } |
| 192 | static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; } | ||
| 190 | #endif | 193 | #endif |
| 191 | 194 | ||
| 192 | #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) | 195 | #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND) |
diff --git a/include/linux/efi.h b/include/linux/efi.h index c2db3ca22217..f196dd0b0f2f 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
| @@ -1005,7 +1005,7 @@ extern int efi_memattr_apply_permissions(struct mm_struct *mm, | |||
| 1005 | /* Iterate through an efi_memory_map */ | 1005 | /* Iterate through an efi_memory_map */ |
| 1006 | #define for_each_efi_memory_desc_in_map(m, md) \ | 1006 | #define for_each_efi_memory_desc_in_map(m, md) \ |
| 1007 | for ((md) = (m)->map; \ | 1007 | for ((md) = (m)->map; \ |
| 1008 | (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \ | 1008 | ((void *)(md) + (m)->desc_size) <= (m)->map_end; \ |
| 1009 | (md) = (void *)(md) + (m)->desc_size) | 1009 | (md) = (void *)(md) + (m)->desc_size) |
| 1010 | 1010 | ||
| 1011 | /** | 1011 | /** |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 035abdf62cfe..73a48479892d 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
| @@ -1240,8 +1240,6 @@ struct mlx5_destroy_psv_out { | |||
| 1240 | u8 rsvd[8]; | 1240 | u8 rsvd[8]; |
| 1241 | }; | 1241 | }; |
| 1242 | 1242 | ||
| 1243 | #define MLX5_CMD_OP_MAX 0x920 | ||
| 1244 | |||
| 1245 | enum { | 1243 | enum { |
| 1246 | VPORT_STATE_DOWN = 0x0, | 1244 | VPORT_STATE_DOWN = 0x0, |
| 1247 | VPORT_STATE_UP = 0x1, | 1245 | VPORT_STATE_UP = 0x1, |
| @@ -1369,6 +1367,12 @@ enum mlx5_cap_type { | |||
| 1369 | #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ | 1367 | #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ |
| 1370 | MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) | 1368 | MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap) |
| 1371 | 1369 | ||
| 1370 | #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ | ||
| 1371 | MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) | ||
| 1372 | |||
| 1373 | #define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \ | ||
| 1374 | MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap) | ||
| 1375 | |||
| 1372 | #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ | 1376 | #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ |
| 1373 | MLX5_GET(flow_table_eswitch_cap, \ | 1377 | MLX5_GET(flow_table_eswitch_cap, \ |
| 1374 | mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) | 1378 | mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 9a05cd7e5890..e955a2859009 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
| @@ -205,7 +205,8 @@ enum { | |||
| 205 | MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, | 205 | MLX5_CMD_OP_ALLOC_FLOW_COUNTER = 0x939, |
| 206 | MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, | 206 | MLX5_CMD_OP_DEALLOC_FLOW_COUNTER = 0x93a, |
| 207 | MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, | 207 | MLX5_CMD_OP_QUERY_FLOW_COUNTER = 0x93b, |
| 208 | MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c | 208 | MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c, |
| 209 | MLX5_CMD_OP_MAX | ||
| 209 | }; | 210 | }; |
| 210 | 211 | ||
| 211 | struct mlx5_ifc_flow_table_fields_supported_bits { | 212 | struct mlx5_ifc_flow_table_fields_supported_bits { |
| @@ -500,7 +501,9 @@ struct mlx5_ifc_e_switch_cap_bits { | |||
| 500 | u8 vport_svlan_insert[0x1]; | 501 | u8 vport_svlan_insert[0x1]; |
| 501 | u8 vport_cvlan_insert_if_not_exist[0x1]; | 502 | u8 vport_cvlan_insert_if_not_exist[0x1]; |
| 502 | u8 vport_cvlan_insert_overwrite[0x1]; | 503 | u8 vport_cvlan_insert_overwrite[0x1]; |
| 503 | u8 reserved_at_5[0x1b]; | 504 | u8 reserved_at_5[0x19]; |
| 505 | u8 nic_vport_node_guid_modify[0x1]; | ||
| 506 | u8 nic_vport_port_guid_modify[0x1]; | ||
| 504 | 507 | ||
| 505 | u8 reserved_at_20[0x7e0]; | 508 | u8 reserved_at_20[0x7e0]; |
| 506 | }; | 509 | }; |
| @@ -4583,7 +4586,10 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits { | |||
| 4583 | }; | 4586 | }; |
| 4584 | 4587 | ||
| 4585 | struct mlx5_ifc_modify_nic_vport_field_select_bits { | 4588 | struct mlx5_ifc_modify_nic_vport_field_select_bits { |
| 4586 | u8 reserved_at_0[0x19]; | 4589 | u8 reserved_at_0[0x16]; |
| 4590 | u8 node_guid[0x1]; | ||
| 4591 | u8 port_guid[0x1]; | ||
| 4592 | u8 reserved_at_18[0x1]; | ||
| 4587 | u8 mtu[0x1]; | 4593 | u8 mtu[0x1]; |
| 4588 | u8 change_event[0x1]; | 4594 | u8 change_event[0x1]; |
| 4589 | u8 promisc[0x1]; | 4595 | u8 promisc[0x1]; |
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 64221027bf1f..266320feb160 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h | |||
| @@ -460,10 +460,9 @@ struct mlx5_core_qp { | |||
| 460 | }; | 460 | }; |
| 461 | 461 | ||
| 462 | struct mlx5_qp_path { | 462 | struct mlx5_qp_path { |
| 463 | u8 fl; | 463 | u8 fl_free_ar; |
| 464 | u8 rsvd3; | 464 | u8 rsvd3; |
| 465 | u8 free_ar; | 465 | __be16 pkey_index; |
| 466 | u8 pkey_index; | ||
| 467 | u8 rsvd0; | 466 | u8 rsvd0; |
| 468 | u8 grh_mlid; | 467 | u8 grh_mlid; |
| 469 | __be16 rlid; | 468 | __be16 rlid; |
| @@ -560,6 +559,7 @@ struct mlx5_modify_qp_mbox_in { | |||
| 560 | __be32 optparam; | 559 | __be32 optparam; |
| 561 | u8 rsvd0[4]; | 560 | u8 rsvd0[4]; |
| 562 | struct mlx5_qp_context ctx; | 561 | struct mlx5_qp_context ctx; |
| 562 | u8 rsvd2[16]; | ||
| 563 | }; | 563 | }; |
| 564 | 564 | ||
| 565 | struct mlx5_modify_qp_mbox_out { | 565 | struct mlx5_modify_qp_mbox_out { |
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 301da4a5e6bf..6c16c198f680 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h | |||
| @@ -50,6 +50,8 @@ int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu); | |||
| 50 | int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, | 50 | int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, |
| 51 | u64 *system_image_guid); | 51 | u64 *system_image_guid); |
| 52 | int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); | 52 | int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); |
| 53 | int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, | ||
| 54 | u32 vport, u64 node_guid); | ||
| 53 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, | 55 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, |
| 54 | u16 *qkey_viol_cntr); | 56 | u16 *qkey_viol_cntr); |
| 55 | int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, | 57 | int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, |
diff --git a/include/linux/of.h b/include/linux/of.h index c7292e8ea080..74eb28cadbef 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
| @@ -614,7 +614,7 @@ static inline struct device_node *of_parse_phandle(const struct device_node *np, | |||
| 614 | return NULL; | 614 | return NULL; |
| 615 | } | 615 | } |
| 616 | 616 | ||
| 617 | static inline int of_parse_phandle_with_args(struct device_node *np, | 617 | static inline int of_parse_phandle_with_args(const struct device_node *np, |
| 618 | const char *list_name, | 618 | const char *list_name, |
| 619 | const char *cells_name, | 619 | const char *cells_name, |
| 620 | int index, | 620 | int index, |
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index f6e9e85164e8..b969e9443962 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h | |||
| @@ -8,7 +8,7 @@ struct pci_dev; | |||
| 8 | struct of_phandle_args; | 8 | struct of_phandle_args; |
| 9 | struct device_node; | 9 | struct device_node; |
| 10 | 10 | ||
| 11 | #ifdef CONFIG_OF | 11 | #ifdef CONFIG_OF_PCI |
| 12 | int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); | 12 | int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); |
| 13 | struct device_node *of_pci_find_child_device(struct device_node *parent, | 13 | struct device_node *of_pci_find_child_device(struct device_node *parent, |
| 14 | unsigned int devfn); | 14 | unsigned int devfn); |
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index ad2f67054372..c201060e0c6d 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h | |||
| @@ -31,6 +31,13 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem); | |||
| 31 | int of_reserved_mem_device_init(struct device *dev); | 31 | int of_reserved_mem_device_init(struct device *dev); |
| 32 | void of_reserved_mem_device_release(struct device *dev); | 32 | void of_reserved_mem_device_release(struct device *dev); |
| 33 | 33 | ||
| 34 | int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, | ||
| 35 | phys_addr_t align, | ||
| 36 | phys_addr_t start, | ||
| 37 | phys_addr_t end, | ||
| 38 | bool nomap, | ||
| 39 | phys_addr_t *res_base); | ||
| 40 | |||
| 34 | void fdt_init_reserved_mem(void); | 41 | void fdt_init_reserved_mem(void); |
| 35 | void fdt_reserved_mem_save_node(unsigned long node, const char *uname, | 42 | void fdt_reserved_mem_save_node(unsigned long node, const char *uname, |
| 36 | phys_addr_t base, phys_addr_t size); | 43 | phys_addr_t base, phys_addr_t size); |
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 7973a821ac58..ead97654c4e9 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h | |||
| @@ -277,7 +277,10 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s) | |||
| 277 | 277 | ||
| 278 | static inline int raw_read_seqcount_latch(seqcount_t *s) | 278 | static inline int raw_read_seqcount_latch(seqcount_t *s) |
| 279 | { | 279 | { |
| 280 | return lockless_dereference(s)->sequence; | 280 | int seq = READ_ONCE(s->sequence); |
| 281 | /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ | ||
| 282 | smp_read_barrier_depends(); | ||
| 283 | return seq; | ||
| 281 | } | 284 | } |
| 282 | 285 | ||
| 283 | /** | 286 | /** |
| @@ -331,7 +334,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s) | |||
| 331 | * unsigned seq, idx; | 334 | * unsigned seq, idx; |
| 332 | * | 335 | * |
| 333 | * do { | 336 | * do { |
| 334 | * seq = lockless_dereference(latch)->seq; | 337 | * seq = raw_read_seqcount_latch(&latch->seq); |
| 335 | * | 338 | * |
| 336 | * idx = seq & 0x01; | 339 | * idx = seq & 0x01; |
| 337 | * entry = data_query(latch->data[idx], ...); | 340 | * entry = data_query(latch->data[idx], ...); |
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index e45abe7db9a6..ee517bef0db0 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
| @@ -335,6 +335,8 @@ struct thermal_genl_event { | |||
| 335 | * @get_trend: a pointer to a function that reads the sensor temperature trend. | 335 | * @get_trend: a pointer to a function that reads the sensor temperature trend. |
| 336 | * @set_emul_temp: a pointer to a function that sets sensor emulated | 336 | * @set_emul_temp: a pointer to a function that sets sensor emulated |
| 337 | * temperature. | 337 | * temperature. |
| 338 | * @set_trip_temp: a pointer to a function that sets the trip temperature on | ||
| 339 | * hardware. | ||
| 338 | */ | 340 | */ |
| 339 | struct thermal_zone_of_device_ops { | 341 | struct thermal_zone_of_device_ops { |
| 340 | int (*get_temp)(void *, int *); | 342 | int (*get_temp)(void *, int *); |
diff --git a/include/net/compat.h b/include/net/compat.h index 48103cf94e97..13de0ccaa059 100644 --- a/include/net/compat.h +++ b/include/net/compat.h | |||
| @@ -42,6 +42,7 @@ int compat_sock_get_timestampns(struct sock *, struct timespec __user *); | |||
| 42 | 42 | ||
| 43 | int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, | 43 | int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *, |
| 44 | struct sockaddr __user **, struct iovec **); | 44 | struct sockaddr __user **, struct iovec **); |
| 45 | struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval); | ||
| 45 | asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *, | 46 | asmlinkage long compat_sys_sendmsg(int, struct compat_msghdr __user *, |
| 46 | unsigned int); | 47 | unsigned int); |
| 47 | asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *, | 48 | asmlinkage long compat_sys_sendmmsg(int, struct compat_mmsghdr __user *, |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index af4c10ebb241..cd6018a9ee24 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
| @@ -1232,7 +1232,7 @@ void ip_vs_conn_expire_now(struct ip_vs_conn *cp); | |||
| 1232 | const char *ip_vs_state_name(__u16 proto, int state); | 1232 | const char *ip_vs_state_name(__u16 proto, int state); |
| 1233 | 1233 | ||
| 1234 | void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); | 1234 | void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp); |
| 1235 | int ip_vs_check_template(struct ip_vs_conn *ct); | 1235 | int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest); |
| 1236 | void ip_vs_random_dropentry(struct netns_ipvs *ipvs); | 1236 | void ip_vs_random_dropentry(struct netns_ipvs *ipvs); |
| 1237 | int ip_vs_conn_init(void); | 1237 | int ip_vs_conn_init(void); |
| 1238 | void ip_vs_conn_cleanup(void); | 1238 | void ip_vs_conn_cleanup(void); |
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h index 9c5638ad872e..0dbce55437f2 100644 --- a/include/net/netfilter/nf_queue.h +++ b/include/net/netfilter/nf_queue.h | |||
| @@ -28,8 +28,8 @@ struct nf_queue_handler { | |||
| 28 | struct nf_hook_ops *ops); | 28 | struct nf_hook_ops *ops); |
| 29 | }; | 29 | }; |
| 30 | 30 | ||
| 31 | void nf_register_queue_handler(const struct nf_queue_handler *qh); | 31 | void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh); |
| 32 | void nf_unregister_queue_handler(void); | 32 | void nf_unregister_queue_handler(struct net *net); |
| 33 | void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); | 33 | void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict); |
| 34 | 34 | ||
| 35 | void nf_queue_entry_get_refs(struct nf_queue_entry *entry); | 35 | void nf_queue_entry_get_refs(struct nf_queue_entry *entry); |
diff --git a/include/net/netns/netfilter.h b/include/net/netns/netfilter.h index 38aa4983e2a9..36d723579af2 100644 --- a/include/net/netns/netfilter.h +++ b/include/net/netns/netfilter.h | |||
| @@ -5,11 +5,13 @@ | |||
| 5 | 5 | ||
| 6 | struct proc_dir_entry; | 6 | struct proc_dir_entry; |
| 7 | struct nf_logger; | 7 | struct nf_logger; |
| 8 | struct nf_queue_handler; | ||
| 8 | 9 | ||
| 9 | struct netns_nf { | 10 | struct netns_nf { |
| 10 | #if defined CONFIG_PROC_FS | 11 | #if defined CONFIG_PROC_FS |
| 11 | struct proc_dir_entry *proc_netfilter; | 12 | struct proc_dir_entry *proc_netfilter; |
| 12 | #endif | 13 | #endif |
| 14 | const struct nf_queue_handler __rcu *queue_handler; | ||
| 13 | const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO]; | 15 | const struct nf_logger __rcu *nf_loggers[NFPROTO_NUMPROTO]; |
| 14 | #ifdef CONFIG_SYSCTL | 16 | #ifdef CONFIG_SYSCTL |
| 15 | struct ctl_table_header *nf_log_dir_header; | 17 | struct ctl_table_header *nf_log_dir_header; |
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 0f7efa88f210..3722dda0199d 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h | |||
| @@ -392,16 +392,20 @@ struct tc_cls_u32_offload { | |||
| 392 | }; | 392 | }; |
| 393 | }; | 393 | }; |
| 394 | 394 | ||
| 395 | static inline bool tc_should_offload(struct net_device *dev, u32 flags) | 395 | static inline bool tc_should_offload(const struct net_device *dev, |
| 396 | const struct tcf_proto *tp, u32 flags) | ||
| 396 | { | 397 | { |
| 398 | const struct Qdisc *sch = tp->q; | ||
| 399 | const struct Qdisc_class_ops *cops = sch->ops->cl_ops; | ||
| 400 | |||
| 397 | if (!(dev->features & NETIF_F_HW_TC)) | 401 | if (!(dev->features & NETIF_F_HW_TC)) |
| 398 | return false; | 402 | return false; |
| 399 | |||
| 400 | if (flags & TCA_CLS_FLAGS_SKIP_HW) | 403 | if (flags & TCA_CLS_FLAGS_SKIP_HW) |
| 401 | return false; | 404 | return false; |
| 402 | |||
| 403 | if (!dev->netdev_ops->ndo_setup_tc) | 405 | if (!dev->netdev_ops->ndo_setup_tc) |
| 404 | return false; | 406 | return false; |
| 407 | if (cops && cops->tcf_cl_offload) | ||
| 408 | return cops->tcf_cl_offload(tp->classid); | ||
| 405 | 409 | ||
| 406 | return true; | 410 | return true; |
| 407 | } | 411 | } |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index a1fd76c22a59..62d553184e91 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
| @@ -168,6 +168,7 @@ struct Qdisc_class_ops { | |||
| 168 | 168 | ||
| 169 | /* Filter manipulation */ | 169 | /* Filter manipulation */ |
| 170 | struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); | 170 | struct tcf_proto __rcu ** (*tcf_chain)(struct Qdisc *, unsigned long); |
| 171 | bool (*tcf_cl_offload)(u32 classid); | ||
| 171 | unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, | 172 | unsigned long (*bind_tcf)(struct Qdisc *, unsigned long, |
| 172 | u32 classid); | 173 | u32 classid); |
| 173 | void (*unbind_tcf)(struct Qdisc *, unsigned long); | 174 | void (*unbind_tcf)(struct Qdisc *, unsigned long); |
| @@ -691,9 +692,11 @@ static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch) | |||
| 691 | /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ | 692 | /* we can reuse ->gso_skb because peek isn't called for root qdiscs */ |
| 692 | if (!sch->gso_skb) { | 693 | if (!sch->gso_skb) { |
| 693 | sch->gso_skb = sch->dequeue(sch); | 694 | sch->gso_skb = sch->dequeue(sch); |
| 694 | if (sch->gso_skb) | 695 | if (sch->gso_skb) { |
| 695 | /* it's still part of the queue */ | 696 | /* it's still part of the queue */ |
| 697 | qdisc_qstats_backlog_inc(sch, sch->gso_skb); | ||
| 696 | sch->q.qlen++; | 698 | sch->q.qlen++; |
| 699 | } | ||
| 697 | } | 700 | } |
| 698 | 701 | ||
| 699 | return sch->gso_skb; | 702 | return sch->gso_skb; |
| @@ -706,6 +709,7 @@ static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch) | |||
| 706 | 709 | ||
| 707 | if (skb) { | 710 | if (skb) { |
| 708 | sch->gso_skb = NULL; | 711 | sch->gso_skb = NULL; |
| 712 | qdisc_qstats_backlog_dec(sch, skb); | ||
| 709 | sch->q.qlen--; | 713 | sch->q.qlen--; |
| 710 | } else { | 714 | } else { |
| 711 | skb = sch->dequeue(sch); | 715 | skb = sch->dequeue(sch); |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 432bed510369..7e440d41487a 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
| @@ -217,10 +217,10 @@ enum ib_device_cap_flags { | |||
| 217 | IB_DEVICE_CROSS_CHANNEL = (1 << 27), | 217 | IB_DEVICE_CROSS_CHANNEL = (1 << 27), |
| 218 | IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), | 218 | IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29), |
| 219 | IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), | 219 | IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30), |
| 220 | IB_DEVICE_ON_DEMAND_PAGING = (1 << 31), | 220 | IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31), |
| 221 | IB_DEVICE_SG_GAPS_REG = (1ULL << 32), | 221 | IB_DEVICE_SG_GAPS_REG = (1ULL << 32), |
| 222 | IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33), | 222 | IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33), |
| 223 | IB_DEVICE_RAW_SCATTER_FCS = ((u64)1 << 34), | 223 | IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34), |
| 224 | }; | 224 | }; |
| 225 | 225 | ||
| 226 | enum ib_signature_prot_cap { | 226 | enum ib_signature_prot_cap { |
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 23c6960e94a4..2bdd1e3e7007 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h | |||
| @@ -118,7 +118,7 @@ struct btrfs_ioctl_vol_args_v2 { | |||
| 118 | }; | 118 | }; |
| 119 | union { | 119 | union { |
| 120 | char name[BTRFS_SUBVOL_NAME_MAX + 1]; | 120 | char name[BTRFS_SUBVOL_NAME_MAX + 1]; |
| 121 | u64 devid; | 121 | __u64 devid; |
| 122 | }; | 122 | }; |
| 123 | }; | 123 | }; |
| 124 | 124 | ||
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h index ca1054dd8249..72a04a0e8cce 100644 --- a/include/uapi/linux/gtp.h +++ b/include/uapi/linux/gtp.h | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | #ifndef _UAPI_LINUX_GTP_H_ | 1 | #ifndef _UAPI_LINUX_GTP_H_ |
| 2 | #define _UAPI_LINUX_GTP_H__ | 2 | #define _UAPI_LINUX_GTP_H_ |
| 3 | 3 | ||
| 4 | enum gtp_genl_cmds { | 4 | enum gtp_genl_cmds { |
| 5 | GTP_CMD_NEWPDP, | 5 | GTP_CMD_NEWPDP, |
diff --git a/include/uapi/sound/Kbuild b/include/uapi/sound/Kbuild index a7f27704f980..691984cb0b91 100644 --- a/include/uapi/sound/Kbuild +++ b/include/uapi/sound/Kbuild | |||
| @@ -1,5 +1,6 @@ | |||
| 1 | # UAPI Header export list | 1 | # UAPI Header export list |
| 2 | header-y += asequencer.h | 2 | header-y += asequencer.h |
| 3 | header-y += asoc.h | ||
| 3 | header-y += asound.h | 4 | header-y += asound.h |
| 4 | header-y += asound_fm.h | 5 | header-y += asound_fm.h |
| 5 | header-y += compress_offload.h | 6 | header-y += compress_offload.h |
| @@ -10,3 +11,5 @@ header-y += hdsp.h | |||
| 10 | header-y += hdspm.h | 11 | header-y += hdspm.h |
| 11 | header-y += sb16_csp.h | 12 | header-y += sb16_csp.h |
| 12 | header-y += sfnt_info.h | 13 | header-y += sfnt_info.h |
| 14 | header-y += tlv.h | ||
| 15 | header-y += usb_stream.h | ||
diff --git a/kernel/events/core.c b/kernel/events/core.c index 05b923e2111a..9345028f2a82 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -3905,10 +3905,8 @@ static void _free_event(struct perf_event *event) | |||
| 3905 | if (event->ctx) | 3905 | if (event->ctx) |
| 3906 | put_ctx(event->ctx); | 3906 | put_ctx(event->ctx); |
| 3907 | 3907 | ||
| 3908 | if (event->pmu) { | 3908 | exclusive_event_destroy(event); |
| 3909 | exclusive_event_destroy(event); | 3909 | module_put(event->pmu->module); |
| 3910 | module_put(event->pmu->module); | ||
| 3911 | } | ||
| 3912 | 3910 | ||
| 3913 | call_rcu(&event->rcu_head, free_event_rcu); | 3911 | call_rcu(&event->rcu_head, free_event_rcu); |
| 3914 | } | 3912 | } |
diff --git a/kernel/futex.c b/kernel/futex.c index ee25f5ba4aca..33664f70e2d2 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
| @@ -469,7 +469,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw) | |||
| 469 | { | 469 | { |
| 470 | unsigned long address = (unsigned long)uaddr; | 470 | unsigned long address = (unsigned long)uaddr; |
| 471 | struct mm_struct *mm = current->mm; | 471 | struct mm_struct *mm = current->mm; |
| 472 | struct page *page; | 472 | struct page *page, *tail; |
| 473 | struct address_space *mapping; | 473 | struct address_space *mapping; |
| 474 | int err, ro = 0; | 474 | int err, ro = 0; |
| 475 | 475 | ||
| @@ -530,7 +530,15 @@ again: | |||
| 530 | * considered here and page lock forces unnecessarily serialization | 530 | * considered here and page lock forces unnecessarily serialization |
| 531 | * From this point on, mapping will be re-verified if necessary and | 531 | * From this point on, mapping will be re-verified if necessary and |
| 532 | * page lock will be acquired only if it is unavoidable | 532 | * page lock will be acquired only if it is unavoidable |
| 533 | */ | 533 | * |
| 534 | * Mapping checks require the head page for any compound page so the | ||
| 535 | * head page and mapping is looked up now. For anonymous pages, it | ||
| 536 | * does not matter if the page splits in the future as the key is | ||
| 537 | * based on the address. For filesystem-backed pages, the tail is | ||
| 538 | * required as the index of the page determines the key. For | ||
| 539 | * base pages, there is no tail page and tail == page. | ||
| 540 | */ | ||
| 541 | tail = page; | ||
| 534 | page = compound_head(page); | 542 | page = compound_head(page); |
| 535 | mapping = READ_ONCE(page->mapping); | 543 | mapping = READ_ONCE(page->mapping); |
| 536 | 544 | ||
| @@ -654,7 +662,7 @@ again: | |||
| 654 | 662 | ||
| 655 | key->both.offset |= FUT_OFF_INODE; /* inode-based key */ | 663 | key->both.offset |= FUT_OFF_INODE; /* inode-based key */ |
| 656 | key->shared.inode = inode; | 664 | key->shared.inode = inode; |
| 657 | key->shared.pgoff = basepage_index(page); | 665 | key->shared.pgoff = basepage_index(tail); |
| 658 | rcu_read_unlock(); | 666 | rcu_read_unlock(); |
| 659 | } | 667 | } |
| 660 | 668 | ||
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c index e364b424b019..79d2d765a75f 100644 --- a/kernel/locking/mutex.c +++ b/kernel/locking/mutex.c | |||
| @@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx) | |||
| 486 | if (!hold_ctx) | 486 | if (!hold_ctx) |
| 487 | return 0; | 487 | return 0; |
| 488 | 488 | ||
| 489 | if (unlikely(ctx == hold_ctx)) | ||
| 490 | return -EALREADY; | ||
| 491 | |||
| 492 | if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && | 489 | if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && |
| 493 | (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { | 490 | (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { |
| 494 | #ifdef CONFIG_DEBUG_MUTEXES | 491 | #ifdef CONFIG_DEBUG_MUTEXES |
| @@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, | |||
| 514 | unsigned long flags; | 511 | unsigned long flags; |
| 515 | int ret; | 512 | int ret; |
| 516 | 513 | ||
| 514 | if (use_ww_ctx) { | ||
| 515 | struct ww_mutex *ww = container_of(lock, struct ww_mutex, base); | ||
| 516 | if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) | ||
| 517 | return -EALREADY; | ||
| 518 | } | ||
| 519 | |||
| 517 | preempt_disable(); | 520 | preempt_disable(); |
| 518 | mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); | 521 | mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); |
| 519 | 522 | ||
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c index ce2f75e32ae1..5fc8c311b8fe 100644 --- a/kernel/locking/qspinlock.c +++ b/kernel/locking/qspinlock.c | |||
| @@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock, | |||
| 267 | #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath | 267 | #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath |
| 268 | #endif | 268 | #endif |
| 269 | 269 | ||
| 270 | /* | ||
| 271 | * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before | ||
| 272 | * issuing an _unordered_ store to set _Q_LOCKED_VAL. | ||
| 273 | * | ||
| 274 | * This means that the store can be delayed, but no later than the | ||
| 275 | * store-release from the unlock. This means that simply observing | ||
| 276 | * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired. | ||
| 277 | * | ||
| 278 | * There are two paths that can issue the unordered store: | ||
| 279 | * | ||
| 280 | * (1) clear_pending_set_locked(): *,1,0 -> *,0,1 | ||
| 281 | * | ||
| 282 | * (2) set_locked(): t,0,0 -> t,0,1 ; t != 0 | ||
| 283 | * atomic_cmpxchg_relaxed(): t,0,0 -> 0,0,1 | ||
| 284 | * | ||
| 285 | * However, in both cases we have other !0 state we've set before to queue | ||
| 286 | * ourseves: | ||
| 287 | * | ||
| 288 | * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our | ||
| 289 | * load is constrained by that ACQUIRE to not pass before that, and thus must | ||
| 290 | * observe the store. | ||
| 291 | * | ||
| 292 | * For (2) we have a more intersting scenario. We enqueue ourselves using | ||
| 293 | * xchg_tail(), which ends up being a RELEASE. This in itself is not | ||
| 294 | * sufficient, however that is followed by an smp_cond_acquire() on the same | ||
| 295 | * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and | ||
| 296 | * guarantees we must observe that store. | ||
| 297 | * | ||
| 298 | * Therefore both cases have other !0 state that is observable before the | ||
| 299 | * unordered locked byte store comes through. This means we can use that to | ||
| 300 | * wait for the lock store, and then wait for an unlock. | ||
| 301 | */ | ||
| 302 | #ifndef queued_spin_unlock_wait | ||
| 303 | void queued_spin_unlock_wait(struct qspinlock *lock) | ||
| 304 | { | ||
| 305 | u32 val; | ||
| 306 | |||
| 307 | for (;;) { | ||
| 308 | val = atomic_read(&lock->val); | ||
| 309 | |||
| 310 | if (!val) /* not locked, we're done */ | ||
| 311 | goto done; | ||
| 312 | |||
| 313 | if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */ | ||
| 314 | break; | ||
| 315 | |||
| 316 | /* not locked, but pending, wait until we observe the lock */ | ||
| 317 | cpu_relax(); | ||
| 318 | } | ||
| 319 | |||
| 320 | /* any unlock is good */ | ||
| 321 | while (atomic_read(&lock->val) & _Q_LOCKED_MASK) | ||
| 322 | cpu_relax(); | ||
| 323 | |||
| 324 | done: | ||
| 325 | smp_rmb(); /* CTRL + RMB -> ACQUIRE */ | ||
| 326 | } | ||
| 327 | EXPORT_SYMBOL(queued_spin_unlock_wait); | ||
| 328 | #endif | ||
| 329 | |||
| 270 | #endif /* _GEN_PV_LOCK_SLOWPATH */ | 330 | #endif /* _GEN_PV_LOCK_SLOWPATH */ |
| 271 | 331 | ||
| 272 | /** | 332 | /** |
diff --git a/kernel/relay.c b/kernel/relay.c index 074994bcfa9b..04d7cf3ef8cf 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
| @@ -614,6 +614,7 @@ free_bufs: | |||
| 614 | 614 | ||
| 615 | kref_put(&chan->kref, relay_destroy_channel); | 615 | kref_put(&chan->kref, relay_destroy_channel); |
| 616 | mutex_unlock(&relay_channels_mutex); | 616 | mutex_unlock(&relay_channels_mutex); |
| 617 | kfree(chan); | ||
| 617 | return NULL; | 618 | return NULL; |
| 618 | } | 619 | } |
| 619 | EXPORT_SYMBOL_GPL(relay_open); | 620 | EXPORT_SYMBOL_GPL(relay_open); |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7f2cae4620c7..017d5394f5dc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -2253,9 +2253,11 @@ int sysctl_numa_balancing(struct ctl_table *table, int write, | |||
| 2253 | #endif | 2253 | #endif |
| 2254 | #endif | 2254 | #endif |
| 2255 | 2255 | ||
| 2256 | #ifdef CONFIG_SCHEDSTATS | ||
| 2257 | |||
| 2256 | DEFINE_STATIC_KEY_FALSE(sched_schedstats); | 2258 | DEFINE_STATIC_KEY_FALSE(sched_schedstats); |
| 2259 | static bool __initdata __sched_schedstats = false; | ||
| 2257 | 2260 | ||
| 2258 | #ifdef CONFIG_SCHEDSTATS | ||
| 2259 | static void set_schedstats(bool enabled) | 2261 | static void set_schedstats(bool enabled) |
| 2260 | { | 2262 | { |
| 2261 | if (enabled) | 2263 | if (enabled) |
| @@ -2278,11 +2280,16 @@ static int __init setup_schedstats(char *str) | |||
| 2278 | if (!str) | 2280 | if (!str) |
| 2279 | goto out; | 2281 | goto out; |
| 2280 | 2282 | ||
| 2283 | /* | ||
| 2284 | * This code is called before jump labels have been set up, so we can't | ||
| 2285 | * change the static branch directly just yet. Instead set a temporary | ||
| 2286 | * variable so init_schedstats() can do it later. | ||
| 2287 | */ | ||
| 2281 | if (!strcmp(str, "enable")) { | 2288 | if (!strcmp(str, "enable")) { |
| 2282 | set_schedstats(true); | 2289 | __sched_schedstats = true; |
| 2283 | ret = 1; | 2290 | ret = 1; |
| 2284 | } else if (!strcmp(str, "disable")) { | 2291 | } else if (!strcmp(str, "disable")) { |
| 2285 | set_schedstats(false); | 2292 | __sched_schedstats = false; |
| 2286 | ret = 1; | 2293 | ret = 1; |
| 2287 | } | 2294 | } |
| 2288 | out: | 2295 | out: |
| @@ -2293,6 +2300,11 @@ out: | |||
| 2293 | } | 2300 | } |
| 2294 | __setup("schedstats=", setup_schedstats); | 2301 | __setup("schedstats=", setup_schedstats); |
| 2295 | 2302 | ||
| 2303 | static void __init init_schedstats(void) | ||
| 2304 | { | ||
| 2305 | set_schedstats(__sched_schedstats); | ||
| 2306 | } | ||
| 2307 | |||
| 2296 | #ifdef CONFIG_PROC_SYSCTL | 2308 | #ifdef CONFIG_PROC_SYSCTL |
| 2297 | int sysctl_schedstats(struct ctl_table *table, int write, | 2309 | int sysctl_schedstats(struct ctl_table *table, int write, |
| 2298 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2310 | void __user *buffer, size_t *lenp, loff_t *ppos) |
| @@ -2313,8 +2325,10 @@ int sysctl_schedstats(struct ctl_table *table, int write, | |||
| 2313 | set_schedstats(state); | 2325 | set_schedstats(state); |
| 2314 | return err; | 2326 | return err; |
| 2315 | } | 2327 | } |
| 2316 | #endif | 2328 | #endif /* CONFIG_PROC_SYSCTL */ |
| 2317 | #endif | 2329 | #else /* !CONFIG_SCHEDSTATS */ |
| 2330 | static inline void init_schedstats(void) {} | ||
| 2331 | #endif /* CONFIG_SCHEDSTATS */ | ||
| 2318 | 2332 | ||
| 2319 | /* | 2333 | /* |
| 2320 | * fork()/clone()-time setup: | 2334 | * fork()/clone()-time setup: |
| @@ -3156,7 +3170,8 @@ static noinline void __schedule_bug(struct task_struct *prev) | |||
| 3156 | static inline void schedule_debug(struct task_struct *prev) | 3170 | static inline void schedule_debug(struct task_struct *prev) |
| 3157 | { | 3171 | { |
| 3158 | #ifdef CONFIG_SCHED_STACK_END_CHECK | 3172 | #ifdef CONFIG_SCHED_STACK_END_CHECK |
| 3159 | BUG_ON(task_stack_end_corrupted(prev)); | 3173 | if (task_stack_end_corrupted(prev)) |
| 3174 | panic("corrupted stack end detected inside scheduler\n"); | ||
| 3160 | #endif | 3175 | #endif |
| 3161 | 3176 | ||
| 3162 | if (unlikely(in_atomic_preempt_off())) { | 3177 | if (unlikely(in_atomic_preempt_off())) { |
| @@ -7487,6 +7502,8 @@ void __init sched_init(void) | |||
| 7487 | #endif | 7502 | #endif |
| 7488 | init_sched_fair_class(); | 7503 | init_sched_fair_class(); |
| 7489 | 7504 | ||
| 7505 | init_schedstats(); | ||
| 7506 | |||
| 7490 | scheduler_running = 1; | 7507 | scheduler_running = 1; |
| 7491 | } | 7508 | } |
| 7492 | 7509 | ||
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index cf905f655ba1..0368c393a336 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c | |||
| @@ -427,19 +427,12 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) | |||
| 427 | SPLIT_NS(p->se.vruntime), | 427 | SPLIT_NS(p->se.vruntime), |
| 428 | (long long)(p->nvcsw + p->nivcsw), | 428 | (long long)(p->nvcsw + p->nivcsw), |
| 429 | p->prio); | 429 | p->prio); |
| 430 | #ifdef CONFIG_SCHEDSTATS | 430 | |
| 431 | if (schedstat_enabled()) { | ||
| 432 | SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", | ||
| 433 | SPLIT_NS(p->se.statistics.wait_sum), | ||
| 434 | SPLIT_NS(p->se.sum_exec_runtime), | ||
| 435 | SPLIT_NS(p->se.statistics.sum_sleep_runtime)); | ||
| 436 | } | ||
| 437 | #else | ||
| 438 | SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", | 431 | SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld", |
| 439 | 0LL, 0L, | 432 | SPLIT_NS(schedstat_val(p, se.statistics.wait_sum)), |
| 440 | SPLIT_NS(p->se.sum_exec_runtime), | 433 | SPLIT_NS(p->se.sum_exec_runtime), |
| 441 | 0LL, 0L); | 434 | SPLIT_NS(schedstat_val(p, se.statistics.sum_sleep_runtime))); |
| 442 | #endif | 435 | |
| 443 | #ifdef CONFIG_NUMA_BALANCING | 436 | #ifdef CONFIG_NUMA_BALANCING |
| 444 | SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); | 437 | SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); |
| 445 | #endif | 438 | #endif |
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index bd12c6c714ec..c5aeedf4e93a 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c | |||
| @@ -127,7 +127,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev, | |||
| 127 | */ | 127 | */ |
| 128 | static void cpuidle_idle_call(void) | 128 | static void cpuidle_idle_call(void) |
| 129 | { | 129 | { |
| 130 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | 130 | struct cpuidle_device *dev = cpuidle_get_device(); |
| 131 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | 131 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); |
| 132 | int next_state, entered_state; | 132 | int next_state, entered_state; |
| 133 | 133 | ||
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index 70b3b6a20fb0..78955cbea31c 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h | |||
| @@ -33,6 +33,8 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) | |||
| 33 | # define schedstat_inc(rq, field) do { if (schedstat_enabled()) { (rq)->field++; } } while (0) | 33 | # define schedstat_inc(rq, field) do { if (schedstat_enabled()) { (rq)->field++; } } while (0) |
| 34 | # define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0) | 34 | # define schedstat_add(rq, field, amt) do { if (schedstat_enabled()) { (rq)->field += (amt); } } while (0) |
| 35 | # define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0) | 35 | # define schedstat_set(var, val) do { if (schedstat_enabled()) { var = (val); } } while (0) |
| 36 | # define schedstat_val(rq, field) ((schedstat_enabled()) ? (rq)->field : 0) | ||
| 37 | |||
| 36 | #else /* !CONFIG_SCHEDSTATS */ | 38 | #else /* !CONFIG_SCHEDSTATS */ |
| 37 | static inline void | 39 | static inline void |
| 38 | rq_sched_info_arrive(struct rq *rq, unsigned long long delta) | 40 | rq_sched_info_arrive(struct rq *rq, unsigned long long delta) |
| @@ -47,6 +49,7 @@ rq_sched_info_depart(struct rq *rq, unsigned long long delta) | |||
| 47 | # define schedstat_inc(rq, field) do { } while (0) | 49 | # define schedstat_inc(rq, field) do { } while (0) |
| 48 | # define schedstat_add(rq, field, amt) do { } while (0) | 50 | # define schedstat_add(rq, field, amt) do { } while (0) |
| 49 | # define schedstat_set(var, val) do { } while (0) | 51 | # define schedstat_set(var, val) do { } while (0) |
| 52 | # define schedstat_val(rq, field) 0 | ||
| 50 | #endif | 53 | #endif |
| 51 | 54 | ||
| 52 | #ifdef CONFIG_SCHED_INFO | 55 | #ifdef CONFIG_SCHED_INFO |
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 780bcbe1d4de..720b7bb01d43 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c | |||
| @@ -198,7 +198,7 @@ static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5) | |||
| 198 | if (unlikely(index >= array->map.max_entries)) | 198 | if (unlikely(index >= array->map.max_entries)) |
| 199 | return -E2BIG; | 199 | return -E2BIG; |
| 200 | 200 | ||
| 201 | file = (struct file *)array->ptrs[index]; | 201 | file = READ_ONCE(array->ptrs[index]); |
| 202 | if (unlikely(!file)) | 202 | if (unlikely(!file)) |
| 203 | return -ENOENT; | 203 | return -ENOENT; |
| 204 | 204 | ||
| @@ -247,7 +247,7 @@ static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 flags, u64 r4, u64 size) | |||
| 247 | if (unlikely(index >= array->map.max_entries)) | 247 | if (unlikely(index >= array->map.max_entries)) |
| 248 | return -E2BIG; | 248 | return -E2BIG; |
| 249 | 249 | ||
| 250 | file = (struct file *)array->ptrs[index]; | 250 | file = READ_ONCE(array->ptrs[index]); |
| 251 | if (unlikely(!file)) | 251 | if (unlikely(!file)) |
| 252 | return -ENOENT; | 252 | return -ENOENT; |
| 253 | 253 | ||
diff --git a/mm/fadvise.c b/mm/fadvise.c index b8024fa7101d..6c707bfe02fd 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c | |||
| @@ -126,6 +126,17 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice) | |||
| 126 | */ | 126 | */ |
| 127 | start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT; | 127 | start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT; |
| 128 | end_index = (endbyte >> PAGE_SHIFT); | 128 | end_index = (endbyte >> PAGE_SHIFT); |
| 129 | if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) { | ||
| 130 | /* First page is tricky as 0 - 1 = -1, but pgoff_t | ||
| 131 | * is unsigned, so the end_index >= start_index | ||
| 132 | * check below would be true and we'll discard the whole | ||
| 133 | * file cache which is not what was asked. | ||
| 134 | */ | ||
| 135 | if (end_index == 0) | ||
| 136 | break; | ||
| 137 | |||
| 138 | end_index--; | ||
| 139 | } | ||
| 129 | 140 | ||
| 130 | if (end_index >= start_index) { | 141 | if (end_index >= start_index) { |
| 131 | unsigned long count = invalidate_mapping_pages(mapping, | 142 | unsigned long count = invalidate_mapping_pages(mapping, |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d26162e81fea..388c2bb9b55c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
| @@ -832,8 +832,27 @@ static bool vma_has_reserves(struct vm_area_struct *vma, long chg) | |||
| 832 | * Only the process that called mmap() has reserves for | 832 | * Only the process that called mmap() has reserves for |
| 833 | * private mappings. | 833 | * private mappings. |
| 834 | */ | 834 | */ |
| 835 | if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) | 835 | if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { |
| 836 | return true; | 836 | /* |
| 837 | * Like the shared case above, a hole punch or truncate | ||
| 838 | * could have been performed on the private mapping. | ||
| 839 | * Examine the value of chg to determine if reserves | ||
| 840 | * actually exist or were previously consumed. | ||
| 841 | * Very Subtle - The value of chg comes from a previous | ||
| 842 | * call to vma_needs_reserves(). The reserve map for | ||
| 843 | * private mappings has different (opposite) semantics | ||
| 844 | * than that of shared mappings. vma_needs_reserves() | ||
| 845 | * has already taken this difference in semantics into | ||
| 846 | * account. Therefore, the meaning of chg is the same | ||
| 847 | * as in the shared case above. Code could easily be | ||
| 848 | * combined, but keeping it separate draws attention to | ||
| 849 | * subtle differences. | ||
| 850 | */ | ||
| 851 | if (chg) | ||
| 852 | return false; | ||
| 853 | else | ||
| 854 | return true; | ||
| 855 | } | ||
| 837 | 856 | ||
| 838 | return false; | 857 | return false; |
| 839 | } | 858 | } |
| @@ -1816,6 +1835,25 @@ static long __vma_reservation_common(struct hstate *h, | |||
| 1816 | 1835 | ||
| 1817 | if (vma->vm_flags & VM_MAYSHARE) | 1836 | if (vma->vm_flags & VM_MAYSHARE) |
| 1818 | return ret; | 1837 | return ret; |
| 1838 | else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) { | ||
| 1839 | /* | ||
| 1840 | * In most cases, reserves always exist for private mappings. | ||
| 1841 | * However, a file associated with mapping could have been | ||
| 1842 | * hole punched or truncated after reserves were consumed. | ||
| 1843 | * As subsequent fault on such a range will not use reserves. | ||
| 1844 | * Subtle - The reserve map for private mappings has the | ||
| 1845 | * opposite meaning than that of shared mappings. If NO | ||
| 1846 | * entry is in the reserve map, it means a reservation exists. | ||
| 1847 | * If an entry exists in the reserve map, it means the | ||
| 1848 | * reservation has already been consumed. As a result, the | ||
| 1849 | * return value of this routine is the opposite of the | ||
| 1850 | * value returned from reserve map manipulation routines above. | ||
| 1851 | */ | ||
| 1852 | if (ret) | ||
| 1853 | return 0; | ||
| 1854 | else | ||
| 1855 | return 1; | ||
| 1856 | } | ||
| 1819 | else | 1857 | else |
| 1820 | return ret < 0 ? ret : 0; | 1858 | return ret < 0 ? ret : 0; |
| 1821 | } | 1859 | } |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c index 18b6a2b8d183..28439acda6ec 100644 --- a/mm/kasan/kasan.c +++ b/mm/kasan/kasan.c | |||
| @@ -763,8 +763,8 @@ static int kasan_mem_notifier(struct notifier_block *nb, | |||
| 763 | 763 | ||
| 764 | static int __init kasan_memhotplug_init(void) | 764 | static int __init kasan_memhotplug_init(void) |
| 765 | { | 765 | { |
| 766 | pr_err("WARNING: KASAN doesn't support memory hot-add\n"); | 766 | pr_info("WARNING: KASAN doesn't support memory hot-add\n"); |
| 767 | pr_err("Memory hot-add will be disabled\n"); | 767 | pr_info("Memory hot-add will be disabled\n"); |
| 768 | 768 | ||
| 769 | hotplug_memory_notifier(kasan_mem_notifier, 0); | 769 | hotplug_memory_notifier(kasan_mem_notifier, 0); |
| 770 | 770 | ||
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 58c69c94402a..75e74408cc8f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -1608,7 +1608,7 @@ static void memcg_oom_recover(struct mem_cgroup *memcg) | |||
| 1608 | 1608 | ||
| 1609 | static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) | 1609 | static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order) |
| 1610 | { | 1610 | { |
| 1611 | if (!current->memcg_may_oom || current->memcg_in_oom) | 1611 | if (!current->memcg_may_oom) |
| 1612 | return; | 1612 | return; |
| 1613 | /* | 1613 | /* |
| 1614 | * We are in the middle of the charge context here, so we | 1614 | * We are in the middle of the charge context here, so we |
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index b9956fdee8f5..e2481949494c 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
| @@ -373,8 +373,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc) | |||
| 373 | struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc); | 373 | struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc); |
| 374 | unsigned long bytes = vm_dirty_bytes; | 374 | unsigned long bytes = vm_dirty_bytes; |
| 375 | unsigned long bg_bytes = dirty_background_bytes; | 375 | unsigned long bg_bytes = dirty_background_bytes; |
| 376 | unsigned long ratio = vm_dirty_ratio; | 376 | /* convert ratios to per-PAGE_SIZE for higher precision */ |
| 377 | unsigned long bg_ratio = dirty_background_ratio; | 377 | unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100; |
| 378 | unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100; | ||
| 378 | unsigned long thresh; | 379 | unsigned long thresh; |
| 379 | unsigned long bg_thresh; | 380 | unsigned long bg_thresh; |
| 380 | struct task_struct *tsk; | 381 | struct task_struct *tsk; |
| @@ -386,26 +387,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc) | |||
| 386 | /* | 387 | /* |
| 387 | * The byte settings can't be applied directly to memcg | 388 | * The byte settings can't be applied directly to memcg |
| 388 | * domains. Convert them to ratios by scaling against | 389 | * domains. Convert them to ratios by scaling against |
| 389 | * globally available memory. | 390 | * globally available memory. As the ratios are in |
| 391 | * per-PAGE_SIZE, they can be obtained by dividing bytes by | ||
| 392 | * number of pages. | ||
| 390 | */ | 393 | */ |
| 391 | if (bytes) | 394 | if (bytes) |
| 392 | ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 / | 395 | ratio = min(DIV_ROUND_UP(bytes, global_avail), |
| 393 | global_avail, 100UL); | 396 | PAGE_SIZE); |
| 394 | if (bg_bytes) | 397 | if (bg_bytes) |
| 395 | bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 / | 398 | bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail), |
| 396 | global_avail, 100UL); | 399 | PAGE_SIZE); |
| 397 | bytes = bg_bytes = 0; | 400 | bytes = bg_bytes = 0; |
| 398 | } | 401 | } |
| 399 | 402 | ||
| 400 | if (bytes) | 403 | if (bytes) |
| 401 | thresh = DIV_ROUND_UP(bytes, PAGE_SIZE); | 404 | thresh = DIV_ROUND_UP(bytes, PAGE_SIZE); |
| 402 | else | 405 | else |
| 403 | thresh = (ratio * available_memory) / 100; | 406 | thresh = (ratio * available_memory) / PAGE_SIZE; |
| 404 | 407 | ||
| 405 | if (bg_bytes) | 408 | if (bg_bytes) |
| 406 | bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE); | 409 | bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE); |
| 407 | else | 410 | else |
| 408 | bg_thresh = (bg_ratio * available_memory) / 100; | 411 | bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE; |
| 409 | 412 | ||
| 410 | if (bg_thresh >= thresh) | 413 | if (bg_thresh >= thresh) |
| 411 | bg_thresh = thresh / 2; | 414 | bg_thresh = thresh / 2; |
diff --git a/mm/percpu.c b/mm/percpu.c index 0c59684f1ff2..9903830aaebb 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
| @@ -112,7 +112,7 @@ struct pcpu_chunk { | |||
| 112 | int map_used; /* # of map entries used before the sentry */ | 112 | int map_used; /* # of map entries used before the sentry */ |
| 113 | int map_alloc; /* # of map entries allocated */ | 113 | int map_alloc; /* # of map entries allocated */ |
| 114 | int *map; /* allocation map */ | 114 | int *map; /* allocation map */ |
| 115 | struct work_struct map_extend_work;/* async ->map[] extension */ | 115 | struct list_head map_extend_list;/* on pcpu_map_extend_chunks */ |
| 116 | 116 | ||
| 117 | void *data; /* chunk data */ | 117 | void *data; /* chunk data */ |
| 118 | int first_free; /* no free below this */ | 118 | int first_free; /* no free below this */ |
| @@ -162,10 +162,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk; | |||
| 162 | static int pcpu_reserved_chunk_limit; | 162 | static int pcpu_reserved_chunk_limit; |
| 163 | 163 | ||
| 164 | static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */ | 164 | static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */ |
| 165 | static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */ | 165 | static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */ |
| 166 | 166 | ||
| 167 | static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ | 167 | static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */ |
| 168 | 168 | ||
| 169 | /* chunks which need their map areas extended, protected by pcpu_lock */ | ||
| 170 | static LIST_HEAD(pcpu_map_extend_chunks); | ||
| 171 | |||
| 169 | /* | 172 | /* |
| 170 | * The number of empty populated pages, protected by pcpu_lock. The | 173 | * The number of empty populated pages, protected by pcpu_lock. The |
| 171 | * reserved chunk doesn't contribute to the count. | 174 | * reserved chunk doesn't contribute to the count. |
| @@ -395,13 +398,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic) | |||
| 395 | { | 398 | { |
| 396 | int margin, new_alloc; | 399 | int margin, new_alloc; |
| 397 | 400 | ||
| 401 | lockdep_assert_held(&pcpu_lock); | ||
| 402 | |||
| 398 | if (is_atomic) { | 403 | if (is_atomic) { |
| 399 | margin = 3; | 404 | margin = 3; |
| 400 | 405 | ||
| 401 | if (chunk->map_alloc < | 406 | if (chunk->map_alloc < |
| 402 | chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW && | 407 | chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) { |
| 403 | pcpu_async_enabled) | 408 | if (list_empty(&chunk->map_extend_list)) { |
| 404 | schedule_work(&chunk->map_extend_work); | 409 | list_add_tail(&chunk->map_extend_list, |
| 410 | &pcpu_map_extend_chunks); | ||
| 411 | pcpu_schedule_balance_work(); | ||
| 412 | } | ||
| 413 | } | ||
| 405 | } else { | 414 | } else { |
| 406 | margin = PCPU_ATOMIC_MAP_MARGIN_HIGH; | 415 | margin = PCPU_ATOMIC_MAP_MARGIN_HIGH; |
| 407 | } | 416 | } |
| @@ -435,6 +444,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc) | |||
| 435 | size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); | 444 | size_t old_size = 0, new_size = new_alloc * sizeof(new[0]); |
| 436 | unsigned long flags; | 445 | unsigned long flags; |
| 437 | 446 | ||
| 447 | lockdep_assert_held(&pcpu_alloc_mutex); | ||
| 448 | |||
| 438 | new = pcpu_mem_zalloc(new_size); | 449 | new = pcpu_mem_zalloc(new_size); |
| 439 | if (!new) | 450 | if (!new) |
| 440 | return -ENOMEM; | 451 | return -ENOMEM; |
| @@ -467,20 +478,6 @@ out_unlock: | |||
| 467 | return 0; | 478 | return 0; |
| 468 | } | 479 | } |
| 469 | 480 | ||
| 470 | static void pcpu_map_extend_workfn(struct work_struct *work) | ||
| 471 | { | ||
| 472 | struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk, | ||
| 473 | map_extend_work); | ||
| 474 | int new_alloc; | ||
| 475 | |||
| 476 | spin_lock_irq(&pcpu_lock); | ||
| 477 | new_alloc = pcpu_need_to_extend(chunk, false); | ||
| 478 | spin_unlock_irq(&pcpu_lock); | ||
| 479 | |||
| 480 | if (new_alloc) | ||
| 481 | pcpu_extend_area_map(chunk, new_alloc); | ||
| 482 | } | ||
| 483 | |||
| 484 | /** | 481 | /** |
| 485 | * pcpu_fit_in_area - try to fit the requested allocation in a candidate area | 482 | * pcpu_fit_in_area - try to fit the requested allocation in a candidate area |
| 486 | * @chunk: chunk the candidate area belongs to | 483 | * @chunk: chunk the candidate area belongs to |
| @@ -740,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void) | |||
| 740 | chunk->map_used = 1; | 737 | chunk->map_used = 1; |
| 741 | 738 | ||
| 742 | INIT_LIST_HEAD(&chunk->list); | 739 | INIT_LIST_HEAD(&chunk->list); |
| 743 | INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn); | 740 | INIT_LIST_HEAD(&chunk->map_extend_list); |
| 744 | chunk->free_size = pcpu_unit_size; | 741 | chunk->free_size = pcpu_unit_size; |
| 745 | chunk->contig_hint = pcpu_unit_size; | 742 | chunk->contig_hint = pcpu_unit_size; |
| 746 | 743 | ||
| @@ -895,6 +892,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, | |||
| 895 | return NULL; | 892 | return NULL; |
| 896 | } | 893 | } |
| 897 | 894 | ||
| 895 | if (!is_atomic) | ||
| 896 | mutex_lock(&pcpu_alloc_mutex); | ||
| 897 | |||
| 898 | spin_lock_irqsave(&pcpu_lock, flags); | 898 | spin_lock_irqsave(&pcpu_lock, flags); |
| 899 | 899 | ||
| 900 | /* serve reserved allocations from the reserved chunk if available */ | 900 | /* serve reserved allocations from the reserved chunk if available */ |
| @@ -967,12 +967,9 @@ restart: | |||
| 967 | if (is_atomic) | 967 | if (is_atomic) |
| 968 | goto fail; | 968 | goto fail; |
| 969 | 969 | ||
| 970 | mutex_lock(&pcpu_alloc_mutex); | ||
| 971 | |||
| 972 | if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { | 970 | if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) { |
| 973 | chunk = pcpu_create_chunk(); | 971 | chunk = pcpu_create_chunk(); |
| 974 | if (!chunk) { | 972 | if (!chunk) { |
| 975 | mutex_unlock(&pcpu_alloc_mutex); | ||
| 976 | err = "failed to allocate new chunk"; | 973 | err = "failed to allocate new chunk"; |
| 977 | goto fail; | 974 | goto fail; |
| 978 | } | 975 | } |
| @@ -983,7 +980,6 @@ restart: | |||
| 983 | spin_lock_irqsave(&pcpu_lock, flags); | 980 | spin_lock_irqsave(&pcpu_lock, flags); |
| 984 | } | 981 | } |
| 985 | 982 | ||
| 986 | mutex_unlock(&pcpu_alloc_mutex); | ||
| 987 | goto restart; | 983 | goto restart; |
| 988 | 984 | ||
| 989 | area_found: | 985 | area_found: |
| @@ -993,8 +989,6 @@ area_found: | |||
| 993 | if (!is_atomic) { | 989 | if (!is_atomic) { |
| 994 | int page_start, page_end, rs, re; | 990 | int page_start, page_end, rs, re; |
| 995 | 991 | ||
| 996 | mutex_lock(&pcpu_alloc_mutex); | ||
| 997 | |||
| 998 | page_start = PFN_DOWN(off); | 992 | page_start = PFN_DOWN(off); |
| 999 | page_end = PFN_UP(off + size); | 993 | page_end = PFN_UP(off + size); |
| 1000 | 994 | ||
| @@ -1005,7 +999,6 @@ area_found: | |||
| 1005 | 999 | ||
| 1006 | spin_lock_irqsave(&pcpu_lock, flags); | 1000 | spin_lock_irqsave(&pcpu_lock, flags); |
| 1007 | if (ret) { | 1001 | if (ret) { |
| 1008 | mutex_unlock(&pcpu_alloc_mutex); | ||
| 1009 | pcpu_free_area(chunk, off, &occ_pages); | 1002 | pcpu_free_area(chunk, off, &occ_pages); |
| 1010 | err = "failed to populate"; | 1003 | err = "failed to populate"; |
| 1011 | goto fail_unlock; | 1004 | goto fail_unlock; |
| @@ -1045,6 +1038,8 @@ fail: | |||
| 1045 | /* see the flag handling in pcpu_blance_workfn() */ | 1038 | /* see the flag handling in pcpu_blance_workfn() */ |
| 1046 | pcpu_atomic_alloc_failed = true; | 1039 | pcpu_atomic_alloc_failed = true; |
| 1047 | pcpu_schedule_balance_work(); | 1040 | pcpu_schedule_balance_work(); |
| 1041 | } else { | ||
| 1042 | mutex_unlock(&pcpu_alloc_mutex); | ||
| 1048 | } | 1043 | } |
| 1049 | return NULL; | 1044 | return NULL; |
| 1050 | } | 1045 | } |
| @@ -1129,6 +1124,7 @@ static void pcpu_balance_workfn(struct work_struct *work) | |||
| 1129 | if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) | 1124 | if (chunk == list_first_entry(free_head, struct pcpu_chunk, list)) |
| 1130 | continue; | 1125 | continue; |
| 1131 | 1126 | ||
| 1127 | list_del_init(&chunk->map_extend_list); | ||
| 1132 | list_move(&chunk->list, &to_free); | 1128 | list_move(&chunk->list, &to_free); |
| 1133 | } | 1129 | } |
| 1134 | 1130 | ||
| @@ -1146,6 +1142,25 @@ static void pcpu_balance_workfn(struct work_struct *work) | |||
| 1146 | pcpu_destroy_chunk(chunk); | 1142 | pcpu_destroy_chunk(chunk); |
| 1147 | } | 1143 | } |
| 1148 | 1144 | ||
| 1145 | /* service chunks which requested async area map extension */ | ||
| 1146 | do { | ||
| 1147 | int new_alloc = 0; | ||
| 1148 | |||
| 1149 | spin_lock_irq(&pcpu_lock); | ||
| 1150 | |||
| 1151 | chunk = list_first_entry_or_null(&pcpu_map_extend_chunks, | ||
| 1152 | struct pcpu_chunk, map_extend_list); | ||
| 1153 | if (chunk) { | ||
| 1154 | list_del_init(&chunk->map_extend_list); | ||
| 1155 | new_alloc = pcpu_need_to_extend(chunk, false); | ||
| 1156 | } | ||
| 1157 | |||
| 1158 | spin_unlock_irq(&pcpu_lock); | ||
| 1159 | |||
| 1160 | if (new_alloc) | ||
| 1161 | pcpu_extend_area_map(chunk, new_alloc); | ||
| 1162 | } while (chunk); | ||
| 1163 | |||
| 1149 | /* | 1164 | /* |
| 1150 | * Ensure there are certain number of free populated pages for | 1165 | * Ensure there are certain number of free populated pages for |
| 1151 | * atomic allocs. Fill up from the most packed so that atomic | 1166 | * atomic allocs. Fill up from the most packed so that atomic |
| @@ -1644,7 +1659,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
| 1644 | */ | 1659 | */ |
| 1645 | schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); | 1660 | schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); |
| 1646 | INIT_LIST_HEAD(&schunk->list); | 1661 | INIT_LIST_HEAD(&schunk->list); |
| 1647 | INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn); | 1662 | INIT_LIST_HEAD(&schunk->map_extend_list); |
| 1648 | schunk->base_addr = base_addr; | 1663 | schunk->base_addr = base_addr; |
| 1649 | schunk->map = smap; | 1664 | schunk->map = smap; |
| 1650 | schunk->map_alloc = ARRAY_SIZE(smap); | 1665 | schunk->map_alloc = ARRAY_SIZE(smap); |
| @@ -1673,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
| 1673 | if (dyn_size) { | 1688 | if (dyn_size) { |
| 1674 | dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); | 1689 | dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0); |
| 1675 | INIT_LIST_HEAD(&dchunk->list); | 1690 | INIT_LIST_HEAD(&dchunk->list); |
| 1676 | INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn); | 1691 | INIT_LIST_HEAD(&dchunk->map_extend_list); |
| 1677 | dchunk->base_addr = base_addr; | 1692 | dchunk->base_addr = base_addr; |
| 1678 | dchunk->map = dmap; | 1693 | dchunk->map = dmap; |
| 1679 | dchunk->map_alloc = ARRAY_SIZE(dmap); | 1694 | dchunk->map_alloc = ARRAY_SIZE(dmap); |
| @@ -667,6 +667,24 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy) | |||
| 667 | 667 | ||
| 668 | static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); | 668 | static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); |
| 669 | 669 | ||
| 670 | /* | ||
| 671 | * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM | ||
| 672 | * workqueue, aiding in getting memory freed. | ||
| 673 | */ | ||
| 674 | static struct workqueue_struct *lru_add_drain_wq; | ||
| 675 | |||
| 676 | static int __init lru_init(void) | ||
| 677 | { | ||
| 678 | lru_add_drain_wq = alloc_workqueue("lru-add-drain", WQ_MEM_RECLAIM, 0); | ||
| 679 | |||
| 680 | if (WARN(!lru_add_drain_wq, | ||
| 681 | "Failed to create workqueue lru_add_drain_wq")) | ||
| 682 | return -ENOMEM; | ||
| 683 | |||
| 684 | return 0; | ||
| 685 | } | ||
| 686 | early_initcall(lru_init); | ||
| 687 | |||
| 670 | void lru_add_drain_all(void) | 688 | void lru_add_drain_all(void) |
| 671 | { | 689 | { |
| 672 | static DEFINE_MUTEX(lock); | 690 | static DEFINE_MUTEX(lock); |
| @@ -686,7 +704,7 @@ void lru_add_drain_all(void) | |||
| 686 | pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) || | 704 | pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) || |
| 687 | need_activate_page_drain(cpu)) { | 705 | need_activate_page_drain(cpu)) { |
| 688 | INIT_WORK(work, lru_add_drain_per_cpu); | 706 | INIT_WORK(work, lru_add_drain_per_cpu); |
| 689 | schedule_work_on(cpu, work); | 707 | queue_work_on(cpu, lru_add_drain_wq, work); |
| 690 | cpumask_set_cpu(cpu, &has_work); | 708 | cpumask_set_cpu(cpu, &has_work); |
| 691 | } | 709 | } |
| 692 | } | 710 | } |
diff --git a/mm/swap_state.c b/mm/swap_state.c index 0d457e7db8d6..c99463ac02fb 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
| @@ -252,7 +252,10 @@ static inline void free_swap_cache(struct page *page) | |||
| 252 | void free_page_and_swap_cache(struct page *page) | 252 | void free_page_and_swap_cache(struct page *page) |
| 253 | { | 253 | { |
| 254 | free_swap_cache(page); | 254 | free_swap_cache(page); |
| 255 | put_page(page); | 255 | if (is_huge_zero_page(page)) |
| 256 | put_huge_zero_page(); | ||
| 257 | else | ||
| 258 | put_page(page); | ||
| 256 | } | 259 | } |
| 257 | 260 | ||
| 258 | /* | 261 | /* |
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index dcea4f4c62b3..c18080ad4085 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
| @@ -279,6 +279,8 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr) | |||
| 279 | * change from under us. | 279 | * change from under us. |
| 280 | */ | 280 | */ |
| 281 | list_for_each_entry(v, &vg->vlan_list, vlist) { | 281 | list_for_each_entry(v, &vg->vlan_list, vlist) { |
| 282 | if (!br_vlan_should_use(v)) | ||
| 283 | continue; | ||
| 282 | f = __br_fdb_get(br, br->dev->dev_addr, v->vid); | 284 | f = __br_fdb_get(br, br->dev->dev_addr, v->vid); |
| 283 | if (f && f->is_local && !f->dst) | 285 | if (f && f->is_local && !f->dst) |
| 284 | fdb_delete_local(br, NULL, f); | 286 | fdb_delete_local(br, NULL, f); |
diff --git a/net/compat.c b/net/compat.c index 5cfd26a0006f..1cd2ec046164 100644 --- a/net/compat.c +++ b/net/compat.c | |||
| @@ -309,8 +309,8 @@ void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm) | |||
| 309 | __scm_destroy(scm); | 309 | __scm_destroy(scm); |
| 310 | } | 310 | } |
| 311 | 311 | ||
| 312 | static int do_set_attach_filter(struct socket *sock, int level, int optname, | 312 | /* allocate a 64-bit sock_fprog on the user stack for duration of syscall. */ |
| 313 | char __user *optval, unsigned int optlen) | 313 | struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval) |
| 314 | { | 314 | { |
| 315 | struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval; | 315 | struct compat_sock_fprog __user *fprog32 = (struct compat_sock_fprog __user *)optval; |
| 316 | struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog)); | 316 | struct sock_fprog __user *kfprog = compat_alloc_user_space(sizeof(struct sock_fprog)); |
| @@ -323,6 +323,19 @@ static int do_set_attach_filter(struct socket *sock, int level, int optname, | |||
| 323 | __get_user(ptr, &fprog32->filter) || | 323 | __get_user(ptr, &fprog32->filter) || |
| 324 | __put_user(len, &kfprog->len) || | 324 | __put_user(len, &kfprog->len) || |
| 325 | __put_user(compat_ptr(ptr), &kfprog->filter)) | 325 | __put_user(compat_ptr(ptr), &kfprog->filter)) |
| 326 | return NULL; | ||
| 327 | |||
| 328 | return kfprog; | ||
| 329 | } | ||
| 330 | EXPORT_SYMBOL_GPL(get_compat_bpf_fprog); | ||
| 331 | |||
| 332 | static int do_set_attach_filter(struct socket *sock, int level, int optname, | ||
| 333 | char __user *optval, unsigned int optlen) | ||
| 334 | { | ||
| 335 | struct sock_fprog __user *kfprog; | ||
| 336 | |||
| 337 | kfprog = get_compat_bpf_fprog(optval); | ||
| 338 | if (!kfprog) | ||
| 326 | return -EFAULT; | 339 | return -EFAULT; |
| 327 | 340 | ||
| 328 | return sock_setsockopt(sock, level, optname, (char __user *)kfprog, | 341 | return sock_setsockopt(sock, level, optname, (char __user *)kfprog, |
| @@ -354,7 +367,8 @@ static int do_set_sock_timeout(struct socket *sock, int level, | |||
| 354 | static int compat_sock_setsockopt(struct socket *sock, int level, int optname, | 367 | static int compat_sock_setsockopt(struct socket *sock, int level, int optname, |
| 355 | char __user *optval, unsigned int optlen) | 368 | char __user *optval, unsigned int optlen) |
| 356 | { | 369 | { |
| 357 | if (optname == SO_ATTACH_FILTER) | 370 | if (optname == SO_ATTACH_FILTER || |
| 371 | optname == SO_ATTACH_REUSEPORT_CBPF) | ||
| 358 | return do_set_attach_filter(sock, level, optname, | 372 | return do_set_attach_filter(sock, level, optname, |
| 359 | optval, optlen); | 373 | optval, optlen); |
| 360 | if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) | 374 | if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO) |
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index f96ee8b9478d..be873e4e3125 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c | |||
| @@ -47,6 +47,7 @@ nla_put_failure: | |||
| 47 | * @xstats_type: TLV type for backward compatibility xstats TLV | 47 | * @xstats_type: TLV type for backward compatibility xstats TLV |
| 48 | * @lock: statistics lock | 48 | * @lock: statistics lock |
| 49 | * @d: dumping handle | 49 | * @d: dumping handle |
| 50 | * @padattr: padding attribute | ||
| 50 | * | 51 | * |
| 51 | * Initializes the dumping handle, grabs the statistic lock and appends | 52 | * Initializes the dumping handle, grabs the statistic lock and appends |
| 52 | * an empty TLV header to the socket buffer for use a container for all | 53 | * an empty TLV header to the socket buffer for use a container for all |
| @@ -87,6 +88,7 @@ EXPORT_SYMBOL(gnet_stats_start_copy_compat); | |||
| 87 | * @type: TLV type for top level statistic TLV | 88 | * @type: TLV type for top level statistic TLV |
| 88 | * @lock: statistics lock | 89 | * @lock: statistics lock |
| 89 | * @d: dumping handle | 90 | * @d: dumping handle |
| 91 | * @padattr: padding attribute | ||
| 90 | * | 92 | * |
| 91 | * Initializes the dumping handle, grabs the statistic lock and appends | 93 | * Initializes the dumping handle, grabs the statistic lock and appends |
| 92 | * an empty TLV header to the socket buffer for use a container for all | 94 | * an empty TLV header to the socket buffer for use a container for all |
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 2b3f76fe65f4..7a0b616557ab 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/jiffies.h> | 24 | #include <linux/jiffies.h> |
| 25 | #include <linux/pm_runtime.h> | 25 | #include <linux/pm_runtime.h> |
| 26 | #include <linux/of.h> | 26 | #include <linux/of.h> |
| 27 | #include <linux/of_net.h> | ||
| 27 | 28 | ||
| 28 | #include "net-sysfs.h" | 29 | #include "net-sysfs.h" |
| 29 | 30 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index d56c0559b477..0ff31d97d485 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
| @@ -1618,12 +1618,12 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
| 1618 | } | 1618 | } |
| 1619 | } | 1619 | } |
| 1620 | 1620 | ||
| 1621 | if (rcu_access_pointer(sk->sk_filter)) { | 1621 | if (rcu_access_pointer(sk->sk_filter) && |
| 1622 | if (udp_lib_checksum_complete(skb)) | 1622 | udp_lib_checksum_complete(skb)) |
| 1623 | goto csum_error; | 1623 | goto csum_error; |
| 1624 | if (sk_filter(sk, skb)) | 1624 | |
| 1625 | goto drop; | 1625 | if (sk_filter(sk, skb)) |
| 1626 | } | 1626 | goto drop; |
| 1627 | 1627 | ||
| 1628 | udp_csum_pull_header(skb); | 1628 | udp_csum_pull_header(skb); |
| 1629 | if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { | 1629 | if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index f4ac2842d4d9..fdc9de276ab1 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -1256,6 +1256,8 @@ static int ip6gre_tap_init(struct net_device *dev) | |||
| 1256 | if (ret) | 1256 | if (ret) |
| 1257 | return ret; | 1257 | return ret; |
| 1258 | 1258 | ||
| 1259 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | ||
| 1260 | |||
| 1259 | tunnel = netdev_priv(dev); | 1261 | tunnel = netdev_priv(dev); |
| 1260 | 1262 | ||
| 1261 | ip6gre_tnl_link_config(tunnel, 1); | 1263 | ip6gre_tnl_link_config(tunnel, 1); |
| @@ -1289,6 +1291,7 @@ static void ip6gre_tap_setup(struct net_device *dev) | |||
| 1289 | 1291 | ||
| 1290 | dev->features |= NETIF_F_NETNS_LOCAL; | 1292 | dev->features |= NETIF_F_NETNS_LOCAL; |
| 1291 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 1293 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
| 1294 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | ||
| 1292 | } | 1295 | } |
| 1293 | 1296 | ||
| 1294 | static bool ip6gre_netlink_encap_parms(struct nlattr *data[], | 1297 | static bool ip6gre_netlink_encap_parms(struct nlattr *data[], |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index cbf127ae7c67..635b8d340cdb 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
| @@ -1071,17 +1071,12 @@ struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, | |||
| 1071 | const struct in6_addr *final_dst) | 1071 | const struct in6_addr *final_dst) |
| 1072 | { | 1072 | { |
| 1073 | struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); | 1073 | struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie); |
| 1074 | int err; | ||
| 1075 | 1074 | ||
| 1076 | dst = ip6_sk_dst_check(sk, dst, fl6); | 1075 | dst = ip6_sk_dst_check(sk, dst, fl6); |
| 1076 | if (!dst) | ||
| 1077 | dst = ip6_dst_lookup_flow(sk, fl6, final_dst); | ||
| 1077 | 1078 | ||
| 1078 | err = ip6_dst_lookup_tail(sock_net(sk), sk, &dst, fl6); | 1079 | return dst; |
| 1079 | if (err) | ||
| 1080 | return ERR_PTR(err); | ||
| 1081 | if (final_dst) | ||
| 1082 | fl6->daddr = *final_dst; | ||
| 1083 | |||
| 1084 | return xfrm_lookup_route(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0); | ||
| 1085 | } | 1080 | } |
| 1086 | EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); | 1081 | EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow); |
| 1087 | 1082 | ||
diff --git a/net/ipv6/netfilter/nf_dup_ipv6.c b/net/ipv6/netfilter/nf_dup_ipv6.c index 6989c70ae29f..4a84b5ad9ecb 100644 --- a/net/ipv6/netfilter/nf_dup_ipv6.c +++ b/net/ipv6/netfilter/nf_dup_ipv6.c | |||
| @@ -33,6 +33,7 @@ static bool nf_dup_ipv6_route(struct net *net, struct sk_buff *skb, | |||
| 33 | fl6.daddr = *gw; | 33 | fl6.daddr = *gw; |
| 34 | fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) | | 34 | fl6.flowlabel = (__force __be32)(((iph->flow_lbl[0] & 0xF) << 16) | |
| 35 | (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]); | 35 | (iph->flow_lbl[1] << 8) | iph->flow_lbl[2]); |
| 36 | fl6.flowi6_flags = FLOWI_FLAG_KNOWN_NH; | ||
| 36 | dst = ip6_route_output(net, NULL, &fl6); | 37 | dst = ip6_route_output(net, NULL, &fl6); |
| 37 | if (dst->error) { | 38 | if (dst->error) { |
| 38 | dst_release(dst); | 39 | dst_release(dst); |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 79e33e02f11a..f36c2d076fce 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -1721,7 +1721,9 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) | |||
| 1721 | destp = ntohs(inet->inet_dport); | 1721 | destp = ntohs(inet->inet_dport); |
| 1722 | srcp = ntohs(inet->inet_sport); | 1722 | srcp = ntohs(inet->inet_sport); |
| 1723 | 1723 | ||
| 1724 | if (icsk->icsk_pending == ICSK_TIME_RETRANS) { | 1724 | if (icsk->icsk_pending == ICSK_TIME_RETRANS || |
| 1725 | icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || | ||
| 1726 | icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { | ||
| 1725 | timer_active = 1; | 1727 | timer_active = 1; |
| 1726 | timer_expires = icsk->icsk_timeout; | 1728 | timer_expires = icsk->icsk_timeout; |
| 1727 | } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { | 1729 | } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 2da1896af934..f421c9f23c5b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -653,12 +653,12 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
| 653 | } | 653 | } |
| 654 | } | 654 | } |
| 655 | 655 | ||
| 656 | if (rcu_access_pointer(sk->sk_filter)) { | 656 | if (rcu_access_pointer(sk->sk_filter) && |
| 657 | if (udp_lib_checksum_complete(skb)) | 657 | udp_lib_checksum_complete(skb)) |
| 658 | goto csum_error; | 658 | goto csum_error; |
| 659 | if (sk_filter(sk, skb)) | 659 | |
| 660 | goto drop; | 660 | if (sk_filter(sk, skb)) |
| 661 | } | 661 | goto drop; |
| 662 | 662 | ||
| 663 | udp_csum_pull_header(skb); | 663 | udp_csum_pull_header(skb); |
| 664 | if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { | 664 | if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 6edfa9980314..1e40dacaa137 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
| @@ -1581,7 +1581,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
| 1581 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ | 1581 | /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */ |
| 1582 | tunnel->encap = encap; | 1582 | tunnel->encap = encap; |
| 1583 | if (encap == L2TP_ENCAPTYPE_UDP) { | 1583 | if (encap == L2TP_ENCAPTYPE_UDP) { |
| 1584 | struct udp_tunnel_sock_cfg udp_cfg; | 1584 | struct udp_tunnel_sock_cfg udp_cfg = { }; |
| 1585 | 1585 | ||
| 1586 | udp_cfg.sk_user_data = tunnel; | 1586 | udp_cfg.sk_user_data = tunnel; |
| 1587 | udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP; | 1587 | udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP; |
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 4c6404e1ad6e..21b1fdf5d01d 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c | |||
| @@ -161,6 +161,10 @@ void mesh_sta_cleanup(struct sta_info *sta) | |||
| 161 | del_timer_sync(&sta->mesh->plink_timer); | 161 | del_timer_sync(&sta->mesh->plink_timer); |
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | /* make sure no readers can access nexthop sta from here on */ | ||
| 165 | mesh_path_flush_by_nexthop(sta); | ||
| 166 | synchronize_net(); | ||
| 167 | |||
| 164 | if (changed) | 168 | if (changed) |
| 165 | ieee80211_mbss_info_change_notify(sdata, changed); | 169 | ieee80211_mbss_info_change_notify(sdata, changed); |
| 166 | } | 170 | } |
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index c8b8ccc370eb..78b0ef32dddd 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
| @@ -280,7 +280,7 @@ struct ieee80211_fast_tx { | |||
| 280 | u8 sa_offs, da_offs, pn_offs; | 280 | u8 sa_offs, da_offs, pn_offs; |
| 281 | u8 band; | 281 | u8 band; |
| 282 | u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV + | 282 | u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV + |
| 283 | sizeof(rfc1042_header)]; | 283 | sizeof(rfc1042_header)] __aligned(2); |
| 284 | 284 | ||
| 285 | struct rcu_head rcu_head; | 285 | struct rcu_head rcu_head; |
| 286 | }; | 286 | }; |
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 2cb3c626cd43..096a45103f14 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c | |||
| @@ -762,7 +762,7 @@ static int expire_quiescent_template(struct netns_ipvs *ipvs, | |||
| 762 | * If available, return 1, otherwise invalidate this connection | 762 | * If available, return 1, otherwise invalidate this connection |
| 763 | * template and return 0. | 763 | * template and return 0. |
| 764 | */ | 764 | */ |
| 765 | int ip_vs_check_template(struct ip_vs_conn *ct) | 765 | int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest) |
| 766 | { | 766 | { |
| 767 | struct ip_vs_dest *dest = ct->dest; | 767 | struct ip_vs_dest *dest = ct->dest; |
| 768 | struct netns_ipvs *ipvs = ct->ipvs; | 768 | struct netns_ipvs *ipvs = ct->ipvs; |
| @@ -772,7 +772,8 @@ int ip_vs_check_template(struct ip_vs_conn *ct) | |||
| 772 | */ | 772 | */ |
| 773 | if ((dest == NULL) || | 773 | if ((dest == NULL) || |
| 774 | !(dest->flags & IP_VS_DEST_F_AVAILABLE) || | 774 | !(dest->flags & IP_VS_DEST_F_AVAILABLE) || |
| 775 | expire_quiescent_template(ipvs, dest)) { | 775 | expire_quiescent_template(ipvs, dest) || |
| 776 | (cdest && (dest != cdest))) { | ||
| 776 | IP_VS_DBG_BUF(9, "check_template: dest not available for " | 777 | IP_VS_DBG_BUF(9, "check_template: dest not available for " |
| 777 | "protocol %s s:%s:%d v:%s:%d " | 778 | "protocol %s s:%s:%d v:%s:%d " |
| 778 | "-> d:%s:%d\n", | 779 | "-> d:%s:%d\n", |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 1207f20d24e4..2c1b498a7a27 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
| @@ -321,7 +321,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc, | |||
| 321 | 321 | ||
| 322 | /* Check if a template already exists */ | 322 | /* Check if a template already exists */ |
| 323 | ct = ip_vs_ct_in_get(¶m); | 323 | ct = ip_vs_ct_in_get(¶m); |
| 324 | if (!ct || !ip_vs_check_template(ct)) { | 324 | if (!ct || !ip_vs_check_template(ct, NULL)) { |
| 325 | struct ip_vs_scheduler *sched; | 325 | struct ip_vs_scheduler *sched; |
| 326 | 326 | ||
| 327 | /* | 327 | /* |
| @@ -1154,7 +1154,8 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc, | |||
| 1154 | vport, ¶m) < 0) | 1154 | vport, ¶m) < 0) |
| 1155 | return NULL; | 1155 | return NULL; |
| 1156 | ct = ip_vs_ct_in_get(¶m); | 1156 | ct = ip_vs_ct_in_get(¶m); |
| 1157 | if (!ct) { | 1157 | /* check if template exists and points to the same dest */ |
| 1158 | if (!ct || !ip_vs_check_template(ct, dest)) { | ||
| 1158 | ct = ip_vs_conn_new(¶m, dest->af, daddr, dport, | 1159 | ct = ip_vs_conn_new(¶m, dest->af, daddr, dport, |
| 1159 | IP_VS_CONN_F_TEMPLATE, dest, 0); | 1160 | IP_VS_CONN_F_TEMPLATE, dest, 0); |
| 1160 | if (!ct) { | 1161 | if (!ct) { |
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c index 883c691ec8d0..19efeba02abb 100644 --- a/net/netfilter/nf_conntrack_ftp.c +++ b/net/netfilter/nf_conntrack_ftp.c | |||
| @@ -632,6 +632,7 @@ static int __init nf_conntrack_ftp_init(void) | |||
| 632 | if (ret) { | 632 | if (ret) { |
| 633 | pr_err("failed to register helper for pf: %d port: %d\n", | 633 | pr_err("failed to register helper for pf: %d port: %d\n", |
| 634 | ftp[i][j].tuple.src.l3num, ports[i]); | 634 | ftp[i][j].tuple.src.l3num, ports[i]); |
| 635 | ports_c = i; | ||
| 635 | nf_conntrack_ftp_fini(); | 636 | nf_conntrack_ftp_fini(); |
| 636 | return ret; | 637 | return ret; |
| 637 | } | 638 | } |
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index f703adb7e5f7..196cb39649e1 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
| @@ -361,9 +361,10 @@ EXPORT_SYMBOL_GPL(nf_ct_helper_log); | |||
| 361 | 361 | ||
| 362 | int nf_conntrack_helper_register(struct nf_conntrack_helper *me) | 362 | int nf_conntrack_helper_register(struct nf_conntrack_helper *me) |
| 363 | { | 363 | { |
| 364 | int ret = 0; | 364 | struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) }; |
| 365 | struct nf_conntrack_helper *cur; | ||
| 366 | unsigned int h = helper_hash(&me->tuple); | 365 | unsigned int h = helper_hash(&me->tuple); |
| 366 | struct nf_conntrack_helper *cur; | ||
| 367 | int ret = 0; | ||
| 367 | 368 | ||
| 368 | BUG_ON(me->expect_policy == NULL); | 369 | BUG_ON(me->expect_policy == NULL); |
| 369 | BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES); | 370 | BUG_ON(me->expect_class_max >= NF_CT_MAX_EXPECT_CLASSES); |
| @@ -371,9 +372,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me) | |||
| 371 | 372 | ||
| 372 | mutex_lock(&nf_ct_helper_mutex); | 373 | mutex_lock(&nf_ct_helper_mutex); |
| 373 | hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { | 374 | hlist_for_each_entry(cur, &nf_ct_helper_hash[h], hnode) { |
| 374 | if (strncmp(cur->name, me->name, NF_CT_HELPER_NAME_LEN) == 0 && | 375 | if (nf_ct_tuple_src_mask_cmp(&cur->tuple, &me->tuple, &mask)) { |
| 375 | cur->tuple.src.l3num == me->tuple.src.l3num && | ||
| 376 | cur->tuple.dst.protonum == me->tuple.dst.protonum) { | ||
| 377 | ret = -EEXIST; | 376 | ret = -EEXIST; |
| 378 | goto out; | 377 | goto out; |
| 379 | } | 378 | } |
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c index 8b6da2719600..f97ac61d2536 100644 --- a/net/netfilter/nf_conntrack_irc.c +++ b/net/netfilter/nf_conntrack_irc.c | |||
| @@ -271,6 +271,7 @@ static int __init nf_conntrack_irc_init(void) | |||
| 271 | if (ret) { | 271 | if (ret) { |
| 272 | pr_err("failed to register helper for pf: %u port: %u\n", | 272 | pr_err("failed to register helper for pf: %u port: %u\n", |
| 273 | irc[i].tuple.src.l3num, ports[i]); | 273 | irc[i].tuple.src.l3num, ports[i]); |
| 274 | ports_c = i; | ||
| 274 | nf_conntrack_irc_fini(); | 275 | nf_conntrack_irc_fini(); |
| 275 | return ret; | 276 | return ret; |
| 276 | } | 277 | } |
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c index 7523a575f6d1..3fcbaab83b3d 100644 --- a/net/netfilter/nf_conntrack_sane.c +++ b/net/netfilter/nf_conntrack_sane.c | |||
| @@ -223,6 +223,7 @@ static int __init nf_conntrack_sane_init(void) | |||
| 223 | if (ret) { | 223 | if (ret) { |
| 224 | pr_err("failed to register helper for pf: %d port: %d\n", | 224 | pr_err("failed to register helper for pf: %d port: %d\n", |
| 225 | sane[i][j].tuple.src.l3num, ports[i]); | 225 | sane[i][j].tuple.src.l3num, ports[i]); |
| 226 | ports_c = i; | ||
| 226 | nf_conntrack_sane_fini(); | 227 | nf_conntrack_sane_fini(); |
| 227 | return ret; | 228 | return ret; |
| 228 | } | 229 | } |
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 3e06402739e0..f72ba5587588 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
| @@ -1669,6 +1669,7 @@ static int __init nf_conntrack_sip_init(void) | |||
| 1669 | if (ret) { | 1669 | if (ret) { |
| 1670 | pr_err("failed to register helper for pf: %u port: %u\n", | 1670 | pr_err("failed to register helper for pf: %u port: %u\n", |
| 1671 | sip[i][j].tuple.src.l3num, ports[i]); | 1671 | sip[i][j].tuple.src.l3num, ports[i]); |
| 1672 | ports_c = i; | ||
| 1672 | nf_conntrack_sip_fini(); | 1673 | nf_conntrack_sip_fini(); |
| 1673 | return ret; | 1674 | return ret; |
| 1674 | } | 1675 | } |
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index f87e84ebcec3..c026c472ea80 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c | |||
| @@ -487,8 +487,6 @@ static struct ctl_table nf_ct_sysctl_table[] = { | |||
| 487 | { } | 487 | { } |
| 488 | }; | 488 | }; |
| 489 | 489 | ||
| 490 | #define NET_NF_CONNTRACK_MAX 2089 | ||
| 491 | |||
| 492 | static struct ctl_table nf_ct_netfilter_table[] = { | 490 | static struct ctl_table nf_ct_netfilter_table[] = { |
| 493 | { | 491 | { |
| 494 | .procname = "nf_conntrack_max", | 492 | .procname = "nf_conntrack_max", |
diff --git a/net/netfilter/nf_conntrack_tftp.c b/net/netfilter/nf_conntrack_tftp.c index 36f964066461..2e65b5430fba 100644 --- a/net/netfilter/nf_conntrack_tftp.c +++ b/net/netfilter/nf_conntrack_tftp.c | |||
| @@ -142,6 +142,7 @@ static int __init nf_conntrack_tftp_init(void) | |||
| 142 | if (ret) { | 142 | if (ret) { |
| 143 | pr_err("failed to register helper for pf: %u port: %u\n", | 143 | pr_err("failed to register helper for pf: %u port: %u\n", |
| 144 | tftp[i][j].tuple.src.l3num, ports[i]); | 144 | tftp[i][j].tuple.src.l3num, ports[i]); |
| 145 | ports_c = i; | ||
| 145 | nf_conntrack_tftp_fini(); | 146 | nf_conntrack_tftp_fini(); |
| 146 | return ret; | 147 | return ret; |
| 147 | } | 148 | } |
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index 5baa8e24e6ac..b19ad20a705c 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
| @@ -26,23 +26,21 @@ | |||
| 26 | * Once the queue is registered it must reinject all packets it | 26 | * Once the queue is registered it must reinject all packets it |
| 27 | * receives, no matter what. | 27 | * receives, no matter what. |
| 28 | */ | 28 | */ |
| 29 | static const struct nf_queue_handler __rcu *queue_handler __read_mostly; | ||
| 30 | 29 | ||
| 31 | /* return EBUSY when somebody else is registered, return EEXIST if the | 30 | /* return EBUSY when somebody else is registered, return EEXIST if the |
| 32 | * same handler is registered, return 0 in case of success. */ | 31 | * same handler is registered, return 0 in case of success. */ |
| 33 | void nf_register_queue_handler(const struct nf_queue_handler *qh) | 32 | void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh) |
| 34 | { | 33 | { |
| 35 | /* should never happen, we only have one queueing backend in kernel */ | 34 | /* should never happen, we only have one queueing backend in kernel */ |
| 36 | WARN_ON(rcu_access_pointer(queue_handler)); | 35 | WARN_ON(rcu_access_pointer(net->nf.queue_handler)); |
| 37 | rcu_assign_pointer(queue_handler, qh); | 36 | rcu_assign_pointer(net->nf.queue_handler, qh); |
| 38 | } | 37 | } |
| 39 | EXPORT_SYMBOL(nf_register_queue_handler); | 38 | EXPORT_SYMBOL(nf_register_queue_handler); |
| 40 | 39 | ||
| 41 | /* The caller must flush their queue before this */ | 40 | /* The caller must flush their queue before this */ |
| 42 | void nf_unregister_queue_handler(void) | 41 | void nf_unregister_queue_handler(struct net *net) |
| 43 | { | 42 | { |
| 44 | RCU_INIT_POINTER(queue_handler, NULL); | 43 | RCU_INIT_POINTER(net->nf.queue_handler, NULL); |
| 45 | synchronize_rcu(); | ||
| 46 | } | 44 | } |
| 47 | EXPORT_SYMBOL(nf_unregister_queue_handler); | 45 | EXPORT_SYMBOL(nf_unregister_queue_handler); |
| 48 | 46 | ||
| @@ -103,7 +101,7 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops) | |||
| 103 | const struct nf_queue_handler *qh; | 101 | const struct nf_queue_handler *qh; |
| 104 | 102 | ||
| 105 | rcu_read_lock(); | 103 | rcu_read_lock(); |
| 106 | qh = rcu_dereference(queue_handler); | 104 | qh = rcu_dereference(net->nf.queue_handler); |
| 107 | if (qh) | 105 | if (qh) |
| 108 | qh->nf_hook_drop(net, ops); | 106 | qh->nf_hook_drop(net, ops); |
| 109 | rcu_read_unlock(); | 107 | rcu_read_unlock(); |
| @@ -122,9 +120,10 @@ int nf_queue(struct sk_buff *skb, | |||
| 122 | struct nf_queue_entry *entry = NULL; | 120 | struct nf_queue_entry *entry = NULL; |
| 123 | const struct nf_afinfo *afinfo; | 121 | const struct nf_afinfo *afinfo; |
| 124 | const struct nf_queue_handler *qh; | 122 | const struct nf_queue_handler *qh; |
| 123 | struct net *net = state->net; | ||
| 125 | 124 | ||
| 126 | /* QUEUE == DROP if no one is waiting, to be safe. */ | 125 | /* QUEUE == DROP if no one is waiting, to be safe. */ |
| 127 | qh = rcu_dereference(queue_handler); | 126 | qh = rcu_dereference(net->nf.queue_handler); |
| 128 | if (!qh) { | 127 | if (!qh) { |
| 129 | status = -ESRCH; | 128 | status = -ESRCH; |
| 130 | goto err; | 129 | goto err; |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 4d292b933b5c..7b7aa871a174 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -2647,6 +2647,8 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk, | |||
| 2647 | /* Only accept unspec with dump */ | 2647 | /* Only accept unspec with dump */ |
| 2648 | if (nfmsg->nfgen_family == NFPROTO_UNSPEC) | 2648 | if (nfmsg->nfgen_family == NFPROTO_UNSPEC) |
| 2649 | return -EAFNOSUPPORT; | 2649 | return -EAFNOSUPPORT; |
| 2650 | if (!nla[NFTA_SET_TABLE]) | ||
| 2651 | return -EINVAL; | ||
| 2650 | 2652 | ||
| 2651 | set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); | 2653 | set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]); |
| 2652 | if (IS_ERR(set)) | 2654 | if (IS_ERR(set)) |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index aa93877ab6e2..5d36a0926b4a 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
| @@ -557,7 +557,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, | |||
| 557 | 557 | ||
| 558 | if (entskb->tstamp.tv64) { | 558 | if (entskb->tstamp.tv64) { |
| 559 | struct nfqnl_msg_packet_timestamp ts; | 559 | struct nfqnl_msg_packet_timestamp ts; |
| 560 | struct timespec64 kts = ktime_to_timespec64(skb->tstamp); | 560 | struct timespec64 kts = ktime_to_timespec64(entskb->tstamp); |
| 561 | 561 | ||
| 562 | ts.sec = cpu_to_be64(kts.tv_sec); | 562 | ts.sec = cpu_to_be64(kts.tv_sec); |
| 563 | ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC); | 563 | ts.usec = cpu_to_be64(kts.tv_nsec / NSEC_PER_USEC); |
| @@ -1482,21 +1482,29 @@ static int __net_init nfnl_queue_net_init(struct net *net) | |||
| 1482 | net->nf.proc_netfilter, &nfqnl_file_ops)) | 1482 | net->nf.proc_netfilter, &nfqnl_file_ops)) |
| 1483 | return -ENOMEM; | 1483 | return -ENOMEM; |
| 1484 | #endif | 1484 | #endif |
| 1485 | nf_register_queue_handler(net, &nfqh); | ||
| 1485 | return 0; | 1486 | return 0; |
| 1486 | } | 1487 | } |
| 1487 | 1488 | ||
| 1488 | static void __net_exit nfnl_queue_net_exit(struct net *net) | 1489 | static void __net_exit nfnl_queue_net_exit(struct net *net) |
| 1489 | { | 1490 | { |
| 1491 | nf_unregister_queue_handler(net); | ||
| 1490 | #ifdef CONFIG_PROC_FS | 1492 | #ifdef CONFIG_PROC_FS |
| 1491 | remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); | 1493 | remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); |
| 1492 | #endif | 1494 | #endif |
| 1493 | } | 1495 | } |
| 1494 | 1496 | ||
| 1497 | static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list) | ||
| 1498 | { | ||
| 1499 | synchronize_rcu(); | ||
| 1500 | } | ||
| 1501 | |||
| 1495 | static struct pernet_operations nfnl_queue_net_ops = { | 1502 | static struct pernet_operations nfnl_queue_net_ops = { |
| 1496 | .init = nfnl_queue_net_init, | 1503 | .init = nfnl_queue_net_init, |
| 1497 | .exit = nfnl_queue_net_exit, | 1504 | .exit = nfnl_queue_net_exit, |
| 1498 | .id = &nfnl_queue_net_id, | 1505 | .exit_batch = nfnl_queue_net_exit_batch, |
| 1499 | .size = sizeof(struct nfnl_queue_net), | 1506 | .id = &nfnl_queue_net_id, |
| 1507 | .size = sizeof(struct nfnl_queue_net), | ||
| 1500 | }; | 1508 | }; |
| 1501 | 1509 | ||
| 1502 | static int __init nfnetlink_queue_init(void) | 1510 | static int __init nfnetlink_queue_init(void) |
| @@ -1517,7 +1525,6 @@ static int __init nfnetlink_queue_init(void) | |||
| 1517 | } | 1525 | } |
| 1518 | 1526 | ||
| 1519 | register_netdevice_notifier(&nfqnl_dev_notifier); | 1527 | register_netdevice_notifier(&nfqnl_dev_notifier); |
| 1520 | nf_register_queue_handler(&nfqh); | ||
| 1521 | return status; | 1528 | return status; |
| 1522 | 1529 | ||
| 1523 | cleanup_netlink_notifier: | 1530 | cleanup_netlink_notifier: |
| @@ -1529,7 +1536,6 @@ out: | |||
| 1529 | 1536 | ||
| 1530 | static void __exit nfnetlink_queue_fini(void) | 1537 | static void __exit nfnetlink_queue_fini(void) |
| 1531 | { | 1538 | { |
| 1532 | nf_unregister_queue_handler(); | ||
| 1533 | unregister_netdevice_notifier(&nfqnl_dev_notifier); | 1539 | unregister_netdevice_notifier(&nfqnl_dev_notifier); |
| 1534 | nfnetlink_subsys_unregister(&nfqnl_subsys); | 1540 | nfnetlink_subsys_unregister(&nfqnl_subsys); |
| 1535 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); | 1541 | netlink_unregister_notifier(&nfqnl_rtnl_notifier); |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index c69c892231d7..2675d580c490 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
| @@ -612,7 +612,7 @@ int xt_compat_check_entry_offsets(const void *base, const char *elems, | |||
| 612 | return -EINVAL; | 612 | return -EINVAL; |
| 613 | 613 | ||
| 614 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && | 614 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && |
| 615 | target_offset + sizeof(struct compat_xt_standard_target) != next_offset) | 615 | COMPAT_XT_ALIGN(target_offset + sizeof(struct compat_xt_standard_target)) != next_offset) |
| 616 | return -EINVAL; | 616 | return -EINVAL; |
| 617 | 617 | ||
| 618 | /* compat_xt_entry match has less strict aligment requirements, | 618 | /* compat_xt_entry match has less strict aligment requirements, |
| @@ -694,7 +694,7 @@ int xt_check_entry_offsets(const void *base, | |||
| 694 | return -EINVAL; | 694 | return -EINVAL; |
| 695 | 695 | ||
| 696 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && | 696 | if (strcmp(t->u.user.name, XT_STANDARD_TARGET) == 0 && |
| 697 | target_offset + sizeof(struct xt_standard_target) != next_offset) | 697 | XT_ALIGN(target_offset + sizeof(struct xt_standard_target)) != next_offset) |
| 698 | return -EINVAL; | 698 | return -EINVAL; |
| 699 | 699 | ||
| 700 | return xt_check_entry_match(elems, base + target_offset, | 700 | return xt_check_entry_match(elems, base + target_offset, |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4040eb92d9c9..9bff6ef16fa7 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -93,6 +93,7 @@ | |||
| 93 | #include <net/inet_common.h> | 93 | #include <net/inet_common.h> |
| 94 | #endif | 94 | #endif |
| 95 | #include <linux/bpf.h> | 95 | #include <linux/bpf.h> |
| 96 | #include <net/compat.h> | ||
| 96 | 97 | ||
| 97 | #include "internal.h" | 98 | #include "internal.h" |
| 98 | 99 | ||
| @@ -3940,6 +3941,27 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, | |||
| 3940 | } | 3941 | } |
| 3941 | 3942 | ||
| 3942 | 3943 | ||
| 3944 | #ifdef CONFIG_COMPAT | ||
| 3945 | static int compat_packet_setsockopt(struct socket *sock, int level, int optname, | ||
| 3946 | char __user *optval, unsigned int optlen) | ||
| 3947 | { | ||
| 3948 | struct packet_sock *po = pkt_sk(sock->sk); | ||
| 3949 | |||
| 3950 | if (level != SOL_PACKET) | ||
| 3951 | return -ENOPROTOOPT; | ||
| 3952 | |||
| 3953 | if (optname == PACKET_FANOUT_DATA && | ||
| 3954 | po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) { | ||
| 3955 | optval = (char __user *)get_compat_bpf_fprog(optval); | ||
| 3956 | if (!optval) | ||
| 3957 | return -EFAULT; | ||
| 3958 | optlen = sizeof(struct sock_fprog); | ||
| 3959 | } | ||
| 3960 | |||
| 3961 | return packet_setsockopt(sock, level, optname, optval, optlen); | ||
| 3962 | } | ||
| 3963 | #endif | ||
| 3964 | |||
| 3943 | static int packet_notifier(struct notifier_block *this, | 3965 | static int packet_notifier(struct notifier_block *this, |
| 3944 | unsigned long msg, void *ptr) | 3966 | unsigned long msg, void *ptr) |
| 3945 | { | 3967 | { |
| @@ -4416,6 +4438,9 @@ static const struct proto_ops packet_ops = { | |||
| 4416 | .shutdown = sock_no_shutdown, | 4438 | .shutdown = sock_no_shutdown, |
| 4417 | .setsockopt = packet_setsockopt, | 4439 | .setsockopt = packet_setsockopt, |
| 4418 | .getsockopt = packet_getsockopt, | 4440 | .getsockopt = packet_getsockopt, |
| 4441 | #ifdef CONFIG_COMPAT | ||
| 4442 | .compat_setsockopt = compat_packet_setsockopt, | ||
| 4443 | #endif | ||
| 4419 | .sendmsg = packet_sendmsg, | 4444 | .sendmsg = packet_sendmsg, |
| 4420 | .recvmsg = packet_recvmsg, | 4445 | .recvmsg = packet_recvmsg, |
| 4421 | .mmap = packet_mmap, | 4446 | .mmap = packet_mmap, |
diff --git a/net/rds/rds.h b/net/rds/rds.h index 80256b08eac0..387df5f32e49 100644 --- a/net/rds/rds.h +++ b/net/rds/rds.h | |||
| @@ -74,6 +74,7 @@ enum { | |||
| 74 | RDS_CONN_CONNECTING, | 74 | RDS_CONN_CONNECTING, |
| 75 | RDS_CONN_DISCONNECTING, | 75 | RDS_CONN_DISCONNECTING, |
| 76 | RDS_CONN_UP, | 76 | RDS_CONN_UP, |
| 77 | RDS_CONN_RESETTING, | ||
| 77 | RDS_CONN_ERROR, | 78 | RDS_CONN_ERROR, |
| 78 | }; | 79 | }; |
| 79 | 80 | ||
| @@ -813,6 +814,7 @@ void rds_connect_worker(struct work_struct *); | |||
| 813 | void rds_shutdown_worker(struct work_struct *); | 814 | void rds_shutdown_worker(struct work_struct *); |
| 814 | void rds_send_worker(struct work_struct *); | 815 | void rds_send_worker(struct work_struct *); |
| 815 | void rds_recv_worker(struct work_struct *); | 816 | void rds_recv_worker(struct work_struct *); |
| 817 | void rds_connect_path_complete(struct rds_connection *conn, int curr); | ||
| 816 | void rds_connect_complete(struct rds_connection *conn); | 818 | void rds_connect_complete(struct rds_connection *conn); |
| 817 | 819 | ||
| 818 | /* transport.c */ | 820 | /* transport.c */ |
diff --git a/net/rds/recv.c b/net/rds/recv.c index c0be1ecd11c9..8413f6c99e13 100644 --- a/net/rds/recv.c +++ b/net/rds/recv.c | |||
| @@ -561,5 +561,7 @@ void rds_inc_info_copy(struct rds_incoming *inc, | |||
| 561 | minfo.fport = inc->i_hdr.h_dport; | 561 | minfo.fport = inc->i_hdr.h_dport; |
| 562 | } | 562 | } |
| 563 | 563 | ||
| 564 | minfo.flags = 0; | ||
| 565 | |||
| 564 | rds_info_copy(iter, &minfo, sizeof(minfo)); | 566 | rds_info_copy(iter, &minfo, sizeof(minfo)); |
| 565 | } | 567 | } |
diff --git a/net/rds/send.c b/net/rds/send.c index c9cdb358ea88..b1962f8e30f7 100644 --- a/net/rds/send.c +++ b/net/rds/send.c | |||
| @@ -99,6 +99,7 @@ void rds_send_reset(struct rds_connection *conn) | |||
| 99 | list_splice_init(&conn->c_retrans, &conn->c_send_queue); | 99 | list_splice_init(&conn->c_retrans, &conn->c_send_queue); |
| 100 | spin_unlock_irqrestore(&conn->c_lock, flags); | 100 | spin_unlock_irqrestore(&conn->c_lock, flags); |
| 101 | } | 101 | } |
| 102 | EXPORT_SYMBOL_GPL(rds_send_reset); | ||
| 102 | 103 | ||
| 103 | static int acquire_in_xmit(struct rds_connection *conn) | 104 | static int acquire_in_xmit(struct rds_connection *conn) |
| 104 | { | 105 | { |
diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 86187dad1440..74ee126a6fe6 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c | |||
| @@ -126,9 +126,81 @@ void rds_tcp_restore_callbacks(struct socket *sock, | |||
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | /* | 128 | /* |
| 129 | * This is the only path that sets tc->t_sock. Send and receive trust that | 129 | * rds_tcp_reset_callbacks() switches the to the new sock and |
| 130 | * it is set. The RDS_CONN_UP bit protects those paths from being | 130 | * returns the existing tc->t_sock. |
| 131 | * called while it isn't set. | 131 | * |
| 132 | * The only functions that set tc->t_sock are rds_tcp_set_callbacks | ||
| 133 | * and rds_tcp_reset_callbacks. Send and receive trust that | ||
| 134 | * it is set. The absence of RDS_CONN_UP bit protects those paths | ||
| 135 | * from being called while it isn't set. | ||
| 136 | */ | ||
| 137 | void rds_tcp_reset_callbacks(struct socket *sock, | ||
| 138 | struct rds_connection *conn) | ||
| 139 | { | ||
| 140 | struct rds_tcp_connection *tc = conn->c_transport_data; | ||
| 141 | struct socket *osock = tc->t_sock; | ||
| 142 | |||
| 143 | if (!osock) | ||
| 144 | goto newsock; | ||
| 145 | |||
| 146 | /* Need to resolve a duelling SYN between peers. | ||
| 147 | * We have an outstanding SYN to this peer, which may | ||
| 148 | * potentially have transitioned to the RDS_CONN_UP state, | ||
| 149 | * so we must quiesce any send threads before resetting | ||
| 150 | * c_transport_data. We quiesce these threads by setting | ||
| 151 | * c_state to something other than RDS_CONN_UP, and then | ||
| 152 | * waiting for any existing threads in rds_send_xmit to | ||
| 153 | * complete release_in_xmit(). (Subsequent threads entering | ||
| 154 | * rds_send_xmit() will bail on !rds_conn_up().) | ||
| 155 | * | ||
| 156 | * However an incoming syn-ack at this point would end up | ||
| 157 | * marking the conn as RDS_CONN_UP, and would again permit | ||
| 158 | * rds_send_xmit() threads through, so ideally we would | ||
| 159 | * synchronize on RDS_CONN_UP after lock_sock(), but cannot | ||
| 160 | * do that: waiting on !RDS_IN_XMIT after lock_sock() may | ||
| 161 | * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT | ||
| 162 | * would not get set. As a result, we set c_state to | ||
| 163 | * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change | ||
| 164 | * cannot mark rds_conn_path_up() in the window before lock_sock() | ||
| 165 | */ | ||
| 166 | atomic_set(&conn->c_state, RDS_CONN_RESETTING); | ||
| 167 | wait_event(conn->c_waitq, !test_bit(RDS_IN_XMIT, &conn->c_flags)); | ||
| 168 | lock_sock(osock->sk); | ||
| 169 | /* reset receive side state for rds_tcp_data_recv() for osock */ | ||
| 170 | if (tc->t_tinc) { | ||
| 171 | rds_inc_put(&tc->t_tinc->ti_inc); | ||
| 172 | tc->t_tinc = NULL; | ||
| 173 | } | ||
| 174 | tc->t_tinc_hdr_rem = sizeof(struct rds_header); | ||
| 175 | tc->t_tinc_data_rem = 0; | ||
| 176 | tc->t_sock = NULL; | ||
| 177 | |||
| 178 | write_lock_bh(&osock->sk->sk_callback_lock); | ||
| 179 | |||
| 180 | osock->sk->sk_user_data = NULL; | ||
| 181 | osock->sk->sk_data_ready = tc->t_orig_data_ready; | ||
| 182 | osock->sk->sk_write_space = tc->t_orig_write_space; | ||
| 183 | osock->sk->sk_state_change = tc->t_orig_state_change; | ||
| 184 | write_unlock_bh(&osock->sk->sk_callback_lock); | ||
| 185 | release_sock(osock->sk); | ||
| 186 | sock_release(osock); | ||
| 187 | newsock: | ||
| 188 | rds_send_reset(conn); | ||
| 189 | lock_sock(sock->sk); | ||
| 190 | write_lock_bh(&sock->sk->sk_callback_lock); | ||
| 191 | tc->t_sock = sock; | ||
| 192 | sock->sk->sk_user_data = conn; | ||
| 193 | sock->sk->sk_data_ready = rds_tcp_data_ready; | ||
| 194 | sock->sk->sk_write_space = rds_tcp_write_space; | ||
| 195 | sock->sk->sk_state_change = rds_tcp_state_change; | ||
| 196 | |||
| 197 | write_unlock_bh(&sock->sk->sk_callback_lock); | ||
| 198 | release_sock(sock->sk); | ||
| 199 | } | ||
| 200 | |||
| 201 | /* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments | ||
| 202 | * above rds_tcp_reset_callbacks for notes about synchronization | ||
| 203 | * with data path | ||
| 132 | */ | 204 | */ |
| 133 | void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn) | 205 | void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn) |
| 134 | { | 206 | { |
diff --git a/net/rds/tcp.h b/net/rds/tcp.h index 41c228300525..ec0602b0dc24 100644 --- a/net/rds/tcp.h +++ b/net/rds/tcp.h | |||
| @@ -50,6 +50,7 @@ struct rds_tcp_statistics { | |||
| 50 | void rds_tcp_tune(struct socket *sock); | 50 | void rds_tcp_tune(struct socket *sock); |
| 51 | void rds_tcp_nonagle(struct socket *sock); | 51 | void rds_tcp_nonagle(struct socket *sock); |
| 52 | void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn); | 52 | void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn); |
| 53 | void rds_tcp_reset_callbacks(struct socket *sock, struct rds_connection *conn); | ||
| 53 | void rds_tcp_restore_callbacks(struct socket *sock, | 54 | void rds_tcp_restore_callbacks(struct socket *sock, |
| 54 | struct rds_tcp_connection *tc); | 55 | struct rds_tcp_connection *tc); |
| 55 | u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc); | 56 | u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc); |
diff --git a/net/rds/tcp_connect.c b/net/rds/tcp_connect.c index fb82e0a0bf89..fba13d0305fb 100644 --- a/net/rds/tcp_connect.c +++ b/net/rds/tcp_connect.c | |||
| @@ -60,7 +60,7 @@ void rds_tcp_state_change(struct sock *sk) | |||
| 60 | case TCP_SYN_RECV: | 60 | case TCP_SYN_RECV: |
| 61 | break; | 61 | break; |
| 62 | case TCP_ESTABLISHED: | 62 | case TCP_ESTABLISHED: |
| 63 | rds_connect_complete(conn); | 63 | rds_connect_path_complete(conn, RDS_CONN_CONNECTING); |
| 64 | break; | 64 | break; |
| 65 | case TCP_CLOSE_WAIT: | 65 | case TCP_CLOSE_WAIT: |
| 66 | case TCP_CLOSE: | 66 | case TCP_CLOSE: |
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c index 4bf4befe5066..686b1d03a558 100644 --- a/net/rds/tcp_listen.c +++ b/net/rds/tcp_listen.c | |||
| @@ -78,7 +78,6 @@ int rds_tcp_accept_one(struct socket *sock) | |||
| 78 | struct inet_sock *inet; | 78 | struct inet_sock *inet; |
| 79 | struct rds_tcp_connection *rs_tcp = NULL; | 79 | struct rds_tcp_connection *rs_tcp = NULL; |
| 80 | int conn_state; | 80 | int conn_state; |
| 81 | struct sock *nsk; | ||
| 82 | 81 | ||
| 83 | if (!sock) /* module unload or netns delete in progress */ | 82 | if (!sock) /* module unload or netns delete in progress */ |
| 84 | return -ENETUNREACH; | 83 | return -ENETUNREACH; |
| @@ -136,26 +135,21 @@ int rds_tcp_accept_one(struct socket *sock) | |||
| 136 | !conn->c_outgoing) { | 135 | !conn->c_outgoing) { |
| 137 | goto rst_nsk; | 136 | goto rst_nsk; |
| 138 | } else { | 137 | } else { |
| 139 | atomic_set(&conn->c_state, RDS_CONN_CONNECTING); | 138 | rds_tcp_reset_callbacks(new_sock, conn); |
| 140 | wait_event(conn->c_waitq, | ||
| 141 | !test_bit(RDS_IN_XMIT, &conn->c_flags)); | ||
| 142 | rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp); | ||
| 143 | conn->c_outgoing = 0; | 139 | conn->c_outgoing = 0; |
| 140 | /* rds_connect_path_complete() marks RDS_CONN_UP */ | ||
| 141 | rds_connect_path_complete(conn, RDS_CONN_DISCONNECTING); | ||
| 144 | } | 142 | } |
| 143 | } else { | ||
| 144 | rds_tcp_set_callbacks(new_sock, conn); | ||
| 145 | rds_connect_path_complete(conn, RDS_CONN_CONNECTING); | ||
| 145 | } | 146 | } |
| 146 | rds_tcp_set_callbacks(new_sock, conn); | ||
| 147 | rds_connect_complete(conn); /* marks RDS_CONN_UP */ | ||
| 148 | new_sock = NULL; | 147 | new_sock = NULL; |
| 149 | ret = 0; | 148 | ret = 0; |
| 150 | goto out; | 149 | goto out; |
| 151 | rst_nsk: | 150 | rst_nsk: |
| 152 | /* reset the newly returned accept sock and bail */ | 151 | /* reset the newly returned accept sock and bail */ |
| 153 | nsk = new_sock->sk; | 152 | kernel_sock_shutdown(new_sock, SHUT_RDWR); |
| 154 | rds_tcp_stats_inc(s_tcp_listen_closed_stale); | ||
| 155 | nsk->sk_user_data = NULL; | ||
| 156 | nsk->sk_prot->disconnect(nsk, 0); | ||
| 157 | tcp_done(nsk); | ||
| 158 | new_sock = NULL; | ||
| 159 | ret = 0; | 153 | ret = 0; |
| 160 | out: | 154 | out: |
| 161 | if (rs_tcp) | 155 | if (rs_tcp) |
diff --git a/net/rds/threads.c b/net/rds/threads.c index 454aa6d23327..4a323045719b 100644 --- a/net/rds/threads.c +++ b/net/rds/threads.c | |||
| @@ -71,9 +71,9 @@ | |||
| 71 | struct workqueue_struct *rds_wq; | 71 | struct workqueue_struct *rds_wq; |
| 72 | EXPORT_SYMBOL_GPL(rds_wq); | 72 | EXPORT_SYMBOL_GPL(rds_wq); |
| 73 | 73 | ||
| 74 | void rds_connect_complete(struct rds_connection *conn) | 74 | void rds_connect_path_complete(struct rds_connection *conn, int curr) |
| 75 | { | 75 | { |
| 76 | if (!rds_conn_transition(conn, RDS_CONN_CONNECTING, RDS_CONN_UP)) { | 76 | if (!rds_conn_transition(conn, curr, RDS_CONN_UP)) { |
| 77 | printk(KERN_WARNING "%s: Cannot transition to state UP, " | 77 | printk(KERN_WARNING "%s: Cannot transition to state UP, " |
| 78 | "current state is %d\n", | 78 | "current state is %d\n", |
| 79 | __func__, | 79 | __func__, |
| @@ -90,6 +90,12 @@ void rds_connect_complete(struct rds_connection *conn) | |||
| 90 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); | 90 | queue_delayed_work(rds_wq, &conn->c_send_w, 0); |
| 91 | queue_delayed_work(rds_wq, &conn->c_recv_w, 0); | 91 | queue_delayed_work(rds_wq, &conn->c_recv_w, 0); |
| 92 | } | 92 | } |
| 93 | EXPORT_SYMBOL_GPL(rds_connect_path_complete); | ||
| 94 | |||
| 95 | void rds_connect_complete(struct rds_connection *conn) | ||
| 96 | { | ||
| 97 | rds_connect_path_complete(conn, RDS_CONN_CONNECTING); | ||
| 98 | } | ||
| 93 | EXPORT_SYMBOL_GPL(rds_connect_complete); | 99 | EXPORT_SYMBOL_GPL(rds_connect_complete); |
| 94 | 100 | ||
| 95 | /* | 101 | /* |
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c index 6b726a046a7d..bab56ed649ba 100644 --- a/net/rxrpc/rxkad.c +++ b/net/rxrpc/rxkad.c | |||
| @@ -1162,9 +1162,7 @@ static int rxkad_init(void) | |||
| 1162 | /* pin the cipher we need so that the crypto layer doesn't invoke | 1162 | /* pin the cipher we need so that the crypto layer doesn't invoke |
| 1163 | * keventd to go get it */ | 1163 | * keventd to go get it */ |
| 1164 | rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); | 1164 | rxkad_ci = crypto_alloc_skcipher("pcbc(fcrypt)", 0, CRYPTO_ALG_ASYNC); |
| 1165 | if (IS_ERR(rxkad_ci)) | 1165 | return PTR_ERR_OR_ZERO(rxkad_ci); |
| 1166 | return PTR_ERR(rxkad_ci); | ||
| 1167 | return 0; | ||
| 1168 | } | 1166 | } |
| 1169 | 1167 | ||
| 1170 | /* | 1168 | /* |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index b884dae692a1..c557789765dc 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
| @@ -38,7 +38,7 @@ struct tcf_police { | |||
| 38 | bool peak_present; | 38 | bool peak_present; |
| 39 | }; | 39 | }; |
| 40 | #define to_police(pc) \ | 40 | #define to_police(pc) \ |
| 41 | container_of(pc, struct tcf_police, common) | 41 | container_of(pc->priv, struct tcf_police, common) |
| 42 | 42 | ||
| 43 | #define POL_TAB_MASK 15 | 43 | #define POL_TAB_MASK 15 |
| 44 | 44 | ||
| @@ -119,14 +119,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla, | |||
| 119 | struct nlattr *est, struct tc_action *a, | 119 | struct nlattr *est, struct tc_action *a, |
| 120 | int ovr, int bind) | 120 | int ovr, int bind) |
| 121 | { | 121 | { |
| 122 | unsigned int h; | ||
| 123 | int ret = 0, err; | 122 | int ret = 0, err; |
| 124 | struct nlattr *tb[TCA_POLICE_MAX + 1]; | 123 | struct nlattr *tb[TCA_POLICE_MAX + 1]; |
| 125 | struct tc_police *parm; | 124 | struct tc_police *parm; |
| 126 | struct tcf_police *police; | 125 | struct tcf_police *police; |
| 127 | struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; | 126 | struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL; |
| 128 | struct tc_action_net *tn = net_generic(net, police_net_id); | 127 | struct tc_action_net *tn = net_generic(net, police_net_id); |
| 129 | struct tcf_hashinfo *hinfo = tn->hinfo; | ||
| 130 | int size; | 128 | int size; |
| 131 | 129 | ||
| 132 | if (nla == NULL) | 130 | if (nla == NULL) |
| @@ -145,7 +143,7 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla, | |||
| 145 | 143 | ||
| 146 | if (parm->index) { | 144 | if (parm->index) { |
| 147 | if (tcf_hash_search(tn, a, parm->index)) { | 145 | if (tcf_hash_search(tn, a, parm->index)) { |
| 148 | police = to_police(a->priv); | 146 | police = to_police(a); |
| 149 | if (bind) { | 147 | if (bind) { |
| 150 | police->tcf_bindcnt += 1; | 148 | police->tcf_bindcnt += 1; |
| 151 | police->tcf_refcnt += 1; | 149 | police->tcf_refcnt += 1; |
| @@ -156,16 +154,15 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla, | |||
| 156 | /* not replacing */ | 154 | /* not replacing */ |
| 157 | return -EEXIST; | 155 | return -EEXIST; |
| 158 | } | 156 | } |
| 157 | } else { | ||
| 158 | ret = tcf_hash_create(tn, parm->index, NULL, a, | ||
| 159 | sizeof(*police), bind, false); | ||
| 160 | if (ret) | ||
| 161 | return ret; | ||
| 162 | ret = ACT_P_CREATED; | ||
| 159 | } | 163 | } |
| 160 | 164 | ||
| 161 | police = kzalloc(sizeof(*police), GFP_KERNEL); | 165 | police = to_police(a); |
| 162 | if (police == NULL) | ||
| 163 | return -ENOMEM; | ||
| 164 | ret = ACT_P_CREATED; | ||
| 165 | police->tcf_refcnt = 1; | ||
| 166 | spin_lock_init(&police->tcf_lock); | ||
| 167 | if (bind) | ||
| 168 | police->tcf_bindcnt = 1; | ||
| 169 | override: | 166 | override: |
| 170 | if (parm->rate.rate) { | 167 | if (parm->rate.rate) { |
| 171 | err = -ENOMEM; | 168 | err = -ENOMEM; |
| @@ -237,16 +234,8 @@ override: | |||
| 237 | return ret; | 234 | return ret; |
| 238 | 235 | ||
| 239 | police->tcfp_t_c = ktime_get_ns(); | 236 | police->tcfp_t_c = ktime_get_ns(); |
| 240 | police->tcf_index = parm->index ? parm->index : | 237 | tcf_hash_insert(tn, a); |
| 241 | tcf_hash_new_index(tn); | ||
| 242 | police->tcf_tm.install = jiffies; | ||
| 243 | police->tcf_tm.lastuse = jiffies; | ||
| 244 | h = tcf_hash(police->tcf_index, POL_TAB_MASK); | ||
| 245 | spin_lock_bh(&hinfo->lock); | ||
| 246 | hlist_add_head(&police->tcf_head, &hinfo->htab[h]); | ||
| 247 | spin_unlock_bh(&hinfo->lock); | ||
| 248 | 238 | ||
| 249 | a->priv = police; | ||
| 250 | return ret; | 239 | return ret; |
| 251 | 240 | ||
| 252 | failure_unlock: | 241 | failure_unlock: |
| @@ -255,7 +244,7 @@ failure: | |||
| 255 | qdisc_put_rtab(P_tab); | 244 | qdisc_put_rtab(P_tab); |
| 256 | qdisc_put_rtab(R_tab); | 245 | qdisc_put_rtab(R_tab); |
| 257 | if (ret == ACT_P_CREATED) | 246 | if (ret == ACT_P_CREATED) |
| 258 | kfree(police); | 247 | tcf_hash_cleanup(a, est); |
| 259 | return err; | 248 | return err; |
| 260 | } | 249 | } |
| 261 | 250 | ||
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 730aacafc22d..b3b7978f4182 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
| @@ -171,7 +171,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, unsigned long cookie) | |||
| 171 | struct tc_cls_flower_offload offload = {0}; | 171 | struct tc_cls_flower_offload offload = {0}; |
| 172 | struct tc_to_netdev tc; | 172 | struct tc_to_netdev tc; |
| 173 | 173 | ||
| 174 | if (!tc_should_offload(dev, 0)) | 174 | if (!tc_should_offload(dev, tp, 0)) |
| 175 | return; | 175 | return; |
| 176 | 176 | ||
| 177 | offload.command = TC_CLSFLOWER_DESTROY; | 177 | offload.command = TC_CLSFLOWER_DESTROY; |
| @@ -194,7 +194,7 @@ static void fl_hw_replace_filter(struct tcf_proto *tp, | |||
| 194 | struct tc_cls_flower_offload offload = {0}; | 194 | struct tc_cls_flower_offload offload = {0}; |
| 195 | struct tc_to_netdev tc; | 195 | struct tc_to_netdev tc; |
| 196 | 196 | ||
| 197 | if (!tc_should_offload(dev, flags)) | 197 | if (!tc_should_offload(dev, tp, flags)) |
| 198 | return; | 198 | return; |
| 199 | 199 | ||
| 200 | offload.command = TC_CLSFLOWER_REPLACE; | 200 | offload.command = TC_CLSFLOWER_REPLACE; |
| @@ -216,7 +216,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) | |||
| 216 | struct tc_cls_flower_offload offload = {0}; | 216 | struct tc_cls_flower_offload offload = {0}; |
| 217 | struct tc_to_netdev tc; | 217 | struct tc_to_netdev tc; |
| 218 | 218 | ||
| 219 | if (!tc_should_offload(dev, 0)) | 219 | if (!tc_should_offload(dev, tp, 0)) |
| 220 | return; | 220 | return; |
| 221 | 221 | ||
| 222 | offload.command = TC_CLSFLOWER_STATS; | 222 | offload.command = TC_CLSFLOWER_STATS; |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 079b43b3c5d2..ffe593efe930 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
| @@ -440,7 +440,7 @@ static void u32_remove_hw_knode(struct tcf_proto *tp, u32 handle) | |||
| 440 | offload.type = TC_SETUP_CLSU32; | 440 | offload.type = TC_SETUP_CLSU32; |
| 441 | offload.cls_u32 = &u32_offload; | 441 | offload.cls_u32 = &u32_offload; |
| 442 | 442 | ||
| 443 | if (tc_should_offload(dev, 0)) { | 443 | if (tc_should_offload(dev, tp, 0)) { |
| 444 | offload.cls_u32->command = TC_CLSU32_DELETE_KNODE; | 444 | offload.cls_u32->command = TC_CLSU32_DELETE_KNODE; |
| 445 | offload.cls_u32->knode.handle = handle; | 445 | offload.cls_u32->knode.handle = handle; |
| 446 | dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, | 446 | dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, |
| @@ -457,20 +457,21 @@ static int u32_replace_hw_hnode(struct tcf_proto *tp, | |||
| 457 | struct tc_to_netdev offload; | 457 | struct tc_to_netdev offload; |
| 458 | int err; | 458 | int err; |
| 459 | 459 | ||
| 460 | if (!tc_should_offload(dev, tp, flags)) | ||
| 461 | return tc_skip_sw(flags) ? -EINVAL : 0; | ||
| 462 | |||
| 460 | offload.type = TC_SETUP_CLSU32; | 463 | offload.type = TC_SETUP_CLSU32; |
| 461 | offload.cls_u32 = &u32_offload; | 464 | offload.cls_u32 = &u32_offload; |
| 462 | 465 | ||
| 463 | if (tc_should_offload(dev, flags)) { | 466 | offload.cls_u32->command = TC_CLSU32_NEW_HNODE; |
| 464 | offload.cls_u32->command = TC_CLSU32_NEW_HNODE; | 467 | offload.cls_u32->hnode.divisor = h->divisor; |
| 465 | offload.cls_u32->hnode.divisor = h->divisor; | 468 | offload.cls_u32->hnode.handle = h->handle; |
| 466 | offload.cls_u32->hnode.handle = h->handle; | 469 | offload.cls_u32->hnode.prio = h->prio; |
| 467 | offload.cls_u32->hnode.prio = h->prio; | ||
| 468 | 470 | ||
| 469 | err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, | 471 | err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, |
| 470 | tp->protocol, &offload); | 472 | tp->protocol, &offload); |
| 471 | if (tc_skip_sw(flags)) | 473 | if (tc_skip_sw(flags)) |
| 472 | return err; | 474 | return err; |
| 473 | } | ||
| 474 | 475 | ||
| 475 | return 0; | 476 | return 0; |
| 476 | } | 477 | } |
| @@ -484,7 +485,7 @@ static void u32_clear_hw_hnode(struct tcf_proto *tp, struct tc_u_hnode *h) | |||
| 484 | offload.type = TC_SETUP_CLSU32; | 485 | offload.type = TC_SETUP_CLSU32; |
| 485 | offload.cls_u32 = &u32_offload; | 486 | offload.cls_u32 = &u32_offload; |
| 486 | 487 | ||
| 487 | if (tc_should_offload(dev, 0)) { | 488 | if (tc_should_offload(dev, tp, 0)) { |
| 488 | offload.cls_u32->command = TC_CLSU32_DELETE_HNODE; | 489 | offload.cls_u32->command = TC_CLSU32_DELETE_HNODE; |
| 489 | offload.cls_u32->hnode.divisor = h->divisor; | 490 | offload.cls_u32->hnode.divisor = h->divisor; |
| 490 | offload.cls_u32->hnode.handle = h->handle; | 491 | offload.cls_u32->hnode.handle = h->handle; |
| @@ -507,27 +508,28 @@ static int u32_replace_hw_knode(struct tcf_proto *tp, | |||
| 507 | offload.type = TC_SETUP_CLSU32; | 508 | offload.type = TC_SETUP_CLSU32; |
| 508 | offload.cls_u32 = &u32_offload; | 509 | offload.cls_u32 = &u32_offload; |
| 509 | 510 | ||
| 510 | if (tc_should_offload(dev, flags)) { | 511 | if (!tc_should_offload(dev, tp, flags)) |
| 511 | offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE; | 512 | return tc_skip_sw(flags) ? -EINVAL : 0; |
| 512 | offload.cls_u32->knode.handle = n->handle; | 513 | |
| 513 | offload.cls_u32->knode.fshift = n->fshift; | 514 | offload.cls_u32->command = TC_CLSU32_REPLACE_KNODE; |
| 515 | offload.cls_u32->knode.handle = n->handle; | ||
| 516 | offload.cls_u32->knode.fshift = n->fshift; | ||
| 514 | #ifdef CONFIG_CLS_U32_MARK | 517 | #ifdef CONFIG_CLS_U32_MARK |
| 515 | offload.cls_u32->knode.val = n->val; | 518 | offload.cls_u32->knode.val = n->val; |
| 516 | offload.cls_u32->knode.mask = n->mask; | 519 | offload.cls_u32->knode.mask = n->mask; |
| 517 | #else | 520 | #else |
| 518 | offload.cls_u32->knode.val = 0; | 521 | offload.cls_u32->knode.val = 0; |
| 519 | offload.cls_u32->knode.mask = 0; | 522 | offload.cls_u32->knode.mask = 0; |
| 520 | #endif | 523 | #endif |
| 521 | offload.cls_u32->knode.sel = &n->sel; | 524 | offload.cls_u32->knode.sel = &n->sel; |
| 522 | offload.cls_u32->knode.exts = &n->exts; | 525 | offload.cls_u32->knode.exts = &n->exts; |
| 523 | if (n->ht_down) | 526 | if (n->ht_down) |
| 524 | offload.cls_u32->knode.link_handle = n->ht_down->handle; | 527 | offload.cls_u32->knode.link_handle = n->ht_down->handle; |
| 525 | 528 | ||
| 526 | err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, | 529 | err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle, |
| 527 | tp->protocol, &offload); | 530 | tp->protocol, &offload); |
| 528 | if (tc_skip_sw(flags)) | 531 | if (tc_skip_sw(flags)) |
| 529 | return err; | 532 | return err; |
| 530 | } | ||
| 531 | 533 | ||
| 532 | return 0; | 534 | return 0; |
| 533 | } | 535 | } |
| @@ -863,7 +865,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
| 863 | if (tb[TCA_U32_FLAGS]) { | 865 | if (tb[TCA_U32_FLAGS]) { |
| 864 | flags = nla_get_u32(tb[TCA_U32_FLAGS]); | 866 | flags = nla_get_u32(tb[TCA_U32_FLAGS]); |
| 865 | if (!tc_flags_valid(flags)) | 867 | if (!tc_flags_valid(flags)) |
| 866 | return err; | 868 | return -EINVAL; |
| 867 | } | 869 | } |
| 868 | 870 | ||
| 869 | n = (struct tc_u_knode *)*arg; | 871 | n = (struct tc_u_knode *)*arg; |
| @@ -921,11 +923,17 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, | |||
| 921 | ht->divisor = divisor; | 923 | ht->divisor = divisor; |
| 922 | ht->handle = handle; | 924 | ht->handle = handle; |
| 923 | ht->prio = tp->prio; | 925 | ht->prio = tp->prio; |
| 926 | |||
| 927 | err = u32_replace_hw_hnode(tp, ht, flags); | ||
| 928 | if (err) { | ||
| 929 | kfree(ht); | ||
| 930 | return err; | ||
| 931 | } | ||
| 932 | |||
| 924 | RCU_INIT_POINTER(ht->next, tp_c->hlist); | 933 | RCU_INIT_POINTER(ht->next, tp_c->hlist); |
| 925 | rcu_assign_pointer(tp_c->hlist, ht); | 934 | rcu_assign_pointer(tp_c->hlist, ht); |
| 926 | *arg = (unsigned long)ht; | 935 | *arg = (unsigned long)ht; |
| 927 | 936 | ||
| 928 | u32_replace_hw_hnode(tp, ht, flags); | ||
| 929 | return 0; | 937 | return 0; |
| 930 | } | 938 | } |
| 931 | 939 | ||
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c index a63e879e8975..bf8af2c43c2c 100644 --- a/net/sched/sch_drr.c +++ b/net/sched/sch_drr.c | |||
| @@ -375,6 +375,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 375 | cl->deficit = cl->quantum; | 375 | cl->deficit = cl->quantum; |
| 376 | } | 376 | } |
| 377 | 377 | ||
| 378 | qdisc_qstats_backlog_inc(sch, skb); | ||
| 378 | sch->q.qlen++; | 379 | sch->q.qlen++; |
| 379 | return err; | 380 | return err; |
| 380 | } | 381 | } |
| @@ -407,6 +408,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch) | |||
| 407 | 408 | ||
| 408 | bstats_update(&cl->bstats, skb); | 409 | bstats_update(&cl->bstats, skb); |
| 409 | qdisc_bstats_update(sch, skb); | 410 | qdisc_bstats_update(sch, skb); |
| 411 | qdisc_qstats_backlog_dec(sch, skb); | ||
| 410 | sch->q.qlen--; | 412 | sch->q.qlen--; |
| 411 | return skb; | 413 | return skb; |
| 412 | } | 414 | } |
| @@ -428,6 +430,7 @@ static unsigned int drr_drop(struct Qdisc *sch) | |||
| 428 | if (cl->qdisc->ops->drop) { | 430 | if (cl->qdisc->ops->drop) { |
| 429 | len = cl->qdisc->ops->drop(cl->qdisc); | 431 | len = cl->qdisc->ops->drop(cl->qdisc); |
| 430 | if (len > 0) { | 432 | if (len > 0) { |
| 433 | sch->qstats.backlog -= len; | ||
| 431 | sch->q.qlen--; | 434 | sch->q.qlen--; |
| 432 | if (cl->qdisc->q.qlen == 0) | 435 | if (cl->qdisc->q.qlen == 0) |
| 433 | list_del(&cl->alist); | 436 | list_del(&cl->alist); |
| @@ -463,6 +466,7 @@ static void drr_reset_qdisc(struct Qdisc *sch) | |||
| 463 | qdisc_reset(cl->qdisc); | 466 | qdisc_reset(cl->qdisc); |
| 464 | } | 467 | } |
| 465 | } | 468 | } |
| 469 | sch->qstats.backlog = 0; | ||
| 466 | sch->q.qlen = 0; | 470 | sch->q.qlen = 0; |
| 467 | } | 471 | } |
| 468 | 472 | ||
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 6883a8971562..da250b2e06ae 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
| @@ -199,6 +199,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 199 | unsigned int idx, prev_backlog, prev_qlen; | 199 | unsigned int idx, prev_backlog, prev_qlen; |
| 200 | struct fq_codel_flow *flow; | 200 | struct fq_codel_flow *flow; |
| 201 | int uninitialized_var(ret); | 201 | int uninitialized_var(ret); |
| 202 | unsigned int pkt_len; | ||
| 202 | bool memory_limited; | 203 | bool memory_limited; |
| 203 | 204 | ||
| 204 | idx = fq_codel_classify(skb, sch, &ret); | 205 | idx = fq_codel_classify(skb, sch, &ret); |
| @@ -230,6 +231,8 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 230 | prev_backlog = sch->qstats.backlog; | 231 | prev_backlog = sch->qstats.backlog; |
| 231 | prev_qlen = sch->q.qlen; | 232 | prev_qlen = sch->q.qlen; |
| 232 | 233 | ||
| 234 | /* save this packet length as it might be dropped by fq_codel_drop() */ | ||
| 235 | pkt_len = qdisc_pkt_len(skb); | ||
| 233 | /* fq_codel_drop() is quite expensive, as it performs a linear search | 236 | /* fq_codel_drop() is quite expensive, as it performs a linear search |
| 234 | * in q->backlogs[] to find a fat flow. | 237 | * in q->backlogs[] to find a fat flow. |
| 235 | * So instead of dropping a single packet, drop half of its backlog | 238 | * So instead of dropping a single packet, drop half of its backlog |
| @@ -237,14 +240,23 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 237 | */ | 240 | */ |
| 238 | ret = fq_codel_drop(sch, q->drop_batch_size); | 241 | ret = fq_codel_drop(sch, q->drop_batch_size); |
| 239 | 242 | ||
| 240 | q->drop_overlimit += prev_qlen - sch->q.qlen; | 243 | prev_qlen -= sch->q.qlen; |
| 244 | prev_backlog -= sch->qstats.backlog; | ||
| 245 | q->drop_overlimit += prev_qlen; | ||
| 241 | if (memory_limited) | 246 | if (memory_limited) |
| 242 | q->drop_overmemory += prev_qlen - sch->q.qlen; | 247 | q->drop_overmemory += prev_qlen; |
| 243 | /* As we dropped packet(s), better let upper stack know this */ | ||
| 244 | qdisc_tree_reduce_backlog(sch, prev_qlen - sch->q.qlen, | ||
| 245 | prev_backlog - sch->qstats.backlog); | ||
| 246 | 248 | ||
| 247 | return ret == idx ? NET_XMIT_CN : NET_XMIT_SUCCESS; | 249 | /* As we dropped packet(s), better let upper stack know this. |
| 250 | * If we dropped a packet for this flow, return NET_XMIT_CN, | ||
| 251 | * but in this case, our parents won't increase their backlogs. | ||
| 252 | */ | ||
| 253 | if (ret == idx) { | ||
| 254 | qdisc_tree_reduce_backlog(sch, prev_qlen - 1, | ||
| 255 | prev_backlog - pkt_len); | ||
| 256 | return NET_XMIT_CN; | ||
| 257 | } | ||
| 258 | qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog); | ||
| 259 | return NET_XMIT_SUCCESS; | ||
| 248 | } | 260 | } |
| 249 | 261 | ||
| 250 | /* This is the specific function called from codel_dequeue() | 262 | /* This is the specific function called from codel_dequeue() |
| @@ -649,7 +661,7 @@ static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl, | |||
| 649 | qs.backlog = q->backlogs[idx]; | 661 | qs.backlog = q->backlogs[idx]; |
| 650 | qs.drops = flow->dropped; | 662 | qs.drops = flow->dropped; |
| 651 | } | 663 | } |
| 652 | if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0) | 664 | if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0) |
| 653 | return -1; | 665 | return -1; |
| 654 | if (idx < q->flows_cnt) | 666 | if (idx < q->flows_cnt) |
| 655 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); | 667 | return gnet_stats_copy_app(d, &xstats, sizeof(xstats)); |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 269dd71b3828..f9e0e9c03d0a 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
| @@ -49,6 +49,7 @@ static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) | |||
| 49 | { | 49 | { |
| 50 | q->gso_skb = skb; | 50 | q->gso_skb = skb; |
| 51 | q->qstats.requeues++; | 51 | q->qstats.requeues++; |
| 52 | qdisc_qstats_backlog_inc(q, skb); | ||
| 52 | q->q.qlen++; /* it's still part of the queue */ | 53 | q->q.qlen++; /* it's still part of the queue */ |
| 53 | __netif_schedule(q); | 54 | __netif_schedule(q); |
| 54 | 55 | ||
| @@ -92,6 +93,7 @@ static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, | |||
| 92 | txq = skb_get_tx_queue(txq->dev, skb); | 93 | txq = skb_get_tx_queue(txq->dev, skb); |
| 93 | if (!netif_xmit_frozen_or_stopped(txq)) { | 94 | if (!netif_xmit_frozen_or_stopped(txq)) { |
| 94 | q->gso_skb = NULL; | 95 | q->gso_skb = NULL; |
| 96 | qdisc_qstats_backlog_dec(q, skb); | ||
| 95 | q->q.qlen--; | 97 | q->q.qlen--; |
| 96 | } else | 98 | } else |
| 97 | skb = NULL; | 99 | skb = NULL; |
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c index d783d7cc3348..1ac9f9f03fe3 100644 --- a/net/sched/sch_hfsc.c +++ b/net/sched/sch_hfsc.c | |||
| @@ -1529,6 +1529,7 @@ hfsc_reset_qdisc(struct Qdisc *sch) | |||
| 1529 | q->eligible = RB_ROOT; | 1529 | q->eligible = RB_ROOT; |
| 1530 | INIT_LIST_HEAD(&q->droplist); | 1530 | INIT_LIST_HEAD(&q->droplist); |
| 1531 | qdisc_watchdog_cancel(&q->watchdog); | 1531 | qdisc_watchdog_cancel(&q->watchdog); |
| 1532 | sch->qstats.backlog = 0; | ||
| 1532 | sch->q.qlen = 0; | 1533 | sch->q.qlen = 0; |
| 1533 | } | 1534 | } |
| 1534 | 1535 | ||
| @@ -1559,14 +1560,6 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) | |||
| 1559 | struct hfsc_sched *q = qdisc_priv(sch); | 1560 | struct hfsc_sched *q = qdisc_priv(sch); |
| 1560 | unsigned char *b = skb_tail_pointer(skb); | 1561 | unsigned char *b = skb_tail_pointer(skb); |
| 1561 | struct tc_hfsc_qopt qopt; | 1562 | struct tc_hfsc_qopt qopt; |
| 1562 | struct hfsc_class *cl; | ||
| 1563 | unsigned int i; | ||
| 1564 | |||
| 1565 | sch->qstats.backlog = 0; | ||
| 1566 | for (i = 0; i < q->clhash.hashsize; i++) { | ||
| 1567 | hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) | ||
| 1568 | sch->qstats.backlog += cl->qdisc->qstats.backlog; | ||
| 1569 | } | ||
| 1570 | 1563 | ||
| 1571 | qopt.defcls = q->defcls; | 1564 | qopt.defcls = q->defcls; |
| 1572 | if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) | 1565 | if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) |
| @@ -1604,6 +1597,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 1604 | if (cl->qdisc->q.qlen == 1) | 1597 | if (cl->qdisc->q.qlen == 1) |
| 1605 | set_active(cl, qdisc_pkt_len(skb)); | 1598 | set_active(cl, qdisc_pkt_len(skb)); |
| 1606 | 1599 | ||
| 1600 | qdisc_qstats_backlog_inc(sch, skb); | ||
| 1607 | sch->q.qlen++; | 1601 | sch->q.qlen++; |
| 1608 | 1602 | ||
| 1609 | return NET_XMIT_SUCCESS; | 1603 | return NET_XMIT_SUCCESS; |
| @@ -1672,6 +1666,7 @@ hfsc_dequeue(struct Qdisc *sch) | |||
| 1672 | 1666 | ||
| 1673 | qdisc_unthrottled(sch); | 1667 | qdisc_unthrottled(sch); |
| 1674 | qdisc_bstats_update(sch, skb); | 1668 | qdisc_bstats_update(sch, skb); |
| 1669 | qdisc_qstats_backlog_dec(sch, skb); | ||
| 1675 | sch->q.qlen--; | 1670 | sch->q.qlen--; |
| 1676 | 1671 | ||
| 1677 | return skb; | 1672 | return skb; |
| @@ -1695,6 +1690,7 @@ hfsc_drop(struct Qdisc *sch) | |||
| 1695 | } | 1690 | } |
| 1696 | cl->qstats.drops++; | 1691 | cl->qstats.drops++; |
| 1697 | qdisc_qstats_drop(sch); | 1692 | qdisc_qstats_drop(sch); |
| 1693 | sch->qstats.backlog -= len; | ||
| 1698 | sch->q.qlen--; | 1694 | sch->q.qlen--; |
| 1699 | return len; | 1695 | return len; |
| 1700 | } | 1696 | } |
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 10adbc617905..8fe6999b642a 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c | |||
| @@ -27,6 +27,11 @@ static unsigned long ingress_get(struct Qdisc *sch, u32 classid) | |||
| 27 | return TC_H_MIN(classid) + 1; | 27 | return TC_H_MIN(classid) + 1; |
| 28 | } | 28 | } |
| 29 | 29 | ||
| 30 | static bool ingress_cl_offload(u32 classid) | ||
| 31 | { | ||
| 32 | return true; | ||
| 33 | } | ||
| 34 | |||
| 30 | static unsigned long ingress_bind_filter(struct Qdisc *sch, | 35 | static unsigned long ingress_bind_filter(struct Qdisc *sch, |
| 31 | unsigned long parent, u32 classid) | 36 | unsigned long parent, u32 classid) |
| 32 | { | 37 | { |
| @@ -86,6 +91,7 @@ static const struct Qdisc_class_ops ingress_class_ops = { | |||
| 86 | .put = ingress_put, | 91 | .put = ingress_put, |
| 87 | .walk = ingress_walk, | 92 | .walk = ingress_walk, |
| 88 | .tcf_chain = ingress_find_tcf, | 93 | .tcf_chain = ingress_find_tcf, |
| 94 | .tcf_cl_offload = ingress_cl_offload, | ||
| 89 | .bind_tcf = ingress_bind_filter, | 95 | .bind_tcf = ingress_bind_filter, |
| 90 | .unbind_tcf = ingress_put, | 96 | .unbind_tcf = ingress_put, |
| 91 | }; | 97 | }; |
| @@ -110,6 +116,11 @@ static unsigned long clsact_get(struct Qdisc *sch, u32 classid) | |||
| 110 | } | 116 | } |
| 111 | } | 117 | } |
| 112 | 118 | ||
| 119 | static bool clsact_cl_offload(u32 classid) | ||
| 120 | { | ||
| 121 | return TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS); | ||
| 122 | } | ||
| 123 | |||
| 113 | static unsigned long clsact_bind_filter(struct Qdisc *sch, | 124 | static unsigned long clsact_bind_filter(struct Qdisc *sch, |
| 114 | unsigned long parent, u32 classid) | 125 | unsigned long parent, u32 classid) |
| 115 | { | 126 | { |
| @@ -158,6 +169,7 @@ static const struct Qdisc_class_ops clsact_class_ops = { | |||
| 158 | .put = ingress_put, | 169 | .put = ingress_put, |
| 159 | .walk = ingress_walk, | 170 | .walk = ingress_walk, |
| 160 | .tcf_chain = clsact_find_tcf, | 171 | .tcf_chain = clsact_find_tcf, |
| 172 | .tcf_cl_offload = clsact_cl_offload, | ||
| 161 | .bind_tcf = clsact_bind_filter, | 173 | .bind_tcf = clsact_bind_filter, |
| 162 | .unbind_tcf = ingress_put, | 174 | .unbind_tcf = ingress_put, |
| 163 | }; | 175 | }; |
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c index fee1b15506b2..4b0a82191bc4 100644 --- a/net/sched/sch_prio.c +++ b/net/sched/sch_prio.c | |||
| @@ -85,6 +85,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 85 | 85 | ||
| 86 | ret = qdisc_enqueue(skb, qdisc); | 86 | ret = qdisc_enqueue(skb, qdisc); |
| 87 | if (ret == NET_XMIT_SUCCESS) { | 87 | if (ret == NET_XMIT_SUCCESS) { |
| 88 | qdisc_qstats_backlog_inc(sch, skb); | ||
| 88 | sch->q.qlen++; | 89 | sch->q.qlen++; |
| 89 | return NET_XMIT_SUCCESS; | 90 | return NET_XMIT_SUCCESS; |
| 90 | } | 91 | } |
| @@ -117,6 +118,7 @@ static struct sk_buff *prio_dequeue(struct Qdisc *sch) | |||
| 117 | struct sk_buff *skb = qdisc_dequeue_peeked(qdisc); | 118 | struct sk_buff *skb = qdisc_dequeue_peeked(qdisc); |
| 118 | if (skb) { | 119 | if (skb) { |
| 119 | qdisc_bstats_update(sch, skb); | 120 | qdisc_bstats_update(sch, skb); |
| 121 | qdisc_qstats_backlog_dec(sch, skb); | ||
| 120 | sch->q.qlen--; | 122 | sch->q.qlen--; |
| 121 | return skb; | 123 | return skb; |
| 122 | } | 124 | } |
| @@ -135,6 +137,7 @@ static unsigned int prio_drop(struct Qdisc *sch) | |||
| 135 | for (prio = q->bands-1; prio >= 0; prio--) { | 137 | for (prio = q->bands-1; prio >= 0; prio--) { |
| 136 | qdisc = q->queues[prio]; | 138 | qdisc = q->queues[prio]; |
| 137 | if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) { | 139 | if (qdisc->ops->drop && (len = qdisc->ops->drop(qdisc)) != 0) { |
| 140 | sch->qstats.backlog -= len; | ||
| 138 | sch->q.qlen--; | 141 | sch->q.qlen--; |
| 139 | return len; | 142 | return len; |
| 140 | } | 143 | } |
| @@ -151,6 +154,7 @@ prio_reset(struct Qdisc *sch) | |||
| 151 | 154 | ||
| 152 | for (prio = 0; prio < q->bands; prio++) | 155 | for (prio = 0; prio < q->bands; prio++) |
| 153 | qdisc_reset(q->queues[prio]); | 156 | qdisc_reset(q->queues[prio]); |
| 157 | sch->qstats.backlog = 0; | ||
| 154 | sch->q.qlen = 0; | 158 | sch->q.qlen = 0; |
| 155 | } | 159 | } |
| 156 | 160 | ||
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index 8d2d8d953432..f18857febdad 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c | |||
| @@ -1235,8 +1235,10 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 1235 | cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); | 1235 | cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid); |
| 1236 | err = qfq_change_agg(sch, cl, cl->agg->class_weight, | 1236 | err = qfq_change_agg(sch, cl, cl->agg->class_weight, |
| 1237 | qdisc_pkt_len(skb)); | 1237 | qdisc_pkt_len(skb)); |
| 1238 | if (err) | 1238 | if (err) { |
| 1239 | return err; | 1239 | cl->qstats.drops++; |
| 1240 | return qdisc_drop(skb, sch); | ||
| 1241 | } | ||
| 1240 | } | 1242 | } |
| 1241 | 1243 | ||
| 1242 | err = qdisc_enqueue(skb, cl->qdisc); | 1244 | err = qdisc_enqueue(skb, cl->qdisc); |
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 8c0508c0e287..91578bdd378c 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
| @@ -97,6 +97,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 97 | 97 | ||
| 98 | ret = qdisc_enqueue(skb, child); | 98 | ret = qdisc_enqueue(skb, child); |
| 99 | if (likely(ret == NET_XMIT_SUCCESS)) { | 99 | if (likely(ret == NET_XMIT_SUCCESS)) { |
| 100 | qdisc_qstats_backlog_inc(sch, skb); | ||
| 100 | sch->q.qlen++; | 101 | sch->q.qlen++; |
| 101 | } else if (net_xmit_drop_count(ret)) { | 102 | } else if (net_xmit_drop_count(ret)) { |
| 102 | q->stats.pdrop++; | 103 | q->stats.pdrop++; |
| @@ -118,6 +119,7 @@ static struct sk_buff *red_dequeue(struct Qdisc *sch) | |||
| 118 | skb = child->dequeue(child); | 119 | skb = child->dequeue(child); |
| 119 | if (skb) { | 120 | if (skb) { |
| 120 | qdisc_bstats_update(sch, skb); | 121 | qdisc_bstats_update(sch, skb); |
| 122 | qdisc_qstats_backlog_dec(sch, skb); | ||
| 121 | sch->q.qlen--; | 123 | sch->q.qlen--; |
| 122 | } else { | 124 | } else { |
| 123 | if (!red_is_idling(&q->vars)) | 125 | if (!red_is_idling(&q->vars)) |
| @@ -143,6 +145,7 @@ static unsigned int red_drop(struct Qdisc *sch) | |||
| 143 | if (child->ops->drop && (len = child->ops->drop(child)) > 0) { | 145 | if (child->ops->drop && (len = child->ops->drop(child)) > 0) { |
| 144 | q->stats.other++; | 146 | q->stats.other++; |
| 145 | qdisc_qstats_drop(sch); | 147 | qdisc_qstats_drop(sch); |
| 148 | sch->qstats.backlog -= len; | ||
| 146 | sch->q.qlen--; | 149 | sch->q.qlen--; |
| 147 | return len; | 150 | return len; |
| 148 | } | 151 | } |
| @@ -158,6 +161,7 @@ static void red_reset(struct Qdisc *sch) | |||
| 158 | struct red_sched_data *q = qdisc_priv(sch); | 161 | struct red_sched_data *q = qdisc_priv(sch); |
| 159 | 162 | ||
| 160 | qdisc_reset(q->qdisc); | 163 | qdisc_reset(q->qdisc); |
| 164 | sch->qstats.backlog = 0; | ||
| 161 | sch->q.qlen = 0; | 165 | sch->q.qlen = 0; |
| 162 | red_restart(&q->vars); | 166 | red_restart(&q->vars); |
| 163 | } | 167 | } |
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c index 83b90b584fae..3161e491990b 100644 --- a/net/sched/sch_tbf.c +++ b/net/sched/sch_tbf.c | |||
| @@ -207,6 +207,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
| 207 | return ret; | 207 | return ret; |
| 208 | } | 208 | } |
| 209 | 209 | ||
| 210 | qdisc_qstats_backlog_inc(sch, skb); | ||
| 210 | sch->q.qlen++; | 211 | sch->q.qlen++; |
| 211 | return NET_XMIT_SUCCESS; | 212 | return NET_XMIT_SUCCESS; |
| 212 | } | 213 | } |
| @@ -217,6 +218,7 @@ static unsigned int tbf_drop(struct Qdisc *sch) | |||
| 217 | unsigned int len = 0; | 218 | unsigned int len = 0; |
| 218 | 219 | ||
| 219 | if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { | 220 | if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { |
| 221 | sch->qstats.backlog -= len; | ||
| 220 | sch->q.qlen--; | 222 | sch->q.qlen--; |
| 221 | qdisc_qstats_drop(sch); | 223 | qdisc_qstats_drop(sch); |
| 222 | } | 224 | } |
| @@ -263,6 +265,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch) | |||
| 263 | q->t_c = now; | 265 | q->t_c = now; |
| 264 | q->tokens = toks; | 266 | q->tokens = toks; |
| 265 | q->ptokens = ptoks; | 267 | q->ptokens = ptoks; |
| 268 | qdisc_qstats_backlog_dec(sch, skb); | ||
| 266 | sch->q.qlen--; | 269 | sch->q.qlen--; |
| 267 | qdisc_unthrottled(sch); | 270 | qdisc_unthrottled(sch); |
| 268 | qdisc_bstats_update(sch, skb); | 271 | qdisc_bstats_update(sch, skb); |
| @@ -294,6 +297,7 @@ static void tbf_reset(struct Qdisc *sch) | |||
| 294 | struct tbf_sched_data *q = qdisc_priv(sch); | 297 | struct tbf_sched_data *q = qdisc_priv(sch); |
| 295 | 298 | ||
| 296 | qdisc_reset(q->qdisc); | 299 | qdisc_reset(q->qdisc); |
| 300 | sch->qstats.backlog = 0; | ||
| 297 | sch->q.qlen = 0; | 301 | sch->q.qlen = 0; |
| 298 | q->t_c = ktime_get_ns(); | 302 | q->t_c = ktime_get_ns(); |
| 299 | q->tokens = q->buffer; | 303 | q->tokens = q->buffer; |
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c index f795b1dd0ccd..3ad9fab1985f 100644 --- a/net/tipc/netlink_compat.c +++ b/net/tipc/netlink_compat.c | |||
| @@ -604,7 +604,8 @@ static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg, | |||
| 604 | 604 | ||
| 605 | link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); | 605 | link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]); |
| 606 | link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); | 606 | link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP])); |
| 607 | strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME])); | 607 | nla_strlcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]), |
| 608 | TIPC_MAX_LINK_NAME); | ||
| 608 | 609 | ||
| 609 | return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO, | 610 | return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO, |
| 610 | &link_info, sizeof(link_info)); | 611 | &link_info, sizeof(link_info)); |
diff --git a/net/wireless/core.c b/net/wireless/core.c index d25c82bc1bbe..ecca3896b9f7 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
| @@ -363,8 +363,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv, | |||
| 363 | WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel); | 363 | WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel); |
| 364 | WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch); | 364 | WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch); |
| 365 | WARN_ON(ops->add_tx_ts && !ops->del_tx_ts); | 365 | WARN_ON(ops->add_tx_ts && !ops->del_tx_ts); |
| 366 | WARN_ON(ops->set_tx_power && !ops->get_tx_power); | ||
| 367 | WARN_ON(ops->set_antenna && !ops->get_antenna); | ||
| 368 | 366 | ||
| 369 | alloc_size = sizeof(*rdev) + sizeof_priv; | 367 | alloc_size = sizeof(*rdev) + sizeof_priv; |
| 370 | 368 | ||
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 6250b1cfcde5..dbb2738e356a 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c | |||
| @@ -958,8 +958,29 @@ static int wireless_process_ioctl(struct net *net, struct ifreq *ifr, | |||
| 958 | return private(dev, iwr, cmd, info, handler); | 958 | return private(dev, iwr, cmd, info, handler); |
| 959 | } | 959 | } |
| 960 | /* Old driver API : call driver ioctl handler */ | 960 | /* Old driver API : call driver ioctl handler */ |
| 961 | if (dev->netdev_ops->ndo_do_ioctl) | 961 | if (dev->netdev_ops->ndo_do_ioctl) { |
| 962 | return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); | 962 | #ifdef CONFIG_COMPAT |
| 963 | if (info->flags & IW_REQUEST_FLAG_COMPAT) { | ||
| 964 | int ret = 0; | ||
| 965 | struct iwreq iwr_lcl; | ||
| 966 | struct compat_iw_point *iwp_compat = (void *) &iwr->u.data; | ||
| 967 | |||
| 968 | memcpy(&iwr_lcl, iwr, sizeof(struct iwreq)); | ||
| 969 | iwr_lcl.u.data.pointer = compat_ptr(iwp_compat->pointer); | ||
| 970 | iwr_lcl.u.data.length = iwp_compat->length; | ||
| 971 | iwr_lcl.u.data.flags = iwp_compat->flags; | ||
| 972 | |||
| 973 | ret = dev->netdev_ops->ndo_do_ioctl(dev, (void *) &iwr_lcl, cmd); | ||
| 974 | |||
| 975 | iwp_compat->pointer = ptr_to_compat(iwr_lcl.u.data.pointer); | ||
| 976 | iwp_compat->length = iwr_lcl.u.data.length; | ||
| 977 | iwp_compat->flags = iwr_lcl.u.data.flags; | ||
| 978 | |||
| 979 | return ret; | ||
| 980 | } else | ||
| 981 | #endif | ||
| 982 | return dev->netdev_ops->ndo_do_ioctl(dev, ifr, cmd); | ||
| 983 | } | ||
| 963 | return -EOPNOTSUPP; | 984 | return -EOPNOTSUPP; |
| 964 | } | 985 | } |
| 965 | 986 | ||
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c index a9155077feef..fec75786f75b 100644 --- a/scripts/mod/file2alias.c +++ b/scripts/mod/file2alias.c | |||
| @@ -384,7 +384,7 @@ static void do_of_entry_multi(void *symval, struct module *mod) | |||
| 384 | len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*", | 384 | len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*", |
| 385 | (*type)[0] ? *type : "*"); | 385 | (*type)[0] ? *type : "*"); |
| 386 | 386 | ||
| 387 | if (compatible[0]) | 387 | if ((*compatible)[0]) |
| 388 | sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "", | 388 | sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "", |
| 389 | *compatible); | 389 | *compatible); |
| 390 | 390 | ||
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 9a0d1445ca5c..94089fc71884 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
| @@ -365,8 +365,11 @@ enum { | |||
| 365 | 365 | ||
| 366 | #define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170) | 366 | #define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170) |
| 367 | #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70) | 367 | #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70) |
| 368 | #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171) | ||
| 369 | #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71) | ||
| 368 | #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) | 370 | #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) |
| 369 | #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) | 371 | #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \ |
| 372 | IS_KBL(pci) || IS_KBL_LP(pci) | ||
| 370 | 373 | ||
| 371 | static char *driver_short_names[] = { | 374 | static char *driver_short_names[] = { |
| 372 | [AZX_DRIVER_ICH] = "HDA Intel", | 375 | [AZX_DRIVER_ICH] = "HDA Intel", |
| @@ -2181,6 +2184,12 @@ static const struct pci_device_id azx_ids[] = { | |||
| 2181 | /* Sunrise Point-LP */ | 2184 | /* Sunrise Point-LP */ |
| 2182 | { PCI_DEVICE(0x8086, 0x9d70), | 2185 | { PCI_DEVICE(0x8086, 0x9d70), |
| 2183 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, | 2186 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, |
| 2187 | /* Kabylake */ | ||
| 2188 | { PCI_DEVICE(0x8086, 0xa171), | ||
| 2189 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, | ||
| 2190 | /* Kabylake-LP */ | ||
| 2191 | { PCI_DEVICE(0x8086, 0x9d71), | ||
| 2192 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, | ||
| 2184 | /* Broxton-P(Apollolake) */ | 2193 | /* Broxton-P(Apollolake) */ |
| 2185 | { PCI_DEVICE(0x8086, 0x5a98), | 2194 | { PCI_DEVICE(0x8086, 0x5a98), |
| 2186 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, | 2195 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON }, |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d53c25e7a1c1..0fe18ede3e85 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -346,6 +346,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec) | |||
| 346 | case 0x10ec0234: | 346 | case 0x10ec0234: |
| 347 | case 0x10ec0274: | 347 | case 0x10ec0274: |
| 348 | case 0x10ec0294: | 348 | case 0x10ec0294: |
| 349 | case 0x10ec0700: | ||
| 350 | case 0x10ec0701: | ||
| 351 | case 0x10ec0703: | ||
| 349 | alc_update_coef_idx(codec, 0x10, 1<<15, 0); | 352 | alc_update_coef_idx(codec, 0x10, 1<<15, 0); |
| 350 | break; | 353 | break; |
| 351 | case 0x10ec0662: | 354 | case 0x10ec0662: |
| @@ -2655,6 +2658,7 @@ enum { | |||
| 2655 | ALC269_TYPE_ALC256, | 2658 | ALC269_TYPE_ALC256, |
| 2656 | ALC269_TYPE_ALC225, | 2659 | ALC269_TYPE_ALC225, |
| 2657 | ALC269_TYPE_ALC294, | 2660 | ALC269_TYPE_ALC294, |
| 2661 | ALC269_TYPE_ALC700, | ||
| 2658 | }; | 2662 | }; |
| 2659 | 2663 | ||
| 2660 | /* | 2664 | /* |
| @@ -2686,6 +2690,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec) | |||
| 2686 | case ALC269_TYPE_ALC256: | 2690 | case ALC269_TYPE_ALC256: |
| 2687 | case ALC269_TYPE_ALC225: | 2691 | case ALC269_TYPE_ALC225: |
| 2688 | case ALC269_TYPE_ALC294: | 2692 | case ALC269_TYPE_ALC294: |
| 2693 | case ALC269_TYPE_ALC700: | ||
| 2689 | ssids = alc269_ssids; | 2694 | ssids = alc269_ssids; |
| 2690 | break; | 2695 | break; |
| 2691 | default: | 2696 | default: |
| @@ -3618,13 +3623,20 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, | |||
| 3618 | static void alc_headset_mode_unplugged(struct hda_codec *codec) | 3623 | static void alc_headset_mode_unplugged(struct hda_codec *codec) |
| 3619 | { | 3624 | { |
| 3620 | static struct coef_fw coef0255[] = { | 3625 | static struct coef_fw coef0255[] = { |
| 3621 | WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */ | ||
| 3622 | WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */ | 3626 | WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */ |
| 3623 | UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ | 3627 | UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/ |
| 3624 | WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */ | 3628 | WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */ |
| 3625 | WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */ | 3629 | WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */ |
| 3626 | {} | 3630 | {} |
| 3627 | }; | 3631 | }; |
| 3632 | static struct coef_fw coef0255_1[] = { | ||
| 3633 | WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */ | ||
| 3634 | {} | ||
| 3635 | }; | ||
| 3636 | static struct coef_fw coef0256[] = { | ||
| 3637 | WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */ | ||
| 3638 | {} | ||
| 3639 | }; | ||
| 3628 | static struct coef_fw coef0233[] = { | 3640 | static struct coef_fw coef0233[] = { |
| 3629 | WRITE_COEF(0x1b, 0x0c0b), | 3641 | WRITE_COEF(0x1b, 0x0c0b), |
| 3630 | WRITE_COEF(0x45, 0xc429), | 3642 | WRITE_COEF(0x45, 0xc429), |
| @@ -3677,7 +3689,11 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec) | |||
| 3677 | 3689 | ||
| 3678 | switch (codec->core.vendor_id) { | 3690 | switch (codec->core.vendor_id) { |
| 3679 | case 0x10ec0255: | 3691 | case 0x10ec0255: |
| 3692 | alc_process_coef_fw(codec, coef0255_1); | ||
| 3693 | alc_process_coef_fw(codec, coef0255); | ||
| 3694 | break; | ||
| 3680 | case 0x10ec0256: | 3695 | case 0x10ec0256: |
| 3696 | alc_process_coef_fw(codec, coef0256); | ||
| 3681 | alc_process_coef_fw(codec, coef0255); | 3697 | alc_process_coef_fw(codec, coef0255); |
| 3682 | break; | 3698 | break; |
| 3683 | case 0x10ec0233: | 3699 | case 0x10ec0233: |
| @@ -3896,6 +3912,12 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) | |||
| 3896 | WRITE_COEFEX(0x57, 0x03, 0x8ea6), | 3912 | WRITE_COEFEX(0x57, 0x03, 0x8ea6), |
| 3897 | {} | 3913 | {} |
| 3898 | }; | 3914 | }; |
| 3915 | static struct coef_fw coef0256[] = { | ||
| 3916 | WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */ | ||
| 3917 | WRITE_COEF(0x1b, 0x0c6b), | ||
| 3918 | WRITE_COEFEX(0x57, 0x03, 0x8ea6), | ||
| 3919 | {} | ||
| 3920 | }; | ||
| 3899 | static struct coef_fw coef0233[] = { | 3921 | static struct coef_fw coef0233[] = { |
| 3900 | WRITE_COEF(0x45, 0xd429), | 3922 | WRITE_COEF(0x45, 0xd429), |
| 3901 | WRITE_COEF(0x1b, 0x0c2b), | 3923 | WRITE_COEF(0x1b, 0x0c2b), |
| @@ -3936,9 +3958,11 @@ static void alc_headset_mode_ctia(struct hda_codec *codec) | |||
| 3936 | 3958 | ||
| 3937 | switch (codec->core.vendor_id) { | 3959 | switch (codec->core.vendor_id) { |
| 3938 | case 0x10ec0255: | 3960 | case 0x10ec0255: |
| 3939 | case 0x10ec0256: | ||
| 3940 | alc_process_coef_fw(codec, coef0255); | 3961 | alc_process_coef_fw(codec, coef0255); |
| 3941 | break; | 3962 | break; |
| 3963 | case 0x10ec0256: | ||
| 3964 | alc_process_coef_fw(codec, coef0256); | ||
| 3965 | break; | ||
| 3942 | case 0x10ec0233: | 3966 | case 0x10ec0233: |
| 3943 | case 0x10ec0283: | 3967 | case 0x10ec0283: |
| 3944 | alc_process_coef_fw(codec, coef0233); | 3968 | alc_process_coef_fw(codec, coef0233); |
| @@ -3978,6 +4002,12 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) | |||
| 3978 | WRITE_COEFEX(0x57, 0x03, 0x8ea6), | 4002 | WRITE_COEFEX(0x57, 0x03, 0x8ea6), |
| 3979 | {} | 4003 | {} |
| 3980 | }; | 4004 | }; |
| 4005 | static struct coef_fw coef0256[] = { | ||
| 4006 | WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */ | ||
| 4007 | WRITE_COEF(0x1b, 0x0c6b), | ||
| 4008 | WRITE_COEFEX(0x57, 0x03, 0x8ea6), | ||
| 4009 | {} | ||
| 4010 | }; | ||
| 3981 | static struct coef_fw coef0233[] = { | 4011 | static struct coef_fw coef0233[] = { |
| 3982 | WRITE_COEF(0x45, 0xe429), | 4012 | WRITE_COEF(0x45, 0xe429), |
| 3983 | WRITE_COEF(0x1b, 0x0c2b), | 4013 | WRITE_COEF(0x1b, 0x0c2b), |
| @@ -4018,9 +4048,11 @@ static void alc_headset_mode_omtp(struct hda_codec *codec) | |||
| 4018 | 4048 | ||
| 4019 | switch (codec->core.vendor_id) { | 4049 | switch (codec->core.vendor_id) { |
| 4020 | case 0x10ec0255: | 4050 | case 0x10ec0255: |
| 4021 | case 0x10ec0256: | ||
| 4022 | alc_process_coef_fw(codec, coef0255); | 4051 | alc_process_coef_fw(codec, coef0255); |
| 4023 | break; | 4052 | break; |
| 4053 | case 0x10ec0256: | ||
| 4054 | alc_process_coef_fw(codec, coef0256); | ||
| 4055 | break; | ||
| 4024 | case 0x10ec0233: | 4056 | case 0x10ec0233: |
| 4025 | case 0x10ec0283: | 4057 | case 0x10ec0283: |
| 4026 | alc_process_coef_fw(codec, coef0233); | 4058 | alc_process_coef_fw(codec, coef0233); |
| @@ -4266,7 +4298,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, | |||
| 4266 | static void alc255_set_default_jack_type(struct hda_codec *codec) | 4298 | static void alc255_set_default_jack_type(struct hda_codec *codec) |
| 4267 | { | 4299 | { |
| 4268 | /* Set to iphone type */ | 4300 | /* Set to iphone type */ |
| 4269 | static struct coef_fw fw[] = { | 4301 | static struct coef_fw alc255fw[] = { |
| 4270 | WRITE_COEF(0x1b, 0x880b), | 4302 | WRITE_COEF(0x1b, 0x880b), |
| 4271 | WRITE_COEF(0x45, 0xd089), | 4303 | WRITE_COEF(0x45, 0xd089), |
| 4272 | WRITE_COEF(0x1b, 0x080b), | 4304 | WRITE_COEF(0x1b, 0x080b), |
| @@ -4274,7 +4306,22 @@ static void alc255_set_default_jack_type(struct hda_codec *codec) | |||
| 4274 | WRITE_COEF(0x1b, 0x0c0b), | 4306 | WRITE_COEF(0x1b, 0x0c0b), |
| 4275 | {} | 4307 | {} |
| 4276 | }; | 4308 | }; |
| 4277 | alc_process_coef_fw(codec, fw); | 4309 | static struct coef_fw alc256fw[] = { |
| 4310 | WRITE_COEF(0x1b, 0x884b), | ||
| 4311 | WRITE_COEF(0x45, 0xd089), | ||
| 4312 | WRITE_COEF(0x1b, 0x084b), | ||
| 4313 | WRITE_COEF(0x46, 0x0004), | ||
| 4314 | WRITE_COEF(0x1b, 0x0c4b), | ||
| 4315 | {} | ||
| 4316 | }; | ||
| 4317 | switch (codec->core.vendor_id) { | ||
| 4318 | case 0x10ec0255: | ||
| 4319 | alc_process_coef_fw(codec, alc255fw); | ||
| 4320 | break; | ||
| 4321 | case 0x10ec0256: | ||
| 4322 | alc_process_coef_fw(codec, alc256fw); | ||
| 4323 | break; | ||
| 4324 | } | ||
| 4278 | msleep(30); | 4325 | msleep(30); |
| 4279 | } | 4326 | } |
| 4280 | 4327 | ||
| @@ -5587,6 +5634,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 5587 | SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), | 5634 | SND_PCI_QUIRK(0x17aa, 0x2218, "Thinkpad X1 Carbon 2nd", ALC292_FIXUP_TPT440_DOCK), |
| 5588 | SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), | 5635 | SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), |
| 5589 | SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), | 5636 | SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), |
| 5637 | SND_PCI_QUIRK(0x17aa, 0x2231, "Thinkpad T560", ALC292_FIXUP_TPT460), | ||
| 5590 | SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), | 5638 | SND_PCI_QUIRK(0x17aa, 0x2233, "Thinkpad", ALC292_FIXUP_TPT460), |
| 5591 | SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), | 5639 | SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
| 5592 | SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), | 5640 | SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), |
| @@ -5775,6 +5823,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
| 5775 | {0x12, 0x90a60180}, | 5823 | {0x12, 0x90a60180}, |
| 5776 | {0x14, 0x90170130}, | 5824 | {0x14, 0x90170130}, |
| 5777 | {0x21, 0x02211040}), | 5825 | {0x21, 0x02211040}), |
| 5826 | SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5565", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | ||
| 5827 | {0x12, 0x90a60180}, | ||
| 5828 | {0x14, 0x90170120}, | ||
| 5829 | {0x21, 0x02211030}), | ||
| 5778 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | 5830 | SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
| 5779 | {0x12, 0x90a60160}, | 5831 | {0x12, 0x90a60160}, |
| 5780 | {0x14, 0x90170120}, | 5832 | {0x14, 0x90170120}, |
| @@ -6053,6 +6105,14 @@ static int patch_alc269(struct hda_codec *codec) | |||
| 6053 | case 0x10ec0294: | 6105 | case 0x10ec0294: |
| 6054 | spec->codec_variant = ALC269_TYPE_ALC294; | 6106 | spec->codec_variant = ALC269_TYPE_ALC294; |
| 6055 | break; | 6107 | break; |
| 6108 | case 0x10ec0700: | ||
| 6109 | case 0x10ec0701: | ||
| 6110 | case 0x10ec0703: | ||
| 6111 | spec->codec_variant = ALC269_TYPE_ALC700; | ||
| 6112 | spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ | ||
| 6113 | alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */ | ||
| 6114 | break; | ||
| 6115 | |||
| 6056 | } | 6116 | } |
| 6057 | 6117 | ||
| 6058 | if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) { | 6118 | if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) { |
| @@ -7008,6 +7068,9 @@ static const struct hda_device_id snd_hda_id_realtek[] = { | |||
| 7008 | HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662), | 7068 | HDA_CODEC_ENTRY(0x10ec0670, "ALC670", patch_alc662), |
| 7009 | HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662), | 7069 | HDA_CODEC_ENTRY(0x10ec0671, "ALC671", patch_alc662), |
| 7010 | HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680), | 7070 | HDA_CODEC_ENTRY(0x10ec0680, "ALC680", patch_alc680), |
| 7071 | HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269), | ||
| 7072 | HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269), | ||
| 7073 | HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269), | ||
| 7011 | HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882), | 7074 | HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc882), |
| 7012 | HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880), | 7075 | HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880), |
| 7013 | HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882), | 7076 | HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882), |
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c index 96ba386b1b7b..4a8217448f20 100644 --- a/tools/testing/selftests/net/reuseport_bpf.c +++ b/tools/testing/selftests/net/reuseport_bpf.c | |||
| @@ -111,9 +111,9 @@ static void attach_ebpf(int fd, uint16_t mod) | |||
| 111 | memset(&attr, 0, sizeof(attr)); | 111 | memset(&attr, 0, sizeof(attr)); |
| 112 | attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; | 112 | attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; |
| 113 | attr.insn_cnt = ARRAY_SIZE(prog); | 113 | attr.insn_cnt = ARRAY_SIZE(prog); |
| 114 | attr.insns = (uint64_t)prog; | 114 | attr.insns = (unsigned long) &prog; |
| 115 | attr.license = (uint64_t)bpf_license; | 115 | attr.license = (unsigned long) &bpf_license; |
| 116 | attr.log_buf = (uint64_t)bpf_log_buf; | 116 | attr.log_buf = (unsigned long) &bpf_log_buf; |
| 117 | attr.log_size = sizeof(bpf_log_buf); | 117 | attr.log_size = sizeof(bpf_log_buf); |
| 118 | attr.log_level = 1; | 118 | attr.log_level = 1; |
| 119 | attr.kern_version = 0; | 119 | attr.kern_version = 0; |
| @@ -351,8 +351,8 @@ static void test_filter_no_reuseport(const struct test_params p) | |||
| 351 | memset(&eprog, 0, sizeof(eprog)); | 351 | memset(&eprog, 0, sizeof(eprog)); |
| 352 | eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; | 352 | eprog.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; |
| 353 | eprog.insn_cnt = ARRAY_SIZE(ecode); | 353 | eprog.insn_cnt = ARRAY_SIZE(ecode); |
| 354 | eprog.insns = (uint64_t)ecode; | 354 | eprog.insns = (unsigned long) &ecode; |
| 355 | eprog.license = (uint64_t)bpf_license; | 355 | eprog.license = (unsigned long) &bpf_license; |
| 356 | eprog.kern_version = 0; | 356 | eprog.kern_version = 0; |
| 357 | 357 | ||
| 358 | memset(&cprog, 0, sizeof(cprog)); | 358 | memset(&cprog, 0, sizeof(cprog)); |
