554 files changed, 5226 insertions, 3263 deletions
diff --git a/.mailmap b/.mailmap
--- a/.mailmap
+++ b/.mailmap
@@ -68,6 +68,8 @@ Jacob Shin <Jacob.Shin@amd.com>
 James Bottomley <jejb@mulgrave.(none)>
 James Bottomley <jejb@titanic.il.steeleye.com>
 James E Wilson <wilson@specifix.com>
+James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
+James Hogan <jhogan@kernel.org> <james@albanarts.com>
 James Ketrenos <jketreno@io.(none)>
 Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com>
 <javier@osg.samsung.com> <javier.martinez@collabora.co.uk>
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-swap b/Documentation/ABI/testing/sysfs-kernel-mm-swap
index 587db52084c7..94672016c268 100644
--- a/Documentation/ABI/testing/sysfs-kernel-mm-swap
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-swap
@@ -14,13 +14,3 @@ Description: Enable/disable VMA based swap readahead.
 still used for tmpfs etc. other users. If set to
 false, the global swap readahead algorithm will be
 used for all swappable pages.
-
-What: /sys/kernel/mm/swap/vma_ra_max_order
-Date: August 2017
-Contact: Linux memory management mailing list <linux-mm@kvack.org>
-Description: The max readahead size in order for VMA based swap readahead
-
-VMA based swap readahead algorithm will readahead at
-most 1 << max_order pages for each readahead. The
-real readahead size for each readahead will be scaled
-according to the estimation algorithm.
diff --git a/Documentation/ABI/testing/sysfs-power b/Documentation/ABI/testing/sysfs-power
index 713cab1d5f12..a1d1612f3651 100644
--- a/Documentation/ABI/testing/sysfs-power
+++ b/Documentation/ABI/testing/sysfs-power
@@ -127,7 +127,7 @@ Description:

 What; /sys/power/pm_trace_dev_match
 Date: October 2010
-Contact: James Hogan <james@albanarts.com>
+Contact: James Hogan <jhogan@kernel.org>
 Description:
 The /sys/power/pm_trace_dev_match file contains the name of the
 device associated with the last PM event point saved in the RTC
diff --git a/Documentation/device-mapper/dm-raid.txt b/Documentation/device-mapper/dm-raid.txt
index 4a0a7469fdd7..32df07e29f68 100644
--- a/Documentation/device-mapper/dm-raid.txt
+++ b/Documentation/device-mapper/dm-raid.txt
@@ -344,3 +344,4 @@ Version History
 (wrong raid10_copies/raid10_format sequence)
 1.11.1 Add raid4/5/6 journal write-back support via journal_mode option
 1.12.1 fix for MD deadlock between mddev_suspend() and md_write_start() available
+1.13.0 Fix dev_health status at end of "recover" (was 'a', now 'A')
diff --git a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
index b878a1e305af..ed1456f5c94d 100644
--- a/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/marvell,xenon-sdhci.txt
@@ -16,11 +16,13 @@ Required Properties:

 - clocks:
 Array of clocks required for SDHC.
-Require at least input clock for Xenon IP core.
+Require at least input clock for Xenon IP core. For Armada AP806 and
+CP110, the AXI clock is also mandatory.

 - clock-names:
 Array of names corresponding to clocks property.
 The input clock for Xenon IP core should be named as "core".
+The input clock for the AXI bus must be named as "axi".

 - reg:
 * For "marvell,armada-3700-sdhci", two register areas.
@@ -106,8 +108,8 @@ Example:
 compatible = "marvell,armada-ap806-sdhci";
 reg = <0xaa0000 0x1000>;
 interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>
-clocks = <&emmc_clk>;
-clock-names = "core";
+clocks = <&emmc_clk>,<&axi_clk>;
+clock-names = "core", "axi";
 bus-width = <4>;
 marvell,xenon-phy-slow-mode;
 marvell,xenon-tun-count = <11>;
@@ -126,8 +128,8 @@ Example:
 interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>
 vqmmc-supply = <&sd_vqmmc_regulator>;
 vmmc-supply = <&sd_vmmc_regulator>;
-clocks = <&sdclk>;
-clock-names = "core";
+clocks = <&sdclk>, <&axi_clk>;
+clock-names = "core", "axi";
 bus-width = <4>;
 marvell,xenon-tun-count = <9>;
 };
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
index 7e2dad08a12e..1814fa13f6ab 100644
--- a/Documentation/devicetree/bindings/net/marvell-pp2.txt
+++ b/Documentation/devicetree/bindings/net/marvell-pp2.txt
@@ -21,8 +21,9 @@ Required properties:
 - main controller clock (for both armada-375-pp2 and armada-7k-pp2)
 - GOP clock (for both armada-375-pp2 and armada-7k-pp2)
 - MG clock (only for armada-7k-pp2)
-- clock-names: names of used clocks, must be "pp_clk", "gop_clk" and
-"mg_clk" (the latter only for armada-7k-pp2).
+- AXI clock (only for armada-7k-pp2)
+- clock-names: names of used clocks, must be "pp_clk", "gop_clk", "mg_clk"
+and "axi_clk" (the 2 latter only for armada-7k-pp2).

 The ethernet ports are represented by subnodes. At least one port is
 required.
@@ -78,8 +79,9 @@ Example for marvell,armada-7k-pp2:
 cpm_ethernet: ethernet@0 {
 compatible = "marvell,armada-7k-pp22";
 reg = <0x0 0x100000>, <0x129000 0xb000>;
-clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>, <&cpm_syscon0 1 5>;
-clock-names = "pp_clk", "gop_clk", "gp_clk";
+clocks = <&cpm_syscon0 1 3>, <&cpm_syscon0 1 9>,
+<&cpm_syscon0 1 5>, <&cpm_syscon0 1 18>;
+clock-names = "pp_clk", "gop_clk", "gp_clk", "axi_clk";

 eth0: eth0 {
 interrupts = <ICU_GRP_NSR 39 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
index 6af8eed1adeb..9c16ee2965a2 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.txt
@@ -4,6 +4,7 @@ The device node has following properties.

 Required properties:
 - compatible: should be "rockchip,<name>-gamc"
+"rockchip,rk3128-gmac": found on RK312x SoCs
 "rockchip,rk3228-gmac": found on RK322x SoCs
 "rockchip,rk3288-gmac": found on RK3288 SoCs
 "rockchip,rk3328-gmac": found on RK3328 SoCs
diff --git a/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt b/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt
new file mode 100644
index 000000000000..830069b1c37c
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt
@@ -0,0 +1,28 @@
+Binding for the Synopsys HSDK reset controller
+
+This binding uses the common reset binding[1].
+
+[1] Documentation/devicetree/bindings/reset/reset.txt
+
+Required properties:
+- compatible: should be "snps,hsdk-reset".
+- reg: should always contain 2 pairs address - length: first for reset
+configuration register and second for corresponding SW reset and status bits
+register.
+- #reset-cells: from common reset binding; Should always be set to 1.
+
+Example:
+reset: reset@880 {
+compatible = "snps,hsdk-reset";
+#reset-cells = <1>;
+reg = <0x8A0 0x4>, <0xFF0 0x4>;
+};
+
+Specifying reset lines connected to IP modules:
+ethernet@.... {
+....
+resets = <&reset HSDK_V1_ETH_RESET>;
+....
+};
+
+The index could be found in <dt-bindings/reset/snps,hsdk-reset.h>
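For reference, a driver on the consumer side of the binding above would normally claim its reset line through the kernel's common reset framework. A minimal sketch, assuming a hypothetical ethernet platform driver (only the reset_control_* calls are real API; the probe function and device are illustrative):

    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/reset.h>

    /* Hypothetical consumer: pick up the line referenced by the
     * "resets" property (phandle + HSDK_*_RESET index) and pulse it
     * before touching the hardware. */
    static int example_eth_probe(struct platform_device *pdev)
    {
            struct reset_control *rst;

            rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
            if (IS_ERR(rst))
                    return PTR_ERR(rst);

            /* Toggles the SW reset/status bit managed by the
             * snps,hsdk-reset controller. */
            return reset_control_reset(rst);
    }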
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index 36f528a7fdd6..8caa60734647 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -210,8 +210,11 @@ path as another overlay mount and it may use a lower layer path that is
 beneath or above the path of another overlay lower layer path.

 Using an upper layer path and/or a workdir path that are already used by
-another overlay mount is not allowed and will fail with EBUSY. Using
+another overlay mount is not allowed and may fail with EBUSY. Using
 partially overlapping paths is not allowed but will not fail with EBUSY.
+If files are accessed from two overlayfs mounts which share or overlap the
+upper layer and/or workdir path the behavior of the overlay is undefined,
+though it will not result in a crash or deadlock.

 Mounting an overlay using an upper layer path, where the upper layer path
 was previously used by another mounted overlay in combination with a
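A minimal sketch of the shared-upperdir case described in the overlayfs text above, calling mount(2) directly; the paths are illustrative and assumed to already exist, and per the documentation the second mount is expected to be rejected, typically with EBUSY:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            const char *opts = "lowerdir=/lower,upperdir=/upper,workdir=/work";

            /* First overlay mount reusing nothing: succeeds. */
            if (mount("overlay", "/mnt/a", "overlay", 0, opts))
                    perror("mount /mnt/a");

            /* Second overlay reusing the same upperdir/workdir:
             * not allowed, expected to fail (typically EBUSY). */
            if (mount("overlay", "/mnt/b", "overlay", 0, opts))
                    printf("second mount failed: errno=%d%s\n", errno,
                           errno == EBUSY ? " (EBUSY)" : "");
            return 0;
    }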
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 0500193434cb..d47702456926 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -36,6 +36,7 @@ Supported adapters:
 * Intel Gemini Lake (SOC)
 * Intel Cannon Lake-H (PCH)
 * Intel Cannon Lake-LP (PCH)
+* Intel Cedar Fork (PCH)
 Datasheets: Publicly available at the Intel website

 On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 57f52cdce32e..9ba04c0bab8d 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -2387,7 +2387,7 @@ broadcast: Like active-backup, there is not much advantage to this
 and packet type ID), so in a "gatewayed" configuration, all
 outgoing traffic will generally use the same device. Incoming
 traffic may also end up on a single device, but that is
-dependent upon the balancing policy of the peer's 8023.ad
+dependent upon the balancing policy of the peer's 802.3ad
 implementation. In a "local" configuration, traffic will be
 distributed across the devices in the bond.

diff --git a/MAINTAINERS b/MAINTAINERS
index 65b0c88d5ee0..a74227ad082e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5259,7 +5259,8 @@ S: Maintained
 F: drivers/iommu/exynos-iommu.c

 EZchip NPS platform support
-M: Noam Camus <noamc@ezchip.com>
+M: Elad Kanfi <eladkan@mellanox.com>
+M: Vineet Gupta <vgupta@synopsys.com>
 S: Supported
 F: arch/arc/plat-eznps
 F: arch/arc/boot/dts/eznps.dts
@@ -5345,9 +5346,7 @@ M: "J. Bruce Fields" <bfields@fieldses.org>
 L: linux-fsdevel@vger.kernel.org
 S: Maintained
 F: include/linux/fcntl.h
-F: include/linux/fs.h
 F: include/uapi/linux/fcntl.h
-F: include/uapi/linux/fs.h
 F: fs/fcntl.c
 F: fs/locks.c

@@ -5356,6 +5355,8 @@ M: Alexander Viro <viro@zeniv.linux.org.uk>
 L: linux-fsdevel@vger.kernel.org
 S: Maintained
 F: fs/*
+F: include/linux/fs.h
+F: include/uapi/linux/fs.h

 FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M: Riku Voipio <riku.voipio@iki.fi>
@@ -6738,7 +6739,7 @@ F: Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
 F: drivers/auxdisplay/img-ascii-lcd.c

 IMGTEC IR DECODER DRIVER
-M: James Hogan <james.hogan@imgtec.com>
+M: James Hogan <jhogan@kernel.org>
 S: Maintained
 F: drivers/media/rc/img-ir/

@@ -7562,7 +7563,7 @@ F: arch/arm64/include/asm/kvm*
 F: arch/arm64/kvm/

 KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
-M: James Hogan <james.hogan@imgtec.com>
+M: James Hogan <jhogan@kernel.org>
 L: linux-mips@linux-mips.org
 S: Supported
 F: arch/mips/include/uapi/asm/kvm*
@@ -7570,7 +7571,7 @@ F: arch/mips/include/asm/kvm*
 F: arch/mips/kvm/

 KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc)
-M: Alexander Graf <agraf@suse.com>
+M: Paul Mackerras <paulus@ozlabs.org>
 L: kvm-ppc@vger.kernel.org
 W: http://www.linux-kvm.org/
 T: git git://github.com/agraf/linux-2.6.git
@@ -8264,6 +8265,12 @@ L: libertas-dev@lists.infradead.org
 S: Orphan
 F: drivers/net/wireless/marvell/libertas/

+MARVELL MACCHIATOBIN SUPPORT
+M: Russell King <rmk@armlinux.org.uk>
+L: linux-arm-kernel@lists.infradead.org
+S: Maintained
+F: arch/arm64/boot/dts/marvell/armada-8040-mcbin.dts
+
 MARVELL MV643XX ETHERNET DRIVER
 M: Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L: netdev@vger.kernel.org
@@ -8885,7 +8892,7 @@ F: Documentation/devicetree/bindings/media/meson-ao-cec.txt
 T: git git://linuxtv.org/media_tree.git

 METAG ARCHITECTURE
-M: James Hogan <james.hogan@imgtec.com>
+M: James Hogan <jhogan@kernel.org>
 L: linux-metag@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag.git
 S: Odd Fixes
@@ -9354,7 +9361,7 @@ NETWORK BLOCK DEVICE (NBD)
 M: Josef Bacik <jbacik@fb.com>
 S: Maintained
 L: linux-block@vger.kernel.org
-L: nbd-general@lists.sourceforge.net
+L: nbd@other.debian.org
 F: Documentation/blockdev/nbd.txt
 F: drivers/block/nbd.c
 F: include/uapi/linux/nbd.h
@@ -12931,9 +12938,9 @@ F: drivers/mmc/host/dw_mmc*
 SYNOPSYS HSDK RESET CONTROLLER DRIVER
 M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 S: Supported
-F: drivers/reset/reset-hsdk-v1.c
-F: include/dt-bindings/reset/snps,hsdk-v1-reset.h
-F: Documentation/devicetree/bindings/reset/snps,hsdk-v1-reset.txt
+F: drivers/reset/reset-hsdk.c
+F: include/dt-bindings/reset/snps,hsdk-reset.h
+F: Documentation/devicetree/bindings/reset/snps,hsdk-reset.txt

 SYSTEM CONFIGURATION (SYSCON)
 M: Lee Jones <lee.jones@linaro.org>
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote

 # *DOCUMENTATION*
@@ -933,7 +933,11 @@ ifdef CONFIG_STACK_VALIDATION
 ifeq ($(has_libelf),1)
 objtool_target := tools/objtool FORCE
 else
-$(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+ifdef CONFIG_ORC_UNWINDER
+$(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+else
+$(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
+endif
 SKIP_STACK_VALIDATION := 1
 export SKIP_STACK_VALIDATION
 endif
diff --git a/arch/Kconfig b/arch/Kconfig
index 1aafb4efbb51..d789a89cb32c 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -937,9 +937,6 @@ config STRICT_MODULE_RWX
 and non-text memory will be made non-executable. This provides
 protection against certain security exploits (e.g. writing to text)

-config ARCH_WANT_RELAX_ORDER
-bool
-
 config ARCH_HAS_REFCOUNT
 bool
 help
diff --git a/arch/alpha/include/asm/mmu_context.h b/arch/alpha/include/asm/mmu_context.h
index 384bd47b5187..45c020a0fe76 100644
--- a/arch/alpha/include/asm/mmu_context.h
+++ b/arch/alpha/include/asm/mmu_context.h
@@ -8,6 +8,7 @@
 */

 #include <linux/mm_types.h>
+#include <linux/sched.h>

 #include <asm/machvec.h>
 #include <asm/compiler.h>
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index a598641eed98..c84e67fdea09 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -24,7 +24,7 @@ config ARC
 select GENERIC_SMP_IDLE_THREAD
 select HAVE_ARCH_KGDB
 select HAVE_ARCH_TRACEHOOK
-select HAVE_FUTEX_CMPXCHG
+select HAVE_FUTEX_CMPXCHG if FUTEX
 select HAVE_IOREMAP_PROT
 select HAVE_KPROBES
 select HAVE_KRETPROBES
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 3a4b52b7e09d..d37f49d6a27f 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -6,8 +6,6 @@
 # published by the Free Software Foundation.
 #

-UTS_MACHINE := arc
-
 ifeq ($(CROSS_COMPILE),)
 ifndef CONFIG_CPU_BIG_ENDIAN
 CROSS_COMPILE := arc-linux-
diff --git a/arch/arc/boot/dts/axs10x_mb.dtsi b/arch/arc/boot/dts/axs10x_mb.dtsi
index 2367a67c5f10..e114000a84f5 100644
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -44,7 +44,14 @@

 mmcclk: mmcclk {
 compatible = "fixed-clock";
-clock-frequency = <50000000>;
+/*
+ * DW sdio controller has external ciu clock divider
+ * controlled via register in SDIO IP. It divides
+ * sdio_ref_clk (which comes from CGU) by 16 for
+ * default. So default mmcclk clock (which comes
+ * to sdk_in) is 25000000 Hz.
+ */
+clock-frequency = <25000000>;
 #clock-cells = <0>;
 };

diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts
index 229d13adbce4..8adde1b492f1 100644
--- a/arch/arc/boot/dts/hsdk.dts
+++ b/arch/arc/boot/dts/hsdk.dts
@@ -12,6 +12,7 @@
 /dts-v1/;

 #include <dt-bindings/net/ti-dp83867.h>
+#include <dt-bindings/reset/snps,hsdk-reset.h>

 / {
 model = "snps,hsdk";
@@ -57,10 +58,10 @@
 };
 };

-core_clk: core-clk {
+input_clk: input-clk {
 #clock-cells = <0>;
 compatible = "fixed-clock";
-clock-frequency = <500000000>;
+clock-frequency = <33333333>;
 };

 cpu_intc: cpu-interrupt-controller {
@@ -102,6 +103,19 @@

 ranges = <0x00000000 0xf0000000 0x10000000>;

+cgu_rst: reset-controller@8a0 {
+compatible = "snps,hsdk-reset";
+#reset-cells = <1>;
+reg = <0x8A0 0x4>, <0xFF0 0x4>;
+};
+
+core_clk: core-clk@0 {
+compatible = "snps,hsdk-core-pll-clock";
+reg = <0x00 0x10>, <0x14B8 0x4>;
+#clock-cells = <0>;
+clocks = <&input_clk>;
+};
+
 serial: serial@5000 {
 compatible = "snps,dw-apb-uart";
 reg = <0x5000 0x100>;
@@ -120,7 +134,17 @@

 mmcclk_ciu: mmcclk-ciu {
 compatible = "fixed-clock";
-clock-frequency = <100000000>;
+/*
+ * DW sdio controller has external ciu clock divider
+ * controlled via register in SDIO IP. Due to its
+ * unexpected default value (it should devide by 1
+ * but it devides by 8) SDIO IP uses wrong clock and
+ * works unstable (see STAR 9001204800)
+ * So add temporary fix and change clock frequency
+ * from 100000000 to 12500000 Hz until we fix dw sdio
+ * driver itself.
+ */
+clock-frequency = <12500000>;
 #clock-cells = <0>;
 };

@@ -141,6 +165,8 @@
 clocks = <&gmacclk>;
 clock-names = "stmmaceth";
 phy-handle = <&phy0>;
+resets = <&cgu_rst HSDK_ETH_RESET>;
+reset-names = "stmmaceth";

 mdio {
 #address-cells = <1>;
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 6980b966a364..ec7c849a5c8e 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -105,7 +105,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 2233f5777a71..63d3cf69e0b0 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -104,7 +104,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 30a3d4cf53d2..f613ecac14a7 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -107,7 +107,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
index 821a2e562f3f..3507be2af6fe 100644
--- a/arch/arc/configs/haps_hs_smp_defconfig
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -84,5 +84,5 @@ CONFIG_TMPFS=y
 CONFIG_NFS_FS=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 # CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 9a3fcf446388..15f0f6b5fec1 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -63,6 +63,7 @@ CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_DW=y
 # CONFIG_IOMMU_SUPPORT is not set
+CONFIG_RESET_HSDK=y
 CONFIG_EXT3_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
@@ -72,7 +73,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index c0d6a010751a..4fcf4f2503f6 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -94,7 +94,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_SHIRQ=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index 5c0971787acf..7b71464f6c2f 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -98,7 +98,7 @@ CONFIG_NLS_ISO8859_1=y
 # CONFIG_ENABLE_MUST_CHECK is not set
 CONFIG_STRIP_ASM_SYMS=y
 CONFIG_DEBUG_SHIRQ=y
-CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
 CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=10
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_DEBUG_PREEMPT is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index ba8e802dba80..b1c56d35f2a9 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -98,6 +98,7 @@

 /* Auxiliary registers */
 #define AUX_IDENTITY 4
+#define AUX_EXEC_CTRL 8
 #define AUX_INTR_VEC_BASE 0x25
 #define AUX_VOL 0x5e

@@ -135,12 +136,12 @@ struct bcr_identity {
 #endif
 };

-struct bcr_isa {
+struct bcr_isa_arcv2 {
 #ifdef CONFIG_CPU_BIG_ENDIAN
 unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1,
-pad1:11, atomic1:1, ver:8;
+pad1:12, ver:8;
 #else
-unsigned int ver:8, atomic1:1, pad1:11, be:1, atomic:1, unalign:1,
+unsigned int ver:8, pad1:12, be:1, atomic:1, unalign:1,
 ldd:1, pad2:4, div_rem:4;
 #endif
 };
@@ -263,13 +264,13 @@ struct cpuinfo_arc {
 struct cpuinfo_arc_mmu mmu;
 struct cpuinfo_arc_bpu bpu;
 struct bcr_identity core;
-struct bcr_isa isa;
+struct bcr_isa_arcv2 isa;
 const char *details, *name;
 unsigned int vec_base;
 struct cpuinfo_arc_ccm iccm, dccm;
 struct {
 unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
-fpu_sp:1, fpu_dp:1, pad2:6,
+fpu_sp:1, fpu_dp:1, dual_iss_enb:1, dual_iss_exist:1, pad2:4,
 debug:1, ap:1, smart:1, rtt:1, pad3:4,
 timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
 } extn;
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 877cec8f5ea2..fb83844daeea 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -51,6 +51,7 @@ static const struct id_to_str arc_cpu_rel[] = {
 { 0x51, "R2.0" },
 { 0x52, "R2.1" },
 { 0x53, "R3.0" },
+{ 0x54, "R4.0" },
 #endif
 { 0x00, NULL }
 };
@@ -62,6 +63,7 @@ static const struct id_to_str arc_cpu_nm[] = {
 #else
 { 0x40, "ARC EM" },
 { 0x50, "ARC HS38" },
+{ 0x54, "ARC HS48" },
 #endif
 { 0x00, "Unknown" }
 };
@@ -119,11 +121,11 @@ static void read_arc_build_cfg_regs(void)
 struct bcr_generic bcr;
 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 const struct id_to_str *tbl;
+struct bcr_isa_arcv2 isa;

 FIX_PTR(cpu);

 READ_BCR(AUX_IDENTITY, cpu->core);
-READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);

 for (tbl = &arc_cpu_rel[0]; tbl->id != 0; tbl++) {
 if (cpu->core.family == tbl->id) {
@@ -133,7 +135,7 @@ static void read_arc_build_cfg_regs(void)
 }

 for (tbl = &arc_cpu_nm[0]; tbl->id != 0; tbl++) {
-if ((cpu->core.family & 0xF0) == tbl->id)
+if ((cpu->core.family & 0xF4) == tbl->id)
 break;
 }
 cpu->name = tbl->str;
@@ -192,6 +194,14 @@ static void read_arc_build_cfg_regs(void)
 cpu->bpu.full = bpu.ft;
 cpu->bpu.num_cache = 256 << bpu.bce;
 cpu->bpu.num_pred = 2048 << bpu.pte;
+
+if (cpu->core.family >= 0x54) {
+unsigned int exec_ctrl;
+
+READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
+cpu->extn.dual_iss_exist = 1;
+cpu->extn.dual_iss_enb = exec_ctrl & 1;
+}
 }

 READ_BCR(ARC_REG_AP_BCR, bcr);
@@ -205,18 +215,25 @@ static void read_arc_build_cfg_regs(void)

 cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;

+READ_BCR(ARC_REG_ISA_CFG_BCR, isa);
+
 /* some hacks for lack of feature BCR info in old ARC700 cores */
 if (is_isa_arcompact()) {
-if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
+if (!isa.ver) /* ISA BCR absent, use Kconfig info */
 cpu->isa.atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
-else
-cpu->isa.atomic = cpu->isa.atomic1;
+else {
+/* ARC700_BUILD only has 2 bits of isa info */
+struct bcr_generic bcr = *(struct bcr_generic *)&isa;
+cpu->isa.atomic = bcr.info & 1;
+}

 cpu->isa.be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);

 /* there's no direct way to distinguish 750 vs. 770 */
 if (unlikely(cpu->core.family < 0x34 || cpu->mmu.ver < 3))
 cpu->name = "ARC750";
+} else {
+cpu->isa = isa;
 }
 }

@@ -232,10 +249,11 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
 core->family, core->cpu_id, core->chip_id);

-n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s\n",
+n += scnprintf(buf + n, len - n, "processor [%d]\t: %s %s (%s ISA) %s%s%s\n",
 cpu_id, cpu->name, cpu->details,
 is_isa_arcompact() ? "ARCompact" : "ARCv2",
-IS_AVAIL1(cpu->isa.be, "[Big-Endian]"));
+IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
+IS_AVAIL3(cpu->extn.dual_iss_exist, cpu->extn.dual_iss_enb, " Dual-Issue"));

 n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
 IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index f1ac6790da5f..cf14ebc36916 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -111,6 +111,13 @@ static void __init axs10x_early_init(void)

 axs10x_enable_gpio_intc_wire();

+/*
+ * Reset ethernet IP core.
+ * TODO: get rid of this quirk after axs10x reset driver (or simple
+ * reset driver) will be available in upstream.
+ */
+iowrite32((1 << 5), (void __iomem *) CREG_MB_SW_RESET);
+
 scnprintf(mb, 32, "MainBoard v%d", mb_rev);
 axs10x_print_board_ver(CREG_MB_VER, mb);
 }
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index 5a6ed5afb009..bd08de4be75e 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -6,4 +6,5 @@
 #

 menuconfig ARC_SOC_HSDK
 bool "ARC HS Development Kit SOC"
+select CLK_HSDK
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index a2e7fd17e36d..744e62e58788 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -38,6 +38,42 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
 #define CREG_PAE (CREG_BASE + 0x180)
 #define CREG_PAE_UPDATE (CREG_BASE + 0x194)

+#define CREG_CORE_IF_CLK_DIV (CREG_BASE + 0x4B8)
+#define CREG_CORE_IF_CLK_DIV_2 0x1
+#define CGU_BASE ARC_PERIPHERAL_BASE
+#define CGU_PLL_STATUS (ARC_PERIPHERAL_BASE + 0x4)
+#define CGU_PLL_CTRL (ARC_PERIPHERAL_BASE + 0x0)
+#define CGU_PLL_STATUS_LOCK BIT(0)
+#define CGU_PLL_STATUS_ERR BIT(1)
+#define CGU_PLL_CTRL_1GHZ 0x3A10
+#define HSDK_PLL_LOCK_TIMEOUT 500
+
+#define HSDK_PLL_LOCKED() \
+!!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_LOCK)
+
+#define HSDK_PLL_ERR() \
+!!(ioread32((void __iomem *) CGU_PLL_STATUS) & CGU_PLL_STATUS_ERR)
+
+static void __init hsdk_set_cpu_freq_1ghz(void)
+{
+u32 timeout = HSDK_PLL_LOCK_TIMEOUT;
+
+/*
+ * As we set cpu clock which exceeds 500MHz, the divider for the interface
+ * clock must be programmed to div-by-2.
+ */
+iowrite32(CREG_CORE_IF_CLK_DIV_2, (void __iomem *) CREG_CORE_IF_CLK_DIV);
+
+/* Set cpu clock to 1GHz */
+iowrite32(CGU_PLL_CTRL_1GHZ, (void __iomem *) CGU_PLL_CTRL);
+
+while (!HSDK_PLL_LOCKED() && timeout--)
+cpu_relax();
+
+if (!HSDK_PLL_LOCKED() || HSDK_PLL_ERR())
+pr_err("Failed to setup CPU frequency to 1GHz!");
+}
+
 static void __init hsdk_init_early(void)
 {
 /*
@@ -52,6 +88,12 @@ static void __init hsdk_init_early(void)

 /* Really apply settings made above */
 writel(1, (void __iomem *) CREG_PAE_UPDATE);
+
+/*
+ * Setup CPU frequency to 1GHz.
+ * TODO: remove it after smart hsdk pll driver will be introduced.
+ */
+hsdk_set_cpu_freq_1ghz();
 }

 static const char *hsdk_compat[] __initconst = {
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 7d7ca054c557..e58fab8aec5d 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -36,6 +36,8 @@
 phy1 = &usb1_phy;
 ethernet0 = &cpsw_emac0;
 ethernet1 = &cpsw_emac1;
+spi0 = &spi0;
+spi1 = &spi1;
 };

 cpus {
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 9d276af7c539..081fa68b6f98 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -388,6 +388,7 @@
 pinctrl-0 = <&cpsw_default>;
 pinctrl-1 = <&cpsw_sleep>;
 status = "okay";
+slaves = <1>;
 };

 &davinci_mdio {
@@ -402,11 +403,6 @@
 phy-mode = "rmii";
 };

-&cpsw_emac1 {
-phy_id = <&davinci_mdio>, <1>;
-phy-mode = "rmii";
-};
-
 &phy_sel {
 rmii-clock-ext;
 };
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
index 9c9088c99cc4..60cb084a8d92 100644
--- a/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
+++ b/arch/arm/boot/dts/at91-sama5d27_som1_ek.dts
@@ -67,7 +67,10 @@

 usb1: ohci@00400000 {
 num-ports = <3>;
-atmel,vbus-gpio = <&pioA PIN_PA10 GPIO_ACTIVE_HIGH>;
+atmel,vbus-gpio = <0 /* &pioA PIN_PD20 GPIO_ACTIVE_HIGH */
+&pioA PIN_PA27 GPIO_ACTIVE_HIGH
+0
+>;
 pinctrl-names = "default";
 pinctrl-0 = <&pinctrl_usb_default>;
 status = "okay";
@@ -120,7 +123,7 @@
 pinctrl-names = "default";
 pinctrl-0 = <&pinctrl_mikrobus2_uart>;
 atmel,use-dma-rx;
-atmel-use-dma-tx;
+atmel,use-dma-tx;
 status = "okay";
 };

@@ -178,7 +181,7 @@
 uart4: serial@fc00c000 {
 atmel,use-dma-rx;
 atmel,use-dma-tx;
-pinctrl-name = "default";
+pinctrl-names = "default";
 pinctrl-0 = <&pinctrl_mikrobus1_uart>;
 status = "okay";
 };
@@ -330,7 +333,7 @@
 };

 pinctrl_led_gpio_default: led_gpio_default {
-pinmux = <PIN_PA27__GPIO>,
+pinmux = <PIN_PA10__GPIO>,
 <PIN_PB1__GPIO>,
 <PIN_PA31__GPIO>;
 bias-pull-up;
@@ -396,7 +399,7 @@
 };

 pinctrl_usb_default: usb_default {
-pinmux = <PIN_PA10__GPIO>,
+pinmux = <PIN_PA27__GPIO>,
 <PIN_PD19__GPIO>;
 bias-disable;
 };
@@ -520,17 +523,17 @@

 red {
 label = "red";
-gpios = <&pioA PIN_PA27 GPIO_ACTIVE_LOW>;
+gpios = <&pioA PIN_PA10 GPIO_ACTIVE_HIGH>;
 };

 green {
 label = "green";
-gpios = <&pioA PIN_PB1 GPIO_ACTIVE_LOW>;
+gpios = <&pioA PIN_PB1 GPIO_ACTIVE_HIGH>;
 };

 blue {
 label = "blue";
-gpios = <&pioA PIN_PA31 GPIO_ACTIVE_LOW>;
+gpios = <&pioA PIN_PA31 GPIO_ACTIVE_HIGH>;
 linux,default-trigger = "heartbeat";
 };
 };
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 67e72bc72e80..c75507922f7d 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -15,6 +15,13 @@
 compatible = "ti,da850-evm", "ti,da850";
 model = "DA850/AM1808/OMAP-L138 EVM";

+aliases {
+serial0 = &serial0;
+serial1 = &serial1;
+serial2 = &serial2;
+ethernet0 = &eth0;
+};
+
 soc@1c00000 {
 pmx_core: pinmux@14120 {
 status = "okay";
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index cf229dfabf61..e62b62875cba 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -1817,6 +1817,8 @@
 clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>;
 ti,bit-shift = <24>;
 reg = <0x1868>;
+assigned-clocks = <&mcasp3_ahclkx_mux>;
+assigned-clock-parents = <&abe_24m_fclk>;
 };

 mcasp3_aux_gfclk_mux: mcasp3_aux_gfclk_mux@1868 {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 26c20e1167b9..4acd32a1c4ef 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -144,15 +144,6 @@
 io-channel-names = "temp", "bsi", "vbat";
 };

-rear_camera: camera@0 {
-compatible = "linux,camera";
-
-module {
-model = "TCM8341MD";
-sensor = <&cam1>;
-};
-};
-
 pwm9: dmtimer-pwm {
 compatible = "ti,omap-dmtimer-pwm";
 #pwm-cells = <3>;
@@ -189,10 +180,8 @@
 clock-lanes = <1>;
 data-lanes = <0>;
 lane-polarity = <0 0>;
-clock-inv = <0>;
 /* Select strobe = <1> for back camera, <0> for front camera */
 strobe = <1>;
-crc = <0>;
 };
 };
 };
diff --git a/arch/arm/boot/dts/stm32429i-eval.dts b/arch/arm/boot/dts/stm32429i-eval.dts
index 97b1c2321ba9..293ecb957227 100644
--- a/arch/arm/boot/dts/stm32429i-eval.dts
+++ b/arch/arm/boot/dts/stm32429i-eval.dts
@@ -47,6 +47,7 @@

 /dts-v1/;
 #include "stm32f429.dtsi"
+#include "stm32f429-pinctrl.dtsi"
 #include <dt-bindings/input/input.h>
 #include <dt-bindings/gpio/gpio.h>

@@ -202,10 +203,8 @@
 stmpe1600: stmpe1600@42 {
 compatible = "st,stmpe1600";
 reg = <0x42>;
-irq-gpio = <&gpioi 8 0>;
-irq-trigger = <3>;
 interrupts = <8 3>;
-interrupt-parent = <&exti>;
+interrupt-parent = <&gpioi>;
 interrupt-controller;
 wakeup-source;

diff --git a/arch/arm/boot/dts/stm32f4-pinctrl.dtsi b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi
new file mode 100644
index 000000000000..7f3560c0211d
--- /dev/null
+++ b/arch/arm/boot/dts/stm32f4-pinctrl.dtsi
@@ -0,0 +1,343 @@
+/*
+ * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com>
+ *
+ * This file is dual-licensed: you can use it either under the terms
+ * of the GPL or the X11 license, at your option. Note that this dual
+ * licensing only applies to this file, and not this project as a
+ * whole.
+ *
+ * a) This file is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Or, alternatively,
+ *
+ * b) Permission is hereby granted, free of charge, to any person
+ * obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be
+ * included in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <dt-bindings/pinctrl/stm32f429-pinfunc.h>
+#include <dt-bindings/mfd/stm32f4-rcc.h>
+
+/ {
+soc {
+pinctrl: pin-controller {
+#address-cells = <1>;
+#size-cells = <1>;
+ranges = <0 0x40020000 0x3000>;
+interrupt-parent = <&exti>;
+st,syscfg = <&syscfg 0x8>;
+pins-are-numbered;
+
+gpioa: gpio@40020000 {
+gpio-controller;
+#gpio-cells = <2>;
+interrupt-controller;
+#interrupt-cells = <2>;
+reg = <0x0 0x400>;
+clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>;
+st,bank-name = "GPIOA";
+};
+
+gpiob: gpio@40020400 {
+gpio-controller;
+#gpio-cells = <2>;
+interrupt-controller;
+#interrupt-cells = <2>;
+reg = <0x400 0x400>;
+clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOB)>;
+st,bank-name = "GPIOB";
+};
+
+gpioc: gpio@40020800 {
+gpio-controller;
+#gpio-cells = <2>;
+interrupt-controller;
+#interrupt-cells = <2>;
+reg = <0x800 0x400>;
+clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOC)>;
+st,bank-name = "GPIOC";
+};
+
+gpiod: gpio@40020c00 {
+gpio-controller;
+#gpio-cells = <2>;
+interrupt-controller;
+#interrupt-cells = <2>;
+reg = <0xc00 0x400>;
+clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOD)>;
+st,bank-name = "GPIOD";
+};
+
+gpioe: gpio@40021000 {
+gpio-controller;
+#gpio-cells = <2>;
+interrupt-controller;
+#interrupt-cells = <2>;
+reg = <0x1000 0x400>;
+clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOE)>;
+st,bank-name = "GPIOE";
+};
+
+gpiof: gpio@40021400 {
+gpio-controller;
+#gpio-cells = <2>;
+interrupt-controller;
+#interrupt-cells = <2>;
+reg = <0x1400 0x400>;
+clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOF)>;
+st,bank-name = "GPIOF";
+};
+
+gpiog: gpio@40021800 {
| 117 | gpio-controller; | ||
| 118 | #gpio-cells = <2>; | ||
| 119 | interrupt-controller; | ||
| 120 | #interrupt-cells = <2>; | ||
| 121 | reg = <0x1800 0x400>; | ||
| 122 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOG)>; | ||
| 123 | st,bank-name = "GPIOG"; | ||
| 124 | }; | ||
| 125 | |||
| 126 | gpioh: gpio@40021c00 { | ||
| 127 | gpio-controller; | ||
| 128 | #gpio-cells = <2>; | ||
| 129 | interrupt-controller; | ||
| 130 | #interrupt-cells = <2>; | ||
| 131 | reg = <0x1c00 0x400>; | ||
| 132 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOH)>; | ||
| 133 | st,bank-name = "GPIOH"; | ||
| 134 | }; | ||
| 135 | |||
| 136 | gpioi: gpio@40022000 { | ||
| 137 | gpio-controller; | ||
| 138 | #gpio-cells = <2>; | ||
| 139 | interrupt-controller; | ||
| 140 | #interrupt-cells = <2>; | ||
| 141 | reg = <0x2000 0x400>; | ||
| 142 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOI)>; | ||
| 143 | st,bank-name = "GPIOI"; | ||
| 144 | }; | ||
| 145 | |||
| 146 | gpioj: gpio@40022400 { | ||
| 147 | gpio-controller; | ||
| 148 | #gpio-cells = <2>; | ||
| 149 | interrupt-controller; | ||
| 150 | #interrupt-cells = <2>; | ||
| 151 | reg = <0x2400 0x400>; | ||
| 152 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOJ)>; | ||
| 153 | st,bank-name = "GPIOJ"; | ||
| 154 | }; | ||
| 155 | |||
| 156 | gpiok: gpio@40022800 { | ||
| 157 | gpio-controller; | ||
| 158 | #gpio-cells = <2>; | ||
| 159 | interrupt-controller; | ||
| 160 | #interrupt-cells = <2>; | ||
| 161 | reg = <0x2800 0x400>; | ||
| 162 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOK)>; | ||
| 163 | st,bank-name = "GPIOK"; | ||
| 164 | }; | ||
| 165 | |||
| 166 | usart1_pins_a: usart1@0 { | ||
| 167 | pins1 { | ||
| 168 | pinmux = <STM32F429_PA9_FUNC_USART1_TX>; | ||
| 169 | bias-disable; | ||
| 170 | drive-push-pull; | ||
| 171 | slew-rate = <0>; | ||
| 172 | }; | ||
| 173 | pins2 { | ||
| 174 | pinmux = <STM32F429_PA10_FUNC_USART1_RX>; | ||
| 175 | bias-disable; | ||
| 176 | }; | ||
| 177 | }; | ||
| 178 | |||
| 179 | usart3_pins_a: usart3@0 { | ||
| 180 | pins1 { | ||
| 181 | pinmux = <STM32F429_PB10_FUNC_USART3_TX>; | ||
| 182 | bias-disable; | ||
| 183 | drive-push-pull; | ||
| 184 | slew-rate = <0>; | ||
| 185 | }; | ||
| 186 | pins2 { | ||
| 187 | pinmux = <STM32F429_PB11_FUNC_USART3_RX>; | ||
| 188 | bias-disable; | ||
| 189 | }; | ||
| 190 | }; | ||
| 191 | |||
| 192 | usbotg_fs_pins_a: usbotg_fs@0 { | ||
| 193 | pins { | ||
| 194 | pinmux = <STM32F429_PA10_FUNC_OTG_FS_ID>, | ||
| 195 | <STM32F429_PA11_FUNC_OTG_FS_DM>, | ||
| 196 | <STM32F429_PA12_FUNC_OTG_FS_DP>; | ||
| 197 | bias-disable; | ||
| 198 | drive-push-pull; | ||
| 199 | slew-rate = <2>; | ||
| 200 | }; | ||
| 201 | }; | ||
| 202 | |||
| 203 | usbotg_fs_pins_b: usbotg_fs@1 { | ||
| 204 | pins { | ||
| 205 | pinmux = <STM32F429_PB12_FUNC_OTG_HS_ID>, | ||
| 206 | <STM32F429_PB14_FUNC_OTG_HS_DM>, | ||
| 207 | <STM32F429_PB15_FUNC_OTG_HS_DP>; | ||
| 208 | bias-disable; | ||
| 209 | drive-push-pull; | ||
| 210 | slew-rate = <2>; | ||
| 211 | }; | ||
| 212 | }; | ||
| 213 | |||
| 214 | usbotg_hs_pins_a: usbotg_hs@0 { | ||
| 215 | pins { | ||
| 216 | pinmux = <STM32F429_PH4_FUNC_OTG_HS_ULPI_NXT>, | ||
| 217 | <STM32F429_PI11_FUNC_OTG_HS_ULPI_DIR>, | ||
| 218 | <STM32F429_PC0_FUNC_OTG_HS_ULPI_STP>, | ||
| 219 | <STM32F429_PA5_FUNC_OTG_HS_ULPI_CK>, | ||
| 220 | <STM32F429_PA3_FUNC_OTG_HS_ULPI_D0>, | ||
| 221 | <STM32F429_PB0_FUNC_OTG_HS_ULPI_D1>, | ||
| 222 | <STM32F429_PB1_FUNC_OTG_HS_ULPI_D2>, | ||
| 223 | <STM32F429_PB10_FUNC_OTG_HS_ULPI_D3>, | ||
| 224 | <STM32F429_PB11_FUNC_OTG_HS_ULPI_D4>, | ||
| 225 | <STM32F429_PB12_FUNC_OTG_HS_ULPI_D5>, | ||
| 226 | <STM32F429_PB13_FUNC_OTG_HS_ULPI_D6>, | ||
| 227 | <STM32F429_PB5_FUNC_OTG_HS_ULPI_D7>; | ||
| 228 | bias-disable; | ||
| 229 | drive-push-pull; | ||
| 230 | slew-rate = <2>; | ||
| 231 | }; | ||
| 232 | }; | ||
| 233 | |||
| 234 | ethernet_mii: mii@0 { | ||
| 235 | pins { | ||
| 236 | pinmux = <STM32F429_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0>, | ||
| 237 | <STM32F429_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1>, | ||
| 238 | <STM32F429_PC2_FUNC_ETH_MII_TXD2>, | ||
| 239 | <STM32F429_PB8_FUNC_ETH_MII_TXD3>, | ||
| 240 | <STM32F429_PC3_FUNC_ETH_MII_TX_CLK>, | ||
| 241 | <STM32F429_PG11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN>, | ||
| 242 | <STM32F429_PA2_FUNC_ETH_MDIO>, | ||
| 243 | <STM32F429_PC1_FUNC_ETH_MDC>, | ||
| 244 | <STM32F429_PA1_FUNC_ETH_MII_RX_CLK_ETH_RMII_REF_CLK>, | ||
| 245 | <STM32F429_PA7_FUNC_ETH_MII_RX_DV_ETH_RMII_CRS_DV>, | ||
| 246 | <STM32F429_PC4_FUNC_ETH_MII_RXD0_ETH_RMII_RXD0>, | ||
| 247 | <STM32F429_PC5_FUNC_ETH_MII_RXD1_ETH_RMII_RXD1>, | ||
| 248 | <STM32F429_PH6_FUNC_ETH_MII_RXD2>, | ||
| 249 | <STM32F429_PH7_FUNC_ETH_MII_RXD3>; | ||
| 250 | slew-rate = <2>; | ||
| 251 | }; | ||
| 252 | }; | ||
| 253 | |||
| 254 | adc3_in8_pin: adc@200 { | ||
| 255 | pins { | ||
| 256 | pinmux = <STM32F429_PF10_FUNC_ANALOG>; | ||
| 257 | }; | ||
| 258 | }; | ||
| 259 | |||
| 260 | pwm1_pins: pwm@1 { | ||
| 261 | pins { | ||
| 262 | pinmux = <STM32F429_PA8_FUNC_TIM1_CH1>, | ||
| 263 | <STM32F429_PB13_FUNC_TIM1_CH1N>, | ||
| 264 | <STM32F429_PB12_FUNC_TIM1_BKIN>; | ||
| 265 | }; | ||
| 266 | }; | ||
| 267 | |||
| 268 | pwm3_pins: pwm@3 { | ||
| 269 | pins { | ||
| 270 | pinmux = <STM32F429_PB4_FUNC_TIM3_CH1>, | ||
| 271 | <STM32F429_PB5_FUNC_TIM3_CH2>; | ||
| 272 | }; | ||
| 273 | }; | ||
| 274 | |||
| 275 | i2c1_pins: i2c1@0 { | ||
| 276 | pins { | ||
| 277 | pinmux = <STM32F429_PB9_FUNC_I2C1_SDA>, | ||
| 278 | <STM32F429_PB6_FUNC_I2C1_SCL>; | ||
| 279 | bias-disable; | ||
| 280 | drive-open-drain; | ||
| 281 | slew-rate = <3>; | ||
| 282 | }; | ||
| 283 | }; | ||
| 284 | |||
| 285 | ltdc_pins: ltdc@0 { | ||
| 286 | pins { | ||
| 287 | pinmux = <STM32F429_PI12_FUNC_LCD_HSYNC>, | ||
| 288 | <STM32F429_PI13_FUNC_LCD_VSYNC>, | ||
| 289 | <STM32F429_PI14_FUNC_LCD_CLK>, | ||
| 290 | <STM32F429_PI15_FUNC_LCD_R0>, | ||
| 291 | <STM32F429_PJ0_FUNC_LCD_R1>, | ||
| 292 | <STM32F429_PJ1_FUNC_LCD_R2>, | ||
| 293 | <STM32F429_PJ2_FUNC_LCD_R3>, | ||
| 294 | <STM32F429_PJ3_FUNC_LCD_R4>, | ||
| 295 | <STM32F429_PJ4_FUNC_LCD_R5>, | ||
| 296 | <STM32F429_PJ5_FUNC_LCD_R6>, | ||
| 297 | <STM32F429_PJ6_FUNC_LCD_R7>, | ||
| 298 | <STM32F429_PJ7_FUNC_LCD_G0>, | ||
| 299 | <STM32F429_PJ8_FUNC_LCD_G1>, | ||
| 300 | <STM32F429_PJ9_FUNC_LCD_G2>, | ||
| 301 | <STM32F429_PJ10_FUNC_LCD_G3>, | ||
| 302 | <STM32F429_PJ11_FUNC_LCD_G4>, | ||
| 303 | <STM32F429_PJ12_FUNC_LCD_B0>, | ||
| 304 | <STM32F429_PJ13_FUNC_LCD_B1>, | ||
| 305 | <STM32F429_PJ14_FUNC_LCD_B2>, | ||
| 306 | <STM32F429_PJ15_FUNC_LCD_B3>, | ||
| 307 | <STM32F429_PK0_FUNC_LCD_G5>, | ||
| 308 | <STM32F429_PK1_FUNC_LCD_G6>, | ||
| 309 | <STM32F429_PK2_FUNC_LCD_G7>, | ||
| 310 | <STM32F429_PK3_FUNC_LCD_B4>, | ||
| 311 | <STM32F429_PK4_FUNC_LCD_B5>, | ||
| 312 | <STM32F429_PK5_FUNC_LCD_B6>, | ||
| 313 | <STM32F429_PK6_FUNC_LCD_B7>, | ||
| 314 | <STM32F429_PK7_FUNC_LCD_DE>; | ||
| 315 | slew-rate = <2>; | ||
| 316 | }; | ||
| 317 | }; | ||
| 318 | |||
| 319 | dcmi_pins: dcmi@0 { | ||
| 320 | pins { | ||
| 321 | pinmux = <STM32F429_PA4_FUNC_DCMI_HSYNC>, | ||
| 322 | <STM32F429_PB7_FUNC_DCMI_VSYNC>, | ||
| 323 | <STM32F429_PA6_FUNC_DCMI_PIXCLK>, | ||
| 324 | <STM32F429_PC6_FUNC_DCMI_D0>, | ||
| 325 | <STM32F429_PC7_FUNC_DCMI_D1>, | ||
| 326 | <STM32F429_PC8_FUNC_DCMI_D2>, | ||
| 327 | <STM32F429_PC9_FUNC_DCMI_D3>, | ||
| 328 | <STM32F429_PC11_FUNC_DCMI_D4>, | ||
| 329 | <STM32F429_PD3_FUNC_DCMI_D5>, | ||
| 330 | <STM32F429_PB8_FUNC_DCMI_D6>, | ||
| 331 | <STM32F429_PE6_FUNC_DCMI_D7>, | ||
| 332 | <STM32F429_PC10_FUNC_DCMI_D8>, | ||
| 333 | <STM32F429_PC12_FUNC_DCMI_D9>, | ||
| 334 | <STM32F429_PD6_FUNC_DCMI_D10>, | ||
| 335 | <STM32F429_PD2_FUNC_DCMI_D11>; | ||
| 336 | bias-disable; | ||
| 337 | drive-push-pull; | ||
| 338 | slew-rate = <3>; | ||
| 339 | }; | ||
| 340 | }; | ||
| 341 | }; | ||
| 342 | }; | ||
| 343 | }; | ||
diff --git a/arch/arm/boot/dts/stm32f429-disco.dts b/arch/arm/boot/dts/stm32f429-disco.dts
index c66d617e4245..5ceb2cf3777f 100644
--- a/arch/arm/boot/dts/stm32f429-disco.dts
+++ b/arch/arm/boot/dts/stm32f429-disco.dts
| @@ -47,6 +47,7 @@ | |||
| 47 | 47 | ||
| 48 | /dts-v1/; | 48 | /dts-v1/; |
| 49 | #include "stm32f429.dtsi" | 49 | #include "stm32f429.dtsi" |
| 50 | #include "stm32f429-pinctrl.dtsi" | ||
| 50 | #include <dt-bindings/input/input.h> | 51 | #include <dt-bindings/input/input.h> |
| 51 | 52 | ||
| 52 | / { | 53 | / { |
diff --git a/arch/arm/boot/dts/stm32f429-pinctrl.dtsi b/arch/arm/boot/dts/stm32f429-pinctrl.dtsi
new file mode 100644
index 000000000000..3e7a17d9112e
--- /dev/null
+++ b/arch/arm/boot/dts/stm32f429-pinctrl.dtsi
| @@ -0,0 +1,95 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com> | ||
| 3 | * | ||
| 4 | * This file is dual-licensed: you can use it either under the terms | ||
| 5 | * of the GPL or the X11 license, at your option. Note that this dual | ||
| 6 | * licensing only applies to this file, and not this project as a | ||
| 7 | * whole. | ||
| 8 | * | ||
| 9 | * a) This file is free software; you can redistribute it and/or | ||
| 10 | * modify it under the terms of the GNU General Public License as | ||
| 11 | * published by the Free Software Foundation; either version 2 of the | ||
| 12 | * License, or (at your option) any later version. | ||
| 13 | * | ||
| 14 | * This file is distributed in the hope that it will be useful, | ||
| 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 17 | * GNU General Public License for more details. | ||
| 18 | * | ||
| 19 | * Or, alternatively, | ||
| 20 | * | ||
| 21 | * b) Permission is hereby granted, free of charge, to any person | ||
| 22 | * obtaining a copy of this software and associated documentation | ||
| 23 | * files (the "Software"), to deal in the Software without | ||
| 24 | * restriction, including without limitation the rights to use, | ||
| 25 | * copy, modify, merge, publish, distribute, sublicense, and/or | ||
| 26 | * sell copies of the Software, and to permit persons to whom the | ||
| 27 | * Software is furnished to do so, subject to the following | ||
| 28 | * conditions: | ||
| 29 | * | ||
| 30 | * The above copyright notice and this permission notice shall be | ||
| 31 | * included in all copies or substantial portions of the Software. | ||
| 32 | * | ||
| 33 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 34 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES | ||
| 35 | * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 36 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT | ||
| 37 | * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, | ||
| 38 | * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 39 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 40 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 41 | */ | ||
| 42 | |||
| 43 | #include "stm32f4-pinctrl.dtsi" | ||
| 44 | |||
| 45 | / { | ||
| 46 | soc { | ||
| 47 | pinctrl: pin-controller { | ||
| 48 | compatible = "st,stm32f429-pinctrl"; | ||
| 49 | |||
| 50 | gpioa: gpio@40020000 { | ||
| 51 | gpio-ranges = <&pinctrl 0 0 16>; | ||
| 52 | }; | ||
| 53 | |||
| 54 | gpiob: gpio@40020400 { | ||
| 55 | gpio-ranges = <&pinctrl 0 16 16>; | ||
| 56 | }; | ||
| 57 | |||
| 58 | gpioc: gpio@40020800 { | ||
| 59 | gpio-ranges = <&pinctrl 0 32 16>; | ||
| 60 | }; | ||
| 61 | |||
| 62 | gpiod: gpio@40020c00 { | ||
| 63 | gpio-ranges = <&pinctrl 0 48 16>; | ||
| 64 | }; | ||
| 65 | |||
| 66 | gpioe: gpio@40021000 { | ||
| 67 | gpio-ranges = <&pinctrl 0 64 16>; | ||
| 68 | }; | ||
| 69 | |||
| 70 | gpiof: gpio@40021400 { | ||
| 71 | gpio-ranges = <&pinctrl 0 80 16>; | ||
| 72 | }; | ||
| 73 | |||
| 74 | gpiog: gpio@40021800 { | ||
| 75 | gpio-ranges = <&pinctrl 0 96 16>; | ||
| 76 | }; | ||
| 77 | |||
| 78 | gpioh: gpio@40021c00 { | ||
| 79 | gpio-ranges = <&pinctrl 0 112 16>; | ||
| 80 | }; | ||
| 81 | |||
| 82 | gpioi: gpio@40022000 { | ||
| 83 | gpio-ranges = <&pinctrl 0 128 16>; | ||
| 84 | }; | ||
| 85 | |||
| 86 | gpioj: gpio@40022400 { | ||
| 87 | gpio-ranges = <&pinctrl 0 144 16>; | ||
| 88 | }; | ||
| 89 | |||
| 90 | gpiok: gpio@40022800 { | ||
| 91 | gpio-ranges = <&pinctrl 0 160 8>; | ||
| 92 | }; | ||
| 93 | }; | ||
| 94 | }; | ||
| 95 | }; | ||
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index dd7e99b1f43b..5b36eb114ddc 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
| @@ -47,7 +47,6 @@ | |||
| 47 | 47 | ||
| 48 | #include "skeleton.dtsi" | 48 | #include "skeleton.dtsi" |
| 49 | #include "armv7-m.dtsi" | 49 | #include "armv7-m.dtsi" |
| 50 | #include <dt-bindings/pinctrl/stm32f429-pinfunc.h> | ||
| 51 | #include <dt-bindings/clock/stm32fx-clock.h> | 50 | #include <dt-bindings/clock/stm32fx-clock.h> |
| 52 | #include <dt-bindings/mfd/stm32f4-rcc.h> | 51 | #include <dt-bindings/mfd/stm32f4-rcc.h> |
| 53 | 52 | ||
| @@ -591,302 +590,6 @@ | |||
| 591 | status = "disabled"; | 590 | status = "disabled"; |
| 592 | }; | 591 | }; |
| 593 | 592 | ||
| 594 | pinctrl: pin-controller { | ||
| 595 | #address-cells = <1>; | ||
| 596 | #size-cells = <1>; | ||
| 597 | compatible = "st,stm32f429-pinctrl"; | ||
| 598 | ranges = <0 0x40020000 0x3000>; | ||
| 599 | interrupt-parent = <&exti>; | ||
| 600 | st,syscfg = <&syscfg 0x8>; | ||
| 601 | pins-are-numbered; | ||
| 602 | |||
| 603 | gpioa: gpio@40020000 { | ||
| 604 | gpio-controller; | ||
| 605 | #gpio-cells = <2>; | ||
| 606 | interrupt-controller; | ||
| 607 | #interrupt-cells = <2>; | ||
| 608 | reg = <0x0 0x400>; | ||
| 609 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOA)>; | ||
| 610 | st,bank-name = "GPIOA"; | ||
| 611 | }; | ||
| 612 | |||
| 613 | gpiob: gpio@40020400 { | ||
| 614 | gpio-controller; | ||
| 615 | #gpio-cells = <2>; | ||
| 616 | interrupt-controller; | ||
| 617 | #interrupt-cells = <2>; | ||
| 618 | reg = <0x400 0x400>; | ||
| 619 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOB)>; | ||
| 620 | st,bank-name = "GPIOB"; | ||
| 621 | }; | ||
| 622 | |||
| 623 | gpioc: gpio@40020800 { | ||
| 624 | gpio-controller; | ||
| 625 | #gpio-cells = <2>; | ||
| 626 | interrupt-controller; | ||
| 627 | #interrupt-cells = <2>; | ||
| 628 | reg = <0x800 0x400>; | ||
| 629 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOC)>; | ||
| 630 | st,bank-name = "GPIOC"; | ||
| 631 | }; | ||
| 632 | |||
| 633 | gpiod: gpio@40020c00 { | ||
| 634 | gpio-controller; | ||
| 635 | #gpio-cells = <2>; | ||
| 636 | interrupt-controller; | ||
| 637 | #interrupt-cells = <2>; | ||
| 638 | reg = <0xc00 0x400>; | ||
| 639 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOD)>; | ||
| 640 | st,bank-name = "GPIOD"; | ||
| 641 | }; | ||
| 642 | |||
| 643 | gpioe: gpio@40021000 { | ||
| 644 | gpio-controller; | ||
| 645 | #gpio-cells = <2>; | ||
| 646 | interrupt-controller; | ||
| 647 | #interrupt-cells = <2>; | ||
| 648 | reg = <0x1000 0x400>; | ||
| 649 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOE)>; | ||
| 650 | st,bank-name = "GPIOE"; | ||
| 651 | }; | ||
| 652 | |||
| 653 | gpiof: gpio@40021400 { | ||
| 654 | gpio-controller; | ||
| 655 | #gpio-cells = <2>; | ||
| 656 | interrupt-controller; | ||
| 657 | #interrupt-cells = <2>; | ||
| 658 | reg = <0x1400 0x400>; | ||
| 659 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOF)>; | ||
| 660 | st,bank-name = "GPIOF"; | ||
| 661 | }; | ||
| 662 | |||
| 663 | gpiog: gpio@40021800 { | ||
| 664 | gpio-controller; | ||
| 665 | #gpio-cells = <2>; | ||
| 666 | interrupt-controller; | ||
| 667 | #interrupt-cells = <2>; | ||
| 668 | reg = <0x1800 0x400>; | ||
| 669 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOG)>; | ||
| 670 | st,bank-name = "GPIOG"; | ||
| 671 | }; | ||
| 672 | |||
| 673 | gpioh: gpio@40021c00 { | ||
| 674 | gpio-controller; | ||
| 675 | #gpio-cells = <2>; | ||
| 676 | interrupt-controller; | ||
| 677 | #interrupt-cells = <2>; | ||
| 678 | reg = <0x1c00 0x400>; | ||
| 679 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOH)>; | ||
| 680 | st,bank-name = "GPIOH"; | ||
| 681 | }; | ||
| 682 | |||
| 683 | gpioi: gpio@40022000 { | ||
| 684 | gpio-controller; | ||
| 685 | #gpio-cells = <2>; | ||
| 686 | interrupt-controller; | ||
| 687 | #interrupt-cells = <2>; | ||
| 688 | reg = <0x2000 0x400>; | ||
| 689 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOI)>; | ||
| 690 | st,bank-name = "GPIOI"; | ||
| 691 | }; | ||
| 692 | |||
| 693 | gpioj: gpio@40022400 { | ||
| 694 | gpio-controller; | ||
| 695 | #gpio-cells = <2>; | ||
| 696 | interrupt-controller; | ||
| 697 | #interrupt-cells = <2>; | ||
| 698 | reg = <0x2400 0x400>; | ||
| 699 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOJ)>; | ||
| 700 | st,bank-name = "GPIOJ"; | ||
| 701 | }; | ||
| 702 | |||
| 703 | gpiok: gpio@40022800 { | ||
| 704 | gpio-controller; | ||
| 705 | #gpio-cells = <2>; | ||
| 706 | interrupt-controller; | ||
| 707 | #interrupt-cells = <2>; | ||
| 708 | reg = <0x2800 0x400>; | ||
| 709 | clocks = <&rcc 0 STM32F4_AHB1_CLOCK(GPIOK)>; | ||
| 710 | st,bank-name = "GPIOK"; | ||
| 711 | }; | ||
| 712 | |||
| 713 | usart1_pins_a: usart1@0 { | ||
| 714 | pins1 { | ||
| 715 | pinmux = <STM32F429_PA9_FUNC_USART1_TX>; | ||
| 716 | bias-disable; | ||
| 717 | drive-push-pull; | ||
| 718 | slew-rate = <0>; | ||
| 719 | }; | ||
| 720 | pins2 { | ||
| 721 | pinmux = <STM32F429_PA10_FUNC_USART1_RX>; | ||
| 722 | bias-disable; | ||
| 723 | }; | ||
| 724 | }; | ||
| 725 | |||
| 726 | usart3_pins_a: usart3@0 { | ||
| 727 | pins1 { | ||
| 728 | pinmux = <STM32F429_PB10_FUNC_USART3_TX>; | ||
| 729 | bias-disable; | ||
| 730 | drive-push-pull; | ||
| 731 | slew-rate = <0>; | ||
| 732 | }; | ||
| 733 | pins2 { | ||
| 734 | pinmux = <STM32F429_PB11_FUNC_USART3_RX>; | ||
| 735 | bias-disable; | ||
| 736 | }; | ||
| 737 | }; | ||
| 738 | |||
| 739 | usbotg_fs_pins_a: usbotg_fs@0 { | ||
| 740 | pins { | ||
| 741 | pinmux = <STM32F429_PA10_FUNC_OTG_FS_ID>, | ||
| 742 | <STM32F429_PA11_FUNC_OTG_FS_DM>, | ||
| 743 | <STM32F429_PA12_FUNC_OTG_FS_DP>; | ||
| 744 | bias-disable; | ||
| 745 | drive-push-pull; | ||
| 746 | slew-rate = <2>; | ||
| 747 | }; | ||
| 748 | }; | ||
| 749 | |||
| 750 | usbotg_fs_pins_b: usbotg_fs@1 { | ||
| 751 | pins { | ||
| 752 | pinmux = <STM32F429_PB12_FUNC_OTG_HS_ID>, | ||
| 753 | <STM32F429_PB14_FUNC_OTG_HS_DM>, | ||
| 754 | <STM32F429_PB15_FUNC_OTG_HS_DP>; | ||
| 755 | bias-disable; | ||
| 756 | drive-push-pull; | ||
| 757 | slew-rate = <2>; | ||
| 758 | }; | ||
| 759 | }; | ||
| 760 | |||
| 761 | usbotg_hs_pins_a: usbotg_hs@0 { | ||
| 762 | pins { | ||
| 763 | pinmux = <STM32F429_PH4_FUNC_OTG_HS_ULPI_NXT>, | ||
| 764 | <STM32F429_PI11_FUNC_OTG_HS_ULPI_DIR>, | ||
| 765 | <STM32F429_PC0_FUNC_OTG_HS_ULPI_STP>, | ||
| 766 | <STM32F429_PA5_FUNC_OTG_HS_ULPI_CK>, | ||
| 767 | <STM32F429_PA3_FUNC_OTG_HS_ULPI_D0>, | ||
| 768 | <STM32F429_PB0_FUNC_OTG_HS_ULPI_D1>, | ||
| 769 | <STM32F429_PB1_FUNC_OTG_HS_ULPI_D2>, | ||
| 770 | <STM32F429_PB10_FUNC_OTG_HS_ULPI_D3>, | ||
| 771 | <STM32F429_PB11_FUNC_OTG_HS_ULPI_D4>, | ||
| 772 | <STM32F429_PB12_FUNC_OTG_HS_ULPI_D5>, | ||
| 773 | <STM32F429_PB13_FUNC_OTG_HS_ULPI_D6>, | ||
| 774 | <STM32F429_PB5_FUNC_OTG_HS_ULPI_D7>; | ||
| 775 | bias-disable; | ||
| 776 | drive-push-pull; | ||
| 777 | slew-rate = <2>; | ||
| 778 | }; | ||
| 779 | }; | ||
| 780 | |||
| 781 | ethernet_mii: mii@0 { | ||
| 782 | pins { | ||
| 783 | pinmux = <STM32F429_PG13_FUNC_ETH_MII_TXD0_ETH_RMII_TXD0>, | ||
| 784 | <STM32F429_PG14_FUNC_ETH_MII_TXD1_ETH_RMII_TXD1>, | ||
| 785 | <STM32F429_PC2_FUNC_ETH_MII_TXD2>, | ||
| 786 | <STM32F429_PB8_FUNC_ETH_MII_TXD3>, | ||
| 787 | <STM32F429_PC3_FUNC_ETH_MII_TX_CLK>, | ||
| 788 | <STM32F429_PG11_FUNC_ETH_MII_TX_EN_ETH_RMII_TX_EN>, | ||
| 789 | <STM32F429_PA2_FUNC_ETH_MDIO>, | ||
| 790 | <STM32F429_PC1_FUNC_ETH_MDC>, | ||
| 791 | <STM32F429_PA1_FUNC_ETH_MII_RX_CLK_ETH_RMII_REF_CLK>, | ||
| 792 | <STM32F429_PA7_FUNC_ETH_MII_RX_DV_ETH_RMII_CRS_DV>, | ||
| 793 | <STM32F429_PC4_FUNC_ETH_MII_RXD0_ETH_RMII_RXD0>, | ||
| 794 | <STM32F429_PC5_FUNC_ETH_MII_RXD1_ETH_RMII_RXD1>, | ||
| 795 | <STM32F429_PH6_FUNC_ETH_MII_RXD2>, | ||
| 796 | <STM32F429_PH7_FUNC_ETH_MII_RXD3>; | ||
| 797 | slew-rate = <2>; | ||
| 798 | }; | ||
| 799 | }; | ||
| 800 | |||
| 801 | adc3_in8_pin: adc@200 { | ||
| 802 | pins { | ||
| 803 | pinmux = <STM32F429_PF10_FUNC_ANALOG>; | ||
| 804 | }; | ||
| 805 | }; | ||
| 806 | |||
| 807 | pwm1_pins: pwm@1 { | ||
| 808 | pins { | ||
| 809 | pinmux = <STM32F429_PA8_FUNC_TIM1_CH1>, | ||
| 810 | <STM32F429_PB13_FUNC_TIM1_CH1N>, | ||
| 811 | <STM32F429_PB12_FUNC_TIM1_BKIN>; | ||
| 812 | }; | ||
| 813 | }; | ||
| 814 | |||
| 815 | pwm3_pins: pwm@3 { | ||
| 816 | pins { | ||
| 817 | pinmux = <STM32F429_PB4_FUNC_TIM3_CH1>, | ||
| 818 | <STM32F429_PB5_FUNC_TIM3_CH2>; | ||
| 819 | }; | ||
| 820 | }; | ||
| 821 | |||
| 822 | i2c1_pins: i2c1@0 { | ||
| 823 | pins { | ||
| 824 | pinmux = <STM32F429_PB9_FUNC_I2C1_SDA>, | ||
| 825 | <STM32F429_PB6_FUNC_I2C1_SCL>; | ||
| 826 | bias-disable; | ||
| 827 | drive-open-drain; | ||
| 828 | slew-rate = <3>; | ||
| 829 | }; | ||
| 830 | }; | ||
| 831 | |||
| 832 | ltdc_pins: ltdc@0 { | ||
| 833 | pins { | ||
| 834 | pinmux = <STM32F429_PI12_FUNC_LCD_HSYNC>, | ||
| 835 | <STM32F429_PI13_FUNC_LCD_VSYNC>, | ||
| 836 | <STM32F429_PI14_FUNC_LCD_CLK>, | ||
| 837 | <STM32F429_PI15_FUNC_LCD_R0>, | ||
| 838 | <STM32F429_PJ0_FUNC_LCD_R1>, | ||
| 839 | <STM32F429_PJ1_FUNC_LCD_R2>, | ||
| 840 | <STM32F429_PJ2_FUNC_LCD_R3>, | ||
| 841 | <STM32F429_PJ3_FUNC_LCD_R4>, | ||
| 842 | <STM32F429_PJ4_FUNC_LCD_R5>, | ||
| 843 | <STM32F429_PJ5_FUNC_LCD_R6>, | ||
| 844 | <STM32F429_PJ6_FUNC_LCD_R7>, | ||
| 845 | <STM32F429_PJ7_FUNC_LCD_G0>, | ||
| 846 | <STM32F429_PJ8_FUNC_LCD_G1>, | ||
| 847 | <STM32F429_PJ9_FUNC_LCD_G2>, | ||
| 848 | <STM32F429_PJ10_FUNC_LCD_G3>, | ||
| 849 | <STM32F429_PJ11_FUNC_LCD_G4>, | ||
| 850 | <STM32F429_PJ12_FUNC_LCD_B0>, | ||
| 851 | <STM32F429_PJ13_FUNC_LCD_B1>, | ||
| 852 | <STM32F429_PJ14_FUNC_LCD_B2>, | ||
| 853 | <STM32F429_PJ15_FUNC_LCD_B3>, | ||
| 854 | <STM32F429_PK0_FUNC_LCD_G5>, | ||
| 855 | <STM32F429_PK1_FUNC_LCD_G6>, | ||
| 856 | <STM32F429_PK2_FUNC_LCD_G7>, | ||
| 857 | <STM32F429_PK3_FUNC_LCD_B4>, | ||
| 858 | <STM32F429_PK4_FUNC_LCD_B5>, | ||
| 859 | <STM32F429_PK5_FUNC_LCD_B6>, | ||
| 860 | <STM32F429_PK6_FUNC_LCD_B7>, | ||
| 861 | <STM32F429_PK7_FUNC_LCD_DE>; | ||
| 862 | slew-rate = <2>; | ||
| 863 | }; | ||
| 864 | }; | ||
| 865 | |||
| 866 | dcmi_pins: dcmi@0 { | ||
| 867 | pins { | ||
| 868 | pinmux = <STM32F429_PA4_FUNC_DCMI_HSYNC>, | ||
| 869 | <STM32F429_PB7_FUNC_DCMI_VSYNC>, | ||
| 870 | <STM32F429_PA6_FUNC_DCMI_PIXCLK>, | ||
| 871 | <STM32F429_PC6_FUNC_DCMI_D0>, | ||
| 872 | <STM32F429_PC7_FUNC_DCMI_D1>, | ||
| 873 | <STM32F429_PC8_FUNC_DCMI_D2>, | ||
| 874 | <STM32F429_PC9_FUNC_DCMI_D3>, | ||
| 875 | <STM32F429_PC11_FUNC_DCMI_D4>, | ||
| 876 | <STM32F429_PD3_FUNC_DCMI_D5>, | ||
| 877 | <STM32F429_PB8_FUNC_DCMI_D6>, | ||
| 878 | <STM32F429_PE6_FUNC_DCMI_D7>, | ||
| 879 | <STM32F429_PC10_FUNC_DCMI_D8>, | ||
| 880 | <STM32F429_PC12_FUNC_DCMI_D9>, | ||
| 881 | <STM32F429_PD6_FUNC_DCMI_D10>, | ||
| 882 | <STM32F429_PD2_FUNC_DCMI_D11>; | ||
| 883 | bias-disable; | ||
| 884 | drive-push-pull; | ||
| 885 | slew-rate = <3>; | ||
| 886 | }; | ||
| 887 | }; | ||
| 888 | }; | ||
| 889 | |||
| 890 | crc: crc@40023000 { | 593 | crc: crc@40023000 { |
| 891 | compatible = "st,stm32f4-crc"; | 594 | compatible = "st,stm32f4-crc"; |
| 892 | reg = <0x40023000 0x400>; | 595 | reg = <0x40023000 0x400>; |
diff --git a/arch/arm/boot/dts/stm32f469-disco.dts b/arch/arm/boot/dts/stm32f469-disco.dts
index 6ae1f037f3f0..c18acbe4cf4e 100644
--- a/arch/arm/boot/dts/stm32f469-disco.dts
+++ b/arch/arm/boot/dts/stm32f469-disco.dts
| @@ -47,6 +47,7 @@ | |||
| 47 | 47 | ||
| 48 | /dts-v1/; | 48 | /dts-v1/; |
| 49 | #include "stm32f429.dtsi" | 49 | #include "stm32f429.dtsi" |
| 50 | #include "stm32f469-pinctrl.dtsi" | ||
| 50 | 51 | ||
| 51 | / { | 52 | / { |
| 52 | model = "STMicroelectronics STM32F469i-DISCO board"; | 53 | model = "STMicroelectronics STM32F469i-DISCO board"; |
diff --git a/arch/arm/boot/dts/stm32f469-pinctrl.dtsi b/arch/arm/boot/dts/stm32f469-pinctrl.dtsi
new file mode 100644
index 000000000000..fff542662eea
--- /dev/null
+++ b/arch/arm/boot/dts/stm32f469-pinctrl.dtsi
| @@ -0,0 +1,96 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2017 - Alexandre Torgue <alexandre.torgue@st.com> | ||
| 3 | * | ||
| 4 | * This file is dual-licensed: you can use it either under the terms | ||
| 5 | * of the GPL or the X11 license, at your option. Note that this dual | ||
| 6 | * licensing only applies to this file, and not this project as a | ||
| 7 | * whole. | ||
| 8 | * | ||
| 9 | * a) This file is free software; you can redistribute it and/or | ||
| 10 | * modify it under the terms of the GNU General Public License as | ||
| 11 | * published by the Free Software Foundation; either version 2 of the | ||
| 12 | * License, or (at your option) any later version. | ||
| 13 | * | ||
| 14 | * This file is distributed in the hope that it will be useful, | ||
| 15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 17 | * GNU General Public License for more details. | ||
| 18 | * | ||
| 19 | * Or, alternatively, | ||
| 20 | * | ||
| 21 | * b) Permission is hereby granted, free of charge, to any person | ||
| 22 | * obtaining a copy of this software and associated documentation | ||
| 23 | * files (the "Software"), to deal in the Software without | ||
| 24 | * restriction, including without limitation the rights to use, | ||
| 25 | * copy, modify, merge, publish, distribute, sublicense, and/or | ||
| 26 | * sell copies of the Software, and to permit persons to whom the | ||
| 27 | * Software is furnished to do so, subject to the following | ||
| 28 | * conditions: | ||
| 29 | * | ||
| 30 | * The above copyright notice and this permission notice shall be | ||
| 31 | * included in all copies or substantial portions of the Software. | ||
| 32 | * | ||
| 33 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
| 34 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES | ||
| 35 | * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
| 36 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT | ||
| 37 | * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, | ||
| 38 | * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
| 39 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 40 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 41 | */ | ||
| 42 | |||
| 43 | #include "stm32f4-pinctrl.dtsi" | ||
| 44 | |||
| 45 | / { | ||
| 46 | soc { | ||
| 47 | pinctrl: pin-controller { | ||
| 48 | compatible = "st,stm32f469-pinctrl"; | ||
| 49 | |||
| 50 | gpioa: gpio@40020000 { | ||
| 51 | gpio-ranges = <&pinctrl 0 0 16>; | ||
| 52 | }; | ||
| 53 | |||
| 54 | gpiob: gpio@40020400 { | ||
| 55 | gpio-ranges = <&pinctrl 0 16 16>; | ||
| 56 | }; | ||
| 57 | |||
| 58 | gpioc: gpio@40020800 { | ||
| 59 | gpio-ranges = <&pinctrl 0 32 16>; | ||
| 60 | }; | ||
| 61 | |||
| 62 | gpiod: gpio@40020c00 { | ||
| 63 | gpio-ranges = <&pinctrl 0 48 16>; | ||
| 64 | }; | ||
| 65 | |||
| 66 | gpioe: gpio@40021000 { | ||
| 67 | gpio-ranges = <&pinctrl 0 64 16>; | ||
| 68 | }; | ||
| 69 | |||
| 70 | gpiof: gpio@40021400 { | ||
| 71 | gpio-ranges = <&pinctrl 0 80 16>; | ||
| 72 | }; | ||
| 73 | |||
| 74 | gpiog: gpio@40021800 { | ||
| 75 | gpio-ranges = <&pinctrl 0 96 16>; | ||
| 76 | }; | ||
| 77 | |||
| 78 | gpioh: gpio@40021c00 { | ||
| 79 | gpio-ranges = <&pinctrl 0 112 16>; | ||
| 80 | }; | ||
| 81 | |||
| 82 | gpioi: gpio@40022000 { | ||
| 83 | gpio-ranges = <&pinctrl 0 128 16>; | ||
| 84 | }; | ||
| 85 | |||
| 86 | gpioj: gpio@40022400 { | ||
| 87 | gpio-ranges = <&pinctrl 0 144 6>, | ||
| 88 | <&pinctrl 12 156 4>; | ||
| 89 | }; | ||
| 90 | |||
| 91 | gpiok: gpio@40022800 { | ||
| 92 | gpio-ranges = <&pinctrl 3 163 5>; | ||
| 93 | }; | ||
| 94 | }; | ||
| 95 | }; | ||
| 96 | }; | ||
diff --git a/arch/arm/configs/gemini_defconfig b/arch/arm/configs/gemini_defconfig
index d2d75fa664a6..2a63fa10c813 100644
--- a/arch/arm/configs/gemini_defconfig
+++ b/arch/arm/configs/gemini_defconfig
| @@ -32,6 +32,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 | |||
| 32 | CONFIG_BLK_DEV_SD=y | 32 | CONFIG_BLK_DEV_SD=y |
| 33 | # CONFIG_SCSI_LOWLEVEL is not set | 33 | # CONFIG_SCSI_LOWLEVEL is not set |
| 34 | CONFIG_ATA=y | 34 | CONFIG_ATA=y |
| 35 | CONFIG_PATA_FTIDE010=y | ||
| 35 | CONFIG_INPUT_EVDEV=y | 36 | CONFIG_INPUT_EVDEV=y |
| 36 | CONFIG_KEYBOARD_GPIO=y | 37 | CONFIG_KEYBOARD_GPIO=y |
| 37 | # CONFIG_INPUT_MOUSE is not set | 38 | # CONFIG_INPUT_MOUSE is not set |
| @@ -55,8 +56,8 @@ CONFIG_LEDS_GPIO=y | |||
| 55 | CONFIG_LEDS_TRIGGERS=y | 56 | CONFIG_LEDS_TRIGGERS=y |
| 56 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y | 57 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y |
| 57 | CONFIG_RTC_CLASS=y | 58 | CONFIG_RTC_CLASS=y |
| 58 | CONFIG_RTC_DRV_GEMINI=y | ||
| 59 | CONFIG_DMADEVICES=y | 59 | CONFIG_DMADEVICES=y |
| 60 | CONFIG_AMBA_PL08X=y | ||
| 60 | # CONFIG_DNOTIFY is not set | 61 | # CONFIG_DNOTIFY is not set |
| 61 | CONFIG_TMPFS=y | 62 | CONFIG_TMPFS=y |
| 62 | CONFIG_TMPFS_POSIX_ACL=y | 63 | CONFIG_TMPFS_POSIX_ACL=y |
diff --git a/arch/arm/configs/pxa_defconfig b/arch/arm/configs/pxa_defconfig
index 64e3a2a8cede..d5e1370ec303 100644
--- a/arch/arm/configs/pxa_defconfig
+++ b/arch/arm/configs/pxa_defconfig
| @@ -471,7 +471,7 @@ CONFIG_LCD_PLATFORM=m | |||
| 471 | CONFIG_LCD_TOSA=m | 471 | CONFIG_LCD_TOSA=m |
| 472 | CONFIG_BACKLIGHT_PWM=m | 472 | CONFIG_BACKLIGHT_PWM=m |
| 473 | CONFIG_BACKLIGHT_TOSA=m | 473 | CONFIG_BACKLIGHT_TOSA=m |
| 474 | CONFIG_FRAMEBUFFER_CONSOLE=m | 474 | CONFIG_FRAMEBUFFER_CONSOLE=y |
| 475 | CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y | 475 | CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y |
| 476 | CONFIG_LOGO=y | 476 | CONFIG_LOGO=y |
| 477 | CONFIG_SOUND=m | 477 | CONFIG_SOUND=m |
diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig
index 44d4fa57ba0a..070e5074f1ee 100644
--- a/arch/arm/configs/viper_defconfig
+++ b/arch/arm/configs/viper_defconfig
| @@ -113,7 +113,7 @@ CONFIG_FB_PXA_PARAMETERS=y | |||
| 113 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 113 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
| 114 | CONFIG_BACKLIGHT_PWM=m | 114 | CONFIG_BACKLIGHT_PWM=m |
| 115 | # CONFIG_VGA_CONSOLE is not set | 115 | # CONFIG_VGA_CONSOLE is not set |
| 116 | CONFIG_FRAMEBUFFER_CONSOLE=m | 116 | CONFIG_FRAMEBUFFER_CONSOLE=y |
| 117 | CONFIG_LOGO=y | 117 | CONFIG_LOGO=y |
| 118 | CONFIG_SOUND=m | 118 | CONFIG_SOUND=m |
| 119 | CONFIG_SND=m | 119 | CONFIG_SND=m |
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index 8d4c0c926c34..09e7050d5653 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
| @@ -112,7 +112,7 @@ CONFIG_FB_PXA=m | |||
| 112 | CONFIG_FB_PXA_PARAMETERS=y | 112 | CONFIG_FB_PXA_PARAMETERS=y |
| 113 | CONFIG_BACKLIGHT_LCD_SUPPORT=y | 113 | CONFIG_BACKLIGHT_LCD_SUPPORT=y |
| 114 | # CONFIG_VGA_CONSOLE is not set | 114 | # CONFIG_VGA_CONSOLE is not set |
| 115 | CONFIG_FRAMEBUFFER_CONSOLE=m | 115 | CONFIG_FRAMEBUFFER_CONSOLE=y |
| 116 | CONFIG_LOGO=y | 116 | CONFIG_LOGO=y |
| 117 | CONFIG_SOUND=m | 117 | CONFIG_SOUND=m |
| 118 | CONFIG_SND=m | 118 | CONFIG_SND=m |
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 5036f996e694..849014c01cf4 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
| @@ -533,8 +533,8 @@ static void __init at91_pm_backup_init(void) | |||
| 533 | } | 533 | } |
| 534 | 534 | ||
| 535 | pm_bu->suspended = 0; | 535 | pm_bu->suspended = 0; |
| 536 | pm_bu->canary = virt_to_phys(&canary); | 536 | pm_bu->canary = __pa_symbol(&canary); |
| 537 | pm_bu->resume = virt_to_phys(cpu_resume); | 537 | pm_bu->resume = __pa_symbol(cpu_resume); |
| 538 | 538 | ||
| 539 | return; | 539 | return; |
| 540 | 540 | ||
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index 5b614388d72f..6d28aa20a7d3 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
| @@ -58,10 +58,10 @@ void omap_hsmmc_late_init(struct omap2_hsmmc_info *c) | |||
| 58 | struct platform_device *pdev; | 58 | struct platform_device *pdev; |
| 59 | int res; | 59 | int res; |
| 60 | 60 | ||
| 61 | if (omap_hsmmc_done != 1) | 61 | if (omap_hsmmc_done) |
| 62 | return; | 62 | return; |
| 63 | 63 | ||
| 64 | omap_hsmmc_done++; | 64 | omap_hsmmc_done = 1; |
| 65 | 65 | ||
| 66 | for (; c->mmc; c++) { | 66 | for (; c->mmc; c++) { |
| 67 | pdev = c->pdev; | 67 | pdev = c->pdev; |
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index f040244c57e7..2f4f7002f38d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
| @@ -839,6 +839,7 @@ static struct omap_hwmod dra7xx_gpio1_hwmod = { | |||
| 839 | .name = "gpio1", | 839 | .name = "gpio1", |
| 840 | .class = &dra7xx_gpio_hwmod_class, | 840 | .class = &dra7xx_gpio_hwmod_class, |
| 841 | .clkdm_name = "wkupaon_clkdm", | 841 | .clkdm_name = "wkupaon_clkdm", |
| 842 | .flags = HWMOD_CONTROL_OPT_CLKS_IN_RESET, | ||
| 842 | .main_clk = "wkupaon_iclk_mux", | 843 | .main_clk = "wkupaon_iclk_mux", |
| 843 | .prcm = { | 844 | .prcm = { |
| 844 | .omap4 = { | 845 | .omap4 = { |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
index c89010e56488..4157987f4a3d 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
| @@ -168,7 +168,8 @@ | |||
| 168 | &sd_emmc_a { | 168 | &sd_emmc_a { |
| 169 | status = "okay"; | 169 | status = "okay"; |
| 170 | pinctrl-0 = <&sdio_pins>; | 170 | pinctrl-0 = <&sdio_pins>; |
| 171 | pinctrl-names = "default"; | 171 | pinctrl-1 = <&sdio_clk_gate_pins>; |
| 172 | pinctrl-names = "default", "clk-gate"; | ||
| 172 | #address-cells = <1>; | 173 | #address-cells = <1>; |
| 173 | #size-cells = <0>; | 174 | #size-cells = <0>; |
| 174 | 175 | ||
| @@ -194,7 +195,8 @@ | |||
| 194 | &sd_emmc_b { | 195 | &sd_emmc_b { |
| 195 | status = "okay"; | 196 | status = "okay"; |
| 196 | pinctrl-0 = <&sdcard_pins>; | 197 | pinctrl-0 = <&sdcard_pins>; |
| 197 | pinctrl-names = "default"; | 198 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
| 199 | pinctrl-names = "default", "clk-gate"; | ||
| 198 | 200 | ||
| 199 | bus-width = <4>; | 201 | bus-width = <4>; |
| 200 | cap-sd-highspeed; | 202 | cap-sd-highspeed; |
| @@ -212,10 +214,10 @@ | |||
| 212 | &sd_emmc_c { | 214 | &sd_emmc_c { |
| 213 | status = "okay"; | 215 | status = "okay"; |
| 214 | pinctrl-0 = <&emmc_pins>; | 216 | pinctrl-0 = <&emmc_pins>; |
| 215 | pinctrl-names = "default"; | 217 | pinctrl-1 = <&emmc_clk_gate_pins>; |
| 218 | pinctrl-names = "default", "clk-gate"; | ||
| 216 | 219 | ||
| 217 | bus-width = <8>; | 220 | bus-width = <8>; |
| 218 | cap-sd-highspeed; | ||
| 219 | cap-mmc-highspeed; | 221 | cap-mmc-highspeed; |
| 220 | max-frequency = <200000000>; | 222 | max-frequency = <200000000>; |
| 221 | non-removable; | 223 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
index 9697a7a79464..4b17a76959b2 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts
| @@ -107,6 +107,9 @@ | |||
| 107 | 107 | ||
| 108 | states = <3300000 0>, | 108 | states = <3300000 0>, |
| 109 | <1800000 1>; | 109 | <1800000 1>; |
| 110 | |||
| 111 | regulator-settling-time-up-us = <100>; | ||
| 112 | regulator-settling-time-down-us = <5000>; | ||
| 110 | }; | 113 | }; |
| 111 | 114 | ||
| 112 | wifi_32k: wifi-32k { | 115 | wifi_32k: wifi-32k { |
| @@ -250,7 +253,8 @@ | |||
| 250 | &sd_emmc_a { | 253 | &sd_emmc_a { |
| 251 | status = "okay"; | 254 | status = "okay"; |
| 252 | pinctrl-0 = <&sdio_pins>, <&sdio_irq_pins>; | 255 | pinctrl-0 = <&sdio_pins>, <&sdio_irq_pins>; |
| 253 | pinctrl-names = "default"; | 256 | pinctrl-1 = <&sdio_clk_gate_pins>; |
| 257 | pinctrl-names = "default", "clk-gate"; | ||
| 254 | #address-cells = <1>; | 258 | #address-cells = <1>; |
| 255 | #size-cells = <0>; | 259 | #size-cells = <0>; |
| 256 | 260 | ||
| @@ -276,11 +280,16 @@ | |||
| 276 | &sd_emmc_b { | 280 | &sd_emmc_b { |
| 277 | status = "okay"; | 281 | status = "okay"; |
| 278 | pinctrl-0 = <&sdcard_pins>; | 282 | pinctrl-0 = <&sdcard_pins>; |
| 279 | pinctrl-names = "default"; | 283 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
| 284 | pinctrl-names = "default", "clk-gate"; | ||
| 280 | 285 | ||
| 281 | bus-width = <4>; | 286 | bus-width = <4>; |
| 282 | cap-sd-highspeed; | 287 | cap-sd-highspeed; |
| 283 | max-frequency = <100000000>; | 288 | sd-uhs-sdr12; |
| 289 | sd-uhs-sdr25; | ||
| 290 | sd-uhs-sdr50; | ||
| 291 | sd-uhs-sdr104; | ||
| 292 | max-frequency = <200000000>; | ||
| 284 | disable-wp; | 293 | disable-wp; |
| 285 | 294 | ||
| 286 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 295 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; |
| @@ -294,10 +303,10 @@ | |||
| 294 | &sd_emmc_c { | 303 | &sd_emmc_c { |
| 295 | status = "disabled"; | 304 | status = "disabled"; |
| 296 | pinctrl-0 = <&emmc_pins>; | 305 | pinctrl-0 = <&emmc_pins>; |
| 297 | pinctrl-names = "default"; | 306 | pinctrl-1 = <&emmc_clk_gate_pins>; |
| 307 | pinctrl-names = "default", "clk-gate"; | ||
| 298 | 308 | ||
| 299 | bus-width = <8>; | 309 | bus-width = <8>; |
| 300 | cap-sd-highspeed; | ||
| 301 | max-frequency = <200000000>; | 310 | max-frequency = <200000000>; |
| 302 | non-removable; | 311 | non-removable; |
| 303 | disable-wp; | 312 | disable-wp; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
index 9c59c3c6d1b6..38dfdde5c147 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts
| @@ -51,7 +51,7 @@ | |||
| 51 | / { | 51 | / { |
| 52 | compatible = "nexbox,a95x", "amlogic,meson-gxbb"; | 52 | compatible = "nexbox,a95x", "amlogic,meson-gxbb"; |
| 53 | model = "NEXBOX A95X"; | 53 | model = "NEXBOX A95X"; |
| 54 | 54 | ||
| 55 | aliases { | 55 | aliases { |
| 56 | serial0 = &uart_AO; | 56 | serial0 = &uart_AO; |
| 57 | }; | 57 | }; |
| @@ -232,7 +232,8 @@ | |||
| 232 | &sd_emmc_a { | 232 | &sd_emmc_a { |
| 233 | status = "okay"; | 233 | status = "okay"; |
| 234 | pinctrl-0 = <&sdio_pins>; | 234 | pinctrl-0 = <&sdio_pins>; |
| 235 | pinctrl-names = "default"; | 235 | pinctrl-1 = <&sdio_clk_gate_pins>; |
| 236 | pinctrl-names = "default", "clk-gate"; | ||
| 236 | #address-cells = <1>; | 237 | #address-cells = <1>; |
| 237 | #size-cells = <0>; | 238 | #size-cells = <0>; |
| 238 | 239 | ||
| @@ -253,7 +254,8 @@ | |||
| 253 | &sd_emmc_b { | 254 | &sd_emmc_b { |
| 254 | status = "okay"; | 255 | status = "okay"; |
| 255 | pinctrl-0 = <&sdcard_pins>; | 256 | pinctrl-0 = <&sdcard_pins>; |
| 256 | pinctrl-names = "default"; | 257 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
| 258 | pinctrl-names = "default", "clk-gate"; | ||
| 257 | 259 | ||
| 258 | bus-width = <4>; | 260 | bus-width = <4>; |
| 259 | cap-sd-highspeed; | 261 | cap-sd-highspeed; |
| @@ -271,10 +273,10 @@ | |||
| 271 | &sd_emmc_c { | 273 | &sd_emmc_c { |
| 272 | status = "okay"; | 274 | status = "okay"; |
| 273 | pinctrl-0 = <&emmc_pins>; | 275 | pinctrl-0 = <&emmc_pins>; |
| 274 | pinctrl-names = "default"; | 276 | pinctrl-1 = <&emmc_clk_gate_pins>; |
| 277 | pinctrl-names = "default", "clk-gate"; | ||
| 275 | 278 | ||
| 276 | bus-width = <8>; | 279 | bus-width = <8>; |
| 277 | cap-sd-highspeed; | ||
| 278 | cap-mmc-highspeed; | 280 | cap-mmc-highspeed; |
| 279 | max-frequency = <200000000>; | 281 | max-frequency = <200000000>; |
| 280 | non-removable; | 282 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index d147c853ab05..1ffa1c238a72 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
| @@ -50,7 +50,7 @@ | |||
| 50 | / { | 50 | / { |
| 51 | compatible = "hardkernel,odroid-c2", "amlogic,meson-gxbb"; | 51 | compatible = "hardkernel,odroid-c2", "amlogic,meson-gxbb"; |
| 52 | model = "Hardkernel ODROID-C2"; | 52 | model = "Hardkernel ODROID-C2"; |
| 53 | 53 | ||
| 54 | aliases { | 54 | aliases { |
| 55 | serial0 = &uart_AO; | 55 | serial0 = &uart_AO; |
| 56 | }; | 56 | }; |
| @@ -253,7 +253,8 @@ | |||
| 253 | &sd_emmc_b { | 253 | &sd_emmc_b { |
| 254 | status = "okay"; | 254 | status = "okay"; |
| 255 | pinctrl-0 = <&sdcard_pins>; | 255 | pinctrl-0 = <&sdcard_pins>; |
| 256 | pinctrl-names = "default"; | 256 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
| 257 | pinctrl-names = "default", "clk-gate"; | ||
| 257 | 258 | ||
| 258 | bus-width = <4>; | 259 | bus-width = <4>; |
| 259 | cap-sd-highspeed; | 260 | cap-sd-highspeed; |
| @@ -271,10 +272,10 @@ | |||
| 271 | &sd_emmc_c { | 272 | &sd_emmc_c { |
| 272 | status = "okay"; | 273 | status = "okay"; |
| 273 | pinctrl-0 = <&emmc_pins>; | 274 | pinctrl-0 = <&emmc_pins>; |
| 274 | pinctrl-names = "default"; | 275 | pinctrl-1 = <&emmc_clk_gate_pins>; |
| 276 | pinctrl-names = "default", "clk-gate"; | ||
| 275 | 277 | ||
| 276 | bus-width = <8>; | 278 | bus-width = <8>; |
| 277 | cap-sd-highspeed; | ||
| 278 | max-frequency = <200000000>; | 279 | max-frequency = <200000000>; |
| 279 | non-removable; | 280 | non-removable; |
| 280 | disable-wp; | 281 | disable-wp; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
index 81ffc689a5bf..23c08c3afd0a 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi
| @@ -194,7 +194,8 @@ | |||
| 194 | &sd_emmc_a { | 194 | &sd_emmc_a { |
| 195 | status = "okay"; | 195 | status = "okay"; |
| 196 | pinctrl-0 = <&sdio_pins>; | 196 | pinctrl-0 = <&sdio_pins>; |
| 197 | pinctrl-names = "default"; | 197 | pinctrl-1 = <&sdio_clk_gate_pins>; |
| 198 | pinctrl-names = "default", "clk-gate"; | ||
| 198 | #address-cells = <1>; | 199 | #address-cells = <1>; |
| 199 | #size-cells = <0>; | 200 | #size-cells = <0>; |
| 200 | 201 | ||
| @@ -220,10 +221,14 @@ | |||
| 220 | &sd_emmc_b { | 221 | &sd_emmc_b { |
| 221 | status = "okay"; | 222 | status = "okay"; |
| 222 | pinctrl-0 = <&sdcard_pins>; | 223 | pinctrl-0 = <&sdcard_pins>; |
| 223 | pinctrl-names = "default"; | 224 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
| 225 | pinctrl-names = "default", "clk-gate"; | ||
| 224 | 226 | ||
| 225 | bus-width = <4>; | 227 | bus-width = <4>; |
| 226 | cap-sd-highspeed; | 228 | cap-sd-highspeed; |
| 229 | sd-uhs-sdr12; | ||
| 230 | sd-uhs-sdr25; | ||
| 231 | sd-uhs-sdr50; | ||
| 227 | max-frequency = <100000000>; | 232 | max-frequency = <100000000>; |
| 228 | disable-wp; | 233 | disable-wp; |
| 229 | 234 | ||
| @@ -238,10 +243,10 @@ | |||
| 238 | &sd_emmc_c { | 243 | &sd_emmc_c { |
| 239 | status = "okay"; | 244 | status = "okay"; |
| 240 | pinctrl-0 = <&emmc_pins>; | 245 | pinctrl-0 = <&emmc_pins>; |
| 241 | pinctrl-names = "default"; | 246 | pinctrl-1 = <&emmc_clk_gate_pins>; |
| 247 | pinctrl-names = "default", "clk-gate"; | ||
| 242 | 248 | ||
| 243 | bus-width = <8>; | 249 | bus-width = <8>; |
| 244 | cap-sd-highspeed; | ||
| 245 | cap-mmc-highspeed; | 250 | cap-mmc-highspeed; |
| 246 | max-frequency = <200000000>; | 251 | max-frequency = <200000000>; |
| 247 | non-removable; | 252 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
index 346753fb6324..f2bc6dea1fc6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi
| @@ -155,7 +155,8 @@ | |||
| 155 | &sd_emmc_a { | 155 | &sd_emmc_a { |
| 156 | status = "okay"; | 156 | status = "okay"; |
| 157 | pinctrl-0 = <&sdio_pins &sdio_irq_pins>; | 157 | pinctrl-0 = <&sdio_pins &sdio_irq_pins>; |
| 158 | pinctrl-names = "default"; | 158 | pinctrl-1 = <&sdio_clk_gate_pins>; |
| 159 | pinctrl-names = "default", "clk-gate"; | ||
| 159 | #address-cells = <1>; | 160 | #address-cells = <1>; |
| 160 | #size-cells = <0>; | 161 | #size-cells = <0>; |
| 161 | 162 | ||
| @@ -181,7 +182,8 @@ | |||
| 181 | &sd_emmc_b { | 182 | &sd_emmc_b { |
| 182 | status = "okay"; | 183 | status = "okay"; |
| 183 | pinctrl-0 = <&sdcard_pins>; | 184 | pinctrl-0 = <&sdcard_pins>; |
| 184 | pinctrl-names = "default"; | 185 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
| 186 | pinctrl-names = "default", "clk-gate"; | ||
| 185 | 187 | ||
| 186 | bus-width = <4>; | 188 | bus-width = <4>; |
| 187 | cap-sd-highspeed; | 189 | cap-sd-highspeed; |
| @@ -198,10 +200,10 @@ | |||
| 198 | &sd_emmc_c { | 200 | &sd_emmc_c { |
| 199 | status = "okay"; | 201 | status = "okay"; |
| 200 | pinctrl-0 = <&emmc_pins>; | 202 | pinctrl-0 = <&emmc_pins>; |
| 201 | pinctrl-names = "default"; | 203 | pinctrl-1 = <&emmc_clk_gate_pins>; |
| 204 | pinctrl-names = "default", "clk-gate"; | ||
| 202 | 205 | ||
| 203 | bus-width = <8>; | 206 | bus-width = <8>; |
| 204 | cap-sd-highspeed; | ||
| 205 | cap-mmc-highspeed; | 207 | cap-mmc-highspeed; |
| 206 | max-frequency = <200000000>; | 208 | max-frequency = <200000000>; |
| 207 | non-removable; | 209 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 52f1687e7a09..af834cdbba79 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
| @@ -392,6 +392,17 @@ | |||
| 392 | }; | 392 | }; |
| 393 | }; | 393 | }; |
| 394 | 394 | ||
| 395 | emmc_clk_gate_pins: emmc_clk_gate { | ||
| 396 | mux { | ||
| 397 | groups = "BOOT_8"; | ||
| 398 | function = "gpio_periphs"; | ||
| 399 | }; | ||
| 400 | cfg-pull-down { | ||
| 401 | pins = "BOOT_8"; | ||
| 402 | bias-pull-down; | ||
| 403 | }; | ||
| 404 | }; | ||
| 405 | |||
| 395 | nor_pins: nor { | 406 | nor_pins: nor { |
| 396 | mux { | 407 | mux { |
| 397 | groups = "nor_d", | 408 | groups = "nor_d", |
| @@ -430,6 +441,17 @@ | |||
| 430 | }; | 441 | }; |
| 431 | }; | 442 | }; |
| 432 | 443 | ||
| 444 | sdcard_clk_gate_pins: sdcard_clk_gate { | ||
| 445 | mux { | ||
| 446 | groups = "CARD_2"; | ||
| 447 | function = "gpio_periphs"; | ||
| 448 | }; | ||
| 449 | cfg-pull-down { | ||
| 450 | pins = "CARD_2"; | ||
| 451 | bias-pull-down; | ||
| 452 | }; | ||
| 453 | }; | ||
| 454 | |||
| 433 | sdio_pins: sdio { | 455 | sdio_pins: sdio { |
| 434 | mux { | 456 | mux { |
| 435 | groups = "sdio_d0", | 457 | groups = "sdio_d0", |
| @@ -442,6 +464,17 @@ | |||
| 442 | }; | 464 | }; |
| 443 | }; | 465 | }; |
| 444 | 466 | ||
| 467 | sdio_clk_gate_pins: sdio_clk_gate { | ||
| 468 | mux { | ||
| 469 | groups = "GPIOX_4"; | ||
| 470 | function = "gpio_periphs"; | ||
| 471 | }; | ||
| 472 | cfg-pull-down { | ||
| 473 | pins = "GPIOX_4"; | ||
| 474 | bias-pull-down; | ||
| 475 | }; | ||
| 476 | }; | ||
| 477 | |||
| 445 | sdio_irq_pins: sdio_irq { | 478 | sdio_irq_pins: sdio_irq { |
| 446 | mux { | 479 | mux { |
| 447 | groups = "sdio_irq"; | 480 | groups = "sdio_irq"; |
| @@ -661,21 +694,21 @@ | |||
| 661 | 694 | ||
| 662 | &sd_emmc_a { | 695 | &sd_emmc_a { |
| 663 | clocks = <&clkc CLKID_SD_EMMC_A>, | 696 | clocks = <&clkc CLKID_SD_EMMC_A>, |
| 664 | <&xtal>, | 697 | <&clkc CLKID_SD_EMMC_A_CLK0>, |
| 665 | <&clkc CLKID_FCLK_DIV2>; | 698 | <&clkc CLKID_FCLK_DIV2>; |
| 666 | clock-names = "core", "clkin0", "clkin1"; | 699 | clock-names = "core", "clkin0", "clkin1"; |
| 667 | }; | 700 | }; |
| 668 | 701 | ||
| 669 | &sd_emmc_b { | 702 | &sd_emmc_b { |
| 670 | clocks = <&clkc CLKID_SD_EMMC_B>, | 703 | clocks = <&clkc CLKID_SD_EMMC_B>, |
| 671 | <&xtal>, | 704 | <&clkc CLKID_SD_EMMC_B_CLK0>, |
| 672 | <&clkc CLKID_FCLK_DIV2>; | 705 | <&clkc CLKID_FCLK_DIV2>; |
| 673 | clock-names = "core", "clkin0", "clkin1"; | 706 | clock-names = "core", "clkin0", "clkin1"; |
| 674 | }; | 707 | }; |
| 675 | 708 | ||
| 676 | &sd_emmc_c { | 709 | &sd_emmc_c { |
| 677 | clocks = <&clkc CLKID_SD_EMMC_C>, | 710 | clocks = <&clkc CLKID_SD_EMMC_C>, |
| 678 | <&xtal>, | 711 | <&clkc CLKID_SD_EMMC_C_CLK0>, |
| 679 | <&clkc CLKID_FCLK_DIV2>; | 712 | <&clkc CLKID_FCLK_DIV2>; |
| 680 | clock-names = "core", "clkin0", "clkin1"; | 713 | clock-names = "core", "clkin0", "clkin1"; |
| 681 | }; | 714 | }; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
index 2a5804ce7f4b..977b4240f3c1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts
| @@ -123,7 +123,8 @@ | |||
| 123 | &sd_emmc_b { | 123 | &sd_emmc_b { |
| 124 | status = "okay"; | 124 | status = "okay"; |
| 125 | pinctrl-0 = <&sdcard_pins>; | 125 | pinctrl-0 = <&sdcard_pins>; |
| 126 | pinctrl-names = "default"; | 126 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
| 127 | pinctrl-names = "default", "clk-gate"; | ||
| 127 | 128 | ||
| 128 | bus-width = <4>; | 129 | bus-width = <4>; |
| 129 | cap-sd-highspeed; | 130 | cap-sd-highspeed; |
| @@ -141,10 +142,10 @@ | |||
| 141 | &sd_emmc_c { | 142 | &sd_emmc_c { |
| 142 | status = "okay"; | 143 | status = "okay"; |
| 143 | pinctrl-0 = <&emmc_pins>; | 144 | pinctrl-0 = <&emmc_pins>; |
| 144 | pinctrl-names = "default"; | 145 | pinctrl-1 = <&emmc_clk_gate_pins>; |
| 146 | pinctrl-names = "default", "clk-gate"; | ||
| 145 | 147 | ||
| 146 | bus-width = <8>; | 148 | bus-width = <8>; |
| 147 | cap-sd-highspeed; | ||
| 148 | cap-mmc-highspeed; | 149 | cap-mmc-highspeed; |
| 149 | max-frequency = <100000000>; | 150 | max-frequency = <100000000>; |
| 150 | non-removable; | 151 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index 69ca14ac10fa..64c54c92e214 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
| @@ -91,6 +91,9 @@ | |||
| 91 | 91 | ||
| 92 | states = <3300000 0>, | 92 | states = <3300000 0>, |
| 93 | <1800000 1>; | 93 | <1800000 1>; |
| 94 | |||
| 95 | regulator-settling-time-up-us = <200>; | ||
| 96 | regulator-settling-time-down-us = <50000>; | ||
| 94 | }; | 97 | }; |
| 95 | 98 | ||
| 96 | vddio_boot: regulator-vddio_boot { | 99 | vddio_boot: regulator-vddio_boot { |
| @@ -197,10 +200,14 @@ | |||
| 197 | &sd_emmc_b { | 200 | &sd_emmc_b { |
| 198 | status = "okay"; | 201 | status = "okay"; |
| 199 | pinctrl-0 = <&sdcard_pins>; | 202 | pinctrl-0 = <&sdcard_pins>; |
| 200 | pinctrl-names = "default"; | 203 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
| 204 | pinctrl-names = "default", "clk-gate"; | ||
| 201 | 205 | ||
| 202 | bus-width = <4>; | 206 | bus-width = <4>; |
| 203 | cap-sd-highspeed; | 207 | cap-sd-highspeed; |
| 208 | sd-uhs-sdr12; | ||
| 209 | sd-uhs-sdr25; | ||
| 210 | sd-uhs-sdr50; | ||
| 204 | max-frequency = <100000000>; | 211 | max-frequency = <100000000>; |
| 205 | disable-wp; | 212 | disable-wp; |
| 206 | 213 | ||
| @@ -215,10 +222,12 @@ | |||
| 215 | &sd_emmc_c { | 222 | &sd_emmc_c { |
| 216 | status = "okay"; | 223 | status = "okay"; |
| 217 | pinctrl-0 = <&emmc_pins>; | 224 | pinctrl-0 = <&emmc_pins>; |
| 218 | pinctrl-names = "default"; | 225 | pinctrl-1 = <&emmc_clk_gate_pins>; |
| 226 | pinctrl-names = "default", "clk-gate"; | ||
| 219 | 227 | ||
| 220 | bus-width = <8>; | 228 | bus-width = <8>; |
| 221 | cap-mmc-highspeed; | 229 | cap-mmc-highspeed; |
| 230 | mmc-ddr-3_3v; | ||
| 222 | max-frequency = <50000000>; | 231 | max-frequency = <50000000>; |
| 223 | non-removable; | 232 | non-removable; |
| 224 | disable-wp; | 233 | disable-wp; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
index 4c2ac7650fcd..1b8f32867aa1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts
| @@ -189,7 +189,8 @@ | |||
| 189 | &sd_emmc_a { | 189 | &sd_emmc_a { |
| 190 | status = "okay"; | 190 | status = "okay"; |
| 191 | pinctrl-0 = <&sdio_pins>; | 191 | pinctrl-0 = <&sdio_pins>; |
| 192 | pinctrl-names = "default"; | 192 | pinctrl-1 = <&sdio_clk_gate_pins>; |
| 193 | pinctrl-names = "default", "clk-gate"; | ||
| 193 | #address-cells = <1>; | 194 | #address-cells = <1>; |
| 194 | #size-cells = <0>; | 195 | #size-cells = <0>; |
| 195 | 196 | ||
| @@ -210,7 +211,8 @@ | |||
| 210 | &sd_emmc_b { | 211 | &sd_emmc_b { |
| 211 | status = "okay"; | 212 | status = "okay"; |
| 212 | pinctrl-0 = <&sdcard_pins>; | 213 | pinctrl-0 = <&sdcard_pins>; |
| 213 | pinctrl-names = "default"; | 214 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
| 215 | pinctrl-names = "default", "clk-gate"; | ||
| 214 | 216 | ||
| 215 | bus-width = <4>; | 217 | bus-width = <4>; |
| 216 | cap-sd-highspeed; | 218 | cap-sd-highspeed; |
| @@ -228,10 +230,10 @@ | |||
| 228 | &sd_emmc_c { | 230 | &sd_emmc_c { |
| 229 | status = "okay"; | 231 | status = "okay"; |
| 230 | pinctrl-0 = <&emmc_pins>; | 232 | pinctrl-0 = <&emmc_pins>; |
| 231 | pinctrl-names = "default"; | 233 | pinctrl-1 = <&emmc_clk_gate_pins>; |
| 234 | pinctrl-names = "default", "clk-gate"; | ||
| 232 | 235 | ||
| 233 | bus-width = <8>; | 236 | bus-width = <8>; |
| 234 | cap-sd-highspeed; | ||
| 235 | cap-mmc-highspeed; | 237 | cap-mmc-highspeed; |
| 236 | max-frequency = <200000000>; | 238 | max-frequency = <200000000>; |
| 237 | non-removable; | 239 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi index f3eea8e89d12..129af9068814 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi | |||
| @@ -95,7 +95,8 @@ | |||
| 95 | &sd_emmc_a { | 95 | &sd_emmc_a { |
| 96 | status = "okay"; | 96 | status = "okay"; |
| 97 | pinctrl-0 = <&sdio_pins>; | 97 | pinctrl-0 = <&sdio_pins>; |
| 98 | pinctrl-names = "default"; | 98 | pinctrl-1 = <&sdio_clk_gate_pins>; |
| 99 | pinctrl-names = "default", "clk-gate"; | ||
| 99 | #address-cells = <1>; | 100 | #address-cells = <1>; |
| 100 | #size-cells = <0>; | 101 | #size-cells = <0>; |
| 101 | 102 | ||
| @@ -116,7 +117,8 @@ | |||
| 116 | &sd_emmc_b { | 117 | &sd_emmc_b { |
| 117 | status = "okay"; | 118 | status = "okay"; |
| 118 | pinctrl-0 = <&sdcard_pins>; | 119 | pinctrl-0 = <&sdcard_pins>; |
| 119 | pinctrl-names = "default"; | 120 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
| 121 | pinctrl-names = "default", "clk-gate"; | ||
| 120 | 122 | ||
| 121 | bus-width = <4>; | 123 | bus-width = <4>; |
| 122 | cap-sd-highspeed; | 124 | cap-sd-highspeed; |
| @@ -134,10 +136,10 @@ | |||
| 134 | &sd_emmc_c { | 136 | &sd_emmc_c { |
| 135 | status = "okay"; | 137 | status = "okay"; |
| 136 | pinctrl-0 = <&emmc_pins>; | 138 | pinctrl-0 = <&emmc_pins>; |
| 137 | pinctrl-names = "default"; | 139 | pinctrl-1 = <&emmc_clk_gate_pins>; |
| 140 | pinctrl-names = "default", "clk-gate"; | ||
| 138 | 141 | ||
| 139 | bus-width = <8>; | 142 | bus-width = <8>; |
| 140 | cap-sd-highspeed; | ||
| 141 | cap-mmc-highspeed; | 143 | cap-mmc-highspeed; |
| 142 | max-frequency = <200000000>; | 144 | max-frequency = <200000000>; |
| 143 | non-removable; | 145 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi index d6876e64979e..d8dd3298b15c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi | |||
| @@ -281,6 +281,17 @@ | |||
| 281 | }; | 281 | }; |
| 282 | }; | 282 | }; |
| 283 | 283 | ||
| 284 | emmc_clk_gate_pins: emmc_clk_gate { | ||
| 285 | mux { | ||
| 286 | groups = "BOOT_8"; | ||
| 287 | function = "gpio_periphs"; | ||
| 288 | }; | ||
| 289 | cfg-pull-down { | ||
| 290 | pins = "BOOT_8"; | ||
| 291 | bias-pull-down; | ||
| 292 | }; | ||
| 293 | }; | ||
| 294 | |||
| 284 | nor_pins: nor { | 295 | nor_pins: nor { |
| 285 | mux { | 296 | mux { |
| 286 | groups = "nor_d", | 297 | groups = "nor_d", |
| @@ -319,6 +330,17 @@ | |||
| 319 | }; | 330 | }; |
| 320 | }; | 331 | }; |
| 321 | 332 | ||
| 333 | sdcard_clk_gate_pins: sdcard_clk_gate { | ||
| 334 | mux { | ||
| 335 | groups = "CARD_2"; | ||
| 336 | function = "gpio_periphs"; | ||
| 337 | }; | ||
| 338 | cfg-pull-down { | ||
| 339 | pins = "CARD_2"; | ||
| 340 | bias-pull-down; | ||
| 341 | }; | ||
| 342 | }; | ||
| 343 | |||
| 322 | sdio_pins: sdio { | 344 | sdio_pins: sdio { |
| 323 | mux { | 345 | mux { |
| 324 | groups = "sdio_d0", | 346 | groups = "sdio_d0", |
| @@ -331,6 +353,17 @@ | |||
| 331 | }; | 353 | }; |
| 332 | }; | 354 | }; |
| 333 | 355 | ||
| 356 | sdio_clk_gate_pins: sdio_clk_gate { | ||
| 357 | mux { | ||
| 358 | groups = "GPIOX_4"; | ||
| 359 | function = "gpio_periphs"; | ||
| 360 | }; | ||
| 361 | cfg-pull-down { | ||
| 362 | pins = "GPIOX_4"; | ||
| 363 | bias-pull-down; | ||
| 364 | }; | ||
| 365 | }; | ||
| 366 | |||
| 334 | sdio_irq_pins: sdio_irq { | 367 | sdio_irq_pins: sdio_irq { |
| 335 | mux { | 368 | mux { |
| 336 | groups = "sdio_irq"; | 369 | groups = "sdio_irq"; |
| @@ -603,21 +636,21 @@ | |||
| 603 | 636 | ||
| 604 | &sd_emmc_a { | 637 | &sd_emmc_a { |
| 605 | clocks = <&clkc CLKID_SD_EMMC_A>, | 638 | clocks = <&clkc CLKID_SD_EMMC_A>, |
| 606 | <&xtal>, | 639 | <&clkc CLKID_SD_EMMC_A_CLK0>, |
| 607 | <&clkc CLKID_FCLK_DIV2>; | 640 | <&clkc CLKID_FCLK_DIV2>; |
| 608 | clock-names = "core", "clkin0", "clkin1"; | 641 | clock-names = "core", "clkin0", "clkin1"; |
| 609 | }; | 642 | }; |
| 610 | 643 | ||
| 611 | &sd_emmc_b { | 644 | &sd_emmc_b { |
| 612 | clocks = <&clkc CLKID_SD_EMMC_B>, | 645 | clocks = <&clkc CLKID_SD_EMMC_B>, |
| 613 | <&xtal>, | 646 | <&clkc CLKID_SD_EMMC_B_CLK0>, |
| 614 | <&clkc CLKID_FCLK_DIV2>; | 647 | <&clkc CLKID_FCLK_DIV2>; |
| 615 | clock-names = "core", "clkin0", "clkin1"; | 648 | clock-names = "core", "clkin0", "clkin1"; |
| 616 | }; | 649 | }; |
| 617 | 650 | ||
| 618 | &sd_emmc_c { | 651 | &sd_emmc_c { |
| 619 | clocks = <&clkc CLKID_SD_EMMC_C>, | 652 | clocks = <&clkc CLKID_SD_EMMC_C>, |
| 620 | <&xtal>, | 653 | <&clkc CLKID_SD_EMMC_C_CLK0>, |
| 621 | <&clkc CLKID_FCLK_DIV2>; | 654 | <&clkc CLKID_FCLK_DIV2>; |
| 622 | clock-names = "core", "clkin0", "clkin1"; | 655 | clock-names = "core", "clkin0", "clkin1"; |
| 623 | }; | 656 | }; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts index 9b10c5f4f8c0..22c697732f66 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts | |||
| @@ -175,7 +175,8 @@ | |||
| 175 | &sd_emmc_b { | 175 | &sd_emmc_b { |
| 176 | status = "okay"; | 176 | status = "okay"; |
| 177 | pinctrl-0 = <&sdcard_pins>; | 177 | pinctrl-0 = <&sdcard_pins>; |
| 178 | pinctrl-names = "default"; | 178 | pinctrl-1 = <&sdcard_clk_gate_pins>; |
| 179 | pinctrl-names = "default", "clk-gate"; | ||
| 179 | 180 | ||
| 180 | bus-width = <4>; | 181 | bus-width = <4>; |
| 181 | cap-sd-highspeed; | 182 | cap-sd-highspeed; |
| @@ -193,10 +194,10 @@ | |||
| 193 | &sd_emmc_c { | 194 | &sd_emmc_c { |
| 194 | status = "okay"; | 195 | status = "okay"; |
| 195 | pinctrl-0 = <&emmc_pins>; | 196 | pinctrl-0 = <&emmc_pins>; |
| 196 | pinctrl-names = "default"; | 197 | pinctrl-1 = <&emmc_clk_gate_pins>; |
| 198 | pinctrl-names = "default", "clk-gate"; | ||
| 197 | 199 | ||
| 198 | bus-width = <8>; | 200 | bus-width = <8>; |
| 199 | cap-sd-highspeed; | ||
| 200 | cap-mmc-highspeed; | 201 | cap-mmc-highspeed; |
| 201 | max-frequency = <200000000>; | 202 | max-frequency = <200000000>; |
| 202 | non-removable; | 203 | non-removable; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts index 08f1dd69b679..470f72bb863c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts | |||
| @@ -220,7 +220,6 @@ | |||
| 220 | pinctrl-names = "default"; | 220 | pinctrl-names = "default"; |
| 221 | 221 | ||
| 222 | bus-width = <8>; | 222 | bus-width = <8>; |
| 223 | cap-sd-highspeed; | ||
| 224 | cap-mmc-highspeed; | 223 | cap-mmc-highspeed; |
| 225 | max-frequency = <200000000>; | 224 | max-frequency = <200000000>; |
| 226 | non-removable; | 225 | non-removable; |
diff --git a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi index 4d360713ed12..30d48ecf46e0 100644 --- a/arch/arm64/boot/dts/marvell/armada-ap806.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-ap806.dtsi | |||
| @@ -254,7 +254,7 @@ | |||
| 254 | 254 | ||
| 255 | ap_syscon: system-controller@6f4000 { | 255 | ap_syscon: system-controller@6f4000 { |
| 256 | compatible = "syscon", "simple-mfd"; | 256 | compatible = "syscon", "simple-mfd"; |
| 257 | reg = <0x6f4000 0x1000>; | 257 | reg = <0x6f4000 0x2000>; |
| 258 | 258 | ||
| 259 | ap_clk: clock { | 259 | ap_clk: clock { |
| 260 | compatible = "marvell,ap806-clock"; | 260 | compatible = "marvell,ap806-clock"; |
| @@ -265,7 +265,7 @@ | |||
| 265 | compatible = "marvell,ap806-pinctrl"; | 265 | compatible = "marvell,ap806-pinctrl"; |
| 266 | }; | 266 | }; |
| 267 | 267 | ||
| 268 | ap_gpio: gpio { | 268 | ap_gpio: gpio@1040 { |
| 269 | compatible = "marvell,armada-8k-gpio"; | 269 | compatible = "marvell,armada-8k-gpio"; |
| 270 | offset = <0x1040>; | 270 | offset = <0x1040>; |
| 271 | ngpios = <20>; | 271 | ngpios = <20>; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index e0518b4bc6c2..19fbaa5e7bdd 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi | |||
| @@ -113,8 +113,7 @@ | |||
| 113 | compatible = "arm,cortex-a53", "arm,armv8"; | 113 | compatible = "arm,cortex-a53", "arm,armv8"; |
| 114 | reg = <0x0 0x0>; | 114 | reg = <0x0 0x0>; |
| 115 | enable-method = "psci"; | 115 | enable-method = "psci"; |
| 116 | clocks = <&cru ARMCLKL>; | 116 | |
| 117 | operating-points-v2 = <&cluster0_opp>; | ||
| 118 | #cooling-cells = <2>; /* min followed by max */ | 117 | #cooling-cells = <2>; /* min followed by max */ |
| 119 | }; | 118 | }; |
| 120 | 119 | ||
| @@ -123,8 +122,6 @@ | |||
| 123 | compatible = "arm,cortex-a53", "arm,armv8"; | 122 | compatible = "arm,cortex-a53", "arm,armv8"; |
| 124 | reg = <0x0 0x1>; | 123 | reg = <0x0 0x1>; |
| 125 | enable-method = "psci"; | 124 | enable-method = "psci"; |
| 126 | clocks = <&cru ARMCLKL>; | ||
| 127 | operating-points-v2 = <&cluster0_opp>; | ||
| 128 | }; | 125 | }; |
| 129 | 126 | ||
| 130 | cpu_l2: cpu@2 { | 127 | cpu_l2: cpu@2 { |
| @@ -132,8 +129,6 @@ | |||
| 132 | compatible = "arm,cortex-a53", "arm,armv8"; | 129 | compatible = "arm,cortex-a53", "arm,armv8"; |
| 133 | reg = <0x0 0x2>; | 130 | reg = <0x0 0x2>; |
| 134 | enable-method = "psci"; | 131 | enable-method = "psci"; |
| 135 | clocks = <&cru ARMCLKL>; | ||
| 136 | operating-points-v2 = <&cluster0_opp>; | ||
| 137 | }; | 132 | }; |
| 138 | 133 | ||
| 139 | cpu_l3: cpu@3 { | 134 | cpu_l3: cpu@3 { |
| @@ -141,8 +136,6 @@ | |||
| 141 | compatible = "arm,cortex-a53", "arm,armv8"; | 136 | compatible = "arm,cortex-a53", "arm,armv8"; |
| 142 | reg = <0x0 0x3>; | 137 | reg = <0x0 0x3>; |
| 143 | enable-method = "psci"; | 138 | enable-method = "psci"; |
| 144 | clocks = <&cru ARMCLKL>; | ||
| 145 | operating-points-v2 = <&cluster0_opp>; | ||
| 146 | }; | 139 | }; |
| 147 | 140 | ||
| 148 | cpu_b0: cpu@100 { | 141 | cpu_b0: cpu@100 { |
| @@ -150,8 +143,7 @@ | |||
| 150 | compatible = "arm,cortex-a53", "arm,armv8"; | 143 | compatible = "arm,cortex-a53", "arm,armv8"; |
| 151 | reg = <0x0 0x100>; | 144 | reg = <0x0 0x100>; |
| 152 | enable-method = "psci"; | 145 | enable-method = "psci"; |
| 153 | clocks = <&cru ARMCLKB>; | 146 | |
| 154 | operating-points-v2 = <&cluster1_opp>; | ||
| 155 | #cooling-cells = <2>; /* min followed by max */ | 147 | #cooling-cells = <2>; /* min followed by max */ |
| 156 | }; | 148 | }; |
| 157 | 149 | ||
| @@ -160,8 +152,6 @@ | |||
| 160 | compatible = "arm,cortex-a53", "arm,armv8"; | 152 | compatible = "arm,cortex-a53", "arm,armv8"; |
| 161 | reg = <0x0 0x101>; | 153 | reg = <0x0 0x101>; |
| 162 | enable-method = "psci"; | 154 | enable-method = "psci"; |
| 163 | clocks = <&cru ARMCLKB>; | ||
| 164 | operating-points-v2 = <&cluster1_opp>; | ||
| 165 | }; | 155 | }; |
| 166 | 156 | ||
| 167 | cpu_b2: cpu@102 { | 157 | cpu_b2: cpu@102 { |
| @@ -169,8 +159,6 @@ | |||
| 169 | compatible = "arm,cortex-a53", "arm,armv8"; | 159 | compatible = "arm,cortex-a53", "arm,armv8"; |
| 170 | reg = <0x0 0x102>; | 160 | reg = <0x0 0x102>; |
| 171 | enable-method = "psci"; | 161 | enable-method = "psci"; |
| 172 | clocks = <&cru ARMCLKB>; | ||
| 173 | operating-points-v2 = <&cluster1_opp>; | ||
| 174 | }; | 162 | }; |
| 175 | 163 | ||
| 176 | cpu_b3: cpu@103 { | 164 | cpu_b3: cpu@103 { |
| @@ -178,62 +166,6 @@ | |||
| 178 | compatible = "arm,cortex-a53", "arm,armv8"; | 166 | compatible = "arm,cortex-a53", "arm,armv8"; |
| 179 | reg = <0x0 0x103>; | 167 | reg = <0x0 0x103>; |
| 180 | enable-method = "psci"; | 168 | enable-method = "psci"; |
| 181 | clocks = <&cru ARMCLKB>; | ||
| 182 | operating-points-v2 = <&cluster1_opp>; | ||
| 183 | }; | ||
| 184 | }; | ||
| 185 | |||
| 186 | cluster0_opp: opp-table0 { | ||
| 187 | compatible = "operating-points-v2"; | ||
| 188 | opp-shared; | ||
| 189 | |||
| 190 | opp00 { | ||
| 191 | opp-hz = /bits/ 64 <312000000>; | ||
| 192 | opp-microvolt = <950000>; | ||
| 193 | clock-latency-ns = <40000>; | ||
| 194 | }; | ||
| 195 | opp01 { | ||
| 196 | opp-hz = /bits/ 64 <408000000>; | ||
| 197 | opp-microvolt = <950000>; | ||
| 198 | }; | ||
| 199 | opp02 { | ||
| 200 | opp-hz = /bits/ 64 <600000000>; | ||
| 201 | opp-microvolt = <950000>; | ||
| 202 | }; | ||
| 203 | opp03 { | ||
| 204 | opp-hz = /bits/ 64 <816000000>; | ||
| 205 | opp-microvolt = <1025000>; | ||
| 206 | }; | ||
| 207 | opp04 { | ||
| 208 | opp-hz = /bits/ 64 <1008000000>; | ||
| 209 | opp-microvolt = <1125000>; | ||
| 210 | }; | ||
| 211 | }; | ||
| 212 | |||
| 213 | cluster1_opp: opp-table1 { | ||
| 214 | compatible = "operating-points-v2"; | ||
| 215 | opp-shared; | ||
| 216 | |||
| 217 | opp00 { | ||
| 218 | opp-hz = /bits/ 64 <312000000>; | ||
| 219 | opp-microvolt = <950000>; | ||
| 220 | clock-latency-ns = <40000>; | ||
| 221 | }; | ||
| 222 | opp01 { | ||
| 223 | opp-hz = /bits/ 64 <408000000>; | ||
| 224 | opp-microvolt = <950000>; | ||
| 225 | }; | ||
| 226 | opp02 { | ||
| 227 | opp-hz = /bits/ 64 <600000000>; | ||
| 228 | opp-microvolt = <950000>; | ||
| 229 | }; | ||
| 230 | opp03 { | ||
| 231 | opp-hz = /bits/ 64 <816000000>; | ||
| 232 | opp-microvolt = <975000>; | ||
| 233 | }; | ||
| 234 | opp04 { | ||
| 235 | opp-hz = /bits/ 64 <1008000000>; | ||
| 236 | opp-microvolt = <1050000>; | ||
| 237 | }; | 169 | }; |
| 238 | }; | 170 | }; |
| 239 | 171 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi index d79e9b3265b9..ab7629c5b856 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi | |||
| @@ -1629,9 +1629,9 @@ | |||
| 1629 | compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi"; | 1629 | compatible = "rockchip,rk3399-mipi-dsi", "snps,dw-mipi-dsi"; |
| 1630 | reg = <0x0 0xff960000 0x0 0x8000>; | 1630 | reg = <0x0 0xff960000 0x0 0x8000>; |
| 1631 | interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH 0>; | 1631 | interrupts = <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH 0>; |
| 1632 | clocks = <&cru SCLK_MIPIDPHY_REF>, <&cru PCLK_MIPI_DSI0>, | 1632 | clocks = <&cru SCLK_DPHY_PLL>, <&cru PCLK_MIPI_DSI0>, |
| 1633 | <&cru SCLK_DPHY_TX0_CFG>; | 1633 | <&cru SCLK_DPHY_TX0_CFG>, <&cru PCLK_VIO_GRF>; |
| 1634 | clock-names = "ref", "pclk", "phy_cfg"; | 1634 | clock-names = "ref", "pclk", "phy_cfg", "grf"; |
| 1635 | power-domains = <&power RK3399_PD_VIO>; | 1635 | power-domains = <&power RK3399_PD_VIO>; |
| 1636 | rockchip,grf = <&grf>; | 1636 | rockchip,grf = <&grf>; |
| 1637 | status = "disabled"; | 1637 | status = "disabled"; |
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 3585a5e26151..f7c4d2146aed 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
| @@ -95,16 +95,19 @@ | |||
| 95 | #define KERNEL_END _end | 95 | #define KERNEL_END _end |
| 96 | 96 | ||
| 97 | /* | 97 | /* |
| 98 | * The size of the KASAN shadow region. This should be 1/8th of the | 98 | * KASAN requires 1/8th of the kernel virtual address space for the shadow |
| 99 | * size of the entire kernel virtual address space. | 99 | * region. KASAN can bloat the stack significantly, so double the (minimum) |
| 100 | * stack size when KASAN is in use. | ||
| 100 | */ | 101 | */ |
| 101 | #ifdef CONFIG_KASAN | 102 | #ifdef CONFIG_KASAN |
| 102 | #define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3)) | 103 | #define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - 3)) |
| 104 | #define KASAN_THREAD_SHIFT 1 | ||
| 103 | #else | 105 | #else |
| 104 | #define KASAN_SHADOW_SIZE (0) | 106 | #define KASAN_SHADOW_SIZE (0) |
| 107 | #define KASAN_THREAD_SHIFT 0 | ||
| 105 | #endif | 108 | #endif |
| 106 | 109 | ||
| 107 | #define MIN_THREAD_SHIFT 14 | 110 | #define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT) |
| 108 | 111 | ||
| 109 | /* | 112 | /* |
| 110 | * VMAP'd stacks are allocated at page granularity, so we must ensure that such | 113 | * VMAP'd stacks are allocated at page granularity, so we must ensure that such |
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index f0e6d717885b..d06fbe4cd38d 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c | |||
| @@ -649,4 +649,4 @@ static int __init armv8_deprecated_init(void) | |||
| 649 | return 0; | 649 | return 0; |
| 650 | } | 650 | } |
| 651 | 651 | ||
| 652 | late_initcall(armv8_deprecated_init); | 652 | core_initcall(armv8_deprecated_init); |
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index cd52d365d1f0..21e2c95d24e7 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
| @@ -1307,4 +1307,4 @@ static int __init enable_mrs_emulation(void) | |||
| 1307 | return 0; | 1307 | return 0; |
| 1308 | } | 1308 | } |
| 1309 | 1309 | ||
| 1310 | late_initcall(enable_mrs_emulation); | 1310 | core_initcall(enable_mrs_emulation); |
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index f444f374bd7b..5d547deb6996 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c | |||
| @@ -444,4 +444,4 @@ static int __init fpsimd_init(void) | |||
| 444 | 444 | ||
| 445 | return 0; | 445 | return 0; |
| 446 | } | 446 | } |
| 447 | late_initcall(fpsimd_init); | 447 | core_initcall(fpsimd_init); |
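Editorial aside, not part of the diffs above: the three one-line changes to armv8_deprecated.c, cpufeature.c and fpsimd.c all promote an init routine from late_initcall() to core_initcall(), which runs considerably earlier in boot. For reference, a sketch of the relevant initcall ordering (an illustration, not the kernel's actual definition):

    /* Initcall levels in boot order (subset); smaller values run earlier. */
    enum initcall_level {
        LEVEL_CORE = 1,     /* core_initcall()      <- level used after this change */
        LEVEL_POSTCORE,     /* postcore_initcall()  */
        LEVEL_ARCH,         /* arch_initcall()      */
        LEVEL_SUBSYS,       /* subsys_initcall()    */
        LEVEL_FS,           /* fs_initcall()        */
        LEVEL_DEVICE,       /* device_initcall()    */
        LEVEL_LATE          /* late_initcall()      <- level used before this change */
    };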
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 2069e9bc0fca..b64958b23a7f 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
| @@ -97,7 +97,7 @@ static void data_abort_decode(unsigned int esr) | |||
| 97 | (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT, | 97 | (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT, |
| 98 | (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT); | 98 | (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT); |
| 99 | } else { | 99 | } else { |
| 100 | pr_alert(" ISV = 0, ISS = 0x%08lu\n", esr & ESR_ELx_ISS_MASK); | 100 | pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK); |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | pr_alert(" CM = %lu, WnR = %lu\n", | 103 | pr_alert(" CM = %lu, WnR = %lu\n", |
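An editorial illustration rather than part of the patch: the one-character change above replaces a decimal conversion with a hexadecimal one, since printing a bit-field in decimal behind a literal "0x" prefix is misleading. A minimal userspace C program showing the difference:

    #include <stdio.h>

    int main(void)
    {
        unsigned long iss = 0x02000000UL;   /* an example ISS bit pattern */

        printf("ISS = 0x%08lu\n", iss);     /* old format: "ISS = 0x33554432" (decimal) */
        printf("ISS = 0x%08lx\n", iss);     /* fixed format: "ISS = 0x02000000" */
        return 0;
    }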
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 87cde1e4b38c..0777f3a8a1f3 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig | |||
| @@ -194,6 +194,10 @@ config TIMER_DIVIDE | |||
| 194 | int "Timer divider (integer)" | 194 | int "Timer divider (integer)" |
| 195 | default "128" | 195 | default "128" |
| 196 | 196 | ||
| 197 | config CPU_BIG_ENDIAN | ||
| 198 | bool "Generate big endian code" | ||
| 199 | default n | ||
| 200 | |||
| 197 | config CPU_LITTLE_ENDIAN | 201 | config CPU_LITTLE_ENDIAN |
| 198 | bool "Generate little endian code" | 202 | bool "Generate little endian code" |
| 199 | default n | 203 | default n |
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index 647dd94a0c39..72b96f282689 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c | |||
| @@ -114,6 +114,15 @@ static void set_eit_vector_entries(void) | |||
| 114 | _flush_cache_copyback_all(); | 114 | _flush_cache_copyback_all(); |
| 115 | } | 115 | } |
| 116 | 116 | ||
| 117 | void abort(void) | ||
| 118 | { | ||
| 119 | BUG(); | ||
| 120 | |||
| 121 | /* if that doesn't kill us, halt */ | ||
| 122 | panic("Oops failed to kill thread"); | ||
| 123 | } | ||
| 124 | EXPORT_SYMBOL(abort); | ||
| 125 | |||
| 117 | void __init trap_init(void) | 126 | void __init trap_init(void) |
| 118 | { | 127 | { |
| 119 | set_eit_vector_entries(); | 128 | set_eit_vector_entries(); |
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 903f3bf48419..7e25c5cc353a 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h | |||
| @@ -155,14 +155,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
| 155 | return __cmpxchg_small(ptr, old, new, size); | 155 | return __cmpxchg_small(ptr, old, new, size); |
| 156 | 156 | ||
| 157 | case 4: | 157 | case 4: |
| 158 | return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new); | 158 | return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, |
| 159 | (u32)old, new); | ||
| 159 | 160 | ||
| 160 | case 8: | 161 | case 8: |
| 161 | /* lld/scd are only available for MIPS64 */ | 162 | /* lld/scd are only available for MIPS64 */ |
| 162 | if (!IS_ENABLED(CONFIG_64BIT)) | 163 | if (!IS_ENABLED(CONFIG_64BIT)) |
| 163 | return __cmpxchg_called_with_bad_pointer(); | 164 | return __cmpxchg_called_with_bad_pointer(); |
| 164 | 165 | ||
| 165 | return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new); | 166 | return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, |
| 167 | (u64)old, new); | ||
| 166 | 168 | ||
| 167 | default: | 169 | default: |
| 168 | return __cmpxchg_called_with_bad_pointer(); | 170 | return __cmpxchg_called_with_bad_pointer(); |
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c index 100f23dfa438..ac584c5823d0 100644 --- a/arch/mips/loongson32/common/platform.c +++ b/arch/mips/loongson32/common/platform.c | |||
| @@ -183,18 +183,20 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) | |||
| 183 | } | 183 | } |
| 184 | 184 | ||
| 185 | static struct plat_stmmacenet_data ls1x_eth0_pdata = { | 185 | static struct plat_stmmacenet_data ls1x_eth0_pdata = { |
| 186 | .bus_id = 0, | 186 | .bus_id = 0, |
| 187 | .phy_addr = -1, | 187 | .phy_addr = -1, |
| 188 | #if defined(CONFIG_LOONGSON1_LS1B) | 188 | #if defined(CONFIG_LOONGSON1_LS1B) |
| 189 | .interface = PHY_INTERFACE_MODE_MII, | 189 | .interface = PHY_INTERFACE_MODE_MII, |
| 190 | #elif defined(CONFIG_LOONGSON1_LS1C) | 190 | #elif defined(CONFIG_LOONGSON1_LS1C) |
| 191 | .interface = PHY_INTERFACE_MODE_RMII, | 191 | .interface = PHY_INTERFACE_MODE_RMII, |
| 192 | #endif | 192 | #endif |
| 193 | .mdio_bus_data = &ls1x_mdio_bus_data, | 193 | .mdio_bus_data = &ls1x_mdio_bus_data, |
| 194 | .dma_cfg = &ls1x_eth_dma_cfg, | 194 | .dma_cfg = &ls1x_eth_dma_cfg, |
| 195 | .has_gmac = 1, | 195 | .has_gmac = 1, |
| 196 | .tx_coe = 1, | 196 | .tx_coe = 1, |
| 197 | .init = ls1x_eth_mux_init, | 197 | .rx_queues_to_use = 1, |
| 198 | .tx_queues_to_use = 1, | ||
| 199 | .init = ls1x_eth_mux_init, | ||
| 198 | }; | 200 | }; |
| 199 | 201 | ||
| 200 | static struct resource ls1x_eth0_resources[] = { | 202 | static struct resource ls1x_eth0_resources[] = { |
| @@ -222,14 +224,16 @@ struct platform_device ls1x_eth0_pdev = { | |||
| 222 | 224 | ||
| 223 | #ifdef CONFIG_LOONGSON1_LS1B | 225 | #ifdef CONFIG_LOONGSON1_LS1B |
| 224 | static struct plat_stmmacenet_data ls1x_eth1_pdata = { | 226 | static struct plat_stmmacenet_data ls1x_eth1_pdata = { |
| 225 | .bus_id = 1, | 227 | .bus_id = 1, |
| 226 | .phy_addr = -1, | 228 | .phy_addr = -1, |
| 227 | .interface = PHY_INTERFACE_MODE_MII, | 229 | .interface = PHY_INTERFACE_MODE_MII, |
| 228 | .mdio_bus_data = &ls1x_mdio_bus_data, | 230 | .mdio_bus_data = &ls1x_mdio_bus_data, |
| 229 | .dma_cfg = &ls1x_eth_dma_cfg, | 231 | .dma_cfg = &ls1x_eth_dma_cfg, |
| 230 | .has_gmac = 1, | 232 | .has_gmac = 1, |
| 231 | .tx_coe = 1, | 233 | .tx_coe = 1, |
| 232 | .init = ls1x_eth_mux_init, | 234 | .rx_queues_to_use = 1, |
| 235 | .tx_queues_to_use = 1, | ||
| 236 | .init = ls1x_eth_mux_init, | ||
| 233 | }; | 237 | }; |
| 234 | 238 | ||
| 235 | static struct resource ls1x_eth1_resources[] = { | 239 | static struct resource ls1x_eth1_resources[] = { |
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 192542dbd972..16d9ef5a78c5 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c | |||
| @@ -2558,7 +2558,6 @@ dcopuop: | |||
| 2558 | break; | 2558 | break; |
| 2559 | default: | 2559 | default: |
| 2560 | /* Reserved R6 ops */ | 2560 | /* Reserved R6 ops */ |
| 2561 | pr_err("Reserved MIPS R6 CMP.condn.S operation\n"); | ||
| 2562 | return SIGILL; | 2561 | return SIGILL; |
| 2563 | } | 2562 | } |
| 2564 | } | 2563 | } |
| @@ -2719,7 +2718,6 @@ dcopuop: | |||
| 2719 | break; | 2718 | break; |
| 2720 | default: | 2719 | default: |
| 2721 | /* Reserved R6 ops */ | 2720 | /* Reserved R6 ops */ |
| 2722 | pr_err("Reserved MIPS R6 CMP.condn.D operation\n"); | ||
| 2723 | return SIGILL; | 2721 | return SIGILL; |
| 2724 | } | 2722 | } |
| 2725 | } | 2723 | } |
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 7646891c4e9b..01b7a87ea678 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c | |||
| @@ -667,7 +667,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
| 667 | { | 667 | { |
| 668 | int src, dst, r, td, ts, mem_off, b_off; | 668 | int src, dst, r, td, ts, mem_off, b_off; |
| 669 | bool need_swap, did_move, cmp_eq; | 669 | bool need_swap, did_move, cmp_eq; |
| 670 | unsigned int target; | 670 | unsigned int target = 0; |
| 671 | u64 t64; | 671 | u64 t64; |
| 672 | s64 t64s; | 672 | s64 t64s; |
| 673 | int bpf_op = BPF_OP(insn->code); | 673 | int bpf_op = BPF_OP(insn->code); |
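Editorial sketch, not part of the patch: judging from the change, `target` is assigned on only some of the paths through build_one_insn(), so the added initialiser keeps the remaining paths well defined and quiets the compiler's maybe-uninitialized warning. A stripped-down illustration of that pattern (the function below is invented for the example):

    /* Without the "= 0", a build with -Wmaybe-uninitialized warns and the
     * cond == 0 path would return an indeterminate value. */
    unsigned int pick_target(int cond)
    {
        unsigned int target = 0;

        if (cond)
            target = 42;
        return target;
    }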
diff --git a/arch/mips/tools/generic-board-config.sh b/arch/mips/tools/generic-board-config.sh index 5c4f93687039..654d652d7fa1 100755 --- a/arch/mips/tools/generic-board-config.sh +++ b/arch/mips/tools/generic-board-config.sh | |||
| @@ -30,8 +30,6 @@ cfg="$4" | |||
| 30 | boards_origin="$5" | 30 | boards_origin="$5" |
| 31 | shift 5 | 31 | shift 5 |
| 32 | 32 | ||
| 33 | cd "${srctree}" | ||
| 34 | |||
| 35 | # Only print Skipping... lines if the user explicitly specified BOARDS=. In the | 33 | # Only print Skipping... lines if the user explicitly specified BOARDS=. In the |
| 36 | # general case it only serves to obscure the useful output about what actually | 34 | # general case it only serves to obscure the useful output about what actually |
| 37 | # was included. | 35 | # was included. |
| @@ -48,7 +46,7 @@ environment*) | |||
| 48 | esac | 46 | esac |
| 49 | 47 | ||
| 50 | for board in $@; do | 48 | for board in $@; do |
| 51 | board_cfg="arch/mips/configs/generic/board-${board}.config" | 49 | board_cfg="${srctree}/arch/mips/configs/generic/board-${board}.config" |
| 52 | if [ ! -f "${board_cfg}" ]; then | 50 | if [ ! -f "${board_cfg}" ]; then |
| 53 | echo "WARNING: Board config '${board_cfg}' not found" | 51 | echo "WARNING: Board config '${board_cfg}' not found" |
| 54 | continue | 52 | continue |
| @@ -84,7 +82,7 @@ for board in $@; do | |||
| 84 | done || continue | 82 | done || continue |
| 85 | 83 | ||
| 86 | # Merge this board config fragment into our final config file | 84 | # Merge this board config fragment into our final config file |
| 87 | ./scripts/kconfig/merge_config.sh \ | 85 | ${srctree}/scripts/kconfig/merge_config.sh \ |
| 88 | -m -O ${objtree} ${cfg} ${board_cfg} \ | 86 | -m -O ${objtree} ${cfg} ${board_cfg} \ |
| 89 | | grep -Ev '^(#|Using)' | 87 | | grep -Ev '^(#|Using)' |
| 90 | done | 88 | done |
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index a45a67d526f8..30f92391a93e 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c | |||
| @@ -146,7 +146,7 @@ void machine_power_off(void) | |||
| 146 | 146 | ||
| 147 | /* prevent soft lockup/stalled CPU messages for endless loop. */ | 147 | /* prevent soft lockup/stalled CPU messages for endless loop. */ |
| 148 | rcu_sysrq_start(); | 148 | rcu_sysrq_start(); |
| 149 | lockup_detector_suspend(); | 149 | lockup_detector_soft_poweroff(); |
| 150 | for (;;); | 150 | for (;;); |
| 151 | } | 151 | } |
| 152 | 152 | ||
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 1df770e8cbe0..7275fed271af 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c | |||
| @@ -102,10 +102,10 @@ static void cpufeatures_flush_tlb(void) | |||
| 102 | case PVR_POWER8: | 102 | case PVR_POWER8: |
| 103 | case PVR_POWER8E: | 103 | case PVR_POWER8E: |
| 104 | case PVR_POWER8NVL: | 104 | case PVR_POWER8NVL: |
| 105 | __flush_tlb_power8(POWER8_TLB_SETS); | 105 | __flush_tlb_power8(TLB_INVAL_SCOPE_GLOBAL); |
| 106 | break; | 106 | break; |
| 107 | case PVR_POWER9: | 107 | case PVR_POWER9: |
| 108 | __flush_tlb_power9(POWER9_TLB_SETS_HASH); | 108 | __flush_tlb_power9(TLB_INVAL_SCOPE_GLOBAL); |
| 109 | break; | 109 | break; |
| 110 | default: | 110 | default: |
| 111 | pr_err("unknown CPU version for boot TLB flush\n"); | 111 | pr_err("unknown CPU version for boot TLB flush\n"); |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 48da0f5d2f7f..b82586c53560 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
| @@ -734,7 +734,29 @@ EXC_REAL(program_check, 0x700, 0x100) | |||
| 734 | EXC_VIRT(program_check, 0x4700, 0x100, 0x700) | 734 | EXC_VIRT(program_check, 0x4700, 0x100, 0x700) |
| 735 | TRAMP_KVM(PACA_EXGEN, 0x700) | 735 | TRAMP_KVM(PACA_EXGEN, 0x700) |
| 736 | EXC_COMMON_BEGIN(program_check_common) | 736 | EXC_COMMON_BEGIN(program_check_common) |
| 737 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | 737 | /* |
| 738 | * It's possible to receive a TM Bad Thing type program check with | ||
| 739 | * userspace register values (in particular r1), but with SRR1 reporting | ||
| 740 | * that we came from the kernel. Normally that would confuse the bad | ||
| 741 | * stack logic, and we would report a bad kernel stack pointer. Instead | ||
| 742 | * we switch to the emergency stack if we're taking a TM Bad Thing from | ||
| 743 | * the kernel. | ||
| 744 | */ | ||
| 745 | li r10,MSR_PR /* Build a mask of MSR_PR .. */ | ||
| 746 | oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */ | ||
| 747 | and r10,r10,r12 /* Mask SRR1 with that. */ | ||
| 748 | srdi r10,r10,8 /* Shift it so we can compare */ | ||
| 749 | cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */ | ||
| 750 | bne 1f /* If != go to normal path. */ | ||
| 751 | |||
| 752 | /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */ | ||
| 753 | andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */ | ||
| 754 | /* 3 in EXCEPTION_PROLOG_COMMON */ | ||
| 755 | mr r10,r1 /* Save r1 */ | ||
| 756 | ld r1,PACAEMERGSP(r13) /* Use emergency stack */ | ||
| 757 | subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ | ||
| 758 | b 3f /* Jump into the macro !! */ | ||
| 759 | 1: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | ||
| 738 | bl save_nvgprs | 760 | bl save_nvgprs |
| 739 | RECONCILE_IRQ_STATE(r10, r11) | 761 | RECONCILE_IRQ_STATE(r10, r11) |
| 740 | addi r3,r1,STACK_FRAME_OVERHEAD | 762 | addi r3,r1,STACK_FRAME_OVERHEAD |
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index b76ca198e09c..72f153c6f3fa 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c | |||
| @@ -624,5 +624,18 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs) | |||
| 624 | 624 | ||
| 625 | long __machine_check_early_realmode_p9(struct pt_regs *regs) | 625 | long __machine_check_early_realmode_p9(struct pt_regs *regs) |
| 626 | { | 626 | { |
| 627 | /* | ||
| 628 | * On POWER9 DD2.1 and below, it's possible to get a machine check | ||
| 629 | * caused by a paste instruction where only DSISR bit 25 is set. This | ||
| 630 | * will result in the MCE handler seeing an unknown event and the kernel | ||
| 631 | * crashing. An MCE that occurs like this is spurious, so we don't need | ||
| 632 | * to do anything in terms of servicing it. If there is something that | ||
| 633 | * needs to be serviced, the CPU will raise the MCE again with the | ||
| 634 | * correct DSISR so that it can be serviced properly. So detect this | ||
| 635 | * case and mark it as handled. | ||
| 636 | */ | ||
| 637 | if (SRR1_MC_LOADSTORE(regs->msr) && regs->dsisr == 0x02000000) | ||
| 638 | return 1; | ||
| 639 | |||
| 627 | return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table); | 640 | return mce_handle_error(regs, mce_p9_derror_table, mce_p9_ierror_table); |
| 628 | } | 641 | } |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 0ac741fae90e..2e3bc16d02b2 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
| @@ -904,9 +904,6 @@ void __init setup_arch(char **cmdline_p) | |||
| 904 | #endif | 904 | #endif |
| 905 | #endif | 905 | #endif |
| 906 | 906 | ||
| 907 | #ifdef CONFIG_PPC_64K_PAGES | ||
| 908 | init_mm.context.pte_frag = NULL; | ||
| 909 | #endif | ||
| 910 | #ifdef CONFIG_SPAPR_TCE_IOMMU | 907 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
| 911 | mm_iommu_init(&init_mm); | 908 | mm_iommu_init(&init_mm); |
| 912 | #endif | 909 | #endif |
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index c83c115858c1..b2c002993d78 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
| @@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, | |||
| 452 | if (MSR_TM_RESV(msr)) | 452 | if (MSR_TM_RESV(msr)) |
| 453 | return -EINVAL; | 453 | return -EINVAL; |
| 454 | 454 | ||
| 455 | /* pull in MSR TM from user context */ | 455 | /* pull in MSR TS bits from user context */ |
| 456 | regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); | 456 | regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); |
| 457 | 457 | ||
| 458 | /* | ||
| 459 | * Ensure that TM is enabled in regs->msr before we leave the signal | ||
| 460 | * handler. It could be the case that (a) user disabled the TM bit | ||
| 461 | * through the manipulation of the MSR bits in uc_mcontext or (b) the | ||
| 462 | * TM bit was disabled because a sufficient number of context switches | ||
| 463 | * happened whilst in the signal handler and load_tm overflowed, | ||
| 464 | * disabling the TM bit. In either case we can end up with an illegal | ||
| 465 | * TM state leading to a TM Bad Thing when we return to userspace. | ||
| 466 | */ | ||
| 467 | regs->msr |= MSR_TM; | ||
| 468 | |||
| 458 | /* pull in MSR LE from user context */ | 469 | /* pull in MSR LE from user context */ |
| 459 | regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); | 470 | regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); |
| 460 | 471 | ||
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S index c98e90b4ea7b..b4e2b7165f79 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S | |||
| @@ -181,34 +181,25 @@ _GLOBAL(ftrace_stub) | |||
| 181 | * - we have no stack frame and can not allocate one | 181 | * - we have no stack frame and can not allocate one |
| 182 | * - LR points back to the original caller (in A) | 182 | * - LR points back to the original caller (in A) |
| 183 | * - CTR holds the new NIP in C | 183 | * - CTR holds the new NIP in C |
| 184 | * - r0 & r12 are free | 184 | * - r0, r11 & r12 are free |
| 185 | * | ||
| 186 | * r0 can't be used as the base register for a DS-form load or store, so | ||
| 187 | * we temporarily shuffle r1 (stack pointer) into r0 and then put it back. | ||
| 188 | */ | 185 | */ |
| 189 | livepatch_handler: | 186 | livepatch_handler: |
| 190 | CURRENT_THREAD_INFO(r12, r1) | 187 | CURRENT_THREAD_INFO(r12, r1) |
| 191 | 188 | ||
| 192 | /* Save stack pointer into r0 */ | ||
| 193 | mr r0, r1 | ||
| 194 | |||
| 195 | /* Allocate 3 x 8 bytes */ | 189 | /* Allocate 3 x 8 bytes */ |
| 196 | ld r1, TI_livepatch_sp(r12) | 190 | ld r11, TI_livepatch_sp(r12) |
| 197 | addi r1, r1, 24 | 191 | addi r11, r11, 24 |
| 198 | std r1, TI_livepatch_sp(r12) | 192 | std r11, TI_livepatch_sp(r12) |
| 199 | 193 | ||
| 200 | /* Save toc & real LR on livepatch stack */ | 194 | /* Save toc & real LR on livepatch stack */ |
| 201 | std r2, -24(r1) | 195 | std r2, -24(r11) |
| 202 | mflr r12 | 196 | mflr r12 |
| 203 | std r12, -16(r1) | 197 | std r12, -16(r11) |
| 204 | 198 | ||
| 205 | /* Store stack end marker */ | 199 | /* Store stack end marker */ |
| 206 | lis r12, STACK_END_MAGIC@h | 200 | lis r12, STACK_END_MAGIC@h |
| 207 | ori r12, r12, STACK_END_MAGIC@l | 201 | ori r12, r12, STACK_END_MAGIC@l |
| 208 | std r12, -8(r1) | 202 | std r12, -8(r11) |
| 209 | |||
| 210 | /* Restore real stack pointer */ | ||
| 211 | mr r1, r0 | ||
| 212 | 203 | ||
| 213 | /* Put ctr in r12 for global entry and branch there */ | 204 | /* Put ctr in r12 for global entry and branch there */ |
| 214 | mfctr r12 | 205 | mfctr r12 |
| @@ -216,36 +207,30 @@ livepatch_handler: | |||
| 216 | 207 | ||
| 217 | /* | 208 | /* |
| 218 | * Now we are returning from the patched function to the original | 209 | * Now we are returning from the patched function to the original |
| 219 | * caller A. We are free to use r0 and r12, and we can use r2 until we | 210 | * caller A. We are free to use r11, r12 and we can use r2 until we |
| 220 | * restore it. | 211 | * restore it. |
| 221 | */ | 212 | */ |
| 222 | 213 | ||
| 223 | CURRENT_THREAD_INFO(r12, r1) | 214 | CURRENT_THREAD_INFO(r12, r1) |
| 224 | 215 | ||
| 225 | /* Save stack pointer into r0 */ | 216 | ld r11, TI_livepatch_sp(r12) |
| 226 | mr r0, r1 | ||
| 227 | |||
| 228 | ld r1, TI_livepatch_sp(r12) | ||
| 229 | 217 | ||
| 230 | /* Check stack marker hasn't been trashed */ | 218 | /* Check stack marker hasn't been trashed */ |
| 231 | lis r2, STACK_END_MAGIC@h | 219 | lis r2, STACK_END_MAGIC@h |
| 232 | ori r2, r2, STACK_END_MAGIC@l | 220 | ori r2, r2, STACK_END_MAGIC@l |
| 233 | ld r12, -8(r1) | 221 | ld r12, -8(r11) |
| 234 | 1: tdne r12, r2 | 222 | 1: tdne r12, r2 |
| 235 | EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0 | 223 | EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0 |
| 236 | 224 | ||
| 237 | /* Restore LR & toc from livepatch stack */ | 225 | /* Restore LR & toc from livepatch stack */ |
| 238 | ld r12, -16(r1) | 226 | ld r12, -16(r11) |
| 239 | mtlr r12 | 227 | mtlr r12 |
| 240 | ld r2, -24(r1) | 228 | ld r2, -24(r11) |
| 241 | 229 | ||
| 242 | /* Pop livepatch stack frame */ | 230 | /* Pop livepatch stack frame */ |
| 243 | CURRENT_THREAD_INFO(r12, r0) | 231 | CURRENT_THREAD_INFO(r12, r1) |
| 244 | subi r1, r1, 24 | 232 | subi r11, r11, 24 |
| 245 | std r1, TI_livepatch_sp(r12) | 233 | std r11, TI_livepatch_sp(r12) |
| 246 | |||
| 247 | /* Restore real stack pointer */ | ||
| 248 | mr r1, r0 | ||
| 249 | 234 | ||
| 250 | /* Return to original caller of live patched function */ | 235 | /* Return to original caller of live patched function */ |
| 251 | blr | 236 | blr |
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c index 2f6eadd9408d..c702a8981452 100644 --- a/arch/powerpc/kernel/watchdog.c +++ b/arch/powerpc/kernel/watchdog.c | |||
| @@ -310,9 +310,6 @@ static int start_wd_on_cpu(unsigned int cpu) | |||
| 310 | if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) | 310 | if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) |
| 311 | return 0; | 311 | return 0; |
| 312 | 312 | ||
| 313 | if (watchdog_suspended) | ||
| 314 | return 0; | ||
| 315 | |||
| 316 | if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) | 313 | if (!cpumask_test_cpu(cpu, &watchdog_cpumask)) |
| 317 | return 0; | 314 | return 0; |
| 318 | 315 | ||
| @@ -358,36 +355,39 @@ static void watchdog_calc_timeouts(void) | |||
| 358 | wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5; | 355 | wd_timer_period_ms = watchdog_thresh * 1000 * 2 / 5; |
| 359 | } | 356 | } |
| 360 | 357 | ||
| 361 | void watchdog_nmi_reconfigure(void) | 358 | void watchdog_nmi_stop(void) |
| 362 | { | 359 | { |
| 363 | int cpu; | 360 | int cpu; |
| 364 | 361 | ||
| 365 | watchdog_calc_timeouts(); | ||
| 366 | |||
| 367 | for_each_cpu(cpu, &wd_cpus_enabled) | 362 | for_each_cpu(cpu, &wd_cpus_enabled) |
| 368 | stop_wd_on_cpu(cpu); | 363 | stop_wd_on_cpu(cpu); |
| 364 | } | ||
| 369 | 365 | ||
| 366 | void watchdog_nmi_start(void) | ||
| 367 | { | ||
| 368 | int cpu; | ||
| 369 | |||
| 370 | watchdog_calc_timeouts(); | ||
| 370 | for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask) | 371 | for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask) |
| 371 | start_wd_on_cpu(cpu); | 372 | start_wd_on_cpu(cpu); |
| 372 | } | 373 | } |
| 373 | 374 | ||
| 374 | /* | 375 | /* |
| 375 | * This runs after lockup_detector_init() which sets up watchdog_cpumask. | 376 | * Invoked from core watchdog init. |
| 376 | */ | 377 | */ |
| 377 | static int __init powerpc_watchdog_init(void) | 378 | int __init watchdog_nmi_probe(void) |
| 378 | { | 379 | { |
| 379 | int err; | 380 | int err; |
| 380 | 381 | ||
| 381 | watchdog_calc_timeouts(); | 382 | err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, |
| 382 | 383 | "powerpc/watchdog:online", | |
| 383 | err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "powerpc/watchdog:online", | 384 | start_wd_on_cpu, stop_wd_on_cpu); |
| 384 | start_wd_on_cpu, stop_wd_on_cpu); | 385 | if (err < 0) { |
| 385 | if (err < 0) | ||
| 386 | pr_warn("Watchdog could not be initialized"); | 386 | pr_warn("Watchdog could not be initialized"); |
| 387 | 387 | return err; | |
| 388 | } | ||
| 388 | return 0; | 389 | return 0; |
| 389 | } | 390 | } |
| 390 | arch_initcall(powerpc_watchdog_init); | ||
| 391 | 391 | ||
| 392 | static void handle_backtrace_ipi(struct pt_regs *regs) | 392 | static void handle_backtrace_ipi(struct pt_regs *regs) |
| 393 | { | 393 | { |
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c index 13304622ab1c..bf457843e032 100644 --- a/arch/powerpc/kvm/book3s_xive.c +++ b/arch/powerpc/kvm/book3s_xive.c | |||
| @@ -622,7 +622,7 @@ int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server, | |||
| 622 | return -EINVAL; | 622 | return -EINVAL; |
| 623 | state = &sb->irq_state[idx]; | 623 | state = &sb->irq_state[idx]; |
| 624 | arch_spin_lock(&sb->lock); | 624 | arch_spin_lock(&sb->lock); |
| 625 | *server = state->guest_server; | 625 | *server = state->act_server; |
| 626 | *priority = state->guest_priority; | 626 | *priority = state->guest_priority; |
| 627 | arch_spin_unlock(&sb->lock); | 627 | arch_spin_unlock(&sb->lock); |
| 628 | 628 | ||
| @@ -1331,7 +1331,7 @@ static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr) | |||
| 1331 | xive->saved_src_count++; | 1331 | xive->saved_src_count++; |
| 1332 | 1332 | ||
| 1333 | /* Convert saved state into something compatible with xics */ | 1333 | /* Convert saved state into something compatible with xics */ |
| 1334 | val = state->guest_server; | 1334 | val = state->act_server; |
| 1335 | prio = state->saved_scan_prio; | 1335 | prio = state->saved_scan_prio; |
| 1336 | 1336 | ||
| 1337 | if (prio == MASKED) { | 1337 | if (prio == MASKED) { |
| @@ -1507,7 +1507,6 @@ static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr) | |||
| 1507 | /* First convert prio and mark interrupt as untargetted */ | 1507 | /* First convert prio and mark interrupt as untargetted */ |
| 1508 | act_prio = xive_prio_from_guest(guest_prio); | 1508 | act_prio = xive_prio_from_guest(guest_prio); |
| 1509 | state->act_priority = MASKED; | 1509 | state->act_priority = MASKED; |
| 1510 | state->guest_server = server; | ||
| 1511 | 1510 | ||
| 1512 | /* | 1511 | /* |
| 1513 | * We need to drop the lock due to the mutex below. Hopefully | 1512 | * We need to drop the lock due to the mutex below. Hopefully |
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h index 5938f7644dc1..6ba63f8e8a61 100644 --- a/arch/powerpc/kvm/book3s_xive.h +++ b/arch/powerpc/kvm/book3s_xive.h | |||
| @@ -35,7 +35,6 @@ struct kvmppc_xive_irq_state { | |||
| 35 | struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */ | 35 | struct xive_irq_data *pt_data; /* XIVE Pass-through associated data */ |
| 36 | 36 | ||
| 37 | /* Targetting as set by guest */ | 37 | /* Targetting as set by guest */ |
| 38 | u32 guest_server; /* Current guest selected target */ | ||
| 39 | u8 guest_priority; /* Guest set priority */ | 38 | u8 guest_priority; /* Guest set priority */ |
| 40 | u8 saved_priority; /* Saved priority when masking */ | 39 | u8 saved_priority; /* Saved priority when masking */ |
| 41 | 40 | ||
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 5e8418c28bd8..f208f560aecd 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c | |||
| @@ -1684,11 +1684,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, | |||
| 1684 | * Logical instructions | 1684 | * Logical instructions |
| 1685 | */ | 1685 | */ |
| 1686 | case 26: /* cntlzw */ | 1686 | case 26: /* cntlzw */ |
| 1687 | op->val = __builtin_clz((unsigned int) regs->gpr[rd]); | 1687 | val = (unsigned int) regs->gpr[rd]; |
| 1688 | op->val = ( val ? __builtin_clz(val) : 32 ); | ||
| 1688 | goto logical_done; | 1689 | goto logical_done; |
| 1689 | #ifdef __powerpc64__ | 1690 | #ifdef __powerpc64__ |
| 1690 | case 58: /* cntlzd */ | 1691 | case 58: /* cntlzd */ |
| 1691 | op->val = __builtin_clzl(regs->gpr[rd]); | 1692 | val = regs->gpr[rd]; |
| 1693 | op->val = ( val ? __builtin_clzl(val) : 64 ); | ||
| 1692 | goto logical_done; | 1694 | goto logical_done; |
| 1693 | #endif | 1695 | #endif |
| 1694 | case 28: /* and */ | 1696 | case 28: /* and */ |
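An editorial aside, not part of the patch: GCC's __builtin_clz() and __builtin_clzl() are documented as undefined for a zero argument, while the cntlzw/cntlzd instructions being emulated return 32 and 64 respectively for a zero operand, which is exactly what the added ternaries restore. A standalone sketch of the corrected behaviour (assuming a 64-bit unsigned long, as on powerpc64):

    /* Count leading zeros with the zero-operand case handled explicitly. */
    unsigned int emulated_cntlzw(unsigned int val)
    {
        return val ? (unsigned int)__builtin_clz(val) : 32;
    }

    unsigned int emulated_cntlzd(unsigned long val)
    {
        return val ? (unsigned int)__builtin_clzl(val) : 64;
    }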
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index b95c584ce19d..a51df9ef529d 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
| @@ -1438,7 +1438,6 @@ out: | |||
| 1438 | 1438 | ||
| 1439 | int arch_update_cpu_topology(void) | 1439 | int arch_update_cpu_topology(void) |
| 1440 | { | 1440 | { |
| 1441 | lockdep_assert_cpus_held(); | ||
| 1442 | return numa_update_cpu_topology(true); | 1441 | return numa_update_cpu_topology(true); |
| 1443 | } | 1442 | } |
| 1444 | 1443 | ||
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 65eda1997c3f..f6c7f54c0515 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c | |||
| @@ -361,9 +361,9 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot) | |||
| 361 | break; | 361 | break; |
| 362 | } | 362 | } |
| 363 | wmb(); | 363 | wmb(); |
| 364 | local_irq_restore(flags); | ||
| 364 | flush_tlb_kernel_range((unsigned long)page_address(start), | 365 | flush_tlb_kernel_range((unsigned long)page_address(start), |
| 365 | (unsigned long)page_address(page)); | 366 | (unsigned long)page_address(page)); |
| 366 | local_irq_restore(flags); | ||
| 367 | return err; | 367 | return err; |
| 368 | } | 368 | } |
| 369 | 369 | ||
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 9ccac86f3463..88126245881b 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c | |||
| @@ -399,6 +399,20 @@ static void nest_imc_counters_release(struct perf_event *event) | |||
| 399 | 399 | ||
| 400 | /* Take the mutex lock for this node and then decrement the reference count */ | 400 | /* Take the mutex lock for this node and then decrement the reference count */ |
| 401 | mutex_lock(&ref->lock); | 401 | mutex_lock(&ref->lock); |
| 402 | if (ref->refc == 0) { | ||
| 403 | /* | ||
| 404 | * The scenario where this is true is, when perf session is | ||
| 405 | * started, followed by offlining of all cpus in a given node. | ||
| 406 | * | ||
| 407 | * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline() | ||
| 408 | * function set the ref->count to zero, if the cpu which is | ||
| 409 | * about to offline is the last cpu in a given node and make | ||
| 410 | * an OPAL call to disable the engine in that node. | ||
| 411 | * | ||
| 412 | */ | ||
| 413 | mutex_unlock(&ref->lock); | ||
| 414 | return; | ||
| 415 | } | ||
| 402 | ref->refc--; | 416 | ref->refc--; |
| 403 | if (ref->refc == 0) { | 417 | if (ref->refc == 0) { |
| 404 | rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, | 418 | rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, |
| @@ -523,8 +537,8 @@ static int core_imc_mem_init(int cpu, int size) | |||
| 523 | 537 | ||
| 524 | /* We need only vbase for core counters */ | 538 | /* We need only vbase for core counters */ |
| 525 | mem_info->vbase = page_address(alloc_pages_node(phys_id, | 539 | mem_info->vbase = page_address(alloc_pages_node(phys_id, |
| 526 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, | 540 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | |
| 527 | get_order(size))); | 541 | __GFP_NOWARN, get_order(size))); |
| 528 | if (!mem_info->vbase) | 542 | if (!mem_info->vbase) |
| 529 | return -ENOMEM; | 543 | return -ENOMEM; |
| 530 | 544 | ||
| @@ -646,6 +660,20 @@ static void core_imc_counters_release(struct perf_event *event) | |||
| 646 | return; | 660 | return; |
| 647 | 661 | ||
| 648 | mutex_lock(&ref->lock); | 662 | mutex_lock(&ref->lock); |
| 663 | if (ref->refc == 0) { | ||
| 664 | /* | ||
| 665 | * The scenario where this is true is, when perf session is | ||
| 666 | * started, followed by offlining of all cpus in a given core. | ||
| 667 | * | ||
| 668 | * In the cpuhotplug offline path, ppc_core_imc_cpu_offline() | ||
| 669 | * function set the ref->count to zero, if the cpu which is | ||
| 670 | * about to offline is the last cpu in a given core and make | ||
| 671 | * an OPAL call to disable the engine in that core. | ||
| 672 | * | ||
| 673 | */ | ||
| 674 | mutex_unlock(&ref->lock); | ||
| 675 | return; | ||
| 676 | } | ||
| 649 | ref->refc--; | 677 | ref->refc--; |
| 650 | if (ref->refc == 0) { | 678 | if (ref->refc == 0) { |
| 651 | rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, | 679 | rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, |
| @@ -763,8 +791,8 @@ static int thread_imc_mem_alloc(int cpu_id, int size) | |||
| 763 | * free the memory in cpu offline path. | 791 | * free the memory in cpu offline path. |
| 764 | */ | 792 | */ |
| 765 | local_mem = page_address(alloc_pages_node(phys_id, | 793 | local_mem = page_address(alloc_pages_node(phys_id, |
| 766 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, | 794 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | |
| 767 | get_order(size))); | 795 | __GFP_NOWARN, get_order(size))); |
| 768 | if (!local_mem) | 796 | if (!local_mem) |
| 769 | return -ENOMEM; | 797 | return -ENOMEM; |
| 770 | 798 | ||
| @@ -1148,7 +1176,8 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr) | |||
| 1148 | } | 1176 | } |
| 1149 | 1177 | ||
| 1150 | /* Only free the attr_groups which are dynamically allocated */ | 1178 | /* Only free the attr_groups which are dynamically allocated */ |
| 1151 | kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs); | 1179 | if (pmu_ptr->attr_groups[IMC_EVENT_ATTR]) |
| 1180 | kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs); | ||
| 1152 | kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]); | 1181 | kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]); |
| 1153 | kfree(pmu_ptr); | 1182 | kfree(pmu_ptr); |
| 1154 | return; | 1183 | return; |
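Editorial sketch, not part of the patch: both hunks above add the same guard so that the event-release path cannot drive a reference count negative after the CPU-offline path has already dropped it to zero and stopped the engine. A self-contained userspace rendering of the pattern; the struct and the stop_engine() callback are invented stand-ins for the nest/core ref and the OPAL stop call:

    #include <pthread.h>

    struct imc_ref {
        pthread_mutex_t lock;
        int refc;
    };

    void counters_release(struct imc_ref *ref, void (*stop_engine)(void))
    {
        pthread_mutex_lock(&ref->lock);
        if (ref->refc == 0) {               /* already torn down by the offline path */
            pthread_mutex_unlock(&ref->lock);
            return;
        }
        if (--ref->refc == 0)
            stop_engine();                  /* stand-in for opal_imc_counters_stop() */
        pthread_mutex_unlock(&ref->lock);
    }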
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 897aa1400eb8..bbb73aa0eb8f 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
| @@ -272,7 +272,15 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) | |||
| 272 | #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE | 272 | #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE |
| 273 | static unsigned long pnv_memory_block_size(void) | 273 | static unsigned long pnv_memory_block_size(void) |
| 274 | { | 274 | { |
| 275 | return 256UL * 1024 * 1024; | 275 | /* |
| 276 | * We map the kernel linear region with 1GB large pages on radix. For | ||
| 277 | * memory hot unplug to work our memory block size must be at least | ||
| 278 | * this size. | ||
| 279 | */ | ||
| 280 | if (radix_enabled()) | ||
| 281 | return 1UL * 1024 * 1024 * 1024; | ||
| 282 | else | ||
| 283 | return 256UL * 1024 * 1024; | ||
| 276 | } | 284 | } |
| 277 | #endif | 285 | #endif |
| 278 | 286 | ||
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 9234be1e66f5..5011ffea4e4b 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c | |||
| @@ -71,6 +71,8 @@ | |||
| 71 | #define RIWAR_WRTYP_ALLOC 0x00006000 | 71 | #define RIWAR_WRTYP_ALLOC 0x00006000 |
| 72 | #define RIWAR_SIZE_MASK 0x0000003F | 72 | #define RIWAR_SIZE_MASK 0x0000003F |
| 73 | 73 | ||
| 74 | static DEFINE_SPINLOCK(fsl_rio_config_lock); | ||
| 75 | |||
| 74 | #define __fsl_read_rio_config(x, addr, err, op) \ | 76 | #define __fsl_read_rio_config(x, addr, err, op) \ |
| 75 | __asm__ __volatile__( \ | 77 | __asm__ __volatile__( \ |
| 76 | "1: "op" %1,0(%2)\n" \ | 78 | "1: "op" %1,0(%2)\n" \ |
| @@ -184,6 +186,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, | |||
| 184 | u8 hopcount, u32 offset, int len, u32 *val) | 186 | u8 hopcount, u32 offset, int len, u32 *val) |
| 185 | { | 187 | { |
| 186 | struct rio_priv *priv = mport->priv; | 188 | struct rio_priv *priv = mport->priv; |
| 189 | unsigned long flags; | ||
| 187 | u8 *data; | 190 | u8 *data; |
| 188 | u32 rval, err = 0; | 191 | u32 rval, err = 0; |
| 189 | 192 | ||
| @@ -197,6 +200,8 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, | |||
| 197 | if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) | 200 | if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) |
| 198 | return -EINVAL; | 201 | return -EINVAL; |
| 199 | 202 | ||
| 203 | spin_lock_irqsave(&fsl_rio_config_lock, flags); | ||
| 204 | |||
| 200 | out_be32(&priv->maint_atmu_regs->rowtar, | 205 | out_be32(&priv->maint_atmu_regs->rowtar, |
| 201 | (destid << 22) | (hopcount << 12) | (offset >> 12)); | 206 | (destid << 22) | (hopcount << 12) | (offset >> 12)); |
| 202 | out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); | 207 | out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); |
| @@ -213,6 +218,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, | |||
| 213 | __fsl_read_rio_config(rval, data, err, "lwz"); | 218 | __fsl_read_rio_config(rval, data, err, "lwz"); |
| 214 | break; | 219 | break; |
| 215 | default: | 220 | default: |
| 221 | spin_unlock_irqrestore(&fsl_rio_config_lock, flags); | ||
| 216 | return -EINVAL; | 222 | return -EINVAL; |
| 217 | } | 223 | } |
| 218 | 224 | ||
| @@ -221,6 +227,7 @@ fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid, | |||
| 221 | err, destid, hopcount, offset); | 227 | err, destid, hopcount, offset); |
| 222 | } | 228 | } |
| 223 | 229 | ||
| 230 | spin_unlock_irqrestore(&fsl_rio_config_lock, flags); | ||
| 224 | *val = rval; | 231 | *val = rval; |
| 225 | 232 | ||
| 226 | return err; | 233 | return err; |
| @@ -244,7 +251,10 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, | |||
| 244 | u8 hopcount, u32 offset, int len, u32 val) | 251 | u8 hopcount, u32 offset, int len, u32 val) |
| 245 | { | 252 | { |
| 246 | struct rio_priv *priv = mport->priv; | 253 | struct rio_priv *priv = mport->priv; |
| 254 | unsigned long flags; | ||
| 247 | u8 *data; | 255 | u8 *data; |
| 256 | int ret = 0; | ||
| 257 | |||
| 248 | pr_debug | 258 | pr_debug |
| 249 | ("fsl_rio_config_write:" | 259 | ("fsl_rio_config_write:" |
| 250 | " index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", | 260 | " index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n", |
| @@ -255,6 +265,8 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, | |||
| 255 | if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) | 265 | if (offset > (0x1000000 - len) || !IS_ALIGNED(offset, len)) |
| 256 | return -EINVAL; | 266 | return -EINVAL; |
| 257 | 267 | ||
| 268 | spin_lock_irqsave(&fsl_rio_config_lock, flags); | ||
| 269 | |||
| 258 | out_be32(&priv->maint_atmu_regs->rowtar, | 270 | out_be32(&priv->maint_atmu_regs->rowtar, |
| 259 | (destid << 22) | (hopcount << 12) | (offset >> 12)); | 271 | (destid << 22) | (hopcount << 12) | (offset >> 12)); |
| 260 | out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); | 272 | out_be32(&priv->maint_atmu_regs->rowtear, (destid >> 10)); |
| @@ -271,10 +283,11 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, | |||
| 271 | out_be32((u32 *) data, val); | 283 | out_be32((u32 *) data, val); |
| 272 | break; | 284 | break; |
| 273 | default: | 285 | default: |
| 274 | return -EINVAL; | 286 | ret = -EINVAL; |
| 275 | } | 287 | } |
| 288 | spin_unlock_irqrestore(&fsl_rio_config_lock, flags); | ||
| 276 | 289 | ||
| 277 | return 0; | 290 | return ret; |
| 278 | } | 291 | } |
| 279 | 292 | ||
| 280 | static void fsl_rio_inbound_mem_init(struct rio_priv *priv) | 293 | static void fsl_rio_inbound_mem_init(struct rio_priv *priv) |
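Editorial sketch, not part of the patch: the new spinlock serialises all users of the single shared maintenance window, and because the invalid-length case in fsl_rio_config_write() now sits inside the locked region, its early return is converted into a `ret` assignment so that every exit releases the lock. A userspace illustration of that single-exit locking shape (names invented for the example):

    #include <pthread.h>

    static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

    int config_write(int len)
    {
        int ret = 0;

        pthread_mutex_lock(&cfg_lock);
        switch (len) {
        case 1:
        case 2:
        case 4:
            /* program the shared maintenance window here */
            break;
        default:
            ret = -1;   /* a direct return here would leave cfg_lock held */
        }
        pthread_mutex_unlock(&cfg_lock);
        return ret;
    }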
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c index ab7a74c75be8..88b35a3dcdc5 100644 --- a/arch/powerpc/sysdev/fsl_rmu.c +++ b/arch/powerpc/sysdev/fsl_rmu.c | |||
| @@ -104,6 +104,8 @@ | |||
| 104 | 104 | ||
| 105 | #define DOORBELL_MESSAGE_SIZE 0x08 | 105 | #define DOORBELL_MESSAGE_SIZE 0x08 |
| 106 | 106 | ||
| 107 | static DEFINE_SPINLOCK(fsl_rio_doorbell_lock); | ||
| 108 | |||
| 107 | struct rio_msg_regs { | 109 | struct rio_msg_regs { |
| 108 | u32 omr; | 110 | u32 omr; |
| 109 | u32 osr; | 111 | u32 osr; |
| @@ -626,9 +628,13 @@ err_out: | |||
| 626 | int fsl_rio_doorbell_send(struct rio_mport *mport, | 628 | int fsl_rio_doorbell_send(struct rio_mport *mport, |
| 627 | int index, u16 destid, u16 data) | 629 | int index, u16 destid, u16 data) |
| 628 | { | 630 | { |
| 631 | unsigned long flags; | ||
| 632 | |||
| 629 | pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", | 633 | pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n", |
| 630 | index, destid, data); | 634 | index, destid, data); |
| 631 | 635 | ||
| 636 | spin_lock_irqsave(&fsl_rio_doorbell_lock, flags); | ||
| 637 | |||
| 632 | /* In the serial version silicons, such as MPC8548, MPC8641, | 638 | /* In the serial version silicons, such as MPC8548, MPC8641, |
| 633 | * below operations is must be. | 639 | * below operations is must be. |
| 634 | */ | 640 | */ |
| @@ -638,6 +644,8 @@ int fsl_rio_doorbell_send(struct rio_mport *mport, | |||
| 638 | out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data); | 644 | out_be32(&dbell->dbell_regs->oddatr, (index << 20) | data); |
| 639 | out_be32(&dbell->dbell_regs->odmr, 0x00000001); | 645 | out_be32(&dbell->dbell_regs->odmr, 0x00000001); |
| 640 | 646 | ||
| 647 | spin_unlock_irqrestore(&fsl_rio_doorbell_lock, flags); | ||
| 648 | |||
| 641 | return 0; | 649 | return 0; |
| 642 | } | 650 | } |
| 643 | 651 | ||
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index f387318678b9..a3b8d7d1316e 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c | |||
| @@ -1402,6 +1402,14 @@ void xive_teardown_cpu(void) | |||
| 1402 | 1402 | ||
| 1403 | if (xive_ops->teardown_cpu) | 1403 | if (xive_ops->teardown_cpu) |
| 1404 | xive_ops->teardown_cpu(cpu, xc); | 1404 | xive_ops->teardown_cpu(cpu, xc); |
| 1405 | |||
| 1406 | #ifdef CONFIG_SMP | ||
| 1407 | /* Get rid of IPI */ | ||
| 1408 | xive_cleanup_cpu_ipi(cpu, xc); | ||
| 1409 | #endif | ||
| 1410 | |||
| 1411 | /* Disable and free the queues */ | ||
| 1412 | xive_cleanup_cpu_queues(cpu, xc); | ||
| 1405 | } | 1413 | } |
| 1406 | 1414 | ||
| 1407 | void xive_kexec_teardown_cpu(int secondary) | 1415 | void xive_kexec_teardown_cpu(int secondary) |
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c index f24a70bc6855..d9c4c9366049 100644 --- a/arch/powerpc/sysdev/xive/spapr.c +++ b/arch/powerpc/sysdev/xive/spapr.c | |||
| @@ -431,7 +431,11 @@ static int xive_spapr_get_ipi(unsigned int cpu, struct xive_cpu *xc) | |||
| 431 | 431 | ||
| 432 | static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc) | 432 | static void xive_spapr_put_ipi(unsigned int cpu, struct xive_cpu *xc) |
| 433 | { | 433 | { |
| 434 | if (!xc->hw_ipi) | ||
| 435 | return; | ||
| 436 | |||
| 434 | xive_irq_bitmap_free(xc->hw_ipi); | 437 | xive_irq_bitmap_free(xc->hw_ipi); |
| 438 | xc->hw_ipi = 0; | ||
| 435 | } | 439 | } |
| 436 | #endif /* CONFIG_SMP */ | 440 | #endif /* CONFIG_SMP */ |
| 437 | 441 | ||
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7264.h b/arch/sh/include/cpu-sh2a/cpu/sh7264.h index 4d1ef6d74bd6..2ae0e938b657 100644 --- a/arch/sh/include/cpu-sh2a/cpu/sh7264.h +++ b/arch/sh/include/cpu-sh2a/cpu/sh7264.h | |||
| @@ -43,9 +43,7 @@ enum { | |||
| 43 | GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4, | 43 | GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4, |
| 44 | GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0, | 44 | GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0, |
| 45 | 45 | ||
| 46 | /* Port H */ | 46 | /* Port H - Port H does not have a Data Register */ |
| 47 | GPIO_PH7, GPIO_PH6, GPIO_PH5, GPIO_PH4, | ||
| 48 | GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0, | ||
| 49 | 47 | ||
| 50 | /* Port I - not on device */ | 48 | /* Port I - not on device */ |
| 51 | 49 | ||
diff --git a/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/arch/sh/include/cpu-sh2a/cpu/sh7269.h index 2a0ca8780f0d..13c495a9fc00 100644 --- a/arch/sh/include/cpu-sh2a/cpu/sh7269.h +++ b/arch/sh/include/cpu-sh2a/cpu/sh7269.h | |||
| @@ -45,9 +45,7 @@ enum { | |||
| 45 | GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4, | 45 | GPIO_PG7, GPIO_PG6, GPIO_PG5, GPIO_PG4, |
| 46 | GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0, | 46 | GPIO_PG3, GPIO_PG2, GPIO_PG1, GPIO_PG0, |
| 47 | 47 | ||
| 48 | /* Port H */ | 48 | /* Port H - Port H does not have a Data Register */ |
| 49 | GPIO_PH7, GPIO_PH6, GPIO_PH5, GPIO_PH4, | ||
| 50 | GPIO_PH3, GPIO_PH2, GPIO_PH1, GPIO_PH0, | ||
| 51 | 49 | ||
| 52 | /* Port I - not on device */ | 50 | /* Port I - not on device */ |
| 53 | 51 | ||
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7722.h b/arch/sh/include/cpu-sh4/cpu/sh7722.h index 3bb74e534d0f..78961ab78a5a 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7722.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7722.h | |||
| @@ -67,7 +67,7 @@ enum { | |||
| 67 | GPIO_PTN3, GPIO_PTN2, GPIO_PTN1, GPIO_PTN0, | 67 | GPIO_PTN3, GPIO_PTN2, GPIO_PTN1, GPIO_PTN0, |
| 68 | 68 | ||
| 69 | /* PTQ */ | 69 | /* PTQ */ |
| 70 | GPIO_PTQ7, GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4, | 70 | GPIO_PTQ6, GPIO_PTQ5, GPIO_PTQ4, |
| 71 | GPIO_PTQ3, GPIO_PTQ2, GPIO_PTQ1, GPIO_PTQ0, | 71 | GPIO_PTQ3, GPIO_PTQ2, GPIO_PTQ1, GPIO_PTQ0, |
| 72 | 72 | ||
| 73 | /* PTR */ | 73 | /* PTR */ |
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7757.h b/arch/sh/include/cpu-sh4/cpu/sh7757.h index 5340f3bc1863..b40fb541e72a 100644 --- a/arch/sh/include/cpu-sh4/cpu/sh7757.h +++ b/arch/sh/include/cpu-sh4/cpu/sh7757.h | |||
| @@ -40,7 +40,7 @@ enum { | |||
| 40 | 40 | ||
| 41 | /* PTJ */ | 41 | /* PTJ */ |
| 42 | GPIO_PTJ0, GPIO_PTJ1, GPIO_PTJ2, GPIO_PTJ3, | 42 | GPIO_PTJ0, GPIO_PTJ1, GPIO_PTJ2, GPIO_PTJ3, |
| 43 | GPIO_PTJ4, GPIO_PTJ5, GPIO_PTJ6, GPIO_PTJ7_RESV, | 43 | GPIO_PTJ4, GPIO_PTJ5, GPIO_PTJ6, |
| 44 | 44 | ||
| 45 | /* PTK */ | 45 | /* PTK */ |
| 46 | GPIO_PTK0, GPIO_PTK1, GPIO_PTK2, GPIO_PTK3, | 46 | GPIO_PTK0, GPIO_PTK1, GPIO_PTK2, GPIO_PTK3, |
| @@ -48,7 +48,7 @@ enum { | |||
| 48 | 48 | ||
| 49 | /* PTL */ | 49 | /* PTL */ |
| 50 | GPIO_PTL0, GPIO_PTL1, GPIO_PTL2, GPIO_PTL3, | 50 | GPIO_PTL0, GPIO_PTL1, GPIO_PTL2, GPIO_PTL3, |
| 51 | GPIO_PTL4, GPIO_PTL5, GPIO_PTL6, GPIO_PTL7_RESV, | 51 | GPIO_PTL4, GPIO_PTL5, GPIO_PTL6, |
| 52 | 52 | ||
| 53 | /* PTM */ | 53 | /* PTM */ |
| 54 | GPIO_PTM0, GPIO_PTM1, GPIO_PTM2, GPIO_PTM3, | 54 | GPIO_PTM0, GPIO_PTM1, GPIO_PTM2, GPIO_PTM3, |
| @@ -56,7 +56,7 @@ enum { | |||
| 56 | 56 | ||
| 57 | /* PTN */ | 57 | /* PTN */ |
| 58 | GPIO_PTN0, GPIO_PTN1, GPIO_PTN2, GPIO_PTN3, | 58 | GPIO_PTN0, GPIO_PTN1, GPIO_PTN2, GPIO_PTN3, |
| 59 | GPIO_PTN4, GPIO_PTN5, GPIO_PTN6, GPIO_PTN7_RESV, | 59 | GPIO_PTN4, GPIO_PTN5, GPIO_PTN6, |
| 60 | 60 | ||
| 61 | /* PTO */ | 61 | /* PTO */ |
| 62 | GPIO_PTO0, GPIO_PTO1, GPIO_PTO2, GPIO_PTO3, | 62 | GPIO_PTO0, GPIO_PTO1, GPIO_PTO2, GPIO_PTO3, |
| @@ -68,7 +68,7 @@ enum { | |||
| 68 | 68 | ||
| 69 | /* PTQ */ | 69 | /* PTQ */ |
| 70 | GPIO_PTQ0, GPIO_PTQ1, GPIO_PTQ2, GPIO_PTQ3, | 70 | GPIO_PTQ0, GPIO_PTQ1, GPIO_PTQ2, GPIO_PTQ3, |
| 71 | GPIO_PTQ4, GPIO_PTQ5, GPIO_PTQ6, GPIO_PTQ7_RESV, | 71 | GPIO_PTQ4, GPIO_PTQ5, GPIO_PTQ6, |
| 72 | 72 | ||
| 73 | /* PTR */ | 73 | /* PTR */ |
| 74 | GPIO_PTR0, GPIO_PTR1, GPIO_PTR2, GPIO_PTR3, | 74 | GPIO_PTR0, GPIO_PTR1, GPIO_PTR2, GPIO_PTR3, |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 0be3828752e5..4e83f950713e 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
| @@ -44,7 +44,6 @@ config SPARC | |||
| 44 | select ARCH_HAS_SG_CHAIN | 44 | select ARCH_HAS_SG_CHAIN |
| 45 | select CPU_NO_EFFICIENT_FFS | 45 | select CPU_NO_EFFICIENT_FFS |
| 46 | select LOCKDEP_SMALL if LOCKDEP | 46 | select LOCKDEP_SMALL if LOCKDEP |
| 47 | select ARCH_WANT_RELAX_ORDER | ||
| 48 | 47 | ||
| 49 | config SPARC32 | 48 | config SPARC32 |
| 50 | def_bool !64BIT | 49 | def_bool !64BIT |
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 8a13d468635a..50e0d2bc4528 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S | |||
| @@ -176,7 +176,7 @@ | |||
| 176 | /* | 176 | /* |
| 177 | * This is a sneaky trick to help the unwinder find pt_regs on the stack. The | 177 | * This is a sneaky trick to help the unwinder find pt_regs on the stack. The |
| 178 | * frame pointer is replaced with an encoded pointer to pt_regs. The encoding | 178 | * frame pointer is replaced with an encoded pointer to pt_regs. The encoding |
| 179 | * is just setting the LSB, which makes it an invalid stack address and is also | 179 | * is just clearing the MSB, which makes it an invalid stack address and is also |
| 180 | * a signal to the unwinder that it's a pt_regs pointer in disguise. | 180 | * a signal to the unwinder that it's a pt_regs pointer in disguise. |
| 181 | * | 181 | * |
| 182 | * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the | 182 | * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the |
| @@ -185,7 +185,7 @@ | |||
| 185 | .macro ENCODE_FRAME_POINTER | 185 | .macro ENCODE_FRAME_POINTER |
| 186 | #ifdef CONFIG_FRAME_POINTER | 186 | #ifdef CONFIG_FRAME_POINTER |
| 187 | mov %esp, %ebp | 187 | mov %esp, %ebp |
| 188 | orl $0x1, %ebp | 188 | andl $0x7fffffff, %ebp |
| 189 | #endif | 189 | #endif |
| 190 | .endm | 190 | .endm |
| 191 | 191 | ||
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 829e89cfcee2..9fb9a1f1e47b 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c | |||
| @@ -4409,10 +4409,9 @@ static __init int fixup_ht_bug(void) | |||
| 4409 | return 0; | 4409 | return 0; |
| 4410 | } | 4410 | } |
| 4411 | 4411 | ||
| 4412 | if (lockup_detector_suspend() != 0) { | 4412 | cpus_read_lock(); |
| 4413 | pr_debug("failed to disable PMU erratum BJ122, BV98, HSD29 workaround\n"); | 4413 | |
| 4414 | return 0; | 4414 | hardlockup_detector_perf_stop(); |
| 4415 | } | ||
| 4416 | 4415 | ||
| 4417 | x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED); | 4416 | x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED); |
| 4418 | 4417 | ||
| @@ -4420,9 +4419,7 @@ static __init int fixup_ht_bug(void) | |||
| 4420 | x86_pmu.commit_scheduling = NULL; | 4419 | x86_pmu.commit_scheduling = NULL; |
| 4421 | x86_pmu.stop_scheduling = NULL; | 4420 | x86_pmu.stop_scheduling = NULL; |
| 4422 | 4421 | ||
| 4423 | lockup_detector_resume(); | 4422 | hardlockup_detector_perf_restart(); |
| 4424 | |||
| 4425 | cpus_read_lock(); | ||
| 4426 | 4423 | ||
| 4427 | for_each_online_cpu(c) | 4424 | for_each_online_cpu(c) |
| 4428 | free_excl_cntrs(c); | 4425 | free_excl_cntrs(c); |
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 1c5390f1cf09..d45e06346f14 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c | |||
| @@ -822,7 +822,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid) | |||
| 822 | pmus[i].type = type; | 822 | pmus[i].type = type; |
| 823 | pmus[i].boxes = kzalloc(size, GFP_KERNEL); | 823 | pmus[i].boxes = kzalloc(size, GFP_KERNEL); |
| 824 | if (!pmus[i].boxes) | 824 | if (!pmus[i].boxes) |
| 825 | return -ENOMEM; | 825 | goto err; |
| 826 | } | 826 | } |
| 827 | 827 | ||
| 828 | type->pmus = pmus; | 828 | type->pmus = pmus; |
| @@ -836,7 +836,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid) | |||
| 836 | attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) + | 836 | attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) + |
| 837 | sizeof(*attr_group), GFP_KERNEL); | 837 | sizeof(*attr_group), GFP_KERNEL); |
| 838 | if (!attr_group) | 838 | if (!attr_group) |
| 839 | return -ENOMEM; | 839 | goto err; |
| 840 | 840 | ||
| 841 | attrs = (struct attribute **)(attr_group + 1); | 841 | attrs = (struct attribute **)(attr_group + 1); |
| 842 | attr_group->name = "events"; | 842 | attr_group->name = "events"; |
| @@ -849,7 +849,15 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid) | |||
| 849 | } | 849 | } |
| 850 | 850 | ||
| 851 | type->pmu_group = &uncore_pmu_attr_group; | 851 | type->pmu_group = &uncore_pmu_attr_group; |
| 852 | |||
| 852 | return 0; | 853 | return 0; |
| 854 | |||
| 855 | err: | ||
| 856 | for (i = 0; i < type->num_boxes; i++) | ||
| 857 | kfree(pmus[i].boxes); | ||
| 858 | kfree(pmus); | ||
| 859 | |||
| 860 | return -ENOMEM; | ||
| 853 | } | 861 | } |
| 854 | 862 | ||
| 855 | static int __init | 863 | static int __init |
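The uncore_type_init() change above swaps early returns for a goto-based error label so that partially initialized per-PMU boxes are freed before -ENOMEM is reported. The same unwind idiom in a generic form (struct box and the count are illustrative, not the uncore structures):

	static int init_boxes(struct box **boxes, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			boxes[i] = kzalloc(sizeof(**boxes), GFP_KERNEL);
			if (!boxes[i])
				goto err;
		}
		return 0;

	err:
		while (--i >= 0) {			/* unwind only what was allocated */
			kfree(boxes[i]);
			boxes[i] = NULL;
		}
		return -ENOMEM;
	}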
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 1a8eb550c40f..a5db63f728a2 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c | |||
| @@ -85,6 +85,8 @@ EXPORT_SYMBOL_GPL(hyperv_cs); | |||
| 85 | u32 *hv_vp_index; | 85 | u32 *hv_vp_index; |
| 86 | EXPORT_SYMBOL_GPL(hv_vp_index); | 86 | EXPORT_SYMBOL_GPL(hv_vp_index); |
| 87 | 87 | ||
| 88 | u32 hv_max_vp_index; | ||
| 89 | |||
| 88 | static int hv_cpu_init(unsigned int cpu) | 90 | static int hv_cpu_init(unsigned int cpu) |
| 89 | { | 91 | { |
| 90 | u64 msr_vp_index; | 92 | u64 msr_vp_index; |
| @@ -93,6 +95,9 @@ static int hv_cpu_init(unsigned int cpu) | |||
| 93 | 95 | ||
| 94 | hv_vp_index[smp_processor_id()] = msr_vp_index; | 96 | hv_vp_index[smp_processor_id()] = msr_vp_index; |
| 95 | 97 | ||
| 98 | if (msr_vp_index > hv_max_vp_index) | ||
| 99 | hv_max_vp_index = msr_vp_index; | ||
| 100 | |||
| 96 | return 0; | 101 | return 0; |
| 97 | } | 102 | } |
| 98 | 103 | ||
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index 39e7f6e50919..9cc9e1c1e2db 100644 --- a/arch/x86/hyperv/mmu.c +++ b/arch/x86/hyperv/mmu.c | |||
| @@ -36,9 +36,9 @@ struct hv_flush_pcpu_ex { | |||
| 36 | /* Each gva in gva_list encodes up to 4096 pages to flush */ | 36 | /* Each gva in gva_list encodes up to 4096 pages to flush */ |
| 37 | #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) | 37 | #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) |
| 38 | 38 | ||
| 39 | static struct hv_flush_pcpu __percpu *pcpu_flush; | 39 | static struct hv_flush_pcpu __percpu **pcpu_flush; |
| 40 | 40 | ||
| 41 | static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex; | 41 | static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex; |
| 42 | 42 | ||
| 43 | /* | 43 | /* |
| 44 | * Fills in gva_list starting from offset. Returns the number of items added. | 44 | * Fills in gva_list starting from offset. Returns the number of items added. |
| @@ -76,6 +76,18 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush, | |||
| 76 | { | 76 | { |
| 77 | int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; | 77 | int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; |
| 78 | 78 | ||
| 79 | /* valid_bank_mask can represent up to 64 banks */ | ||
| 80 | if (hv_max_vp_index / 64 >= 64) | ||
| 81 | return 0; | ||
| 82 | |||
| 83 | /* | ||
| 84 | * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex | ||
| 85 | * structs are not cleared between calls, we risk flushing unneeded | ||
| 86 | * vCPUs otherwise. | ||
| 87 | */ | ||
| 88 | for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++) | ||
| 89 | flush->hv_vp_set.bank_contents[vcpu_bank] = 0; | ||
| 90 | |||
| 79 | /* | 91 | /* |
| 80 | * Some banks may end up being empty but this is acceptable. | 92 | * Some banks may end up being empty but this is acceptable. |
| 81 | */ | 93 | */ |
| @@ -83,11 +95,6 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush, | |||
| 83 | vcpu = hv_cpu_number_to_vp_number(cpu); | 95 | vcpu = hv_cpu_number_to_vp_number(cpu); |
| 84 | vcpu_bank = vcpu / 64; | 96 | vcpu_bank = vcpu / 64; |
| 85 | vcpu_offset = vcpu % 64; | 97 | vcpu_offset = vcpu % 64; |
| 86 | |||
| 87 | /* valid_bank_mask can represent up to 64 banks */ | ||
| 88 | if (vcpu_bank >= 64) | ||
| 89 | return 0; | ||
| 90 | |||
| 91 | __set_bit(vcpu_offset, (unsigned long *) | 98 | __set_bit(vcpu_offset, (unsigned long *) |
| 92 | &flush->hv_vp_set.bank_contents[vcpu_bank]); | 99 | &flush->hv_vp_set.bank_contents[vcpu_bank]); |
| 93 | if (vcpu_bank >= nr_bank) | 100 | if (vcpu_bank >= nr_bank) |
| @@ -102,6 +109,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, | |||
| 102 | const struct flush_tlb_info *info) | 109 | const struct flush_tlb_info *info) |
| 103 | { | 110 | { |
| 104 | int cpu, vcpu, gva_n, max_gvas; | 111 | int cpu, vcpu, gva_n, max_gvas; |
| 112 | struct hv_flush_pcpu **flush_pcpu; | ||
| 105 | struct hv_flush_pcpu *flush; | 113 | struct hv_flush_pcpu *flush; |
| 106 | u64 status = U64_MAX; | 114 | u64 status = U64_MAX; |
| 107 | unsigned long flags; | 115 | unsigned long flags; |
| @@ -116,7 +124,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, | |||
| 116 | 124 | ||
| 117 | local_irq_save(flags); | 125 | local_irq_save(flags); |
| 118 | 126 | ||
| 119 | flush = this_cpu_ptr(pcpu_flush); | 127 | flush_pcpu = this_cpu_ptr(pcpu_flush); |
| 128 | |||
| 129 | if (unlikely(!*flush_pcpu)) | ||
| 130 | *flush_pcpu = page_address(alloc_page(GFP_ATOMIC)); | ||
| 131 | |||
| 132 | flush = *flush_pcpu; | ||
| 133 | |||
| 134 | if (unlikely(!flush)) { | ||
| 135 | local_irq_restore(flags); | ||
| 136 | goto do_native; | ||
| 137 | } | ||
| 120 | 138 | ||
| 121 | if (info->mm) { | 139 | if (info->mm) { |
| 122 | flush->address_space = virt_to_phys(info->mm->pgd); | 140 | flush->address_space = virt_to_phys(info->mm->pgd); |
| @@ -173,6 +191,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, | |||
| 173 | const struct flush_tlb_info *info) | 191 | const struct flush_tlb_info *info) |
| 174 | { | 192 | { |
| 175 | int nr_bank = 0, max_gvas, gva_n; | 193 | int nr_bank = 0, max_gvas, gva_n; |
| 194 | struct hv_flush_pcpu_ex **flush_pcpu; | ||
| 176 | struct hv_flush_pcpu_ex *flush; | 195 | struct hv_flush_pcpu_ex *flush; |
| 177 | u64 status = U64_MAX; | 196 | u64 status = U64_MAX; |
| 178 | unsigned long flags; | 197 | unsigned long flags; |
| @@ -187,7 +206,17 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, | |||
| 187 | 206 | ||
| 188 | local_irq_save(flags); | 207 | local_irq_save(flags); |
| 189 | 208 | ||
| 190 | flush = this_cpu_ptr(pcpu_flush_ex); | 209 | flush_pcpu = this_cpu_ptr(pcpu_flush_ex); |
| 210 | |||
| 211 | if (unlikely(!*flush_pcpu)) | ||
| 212 | *flush_pcpu = page_address(alloc_page(GFP_ATOMIC)); | ||
| 213 | |||
| 214 | flush = *flush_pcpu; | ||
| 215 | |||
| 216 | if (unlikely(!flush)) { | ||
| 217 | local_irq_restore(flags); | ||
| 218 | goto do_native; | ||
| 219 | } | ||
| 191 | 220 | ||
| 192 | if (info->mm) { | 221 | if (info->mm) { |
| 193 | flush->address_space = virt_to_phys(info->mm->pgd); | 222 | flush->address_space = virt_to_phys(info->mm->pgd); |
| @@ -222,18 +251,18 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, | |||
| 222 | flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY; | 251 | flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY; |
| 223 | status = hv_do_rep_hypercall( | 252 | status = hv_do_rep_hypercall( |
| 224 | HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, | 253 | HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, |
| 225 | 0, nr_bank + 2, flush, NULL); | 254 | 0, nr_bank, flush, NULL); |
| 226 | } else if (info->end && | 255 | } else if (info->end && |
| 227 | ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) { | 256 | ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) { |
| 228 | status = hv_do_rep_hypercall( | 257 | status = hv_do_rep_hypercall( |
| 229 | HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, | 258 | HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, |
| 230 | 0, nr_bank + 2, flush, NULL); | 259 | 0, nr_bank, flush, NULL); |
| 231 | } else { | 260 | } else { |
| 232 | gva_n = fill_gva_list(flush->gva_list, nr_bank, | 261 | gva_n = fill_gva_list(flush->gva_list, nr_bank, |
| 233 | info->start, info->end); | 262 | info->start, info->end); |
| 234 | status = hv_do_rep_hypercall( | 263 | status = hv_do_rep_hypercall( |
| 235 | HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX, | 264 | HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX, |
| 236 | gva_n, nr_bank + 2, flush, NULL); | 265 | gva_n, nr_bank, flush, NULL); |
| 237 | } | 266 | } |
| 238 | 267 | ||
| 239 | local_irq_restore(flags); | 268 | local_irq_restore(flags); |
| @@ -266,7 +295,7 @@ void hyper_alloc_mmu(void) | |||
| 266 | return; | 295 | return; |
| 267 | 296 | ||
| 268 | if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) | 297 | if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) |
| 269 | pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE); | 298 | pcpu_flush = alloc_percpu(struct hv_flush_pcpu *); |
| 270 | else | 299 | else |
| 271 | pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE); | 300 | pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *); |
| 272 | } | 301 | } |
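The hyperv/mmu.c rework replaces the page-sized percpu flush buffer with a percpu pointer that is filled in on first use, so each CPU ends up with an ordinary page for its hypercall argument; the allocation is GFP_ATOMIC because it happens with interrupts already disabled, and the caller falls back to the native flush when it fails. A hedged sketch of that lazy per-cpu page pattern (names are illustrative, not the Hyper-V code):

	static void __percpu **flush_arg_page;			/* one pointer per CPU */

	static void *get_flush_arg(void)			/* called with IRQs disabled */
	{
		void **slot = this_cpu_ptr(flush_arg_page);

		if (unlikely(!*slot))
			*slot = page_address(alloc_page(GFP_ATOMIC));

		return *slot;					/* may be NULL: fall back to native */
	}

	static void flush_arg_init(void)
	{
		flush_arg_page = alloc_percpu(void *);		/* allocate the pointers, not the pages */
	}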
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h index e7636bac7372..6c98821fef5e 100644 --- a/arch/x86/include/asm/alternative-asm.h +++ b/arch/x86/include/asm/alternative-asm.h | |||
| @@ -62,8 +62,10 @@ | |||
| 62 | #define new_len2 145f-144f | 62 | #define new_len2 145f-144f |
| 63 | 63 | ||
| 64 | /* | 64 | /* |
| 65 | * max without conditionals. Idea adapted from: | 65 | * gas compatible max based on the idea from: |
| 66 | * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax | 66 | * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax |
| 67 | * | ||
| 68 | * The additional "-" is needed because gas uses a "true" value of -1. | ||
| 67 | */ | 69 | */ |
| 68 | #define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) | 70 | #define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) |
| 69 | 71 | ||
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index c096624137ae..ccbe24e697c4 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h | |||
| @@ -103,12 +103,12 @@ static inline int alternatives_text_reserved(void *start, void *end) | |||
| 103 | alt_end_marker ":\n" | 103 | alt_end_marker ":\n" |
| 104 | 104 | ||
| 105 | /* | 105 | /* |
| 106 | * max without conditionals. Idea adapted from: | 106 | * gas compatible max based on the idea from: |
| 107 | * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax | 107 | * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax |
| 108 | * | 108 | * |
| 109 | * The additional "-" is needed because gas works with s32s. | 109 | * The additional "-" is needed because gas uses a "true" value of -1. |
| 110 | */ | 110 | */ |
| 111 | #define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))" | 111 | #define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))" |
| 112 | 112 | ||
| 113 | /* | 113 | /* |
| 114 | * Pad the second replacement alternative with additional NOPs if it is | 114 | * Pad the second replacement alternative with additional NOPs if it is |
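Both alternative headers lean on the same branchless max, and the corrected comment's point is that gas evaluates a true comparison as -1 while C yields 1, hence the extra negation in the assembler macro. A small C check of the identity (illustrative, not kernel code):

	#include <assert.h>

	/* Branchless max: the mask is all-ones exactly when a < b, so the XOR selects b. */
	static unsigned int alt_max(unsigned int a, unsigned int b)
	{
		return a ^ ((a ^ b) & -(unsigned int)(a < b));
	}

	int main(void)
	{
		assert(alt_max(3, 7) == 7);	/* a < b: mask = ~0, result = b */
		assert(alt_max(9, 4) == 9);	/* a >= b: mask = 0, result = a */
		return 0;
	}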
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h index bc62e7cbf1b1..59ad3d132353 100644 --- a/arch/x86/include/asm/kvm_para.h +++ b/arch/x86/include/asm/kvm_para.h | |||
| @@ -88,7 +88,7 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, | |||
| 88 | bool kvm_para_available(void); | 88 | bool kvm_para_available(void); |
| 89 | unsigned int kvm_arch_para_features(void); | 89 | unsigned int kvm_arch_para_features(void); |
| 90 | void __init kvm_guest_init(void); | 90 | void __init kvm_guest_init(void); |
| 91 | void kvm_async_pf_task_wait(u32 token); | 91 | void kvm_async_pf_task_wait(u32 token, int interrupt_kernel); |
| 92 | void kvm_async_pf_task_wake(u32 token); | 92 | void kvm_async_pf_task_wake(u32 token); |
| 93 | u32 kvm_read_and_reset_pf_reason(void); | 93 | u32 kvm_read_and_reset_pf_reason(void); |
| 94 | extern void kvm_disable_steal_time(void); | 94 | extern void kvm_disable_steal_time(void); |
| @@ -103,7 +103,7 @@ static inline void kvm_spinlock_init(void) | |||
| 103 | 103 | ||
| 104 | #else /* CONFIG_KVM_GUEST */ | 104 | #else /* CONFIG_KVM_GUEST */ |
| 105 | #define kvm_guest_init() do {} while (0) | 105 | #define kvm_guest_init() do {} while (0) |
| 106 | #define kvm_async_pf_task_wait(T) do {} while(0) | 106 | #define kvm_async_pf_task_wait(T, I) do {} while(0) |
| 107 | #define kvm_async_pf_task_wake(T) do {} while(0) | 107 | #define kvm_async_pf_task_wake(T) do {} while(0) |
| 108 | 108 | ||
| 109 | static inline bool kvm_para_available(void) | 109 | static inline bool kvm_para_available(void) |
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 181264989db5..8edac1de2e35 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
| @@ -187,7 +187,6 @@ struct mca_msr_regs { | |||
| 187 | 187 | ||
| 188 | extern struct mce_vendor_flags mce_flags; | 188 | extern struct mce_vendor_flags mce_flags; |
| 189 | 189 | ||
| 190 | extern struct mca_config mca_cfg; | ||
| 191 | extern struct mca_msr_regs msr_ops; | 190 | extern struct mca_msr_regs msr_ops; |
| 192 | 191 | ||
| 193 | enum mce_notifier_prios { | 192 | enum mce_notifier_prios { |
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index c120b5db178a..3c856a15b98e 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
| @@ -126,13 +126,7 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next) | |||
| 126 | DEBUG_LOCKS_WARN_ON(preemptible()); | 126 | DEBUG_LOCKS_WARN_ON(preemptible()); |
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 129 | void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); |
| 130 | { | ||
| 131 | int cpu = smp_processor_id(); | ||
| 132 | |||
| 133 | if (cpumask_test_cpu(cpu, mm_cpumask(mm))) | ||
| 134 | cpumask_clear_cpu(cpu, mm_cpumask(mm)); | ||
| 135 | } | ||
| 136 | 130 | ||
| 137 | static inline int init_new_context(struct task_struct *tsk, | 131 | static inline int init_new_context(struct task_struct *tsk, |
| 138 | struct mm_struct *mm) | 132 | struct mm_struct *mm) |
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 738503e1f80c..530f448fddaf 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h | |||
| @@ -289,6 +289,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size, | |||
| 289 | * to this information. | 289 | * to this information. |
| 290 | */ | 290 | */ |
| 291 | extern u32 *hv_vp_index; | 291 | extern u32 *hv_vp_index; |
| 292 | extern u32 hv_max_vp_index; | ||
| 292 | 293 | ||
| 293 | /** | 294 | /** |
| 294 | * hv_cpu_number_to_vp_number() - Map CPU to VP. | 295 | * hv_cpu_number_to_vp_number() - Map CPU to VP. |
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 4893abf7f74f..d362161d3291 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h | |||
| @@ -83,6 +83,13 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) | |||
| 83 | #endif | 83 | #endif |
| 84 | 84 | ||
| 85 | /* | 85 | /* |
| 86 | * If tlb_use_lazy_mode is true, then we try to avoid switching CR3 to point | ||
| 87 | * to init_mm when we switch to a kernel thread (e.g. the idle thread). If | ||
| 88 | * it's false, then we immediately switch CR3 when entering a kernel thread. | ||
| 89 | */ | ||
| 90 | DECLARE_STATIC_KEY_TRUE(tlb_use_lazy_mode); | ||
| 91 | |||
| 92 | /* | ||
| 86 | * 6 because 6 should be plenty and struct tlb_state will fit in | 93 | * 6 because 6 should be plenty and struct tlb_state will fit in |
| 87 | * two cache lines. | 94 | * two cache lines. |
| 88 | */ | 95 | */ |
| @@ -105,6 +112,23 @@ struct tlb_state { | |||
| 105 | u16 next_asid; | 112 | u16 next_asid; |
| 106 | 113 | ||
| 107 | /* | 114 | /* |
| 115 | * We can be in one of several states: | ||
| 116 | * | ||
| 117 | * - Actively using an mm. Our CPU's bit will be set in | ||
| 118 | * mm_cpumask(loaded_mm) and is_lazy == false; | ||
| 119 | * | ||
| 120 | * - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit | ||
| 121 | * will not be set in mm_cpumask(&init_mm) and is_lazy == false. | ||
| 122 | * | ||
| 123 | * - Lazily using a real mm. loaded_mm != &init_mm, our bit | ||
| 124 | * is set in mm_cpumask(loaded_mm), but is_lazy == true. | ||
| 125 | * We're heuristically guessing that the CR3 load we | ||
| 126 | * skipped more than makes up for the overhead added by | ||
| 127 | * lazy mode. | ||
| 128 | */ | ||
| 129 | bool is_lazy; | ||
| 130 | |||
| 131 | /* | ||
| 108 | * Access to this CR4 shadow and to H/W CR4 is protected by | 132 | * Access to this CR4 shadow and to H/W CR4 is protected by |
| 109 | * disabling interrupts when modifying either one. | 133 | * disabling interrupts when modifying either one. |
| 110 | */ | 134 | */ |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index d705c769f77d..ff891772c9f8 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
| @@ -573,11 +573,21 @@ static u32 bdx_deadline_rev(void) | |||
| 573 | return ~0U; | 573 | return ~0U; |
| 574 | } | 574 | } |
| 575 | 575 | ||
| 576 | static u32 skx_deadline_rev(void) | ||
| 577 | { | ||
| 578 | switch (boot_cpu_data.x86_mask) { | ||
| 579 | case 0x03: return 0x01000136; | ||
| 580 | case 0x04: return 0x02000014; | ||
| 581 | } | ||
| 582 | |||
| 583 | return ~0U; | ||
| 584 | } | ||
| 585 | |||
| 576 | static const struct x86_cpu_id deadline_match[] = { | 586 | static const struct x86_cpu_id deadline_match[] = { |
| 577 | DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev), | 587 | DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev), |
| 578 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020), | 588 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020), |
| 579 | DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev), | 589 | DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev), |
| 580 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X, 0x02000014), | 590 | DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev), |
| 581 | 591 | ||
| 582 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22), | 592 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22), |
| 583 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20), | 593 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20), |
| @@ -600,7 +610,8 @@ static void apic_check_deadline_errata(void) | |||
| 600 | const struct x86_cpu_id *m; | 610 | const struct x86_cpu_id *m; |
| 601 | u32 rev; | 611 | u32 rev; |
| 602 | 612 | ||
| 603 | if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) | 613 | if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) || |
| 614 | boot_cpu_has(X86_FEATURE_HYPERVISOR)) | ||
| 604 | return; | 615 | return; |
| 605 | 616 | ||
| 606 | m = x86_match_cpu(deadline_match); | 617 | m = x86_match_cpu(deadline_match); |
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index 098530a93bb7..debb974fd17d 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h | |||
| @@ -1,3 +1,6 @@ | |||
| 1 | #ifndef __X86_MCE_INTERNAL_H__ | ||
| 2 | #define __X86_MCE_INTERNAL_H__ | ||
| 3 | |||
| 1 | #include <linux/device.h> | 4 | #include <linux/device.h> |
| 2 | #include <asm/mce.h> | 5 | #include <asm/mce.h> |
| 3 | 6 | ||
| @@ -108,3 +111,7 @@ static inline void mce_work_trigger(void) { } | |||
| 108 | static inline void mce_register_injector_chain(struct notifier_block *nb) { } | 111 | static inline void mce_register_injector_chain(struct notifier_block *nb) { } |
| 109 | static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } | 112 | static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } |
| 110 | #endif | 113 | #endif |
| 114 | |||
| 115 | extern struct mca_config mca_cfg; | ||
| 116 | |||
| 117 | #endif /* __X86_MCE_INTERNAL_H__ */ | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 40e28ed77fbf..486f640b02ef 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
| @@ -28,6 +28,8 @@ | |||
| 28 | #include <asm/msr.h> | 28 | #include <asm/msr.h> |
| 29 | #include <asm/trace/irq_vectors.h> | 29 | #include <asm/trace/irq_vectors.h> |
| 30 | 30 | ||
| 31 | #include "mce-internal.h" | ||
| 32 | |||
| 31 | #define NR_BLOCKS 5 | 33 | #define NR_BLOCKS 5 |
| 32 | #define THRESHOLD_MAX 0xFFF | 34 | #define THRESHOLD_MAX 0xFFF |
| 33 | #define INT_TYPE_APIC 0x00020000 | 35 | #define INT_TYPE_APIC 0x00020000 |
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 86e8f0b2537b..c4fa4a85d4cb 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c | |||
| @@ -122,9 +122,6 @@ static bool __init check_loader_disabled_bsp(void) | |||
| 122 | bool *res = &dis_ucode_ldr; | 122 | bool *res = &dis_ucode_ldr; |
| 123 | #endif | 123 | #endif |
| 124 | 124 | ||
| 125 | if (!have_cpuid_p()) | ||
| 126 | return *res; | ||
| 127 | |||
| 128 | /* | 125 | /* |
| 129 | * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not | 126 | * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not |
| 130 | * completely accurate as xen pv guests don't see that CPUID bit set but | 127 | * completely accurate as xen pv guests don't see that CPUID bit set but |
| @@ -166,24 +163,36 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name) | |||
| 166 | void __init load_ucode_bsp(void) | 163 | void __init load_ucode_bsp(void) |
| 167 | { | 164 | { |
| 168 | unsigned int cpuid_1_eax; | 165 | unsigned int cpuid_1_eax; |
| 166 | bool intel = true; | ||
| 169 | 167 | ||
| 170 | if (check_loader_disabled_bsp()) | 168 | if (!have_cpuid_p()) |
| 171 | return; | 169 | return; |
| 172 | 170 | ||
| 173 | cpuid_1_eax = native_cpuid_eax(1); | 171 | cpuid_1_eax = native_cpuid_eax(1); |
| 174 | 172 | ||
| 175 | switch (x86_cpuid_vendor()) { | 173 | switch (x86_cpuid_vendor()) { |
| 176 | case X86_VENDOR_INTEL: | 174 | case X86_VENDOR_INTEL: |
| 177 | if (x86_family(cpuid_1_eax) >= 6) | 175 | if (x86_family(cpuid_1_eax) < 6) |
| 178 | load_ucode_intel_bsp(); | 176 | return; |
| 179 | break; | 177 | break; |
| 178 | |||
| 180 | case X86_VENDOR_AMD: | 179 | case X86_VENDOR_AMD: |
| 181 | if (x86_family(cpuid_1_eax) >= 0x10) | 180 | if (x86_family(cpuid_1_eax) < 0x10) |
| 182 | load_ucode_amd_bsp(cpuid_1_eax); | 181 | return; |
| 182 | intel = false; | ||
| 183 | break; | 183 | break; |
| 184 | |||
| 184 | default: | 185 | default: |
| 185 | break; | 186 | return; |
| 186 | } | 187 | } |
| 188 | |||
| 189 | if (check_loader_disabled_bsp()) | ||
| 190 | return; | ||
| 191 | |||
| 192 | if (intel) | ||
| 193 | load_ucode_intel_bsp(); | ||
| 194 | else | ||
| 195 | load_ucode_amd_bsp(cpuid_1_eax); | ||
| 187 | } | 196 | } |
| 188 | 197 | ||
| 189 | static bool check_loader_disabled_ap(void) | 198 | static bool check_loader_disabled_ap(void) |
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h index db2182d63ed0..3fc0f9a794cb 100644 --- a/arch/x86/kernel/kprobes/common.h +++ b/arch/x86/kernel/kprobes/common.h | |||
| @@ -3,6 +3,15 @@ | |||
| 3 | 3 | ||
| 4 | /* Kprobes and Optprobes common header */ | 4 | /* Kprobes and Optprobes common header */ |
| 5 | 5 | ||
| 6 | #include <asm/asm.h> | ||
| 7 | |||
| 8 | #ifdef CONFIG_FRAME_POINTER | ||
| 9 | # define SAVE_RBP_STRING " push %" _ASM_BP "\n" \ | ||
| 10 | " mov %" _ASM_SP ", %" _ASM_BP "\n" | ||
| 11 | #else | ||
| 12 | # define SAVE_RBP_STRING " push %" _ASM_BP "\n" | ||
| 13 | #endif | ||
| 14 | |||
| 6 | #ifdef CONFIG_X86_64 | 15 | #ifdef CONFIG_X86_64 |
| 7 | #define SAVE_REGS_STRING \ | 16 | #define SAVE_REGS_STRING \ |
| 8 | /* Skip cs, ip, orig_ax. */ \ | 17 | /* Skip cs, ip, orig_ax. */ \ |
| @@ -17,7 +26,7 @@ | |||
| 17 | " pushq %r10\n" \ | 26 | " pushq %r10\n" \ |
| 18 | " pushq %r11\n" \ | 27 | " pushq %r11\n" \ |
| 19 | " pushq %rbx\n" \ | 28 | " pushq %rbx\n" \ |
| 20 | " pushq %rbp\n" \ | 29 | SAVE_RBP_STRING \ |
| 21 | " pushq %r12\n" \ | 30 | " pushq %r12\n" \ |
| 22 | " pushq %r13\n" \ | 31 | " pushq %r13\n" \ |
| 23 | " pushq %r14\n" \ | 32 | " pushq %r14\n" \ |
| @@ -48,7 +57,7 @@ | |||
| 48 | " pushl %es\n" \ | 57 | " pushl %es\n" \ |
| 49 | " pushl %ds\n" \ | 58 | " pushl %ds\n" \ |
| 50 | " pushl %eax\n" \ | 59 | " pushl %eax\n" \ |
| 51 | " pushl %ebp\n" \ | 60 | SAVE_RBP_STRING \ |
| 52 | " pushl %edi\n" \ | 61 | " pushl %edi\n" \ |
| 53 | " pushl %esi\n" \ | 62 | " pushl %esi\n" \ |
| 54 | " pushl %edx\n" \ | 63 | " pushl %edx\n" \ |
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index f0153714ddac..0742491cbb73 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
| @@ -1080,8 +1080,6 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
| 1080 | * raw stack chunk with redzones: | 1080 | * raw stack chunk with redzones: |
| 1081 | */ | 1081 | */ |
| 1082 | __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr)); | 1082 | __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr)); |
| 1083 | regs->flags &= ~X86_EFLAGS_IF; | ||
| 1084 | trace_hardirqs_off(); | ||
| 1085 | regs->ip = (unsigned long)(jp->entry); | 1083 | regs->ip = (unsigned long)(jp->entry); |
| 1086 | 1084 | ||
| 1087 | /* | 1085 | /* |
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index e675704fa6f7..8bb9594d0761 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
| @@ -117,7 +117,11 @@ static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b, | |||
| 117 | return NULL; | 117 | return NULL; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | void kvm_async_pf_task_wait(u32 token) | 120 | /* |
| 121 | * @interrupt_kernel: Is this called from a routine which interrupts the kernel | ||
| 122 | * (other than user space)? | ||
| 123 | */ | ||
| 124 | void kvm_async_pf_task_wait(u32 token, int interrupt_kernel) | ||
| 121 | { | 125 | { |
| 122 | u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); | 126 | u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); |
| 123 | struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; | 127 | struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; |
| @@ -140,8 +144,10 @@ void kvm_async_pf_task_wait(u32 token) | |||
| 140 | 144 | ||
| 141 | n.token = token; | 145 | n.token = token; |
| 142 | n.cpu = smp_processor_id(); | 146 | n.cpu = smp_processor_id(); |
| 143 | n.halted = is_idle_task(current) || preempt_count() > 1 || | 147 | n.halted = is_idle_task(current) || |
| 144 | rcu_preempt_depth(); | 148 | (IS_ENABLED(CONFIG_PREEMPT_COUNT) |
| 149 | ? preempt_count() > 1 || rcu_preempt_depth() | ||
| 150 | : interrupt_kernel); | ||
| 145 | init_swait_queue_head(&n.wq); | 151 | init_swait_queue_head(&n.wq); |
| 146 | hlist_add_head(&n.link, &b->list); | 152 | hlist_add_head(&n.link, &b->list); |
| 147 | raw_spin_unlock(&b->lock); | 153 | raw_spin_unlock(&b->lock); |
| @@ -269,7 +275,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
| 269 | case KVM_PV_REASON_PAGE_NOT_PRESENT: | 275 | case KVM_PV_REASON_PAGE_NOT_PRESENT: |
| 270 | /* page is swapped out by the host. */ | 276 | /* page is swapped out by the host. */ |
| 271 | prev_state = exception_enter(); | 277 | prev_state = exception_enter(); |
| 272 | kvm_async_pf_task_wait((u32)read_cr2()); | 278 | kvm_async_pf_task_wait((u32)read_cr2(), !user_mode(regs)); |
| 273 | exception_exit(prev_state); | 279 | exception_exit(prev_state); |
| 274 | break; | 280 | break; |
| 275 | case KVM_PV_REASON_PAGE_READY: | 281 | case KVM_PV_REASON_PAGE_READY: |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 54180fa6f66f..add33f600531 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
| @@ -105,6 +105,10 @@ void __noreturn machine_real_restart(unsigned int type) | |||
| 105 | load_cr3(initial_page_table); | 105 | load_cr3(initial_page_table); |
| 106 | #else | 106 | #else |
| 107 | write_cr3(real_mode_header->trampoline_pgd); | 107 | write_cr3(real_mode_header->trampoline_pgd); |
| 108 | |||
| 109 | /* Exiting long mode will fail if CR4.PCIDE is set. */ | ||
| 110 | if (static_cpu_has(X86_FEATURE_PCID)) | ||
| 111 | cr4_clear_bits(X86_CR4_PCIDE); | ||
| 108 | #endif | 112 | #endif |
| 109 | 113 | ||
| 110 | /* Jump to the identity-mapped low memory code */ | 114 | /* Jump to the identity-mapped low memory code */ |
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index d145a0b1f529..3dc26f95d46e 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c | |||
| @@ -44,7 +44,8 @@ static void unwind_dump(struct unwind_state *state) | |||
| 44 | state->stack_info.type, state->stack_info.next_sp, | 44 | state->stack_info.type, state->stack_info.next_sp, |
| 45 | state->stack_mask, state->graph_idx); | 45 | state->stack_mask, state->graph_idx); |
| 46 | 46 | ||
| 47 | for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { | 47 | for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp; |
| 48 | sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { | ||
| 48 | if (get_stack_info(sp, state->task, &stack_info, &visit_mask)) | 49 | if (get_stack_info(sp, state->task, &stack_info, &visit_mask)) |
| 49 | break; | 50 | break; |
| 50 | 51 | ||
| @@ -174,6 +175,7 @@ static bool is_last_task_frame(struct unwind_state *state) | |||
| 174 | * This determines if the frame pointer actually contains an encoded pointer to | 175 | * This determines if the frame pointer actually contains an encoded pointer to |
| 175 | * pt_regs on the stack. See ENCODE_FRAME_POINTER. | 176 | * pt_regs on the stack. See ENCODE_FRAME_POINTER. |
| 176 | */ | 177 | */ |
| 178 | #ifdef CONFIG_X86_64 | ||
| 177 | static struct pt_regs *decode_frame_pointer(unsigned long *bp) | 179 | static struct pt_regs *decode_frame_pointer(unsigned long *bp) |
| 178 | { | 180 | { |
| 179 | unsigned long regs = (unsigned long)bp; | 181 | unsigned long regs = (unsigned long)bp; |
| @@ -183,6 +185,23 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp) | |||
| 183 | 185 | ||
| 184 | return (struct pt_regs *)(regs & ~0x1); | 186 | return (struct pt_regs *)(regs & ~0x1); |
| 185 | } | 187 | } |
| 188 | #else | ||
| 189 | static struct pt_regs *decode_frame_pointer(unsigned long *bp) | ||
| 190 | { | ||
| 191 | unsigned long regs = (unsigned long)bp; | ||
| 192 | |||
| 193 | if (regs & 0x80000000) | ||
| 194 | return NULL; | ||
| 195 | |||
| 196 | return (struct pt_regs *)(regs | 0x80000000); | ||
| 197 | } | ||
| 198 | #endif | ||
| 199 | |||
| 200 | #ifdef CONFIG_X86_32 | ||
| 201 | #define KERNEL_REGS_SIZE (sizeof(struct pt_regs) - 2*sizeof(long)) | ||
| 202 | #else | ||
| 203 | #define KERNEL_REGS_SIZE (sizeof(struct pt_regs)) | ||
| 204 | #endif | ||
| 186 | 205 | ||
| 187 | static bool update_stack_state(struct unwind_state *state, | 206 | static bool update_stack_state(struct unwind_state *state, |
| 188 | unsigned long *next_bp) | 207 | unsigned long *next_bp) |
| @@ -202,7 +221,7 @@ static bool update_stack_state(struct unwind_state *state, | |||
| 202 | regs = decode_frame_pointer(next_bp); | 221 | regs = decode_frame_pointer(next_bp); |
| 203 | if (regs) { | 222 | if (regs) { |
| 204 | frame = (unsigned long *)regs; | 223 | frame = (unsigned long *)regs; |
| 205 | len = regs_size(regs); | 224 | len = KERNEL_REGS_SIZE; |
| 206 | state->got_irq = true; | 225 | state->got_irq = true; |
| 207 | } else { | 226 | } else { |
| 208 | frame = next_bp; | 227 | frame = next_bp; |
| @@ -226,6 +245,14 @@ static bool update_stack_state(struct unwind_state *state, | |||
| 226 | frame < prev_frame_end) | 245 | frame < prev_frame_end) |
| 227 | return false; | 246 | return false; |
| 228 | 247 | ||
| 248 | /* | ||
| 249 | * On 32-bit with user mode regs, make sure the last two regs are safe | ||
| 250 | * to access: | ||
| 251 | */ | ||
| 252 | if (IS_ENABLED(CONFIG_X86_32) && regs && user_mode(regs) && | ||
| 253 | !on_stack(info, frame, len + 2*sizeof(long))) | ||
| 254 | return false; | ||
| 255 | |||
| 229 | /* Move state to the next frame: */ | 256 | /* Move state to the next frame: */ |
| 230 | if (regs) { | 257 | if (regs) { |
| 231 | state->regs = regs; | 258 | state->regs = regs; |
| @@ -328,6 +355,13 @@ bad_address: | |||
| 328 | state->regs->sp < (unsigned long)task_pt_regs(state->task)) | 355 | state->regs->sp < (unsigned long)task_pt_regs(state->task)) |
| 329 | goto the_end; | 356 | goto the_end; |
| 330 | 357 | ||
| 358 | /* | ||
| 359 | * There are some known frame pointer issues on 32-bit. Disable | ||
| 360 | * unwinder warnings on 32-bit until it gets objtool support. | ||
| 361 | */ | ||
| 362 | if (IS_ENABLED(CONFIG_X86_32)) | ||
| 363 | goto the_end; | ||
| 364 | |||
| 331 | if (state->regs) { | 365 | if (state->regs) { |
| 332 | printk_deferred_once(KERN_WARNING | 366 | printk_deferred_once(KERN_WARNING |
| 333 | "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n", | 367 | "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n", |
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index 3ea624452f93..3c48bc8bf08c 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig | |||
| @@ -23,6 +23,7 @@ config KVM | |||
| 23 | depends on HIGH_RES_TIMERS | 23 | depends on HIGH_RES_TIMERS |
| 24 | # for TASKSTATS/TASK_DELAY_ACCT: | 24 | # for TASKSTATS/TASK_DELAY_ACCT: |
| 25 | depends on NET && MULTIUSER | 25 | depends on NET && MULTIUSER |
| 26 | depends on X86_LOCAL_APIC | ||
| 26 | select PREEMPT_NOTIFIERS | 27 | select PREEMPT_NOTIFIERS |
| 27 | select MMU_NOTIFIER | 28 | select MMU_NOTIFIER |
| 28 | select ANON_INODES | 29 | select ANON_INODES |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index a36254cbf776..d90cdc77e077 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
| @@ -425,8 +425,10 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)); | |||
| 425 | #op " %al \n\t" \ | 425 | #op " %al \n\t" \ |
| 426 | FOP_RET | 426 | FOP_RET |
| 427 | 427 | ||
| 428 | asm(".global kvm_fastop_exception \n" | 428 | asm(".pushsection .fixup, \"ax\"\n" |
| 429 | "kvm_fastop_exception: xor %esi, %esi; ret"); | 429 | ".global kvm_fastop_exception \n" |
| 430 | "kvm_fastop_exception: xor %esi, %esi; ret\n" | ||
| 431 | ".popsection"); | ||
| 430 | 432 | ||
| 431 | FOP_START(setcc) | 433 | FOP_START(setcc) |
| 432 | FOP_SETCC(seto) | 434 | FOP_SETCC(seto) |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index eca30c1eb1d9..7a69cf053711 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
| @@ -3837,7 +3837,7 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code, | |||
| 3837 | case KVM_PV_REASON_PAGE_NOT_PRESENT: | 3837 | case KVM_PV_REASON_PAGE_NOT_PRESENT: |
| 3838 | vcpu->arch.apf.host_apf_reason = 0; | 3838 | vcpu->arch.apf.host_apf_reason = 0; |
| 3839 | local_irq_disable(); | 3839 | local_irq_disable(); |
| 3840 | kvm_async_pf_task_wait(fault_address); | 3840 | kvm_async_pf_task_wait(fault_address, 0); |
| 3841 | local_irq_enable(); | 3841 | local_irq_enable(); |
| 3842 | break; | 3842 | break; |
| 3843 | case KVM_PV_REASON_PAGE_READY: | 3843 | case KVM_PV_REASON_PAGE_READY: |
| @@ -3974,19 +3974,19 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu, | |||
| 3974 | unsigned level, unsigned gpte) | 3974 | unsigned level, unsigned gpte) |
| 3975 | { | 3975 | { |
| 3976 | /* | 3976 | /* |
| 3977 | * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set | ||
| 3978 | * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means | ||
| 3979 | * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then. | ||
| 3980 | */ | ||
| 3981 | gpte |= level - PT_PAGE_TABLE_LEVEL - 1; | ||
| 3982 | |||
| 3983 | /* | ||
| 3984 | * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. | 3977 | * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. |
| 3985 | * If it is clear, there are no large pages at this level, so clear | 3978 | * If it is clear, there are no large pages at this level, so clear |
| 3986 | * PT_PAGE_SIZE_MASK in gpte if that is the case. | 3979 | * PT_PAGE_SIZE_MASK in gpte if that is the case. |
| 3987 | */ | 3980 | */ |
| 3988 | gpte &= level - mmu->last_nonleaf_level; | 3981 | gpte &= level - mmu->last_nonleaf_level; |
| 3989 | 3982 | ||
| 3983 | /* | ||
| 3984 | * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set | ||
| 3985 | * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means | ||
| 3986 | * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then. | ||
| 3987 | */ | ||
| 3988 | gpte |= level - PT_PAGE_TABLE_LEVEL - 1; | ||
| 3989 | |||
| 3990 | return gpte & PT_PAGE_SIZE_MASK; | 3990 | return gpte & PT_PAGE_SIZE_MASK; |
| 3991 | } | 3991 | } |
| 3992 | 3992 | ||
| @@ -4555,6 +4555,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, | |||
| 4555 | 4555 | ||
| 4556 | update_permission_bitmask(vcpu, context, true); | 4556 | update_permission_bitmask(vcpu, context, true); |
| 4557 | update_pkru_bitmask(vcpu, context, true); | 4557 | update_pkru_bitmask(vcpu, context, true); |
| 4558 | update_last_nonleaf_level(vcpu, context); | ||
| 4558 | reset_rsvds_bits_mask_ept(vcpu, context, execonly); | 4559 | reset_rsvds_bits_mask_ept(vcpu, context, execonly); |
| 4559 | reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); | 4560 | reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); |
| 4560 | } | 4561 | } |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 86b68dc5a649..f18d1f8d332b 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
| @@ -334,10 +334,11 @@ retry_walk: | |||
| 334 | --walker->level; | 334 | --walker->level; |
| 335 | 335 | ||
| 336 | index = PT_INDEX(addr, walker->level); | 336 | index = PT_INDEX(addr, walker->level); |
| 337 | |||
| 338 | table_gfn = gpte_to_gfn(pte); | 337 | table_gfn = gpte_to_gfn(pte); |
| 339 | offset = index * sizeof(pt_element_t); | 338 | offset = index * sizeof(pt_element_t); |
| 340 | pte_gpa = gfn_to_gpa(table_gfn) + offset; | 339 | pte_gpa = gfn_to_gpa(table_gfn) + offset; |
| 340 | |||
| 341 | BUG_ON(walker->level < 1); | ||
| 341 | walker->table_gfn[walker->level - 1] = table_gfn; | 342 | walker->table_gfn[walker->level - 1] = table_gfn; |
| 342 | walker->pte_gpa[walker->level - 1] = pte_gpa; | 343 | walker->pte_gpa[walker->level - 1] = pte_gpa; |
| 343 | 344 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a2b804e10c95..95a01609d7ee 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -11297,7 +11297,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, | |||
| 11297 | 11297 | ||
| 11298 | /* Same as above - no reason to call set_cr4_guest_host_mask(). */ | 11298 | /* Same as above - no reason to call set_cr4_guest_host_mask(). */ |
| 11299 | vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); | 11299 | vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); |
| 11300 | kvm_set_cr4(vcpu, vmcs12->host_cr4); | 11300 | vmx_set_cr4(vcpu, vmcs12->host_cr4); |
| 11301 | 11301 | ||
| 11302 | nested_ept_uninit_mmu_context(vcpu); | 11302 | nested_ept_uninit_mmu_context(vcpu); |
| 11303 | 11303 | ||
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 72bf8c01c6e3..e1f095884386 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile | |||
| @@ -1,5 +1,12 @@ | |||
| 1 | # Kernel does not boot with instrumentation of tlb.c. | 1 | # Kernel does not boot with instrumentation of tlb.c and mem_encrypt.c |
| 2 | KCOV_INSTRUMENT_tlb.o := n | 2 | KCOV_INSTRUMENT_tlb.o := n |
| 3 | KCOV_INSTRUMENT_mem_encrypt.o := n | ||
| 4 | |||
| 5 | KASAN_SANITIZE_mem_encrypt.o := n | ||
| 6 | |||
| 7 | ifdef CONFIG_FUNCTION_TRACER | ||
| 8 | CFLAGS_REMOVE_mem_encrypt.o = -pg | ||
| 9 | endif | ||
| 3 | 10 | ||
| 4 | obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ | 11 | obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ |
| 5 | pat.o pgtable.o physaddr.o setup_nx.o tlb.o | 12 | pat.o pgtable.o physaddr.o setup_nx.o tlb.o |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 49d9778376d7..658bf0090565 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
| @@ -30,6 +30,8 @@ | |||
| 30 | 30 | ||
| 31 | atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); | 31 | atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); |
| 32 | 32 | ||
| 33 | DEFINE_STATIC_KEY_TRUE(tlb_use_lazy_mode); | ||
| 34 | |||
| 33 | static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, | 35 | static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, |
| 34 | u16 *new_asid, bool *need_flush) | 36 | u16 *new_asid, bool *need_flush) |
| 35 | { | 37 | { |
| @@ -80,7 +82,7 @@ void leave_mm(int cpu) | |||
| 80 | return; | 82 | return; |
| 81 | 83 | ||
| 82 | /* Warn if we're not lazy. */ | 84 | /* Warn if we're not lazy. */ |
| 83 | WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))); | 85 | WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy)); |
| 84 | 86 | ||
| 85 | switch_mm(NULL, &init_mm, NULL); | 87 | switch_mm(NULL, &init_mm, NULL); |
| 86 | } | 88 | } |
| @@ -142,45 +144,24 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
| 142 | __flush_tlb_all(); | 144 | __flush_tlb_all(); |
| 143 | } | 145 | } |
| 144 | #endif | 146 | #endif |
| 147 | this_cpu_write(cpu_tlbstate.is_lazy, false); | ||
| 145 | 148 | ||
| 146 | if (real_prev == next) { | 149 | if (real_prev == next) { |
| 147 | VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != | 150 | VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != |
| 148 | next->context.ctx_id); | 151 | next->context.ctx_id); |
| 149 | 152 | ||
| 150 | if (cpumask_test_cpu(cpu, mm_cpumask(next))) { | ||
| 151 | /* | ||
| 152 | * There's nothing to do: we weren't lazy, and we | ||
| 153 | * aren't changing our mm. We don't need to flush | ||
| 154 | * anything, nor do we need to update CR3, CR4, or | ||
| 155 | * LDTR. | ||
| 156 | */ | ||
| 157 | return; | ||
| 158 | } | ||
| 159 | |||
| 160 | /* Resume remote flushes and then read tlb_gen. */ | ||
| 161 | cpumask_set_cpu(cpu, mm_cpumask(next)); | ||
| 162 | next_tlb_gen = atomic64_read(&next->context.tlb_gen); | ||
| 163 | |||
| 164 | if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) < | ||
| 165 | next_tlb_gen) { | ||
| 166 | /* | ||
| 167 | * Ideally, we'd have a flush_tlb() variant that | ||
| 168 | * takes the known CR3 value as input. This would | ||
| 169 | * be faster on Xen PV and on hypothetical CPUs | ||
| 170 | * on which INVPCID is fast. | ||
| 171 | */ | ||
| 172 | this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen, | ||
| 173 | next_tlb_gen); | ||
| 174 | write_cr3(build_cr3(next, prev_asid)); | ||
| 175 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, | ||
| 176 | TLB_FLUSH_ALL); | ||
| 177 | } | ||
| 178 | |||
| 179 | /* | 153 | /* |
| 180 | * We just exited lazy mode, which means that CR4 and/or LDTR | 154 | * We don't currently support having a real mm loaded without |
| 181 | * may be stale. (Changes to the required CR4 and LDTR states | 155 | * our cpu set in mm_cpumask(). We have all the bookkeeping |
| 182 | * are not reflected in tlb_gen.) | 156 | * in place to figure out whether we would need to flush |
| 157 | * if our cpu were cleared in mm_cpumask(), but we don't | ||
| 158 | * currently use it. | ||
| 183 | */ | 159 | */ |
| 160 | if (WARN_ON_ONCE(real_prev != &init_mm && | ||
| 161 | !cpumask_test_cpu(cpu, mm_cpumask(next)))) | ||
| 162 | cpumask_set_cpu(cpu, mm_cpumask(next)); | ||
| 163 | |||
| 164 | return; | ||
| 184 | } else { | 165 | } else { |
| 185 | u16 new_asid; | 166 | u16 new_asid; |
| 186 | bool need_flush; | 167 | bool need_flush; |
| @@ -199,10 +180,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
| 199 | } | 180 | } |
| 200 | 181 | ||
| 201 | /* Stop remote flushes for the previous mm */ | 182 | /* Stop remote flushes for the previous mm */ |
| 202 | if (cpumask_test_cpu(cpu, mm_cpumask(real_prev))) | 183 | VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) && |
| 203 | cpumask_clear_cpu(cpu, mm_cpumask(real_prev)); | 184 | real_prev != &init_mm); |
| 204 | 185 | cpumask_clear_cpu(cpu, mm_cpumask(real_prev)); | |
| 205 | VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next))); | ||
| 206 | 186 | ||
| 207 | /* | 187 | /* |
| 208 | * Start remote flushes and then read tlb_gen. | 188 | * Start remote flushes and then read tlb_gen. |
| @@ -233,6 +213,37 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
| 233 | } | 213 | } |
| 234 | 214 | ||
| 235 | /* | 215 | /* |
| 216 | * enter_lazy_tlb() is a hint from the scheduler that we are entering a | ||
| 217 | * kernel thread or other context without an mm. Acceptable implementations | ||
| 218 | * include doing nothing whatsoever, switching to init_mm, or various clever | ||
| 219 | * lazy tricks to try to minimize TLB flushes. | ||
| 220 | * | ||
| 221 | * The scheduler reserves the right to call enter_lazy_tlb() several times | ||
| 222 | * in a row. It will notify us that we're going back to a real mm by | ||
| 223 | * calling switch_mm_irqs_off(). | ||
| 224 | */ | ||
| 225 | void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | ||
| 226 | { | ||
| 227 | if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm) | ||
| 228 | return; | ||
| 229 | |||
| 230 | if (static_branch_unlikely(&tlb_use_lazy_mode)) { | ||
| 231 | /* | ||
| 232 | * There's a significant optimization that may be possible | ||
| 233 | * here. We have accurate enough TLB flush tracking that we | ||
| 234 | * don't need to maintain coherence of TLB per se when we're | ||
| 235 | * lazy. We do, however, need to maintain coherence of | ||
| 236 | * paging-structure caches. We could, in principle, leave our | ||
| 237 | * old mm loaded and only switch to init_mm when | ||
| 238 | * tlb_remove_page() happens. | ||
| 239 | */ | ||
| 240 | this_cpu_write(cpu_tlbstate.is_lazy, true); | ||
| 241 | } else { | ||
| 242 | switch_mm(NULL, &init_mm, NULL); | ||
| 243 | } | ||
| 244 | } | ||
| 245 | |||
| 246 | /* | ||
| 236 | * Call this when reinitializing a CPU. It fixes the following potential | 247 | * Call this when reinitializing a CPU. It fixes the following potential |
| 237 | * problems: | 248 | * problems: |
| 238 | * | 249 | * |
| @@ -303,16 +314,20 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, | |||
| 303 | /* This code cannot presently handle being reentered. */ | 314 | /* This code cannot presently handle being reentered. */ |
| 304 | VM_WARN_ON(!irqs_disabled()); | 315 | VM_WARN_ON(!irqs_disabled()); |
| 305 | 316 | ||
| 317 | if (unlikely(loaded_mm == &init_mm)) | ||
| 318 | return; | ||
| 319 | |||
| 306 | VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) != | 320 | VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) != |
| 307 | loaded_mm->context.ctx_id); | 321 | loaded_mm->context.ctx_id); |
| 308 | 322 | ||
| 309 | if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) { | 323 | if (this_cpu_read(cpu_tlbstate.is_lazy)) { |
| 310 | /* | 324 | /* |
| 311 | * We're in lazy mode -- don't flush. We can get here on | 325 | * We're in lazy mode. We need to at least flush our |
| 312 | * remote flushes due to races and on local flushes if a | 326 | * paging-structure cache to avoid speculatively reading |
| 313 | * kernel thread coincidentally flushes the mm it's lazily | 327 | * garbage into our TLB. Since switching to init_mm is barely |
| 314 | * still using. | 328 | * slower than a minimal flush, just switch to init_mm. |
| 315 | */ | 329 | */ |
| 330 | switch_mm_irqs_off(NULL, &init_mm, NULL); | ||
| 316 | return; | 331 | return; |
| 317 | } | 332 | } |
| 318 | 333 | ||
| @@ -611,3 +626,57 @@ static int __init create_tlb_single_page_flush_ceiling(void) | |||
| 611 | return 0; | 626 | return 0; |
| 612 | } | 627 | } |
| 613 | late_initcall(create_tlb_single_page_flush_ceiling); | 628 | late_initcall(create_tlb_single_page_flush_ceiling); |
| 629 | |||
| 630 | static ssize_t tlblazy_read_file(struct file *file, char __user *user_buf, | ||
| 631 | size_t count, loff_t *ppos) | ||
| 632 | { | ||
| 633 | char buf[2]; | ||
| 634 | |||
| 635 | buf[0] = static_branch_likely(&tlb_use_lazy_mode) ? '1' : '0'; | ||
| 636 | buf[1] = '\n'; | ||
| 637 | |||
| 638 | return simple_read_from_buffer(user_buf, count, ppos, buf, 2); | ||
| 639 | } | ||
| 640 | |||
| 641 | static ssize_t tlblazy_write_file(struct file *file, | ||
| 642 | const char __user *user_buf, size_t count, loff_t *ppos) | ||
| 643 | { | ||
| 644 | bool val; | ||
| 645 | |||
| 646 | if (kstrtobool_from_user(user_buf, count, &val)) | ||
| 647 | return -EINVAL; | ||
| 648 | |||
| 649 | if (val) | ||
| 650 | static_branch_enable(&tlb_use_lazy_mode); | ||
| 651 | else | ||
| 652 | static_branch_disable(&tlb_use_lazy_mode); | ||
| 653 | |||
| 654 | return count; | ||
| 655 | } | ||
| 656 | |||
| 657 | static const struct file_operations fops_tlblazy = { | ||
| 658 | .read = tlblazy_read_file, | ||
| 659 | .write = tlblazy_write_file, | ||
| 660 | .llseek = default_llseek, | ||
| 661 | }; | ||
| 662 | |||
| 663 | static int __init init_tlb_use_lazy_mode(void) | ||
| 664 | { | ||
| 665 | if (boot_cpu_has(X86_FEATURE_PCID)) { | ||
| 666 | /* | ||
| 667 | * Heuristic: with PCID on, switching to and from | ||
| 668 | * init_mm is reasonably fast, but remote flush IPIs | ||
| 669 | * as expensive as ever, so turn off lazy TLB mode. | ||
| 670 | * | ||
| 671 | * We can't do this in setup_pcid() because static keys | ||
| 672 | * haven't been initialized yet, and it would blow up | ||
| 673 | * badly. | ||
| 674 | */ | ||
| 675 | static_branch_disable(&tlb_use_lazy_mode); | ||
| 676 | } | ||
| 677 | |||
| 678 | debugfs_create_file("tlb_use_lazy_mode", S_IRUSR | S_IWUSR, | ||
| 679 | arch_debugfs_dir, NULL, &fops_tlblazy); | ||
| 680 | return 0; | ||
| 681 | } | ||
| 682 | late_initcall(init_tlb_use_lazy_mode); | ||
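Note on the tlb.c hunks: lazy TLB state is now tracked in a per-cpu cpu_tlbstate.is_lazy flag rather than by clearing the CPU from mm_cpumask(), the flush path switches a lazy CPU over to init_mm instead of skipping the flush, and the whole mode sits behind a static key that init_tlb_use_lazy_mode() exposes through debugfs (and disables up front when PCID is available). The static-key-plus-debugfs toggle is a reusable pattern; below is a minimal sketch of just that pattern, with demo_key, demo_toggle and the demo_* handlers as made-up names rather than anything from the patch.

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/init.h>
    #include <linux/jump_label.h>
    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    static DEFINE_STATIC_KEY_TRUE(demo_key);    /* feature on by default */

    static ssize_t demo_read(struct file *file, char __user *ubuf,
                             size_t count, loff_t *ppos)
    {
            char buf[2];

            buf[0] = static_branch_likely(&demo_key) ? '1' : '0';
            buf[1] = '\n';
            return simple_read_from_buffer(ubuf, count, ppos, buf, 2);
    }

    static ssize_t demo_write(struct file *file, const char __user *ubuf,
                              size_t count, loff_t *ppos)
    {
            bool val;

            if (kstrtobool_from_user(ubuf, count, &val))
                    return -EINVAL;
            if (val)
                    static_branch_enable(&demo_key);
            else
                    static_branch_disable(&demo_key);
            return count;
    }

    static const struct file_operations demo_fops = {
            .read   = demo_read,
            .write  = demo_write,
            .llseek = default_llseek,
    };

    static int __init demo_toggle_init(void)
    {
            /* late_initcall: static keys are initialized well before this runs */
            debugfs_create_file("demo_toggle", 0600, NULL, NULL, &demo_fops);
            return 0;
    }
    late_initcall(demo_toggle_init);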
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 8c9573660d51..0554e8aef4d5 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c | |||
| @@ -284,9 +284,9 @@ static void emit_bpf_tail_call(u8 **pprog) | |||
| 284 | /* if (index >= array->map.max_entries) | 284 | /* if (index >= array->map.max_entries) |
| 285 | * goto out; | 285 | * goto out; |
| 286 | */ | 286 | */ |
| 287 | EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ | 287 | EMIT2(0x89, 0xD2); /* mov edx, edx */ |
| 288 | EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ | ||
| 288 | offsetof(struct bpf_array, map.max_entries)); | 289 | offsetof(struct bpf_array, map.max_entries)); |
| 289 | EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */ | ||
| 290 | #define OFFSET1 43 /* number of bytes to jump */ | 290 | #define OFFSET1 43 /* number of bytes to jump */ |
| 291 | EMIT2(X86_JBE, OFFSET1); /* jbe out */ | 291 | EMIT2(X86_JBE, OFFSET1); /* jbe out */ |
| 292 | label1 = cnt; | 292 | label1 = cnt; |
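Note on the bpf_jit_comp.c hunk: the old sequence loaded eight bytes starting at map.max_entries (a 32-bit field) into RAX and did a 64-bit compare; the new one emits "mov edx, edx" to zero-extend the 32-bit index already in EDX/RDX and then compares the 32-bit max_entries field straight from memory. A rough C rendering of the emitted bounds check, for orientation only:

    #include <linux/types.h>

    /* Rough C equivalent of the emitted instructions (illustrative only). */
    static bool demo_index_out_of_bounds(u64 index, u32 max_entries)
    {
            u64 idx = (u32)index;           /* mov edx, edx: clear the upper 32 bits */

            return max_entries <= idx;      /* cmp dword ptr [mem], edx; jbe out */
    }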
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 0e7ef69e8531..d669e9d89001 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
| @@ -93,11 +93,11 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int), | |||
| 93 | int rc; | 93 | int rc; |
| 94 | 94 | ||
| 95 | rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE, | 95 | rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE, |
| 96 | "x86/xen/hvm_guest:prepare", | 96 | "x86/xen/guest:prepare", |
| 97 | cpu_up_prepare_cb, cpu_dead_cb); | 97 | cpu_up_prepare_cb, cpu_dead_cb); |
| 98 | if (rc >= 0) { | 98 | if (rc >= 0) { |
| 99 | rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, | 99 | rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, |
| 100 | "x86/xen/hvm_guest:online", | 100 | "x86/xen/guest:online", |
| 101 | xen_cpu_up_online, NULL); | 101 | xen_cpu_up_online, NULL); |
| 102 | if (rc < 0) | 102 | if (rc < 0) |
| 103 | cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE); | 103 | cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE); |
diff --git a/block/bio.c b/block/bio.c index b38e962fa83e..101c2a9b5481 100644 --- a/block/bio.c +++ b/block/bio.c | |||
| @@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q, | |||
| 1239 | */ | 1239 | */ |
| 1240 | bmd->is_our_pages = map_data ? 0 : 1; | 1240 | bmd->is_our_pages = map_data ? 0 : 1; |
| 1241 | memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs); | 1241 | memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs); |
| 1242 | iov_iter_init(&bmd->iter, iter->type, bmd->iov, | 1242 | bmd->iter = *iter; |
| 1243 | iter->nr_segs, iter->count); | 1243 | bmd->iter.iov = bmd->iov; |
| 1244 | 1244 | ||
| 1245 | ret = -ENOMEM; | 1245 | ret = -ENOMEM; |
| 1246 | bio = bio_kmalloc(gfp_mask, nr_pages); | 1246 | bio = bio_kmalloc(gfp_mask, nr_pages); |
| @@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q, | |||
| 1331 | int ret, offset; | 1331 | int ret, offset; |
| 1332 | struct iov_iter i; | 1332 | struct iov_iter i; |
| 1333 | struct iovec iov; | 1333 | struct iovec iov; |
| 1334 | struct bio_vec *bvec; | ||
| 1334 | 1335 | ||
| 1335 | iov_for_each(iov, i, *iter) { | 1336 | iov_for_each(iov, i, *iter) { |
| 1336 | unsigned long uaddr = (unsigned long) iov.iov_base; | 1337 | unsigned long uaddr = (unsigned long) iov.iov_base; |
| @@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q, | |||
| 1375 | ret = get_user_pages_fast(uaddr, local_nr_pages, | 1376 | ret = get_user_pages_fast(uaddr, local_nr_pages, |
| 1376 | (iter->type & WRITE) != WRITE, | 1377 | (iter->type & WRITE) != WRITE, |
| 1377 | &pages[cur_page]); | 1378 | &pages[cur_page]); |
| 1378 | if (ret < local_nr_pages) { | 1379 | if (unlikely(ret < local_nr_pages)) { |
| 1380 | for (j = cur_page; j < page_limit; j++) { | ||
| 1381 | if (!pages[j]) | ||
| 1382 | break; | ||
| 1383 | put_page(pages[j]); | ||
| 1384 | } | ||
| 1379 | ret = -EFAULT; | 1385 | ret = -EFAULT; |
| 1380 | goto out_unmap; | 1386 | goto out_unmap; |
| 1381 | } | 1387 | } |
| @@ -1383,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q, | |||
| 1383 | offset = offset_in_page(uaddr); | 1389 | offset = offset_in_page(uaddr); |
| 1384 | for (j = cur_page; j < page_limit; j++) { | 1390 | for (j = cur_page; j < page_limit; j++) { |
| 1385 | unsigned int bytes = PAGE_SIZE - offset; | 1391 | unsigned int bytes = PAGE_SIZE - offset; |
| 1392 | unsigned short prev_bi_vcnt = bio->bi_vcnt; | ||
| 1386 | 1393 | ||
| 1387 | if (len <= 0) | 1394 | if (len <= 0) |
| 1388 | break; | 1395 | break; |
| @@ -1397,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q, | |||
| 1397 | bytes) | 1404 | bytes) |
| 1398 | break; | 1405 | break; |
| 1399 | 1406 | ||
| 1407 | /* | ||
| 1408 | * check if vector was merged with previous | ||
| 1409 | * drop page reference if needed | ||
| 1410 | */ | ||
| 1411 | if (bio->bi_vcnt == prev_bi_vcnt) | ||
| 1412 | put_page(pages[j]); | ||
| 1413 | |||
| 1400 | len -= bytes; | 1414 | len -= bytes; |
| 1401 | offset = 0; | 1415 | offset = 0; |
| 1402 | } | 1416 | } |
| @@ -1423,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q, | |||
| 1423 | return bio; | 1437 | return bio; |
| 1424 | 1438 | ||
| 1425 | out_unmap: | 1439 | out_unmap: |
| 1426 | for (j = 0; j < nr_pages; j++) { | 1440 | bio_for_each_segment_all(bvec, bio, j) { |
| 1427 | if (!pages[j]) | 1441 | put_page(bvec->bv_page); |
| 1428 | break; | ||
| 1429 | put_page(pages[j]); | ||
| 1430 | } | 1442 | } |
| 1431 | out: | 1443 | out: |
| 1432 | kfree(pages); | 1444 | kfree(pages); |
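Note on the bio.c hunks: two page-reference imbalances in bio_map_user_iov() are closed. When get_user_pages_fast() pins fewer pages than requested, the pages it did pin are now released before bailing out; and when bio_add_pc_page() merges a page into the previous bio_vec (visible as bi_vcnt not growing), the extra reference taken by GUP is dropped, because the bio keeps only one reference per segment. The error path then releases pages by walking the bio's own segments instead of the raw pages[] array. A hedged sketch of the merge detection; the put_page() on failure is a simplification of what the real loop does:

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/mm.h>

    /* Illustrative: end up with exactly one page reference per bio segment.
     * The caller is assumed to hold one reference on 'page' (e.g. from
     * get_user_pages_fast()). */
    static void demo_add_pinned_page(struct request_queue *q, struct bio *bio,
                                     struct page *page, unsigned int bytes,
                                     unsigned int offset)
    {
            unsigned short prev_bi_vcnt = bio->bi_vcnt;

            if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
                    put_page(page);         /* nothing added, drop our reference */
                    return;
            }

            if (bio->bi_vcnt == prev_bi_vcnt)
                    put_page(page);         /* merged into the previous segment */
    }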
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index 980e73095643..de294d775acf 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c | |||
| @@ -815,10 +815,14 @@ int blk_mq_debugfs_register(struct request_queue *q) | |||
| 815 | goto err; | 815 | goto err; |
| 816 | 816 | ||
| 817 | /* | 817 | /* |
| 818 | * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir | 818 | * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir |
| 819 | * didn't exist yet (because we don't know what to name the directory | 819 | * didn't exist yet (because we don't know what to name the directory |
| 820 | * until the queue is registered to a gendisk). | 820 | * until the queue is registered to a gendisk). |
| 821 | */ | 821 | */ |
| 822 | if (q->elevator && !q->sched_debugfs_dir) | ||
| 823 | blk_mq_debugfs_register_sched(q); | ||
| 824 | |||
| 825 | /* Similarly, blk_mq_init_hctx() couldn't do this previously. */ | ||
| 822 | queue_for_each_hw_ctx(q, hctx, i) { | 826 | queue_for_each_hw_ctx(q, hctx, i) { |
| 823 | if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx)) | 827 | if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx)) |
| 824 | goto err; | 828 | goto err; |
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 0fea76aa0f3f..17816a028dcb 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
| @@ -1911,11 +1911,11 @@ static void throtl_upgrade_state(struct throtl_data *td) | |||
| 1911 | 1911 | ||
| 1912 | tg->disptime = jiffies - 1; | 1912 | tg->disptime = jiffies - 1; |
| 1913 | throtl_select_dispatch(sq); | 1913 | throtl_select_dispatch(sq); |
| 1914 | throtl_schedule_next_dispatch(sq, false); | 1914 | throtl_schedule_next_dispatch(sq, true); |
| 1915 | } | 1915 | } |
| 1916 | rcu_read_unlock(); | 1916 | rcu_read_unlock(); |
| 1917 | throtl_select_dispatch(&td->service_queue); | 1917 | throtl_select_dispatch(&td->service_queue); |
| 1918 | throtl_schedule_next_dispatch(&td->service_queue, false); | 1918 | throtl_schedule_next_dispatch(&td->service_queue, true); |
| 1919 | queue_work(kthrotld_workqueue, &td->dispatch_work); | 1919 | queue_work(kthrotld_workqueue, &td->dispatch_work); |
| 1920 | } | 1920 | } |
| 1921 | 1921 | ||
diff --git a/block/bsg-lib.c b/block/bsg-lib.c index dbddff8174e5..15d25ccd51a5 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c | |||
| @@ -207,20 +207,34 @@ static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp) | |||
| 207 | struct bsg_job *job = blk_mq_rq_to_pdu(req); | 207 | struct bsg_job *job = blk_mq_rq_to_pdu(req); |
| 208 | struct scsi_request *sreq = &job->sreq; | 208 | struct scsi_request *sreq = &job->sreq; |
| 209 | 209 | ||
| 210 | /* called right after the request is allocated for the request_queue */ | ||
| 211 | |||
| 212 | sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp); | ||
| 213 | if (!sreq->sense) | ||
| 214 | return -ENOMEM; | ||
| 215 | |||
| 216 | return 0; | ||
| 217 | } | ||
| 218 | |||
| 219 | static void bsg_initialize_rq(struct request *req) | ||
| 220 | { | ||
| 221 | struct bsg_job *job = blk_mq_rq_to_pdu(req); | ||
| 222 | struct scsi_request *sreq = &job->sreq; | ||
| 223 | void *sense = sreq->sense; | ||
| 224 | |||
| 225 | /* called right before the request is given to the request_queue user */ | ||
| 226 | |||
| 210 | memset(job, 0, sizeof(*job)); | 227 | memset(job, 0, sizeof(*job)); |
| 211 | 228 | ||
| 212 | scsi_req_init(sreq); | 229 | scsi_req_init(sreq); |
| 230 | |||
| 231 | sreq->sense = sense; | ||
| 213 | sreq->sense_len = SCSI_SENSE_BUFFERSIZE; | 232 | sreq->sense_len = SCSI_SENSE_BUFFERSIZE; |
| 214 | sreq->sense = kzalloc(sreq->sense_len, gfp); | ||
| 215 | if (!sreq->sense) | ||
| 216 | return -ENOMEM; | ||
| 217 | 233 | ||
| 218 | job->req = req; | 234 | job->req = req; |
| 219 | job->reply = sreq->sense; | 235 | job->reply = sense; |
| 220 | job->reply_len = sreq->sense_len; | 236 | job->reply_len = sreq->sense_len; |
| 221 | job->dd_data = job + 1; | 237 | job->dd_data = job + 1; |
| 222 | |||
| 223 | return 0; | ||
| 224 | } | 238 | } |
| 225 | 239 | ||
| 226 | static void bsg_exit_rq(struct request_queue *q, struct request *req) | 240 | static void bsg_exit_rq(struct request_queue *q, struct request *req) |
| @@ -251,6 +265,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name, | |||
| 251 | q->cmd_size = sizeof(struct bsg_job) + dd_job_size; | 265 | q->cmd_size = sizeof(struct bsg_job) + dd_job_size; |
| 252 | q->init_rq_fn = bsg_init_rq; | 266 | q->init_rq_fn = bsg_init_rq; |
| 253 | q->exit_rq_fn = bsg_exit_rq; | 267 | q->exit_rq_fn = bsg_exit_rq; |
| 268 | q->initialize_rq_fn = bsg_initialize_rq; | ||
| 254 | q->request_fn = bsg_request_fn; | 269 | q->request_fn = bsg_request_fn; |
| 255 | 270 | ||
| 256 | ret = blk_init_allocated_queue(q); | 271 | ret = blk_init_allocated_queue(q); |
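Note on the bsg-lib.c hunk: request setup is split between ->init_rq_fn, which runs once when the request is allocated and may sleep (so the sense buffer is kzalloc'ed there), and the new ->initialize_rq_fn, which runs each time the request is handed out and only re-zeroes the pdu, taking care not to lose the buffer allocated earlier. The preserve-across-memset pattern on its own, with struct demo_job as a made-up stand-in for the real types:

    #include <linux/string.h>

    struct demo_sreq { void *sense; unsigned int sense_len; };
    struct demo_job  { struct demo_sreq sreq; void *reply; unsigned int reply_len; };

    /* Illustrative: reset a per-request pdu on every dispatch without losing
     * the buffer that was allocated once at request-allocation time. */
    static void demo_initialize_job(struct demo_job *job)
    {
            void *sense = job->sreq.sense;  /* allocated in ->init_rq_fn */

            memset(job, 0, sizeof(*job));   /* this wipes the pointer too... */
            job->sreq.sense = sense;        /* ...so restore it afterwards */
            job->sreq.sense_len = 96;       /* SCSI_SENSE_BUFFERSIZE in the real code */
            job->reply = sense;
            job->reply_len = job->sreq.sense_len;
    }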
diff --git a/crypto/shash.c b/crypto/shash.c index 5e31c8d776df..325a14da5827 100644 --- a/crypto/shash.c +++ b/crypto/shash.c | |||
| @@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, | |||
| 41 | int err; | 41 | int err; |
| 42 | 42 | ||
| 43 | absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); | 43 | absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); |
| 44 | buffer = kmalloc(absize, GFP_KERNEL); | 44 | buffer = kmalloc(absize, GFP_ATOMIC); |
| 45 | if (!buffer) | 45 | if (!buffer) |
| 46 | return -ENOMEM; | 46 | return -ENOMEM; |
| 47 | 47 | ||
| @@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req) | |||
| 275 | 275 | ||
| 276 | int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) | 276 | int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) |
| 277 | { | 277 | { |
| 278 | struct scatterlist *sg = req->src; | ||
| 279 | unsigned int offset = sg->offset; | ||
| 280 | unsigned int nbytes = req->nbytes; | 278 | unsigned int nbytes = req->nbytes; |
| 279 | struct scatterlist *sg; | ||
| 280 | unsigned int offset; | ||
| 281 | int err; | 281 | int err; |
| 282 | 282 | ||
| 283 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { | 283 | if (nbytes && |
| 284 | (sg = req->src, offset = sg->offset, | ||
| 285 | nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) { | ||
| 284 | void *data; | 286 | void *data; |
| 285 | 287 | ||
| 286 | data = kmap_atomic(sg_page(sg)); | 288 | data = kmap_atomic(sg_page(sg)); |
diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 4faa0fd53b0c..d5692e35fab1 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c | |||
| @@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk) | |||
| 426 | 426 | ||
| 427 | static int skcipher_walk_first(struct skcipher_walk *walk) | 427 | static int skcipher_walk_first(struct skcipher_walk *walk) |
| 428 | { | 428 | { |
| 429 | walk->nbytes = 0; | ||
| 430 | |||
| 431 | if (WARN_ON_ONCE(in_irq())) | 429 | if (WARN_ON_ONCE(in_irq())) |
| 432 | return -EDEADLK; | 430 | return -EDEADLK; |
| 433 | 431 | ||
| 434 | if (unlikely(!walk->total)) | ||
| 435 | return 0; | ||
| 436 | |||
| 437 | walk->buffer = NULL; | 432 | walk->buffer = NULL; |
| 438 | if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { | 433 | if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { |
| 439 | int err = skcipher_copy_iv(walk); | 434 | int err = skcipher_copy_iv(walk); |
| @@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, | |||
| 452 | { | 447 | { |
| 453 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | 448 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
| 454 | 449 | ||
| 450 | walk->total = req->cryptlen; | ||
| 451 | walk->nbytes = 0; | ||
| 452 | |||
| 453 | if (unlikely(!walk->total)) | ||
| 454 | return 0; | ||
| 455 | |||
| 455 | scatterwalk_start(&walk->in, req->src); | 456 | scatterwalk_start(&walk->in, req->src); |
| 456 | scatterwalk_start(&walk->out, req->dst); | 457 | scatterwalk_start(&walk->out, req->dst); |
| 457 | 458 | ||
| 458 | walk->total = req->cryptlen; | ||
| 459 | walk->iv = req->iv; | 459 | walk->iv = req->iv; |
| 460 | walk->oiv = req->iv; | 460 | walk->oiv = req->iv; |
| 461 | 461 | ||
| @@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk, | |||
| 509 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 509 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
| 510 | int err; | 510 | int err; |
| 511 | 511 | ||
| 512 | walk->nbytes = 0; | ||
| 513 | |||
| 514 | if (unlikely(!walk->total)) | ||
| 515 | return 0; | ||
| 516 | |||
| 512 | walk->flags &= ~SKCIPHER_WALK_PHYS; | 517 | walk->flags &= ~SKCIPHER_WALK_PHYS; |
| 513 | 518 | ||
| 514 | scatterwalk_start(&walk->in, req->src); | 519 | scatterwalk_start(&walk->in, req->src); |
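Note on the skcipher.c hunks: the zero-length check is moved ahead of scatterwalk_start(), and walk->total/walk->nbytes are set before the first early return. An empty request therefore never dereferences req->src or req->dst, and any later cleanup sees a walk whose fields are at least initialized. The ordering, reduced to a skeleton with simplified stand-in types:

    #include <linux/types.h>

    /* Illustrative skeleton: make the walk state valid before any early
     * return, and only touch the scatterlists when there is data. */
    struct demo_walk { unsigned int total; unsigned int nbytes; };

    static bool demo_walk_start(struct demo_walk *walk, unsigned int cryptlen)
    {
            walk->total  = cryptlen;
            walk->nbytes = 0;               /* cleanup paths may rely on this */

            if (!walk->total)
                    return false;           /* empty request: src/dst never used */

            /* ... scatterwalk_start() and the real walk begin here ... */
            return true;
    }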
diff --git a/crypto/xts.c b/crypto/xts.c index d86c11a8c882..e31828ed0046 100644 --- a/crypto/xts.c +++ b/crypto/xts.c | |||
| @@ -554,8 +554,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 554 | ctx->name[len - 1] = 0; | 554 | ctx->name[len - 1] = 0; |
| 555 | 555 | ||
| 556 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, | 556 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
| 557 | "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) | 557 | "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) { |
| 558 | return -ENAMETOOLONG; | 558 | err = -ENAMETOOLONG; |
| 559 | goto err_drop_spawn; | ||
| 560 | } | ||
| 559 | } else | 561 | } else |
| 560 | goto err_drop_spawn; | 562 | goto err_drop_spawn; |
| 561 | 563 | ||
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 9565d572f8dd..de56394dd161 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c | |||
| @@ -1178,12 +1178,44 @@ dev_put: | |||
| 1178 | return ret; | 1178 | return ret; |
| 1179 | } | 1179 | } |
| 1180 | 1180 | ||
| 1181 | static bool __init iort_enable_acs(struct acpi_iort_node *iort_node) | ||
| 1182 | { | ||
| 1183 | if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) { | ||
| 1184 | struct acpi_iort_node *parent; | ||
| 1185 | struct acpi_iort_id_mapping *map; | ||
| 1186 | int i; | ||
| 1187 | |||
| 1188 | map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node, | ||
| 1189 | iort_node->mapping_offset); | ||
| 1190 | |||
| 1191 | for (i = 0; i < iort_node->mapping_count; i++, map++) { | ||
| 1192 | if (!map->output_reference) | ||
| 1193 | continue; | ||
| 1194 | |||
| 1195 | parent = ACPI_ADD_PTR(struct acpi_iort_node, | ||
| 1196 | iort_table, map->output_reference); | ||
| 1197 | /* | ||
| 1198 | * If we detect a RC->SMMU mapping, make sure | ||
| 1199 | * we enable ACS on the system. | ||
| 1200 | */ | ||
| 1201 | if ((parent->type == ACPI_IORT_NODE_SMMU) || | ||
| 1202 | (parent->type == ACPI_IORT_NODE_SMMU_V3)) { | ||
| 1203 | pci_request_acs(); | ||
| 1204 | return true; | ||
| 1205 | } | ||
| 1206 | } | ||
| 1207 | } | ||
| 1208 | |||
| 1209 | return false; | ||
| 1210 | } | ||
| 1211 | |||
| 1181 | static void __init iort_init_platform_devices(void) | 1212 | static void __init iort_init_platform_devices(void) |
| 1182 | { | 1213 | { |
| 1183 | struct acpi_iort_node *iort_node, *iort_end; | 1214 | struct acpi_iort_node *iort_node, *iort_end; |
| 1184 | struct acpi_table_iort *iort; | 1215 | struct acpi_table_iort *iort; |
| 1185 | struct fwnode_handle *fwnode; | 1216 | struct fwnode_handle *fwnode; |
| 1186 | int i, ret; | 1217 | int i, ret; |
| 1218 | bool acs_enabled = false; | ||
| 1187 | 1219 | ||
| 1188 | /* | 1220 | /* |
| 1189 | * iort_table and iort both point to the start of IORT table, but | 1221 | * iort_table and iort both point to the start of IORT table, but |
| @@ -1203,6 +1235,9 @@ static void __init iort_init_platform_devices(void) | |||
| 1203 | return; | 1235 | return; |
| 1204 | } | 1236 | } |
| 1205 | 1237 | ||
| 1238 | if (!acs_enabled) | ||
| 1239 | acs_enabled = iort_enable_acs(iort_node); | ||
| 1240 | |||
| 1206 | if ((iort_node->type == ACPI_IORT_NODE_SMMU) || | 1241 | if ((iort_node->type == ACPI_IORT_NODE_SMMU) || |
| 1207 | (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) { | 1242 | (iort_node->type == ACPI_IORT_NODE_SMMU_V3)) { |
| 1208 | 1243 | ||
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 3fb8ff513461..e26ea209b63e 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c | |||
| @@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data, | |||
| 571 | * } | 571 | * } |
| 572 | * } | 572 | * } |
| 573 | * | 573 | * |
| 574 | * Calling this function with index %2 return %-ENOENT and with index %3 | 574 | * Calling this function with index %2 or index %3 return %-ENOENT. If the |
| 575 | * returns the last entry. If the property does not contain any more values | 575 | * property does not contain any more values %-ENOENT is returned. The NULL |
| 576 | * %-ENODATA is returned. The NULL entry must be single integer and | 576 | * entry must be single integer and preferably contain value %0. |
| 577 | * preferably contain value %0. | ||
| 578 | * | 577 | * |
| 579 | * Return: %0 on success, negative error code on failure. | 578 | * Return: %0 on success, negative error code on failure. |
| 580 | */ | 579 | */ |
| @@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
| 590 | 589 | ||
| 591 | data = acpi_device_data_of_node(fwnode); | 590 | data = acpi_device_data_of_node(fwnode); |
| 592 | if (!data) | 591 | if (!data) |
| 593 | return -EINVAL; | 592 | return -ENOENT; |
| 594 | 593 | ||
| 595 | ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj); | 594 | ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj); |
| 596 | if (ret) | 595 | if (ret) |
| 597 | return ret; | 596 | return ret == -EINVAL ? -ENOENT : -EINVAL; |
| 598 | 597 | ||
| 599 | /* | 598 | /* |
| 600 | * The simplest case is when the value is a single reference. Just | 599 | * The simplest case is when the value is a single reference. Just |
| @@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
| 606 | 605 | ||
| 607 | ret = acpi_bus_get_device(obj->reference.handle, &device); | 606 | ret = acpi_bus_get_device(obj->reference.handle, &device); |
| 608 | if (ret) | 607 | if (ret) |
| 609 | return ret; | 608 | return ret == -ENODEV ? -EINVAL : ret; |
| 610 | 609 | ||
| 611 | args->adev = device; | 610 | args->adev = device; |
| 612 | args->nargs = 0; | 611 | args->nargs = 0; |
| @@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
| 622 | * The index argument is then used to determine which reference | 621 | * The index argument is then used to determine which reference |
| 623 | * the caller wants (along with the arguments). | 622 | * the caller wants (along with the arguments). |
| 624 | */ | 623 | */ |
| 625 | if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count) | 624 | if (obj->type != ACPI_TYPE_PACKAGE) |
| 626 | return -EPROTO; | 625 | return -EINVAL; |
| 626 | if (index >= obj->package.count) | ||
| 627 | return -ENOENT; | ||
| 627 | 628 | ||
| 628 | element = obj->package.elements; | 629 | element = obj->package.elements; |
| 629 | end = element + obj->package.count; | 630 | end = element + obj->package.count; |
| @@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
| 635 | ret = acpi_bus_get_device(element->reference.handle, | 636 | ret = acpi_bus_get_device(element->reference.handle, |
| 636 | &device); | 637 | &device); |
| 637 | if (ret) | 638 | if (ret) |
| 638 | return -ENODEV; | 639 | return -EINVAL; |
| 639 | 640 | ||
| 640 | nargs = 0; | 641 | nargs = 0; |
| 641 | element++; | 642 | element++; |
| @@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
| 649 | else if (type == ACPI_TYPE_LOCAL_REFERENCE) | 650 | else if (type == ACPI_TYPE_LOCAL_REFERENCE) |
| 650 | break; | 651 | break; |
| 651 | else | 652 | else |
| 652 | return -EPROTO; | 653 | return -EINVAL; |
| 653 | } | 654 | } |
| 654 | 655 | ||
| 655 | if (nargs > MAX_ACPI_REFERENCE_ARGS) | 656 | if (nargs > MAX_ACPI_REFERENCE_ARGS) |
| 656 | return -EPROTO; | 657 | return -EINVAL; |
| 657 | 658 | ||
| 658 | if (idx == index) { | 659 | if (idx == index) { |
| 659 | args->adev = device; | 660 | args->adev = device; |
| @@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
| 670 | return -ENOENT; | 671 | return -ENOENT; |
| 671 | element++; | 672 | element++; |
| 672 | } else { | 673 | } else { |
| 673 | return -EPROTO; | 674 | return -EINVAL; |
| 674 | } | 675 | } |
| 675 | 676 | ||
| 676 | idx++; | 677 | idx++; |
| 677 | } | 678 | } |
| 678 | 679 | ||
| 679 | return -ENODATA; | 680 | return -ENOENT; |
| 680 | } | 681 | } |
| 681 | EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); | 682 | EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); |
| 682 | 683 | ||
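Note on the acpi/property.c hunks: the return codes of __acpi_node_get_property_reference() are aligned with the fwnode contract documented in the drivers/base/property.c hunk below -- %-ENOENT now means the reference simply is not there (property missing, index past the last entry, or an empty integer-0 slot), while %-EINVAL means the property exists but could not be parsed. A hedged caller-side example via the fwnode_property_get_reference_args() wrapper; the property name "demo-gpios" is made up:

    #include <linux/property.h>

    /* Illustrative: treat a missing reference as optional, but propagate
     * genuine parse errors. */
    static int demo_get_optional_ref(struct fwnode_handle *fwnode)
    {
            struct fwnode_reference_args args;
            int ret;

            ret = fwnode_property_get_reference_args(fwnode, "demo-gpios", NULL,
                                                     0, 0, &args);
            if (ret == -ENOENT)
                    return 0;               /* absent or empty slot: nothing to do */
            if (ret)
                    return ret;             /* -EINVAL etc.: present but malformed */

            /* ... use args.fwnode and args.args[] here ... */
            fwnode_handle_put(args.fwnode);
            return 0;
    }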
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 8fe165844e47..064f5e31ec55 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c | |||
| @@ -913,6 +913,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | |||
| 913 | struct binder_alloc *alloc; | 913 | struct binder_alloc *alloc; |
| 914 | uintptr_t page_addr; | 914 | uintptr_t page_addr; |
| 915 | size_t index; | 915 | size_t index; |
| 916 | struct vm_area_struct *vma; | ||
| 916 | 917 | ||
| 917 | alloc = page->alloc; | 918 | alloc = page->alloc; |
| 918 | if (!mutex_trylock(&alloc->mutex)) | 919 | if (!mutex_trylock(&alloc->mutex)) |
| @@ -923,16 +924,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | |||
| 923 | 924 | ||
| 924 | index = page - alloc->pages; | 925 | index = page - alloc->pages; |
| 925 | page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; | 926 | page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; |
| 926 | if (alloc->vma) { | 927 | vma = alloc->vma; |
| 928 | if (vma) { | ||
| 927 | mm = get_task_mm(alloc->tsk); | 929 | mm = get_task_mm(alloc->tsk); |
| 928 | if (!mm) | 930 | if (!mm) |
| 929 | goto err_get_task_mm_failed; | 931 | goto err_get_task_mm_failed; |
| 930 | if (!down_write_trylock(&mm->mmap_sem)) | 932 | if (!down_write_trylock(&mm->mmap_sem)) |
| 931 | goto err_down_write_mmap_sem_failed; | 933 | goto err_down_write_mmap_sem_failed; |
| 934 | } | ||
| 935 | |||
| 936 | list_lru_isolate(lru, item); | ||
| 937 | spin_unlock(lock); | ||
| 932 | 938 | ||
| 939 | if (vma) { | ||
| 933 | trace_binder_unmap_user_start(alloc, index); | 940 | trace_binder_unmap_user_start(alloc, index); |
| 934 | 941 | ||
| 935 | zap_page_range(alloc->vma, | 942 | zap_page_range(vma, |
| 936 | page_addr + alloc->user_buffer_offset, | 943 | page_addr + alloc->user_buffer_offset, |
| 937 | PAGE_SIZE); | 944 | PAGE_SIZE); |
| 938 | 945 | ||
| @@ -950,13 +957,12 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | |||
| 950 | 957 | ||
| 951 | trace_binder_unmap_kernel_end(alloc, index); | 958 | trace_binder_unmap_kernel_end(alloc, index); |
| 952 | 959 | ||
| 953 | list_lru_isolate(lru, item); | 960 | spin_lock(lock); |
| 954 | |||
| 955 | mutex_unlock(&alloc->mutex); | 961 | mutex_unlock(&alloc->mutex); |
| 956 | return LRU_REMOVED; | 962 | return LRU_REMOVED_RETRY; |
| 957 | 963 | ||
| 958 | err_down_write_mmap_sem_failed: | 964 | err_down_write_mmap_sem_failed: |
| 959 | mmput(mm); | 965 | mmput_async(mm); |
| 960 | err_get_task_mm_failed: | 966 | err_get_task_mm_failed: |
| 961 | err_page_already_freed: | 967 | err_page_already_freed: |
| 962 | mutex_unlock(&alloc->mutex); | 968 | mutex_unlock(&alloc->mutex); |
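Note on the binder_alloc.c hunks: the shrinker callback now isolates the LRU item and drops the lru lock before the expensive zap/unmap work, re-takes the lock afterwards, and returns LRU_REMOVED_RETRY so the walker knows the lock was released in between; the final mm reference is dropped with mmput_async() so this path never ends up doing the heavyweight mm teardown itself. The isolate-then-unlock shape in isolation, with the binder-specific work elided:

    #include <linux/list_lru.h>
    #include <linux/spinlock.h>

    /* Illustrative list_lru walk callback that drops the lru lock around
     * slow work and reports that with LRU_REMOVED_RETRY. */
    static enum lru_status demo_free_one(struct list_head *item,
                                         struct list_lru_one *lru,
                                         spinlock_t *lock, void *cb_arg)
    {
            list_lru_isolate(lru, item);    /* unhook while the lock is held */
            spin_unlock(lock);

            /* ... expensive teardown (unmap, free, ...) happens here ... */

            spin_lock(lock);                /* the walker expects the lock back */
            return LRU_REMOVED_RETRY;       /* removed, and the lock was dropped */
    }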
diff --git a/drivers/base/node.c b/drivers/base/node.c index 3855902f2c5b..aae2402f3791 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
| @@ -27,13 +27,21 @@ static struct bus_type node_subsys = { | |||
| 27 | 27 | ||
| 28 | static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf) | 28 | static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf) |
| 29 | { | 29 | { |
| 30 | ssize_t n; | ||
| 31 | cpumask_var_t mask; | ||
| 30 | struct node *node_dev = to_node(dev); | 32 | struct node *node_dev = to_node(dev); |
| 31 | const struct cpumask *mask = cpumask_of_node(node_dev->dev.id); | ||
| 32 | 33 | ||
| 33 | /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */ | 34 | /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */ |
| 34 | BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); | 35 | BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); |
| 35 | 36 | ||
| 36 | return cpumap_print_to_pagebuf(list, buf, mask); | 37 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
| 38 | return 0; | ||
| 39 | |||
| 40 | cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask); | ||
| 41 | n = cpumap_print_to_pagebuf(list, buf, mask); | ||
| 42 | free_cpumask_var(mask); | ||
| 43 | |||
| 44 | return n; | ||
| 37 | } | 45 | } |
| 38 | 46 | ||
| 39 | static inline ssize_t node_read_cpumask(struct device *dev, | 47 | static inline ssize_t node_read_cpumask(struct device *dev, |
diff --git a/drivers/base/property.c b/drivers/base/property.c index d0b65bbe7e15..7ed99c1b2a8b 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/phy.h> | 21 | #include <linux/phy.h> |
| 22 | 22 | ||
| 23 | struct property_set { | 23 | struct property_set { |
| 24 | struct device *dev; | ||
| 24 | struct fwnode_handle fwnode; | 25 | struct fwnode_handle fwnode; |
| 25 | const struct property_entry *properties; | 26 | const struct property_entry *properties; |
| 26 | }; | 27 | }; |
| @@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string); | |||
| 682 | * Caller is responsible to call fwnode_handle_put() on the returned | 683 | * Caller is responsible to call fwnode_handle_put() on the returned |
| 683 | * args->fwnode pointer. | 684 | * args->fwnode pointer. |
| 684 | * | 685 | * |
| 686 | * Returns: %0 on success | ||
| 687 | * %-ENOENT when the index is out of bounds, the index has an empty | ||
| 688 | * reference or the property was not found | ||
| 689 | * %-EINVAL on parse error | ||
| 685 | */ | 690 | */ |
| 686 | int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, | 691 | int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, |
| 687 | const char *prop, const char *nargs_prop, | 692 | const char *prop, const char *nargs_prop, |
| @@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset) | |||
| 891 | void device_remove_properties(struct device *dev) | 896 | void device_remove_properties(struct device *dev) |
| 892 | { | 897 | { |
| 893 | struct fwnode_handle *fwnode; | 898 | struct fwnode_handle *fwnode; |
| 899 | struct property_set *pset; | ||
| 894 | 900 | ||
| 895 | fwnode = dev_fwnode(dev); | 901 | fwnode = dev_fwnode(dev); |
| 896 | if (!fwnode) | 902 | if (!fwnode) |
| @@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev) | |||
| 900 | * the pset. If there is no real firmware node (ACPI/DT) primary | 906 | * the pset. If there is no real firmware node (ACPI/DT) primary |
| 901 | * will hold the pset. | 907 | * will hold the pset. |
| 902 | */ | 908 | */ |
| 903 | if (is_pset_node(fwnode)) { | 909 | pset = to_pset_node(fwnode); |
| 910 | if (pset) { | ||
| 904 | set_primary_fwnode(dev, NULL); | 911 | set_primary_fwnode(dev, NULL); |
| 905 | pset_free_set(to_pset_node(fwnode)); | ||
| 906 | } else { | 912 | } else { |
| 907 | fwnode = fwnode->secondary; | 913 | pset = to_pset_node(fwnode->secondary); |
| 908 | if (!IS_ERR(fwnode) && is_pset_node(fwnode)) { | 914 | if (pset && dev == pset->dev) |
| 909 | set_secondary_fwnode(dev, NULL); | 915 | set_secondary_fwnode(dev, NULL); |
| 910 | pset_free_set(to_pset_node(fwnode)); | ||
| 911 | } | ||
| 912 | } | 916 | } |
| 917 | if (pset && dev == pset->dev) | ||
| 918 | pset_free_set(pset); | ||
| 913 | } | 919 | } |
| 914 | EXPORT_SYMBOL_GPL(device_remove_properties); | 920 | EXPORT_SYMBOL_GPL(device_remove_properties); |
| 915 | 921 | ||
| @@ -938,6 +944,7 @@ int device_add_properties(struct device *dev, | |||
| 938 | 944 | ||
| 939 | p->fwnode.ops = &pset_fwnode_ops; | 945 | p->fwnode.ops = &pset_fwnode_ops; |
| 940 | set_secondary_fwnode(dev, &p->fwnode); | 946 | set_secondary_fwnode(dev, &p->fwnode); |
| 947 | p->dev = dev; | ||
| 941 | return 0; | 948 | return 0; |
| 942 | } | 949 | } |
| 943 | EXPORT_SYMBOL_GPL(device_add_properties); | 950 | EXPORT_SYMBOL_GPL(device_add_properties); |
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 4a438b8abe27..2dfe99b328f8 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig | |||
| @@ -17,7 +17,7 @@ if BLK_DEV | |||
| 17 | 17 | ||
| 18 | config BLK_DEV_NULL_BLK | 18 | config BLK_DEV_NULL_BLK |
| 19 | tristate "Null test block driver" | 19 | tristate "Null test block driver" |
| 20 | depends on CONFIGFS_FS | 20 | select CONFIGFS_FS |
| 21 | 21 | ||
| 22 | config BLK_DEV_FD | 22 | config BLK_DEV_FD |
| 23 | tristate "Normal floppy disk support" | 23 | tristate "Normal floppy disk support" |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 3684e21d543f..883dfebd3014 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
| @@ -820,9 +820,13 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 820 | * appropriate. | 820 | * appropriate. |
| 821 | */ | 821 | */ |
| 822 | ret = nbd_handle_cmd(cmd, hctx->queue_num); | 822 | ret = nbd_handle_cmd(cmd, hctx->queue_num); |
| 823 | if (ret < 0) | ||
| 824 | ret = BLK_STS_IOERR; | ||
| 825 | else if (!ret) | ||
| 826 | ret = BLK_STS_OK; | ||
| 823 | complete(&cmd->send_complete); | 827 | complete(&cmd->send_complete); |
| 824 | 828 | ||
| 825 | return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK; | 829 | return ret; |
| 826 | } | 830 | } |
| 827 | 831 | ||
| 828 | static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, | 832 | static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, |
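Note on the nbd.c hunk: the errno-style result of nbd_handle_cmd() is mapped onto a blk_status_t before the completion is signalled -- a negative errno becomes BLK_STS_IOERR, zero becomes BLK_STS_OK, and a positive value (already a blk_status_t such as BLK_STS_RESOURCE coming back from the handler) is now passed through instead of being flattened to BLK_STS_OK. The block core also has a generic errno_to_blk_status() helper; the patch open-codes the two cases it cares about. The mapping, isolated:

    #include <linux/blk_types.h>

    /* Illustrative mapping: negative errno -> I/O error, zero -> OK, and a
     * positive value is assumed to already be a blk_status_t. */
    static blk_status_t demo_to_blk_status(int ret)
    {
            if (ret < 0)
                    return BLK_STS_IOERR;
            if (!ret)
                    return BLK_STS_OK;
            return (blk_status_t)ret;       /* e.g. BLK_STS_RESOURCE */
    }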
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 2981c27d3aae..f149d3e61234 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c | |||
| @@ -766,27 +766,6 @@ static void zram_slot_unlock(struct zram *zram, u32 index) | |||
| 766 | bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value); | 766 | bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value); |
| 767 | } | 767 | } |
| 768 | 768 | ||
| 769 | static bool zram_same_page_read(struct zram *zram, u32 index, | ||
| 770 | struct page *page, | ||
| 771 | unsigned int offset, unsigned int len) | ||
| 772 | { | ||
| 773 | zram_slot_lock(zram, index); | ||
| 774 | if (unlikely(!zram_get_handle(zram, index) || | ||
| 775 | zram_test_flag(zram, index, ZRAM_SAME))) { | ||
| 776 | void *mem; | ||
| 777 | |||
| 778 | zram_slot_unlock(zram, index); | ||
| 779 | mem = kmap_atomic(page); | ||
| 780 | zram_fill_page(mem + offset, len, | ||
| 781 | zram_get_element(zram, index)); | ||
| 782 | kunmap_atomic(mem); | ||
| 783 | return true; | ||
| 784 | } | ||
| 785 | zram_slot_unlock(zram, index); | ||
| 786 | |||
| 787 | return false; | ||
| 788 | } | ||
| 789 | |||
| 790 | static void zram_meta_free(struct zram *zram, u64 disksize) | 769 | static void zram_meta_free(struct zram *zram, u64 disksize) |
| 791 | { | 770 | { |
| 792 | size_t num_pages = disksize >> PAGE_SHIFT; | 771 | size_t num_pages = disksize >> PAGE_SHIFT; |
| @@ -884,11 +863,20 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index, | |||
| 884 | zram_slot_unlock(zram, index); | 863 | zram_slot_unlock(zram, index); |
| 885 | } | 864 | } |
| 886 | 865 | ||
| 887 | if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE)) | ||
| 888 | return 0; | ||
| 889 | |||
| 890 | zram_slot_lock(zram, index); | 866 | zram_slot_lock(zram, index); |
| 891 | handle = zram_get_handle(zram, index); | 867 | handle = zram_get_handle(zram, index); |
| 868 | if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) { | ||
| 869 | unsigned long value; | ||
| 870 | void *mem; | ||
| 871 | |||
| 872 | value = handle ? zram_get_element(zram, index) : 0; | ||
| 873 | mem = kmap_atomic(page); | ||
| 874 | zram_fill_page(mem, PAGE_SIZE, value); | ||
| 875 | kunmap_atomic(mem); | ||
| 876 | zram_slot_unlock(zram, index); | ||
| 877 | return 0; | ||
| 878 | } | ||
| 879 | |||
| 892 | size = zram_get_obj_size(zram, index); | 880 | size = zram_get_obj_size(zram, index); |
| 893 | 881 | ||
| 894 | src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); | 882 | src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); |
diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c index c834f5abfc49..4c10456f8a32 100644 --- a/drivers/clk/clk-bulk.c +++ b/drivers/clk/clk-bulk.c | |||
| @@ -105,6 +105,7 @@ err: | |||
| 105 | 105 | ||
| 106 | return ret; | 106 | return ret; |
| 107 | } | 107 | } |
| 108 | EXPORT_SYMBOL_GPL(clk_bulk_prepare); | ||
| 108 | 109 | ||
| 109 | #endif /* CONFIG_HAVE_CLK_PREPARE */ | 110 | #endif /* CONFIG_HAVE_CLK_PREPARE */ |
| 110 | 111 | ||
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c index 62d7854e4b87..5970a50671b9 100644 --- a/drivers/clk/rockchip/clk-rk3128.c +++ b/drivers/clk/rockchip/clk-rk3128.c | |||
| @@ -315,13 +315,13 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = { | |||
| 315 | RK2928_CLKGATE_CON(10), 8, GFLAGS), | 315 | RK2928_CLKGATE_CON(10), 8, GFLAGS), |
| 316 | 316 | ||
| 317 | GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0, | 317 | GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0, |
| 318 | RK2928_CLKGATE_CON(10), 8, GFLAGS), | 318 | RK2928_CLKGATE_CON(10), 0, GFLAGS), |
| 319 | GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0, | 319 | GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0, |
| 320 | RK2928_CLKGATE_CON(10), 8, GFLAGS), | 320 | RK2928_CLKGATE_CON(10), 1, GFLAGS), |
| 321 | GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0, | 321 | GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0, |
| 322 | RK2928_CLKGATE_CON(10), 8, GFLAGS), | 322 | RK2928_CLKGATE_CON(10), 2, GFLAGS), |
| 323 | GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED, | 323 | GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED, |
| 324 | RK2928_CLKGATE_CON(10), 8, GFLAGS), | 324 | RK2928_CLKGATE_CON(2), 15, GFLAGS), |
| 325 | 325 | ||
| 326 | COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0, | 326 | COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0, |
| 327 | RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS, | 327 | RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS, |
| @@ -541,7 +541,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = { | |||
| 541 | GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS), | 541 | GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS), |
| 542 | GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS), | 542 | GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS), |
| 543 | 543 | ||
| 544 | GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 2, GFLAGS), | 544 | GATE(0, "pclk_pmu", "pclk_pmu_pre", 0, RK2928_CLKGATE_CON(9), 2, GFLAGS), |
| 545 | GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS), | 545 | GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS), |
| 546 | 546 | ||
| 547 | /* PD_MMC */ | 547 | /* PD_MMC */ |
| @@ -577,6 +577,8 @@ static const char *const rk3128_critical_clocks[] __initconst = { | |||
| 577 | "aclk_peri", | 577 | "aclk_peri", |
| 578 | "hclk_peri", | 578 | "hclk_peri", |
| 579 | "pclk_peri", | 579 | "pclk_peri", |
| 580 | "pclk_pmu", | ||
| 581 | "sclk_timer5", | ||
| 580 | }; | 582 | }; |
| 581 | 583 | ||
| 582 | static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np) | 584 | static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np) |
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index e40b77583c47..d8d3cb67b402 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c | |||
| @@ -294,6 +294,18 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = { | |||
| 294 | #define PLL_ENABLED (1 << 31) | 294 | #define PLL_ENABLED (1 << 31) |
| 295 | #define PLL_LOCKED (1 << 29) | 295 | #define PLL_LOCKED (1 << 29) |
| 296 | 296 | ||
| 297 | static void exynos4_clk_enable_pll(u32 reg) | ||
| 298 | { | ||
| 299 | u32 pll_con = readl(reg_base + reg); | ||
| 300 | pll_con |= PLL_ENABLED; | ||
| 301 | writel(pll_con, reg_base + reg); | ||
| 302 | |||
| 303 | while (!(pll_con & PLL_LOCKED)) { | ||
| 304 | cpu_relax(); | ||
| 305 | pll_con = readl(reg_base + reg); | ||
| 306 | } | ||
| 307 | } | ||
| 308 | |||
| 297 | static void exynos4_clk_wait_for_pll(u32 reg) | 309 | static void exynos4_clk_wait_for_pll(u32 reg) |
| 298 | { | 310 | { |
| 299 | u32 pll_con; | 311 | u32 pll_con; |
| @@ -315,6 +327,9 @@ static int exynos4_clk_suspend(void) | |||
| 315 | samsung_clk_save(reg_base, exynos4_save_pll, | 327 | samsung_clk_save(reg_base, exynos4_save_pll, |
| 316 | ARRAY_SIZE(exynos4_clk_pll_regs)); | 328 | ARRAY_SIZE(exynos4_clk_pll_regs)); |
| 317 | 329 | ||
| 330 | exynos4_clk_enable_pll(EPLL_CON0); | ||
| 331 | exynos4_clk_enable_pll(VPLL_CON0); | ||
| 332 | |||
| 318 | if (exynos4_soc == EXYNOS4210) { | 333 | if (exynos4_soc == EXYNOS4210) { |
| 319 | samsung_clk_save(reg_base, exynos4_save_soc, | 334 | samsung_clk_save(reg_base, exynos4_save_soc, |
| 320 | ARRAY_SIZE(exynos4210_clk_save)); | 335 | ARRAY_SIZE(exynos4210_clk_save)); |
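Note on the clk-exynos4.c hunk: after saving the PLL registers, the suspend hook now force-enables EPLL and VPLL and spins until each reports the LOCKED bit before suspend proceeds. The enable-then-poll shape as a sketch; the bit positions mirror the PLL_ENABLED/PLL_LOCKED definitions visible in the hunk, everything else is illustrative:

    #include <linux/io.h>
    #include <linux/types.h>

    #define DEMO_PLL_ENABLED        (1 << 31)
    #define DEMO_PLL_LOCKED         (1 << 29)

    /* Illustrative: set the enable bit, then busy-wait for lock.
     * 'con' is the memory-mapped CON0 register of the PLL in question. */
    static void demo_enable_pll_and_wait(void __iomem *con)
    {
            u32 val = readl(con);

            writel(val | DEMO_PLL_ENABLED, con);

            do {
                    cpu_relax();
                    val = readl(con);
            } while (!(val & DEMO_PLL_LOCKED));
    }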
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index d9fbbf01062b..0f9754e07719 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c | |||
| @@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx { | |||
| 349 | /* The crypto framework makes it hard to avoid this global. */ | 349 | /* The crypto framework makes it hard to avoid this global. */ |
| 350 | static struct device *artpec6_crypto_dev; | 350 | static struct device *artpec6_crypto_dev; |
| 351 | 351 | ||
| 352 | static struct dentry *dbgfs_root; | ||
| 353 | |||
| 354 | #ifdef CONFIG_FAULT_INJECTION | 352 | #ifdef CONFIG_FAULT_INJECTION |
| 355 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); | 353 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); |
| 356 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); | 354 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); |
| @@ -2984,6 +2982,8 @@ struct dbgfs_u32 { | |||
| 2984 | char *desc; | 2982 | char *desc; |
| 2985 | }; | 2983 | }; |
| 2986 | 2984 | ||
| 2985 | static struct dentry *dbgfs_root; | ||
| 2986 | |||
| 2987 | static void artpec6_crypto_init_debugfs(void) | 2987 | static void artpec6_crypto_init_debugfs(void) |
| 2988 | { | 2988 | { |
| 2989 | dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); | 2989 | dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); |
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index b585ce54a802..4835dd4a9e50 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c | |||
| @@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) | |||
| 553 | { | 553 | { |
| 554 | struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); | 554 | struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); |
| 555 | struct scatterlist sg[1], *tsg; | 555 | struct scatterlist sg[1], *tsg; |
| 556 | int err = 0, len = 0, reg, ncp; | 556 | int err = 0, len = 0, reg, ncp = 0; |
| 557 | unsigned int i; | 557 | unsigned int i; |
| 558 | const u32 *buffer = (const u32 *)rctx->buffer; | 558 | u32 *buffer = (void *)rctx->buffer; |
| 559 | 559 | ||
| 560 | rctx->sg = hdev->req->src; | 560 | rctx->sg = hdev->req->src; |
| 561 | rctx->total = hdev->req->nbytes; | 561 | rctx->total = hdev->req->nbytes; |
| @@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) | |||
| 620 | reg |= HASH_CR_DMAA; | 620 | reg |= HASH_CR_DMAA; |
| 621 | stm32_hash_write(hdev, HASH_CR, reg); | 621 | stm32_hash_write(hdev, HASH_CR, reg); |
| 622 | 622 | ||
| 623 | for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++) | 623 | if (ncp) { |
| 624 | stm32_hash_write(hdev, HASH_DIN, buffer[i]); | 624 | memset(buffer + ncp, 0, |
| 625 | 625 | DIV_ROUND_UP(ncp, sizeof(u32)) - ncp); | |
| 626 | stm32_hash_set_nblw(hdev, ncp); | 626 | writesl(hdev->io_base + HASH_DIN, buffer, |
| 627 | DIV_ROUND_UP(ncp, sizeof(u32))); | ||
| 628 | } | ||
| 629 | stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32))); | ||
| 627 | reg = stm32_hash_read(hdev, HASH_STR); | 630 | reg = stm32_hash_read(hdev, HASH_STR); |
| 628 | reg |= HASH_STR_DCAL; | 631 | reg |= HASH_STR_DCAL; |
| 629 | stm32_hash_write(hdev, HASH_STR, reg); | 632 | stm32_hash_write(hdev, HASH_STR, reg); |
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 66fb40d0ebdb..03830634e141 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c | |||
| @@ -383,7 +383,7 @@ err_put_fd: | |||
| 383 | return err; | 383 | return err; |
| 384 | } | 384 | } |
| 385 | 385 | ||
| 386 | static void sync_fill_fence_info(struct dma_fence *fence, | 386 | static int sync_fill_fence_info(struct dma_fence *fence, |
| 387 | struct sync_fence_info *info) | 387 | struct sync_fence_info *info) |
| 388 | { | 388 | { |
| 389 | strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), | 389 | strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), |
| @@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence, | |||
| 399 | test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? | 399 | test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? |
| 400 | ktime_to_ns(fence->timestamp) : | 400 | ktime_to_ns(fence->timestamp) : |
| 401 | ktime_set(0, 0); | 401 | ktime_set(0, 0); |
| 402 | |||
| 403 | return info->status; | ||
| 402 | } | 404 | } |
| 403 | 405 | ||
| 404 | static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | 406 | static long sync_file_ioctl_fence_info(struct sync_file *sync_file, |
| @@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | |||
| 424 | * sync_fence_info and return the actual number of fences on | 426 | * sync_fence_info and return the actual number of fences on |
| 425 | * info->num_fences. | 427 | * info->num_fences. |
| 426 | */ | 428 | */ |
| 427 | if (!info.num_fences) | 429 | if (!info.num_fences) { |
| 430 | info.status = dma_fence_is_signaled(sync_file->fence); | ||
| 428 | goto no_fences; | 431 | goto no_fences; |
| 432 | } else { | ||
| 433 | info.status = 1; | ||
| 434 | } | ||
| 429 | 435 | ||
| 430 | if (info.num_fences < num_fences) | 436 | if (info.num_fences < num_fences) |
| 431 | return -EINVAL; | 437 | return -EINVAL; |
| @@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | |||
| 435 | if (!fence_info) | 441 | if (!fence_info) |
| 436 | return -ENOMEM; | 442 | return -ENOMEM; |
| 437 | 443 | ||
| 438 | for (i = 0; i < num_fences; i++) | 444 | for (i = 0; i < num_fences; i++) { |
| 439 | sync_fill_fence_info(fences[i], &fence_info[i]); | 445 | int status = sync_fill_fence_info(fences[i], &fence_info[i]); |
| 446 | info.status = info.status <= 0 ? info.status : status; | ||
| 447 | } | ||
| 440 | 448 | ||
| 441 | if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info, | 449 | if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info, |
| 442 | size)) { | 450 | size)) { |
| @@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | |||
| 446 | 454 | ||
| 447 | no_fences: | 455 | no_fences: |
| 448 | sync_file_get_name(sync_file, info.name, sizeof(info.name)); | 456 | sync_file_get_name(sync_file, info.name, sizeof(info.name)); |
| 449 | info.status = dma_fence_is_signaled(sync_file->fence); | ||
| 450 | info.num_fences = num_fences; | 457 | info.num_fences = num_fences; |
| 451 | 458 | ||
| 452 | if (copy_to_user((void __user *)arg, &info, sizeof(info))) | 459 | if (copy_to_user((void __user *)arg, &info, sizeof(info))) |
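Note on the sync_file.c hunks: info.status becomes an aggregate. With no per-fence details requested it is simply whether the whole sync_file has signalled; otherwise it starts at 1 and each fence's status is folded in, so the first still-active (0) or errored (<0) fence decides the result. The fold, isolated:

    /* Illustrative aggregation over per-fence status values
     * (1 = signalled, 0 = still active, <0 = error): once the running
     * value is non-positive it sticks, so the overall result is 1 only
     * if every fence signalled cleanly. */
    static int demo_overall_status(const int *status, unsigned int n)
    {
            int overall = 1;
            unsigned int i;

            for (i = 0; i < n; i++)
                    overall = overall <= 0 ? overall : status[i];

            return overall;
    }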
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c index 32905d5606ac..339186f25a2a 100644 --- a/drivers/dma/altera-msgdma.c +++ b/drivers/dma/altera-msgdma.c | |||
| @@ -212,11 +212,12 @@ struct msgdma_device { | |||
| 212 | static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) | 212 | static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) |
| 213 | { | 213 | { |
| 214 | struct msgdma_sw_desc *desc; | 214 | struct msgdma_sw_desc *desc; |
| 215 | unsigned long flags; | ||
| 215 | 216 | ||
| 216 | spin_lock_bh(&mdev->lock); | 217 | spin_lock_irqsave(&mdev->lock, flags); |
| 217 | desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); | 218 | desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); |
| 218 | list_del(&desc->node); | 219 | list_del(&desc->node); |
| 219 | spin_unlock_bh(&mdev->lock); | 220 | spin_unlock_irqrestore(&mdev->lock, flags); |
| 220 | 221 | ||
| 221 | INIT_LIST_HEAD(&desc->tx_list); | 222 | INIT_LIST_HEAD(&desc->tx_list); |
| 222 | 223 | ||
| @@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
| 306 | struct msgdma_device *mdev = to_mdev(tx->chan); | 307 | struct msgdma_device *mdev = to_mdev(tx->chan); |
| 307 | struct msgdma_sw_desc *new; | 308 | struct msgdma_sw_desc *new; |
| 308 | dma_cookie_t cookie; | 309 | dma_cookie_t cookie; |
| 310 | unsigned long flags; | ||
| 309 | 311 | ||
| 310 | new = tx_to_desc(tx); | 312 | new = tx_to_desc(tx); |
| 311 | spin_lock_bh(&mdev->lock); | 313 | spin_lock_irqsave(&mdev->lock, flags); |
| 312 | cookie = dma_cookie_assign(tx); | 314 | cookie = dma_cookie_assign(tx); |
| 313 | 315 | ||
| 314 | list_add_tail(&new->node, &mdev->pending_list); | 316 | list_add_tail(&new->node, &mdev->pending_list); |
| 315 | spin_unlock_bh(&mdev->lock); | 317 | spin_unlock_irqrestore(&mdev->lock, flags); |
| 316 | 318 | ||
| 317 | return cookie; | 319 | return cookie; |
| 318 | } | 320 | } |
| @@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, | |||
| 336 | struct msgdma_extended_desc *desc; | 338 | struct msgdma_extended_desc *desc; |
| 337 | size_t copy; | 339 | size_t copy; |
| 338 | u32 desc_cnt; | 340 | u32 desc_cnt; |
| 341 | unsigned long irqflags; | ||
| 339 | 342 | ||
| 340 | desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); | 343 | desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); |
| 341 | 344 | ||
| 342 | spin_lock_bh(&mdev->lock); | 345 | spin_lock_irqsave(&mdev->lock, irqflags); |
| 343 | if (desc_cnt > mdev->desc_free_cnt) { | 346 | if (desc_cnt > mdev->desc_free_cnt) { |
| 344 | spin_unlock_bh(&mdev->lock); | 347 | spin_unlock_bh(&mdev->lock); |
| 345 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); | 348 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); |
| 346 | return NULL; | 349 | return NULL; |
| 347 | } | 350 | } |
| 348 | mdev->desc_free_cnt -= desc_cnt; | 351 | mdev->desc_free_cnt -= desc_cnt; |
| 349 | spin_unlock_bh(&mdev->lock); | 352 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
| 350 | 353 | ||
| 351 | do { | 354 | do { |
| 352 | /* Allocate and populate the descriptor */ | 355 | /* Allocate and populate the descriptor */ |
| @@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | |||
| 397 | u32 desc_cnt = 0, i; | 400 | u32 desc_cnt = 0, i; |
| 398 | struct scatterlist *sg; | 401 | struct scatterlist *sg; |
| 399 | u32 stride; | 402 | u32 stride; |
| 403 | unsigned long irqflags; | ||
| 400 | 404 | ||
| 401 | for_each_sg(sgl, sg, sg_len, i) | 405 | for_each_sg(sgl, sg, sg_len, i) |
| 402 | desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); | 406 | desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); |
| 403 | 407 | ||
| 404 | spin_lock_bh(&mdev->lock); | 408 | spin_lock_irqsave(&mdev->lock, irqflags); |
| 405 | if (desc_cnt > mdev->desc_free_cnt) { | 409 | if (desc_cnt > mdev->desc_free_cnt) { |
| 406 | spin_unlock_bh(&mdev->lock); | 410 | spin_unlock_bh(&mdev->lock); |
| 407 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); | 411 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); |
| 408 | return NULL; | 412 | return NULL; |
| 409 | } | 413 | } |
| 410 | mdev->desc_free_cnt -= desc_cnt; | 414 | mdev->desc_free_cnt -= desc_cnt; |
| 411 | spin_unlock_bh(&mdev->lock); | 415 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
| 412 | 416 | ||
| 413 | avail = sg_dma_len(sgl); | 417 | avail = sg_dma_len(sgl); |
| 414 | 418 | ||
| @@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev) | |||
| 566 | static void msgdma_issue_pending(struct dma_chan *chan) | 570 | static void msgdma_issue_pending(struct dma_chan *chan) |
| 567 | { | 571 | { |
| 568 | struct msgdma_device *mdev = to_mdev(chan); | 572 | struct msgdma_device *mdev = to_mdev(chan); |
| 573 | unsigned long flags; | ||
| 569 | 574 | ||
| 570 | spin_lock_bh(&mdev->lock); | 575 | spin_lock_irqsave(&mdev->lock, flags); |
| 571 | msgdma_start_transfer(mdev); | 576 | msgdma_start_transfer(mdev); |
| 572 | spin_unlock_bh(&mdev->lock); | 577 | spin_unlock_irqrestore(&mdev->lock, flags); |
| 573 | } | 578 | } |
| 574 | 579 | ||
| 575 | /** | 580 | /** |
| @@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev) | |||
| 634 | static void msgdma_free_chan_resources(struct dma_chan *dchan) | 639 | static void msgdma_free_chan_resources(struct dma_chan *dchan) |
| 635 | { | 640 | { |
| 636 | struct msgdma_device *mdev = to_mdev(dchan); | 641 | struct msgdma_device *mdev = to_mdev(dchan); |
| 642 | unsigned long flags; | ||
| 637 | 643 | ||
| 638 | spin_lock_bh(&mdev->lock); | 644 | spin_lock_irqsave(&mdev->lock, flags); |
| 639 | msgdma_free_descriptors(mdev); | 645 | msgdma_free_descriptors(mdev); |
| 640 | spin_unlock_bh(&mdev->lock); | 646 | spin_unlock_irqrestore(&mdev->lock, flags); |
| 641 | kfree(mdev->sw_desq); | 647 | kfree(mdev->sw_desq); |
| 642 | } | 648 | } |
| 643 | 649 | ||
| @@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data) | |||
| 682 | u32 count; | 688 | u32 count; |
| 683 | u32 __maybe_unused size; | 689 | u32 __maybe_unused size; |
| 684 | u32 __maybe_unused status; | 690 | u32 __maybe_unused status; |
| 691 | unsigned long flags; | ||
| 685 | 692 | ||
| 686 | spin_lock(&mdev->lock); | 693 | spin_lock_irqsave(&mdev->lock, flags); |
| 687 | 694 | ||
| 688 | /* Read number of responses that are available */ | 695 | /* Read number of responses that are available */ |
| 689 | count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); | 696 | count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); |
| @@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data) | |||
| 698 | * bits. So we need to just drop these values. | 705 | * bits. So we need to just drop these values. |
| 699 | */ | 706 | */ |
| 700 | size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); | 707 | size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); |
| 701 | status = ioread32(mdev->resp - MSGDMA_RESP_STATUS); | 708 | status = ioread32(mdev->resp + MSGDMA_RESP_STATUS); |
| 702 | 709 | ||
| 703 | msgdma_complete_descriptor(mdev); | 710 | msgdma_complete_descriptor(mdev); |
| 704 | msgdma_chan_desc_cleanup(mdev); | 711 | msgdma_chan_desc_cleanup(mdev); |
| 705 | } | 712 | } |
| 706 | 713 | ||
| 707 | spin_unlock(&mdev->lock); | 714 | spin_unlock_irqrestore(&mdev->lock, flags); |
| 708 | } | 715 | } |
| 709 | 716 | ||
| 710 | /** | 717 | /** |
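The msgdma hunks above swap the spin_lock_bh()/spin_unlock_bh() pairs for spin_lock_irqsave()/spin_unlock_irqrestore(), which also disables local interrupts and so makes the lock safe against the driver's hard-IRQ path. A minimal sketch of the pattern on a hypothetical channel structure (not the msgdma code itself); note that every exit from the critical section, including early error returns, wants the matching irqrestore:

    #include <linux/errno.h>
    #include <linux/spinlock.h>

    struct my_chan {
        spinlock_t lock;
        unsigned int free_cnt;
    };

    static int my_chan_reserve(struct my_chan *c, unsigned int want)
    {
        unsigned long flags;

        spin_lock_irqsave(&c->lock, flags);     /* saves IRQ state, disables IRQs */
        if (want > c->free_cnt) {
            /* the error path must restore the saved IRQ state as well */
            spin_unlock_irqrestore(&c->lock, flags);
            return -ENOSPC;
        }
        c->free_cnt -= want;
        spin_unlock_irqrestore(&c->lock, flags);

        return 0;
    }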
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 3879f80a4815..a7ea20e7b8e9 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
| @@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
| 1143 | struct edma_desc *edesc; | 1143 | struct edma_desc *edesc; |
| 1144 | struct device *dev = chan->device->dev; | 1144 | struct device *dev = chan->device->dev; |
| 1145 | struct edma_chan *echan = to_edma_chan(chan); | 1145 | struct edma_chan *echan = to_edma_chan(chan); |
| 1146 | unsigned int width, pset_len; | 1146 | unsigned int width, pset_len, array_size; |
| 1147 | 1147 | ||
| 1148 | if (unlikely(!echan || !len)) | 1148 | if (unlikely(!echan || !len)) |
| 1149 | return NULL; | 1149 | return NULL; |
| 1150 | 1150 | ||
| 1151 | /* Align the array size (acnt block) with the transfer properties */ | ||
| 1152 | switch (__ffs((src | dest | len))) { | ||
| 1153 | case 0: | ||
| 1154 | array_size = SZ_32K - 1; | ||
| 1155 | break; | ||
| 1156 | case 1: | ||
| 1157 | array_size = SZ_32K - 2; | ||
| 1158 | break; | ||
| 1159 | default: | ||
| 1160 | array_size = SZ_32K - 4; | ||
| 1161 | break; | ||
| 1162 | } | ||
| 1163 | |||
| 1151 | if (len < SZ_64K) { | 1164 | if (len < SZ_64K) { |
| 1152 | /* | 1165 | /* |
| 1153 | * Transfer size less than 64K can be handled with one paRAM | 1166 | * Transfer size less than 64K can be handled with one paRAM |
| @@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
| 1169 | * When the full_length is a multiple of 32767 one slot can be | 1182 | * When the full_length is a multiple of 32767 one slot can be |
| 1170 | * used to complete the transfer. | 1183 | * used to complete the transfer. |
| 1171 | */ | 1184 | */ |
| 1172 | width = SZ_32K - 1; | 1185 | width = array_size; |
| 1173 | pset_len = rounddown(len, width); | 1186 | pset_len = rounddown(len, width); |
| 1174 | /* One slot is enough for lengths multiple of (SZ_32K -1) */ | 1187 | /* One slot is enough for lengths multiple of (SZ_32K -1) */ |
| 1175 | if (unlikely(pset_len == len)) | 1188 | if (unlikely(pset_len == len)) |
| @@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
| 1217 | } | 1230 | } |
| 1218 | dest += pset_len; | 1231 | dest += pset_len; |
| 1219 | src += pset_len; | 1232 | src += pset_len; |
| 1220 | pset_len = width = len % (SZ_32K - 1); | 1233 | pset_len = width = len % array_size; |
| 1221 | 1234 | ||
| 1222 | ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, | 1235 | ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, |
| 1223 | width, pset_len, DMA_MEM_TO_MEM); | 1236 | width, pset_len, DMA_MEM_TO_MEM); |
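In the edma memcpy change, the new switch derives the widest safe aCNT block from the common alignment of source, destination and length: __ffs() of their OR gives the lowest set bit, i.e. the strictest alignment all three share, and the block size is the largest value under 32K that keeps that alignment. Roughly, as a stand-alone helper with a hypothetical name (len is assumed non-zero, as in the driver):

    #include <linux/bitops.h>
    #include <linux/sizes.h>
    #include <linux/types.h>

    /* Largest block below 32K that preserves the common src/dst/len alignment. */
    static unsigned int pick_array_size(dma_addr_t src, dma_addr_t dst, size_t len)
    {
        switch (__ffs(src | dst | len)) {
        case 0:                         /* only byte alignment is guaranteed */
            return SZ_32K - 1;
        case 1:                         /* 16-bit aligned */
            return SZ_32K - 2;
        default:                        /* 32-bit or better */
            return SZ_32K - 4;
        }
    }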
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 2f65a8fde21d..f1d04b70ee67 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c | |||
| @@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, | |||
| 262 | mutex_lock(&xbar->mutex); | 262 | mutex_lock(&xbar->mutex); |
| 263 | map->xbar_out = find_first_zero_bit(xbar->dma_inuse, | 263 | map->xbar_out = find_first_zero_bit(xbar->dma_inuse, |
| 264 | xbar->dma_requests); | 264 | xbar->dma_requests); |
| 265 | mutex_unlock(&xbar->mutex); | ||
| 266 | if (map->xbar_out == xbar->dma_requests) { | 265 | if (map->xbar_out == xbar->dma_requests) { |
| 266 | mutex_unlock(&xbar->mutex); | ||
| 267 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); | 267 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); |
| 268 | kfree(map); | 268 | kfree(map); |
| 269 | return ERR_PTR(-ENOMEM); | 269 | return ERR_PTR(-ENOMEM); |
| 270 | } | 270 | } |
| 271 | set_bit(map->xbar_out, xbar->dma_inuse); | 271 | set_bit(map->xbar_out, xbar->dma_inuse); |
| 272 | mutex_unlock(&xbar->mutex); | ||
| 272 | 273 | ||
| 273 | map->xbar_in = (u16)dma_spec->args[0]; | 274 | map->xbar_in = (u16)dma_spec->args[0]; |
| 274 | 275 | ||
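The crossbar fix above closes a small race: the mutex is now held from find_first_zero_bit() until the chosen bit is claimed with set_bit(), so two concurrent allocations can no longer pick the same free request line. Reduced to its core (names hypothetical):

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/mutex.h>

    static int alloc_request_line(struct mutex *lock, unsigned long *inuse,
                                  unsigned int nr_requests)
    {
        unsigned int line;

        mutex_lock(lock);
        line = find_first_zero_bit(inuse, nr_requests);
        if (line == nr_requests) {
            mutex_unlock(lock);
            return -ENOSPC;
        }
        set_bit(line, inuse);           /* claim it before dropping the lock */
        mutex_unlock(lock);

        return line;
    }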
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 3388d54ba114..3f80f167ed56 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
| @@ -453,7 +453,8 @@ config GPIO_TS4800 | |||
| 453 | config GPIO_THUNDERX | 453 | config GPIO_THUNDERX |
| 454 | tristate "Cavium ThunderX/OCTEON-TX GPIO" | 454 | tristate "Cavium ThunderX/OCTEON-TX GPIO" |
| 455 | depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) | 455 | depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) |
| 456 | depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY | 456 | depends on PCI_MSI |
| 457 | select IRQ_DOMAIN_HIERARCHY | ||
| 457 | select IRQ_FASTEOI_HIERARCHY_HANDLERS | 458 | select IRQ_FASTEOI_HIERARCHY_HANDLERS |
| 458 | help | 459 | help |
| 459 | Say yes here to support the on-chip GPIO lines on the ThunderX | 460 | Say yes here to support the on-chip GPIO lines on the ThunderX |
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index dbf869fb63ce..3233b72b6828 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c | |||
| @@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) | |||
| 518 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) | 518 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) |
| 519 | irq_set_handler_locked(d, handle_level_irq); | 519 | irq_set_handler_locked(d, handle_level_irq); |
| 520 | else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) | 520 | else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) |
| 521 | irq_set_handler_locked(d, handle_edge_irq); | 521 | /* |
| 522 | * Edge IRQs are already cleared/acked in the IRQ handler and do | ||
| 523 | * not need to be masked, so the handle_edge_irq() logic would be | ||
| 524 | * redundant here and could cause interrupts to be lost. | ||
| 525 | * So just use handle_simple_irq. | ||
| 526 | */ | ||
| 527 | irq_set_handler_locked(d, handle_simple_irq); | ||
| 522 | 528 | ||
| 523 | return 0; | 529 | return 0; |
| 524 | 530 | ||
| @@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) | |||
| 678 | static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) | 684 | static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) |
| 679 | { | 685 | { |
| 680 | void __iomem *isr_reg = NULL; | 686 | void __iomem *isr_reg = NULL; |
| 681 | u32 isr; | 687 | u32 enabled, isr, level_mask; |
| 682 | unsigned int bit; | 688 | unsigned int bit; |
| 683 | struct gpio_bank *bank = gpiobank; | 689 | struct gpio_bank *bank = gpiobank; |
| 684 | unsigned long wa_lock_flags; | 690 | unsigned long wa_lock_flags; |
| @@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) | |||
| 691 | pm_runtime_get_sync(bank->chip.parent); | 697 | pm_runtime_get_sync(bank->chip.parent); |
| 692 | 698 | ||
| 693 | while (1) { | 699 | while (1) { |
| 694 | u32 isr_saved, level_mask = 0; | ||
| 695 | u32 enabled; | ||
| 696 | |||
| 697 | raw_spin_lock_irqsave(&bank->lock, lock_flags); | 700 | raw_spin_lock_irqsave(&bank->lock, lock_flags); |
| 698 | 701 | ||
| 699 | enabled = omap_get_gpio_irqbank_mask(bank); | 702 | enabled = omap_get_gpio_irqbank_mask(bank); |
| 700 | isr_saved = isr = readl_relaxed(isr_reg) & enabled; | 703 | isr = readl_relaxed(isr_reg) & enabled; |
| 701 | 704 | ||
| 702 | if (bank->level_mask) | 705 | if (bank->level_mask) |
| 703 | level_mask = bank->level_mask & enabled; | 706 | level_mask = bank->level_mask & enabled; |
| 707 | else | ||
| 708 | level_mask = 0; | ||
| 704 | 709 | ||
| 705 | /* clear edge sensitive interrupts before handler(s) are | 710 | /* clear edge sensitive interrupts before handler(s) are |
| 706 | called so that we don't miss any interrupt that occurs while | 711 | called so that we don't miss any interrupt that occurs while |
| 707 | executing them */ | 712 | executing them */ |
| 708 | omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask); | 713 | if (isr & ~level_mask) |
| 709 | omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask); | 714 | omap_clear_gpio_irqbank(bank, isr & ~level_mask); |
| 710 | omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask); | ||
| 711 | 715 | ||
| 712 | raw_spin_unlock_irqrestore(&bank->lock, lock_flags); | 716 | raw_spin_unlock_irqrestore(&bank->lock, lock_flags); |
| 713 | 717 | ||
| @@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | |||
| 1010 | 1014 | ||
| 1011 | /*---------------------------------------------------------------------*/ | 1015 | /*---------------------------------------------------------------------*/ |
| 1012 | 1016 | ||
| 1013 | static void __init omap_gpio_show_rev(struct gpio_bank *bank) | 1017 | static void omap_gpio_show_rev(struct gpio_bank *bank) |
| 1014 | { | 1018 | { |
| 1015 | static bool called; | 1019 | static bool called; |
| 1016 | u32 rev; | 1020 | u32 rev; |
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 4d2113530735..eb4528c87c0b 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
| @@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
| 203 | 203 | ||
| 204 | if (pin <= 255) { | 204 | if (pin <= 255) { |
| 205 | char ev_name[5]; | 205 | char ev_name[5]; |
| 206 | sprintf(ev_name, "_%c%02X", | 206 | sprintf(ev_name, "_%c%02hhX", |
| 207 | agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', | 207 | agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', |
| 208 | pin); | 208 | pin); |
| 209 | if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) | 209 | if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) |
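The "%02hhX" change in gpiolib-acpi reads like a fix for GCC's format-truncation warning: the hh modifier converts the argument to unsigned char, so at most two hex digits can ever land in the five-byte ev_name buffer, whereas plain "%02X" lets the compiler assume up to eight digits. A tiny illustration (helper name hypothetical):

    #include <linux/kernel.h>

    static void build_ev_name(char ev_name[5], u16 pin)
    {
        /* "_" + 'E'/'L' + two hex digits + NUL fits the buffer exactly */
        sprintf(ev_name, "_%c%02hhX", 'E', pin);
    }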
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 7ef6c28a34d9..bc746131987f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
| @@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) | |||
| 834 | placement.busy_placement = &placements; | 834 | placement.busy_placement = &placements; |
| 835 | placements.fpfn = 0; | 835 | placements.fpfn = 0; |
| 836 | placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; | 836 | placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; |
| 837 | placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | 837 | placements.flags = bo->mem.placement | TTM_PL_FLAG_TT; |
| 838 | 838 | ||
| 839 | r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); | 839 | r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); |
| 840 | if (unlikely(r)) | 840 | if (unlikely(r)) |
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 4e53aae9a1fb..0028591f3f95 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
| @@ -2960,6 +2960,7 @@ out: | |||
| 2960 | drm_modeset_backoff(&ctx); | 2960 | drm_modeset_backoff(&ctx); |
| 2961 | } | 2961 | } |
| 2962 | 2962 | ||
| 2963 | drm_atomic_state_put(state); | ||
| 2963 | drm_modeset_drop_locks(&ctx); | 2964 | drm_modeset_drop_locks(&ctx); |
| 2964 | drm_modeset_acquire_fini(&ctx); | 2965 | drm_modeset_acquire_fini(&ctx); |
| 2965 | 2966 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 19404c96eeb1..af289d35b77a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -3013,10 +3013,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) | |||
| 3013 | 3013 | ||
| 3014 | static void nop_submit_request(struct drm_i915_gem_request *request) | 3014 | static void nop_submit_request(struct drm_i915_gem_request *request) |
| 3015 | { | 3015 | { |
| 3016 | unsigned long flags; | ||
| 3017 | |||
| 3016 | GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); | 3018 | GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); |
| 3017 | dma_fence_set_error(&request->fence, -EIO); | 3019 | dma_fence_set_error(&request->fence, -EIO); |
| 3018 | i915_gem_request_submit(request); | 3020 | |
| 3021 | spin_lock_irqsave(&request->engine->timeline->lock, flags); | ||
| 3022 | __i915_gem_request_submit(request); | ||
| 3019 | intel_engine_init_global_seqno(request->engine, request->global_seqno); | 3023 | intel_engine_init_global_seqno(request->engine, request->global_seqno); |
| 3024 | spin_unlock_irqrestore(&request->engine->timeline->lock, flags); | ||
| 3020 | } | 3025 | } |
| 3021 | 3026 | ||
| 3022 | static void engine_set_wedged(struct intel_engine_cs *engine) | 3027 | static void engine_set_wedged(struct intel_engine_cs *engine) |
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index d805b6e6fe71..27743be5b768 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c | |||
| @@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder, | |||
| 606 | connector->encoder->base.id, | 606 | connector->encoder->base.id, |
| 607 | connector->encoder->name); | 607 | connector->encoder->name); |
| 608 | 608 | ||
| 609 | /* ELD Conn_Type */ | ||
| 610 | connector->eld[5] &= ~(3 << 2); | ||
| 611 | if (intel_crtc_has_dp_encoder(crtc_state)) | ||
| 612 | connector->eld[5] |= (1 << 2); | ||
| 613 | |||
| 614 | connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; | 609 | connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; |
| 615 | 610 | ||
| 616 | if (dev_priv->display.audio_codec_enable) | 611 | if (dev_priv->display.audio_codec_enable) |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 183e87e8ea31..5d4cd3d00564 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
| @@ -1163,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, | |||
| 1163 | is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0; | 1163 | is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0; |
| 1164 | is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR); | 1164 | is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR); |
| 1165 | 1165 | ||
| 1166 | if (port == PORT_A && is_dvi) { | ||
| 1167 | DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n", | ||
| 1168 | is_hdmi ? "/HDMI" : ""); | ||
| 1169 | is_dvi = false; | ||
| 1170 | is_hdmi = false; | ||
| 1171 | } | ||
| 1172 | |||
| 1166 | info->supports_dvi = is_dvi; | 1173 | info->supports_dvi = is_dvi; |
| 1167 | info->supports_hdmi = is_hdmi; | 1174 | info->supports_hdmi = is_hdmi; |
| 1168 | info->supports_dp = is_dp; | 1175 | info->supports_dp = is_dp; |
| @@ -1233,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv, | |||
| 1233 | { | 1240 | { |
| 1234 | enum port port; | 1241 | enum port port; |
| 1235 | 1242 | ||
| 1236 | if (!HAS_DDI(dev_priv)) | 1243 | if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
| 1237 | return; | 1244 | return; |
| 1238 | 1245 | ||
| 1239 | if (!dev_priv->vbt.child_dev_num) | 1246 | if (!dev_priv->vbt.child_dev_num) |
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index ff9ecd211abb..b8315bca852b 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c | |||
| @@ -74,7 +74,7 @@ | |||
| 74 | #define I9XX_CSC_COEFF_1_0 \ | 74 | #define I9XX_CSC_COEFF_1_0 \ |
| 75 | ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) | 75 | ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) |
| 76 | 76 | ||
| 77 | static bool crtc_state_is_legacy(struct drm_crtc_state *state) | 77 | static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state) |
| 78 | { | 78 | { |
| 79 | return !state->degamma_lut && | 79 | return !state->degamma_lut && |
| 80 | !state->ctm && | 80 | !state->ctm && |
| @@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state) | |||
| 288 | } | 288 | } |
| 289 | 289 | ||
| 290 | mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); | 290 | mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); |
| 291 | if (!crtc_state_is_legacy(state)) { | 291 | if (!crtc_state_is_legacy_gamma(state)) { |
| 292 | mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | | 292 | mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | |
| 293 | (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); | 293 | (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); |
| 294 | } | 294 | } |
| @@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state) | |||
| 469 | struct intel_crtc_state *intel_state = to_intel_crtc_state(state); | 469 | struct intel_crtc_state *intel_state = to_intel_crtc_state(state); |
| 470 | enum pipe pipe = to_intel_crtc(state->crtc)->pipe; | 470 | enum pipe pipe = to_intel_crtc(state->crtc)->pipe; |
| 471 | 471 | ||
| 472 | if (crtc_state_is_legacy(state)) { | 472 | if (crtc_state_is_legacy_gamma(state)) { |
| 473 | haswell_load_luts(state); | 473 | haswell_load_luts(state); |
| 474 | return; | 474 | return; |
| 475 | } | 475 | } |
| @@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state) | |||
| 529 | 529 | ||
| 530 | glk_load_degamma_lut(state); | 530 | glk_load_degamma_lut(state); |
| 531 | 531 | ||
| 532 | if (crtc_state_is_legacy(state)) { | 532 | if (crtc_state_is_legacy_gamma(state)) { |
| 533 | haswell_load_luts(state); | 533 | haswell_load_luts(state); |
| 534 | return; | 534 | return; |
| 535 | } | 535 | } |
| @@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state) | |||
| 551 | uint32_t i, lut_size; | 551 | uint32_t i, lut_size; |
| 552 | uint32_t word0, word1; | 552 | uint32_t word0, word1; |
| 553 | 553 | ||
| 554 | if (crtc_state_is_legacy(state)) { | 554 | if (crtc_state_is_legacy_gamma(state)) { |
| 555 | /* Turn off degamma/gamma on CGM block. */ | 555 | /* Turn off degamma/gamma on CGM block. */ |
| 556 | I915_WRITE(CGM_PIPE_MODE(pipe), | 556 | I915_WRITE(CGM_PIPE_MODE(pipe), |
| 557 | (state->ctm ? CGM_PIPE_MODE_CSC : 0)); | 557 | (state->ctm ? CGM_PIPE_MODE_CSC : 0)); |
| @@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc, | |||
| 632 | return 0; | 632 | return 0; |
| 633 | 633 | ||
| 634 | /* | 634 | /* |
| 635 | * We also allow no degamma lut and a gamma lut at the legacy | 635 | * We also allow no degamma lut/ctm and a gamma lut at the legacy |
| 636 | * size (256 entries). | 636 | * size (256 entries). |
| 637 | */ | 637 | */ |
| 638 | if (!crtc_state->degamma_lut && | 638 | if (crtc_state_is_legacy_gamma(crtc_state)) |
| 639 | crtc_state->gamma_lut && | ||
| 640 | crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH) | ||
| 641 | return 0; | 639 | return 0; |
| 642 | 640 | ||
| 643 | return -EINVAL; | 641 | return -EINVAL; |
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 965988f79a55..92c1f8e166dc 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c | |||
| @@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv) | |||
| 216 | 216 | ||
| 217 | mask = DC_STATE_DEBUG_MASK_MEMORY_UP; | 217 | mask = DC_STATE_DEBUG_MASK_MEMORY_UP; |
| 218 | 218 | ||
| 219 | if (IS_BROXTON(dev_priv)) | 219 | if (IS_GEN9_LP(dev_priv)) |
| 220 | mask |= DC_STATE_DEBUG_MASK_CORES; | 220 | mask |= DC_STATE_DEBUG_MASK_CORES; |
| 221 | 221 | ||
| 222 | /* The below bit doesn't need to be cleared ever afterwards */ | 222 | /* The below bit doesn't need to be cleared ever afterwards */ |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 4b4fd1f8110b..476681d5940c 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
| @@ -1655,7 +1655,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder, | |||
| 1655 | out: | 1655 | out: |
| 1656 | if (ret && IS_GEN9_LP(dev_priv)) { | 1656 | if (ret && IS_GEN9_LP(dev_priv)) { |
| 1657 | tmp = I915_READ(BXT_PHY_CTL(port)); | 1657 | tmp = I915_READ(BXT_PHY_CTL(port)); |
| 1658 | if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK | | 1658 | if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | |
| 1659 | BXT_PHY_LANE_POWERDOWN_ACK | | ||
| 1659 | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) | 1660 | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) |
| 1660 | DRM_ERROR("Port %c enabled but PHY powered down? " | 1661 | DRM_ERROR("Port %c enabled but PHY powered down? " |
| 1661 | "(PHY_CTL %08x)\n", port_name(port), tmp); | 1662 | "(PHY_CTL %08x)\n", port_name(port), tmp); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 00cd17c76fdc..5c7828c52d12 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
| 10245 | { | 10245 | { |
| 10246 | struct drm_i915_private *dev_priv = to_i915(dev); | 10246 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 10247 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 10247 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 10248 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; | 10248 | enum transcoder cpu_transcoder; |
| 10249 | struct drm_display_mode *mode; | 10249 | struct drm_display_mode *mode; |
| 10250 | struct intel_crtc_state *pipe_config; | 10250 | struct intel_crtc_state *pipe_config; |
| 10251 | int htot = I915_READ(HTOTAL(cpu_transcoder)); | 10251 | u32 htot, hsync, vtot, vsync; |
| 10252 | int hsync = I915_READ(HSYNC(cpu_transcoder)); | ||
| 10253 | int vtot = I915_READ(VTOTAL(cpu_transcoder)); | ||
| 10254 | int vsync = I915_READ(VSYNC(cpu_transcoder)); | ||
| 10255 | enum pipe pipe = intel_crtc->pipe; | 10252 | enum pipe pipe = intel_crtc->pipe; |
| 10256 | 10253 | ||
| 10257 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | 10254 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
| @@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
| 10279 | i9xx_crtc_clock_get(intel_crtc, pipe_config); | 10276 | i9xx_crtc_clock_get(intel_crtc, pipe_config); |
| 10280 | 10277 | ||
| 10281 | mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; | 10278 | mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; |
| 10279 | |||
| 10280 | cpu_transcoder = pipe_config->cpu_transcoder; | ||
| 10281 | htot = I915_READ(HTOTAL(cpu_transcoder)); | ||
| 10282 | hsync = I915_READ(HSYNC(cpu_transcoder)); | ||
| 10283 | vtot = I915_READ(VTOTAL(cpu_transcoder)); | ||
| 10284 | vsync = I915_READ(VSYNC(cpu_transcoder)); | ||
| 10285 | |||
| 10282 | mode->hdisplay = (htot & 0xffff) + 1; | 10286 | mode->hdisplay = (htot & 0xffff) + 1; |
| 10283 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; | 10287 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; |
| 10284 | mode->hsync_start = (hsync & 0xffff) + 1; | 10288 | mode->hsync_start = (hsync & 0xffff) + 1; |
| @@ -12359,7 +12363,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 12359 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; | 12363 | struct drm_crtc_state *old_crtc_state, *new_crtc_state; |
| 12360 | struct drm_crtc *crtc; | 12364 | struct drm_crtc *crtc; |
| 12361 | struct intel_crtc_state *intel_cstate; | 12365 | struct intel_crtc_state *intel_cstate; |
| 12362 | bool hw_check = intel_state->modeset; | ||
| 12363 | u64 put_domains[I915_MAX_PIPES] = {}; | 12366 | u64 put_domains[I915_MAX_PIPES] = {}; |
| 12364 | unsigned crtc_vblank_mask = 0; | 12367 | unsigned crtc_vblank_mask = 0; |
| 12365 | int i; | 12368 | int i; |
| @@ -12376,7 +12379,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 12376 | 12379 | ||
| 12377 | if (needs_modeset(new_crtc_state) || | 12380 | if (needs_modeset(new_crtc_state) || |
| 12378 | to_intel_crtc_state(new_crtc_state)->update_pipe) { | 12381 | to_intel_crtc_state(new_crtc_state)->update_pipe) { |
| 12379 | hw_check = true; | ||
| 12380 | 12382 | ||
| 12381 | put_domains[to_intel_crtc(crtc)->pipe] = | 12383 | put_domains[to_intel_crtc(crtc)->pipe] = |
| 12382 | modeset_get_crtc_power_domains(crtc, | 12384 | modeset_get_crtc_power_domains(crtc, |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 64134947c0aa..203198659ab2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
| @@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp) | |||
| 2307 | I915_WRITE(pp_ctrl_reg, pp); | 2307 | I915_WRITE(pp_ctrl_reg, pp); |
| 2308 | POSTING_READ(pp_ctrl_reg); | 2308 | POSTING_READ(pp_ctrl_reg); |
| 2309 | 2309 | ||
| 2310 | intel_dp->panel_power_off_time = ktime_get_boottime(); | ||
| 2311 | wait_panel_off(intel_dp); | 2310 | wait_panel_off(intel_dp); |
| 2311 | intel_dp->panel_power_off_time = ktime_get_boottime(); | ||
| 2312 | 2312 | ||
| 2313 | /* We got a reference when we enabled the VDD. */ | 2313 | /* We got a reference when we enabled the VDD. */ |
| 2314 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); | 2314 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); |
| @@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, | |||
| 5273 | * seems sufficient to avoid this problem. | 5273 | * seems sufficient to avoid this problem. |
| 5274 | */ | 5274 | */ |
| 5275 | if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { | 5275 | if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { |
| 5276 | vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10); | 5276 | vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); |
| 5277 | DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", | 5277 | DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", |
| 5278 | vbt.t11_t12); | 5278 | vbt.t11_t12); |
| 5279 | } | 5279 | } |
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c index 09b670929786..de38d014ed39 100644 --- a/drivers/gpu/drm/i915/intel_dpio_phy.c +++ b/drivers/gpu/drm/i915/intel_dpio_phy.c | |||
| @@ -208,12 +208,6 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = { | |||
| 208 | }, | 208 | }, |
| 209 | }; | 209 | }; |
| 210 | 210 | ||
| 211 | static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info) | ||
| 212 | { | ||
| 213 | return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) | | ||
| 214 | BIT(phy_info->channel[DPIO_CH0].port); | ||
| 215 | } | ||
| 216 | |||
| 217 | static const struct bxt_ddi_phy_info * | 211 | static const struct bxt_ddi_phy_info * |
| 218 | bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) | 212 | bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) |
| 219 | { | 213 | { |
| @@ -313,7 +307,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, | |||
| 313 | enum dpio_phy phy) | 307 | enum dpio_phy phy) |
| 314 | { | 308 | { |
| 315 | const struct bxt_ddi_phy_info *phy_info; | 309 | const struct bxt_ddi_phy_info *phy_info; |
| 316 | enum port port; | ||
| 317 | 310 | ||
| 318 | phy_info = bxt_get_phy_info(dev_priv, phy); | 311 | phy_info = bxt_get_phy_info(dev_priv, phy); |
| 319 | 312 | ||
| @@ -335,19 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv, | |||
| 335 | return false; | 328 | return false; |
| 336 | } | 329 | } |
| 337 | 330 | ||
| 338 | for_each_port_masked(port, bxt_phy_port_mask(phy_info)) { | ||
| 339 | u32 tmp = I915_READ(BXT_PHY_CTL(port)); | ||
| 340 | |||
| 341 | if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) { | ||
| 342 | DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane " | ||
| 343 | "for port %c powered down " | ||
| 344 | "(PHY_CTL %08x)\n", | ||
| 345 | phy, port_name(port), tmp); | ||
| 346 | |||
| 347 | return false; | ||
| 348 | } | ||
| 349 | } | ||
| 350 | |||
| 351 | return true; | 331 | return true; |
| 352 | } | 332 | } |
| 353 | 333 | ||
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c index 951e834dd274..28a778b785ac 100644 --- a/drivers/gpu/drm/i915/intel_modes.c +++ b/drivers/gpu/drm/i915/intel_modes.c | |||
| @@ -30,6 +30,21 @@ | |||
| 30 | #include "intel_drv.h" | 30 | #include "intel_drv.h" |
| 31 | #include "i915_drv.h" | 31 | #include "i915_drv.h" |
| 32 | 32 | ||
| 33 | static void intel_connector_update_eld_conn_type(struct drm_connector *connector) | ||
| 34 | { | ||
| 35 | u8 conn_type; | ||
| 36 | |||
| 37 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort || | ||
| 38 | connector->connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
| 39 | conn_type = DRM_ELD_CONN_TYPE_DP; | ||
| 40 | } else { | ||
| 41 | conn_type = DRM_ELD_CONN_TYPE_HDMI; | ||
| 42 | } | ||
| 43 | |||
| 44 | connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK; | ||
| 45 | connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type; | ||
| 46 | } | ||
| 47 | |||
| 33 | /** | 48 | /** |
| 34 | * intel_connector_update_modes - update connector from edid | 49 | * intel_connector_update_modes - update connector from edid |
| 35 | * @connector: DRM connector device to use | 50 | * @connector: DRM connector device to use |
| @@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector, | |||
| 44 | ret = drm_add_edid_modes(connector, edid); | 59 | ret = drm_add_edid_modes(connector, edid); |
| 45 | drm_edid_to_eld(connector, edid); | 60 | drm_edid_to_eld(connector, edid); |
| 46 | 61 | ||
| 62 | intel_connector_update_eld_conn_type(connector); | ||
| 63 | |||
| 47 | return ret; | 64 | return ret; |
| 48 | } | 65 | } |
| 49 | 66 | ||
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index b66d8e136aa3..49577eba8e7e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
| @@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, | |||
| 368 | { | 368 | { |
| 369 | enum i915_power_well_id id = power_well->id; | 369 | enum i915_power_well_id id = power_well->id; |
| 370 | bool wait_fuses = power_well->hsw.has_fuses; | 370 | bool wait_fuses = power_well->hsw.has_fuses; |
| 371 | enum skl_power_gate pg; | 371 | enum skl_power_gate uninitialized_var(pg); |
| 372 | u32 val; | 372 | u32 val; |
| 373 | 373 | ||
| 374 | if (wait_fuses) { | 374 | if (wait_fuses) { |
| @@ -2782,6 +2782,9 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume | |||
| 2782 | 2782 | ||
| 2783 | /* 6. Enable DBUF */ | 2783 | /* 6. Enable DBUF */ |
| 2784 | gen9_dbuf_enable(dev_priv); | 2784 | gen9_dbuf_enable(dev_priv); |
| 2785 | |||
| 2786 | if (resume && dev_priv->csr.dmc_payload) | ||
| 2787 | intel_csr_load_program(dev_priv); | ||
| 2785 | } | 2788 | } |
| 2786 | 2789 | ||
| 2787 | #undef CNL_PROCMON_IDX | 2790 | #undef CNL_PROCMON_IDX |
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index dbb31a014419..deaf869374ea 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c | |||
| @@ -248,7 +248,7 @@ disable_clks: | |||
| 248 | clk_disable_unprepare(ahb_clk); | 248 | clk_disable_unprepare(ahb_clk); |
| 249 | disable_gdsc: | 249 | disable_gdsc: |
| 250 | regulator_disable(gdsc_reg); | 250 | regulator_disable(gdsc_reg); |
| 251 | pm_runtime_put_autosuspend(dev); | 251 | pm_runtime_put_sync(dev); |
| 252 | put_clk: | 252 | put_clk: |
| 253 | clk_put(ahb_clk); | 253 | clk_put(ahb_clk); |
| 254 | put_gdsc: | 254 | put_gdsc: |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index c2bdad88447e..824067d2d427 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | |||
| @@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = { | |||
| 83 | .caps = MDP_LM_CAP_WB }, | 83 | .caps = MDP_LM_CAP_WB }, |
| 84 | }, | 84 | }, |
| 85 | .nb_stages = 5, | 85 | .nb_stages = 5, |
| 86 | .max_width = 2048, | ||
| 87 | .max_height = 0xFFFF, | ||
| 86 | }, | 88 | }, |
| 87 | .dspp = { | 89 | .dspp = { |
| 88 | .count = 3, | 90 | .count = 3, |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 6fcb58ab718c..440977677001 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
| @@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
| 804 | 804 | ||
| 805 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); | 805 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); |
| 806 | 806 | ||
| 807 | pm_runtime_put_autosuspend(&pdev->dev); | ||
| 808 | |||
| 809 | set_cursor: | 807 | set_cursor: |
| 810 | ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); | 808 | ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); |
| 811 | if (ret) { | 809 | if (ret) { |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index f15821a0d900..ea5bb0e1632c 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
| @@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
| 610 | struct dma_fence *fence; | 610 | struct dma_fence *fence; |
| 611 | int i, ret; | 611 | int i, ret; |
| 612 | 612 | ||
| 613 | if (!exclusive) { | ||
| 614 | /* NOTE: _reserve_shared() must happen before _add_shared_fence(), | ||
| 615 | * which makes this a slightly strange place to call it. OTOH this | ||
| 616 | * is a convenient can-fail point to hook it in. (And similar to | ||
| 617 | * how etnaviv and nouveau handle this.) | ||
| 618 | */ | ||
| 619 | ret = reservation_object_reserve_shared(msm_obj->resv); | ||
| 620 | if (ret) | ||
| 621 | return ret; | ||
| 622 | } | ||
| 623 | |||
| 624 | fobj = reservation_object_get_list(msm_obj->resv); | 613 | fobj = reservation_object_get_list(msm_obj->resv); |
| 625 | if (!fobj || (fobj->shared_count == 0)) { | 614 | if (!fobj || (fobj->shared_count == 0)) { |
| 626 | fence = reservation_object_get_excl(msm_obj->resv); | 615 | fence = reservation_object_get_excl(msm_obj->resv); |
| @@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, | |||
| 1045 | } | 1034 | } |
| 1046 | 1035 | ||
| 1047 | vaddr = msm_gem_get_vaddr(obj); | 1036 | vaddr = msm_gem_get_vaddr(obj); |
| 1048 | if (!vaddr) { | 1037 | if (IS_ERR(vaddr)) { |
| 1049 | msm_gem_put_iova(obj, aspace); | 1038 | msm_gem_put_iova(obj, aspace); |
| 1050 | drm_gem_object_unreference(obj); | 1039 | drm_gem_object_unreference(obj); |
| 1051 | return ERR_PTR(-ENOMEM); | 1040 | return ERR_CAST(vaddr); |
| 1052 | } | 1041 | } |
| 1053 | 1042 | ||
| 1054 | if (bo) | 1043 | if (bo) |
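The msm_gem change works because msm_gem_get_vaddr() reports failure as an ERR_PTR()-encoded errno rather than NULL, so the caller now tests with IS_ERR() and forwards the original error through ERR_CAST() instead of assuming -ENOMEM. The general idiom, with hypothetical helpers:

    #include <linux/err.h>

    static void *map_and_check(struct my_obj *obj)
    {
        void *vaddr = my_get_vaddr(obj);    /* valid pointer or ERR_PTR(-errno) */

        if (IS_ERR(vaddr)) {
            my_put_iova(obj);               /* unwind what already succeeded */
            return ERR_CAST(vaddr);         /* forward the encoded errno as-is */
        }

        return vaddr;
    }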
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 5d0a75d4b249..93535cac0676 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
| @@ -221,7 +221,7 @@ fail: | |||
| 221 | return ret; | 221 | return ret; |
| 222 | } | 222 | } |
| 223 | 223 | ||
| 224 | static int submit_fence_sync(struct msm_gem_submit *submit) | 224 | static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) |
| 225 | { | 225 | { |
| 226 | int i, ret = 0; | 226 | int i, ret = 0; |
| 227 | 227 | ||
| @@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit) | |||
| 229 | struct msm_gem_object *msm_obj = submit->bos[i].obj; | 229 | struct msm_gem_object *msm_obj = submit->bos[i].obj; |
| 230 | bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; | 230 | bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; |
| 231 | 231 | ||
| 232 | if (!write) { | ||
| 233 | /* NOTE: _reserve_shared() must happen before | ||
| 234 | * _add_shared_fence(), which makes this a slightly | ||
| 235 | * strange place to call it. OTOH this is a | ||
| 236 | * convenient can-fail point to hook it in. | ||
| 237 | */ | ||
| 238 | ret = reservation_object_reserve_shared(msm_obj->resv); | ||
| 239 | if (ret) | ||
| 240 | return ret; | ||
| 241 | } | ||
| 242 | |||
| 243 | if (no_implicit) | ||
| 244 | continue; | ||
| 245 | |||
| 232 | ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); | 246 | ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); |
| 233 | if (ret) | 247 | if (ret) |
| 234 | break; | 248 | break; |
| @@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
| 451 | if (ret) | 465 | if (ret) |
| 452 | goto out; | 466 | goto out; |
| 453 | 467 | ||
| 454 | if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { | 468 | ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT)); |
| 455 | ret = submit_fence_sync(submit); | 469 | if (ret) |
| 456 | if (ret) | 470 | goto out; |
| 457 | goto out; | ||
| 458 | } | ||
| 459 | 471 | ||
| 460 | ret = submit_pin_objects(submit); | 472 | ret = submit_pin_objects(submit); |
| 461 | if (ret) | 473 | if (ret) |
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index ffbff27600e0..6a887032c66a 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
| @@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) | |||
| 718 | msm_gem_put_iova(gpu->rb->bo, gpu->aspace); | 718 | msm_gem_put_iova(gpu->rb->bo, gpu->aspace); |
| 719 | msm_ringbuffer_destroy(gpu->rb); | 719 | msm_ringbuffer_destroy(gpu->rb); |
| 720 | } | 720 | } |
| 721 | if (gpu->aspace) { | 721 | |
| 722 | if (!IS_ERR_OR_NULL(gpu->aspace)) { | ||
| 722 | gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, | 723 | gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, |
| 723 | NULL, 0); | 724 | NULL, 0); |
| 724 | msm_gem_address_space_put(gpu->aspace); | 725 | msm_gem_address_space_put(gpu->aspace); |
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 0366b8092f97..ec56794ad039 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c | |||
| @@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz) | |||
| 111 | 111 | ||
| 112 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); | 112 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); |
| 113 | 113 | ||
| 114 | /* Note that smp_load_acquire() is not strictly required | ||
| 115 | * as CIRC_SPACE_TO_END() does not access the tail more | ||
| 116 | * than once. | ||
| 117 | */ | ||
| 114 | n = min(sz, circ_space_to_end(&rd->fifo)); | 118 | n = min(sz, circ_space_to_end(&rd->fifo)); |
| 115 | memcpy(fptr, ptr, n); | 119 | memcpy(fptr, ptr, n); |
| 116 | 120 | ||
| 117 | fifo->head = (fifo->head + n) & (BUF_SZ - 1); | 121 | smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1)); |
| 118 | sz -= n; | 122 | sz -= n; |
| 119 | ptr += n; | 123 | ptr += n; |
| 120 | 124 | ||
| @@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf, | |||
| 145 | if (ret) | 149 | if (ret) |
| 146 | goto out; | 150 | goto out; |
| 147 | 151 | ||
| 152 | /* Note that smp_load_acquire() is not strictly required | ||
| 153 | * as CIRC_CNT_TO_END() does not access the head more than | ||
| 154 | * once. | ||
| 155 | */ | ||
| 148 | n = min_t(int, sz, circ_count_to_end(&rd->fifo)); | 156 | n = min_t(int, sz, circ_count_to_end(&rd->fifo)); |
| 149 | if (copy_to_user(buf, fptr, n)) { | 157 | if (copy_to_user(buf, fptr, n)) { |
| 150 | ret = -EFAULT; | 158 | ret = -EFAULT; |
| 151 | goto out; | 159 | goto out; |
| 152 | } | 160 | } |
| 153 | 161 | ||
| 154 | fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); | 162 | smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1)); |
| 155 | *ppos += n; | 163 | *ppos += n; |
| 156 | 164 | ||
| 157 | wake_up_all(&rd->fifo_event); | 165 | wake_up_all(&rd->fifo_event); |
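Both msm_rd hunks publish the updated circ_buf index with smp_store_release(), which guarantees the bytes copied into the FIFO are visible to the other side before the new head (or tail) value is. A generic single-producer/single-consumer sketch of that pairing, not the msm code:

    #include <asm/barrier.h>
    #include <linux/circ_buf.h>
    #include <linux/compiler.h>
    #include <linux/types.h>

    #define BUF_SZ 4096                 /* must be a power of two */

    struct byte_fifo {
        unsigned int head, tail;
        char buf[BUF_SZ];
    };

    static bool fifo_put(struct byte_fifo *f, char c)       /* single producer */
    {
        unsigned int head = f->head;
        unsigned int tail = READ_ONCE(f->tail);

        if (!CIRC_SPACE(head, tail, BUF_SZ))
            return false;
        f->buf[head] = c;                               /* fill the slot first */
        smp_store_release(&f->head, (head + 1) & (BUF_SZ - 1));
        return true;
    }

    static bool fifo_get(struct byte_fifo *f, char *c)      /* single consumer */
    {
        unsigned int head = smp_load_acquire(&f->head); /* pairs with the release */
        unsigned int tail = f->tail;

        if (!CIRC_CNT(head, tail, BUF_SZ))
            return false;
        *c = f->buf[tail];
        smp_store_release(&f->tail, (tail + 1) & (BUF_SZ - 1));
        return true;
    }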
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index 9ea6cd5a1370..3cf1a6932fac 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | |||
| @@ -302,26 +302,29 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, | |||
| 302 | hdmi->mod_clk = devm_clk_get(dev, "mod"); | 302 | hdmi->mod_clk = devm_clk_get(dev, "mod"); |
| 303 | if (IS_ERR(hdmi->mod_clk)) { | 303 | if (IS_ERR(hdmi->mod_clk)) { |
| 304 | dev_err(dev, "Couldn't get the HDMI mod clock\n"); | 304 | dev_err(dev, "Couldn't get the HDMI mod clock\n"); |
| 305 | return PTR_ERR(hdmi->mod_clk); | 305 | ret = PTR_ERR(hdmi->mod_clk); |
| 306 | goto err_disable_bus_clk; | ||
| 306 | } | 307 | } |
| 307 | clk_prepare_enable(hdmi->mod_clk); | 308 | clk_prepare_enable(hdmi->mod_clk); |
| 308 | 309 | ||
| 309 | hdmi->pll0_clk = devm_clk_get(dev, "pll-0"); | 310 | hdmi->pll0_clk = devm_clk_get(dev, "pll-0"); |
| 310 | if (IS_ERR(hdmi->pll0_clk)) { | 311 | if (IS_ERR(hdmi->pll0_clk)) { |
| 311 | dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n"); | 312 | dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n"); |
| 312 | return PTR_ERR(hdmi->pll0_clk); | 313 | ret = PTR_ERR(hdmi->pll0_clk); |
| 314 | goto err_disable_mod_clk; | ||
| 313 | } | 315 | } |
| 314 | 316 | ||
| 315 | hdmi->pll1_clk = devm_clk_get(dev, "pll-1"); | 317 | hdmi->pll1_clk = devm_clk_get(dev, "pll-1"); |
| 316 | if (IS_ERR(hdmi->pll1_clk)) { | 318 | if (IS_ERR(hdmi->pll1_clk)) { |
| 317 | dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n"); | 319 | dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n"); |
| 318 | return PTR_ERR(hdmi->pll1_clk); | 320 | ret = PTR_ERR(hdmi->pll1_clk); |
| 321 | goto err_disable_mod_clk; | ||
| 319 | } | 322 | } |
| 320 | 323 | ||
| 321 | ret = sun4i_tmds_create(hdmi); | 324 | ret = sun4i_tmds_create(hdmi); |
| 322 | if (ret) { | 325 | if (ret) { |
| 323 | dev_err(dev, "Couldn't create the TMDS clock\n"); | 326 | dev_err(dev, "Couldn't create the TMDS clock\n"); |
| 324 | return ret; | 327 | goto err_disable_mod_clk; |
| 325 | } | 328 | } |
| 326 | 329 | ||
| 327 | writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG); | 330 | writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG); |
| @@ -362,7 +365,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, | |||
| 362 | ret = sun4i_hdmi_i2c_create(dev, hdmi); | 365 | ret = sun4i_hdmi_i2c_create(dev, hdmi); |
| 363 | if (ret) { | 366 | if (ret) { |
| 364 | dev_err(dev, "Couldn't create the HDMI I2C adapter\n"); | 367 | dev_err(dev, "Couldn't create the HDMI I2C adapter\n"); |
| 365 | return ret; | 368 | goto err_disable_mod_clk; |
| 366 | } | 369 | } |
| 367 | 370 | ||
| 368 | drm_encoder_helper_add(&hdmi->encoder, | 371 | drm_encoder_helper_add(&hdmi->encoder, |
| @@ -422,6 +425,10 @@ err_cleanup_connector: | |||
| 422 | drm_encoder_cleanup(&hdmi->encoder); | 425 | drm_encoder_cleanup(&hdmi->encoder); |
| 423 | err_del_i2c_adapter: | 426 | err_del_i2c_adapter: |
| 424 | i2c_del_adapter(hdmi->i2c); | 427 | i2c_del_adapter(hdmi->i2c); |
| 428 | err_disable_mod_clk: | ||
| 429 | clk_disable_unprepare(hdmi->mod_clk); | ||
| 430 | err_disable_bus_clk: | ||
| 431 | clk_disable_unprepare(hdmi->bus_clk); | ||
| 425 | return ret; | 432 | return ret; |
| 426 | } | 433 | } |
| 427 | 434 | ||
| @@ -434,6 +441,8 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master, | |||
| 434 | drm_connector_cleanup(&hdmi->connector); | 441 | drm_connector_cleanup(&hdmi->connector); |
| 435 | drm_encoder_cleanup(&hdmi->encoder); | 442 | drm_encoder_cleanup(&hdmi->encoder); |
| 436 | i2c_del_adapter(hdmi->i2c); | 443 | i2c_del_adapter(hdmi->i2c); |
| 444 | clk_disable_unprepare(hdmi->mod_clk); | ||
| 445 | clk_disable_unprepare(hdmi->bus_clk); | ||
| 437 | } | 446 | } |
| 438 | 447 | ||
| 439 | static const struct component_ops sun4i_hdmi_ops = { | 448 | static const struct component_ops sun4i_hdmi_ops = { |
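The sun4i bind fix turns the early returns into the usual goto unwind ladder, so every clock that was already prepared/enabled is disabled again on failure, and unbind now performs the same teardown. Stripped to the pattern (device and setup helper hypothetical):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int my_bind(struct device *dev, struct my_hdmi *hdmi)
    {
        int ret;

        hdmi->bus_clk = devm_clk_get(dev, "bus");
        if (IS_ERR(hdmi->bus_clk))
            return PTR_ERR(hdmi->bus_clk);  /* nothing to undo yet */
        clk_prepare_enable(hdmi->bus_clk);

        hdmi->mod_clk = devm_clk_get(dev, "mod");
        if (IS_ERR(hdmi->mod_clk)) {
            ret = PTR_ERR(hdmi->mod_clk);
            goto err_disable_bus_clk;       /* undo only what succeeded */
        }
        clk_prepare_enable(hdmi->mod_clk);

        ret = my_setup(hdmi);
        if (ret)
            goto err_disable_mod_clk;

        return 0;

    err_disable_mod_clk:
        clk_disable_unprepare(hdmi->mod_clk);
    err_disable_bus_clk:
        clk_disable_unprepare(hdmi->bus_clk);
        return ret;
    }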
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 6a573d21d3cc..658fa2d3e40c 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
| @@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts) | |||
| 405 | return -EINVAL; | 405 | return -EINVAL; |
| 406 | } | 406 | } |
| 407 | 407 | ||
| 408 | /* | ||
| 409 | * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M / | ||
| 410 | * i.MX53 channel arbitration locking doesn't seem to work properly. | ||
| 411 | * Allow enabling the lock feature on IPUv3H / i.MX6 only. | ||
| 412 | */ | ||
| 413 | if (bursts && ipu->ipu_type != IPUV3H) | ||
| 414 | return -EINVAL; | ||
| 415 | |||
| 408 | for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { | 416 | for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { |
| 409 | if (channel->num == idmac_lock_en_info[i].chnum) | 417 | if (channel->num == idmac_lock_en_info[i].chnum) |
| 410 | break; | 418 | break; |
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index c35f74c83065..c860a7997cb5 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c | |||
| @@ -73,6 +73,14 @@ | |||
| 73 | #define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) | 73 | #define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) |
| 74 | #define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) | 74 | #define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) |
| 75 | 75 | ||
| 76 | #define IPU_PRE_STORE_ENG_STATUS 0x120 | ||
| 77 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK 0xffff | ||
| 78 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0 | ||
| 79 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK 0x3fff | ||
| 80 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16 | ||
| 81 | #define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL (1 << 30) | ||
| 82 | #define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD (1 << 31) | ||
| 83 | |||
| 76 | #define IPU_PRE_STORE_ENG_SIZE 0x130 | 84 | #define IPU_PRE_STORE_ENG_SIZE 0x130 |
| 77 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) | 85 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) |
| 78 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) | 86 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) |
| @@ -93,6 +101,7 @@ struct ipu_pre { | |||
| 93 | dma_addr_t buffer_paddr; | 101 | dma_addr_t buffer_paddr; |
| 94 | void *buffer_virt; | 102 | void *buffer_virt; |
| 95 | bool in_use; | 103 | bool in_use; |
| 104 | unsigned int safe_window_end; | ||
| 96 | }; | 105 | }; |
| 97 | 106 | ||
| 98 | static DEFINE_MUTEX(ipu_pre_list_mutex); | 107 | static DEFINE_MUTEX(ipu_pre_list_mutex); |
| @@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
| 160 | u32 active_bpp = info->cpp[0] >> 1; | 169 | u32 active_bpp = info->cpp[0] >> 1; |
| 161 | u32 val; | 170 | u32 val; |
| 162 | 171 | ||
| 172 | /* calculate safe window for ctrl register updates */ | ||
| 173 | pre->safe_window_end = height - 2; | ||
| 174 | |||
| 163 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); | 175 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); |
| 164 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 176 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
| 165 | 177 | ||
| @@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
| 199 | 211 | ||
| 200 | void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) | 212 | void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) |
| 201 | { | 213 | { |
| 214 | unsigned long timeout = jiffies + msecs_to_jiffies(5); | ||
| 215 | unsigned short current_yblock; | ||
| 216 | u32 val; | ||
| 217 | |||
| 202 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 218 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
| 219 | |||
| 220 | do { | ||
| 221 | if (time_after(jiffies, timeout)) { | ||
| 222 | dev_warn(pre->dev, "timeout waiting for PRE safe window\n"); | ||
| 223 | return; | ||
| 224 | } | ||
| 225 | |||
| 226 | val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS); | ||
| 227 | current_yblock = | ||
| 228 | (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) & | ||
| 229 | IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK; | ||
| 230 | } while (current_yblock == 0 || current_yblock >= pre->safe_window_end); | ||
| 231 | |||
| 203 | writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); | 232 | writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); |
| 204 | } | 233 | } |
| 205 | 234 | ||
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c index ecc9ea44dc50..0013ca9f72c8 100644 --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/ipu-v3/ipu-prg.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <drm/drm_fourcc.h> | 14 | #include <drm/drm_fourcc.h> |
| 15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
| 16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
| 17 | #include <linux/iopoll.h> | ||
| 17 | #include <linux/mfd/syscon.h> | 18 | #include <linux/mfd/syscon.h> |
| 18 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | 19 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> |
| 19 | #include <linux/module.h> | 20 | #include <linux/module.h> |
| @@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan, | |||
| 329 | val = IPU_PRG_REG_UPDATE_REG_UPDATE; | 330 | val = IPU_PRG_REG_UPDATE_REG_UPDATE; |
| 330 | writel(val, prg->regs + IPU_PRG_REG_UPDATE); | 331 | writel(val, prg->regs + IPU_PRG_REG_UPDATE); |
| 331 | 332 | ||
| 333 | /* wait for both double buffers to be filled */ | ||
| 334 | readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val, | ||
| 335 | (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) && | ||
| 336 | (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)), | ||
| 337 | 5, 1000); | ||
| 338 | |||
| 332 | clk_disable_unprepare(prg->clk_ipg); | 339 | clk_disable_unprepare(prg->clk_ipg); |
| 333 | 340 | ||
| 334 | chan->enabled = true; | 341 | chan->enabled = true; |
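The ipu-prg hunk waits for both double buffers with readl_poll_timeout(), which re-reads a register until a condition holds or a timeout expires (and may sleep, so it is only usable in process context). A minimal usage sketch; the register and bit names here are hypothetical, not the PRG ones:

    #include <linux/device.h>
    #include <linux/iopoll.h>

    static int wait_buffers_ready(struct device *dev, void __iomem *base)
    {
        u32 val;
        int ret;

        /* re-read STATUS every 5 us, give up after 1000 us */
        ret = readl_poll_timeout(base + MY_STATUS_REG, val,
                                 (val & MY_BUF0_READY) && (val & MY_BUF1_READY),
                                 5, 1000);
        if (ret)
            dev_warn(dev, "buffers not ready within 1 ms: %d\n", ret);

        return ret;
    }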
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 0a3117cc29e7..374301fcbc86 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
| @@ -281,6 +281,7 @@ config HID_ELECOM | |||
| 281 | Support for ELECOM devices: | 281 | Support for ELECOM devices: |
| 282 | - BM084 Bluetooth Mouse | 282 | - BM084 Bluetooth Mouse |
| 283 | - DEFT Trackball (Wired and wireless) | 283 | - DEFT Trackball (Wired and wireless) |
| 284 | - HUGE Trackball (Wired and wireless) | ||
| 284 | 285 | ||
| 285 | config HID_ELO | 286 | config HID_ELO |
| 286 | tristate "ELO USB 4000/4500 touchscreen" | 287 | tristate "ELO USB 4000/4500 touchscreen" |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 9bc91160819b..330ca983828b 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
| @@ -2032,6 +2032,8 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 2032 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, | 2032 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
| 2033 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, | 2033 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, |
| 2034 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, | 2034 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, |
| 2035 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) }, | ||
| 2036 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) }, | ||
| 2035 | #endif | 2037 | #endif |
| 2036 | #if IS_ENABLED(CONFIG_HID_ELO) | 2038 | #if IS_ENABLED(CONFIG_HID_ELO) |
| 2037 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, | 2039 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, |
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c index e2c7465df69f..54aeea57d209 100644 --- a/drivers/hid/hid-elecom.c +++ b/drivers/hid/hid-elecom.c | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> | 3 | * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> |
| 4 | * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> | 4 | * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> |
| 5 | * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> | 5 | * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> |
| 6 | * Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org> | ||
| 6 | */ | 7 | */ |
| 7 | 8 | ||
| 8 | /* | 9 | /* |
| @@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
| 32 | break; | 33 | break; |
| 33 | case USB_DEVICE_ID_ELECOM_DEFT_WIRED: | 34 | case USB_DEVICE_ID_ELECOM_DEFT_WIRED: |
| 34 | case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: | 35 | case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: |
| 35 | /* The DEFT trackball has eight buttons, but its descriptor only | 36 | case USB_DEVICE_ID_ELECOM_HUGE_WIRED: |
| 36 | * reports five, disabling the three Fn buttons on the top of | 37 | case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS: |
| 37 | * the mouse. | 38 | /* The DEFT/HUGE trackball has eight buttons, but its descriptor |
| 39 | * only reports five, disabling the three Fn buttons on the top | ||
| 40 | * of the mouse. | ||
| 38 | * | 41 | * |
| 39 | * Apply the following diff to the descriptor: | 42 | * Apply the following diff to the descriptor: |
| 40 | * | 43 | * |
| @@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
| 62 | * End Collection, End Collection, | 65 | * End Collection, End Collection, |
| 63 | */ | 66 | */ |
| 64 | if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { | 67 | if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { |
| 65 | hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n"); | 68 | hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n"); |
| 66 | rdesc[13] = 8; /* Button/Variable Report Count */ | 69 | rdesc[13] = 8; /* Button/Variable Report Count */ |
| 67 | rdesc[21] = 8; /* Button/Variable Usage Maximum */ | 70 | rdesc[21] = 8; /* Button/Variable Usage Maximum */ |
| 68 | rdesc[29] = 0; /* Button/Constant Report Count */ | 71 | rdesc[29] = 0; /* Button/Constant Report Count */ |
| @@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = { | |||
| 76 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, | 79 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
| 77 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, | 80 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, |
| 78 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, | 81 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, |
| 82 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) }, | ||
| 83 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) }, | ||
| 79 | { } | 84 | { } |
| 80 | }; | 85 | }; |
| 81 | MODULE_DEVICE_TABLE(hid, elecom_devices); | 86 | MODULE_DEVICE_TABLE(hid, elecom_devices); |
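Note on the hid-elecom change: the fixup keeps the existing 213-byte descriptor check and only widens the button fields so all eight buttons are reported. Below is a minimal standalone C sketch of that byte patch; the offsets (13, 21, 29) and the before/after values come from the hunk above, while the rest of the buffer contents are filler invented for illustration.

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char rdesc[213];

	memset(rdesc, 0, sizeof(rdesc));
	rdesc[13] = 5;	/* Button/Variable Report Count (as shipped) */
	rdesc[21] = 5;	/* Button/Variable Usage Maximum (as shipped) */
	rdesc[29] = 3;	/* Button/Constant Report Count (filler value) */

	if (sizeof(rdesc) == 213 && rdesc[13] == 5 && rdesc[21] == 5) {
		rdesc[13] = 8;	/* expose all eight buttons */
		rdesc[21] = 8;
		rdesc[29] = 0;	/* drop the padding bits */
	}

	printf("count=%u max=%u pad=%u\n", rdesc[13], rdesc[21], rdesc[29]);
	return 0;
}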
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index b397a14ab970..be2e005c3c51 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -368,6 +368,8 @@ | |||
| 368 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 | 368 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 |
| 369 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe | 369 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe |
| 370 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff | 370 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff |
| 371 | #define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c | ||
| 372 | #define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS 0x010d | ||
| 371 | 373 | ||
| 372 | #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 | 374 | #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 |
| 373 | #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 | 375 | #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 |
| @@ -533,6 +535,7 @@ | |||
| 533 | #define USB_VENDOR_ID_IDEACOM 0x1cb6 | 535 | #define USB_VENDOR_ID_IDEACOM 0x1cb6 |
| 534 | #define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 | 536 | #define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 |
| 535 | #define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 | 537 | #define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 |
| 538 | #define USB_DEVICE_ID_IDEACOM_IDC6680 0x6680 | ||
| 536 | 539 | ||
| 537 | #define USB_VENDOR_ID_ILITEK 0x222a | 540 | #define USB_VENDOR_ID_ILITEK 0x222a |
| 538 | #define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001 | 541 | #define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001 |
| @@ -660,6 +663,7 @@ | |||
| 660 | #define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048 | 663 | #define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048 |
| 661 | #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 | 664 | #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 |
| 662 | #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 | 665 | #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 |
| 666 | #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 | ||
| 663 | 667 | ||
| 664 | #define USB_VENDOR_ID_LG 0x1fd2 | 668 | #define USB_VENDOR_ID_LG 0x1fd2 |
| 665 | #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 | 669 | #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 |
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 440b999304a5..9e8c4d2ba11d 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c | |||
| @@ -930,6 +930,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
| 930 | field->application != HID_DG_PEN && | 930 | field->application != HID_DG_PEN && |
| 931 | field->application != HID_DG_TOUCHPAD && | 931 | field->application != HID_DG_TOUCHPAD && |
| 932 | field->application != HID_GD_KEYBOARD && | 932 | field->application != HID_GD_KEYBOARD && |
| 933 | field->application != HID_GD_SYSTEM_CONTROL && | ||
| 933 | field->application != HID_CP_CONSUMER_CONTROL && | 934 | field->application != HID_CP_CONSUMER_CONTROL && |
| 934 | field->application != HID_GD_WIRELESS_RADIO_CTLS && | 935 | field->application != HID_GD_WIRELESS_RADIO_CTLS && |
| 935 | !(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS && | 936 | !(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS && |
| @@ -1419,6 +1420,12 @@ static const struct hid_device_id mt_devices[] = { | |||
| 1419 | USB_VENDOR_ID_ALPS_JP, | 1420 | USB_VENDOR_ID_ALPS_JP, |
| 1420 | HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) }, | 1421 | HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) }, |
| 1421 | 1422 | ||
| 1423 | /* Lenovo X1 TAB Gen 2 */ | ||
| 1424 | { .driver_data = MT_CLS_WIN_8_DUAL, | ||
| 1425 | HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, | ||
| 1426 | USB_VENDOR_ID_LENOVO, | ||
| 1427 | USB_DEVICE_ID_LENOVO_X1_TAB) }, | ||
| 1428 | |||
| 1422 | /* Anton devices */ | 1429 | /* Anton devices */ |
| 1423 | { .driver_data = MT_CLS_EXPORT_ALL_INPUTS, | 1430 | { .driver_data = MT_CLS_EXPORT_ALL_INPUTS, |
| 1424 | MT_USB_DEVICE(USB_VENDOR_ID_ANTON, | 1431 | MT_USB_DEVICE(USB_VENDOR_ID_ANTON, |
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 5b40c2614599..ef241d66562e 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c | |||
| @@ -436,17 +436,24 @@ static int rmi_post_resume(struct hid_device *hdev) | |||
| 436 | if (!(data->device_flags & RMI_DEVICE)) | 436 | if (!(data->device_flags & RMI_DEVICE)) |
| 437 | return 0; | 437 | return 0; |
| 438 | 438 | ||
| 439 | ret = rmi_reset_attn_mode(hdev); | 439 | /* Make sure the HID device is ready to receive events */ |
| 440 | ret = hid_hw_open(hdev); | ||
| 440 | if (ret) | 441 | if (ret) |
| 441 | return ret; | 442 | return ret; |
| 442 | 443 | ||
| 444 | ret = rmi_reset_attn_mode(hdev); | ||
| 445 | if (ret) | ||
| 446 | goto out; | ||
| 447 | |||
| 443 | ret = rmi_driver_resume(rmi_dev, false); | 448 | ret = rmi_driver_resume(rmi_dev, false); |
| 444 | if (ret) { | 449 | if (ret) { |
| 445 | hid_warn(hdev, "Failed to resume device: %d\n", ret); | 450 | hid_warn(hdev, "Failed to resume device: %d\n", ret); |
| 446 | return ret; | 451 | goto out; |
| 447 | } | 452 | } |
| 448 | 453 | ||
| 449 | return 0; | 454 | out: |
| 455 | hid_hw_close(hdev); | ||
| 456 | return ret; | ||
| 450 | } | 457 | } |
| 451 | #endif /* CONFIG_PM */ | 458 | #endif /* CONFIG_PM */ |
| 452 | 459 | ||
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index ec530454e6f6..5fbe0f81ab2e 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c | |||
| @@ -337,8 +337,8 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit) | |||
| 337 | kfree(hidraw); | 337 | kfree(hidraw); |
| 338 | } else { | 338 | } else { |
| 339 | /* close device for last reader */ | 339 | /* close device for last reader */ |
| 340 | hid_hw_power(hidraw->hid, PM_HINT_NORMAL); | ||
| 341 | hid_hw_close(hidraw->hid); | 340 | hid_hw_close(hidraw->hid); |
| 341 | hid_hw_power(hidraw->hid, PM_HINT_NORMAL); | ||
| 342 | } | 342 | } |
| 343 | } | 343 | } |
| 344 | } | 344 | } |
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 77396145d2d0..9145c2129a96 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c | |||
| @@ -543,7 +543,8 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size) | |||
| 543 | { | 543 | { |
| 544 | /* the worst case is computed from the set_report command with a | 544 | /* the worst case is computed from the set_report command with a |
| 545 | * reportID > 15 and the maximum report length */ | 545 | * reportID > 15 and the maximum report length */ |
| 546 | int args_len = sizeof(__u8) + /* optional ReportID byte */ | 546 | int args_len = sizeof(__u8) + /* ReportID */ |
| 547 | sizeof(__u8) + /* optional ReportID byte */ | ||
| 547 | sizeof(__u16) + /* data register */ | 548 | sizeof(__u16) + /* data register */ |
| 548 | sizeof(__u16) + /* size of the report */ | 549 | sizeof(__u16) + /* size of the report */ |
| 549 | report_size; /* report */ | 550 | report_size; /* report */ |
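Note on the i2c-hid change: the worst-case buffer now counts two one-byte ID terms on top of the data register, length field and report payload; the sketch below just redoes that sum with both bytes counted, without guessing at which byte is which on the wire.

#include <stdio.h>
#include <stddef.h>

/* Standalone sketch (not the driver): worst-case argument length for a
 * set_report with a large report ID, per the breakdown in the hunk above. */
static size_t worst_case_args_len(size_t report_size)
{
	return 1 +		/* ReportID */
	       1 +		/* optional ReportID byte added by the fix */
	       2 +		/* data register */
	       2 +		/* size of the report */
	       report_size;	/* report */
}

int main(void)
{
	printf("args_len for a 64-byte report: %zu\n", worst_case_args_len(64));
	return 0;
}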
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 089bad8a9a21..045b5da9b992 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c | |||
| @@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid) | |||
| 975 | unsigned int rsize = 0; | 975 | unsigned int rsize = 0; |
| 976 | char *rdesc; | 976 | char *rdesc; |
| 977 | int ret, n; | 977 | int ret, n; |
| 978 | int num_descriptors; | ||
| 979 | size_t offset = offsetof(struct hid_descriptor, desc); | ||
| 978 | 980 | ||
| 979 | quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), | 981 | quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), |
| 980 | le16_to_cpu(dev->descriptor.idProduct)); | 982 | le16_to_cpu(dev->descriptor.idProduct)); |
| @@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid) | |||
| 997 | return -ENODEV; | 999 | return -ENODEV; |
| 998 | } | 1000 | } |
| 999 | 1001 | ||
| 1002 | if (hdesc->bLength < sizeof(struct hid_descriptor)) { | ||
| 1003 | dbg_hid("hid descriptor is too short\n"); | ||
| 1004 | return -EINVAL; | ||
| 1005 | } | ||
| 1006 | |||
| 1000 | hid->version = le16_to_cpu(hdesc->bcdHID); | 1007 | hid->version = le16_to_cpu(hdesc->bcdHID); |
| 1001 | hid->country = hdesc->bCountryCode; | 1008 | hid->country = hdesc->bCountryCode; |
| 1002 | 1009 | ||
| 1003 | for (n = 0; n < hdesc->bNumDescriptors; n++) | 1010 | num_descriptors = min_t(int, hdesc->bNumDescriptors, |
| 1011 | (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor)); | ||
| 1012 | |||
| 1013 | for (n = 0; n < num_descriptors; n++) | ||
| 1004 | if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) | 1014 | if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) |
| 1005 | rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); | 1015 | rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); |
| 1006 | 1016 | ||
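Note on the usbhid_parse() change: bNumDescriptors is device-supplied, so the loop bound is now clamped to however many class-descriptor entries actually fit inside bLength. A standalone sketch of that clamp; the byte counts here (6-byte header, 3-byte entries) are illustrative stand-ins rather than the real struct hid_descriptor layout.

#include <stdio.h>

int main(void)
{
	unsigned int bLength = 9;		/* claimed total descriptor size */
	unsigned int bNumDescriptors = 10;	/* bogus: more than can fit */
	unsigned int offset = 6;		/* assumed start of desc[] */
	unsigned int per_entry = 3;		/* assumed bytes per entry */

	/* never trust the advertised count beyond what bLength covers */
	unsigned int fits = bLength > offset ? (bLength - offset) / per_entry : 0;
	unsigned int n = bNumDescriptors < fits ? bNumDescriptors : fits;

	printf("parsing %u of %u advertised descriptors\n", n, bNumDescriptors);
	return 0;
}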
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index a83fa76655b9..f489a5cfcb48 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
| @@ -99,6 +99,7 @@ static const struct hid_blacklist { | |||
| 99 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL }, | 99 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL }, |
| 100 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, | 100 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, |
| 101 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, | 101 | { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, |
| 102 | { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT }, | ||
| 102 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL }, | 103 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL }, |
| 103 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, | 104 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, |
| 104 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET }, | 105 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET }, |
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index e82a696a1d07..906e654fb0ba 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c | |||
| @@ -668,8 +668,10 @@ static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev) | |||
| 668 | 668 | ||
| 669 | /* Try to find an already-probed interface from the same device */ | 669 | /* Try to find an already-probed interface from the same device */ |
| 670 | list_for_each_entry(data, &wacom_udev_list, list) { | 670 | list_for_each_entry(data, &wacom_udev_list, list) { |
| 671 | if (compare_device_paths(hdev, data->dev, '/')) | 671 | if (compare_device_paths(hdev, data->dev, '/')) { |
| 672 | kref_get(&data->kref); | ||
| 672 | return data; | 673 | return data; |
| 674 | } | ||
| 673 | } | 675 | } |
| 674 | 676 | ||
| 675 | /* Fallback to finding devices that appear to be "siblings" */ | 677 | /* Fallback to finding devices that appear to be "siblings" */ |
| @@ -766,6 +768,9 @@ static int wacom_led_control(struct wacom *wacom) | |||
| 766 | if (!wacom->led.groups) | 768 | if (!wacom->led.groups) |
| 767 | return -ENOTSUPP; | 769 | return -ENOTSUPP; |
| 768 | 770 | ||
| 771 | if (wacom->wacom_wac.features.type == REMOTE) | ||
| 772 | return -ENOTSUPP; | ||
| 773 | |||
| 769 | if (wacom->wacom_wac.pid) { /* wireless connected */ | 774 | if (wacom->wacom_wac.pid) { /* wireless connected */ |
| 770 | report_id = WAC_CMD_WL_LED_CONTROL; | 775 | report_id = WAC_CMD_WL_LED_CONTROL; |
| 771 | buf_size = 13; | 776 | buf_size = 13; |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index bb17d7bbefd3..aa692e28b2cd 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
| @@ -567,8 +567,8 @@ static int wacom_intuos_pad(struct wacom_wac *wacom) | |||
| 567 | keys = data[9] & 0x07; | 567 | keys = data[9] & 0x07; |
| 568 | } | 568 | } |
| 569 | } else { | 569 | } else { |
| 570 | buttons = ((data[6] & 0x10) << 10) | | 570 | buttons = ((data[6] & 0x10) << 5) | |
| 571 | ((data[5] & 0x10) << 9) | | 571 | ((data[5] & 0x10) << 4) | |
| 572 | ((data[6] & 0x0F) << 4) | | 572 | ((data[6] & 0x0F) << 4) | |
| 573 | (data[5] & 0x0F); | 573 | (data[5] & 0x0F); |
| 574 | } | 574 | } |
| @@ -1227,11 +1227,17 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom) | |||
| 1227 | continue; | 1227 | continue; |
| 1228 | 1228 | ||
| 1229 | if (range) { | 1229 | if (range) { |
| 1230 | /* Fix rotation alignment: userspace expects zero at left */ | ||
| 1231 | int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]); | ||
| 1232 | rotation += 1800/4; | ||
| 1233 | if (rotation > 899) | ||
| 1234 | rotation -= 1800; | ||
| 1235 | |||
| 1230 | input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); | 1236 | input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); |
| 1231 | input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); | 1237 | input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); |
| 1232 | input_report_abs(pen_input, ABS_TILT_X, frame[7]); | 1238 | input_report_abs(pen_input, ABS_TILT_X, (char)frame[7]); |
| 1233 | input_report_abs(pen_input, ABS_TILT_Y, frame[8]); | 1239 | input_report_abs(pen_input, ABS_TILT_Y, (char)frame[8]); |
| 1234 | input_report_abs(pen_input, ABS_Z, get_unaligned_le16(&frame[9])); | 1240 | input_report_abs(pen_input, ABS_Z, rotation); |
| 1235 | input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11])); | 1241 | input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11])); |
| 1236 | } | 1242 | } |
| 1237 | input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); | 1243 | input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); |
| @@ -1319,12 +1325,19 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom) | |||
| 1319 | unsigned char *data = wacom->data; | 1325 | unsigned char *data = wacom->data; |
| 1320 | 1326 | ||
| 1321 | int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01); | 1327 | int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01); |
| 1322 | int ring = data[285]; | 1328 | int ring = data[285] & 0x7F; |
| 1323 | int prox = buttons | (ring & 0x80); | 1329 | bool ringstatus = data[285] & 0x80; |
| 1330 | bool prox = buttons || ringstatus; | ||
| 1331 | |||
| 1332 | /* Fix touchring data: userspace expects 0 at left and increasing clockwise */ | ||
| 1333 | ring = 71 - ring; | ||
| 1334 | ring += 3*72/16; | ||
| 1335 | if (ring > 71) | ||
| 1336 | ring -= 72; | ||
| 1324 | 1337 | ||
| 1325 | wacom_report_numbered_buttons(pad_input, 9, buttons); | 1338 | wacom_report_numbered_buttons(pad_input, 9, buttons); |
| 1326 | 1339 | ||
| 1327 | input_report_abs(pad_input, ABS_WHEEL, (ring & 0x80) ? (ring & 0x7f) : 0); | 1340 | input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0); |
| 1328 | 1341 | ||
| 1329 | input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0); | 1342 | input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0); |
| 1330 | input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0); | 1343 | input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0); |
| @@ -1616,6 +1629,20 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len) | |||
| 1616 | return 0; | 1629 | return 0; |
| 1617 | } | 1630 | } |
| 1618 | 1631 | ||
| 1632 | static int wacom_offset_rotation(struct input_dev *input, struct hid_usage *usage, | ||
| 1633 | int value, int num, int denom) | ||
| 1634 | { | ||
| 1635 | struct input_absinfo *abs = &input->absinfo[usage->code]; | ||
| 1636 | int range = (abs->maximum - abs->minimum + 1); | ||
| 1637 | |||
| 1638 | value += num*range/denom; | ||
| 1639 | if (value > abs->maximum) | ||
| 1640 | value -= range; | ||
| 1641 | else if (value < abs->minimum) | ||
| 1642 | value += range; | ||
| 1643 | return value; | ||
| 1644 | } | ||
| 1645 | |||
| 1619 | int wacom_equivalent_usage(int usage) | 1646 | int wacom_equivalent_usage(int usage) |
| 1620 | { | 1647 | { |
| 1621 | if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) { | 1648 | if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) { |
| @@ -1898,6 +1925,7 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field | |||
| 1898 | unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); | 1925 | unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); |
| 1899 | int i; | 1926 | int i; |
| 1900 | bool is_touch_on = value; | 1927 | bool is_touch_on = value; |
| 1928 | bool do_report = false; | ||
| 1901 | 1929 | ||
| 1902 | /* | 1930 | /* |
| 1903 | * Avoid reporting this event and setting inrange_state if this usage | 1931 | * Avoid reporting this event and setting inrange_state if this usage |
| @@ -1912,6 +1940,29 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field | |||
| 1912 | } | 1940 | } |
| 1913 | 1941 | ||
| 1914 | switch (equivalent_usage) { | 1942 | switch (equivalent_usage) { |
| 1943 | case WACOM_HID_WD_TOUCHRING: | ||
| 1944 | /* | ||
| 1945 | * Userspace expects touchrings to increase in value with | ||
| 1946 | * clockwise gestures and have their zero point at the | ||
| 1947 | * tablet's left. HID events "should" be clockwise- | ||
| 1948 | * increasing and zero at top, though the MobileStudio | ||
| 1949 | * Pro and 2nd-gen Intuos Pro don't do this... | ||
| 1950 | */ | ||
| 1951 | if (hdev->vendor == 0x56a && | ||
| 1952 | (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */ | ||
| 1953 | hdev->product == 0x357 || hdev->product == 0x358)) { /* Intuos Pro 2 */ | ||
| 1954 | value = (field->logical_maximum - value); | ||
| 1955 | |||
| 1956 | if (hdev->product == 0x357 || hdev->product == 0x358) | ||
| 1957 | value = wacom_offset_rotation(input, usage, value, 3, 16); | ||
| 1958 | else if (hdev->product == 0x34d || hdev->product == 0x34e) | ||
| 1959 | value = wacom_offset_rotation(input, usage, value, 1, 2); | ||
| 1960 | } | ||
| 1961 | else { | ||
| 1962 | value = wacom_offset_rotation(input, usage, value, 1, 4); | ||
| 1963 | } | ||
| 1964 | do_report = true; | ||
| 1965 | break; | ||
| 1915 | case WACOM_HID_WD_TOUCHRINGSTATUS: | 1966 | case WACOM_HID_WD_TOUCHRINGSTATUS: |
| 1916 | if (!value) | 1967 | if (!value) |
| 1917 | input_event(input, usage->type, usage->code, 0); | 1968 | input_event(input, usage->type, usage->code, 0); |
| @@ -1945,10 +1996,14 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field | |||
| 1945 | value, i); | 1996 | value, i); |
| 1946 | /* fall through*/ | 1997 | /* fall through*/ |
| 1947 | default: | 1998 | default: |
| 1999 | do_report = true; | ||
| 2000 | break; | ||
| 2001 | } | ||
| 2002 | |||
| 2003 | if (do_report) { | ||
| 1948 | input_event(input, usage->type, usage->code, value); | 2004 | input_event(input, usage->type, usage->code, value); |
| 1949 | if (value) | 2005 | if (value) |
| 1950 | wacom_wac->hid_data.pad_input_event_flag = true; | 2006 | wacom_wac->hid_data.pad_input_event_flag = true; |
| 1951 | break; | ||
| 1952 | } | 2007 | } |
| 1953 | } | 2008 | } |
| 1954 | 2009 | ||
| @@ -2086,22 +2141,34 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field | |||
| 2086 | wacom_wac->hid_data.tipswitch |= value; | 2141 | wacom_wac->hid_data.tipswitch |= value; |
| 2087 | return; | 2142 | return; |
| 2088 | case HID_DG_TOOLSERIALNUMBER: | 2143 | case HID_DG_TOOLSERIALNUMBER: |
| 2089 | wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL); | 2144 | if (value) { |
| 2090 | wacom_wac->serial[0] |= (__u32)value; | 2145 | wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL); |
| 2146 | wacom_wac->serial[0] |= (__u32)value; | ||
| 2147 | } | ||
| 2091 | return; | 2148 | return; |
| 2149 | case HID_DG_TWIST: | ||
| 2150 | /* | ||
| 2151 | * Userspace expects pen twist to have its zero point when | ||
| 2152 | * the buttons/finger is on the tablet's left. HID values | ||
| 2153 | * are zero when buttons are toward the top. | ||
| 2154 | */ | ||
| 2155 | value = wacom_offset_rotation(input, usage, value, 1, 4); | ||
| 2156 | break; | ||
| 2092 | case WACOM_HID_WD_SENSE: | 2157 | case WACOM_HID_WD_SENSE: |
| 2093 | wacom_wac->hid_data.sense_state = value; | 2158 | wacom_wac->hid_data.sense_state = value; |
| 2094 | return; | 2159 | return; |
| 2095 | case WACOM_HID_WD_SERIALHI: | 2160 | case WACOM_HID_WD_SERIALHI: |
| 2096 | wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF); | 2161 | if (value) { |
| 2097 | wacom_wac->serial[0] |= ((__u64)value) << 32; | 2162 | wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF); |
| 2098 | /* | 2163 | wacom_wac->serial[0] |= ((__u64)value) << 32; |
| 2099 | * Non-USI EMR devices may contain additional tool type | 2164 | /* |
| 2100 | * information here. See WACOM_HID_WD_TOOLTYPE case for | 2165 | * Non-USI EMR devices may contain additional tool type |
| 2101 | * more details. | 2166 | * information here. See WACOM_HID_WD_TOOLTYPE case for |
| 2102 | */ | 2167 | * more details. |
| 2103 | if (value >> 20 == 1) { | 2168 | */ |
| 2104 | wacom_wac->id[0] |= value & 0xFFFFF; | 2169 | if (value >> 20 == 1) { |
| 2170 | wacom_wac->id[0] |= value & 0xFFFFF; | ||
| 2171 | } | ||
| 2105 | } | 2172 | } |
| 2106 | return; | 2173 | return; |
| 2107 | case WACOM_HID_WD_TOOLTYPE: | 2174 | case WACOM_HID_WD_TOOLTYPE: |
| @@ -2205,7 +2272,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev, | |||
| 2205 | input_report_key(input, wacom_wac->tool[0], prox); | 2272 | input_report_key(input, wacom_wac->tool[0], prox); |
| 2206 | if (wacom_wac->serial[0]) { | 2273 | if (wacom_wac->serial[0]) { |
| 2207 | input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]); | 2274 | input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]); |
| 2208 | input_report_abs(input, ABS_MISC, id); | 2275 | input_report_abs(input, ABS_MISC, prox ? id : 0); |
| 2209 | } | 2276 | } |
| 2210 | 2277 | ||
| 2211 | wacom_wac->hid_data.tipswitch = false; | 2278 | wacom_wac->hid_data.tipswitch = false; |
| @@ -2216,6 +2283,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev, | |||
| 2216 | if (!prox) { | 2283 | if (!prox) { |
| 2217 | wacom_wac->tool[0] = 0; | 2284 | wacom_wac->tool[0] = 0; |
| 2218 | wacom_wac->id[0] = 0; | 2285 | wacom_wac->id[0] = 0; |
| 2286 | wacom_wac->serial[0] = 0; | ||
| 2219 | } | 2287 | } |
| 2220 | } | 2288 | } |
| 2221 | 2289 | ||
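Note on the wacom_wac.c changes: both the Bluetooth Intuos Pro 2 path and the generic HID pad/pen paths now shift the ring or twist reading by a fixed fraction of its range and wrap it back into bounds, which is what wacom_offset_rotation() in the hunk does. A standalone sketch of that offset-and-wrap, using a 0..71 ring shifted by 3/16 of a turn as the worked example.

#include <stdio.h>

/* Add num/denom of the axis range, then fold the result back into [min, max]. */
static int offset_rotation(int value, int min, int max, int num, int denom)
{
	int range = max - min + 1;

	value += num * range / denom;
	if (value > max)
		value -= range;
	else if (value < min)
		value += range;
	return value;
}

int main(void)
{
	printf("ring 0  -> %d\n", offset_rotation(0, 0, 71, 3, 16));	/* 13 */
	printf("ring 70 -> %d\n", offset_rotation(70, 0, 71, 3, 16));	/* 11 */
	return 0;
}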
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c index 9c0dbb8191ad..e1be61095532 100644 --- a/drivers/hwmon/xgene-hwmon.c +++ b/drivers/hwmon/xgene-hwmon.c | |||
| @@ -630,7 +630,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev) | |||
| 630 | sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE, | 630 | sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE, |
| 631 | GFP_KERNEL); | 631 | GFP_KERNEL); |
| 632 | if (rc) | 632 | if (rc) |
| 633 | goto out_mbox_free; | 633 | return -ENOMEM; |
| 634 | 634 | ||
| 635 | INIT_WORK(&ctx->workq, xgene_hwmon_evt_work); | 635 | INIT_WORK(&ctx->workq, xgene_hwmon_evt_work); |
| 636 | 636 | ||
| @@ -646,7 +646,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev) | |||
| 646 | if (IS_ERR(ctx->mbox_chan)) { | 646 | if (IS_ERR(ctx->mbox_chan)) { |
| 647 | dev_err(&pdev->dev, | 647 | dev_err(&pdev->dev, |
| 648 | "SLIMpro mailbox channel request failed\n"); | 648 | "SLIMpro mailbox channel request failed\n"); |
| 649 | return -ENODEV; | 649 | rc = -ENODEV; |
| 650 | goto out_mbox_free; | ||
| 650 | } | 651 | } |
| 651 | } else { | 652 | } else { |
| 652 | struct acpi_pcct_hw_reduced *cppc_ss; | 653 | struct acpi_pcct_hw_reduced *cppc_ss; |
| @@ -654,7 +655,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev) | |||
| 654 | if (device_property_read_u32(&pdev->dev, "pcc-channel", | 655 | if (device_property_read_u32(&pdev->dev, "pcc-channel", |
| 655 | &ctx->mbox_idx)) { | 656 | &ctx->mbox_idx)) { |
| 656 | dev_err(&pdev->dev, "no pcc-channel property\n"); | 657 | dev_err(&pdev->dev, "no pcc-channel property\n"); |
| 657 | return -ENODEV; | 658 | rc = -ENODEV; |
| 659 | goto out_mbox_free; | ||
| 658 | } | 660 | } |
| 659 | 661 | ||
| 660 | cl->rx_callback = xgene_hwmon_pcc_rx_cb; | 662 | cl->rx_callback = xgene_hwmon_pcc_rx_cb; |
| @@ -662,7 +664,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev) | |||
| 662 | if (IS_ERR(ctx->mbox_chan)) { | 664 | if (IS_ERR(ctx->mbox_chan)) { |
| 663 | dev_err(&pdev->dev, | 665 | dev_err(&pdev->dev, |
| 664 | "PPC channel request failed\n"); | 666 | "PPC channel request failed\n"); |
| 665 | return -ENODEV; | 667 | rc = -ENODEV; |
| 668 | goto out_mbox_free; | ||
| 666 | } | 669 | } |
| 667 | 670 | ||
| 668 | /* | 671 | /* |
| @@ -675,13 +678,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev) | |||
| 675 | if (!cppc_ss) { | 678 | if (!cppc_ss) { |
| 676 | dev_err(&pdev->dev, "PPC subspace not found\n"); | 679 | dev_err(&pdev->dev, "PPC subspace not found\n"); |
| 677 | rc = -ENODEV; | 680 | rc = -ENODEV; |
| 678 | goto out_mbox_free; | 681 | goto out; |
| 679 | } | 682 | } |
| 680 | 683 | ||
| 681 | if (!ctx->mbox_chan->mbox->txdone_irq) { | 684 | if (!ctx->mbox_chan->mbox->txdone_irq) { |
| 682 | dev_err(&pdev->dev, "PCC IRQ not supported\n"); | 685 | dev_err(&pdev->dev, "PCC IRQ not supported\n"); |
| 683 | rc = -ENODEV; | 686 | rc = -ENODEV; |
| 684 | goto out_mbox_free; | 687 | goto out; |
| 685 | } | 688 | } |
| 686 | 689 | ||
| 687 | /* | 690 | /* |
| @@ -696,14 +699,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev) | |||
| 696 | } else { | 699 | } else { |
| 697 | dev_err(&pdev->dev, "Failed to get PCC comm region\n"); | 700 | dev_err(&pdev->dev, "Failed to get PCC comm region\n"); |
| 698 | rc = -ENODEV; | 701 | rc = -ENODEV; |
| 699 | goto out_mbox_free; | 702 | goto out; |
| 700 | } | 703 | } |
| 701 | 704 | ||
| 702 | if (!ctx->pcc_comm_addr) { | 705 | if (!ctx->pcc_comm_addr) { |
| 703 | dev_err(&pdev->dev, | 706 | dev_err(&pdev->dev, |
| 704 | "Failed to ioremap PCC comm region\n"); | 707 | "Failed to ioremap PCC comm region\n"); |
| 705 | rc = -ENOMEM; | 708 | rc = -ENOMEM; |
| 706 | goto out_mbox_free; | 709 | goto out; |
| 707 | } | 710 | } |
| 708 | 711 | ||
| 709 | /* | 712 | /* |
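Note on the xgene-hwmon change: every failure after the kfifo is allocated now unwinds through a label instead of returning directly, and failures after the mailbox/PCC channel is requested unwind through a later label that also releases the channel. A generic standalone sketch of that staged-goto shape; the function names here are invented, not the driver's.

#include <stdio.h>

static int acquire_fifo(void)    { return 0; }
static int acquire_channel(void) { return 0; }
static int map_region(void)      { return -1; }	/* pretend this step fails */

int main(void)
{
	int rc;

	rc = acquire_fifo();
	if (rc)
		return rc;		/* nothing to unwind yet */

	rc = acquire_channel();
	if (rc)
		goto out_fifo;		/* only the fifo exists */

	rc = map_region();
	if (rc)
		goto out_channel;	/* channel must be released too */

	printf("probe ok\n");
	return 0;

out_channel:
	printf("releasing channel\n");
out_fifo:
	printf("releasing fifo\n");
	return rc;
}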
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index c06dce2c1da7..45a3f3ca29b3 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig | |||
| @@ -131,6 +131,7 @@ config I2C_I801 | |||
| 131 | Gemini Lake (SOC) | 131 | Gemini Lake (SOC) |
| 132 | Cannon Lake-H (PCH) | 132 | Cannon Lake-H (PCH) |
| 133 | Cannon Lake-LP (PCH) | 133 | Cannon Lake-LP (PCH) |
| 134 | Cedar Fork (PCH) | ||
| 134 | 135 | ||
| 135 | This driver can also be built as a module. If so, the module | 136 | This driver can also be built as a module. If so, the module |
| 136 | will be called i2c-i801. | 137 | will be called i2c-i801. |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index e114e4e00d29..9e12a53ef7b8 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c | |||
| @@ -68,6 +68,7 @@ | |||
| 68 | * Gemini Lake (SOC) 0x31d4 32 hard yes yes yes | 68 | * Gemini Lake (SOC) 0x31d4 32 hard yes yes yes |
| 69 | * Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes | 69 | * Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes |
| 70 | * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes | 70 | * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes |
| 71 | * Cedar Fork (PCH) 0x18df 32 hard yes yes yes | ||
| 71 | * | 72 | * |
| 72 | * Features supported by this driver: | 73 | * Features supported by this driver: |
| 73 | * Software PEC no | 74 | * Software PEC no |
| @@ -204,6 +205,7 @@ | |||
| 204 | 205 | ||
| 205 | /* Older devices have their ID defined in <linux/pci_ids.h> */ | 206 | /* Older devices have their ID defined in <linux/pci_ids.h> */ |
| 206 | #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 | 207 | #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 |
| 208 | #define PCI_DEVICE_ID_INTEL_CDF_SMBUS 0x18df | ||
| 207 | #define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df | 209 | #define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df |
| 208 | #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 | 210 | #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 |
| 209 | #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 | 211 | #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 |
| @@ -1025,6 +1027,7 @@ static const struct pci_device_id i801_ids[] = { | |||
| 1025 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) }, | 1027 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) }, |
| 1026 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) }, | 1028 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) }, |
| 1027 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, | 1029 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, |
| 1030 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) }, | ||
| 1028 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, | 1031 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, |
| 1029 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, | 1032 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, |
| 1030 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, | 1033 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, |
| @@ -1513,6 +1516,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
| 1513 | case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS: | 1516 | case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS: |
| 1514 | case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS: | 1517 | case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS: |
| 1515 | case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS: | 1518 | case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS: |
| 1519 | case PCI_DEVICE_ID_INTEL_CDF_SMBUS: | ||
| 1516 | case PCI_DEVICE_ID_INTEL_DNV_SMBUS: | 1520 | case PCI_DEVICE_ID_INTEL_DNV_SMBUS: |
| 1517 | case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: | 1521 | case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: |
| 1518 | priv->features |= FEATURE_I2C_BLOCK_READ; | 1522 | priv->features |= FEATURE_I2C_BLOCK_READ; |
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c index 84fb35f6837f..eb1d91b986fd 100644 --- a/drivers/i2c/busses/i2c-img-scb.c +++ b/drivers/i2c/busses/i2c-img-scb.c | |||
| @@ -1459,6 +1459,6 @@ static struct platform_driver img_scb_i2c_driver = { | |||
| 1459 | }; | 1459 | }; |
| 1460 | module_platform_driver(img_scb_i2c_driver); | 1460 | module_platform_driver(img_scb_i2c_driver); |
| 1461 | 1461 | ||
| 1462 | MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>"); | 1462 | MODULE_AUTHOR("James Hogan <jhogan@kernel.org>"); |
| 1463 | MODULE_DESCRIPTION("IMG host I2C driver"); | 1463 | MODULE_DESCRIPTION("IMG host I2C driver"); |
| 1464 | MODULE_LICENSE("GPL v2"); | 1464 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c index 22e08ae1704f..25fcc3c1e32b 100644 --- a/drivers/i2c/busses/i2c-sprd.c +++ b/drivers/i2c/busses/i2c-sprd.c | |||
| @@ -627,6 +627,7 @@ static const struct dev_pm_ops sprd_i2c_pm_ops = { | |||
| 627 | 627 | ||
| 628 | static const struct of_device_id sprd_i2c_of_match[] = { | 628 | static const struct of_device_id sprd_i2c_of_match[] = { |
| 629 | { .compatible = "sprd,sc9860-i2c", }, | 629 | { .compatible = "sprd,sc9860-i2c", }, |
| 630 | {}, | ||
| 630 | }; | 631 | }; |
| 631 | 632 | ||
| 632 | static struct platform_driver sprd_i2c_driver = { | 633 | static struct platform_driver sprd_i2c_driver = { |
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 47c67b0ca896..d4a6e9c2e9aa 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c | |||
| @@ -215,7 +215,7 @@ struct stm32f7_i2c_dev { | |||
| 215 | unsigned int msg_num; | 215 | unsigned int msg_num; |
| 216 | unsigned int msg_id; | 216 | unsigned int msg_id; |
| 217 | struct stm32f7_i2c_msg f7_msg; | 217 | struct stm32f7_i2c_msg f7_msg; |
| 218 | struct stm32f7_i2c_setup *setup; | 218 | struct stm32f7_i2c_setup setup; |
| 219 | struct stm32f7_i2c_timings timing; | 219 | struct stm32f7_i2c_timings timing; |
| 220 | }; | 220 | }; |
| 221 | 221 | ||
| @@ -265,7 +265,7 @@ static struct stm32f7_i2c_spec i2c_specs[] = { | |||
| 265 | }, | 265 | }, |
| 266 | }; | 266 | }; |
| 267 | 267 | ||
| 268 | struct stm32f7_i2c_setup stm32f7_setup = { | 268 | static const struct stm32f7_i2c_setup stm32f7_setup = { |
| 269 | .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT, | 269 | .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT, |
| 270 | .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT, | 270 | .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT, |
| 271 | .dnf = STM32F7_I2C_DNF_DEFAULT, | 271 | .dnf = STM32F7_I2C_DNF_DEFAULT, |
| @@ -537,7 +537,7 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev) | |||
| 537 | writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR); | 537 | writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR); |
| 538 | 538 | ||
| 539 | /* Enable I2C */ | 539 | /* Enable I2C */ |
| 540 | if (i2c_dev->setup->analog_filter) | 540 | if (i2c_dev->setup.analog_filter) |
| 541 | stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, | 541 | stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, |
| 542 | STM32F7_I2C_CR1_ANFOFF); | 542 | STM32F7_I2C_CR1_ANFOFF); |
| 543 | else | 543 | else |
| @@ -887,22 +887,19 @@ static int stm32f7_i2c_probe(struct platform_device *pdev) | |||
| 887 | } | 887 | } |
| 888 | 888 | ||
| 889 | setup = of_device_get_match_data(&pdev->dev); | 889 | setup = of_device_get_match_data(&pdev->dev); |
| 890 | i2c_dev->setup->rise_time = setup->rise_time; | 890 | i2c_dev->setup = *setup; |
| 891 | i2c_dev->setup->fall_time = setup->fall_time; | ||
| 892 | i2c_dev->setup->dnf = setup->dnf; | ||
| 893 | i2c_dev->setup->analog_filter = setup->analog_filter; | ||
| 894 | 891 | ||
| 895 | ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns", | 892 | ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns", |
| 896 | &rise_time); | 893 | &rise_time); |
| 897 | if (!ret) | 894 | if (!ret) |
| 898 | i2c_dev->setup->rise_time = rise_time; | 895 | i2c_dev->setup.rise_time = rise_time; |
| 899 | 896 | ||
| 900 | ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns", | 897 | ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns", |
| 901 | &fall_time); | 898 | &fall_time); |
| 902 | if (!ret) | 899 | if (!ret) |
| 903 | i2c_dev->setup->fall_time = fall_time; | 900 | i2c_dev->setup.fall_time = fall_time; |
| 904 | 901 | ||
| 905 | ret = stm32f7_i2c_setup_timing(i2c_dev, i2c_dev->setup); | 902 | ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup); |
| 906 | if (ret) | 903 | if (ret) |
| 907 | goto clk_free; | 904 | goto clk_free; |
| 908 | 905 | ||
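Note on the i2c-stm32f7 change: i2c_dev->setup used to be a pointer that, as far as the removed lines show, was written through without ever being pointed at storage; embedding the struct and copying the match data into it gives each controller its own writable copy for the DT overrides. A trimmed-down standalone sketch of that pattern, with invented field names.

#include <stdio.h>

struct setup { int rise_ns, fall_ns; };

static const struct setup defaults = { 25, 10 };	/* per-compatible defaults */

struct dev { struct setup setup; };			/* embedded, not a pointer */

int main(void)
{
	struct dev d;

	d.setup = defaults;		/* take a private, writable copy */
	d.setup.rise_ns = 100;		/* DT override only touches our copy */

	printf("rise %d ns, fall %d ns\n", d.setup.rise_ns, d.setup.fall_ns);
	return 0;
}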
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 01b2adfd8226..eaf39e5db08b 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
| @@ -1451,6 +1451,7 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d, | |||
| 1451 | if (hwif_init(hwif) == 0) { | 1451 | if (hwif_init(hwif) == 0) { |
| 1452 | printk(KERN_INFO "%s: failed to initialize IDE " | 1452 | printk(KERN_INFO "%s: failed to initialize IDE " |
| 1453 | "interface\n", hwif->name); | 1453 | "interface\n", hwif->name); |
| 1454 | device_unregister(hwif->portdev); | ||
| 1454 | device_unregister(&hwif->gendev); | 1455 | device_unregister(&hwif->gendev); |
| 1455 | ide_disable_port(hwif); | 1456 | ide_disable_port(hwif); |
| 1456 | continue; | 1457 | continue; |
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c index 86aa88aeb3a6..acf874800ca4 100644 --- a/drivers/ide/ide-scan-pci.c +++ b/drivers/ide/ide-scan-pci.c | |||
| @@ -56,6 +56,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev) | |||
| 56 | { | 56 | { |
| 57 | struct list_head *l; | 57 | struct list_head *l; |
| 58 | struct pci_driver *d; | 58 | struct pci_driver *d; |
| 59 | int ret; | ||
| 59 | 60 | ||
| 60 | list_for_each(l, &ide_pci_drivers) { | 61 | list_for_each(l, &ide_pci_drivers) { |
| 61 | d = list_entry(l, struct pci_driver, node); | 62 | d = list_entry(l, struct pci_driver, node); |
| @@ -63,10 +64,14 @@ static int __init ide_scan_pcidev(struct pci_dev *dev) | |||
| 63 | const struct pci_device_id *id = | 64 | const struct pci_device_id *id = |
| 64 | pci_match_id(d->id_table, dev); | 65 | pci_match_id(d->id_table, dev); |
| 65 | 66 | ||
| 66 | if (id != NULL && d->probe(dev, id) >= 0) { | 67 | if (id != NULL) { |
| 67 | dev->driver = d; | 68 | pci_assign_irq(dev); |
| 68 | pci_dev_get(dev); | 69 | ret = d->probe(dev, id); |
| 69 | return 1; | 70 | if (ret >= 0) { |
| 71 | dev->driver = d; | ||
| 72 | pci_dev_get(dev); | ||
| 73 | return 1; | ||
| 74 | } | ||
| 70 | } | 75 | } |
| 71 | } | 76 | } |
| 72 | } | 77 | } |
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index 112d2fe1bcdb..fdc8e813170c 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c | |||
| @@ -179,6 +179,7 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise); | |||
| 179 | /** | 179 | /** |
| 180 | * ide_pci_enable - do PCI enables | 180 | * ide_pci_enable - do PCI enables |
| 181 | * @dev: PCI device | 181 | * @dev: PCI device |
| 182 | * @bars: PCI BARs mask | ||
| 182 | * @d: IDE port info | 183 | * @d: IDE port info |
| 183 | * | 184 | * |
| 184 | * Enable the IDE PCI device. We attempt to enable the device in full | 185 | * Enable the IDE PCI device. We attempt to enable the device in full |
| @@ -189,9 +190,10 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise); | |||
| 189 | * Returns zero on success or an error code | 190 | * Returns zero on success or an error code |
| 190 | */ | 191 | */ |
| 191 | 192 | ||
| 192 | static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d) | 193 | static int ide_pci_enable(struct pci_dev *dev, int bars, |
| 194 | const struct ide_port_info *d) | ||
| 193 | { | 195 | { |
| 194 | int ret, bars; | 196 | int ret; |
| 195 | 197 | ||
| 196 | if (pci_enable_device(dev)) { | 198 | if (pci_enable_device(dev)) { |
| 197 | ret = pci_enable_device_io(dev); | 199 | ret = pci_enable_device_io(dev); |
| @@ -216,18 +218,6 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d) | |||
| 216 | goto out; | 218 | goto out; |
| 217 | } | 219 | } |
| 218 | 220 | ||
| 219 | if (d->host_flags & IDE_HFLAG_SINGLE) | ||
| 220 | bars = (1 << 2) - 1; | ||
| 221 | else | ||
| 222 | bars = (1 << 4) - 1; | ||
| 223 | |||
| 224 | if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) { | ||
| 225 | if (d->host_flags & IDE_HFLAG_CS5520) | ||
| 226 | bars |= (1 << 2); | ||
| 227 | else | ||
| 228 | bars |= (1 << 4); | ||
| 229 | } | ||
| 230 | |||
| 231 | ret = pci_request_selected_regions(dev, bars, d->name); | 221 | ret = pci_request_selected_regions(dev, bars, d->name); |
| 232 | if (ret < 0) | 222 | if (ret < 0) |
| 233 | printk(KERN_ERR "%s %s: can't reserve resources\n", | 223 | printk(KERN_ERR "%s %s: can't reserve resources\n", |
| @@ -403,6 +393,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d) | |||
| 403 | /** | 393 | /** |
| 404 | * ide_setup_pci_controller - set up IDE PCI | 394 | * ide_setup_pci_controller - set up IDE PCI |
| 405 | * @dev: PCI device | 395 | * @dev: PCI device |
| 396 | * @bars: PCI BARs mask | ||
| 406 | * @d: IDE port info | 397 | * @d: IDE port info |
| 407 | * @noisy: verbose flag | 398 | * @noisy: verbose flag |
| 408 | * | 399 | * |
| @@ -411,7 +402,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d) | |||
| 411 | * and enables it if need be | 402 | * and enables it if need be |
| 412 | */ | 403 | */ |
| 413 | 404 | ||
| 414 | static int ide_setup_pci_controller(struct pci_dev *dev, | 405 | static int ide_setup_pci_controller(struct pci_dev *dev, int bars, |
| 415 | const struct ide_port_info *d, int noisy) | 406 | const struct ide_port_info *d, int noisy) |
| 416 | { | 407 | { |
| 417 | int ret; | 408 | int ret; |
| @@ -420,7 +411,7 @@ static int ide_setup_pci_controller(struct pci_dev *dev, | |||
| 420 | if (noisy) | 411 | if (noisy) |
| 421 | ide_setup_pci_noise(dev, d); | 412 | ide_setup_pci_noise(dev, d); |
| 422 | 413 | ||
| 423 | ret = ide_pci_enable(dev, d); | 414 | ret = ide_pci_enable(dev, bars, d); |
| 424 | if (ret < 0) | 415 | if (ret < 0) |
| 425 | goto out; | 416 | goto out; |
| 426 | 417 | ||
| @@ -428,16 +419,20 @@ static int ide_setup_pci_controller(struct pci_dev *dev, | |||
| 428 | if (ret < 0) { | 419 | if (ret < 0) { |
| 429 | printk(KERN_ERR "%s %s: error accessing PCI regs\n", | 420 | printk(KERN_ERR "%s %s: error accessing PCI regs\n", |
| 430 | d->name, pci_name(dev)); | 421 | d->name, pci_name(dev)); |
| 431 | goto out; | 422 | goto out_free_bars; |
| 432 | } | 423 | } |
| 433 | if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */ | 424 | if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */ |
| 434 | ret = ide_pci_configure(dev, d); | 425 | ret = ide_pci_configure(dev, d); |
| 435 | if (ret < 0) | 426 | if (ret < 0) |
| 436 | goto out; | 427 | goto out_free_bars; |
| 437 | printk(KERN_INFO "%s %s: device enabled (Linux)\n", | 428 | printk(KERN_INFO "%s %s: device enabled (Linux)\n", |
| 438 | d->name, pci_name(dev)); | 429 | d->name, pci_name(dev)); |
| 439 | } | 430 | } |
| 440 | 431 | ||
| 432 | goto out; | ||
| 433 | |||
| 434 | out_free_bars: | ||
| 435 | pci_release_selected_regions(dev, bars); | ||
| 441 | out: | 436 | out: |
| 442 | return ret; | 437 | return ret; |
| 443 | } | 438 | } |
| @@ -540,13 +535,28 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, | |||
| 540 | { | 535 | { |
| 541 | struct pci_dev *pdev[] = { dev1, dev2 }; | 536 | struct pci_dev *pdev[] = { dev1, dev2 }; |
| 542 | struct ide_host *host; | 537 | struct ide_host *host; |
| 543 | int ret, i, n_ports = dev2 ? 4 : 2; | 538 | int ret, i, n_ports = dev2 ? 4 : 2, bars; |
| 544 | struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL }; | 539 | struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL }; |
| 545 | 540 | ||
| 541 | if (d->host_flags & IDE_HFLAG_SINGLE) | ||
| 542 | bars = (1 << 2) - 1; | ||
| 543 | else | ||
| 544 | bars = (1 << 4) - 1; | ||
| 545 | |||
| 546 | if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) { | ||
| 547 | if (d->host_flags & IDE_HFLAG_CS5520) | ||
| 548 | bars |= (1 << 2); | ||
| 549 | else | ||
| 550 | bars |= (1 << 4); | ||
| 551 | } | ||
| 552 | |||
| 546 | for (i = 0; i < n_ports / 2; i++) { | 553 | for (i = 0; i < n_ports / 2; i++) { |
| 547 | ret = ide_setup_pci_controller(pdev[i], d, !i); | 554 | ret = ide_setup_pci_controller(pdev[i], bars, d, !i); |
| 548 | if (ret < 0) | 555 | if (ret < 0) { |
| 556 | if (i == 1) | ||
| 557 | pci_release_selected_regions(pdev[0], bars); | ||
| 549 | goto out; | 558 | goto out; |
| 559 | } | ||
| 550 | 560 | ||
| 551 | ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]); | 561 | ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]); |
| 552 | } | 562 | } |
| @@ -554,7 +564,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, | |||
| 554 | host = ide_host_alloc(d, hws, n_ports); | 564 | host = ide_host_alloc(d, hws, n_ports); |
| 555 | if (host == NULL) { | 565 | if (host == NULL) { |
| 556 | ret = -ENOMEM; | 566 | ret = -ENOMEM; |
| 557 | goto out; | 567 | goto out_free_bars; |
| 558 | } | 568 | } |
| 559 | 569 | ||
| 560 | host->dev[0] = &dev1->dev; | 570 | host->dev[0] = &dev1->dev; |
| @@ -576,7 +586,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, | |||
| 576 | * do_ide_setup_pci_device() on the first device! | 586 | * do_ide_setup_pci_device() on the first device! |
| 577 | */ | 587 | */ |
| 578 | if (ret < 0) | 588 | if (ret < 0) |
| 579 | goto out; | 589 | goto out_free_bars; |
| 580 | 590 | ||
| 581 | /* fixup IRQ */ | 591 | /* fixup IRQ */ |
| 582 | if (ide_pci_is_in_compatibility_mode(pdev[i])) { | 592 | if (ide_pci_is_in_compatibility_mode(pdev[i])) { |
| @@ -589,6 +599,13 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2, | |||
| 589 | ret = ide_host_register(host, d, hws); | 599 | ret = ide_host_register(host, d, hws); |
| 590 | if (ret) | 600 | if (ret) |
| 591 | ide_host_free(host); | 601 | ide_host_free(host); |
| 602 | else | ||
| 603 | goto out; | ||
| 604 | |||
| 605 | out_free_bars: | ||
| 606 | i = n_ports / 2; | ||
| 607 | while (i--) | ||
| 608 | pci_release_selected_regions(pdev[i], bars); | ||
| 592 | out: | 609 | out: |
| 593 | return ret; | 610 | return ret; |
| 594 | } | 611 | } |
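Note on the setup-pci.c change: the BAR mask is now computed once in ide_pci_init_two() so the same mask can be passed to pci_request_selected_regions() and to the new pci_release_selected_regions() unwind paths. A standalone sketch of the mask arithmetic taken from the hunk.

#include <stdio.h>

int main(void)
{
	int single = 1, no_dma = 0, cs5520 = 0;

	/* BARs 0-1 for a single-port controller, BARs 0-3 otherwise */
	int bars = single ? (1 << 2) - 1 : (1 << 4) - 1;

	/* add the bus-master BAR when DMA is possible */
	if (!no_dma)
		bars |= cs5520 ? (1 << 2) : (1 << 4);

	printf("selected BAR mask: 0x%x\n", bars);	/* 0x13 for this example */
	return 0;
}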
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c index 30825bb9b8e9..8861c052155a 100644 --- a/drivers/infiniband/core/iwpm_msg.c +++ b/drivers/infiniband/core/iwpm_msg.c | |||
| @@ -100,6 +100,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client) | |||
| 100 | if (ret) | 100 | if (ret) |
| 101 | goto pid_query_error; | 101 | goto pid_query_error; |
| 102 | 102 | ||
| 103 | nlmsg_end(skb, nlh); | ||
| 104 | |||
| 103 | pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n", | 105 | pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n", |
| 104 | __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name); | 106 | __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name); |
| 105 | 107 | ||
| @@ -170,6 +172,8 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) | |||
| 170 | &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR); | 172 | &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR); |
| 171 | if (ret) | 173 | if (ret) |
| 172 | goto add_mapping_error; | 174 | goto add_mapping_error; |
| 175 | |||
| 176 | nlmsg_end(skb, nlh); | ||
| 173 | nlmsg_request->req_buffer = pm_msg; | 177 | nlmsg_request->req_buffer = pm_msg; |
| 174 | 178 | ||
| 175 | ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); | 179 | ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); |
| @@ -246,6 +250,8 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client) | |||
| 246 | &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR); | 250 | &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR); |
| 247 | if (ret) | 251 | if (ret) |
| 248 | goto query_mapping_error; | 252 | goto query_mapping_error; |
| 253 | |||
| 254 | nlmsg_end(skb, nlh); | ||
| 249 | nlmsg_request->req_buffer = pm_msg; | 255 | nlmsg_request->req_buffer = pm_msg; |
| 250 | 256 | ||
| 251 | ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); | 257 | ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); |
| @@ -308,6 +314,8 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client) | |||
| 308 | if (ret) | 314 | if (ret) |
| 309 | goto remove_mapping_error; | 315 | goto remove_mapping_error; |
| 310 | 316 | ||
| 317 | nlmsg_end(skb, nlh); | ||
| 318 | |||
| 311 | ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); | 319 | ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); |
| 312 | if (ret) { | 320 | if (ret) { |
| 313 | skb = NULL; /* skb is freed in the netlink send-op handling */ | 321 | skb = NULL; /* skb is freed in the netlink send-op handling */ |
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index c81c55942626..3c4faadb8cdd 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c | |||
| @@ -597,6 +597,9 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid) | |||
| 597 | &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM); | 597 | &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM); |
| 598 | if (ret) | 598 | if (ret) |
| 599 | goto mapinfo_num_error; | 599 | goto mapinfo_num_error; |
| 600 | |||
| 601 | nlmsg_end(skb, nlh); | ||
| 602 | |||
| 600 | ret = rdma_nl_unicast(skb, iwpm_pid); | 603 | ret = rdma_nl_unicast(skb, iwpm_pid); |
| 601 | if (ret) { | 604 | if (ret) { |
| 602 | skb = NULL; | 605 | skb = NULL; |
| @@ -678,6 +681,8 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid) | |||
| 678 | if (ret) | 681 | if (ret) |
| 679 | goto send_mapping_info_unlock; | 682 | goto send_mapping_info_unlock; |
| 680 | 683 | ||
| 684 | nlmsg_end(skb, nlh); | ||
| 685 | |||
| 681 | iwpm_print_sockaddr(&map_info->local_sockaddr, | 686 | iwpm_print_sockaddr(&map_info->local_sockaddr, |
| 682 | "send_mapping_info: Local sockaddr:"); | 687 | "send_mapping_info: Local sockaddr:"); |
| 683 | iwpm_print_sockaddr(&map_info->mapped_sockaddr, | 688 | iwpm_print_sockaddr(&map_info->mapped_sockaddr, |
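Note on the iwpm changes: each of these request and mapinfo messages was being sent without a final nlmsg_end(), i.e. without the header's length field being updated to cover the attributes appended after nlmsg_put(). The standalone mock below is not the kernel netlink API, only the shape of that finalize step.

#include <stdio.h>
#include <string.h>

struct hdr { unsigned short len; unsigned short type; };

static unsigned char buf[64];
static size_t tail;

int main(void)
{
	struct hdr h = { 0, 1 };	/* length not known yet */
	size_t start = tail;
	int pid = 1234;			/* example attribute payload */

	memcpy(buf + tail, &h, sizeof(h));	/* header first, like nlmsg_put() */
	tail += sizeof(h);
	memcpy(buf + tail, &pid, sizeof(pid));	/* then the attribute */
	tail += sizeof(pid);

	h.len = (unsigned short)(tail - start);	/* the "nlmsg_end" step */
	memcpy(buf + start, &h, sizeof(h));

	printf("final message length: %u\n", h.len);
	return 0;
}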
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index d1f5345f04f0..42ca5346777d 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c | |||
| @@ -48,7 +48,7 @@ | |||
| 48 | * @wqe: cqp wqe for header | 48 | * @wqe: cqp wqe for header |
| 49 | * @header: header for the cqp wqe | 49 | * @header: header for the cqp wqe |
| 50 | */ | 50 | */ |
| 51 | static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header) | 51 | void i40iw_insert_wqe_hdr(u64 *wqe, u64 header) |
| 52 | { | 52 | { |
| 53 | wmb(); /* make sure WQE is populated before polarity is set */ | 53 | wmb(); /* make sure WQE is populated before polarity is set */ |
| 54 | set_64bit_val(wqe, 24, header); | 54 | set_64bit_val(wqe, 24, header); |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h index e217a1259f57..5498ad01c280 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_p.h +++ b/drivers/infiniband/hw/i40iw/i40iw_p.h | |||
| @@ -59,6 +59,8 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp, | |||
| 59 | struct i40iw_fast_reg_stag_info *info, | 59 | struct i40iw_fast_reg_stag_info *info, |
| 60 | bool post_sq); | 60 | bool post_sq); |
| 61 | 61 | ||
| 62 | void i40iw_insert_wqe_hdr(u64 *wqe, u64 header); | ||
| 63 | |||
| 62 | /* HMC/FPM functions */ | 64 | /* HMC/FPM functions */ |
| 63 | enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, | 65 | enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, |
| 64 | u8 hmc_fn_id); | 66 | u8 hmc_fn_id); |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index c2cab20c4bc5..59f70676f0e0 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c | |||
| @@ -123,12 +123,11 @@ static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx, | |||
| 123 | get_64bit_val(wqe, 24, &offset24); | 123 | get_64bit_val(wqe, 24, &offset24); |
| 124 | 124 | ||
| 125 | offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID); | 125 | offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID); |
| 126 | set_64bit_val(wqe, 24, offset24); | ||
| 127 | 126 | ||
| 128 | set_64bit_val(wqe, 0, buf->mem.pa); | 127 | set_64bit_val(wqe, 0, buf->mem.pa); |
| 129 | set_64bit_val(wqe, 8, | 128 | set_64bit_val(wqe, 8, |
| 130 | LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN)); | 129 | LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN)); |
| 131 | set_64bit_val(wqe, 24, offset24); | 130 | i40iw_insert_wqe_hdr(wqe, offset24); |
| 132 | } | 131 | } |
| 133 | 132 | ||
| 134 | /** | 133 | /** |
| @@ -409,9 +408,7 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp, | |||
| 409 | set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN)); | 408 | set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN)); |
| 410 | set_64bit_val(wqe, 16, header[0]); | 409 | set_64bit_val(wqe, 16, header[0]); |
| 411 | 410 | ||
| 412 | /* Ensure all data is written before writing valid bit */ | 411 | i40iw_insert_wqe_hdr(wqe, header[1]); |
| 413 | wmb(); | ||
| 414 | set_64bit_val(wqe, 24, header[1]); | ||
| 415 | 412 | ||
| 416 | i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32); | 413 | i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32); |
| 417 | i40iw_qp_post_wr(&qp->qp_uk); | 414 | i40iw_qp_post_wr(&qp->qp_uk); |
| @@ -539,7 +536,7 @@ static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct | |||
| 539 | LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) | | 536 | LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) | |
| 540 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | 537 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); |
| 541 | 538 | ||
| 542 | set_64bit_val(wqe, 24, header); | 539 | i40iw_insert_wqe_hdr(wqe, header); |
| 543 | 540 | ||
| 544 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32); | 541 | i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32); |
| 545 | i40iw_sc_cqp_post_sq(cqp); | 542 | i40iw_sc_cqp_post_sq(cqp); |
| @@ -655,7 +652,7 @@ static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct | |||
| 655 | LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) | | 652 | LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) | |
| 656 | LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) | | 653 | LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) | |
| 657 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); | 654 | LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); |
| 658 | set_64bit_val(wqe, 24, header); | 655 | i40iw_insert_wqe_hdr(wqe, header); |
| 659 | 656 | ||
| 660 | i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE", | 657 | i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE", |
| 661 | wqe, I40IW_CQP_WQE_SIZE * 8); | 658 | wqe, I40IW_CQP_WQE_SIZE * 8); |
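Note on the i40iw changes: the open-coded wmb()-then-write sequences are replaced by the shared i40iw_insert_wqe_hdr() helper, which issues the barrier before storing the header word so the valid/polarity bit can never be observed ahead of the rest of the WQE. A standalone sketch of that ordering, using a C11 release fence as a stand-in for wmb().

#include <stdatomic.h>
#include <stdio.h>
#include <stdint.h>

static uint64_t wqe[4];

static void insert_wqe_hdr(uint64_t *w, uint64_t header)
{
	atomic_thread_fence(memory_order_release);	/* stand-in for wmb() */
	w[3] = header;					/* header word written last */
}

int main(void)
{
	wqe[0] = 0x1000;	/* payload address (example) */
	wqe[1] = 64;		/* fragment length (example) */
	wqe[2] = 0;
	insert_wqe_hdr(wqe, 1);	/* valid bit only after the data words */
	printf("wqe header word: %llu\n", (unsigned long long)wqe[3]);
	return 0;
}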
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 28b3d02d511b..62be0a41ad0b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c | |||
| @@ -826,12 +826,14 @@ static int i40iw_query_qp(struct ib_qp *ibqp, | |||
| 826 | attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE; | 826 | attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE; |
| 827 | attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; | 827 | attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; |
| 828 | attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; | 828 | attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; |
| 829 | attr->port_num = 1; | ||
| 829 | init_attr->event_handler = iwqp->ibqp.event_handler; | 830 | init_attr->event_handler = iwqp->ibqp.event_handler; |
| 830 | init_attr->qp_context = iwqp->ibqp.qp_context; | 831 | init_attr->qp_context = iwqp->ibqp.qp_context; |
| 831 | init_attr->send_cq = iwqp->ibqp.send_cq; | 832 | init_attr->send_cq = iwqp->ibqp.send_cq; |
| 832 | init_attr->recv_cq = iwqp->ibqp.recv_cq; | 833 | init_attr->recv_cq = iwqp->ibqp.recv_cq; |
| 833 | init_attr->srq = iwqp->ibqp.srq; | 834 | init_attr->srq = iwqp->ibqp.srq; |
| 834 | init_attr->cap = attr->cap; | 835 | init_attr->cap = attr->cap; |
| 836 | init_attr->port_num = 1; | ||
| 835 | return 0; | 837 | return 0; |
| 836 | } | 838 | } |
| 837 | 839 | ||
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d6fbad8f34aa..552f7bd4ecc3 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
| @@ -4174,9 +4174,9 @@ err_bfreg: | |||
| 4174 | err_uar_page: | 4174 | err_uar_page: |
| 4175 | mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); | 4175 | mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); |
| 4176 | 4176 | ||
| 4177 | err_cnt: | ||
| 4178 | mlx5_ib_cleanup_cong_debugfs(dev); | ||
| 4179 | err_cong: | 4177 | err_cong: |
| 4178 | mlx5_ib_cleanup_cong_debugfs(dev); | ||
| 4179 | err_cnt: | ||
| 4180 | if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) | 4180 | if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) |
| 4181 | mlx5_ib_dealloc_counters(dev); | 4181 | mlx5_ib_dealloc_counters(dev); |
| 4182 | 4182 | ||
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h index b2bb42e2805d..254083b524bd 100644 --- a/drivers/infiniband/hw/qedr/qedr.h +++ b/drivers/infiniband/hw/qedr/qedr.h | |||
| @@ -387,7 +387,7 @@ struct qedr_qp { | |||
| 387 | u8 wqe_size; | 387 | u8 wqe_size; |
| 388 | 388 | ||
| 389 | u8 smac[ETH_ALEN]; | 389 | u8 smac[ETH_ALEN]; |
| 390 | u16 vlan_id; | 390 | u16 vlan; |
| 391 | int rc; | 391 | int rc; |
| 392 | } *rqe_wr_id; | 392 | } *rqe_wr_id; |
| 393 | 393 | ||
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index 4689e802b332..ad8965397cf7 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c | |||
| @@ -105,7 +105,7 @@ void qedr_ll2_complete_rx_packet(void *cxt, | |||
| 105 | 105 | ||
| 106 | qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ? | 106 | qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ? |
| 107 | -EINVAL : 0; | 107 | -EINVAL : 0; |
| 108 | qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan; | 108 | qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan; |
| 109 | /* note: length stands for data length i.e. GRH is excluded */ | 109 | /* note: length stands for data length i.e. GRH is excluded */ |
| 110 | qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = | 110 | qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = |
| 111 | data->length.data_length; | 111 | data->length.data_length; |
| @@ -694,6 +694,7 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
| 694 | struct qedr_cq *cq = get_qedr_cq(ibcq); | 694 | struct qedr_cq *cq = get_qedr_cq(ibcq); |
| 695 | struct qedr_qp *qp = dev->gsi_qp; | 695 | struct qedr_qp *qp = dev->gsi_qp; |
| 696 | unsigned long flags; | 696 | unsigned long flags; |
| 697 | u16 vlan_id; | ||
| 697 | int i = 0; | 698 | int i = 0; |
| 698 | 699 | ||
| 699 | spin_lock_irqsave(&cq->cq_lock, flags); | 700 | spin_lock_irqsave(&cq->cq_lock, flags); |
| @@ -712,9 +713,14 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) | |||
| 712 | wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK; | 713 | wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK; |
| 713 | ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac); | 714 | ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac); |
| 714 | wc[i].wc_flags |= IB_WC_WITH_SMAC; | 715 | wc[i].wc_flags |= IB_WC_WITH_SMAC; |
| 715 | if (qp->rqe_wr_id[qp->rq.cons].vlan_id) { | 716 | |
| 717 | vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan & | ||
| 718 | VLAN_VID_MASK; | ||
| 719 | if (vlan_id) { | ||
| 716 | wc[i].wc_flags |= IB_WC_WITH_VLAN; | 720 | wc[i].wc_flags |= IB_WC_WITH_VLAN; |
| 717 | wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id; | 721 | wc[i].vlan_id = vlan_id; |
| 722 | wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan & | ||
| 723 | VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; | ||
| 718 | } | 724 | } |
| 719 | 725 | ||
| 720 | qedr_inc_sw_cons(&qp->rq); | 726 | qedr_inc_sw_cons(&qp->rq); |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 51f8215877f5..8e8874d23717 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -2773,14 +2773,16 @@ int __init amd_iommu_init_api(void) | |||
| 2773 | 2773 | ||
| 2774 | int __init amd_iommu_init_dma_ops(void) | 2774 | int __init amd_iommu_init_dma_ops(void) |
| 2775 | { | 2775 | { |
| 2776 | swiotlb = iommu_pass_through ? 1 : 0; | 2776 | swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0; |
| 2777 | iommu_detected = 1; | 2777 | iommu_detected = 1; |
| 2778 | 2778 | ||
| 2779 | /* | 2779 | /* |
| 2780 | * In case we don't initialize SWIOTLB (actually the common case | 2780 | * In case we don't initialize SWIOTLB (actually the common case |
| 2781 | * when AMD IOMMU is enabled), make sure there are global | 2781 | * when AMD IOMMU is enabled and SME is not active), make sure there |
| 2782 | * dma_ops set as a fall-back for devices not handled by this | 2782 | * are global dma_ops set as a fall-back for devices not handled by |
| 2783 | * driver (for example non-PCI devices). | 2783 | * this driver (for example non-PCI devices). When SME is active, |
| 2784 | * make sure that swiotlb variable remains set so the global dma_ops | ||
| 2785 | * continue to be SWIOTLB. | ||
| 2784 | */ | 2786 | */ |
| 2785 | if (!swiotlb) | 2787 | if (!swiotlb) |
| 2786 | dma_ops = &nommu_dma_ops; | 2788 | dma_ops = &nommu_dma_ops; |
| @@ -3046,6 +3048,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, | |||
| 3046 | mutex_unlock(&domain->api_lock); | 3048 | mutex_unlock(&domain->api_lock); |
| 3047 | 3049 | ||
| 3048 | domain_flush_tlb_pde(domain); | 3050 | domain_flush_tlb_pde(domain); |
| 3051 | domain_flush_complete(domain); | ||
| 3049 | 3052 | ||
| 3050 | return unmap_size; | 3053 | return unmap_size; |
| 3051 | } | 3054 | } |
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index f596fcc32898..25c2c75f5332 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
| @@ -709,7 +709,7 @@ static const struct dev_pm_ops sysmmu_pm_ops = { | |||
| 709 | pm_runtime_force_resume) | 709 | pm_runtime_force_resume) |
| 710 | }; | 710 | }; |
| 711 | 711 | ||
| 712 | static const struct of_device_id sysmmu_of_match[] __initconst = { | 712 | static const struct of_device_id sysmmu_of_match[] = { |
| 713 | { .compatible = "samsung,exynos-sysmmu", }, | 713 | { .compatible = "samsung,exynos-sysmmu", }, |
| 714 | { }, | 714 | { }, |
| 715 | }; | 715 | }; |
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 7d5286b05036..1841d0359bac 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c | |||
| @@ -64,7 +64,7 @@ EXPORT_SYMBOL(closure_put); | |||
| 64 | void __closure_wake_up(struct closure_waitlist *wait_list) | 64 | void __closure_wake_up(struct closure_waitlist *wait_list) |
| 65 | { | 65 | { |
| 66 | struct llist_node *list; | 66 | struct llist_node *list; |
| 67 | struct closure *cl; | 67 | struct closure *cl, *t; |
| 68 | struct llist_node *reverse = NULL; | 68 | struct llist_node *reverse = NULL; |
| 69 | 69 | ||
| 70 | list = llist_del_all(&wait_list->list); | 70 | list = llist_del_all(&wait_list->list); |
| @@ -73,7 +73,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list) | |||
| 73 | reverse = llist_reverse_order(list); | 73 | reverse = llist_reverse_order(list); |
| 74 | 74 | ||
| 75 | /* Then do the wakeups */ | 75 | /* Then do the wakeups */ |
| 76 | llist_for_each_entry(cl, reverse, list) { | 76 | llist_for_each_entry_safe(cl, t, reverse, list) { |
| 77 | closure_set_waiting(cl, 0); | 77 | closure_set_waiting(cl, 0); |
| 78 | closure_sub(cl, CLOSURE_WAITING + 1); | 78 | closure_sub(cl, CLOSURE_WAITING + 1); |
| 79 | } | 79 | } |
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 24eddbdf2ab4..203144762f36 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h | |||
| @@ -149,5 +149,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen | |||
| 149 | 149 | ||
| 150 | extern atomic_t dm_global_event_nr; | 150 | extern atomic_t dm_global_event_nr; |
| 151 | extern wait_queue_head_t dm_global_eventq; | 151 | extern wait_queue_head_t dm_global_eventq; |
| 152 | void dm_issue_global_event(void); | ||
| 152 | 153 | ||
| 153 | #endif | 154 | #endif |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index a55ffd4f5933..96ab46512e1f 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
| @@ -2466,6 +2466,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key | |||
| 2466 | kfree(cipher_api); | 2466 | kfree(cipher_api); |
| 2467 | return ret; | 2467 | return ret; |
| 2468 | } | 2468 | } |
| 2469 | kfree(cipher_api); | ||
| 2469 | 2470 | ||
| 2470 | return 0; | 2471 | return 0; |
| 2471 | bad_mem: | 2472 | bad_mem: |
| @@ -2584,6 +2585,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar | |||
| 2584 | ti->error = "Invalid feature value for sector_size"; | 2585 | ti->error = "Invalid feature value for sector_size"; |
| 2585 | return -EINVAL; | 2586 | return -EINVAL; |
| 2586 | } | 2587 | } |
| 2588 | if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) { | ||
| 2589 | ti->error = "Device size is not multiple of sector_size feature"; | ||
| 2590 | return -EINVAL; | ||
| 2591 | } | ||
| 2587 | cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT; | 2592 | cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT; |
| 2588 | } else if (!strcasecmp(opt_string, "iv_large_sectors")) | 2593 | } else if (!strcasecmp(opt_string, "iv_large_sectors")) |
| 2589 | set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); | 2594 | set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags); |
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 8756a6850431..e52676fa9832 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c | |||
| @@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_si | |||
| 477 | * Round up the ptr to an 8-byte boundary. | 477 | * Round up the ptr to an 8-byte boundary. |
| 478 | */ | 478 | */ |
| 479 | #define ALIGN_MASK 7 | 479 | #define ALIGN_MASK 7 |
| 480 | static inline size_t align_val(size_t val) | ||
| 481 | { | ||
| 482 | return (val + ALIGN_MASK) & ~ALIGN_MASK; | ||
| 483 | } | ||
| 480 | static inline void *align_ptr(void *ptr) | 484 | static inline void *align_ptr(void *ptr) |
| 481 | { | 485 | { |
| 482 | return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK); | 486 | return (void *)align_val((size_t)ptr); |
| 483 | } | 487 | } |
| 484 | 488 | ||
| 485 | /* | 489 | /* |
| @@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_ | |||
| 505 | struct hash_cell *hc; | 509 | struct hash_cell *hc; |
| 506 | size_t len, needed = 0; | 510 | size_t len, needed = 0; |
| 507 | struct gendisk *disk; | 511 | struct gendisk *disk; |
| 508 | struct dm_name_list *nl, *old_nl = NULL; | 512 | struct dm_name_list *orig_nl, *nl, *old_nl = NULL; |
| 509 | uint32_t *event_nr; | 513 | uint32_t *event_nr; |
| 510 | 514 | ||
| 511 | down_write(&_hash_lock); | 515 | down_write(&_hash_lock); |
| @@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_ | |||
| 516 | */ | 520 | */ |
| 517 | for (i = 0; i < NUM_BUCKETS; i++) { | 521 | for (i = 0; i < NUM_BUCKETS; i++) { |
| 518 | list_for_each_entry (hc, _name_buckets + i, name_list) { | 522 | list_for_each_entry (hc, _name_buckets + i, name_list) { |
| 519 | needed += sizeof(struct dm_name_list); | 523 | needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1); |
| 520 | needed += strlen(hc->name) + 1; | 524 | needed += align_val(sizeof(uint32_t)); |
| 521 | needed += ALIGN_MASK; | ||
| 522 | needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK; | ||
| 523 | } | 525 | } |
| 524 | } | 526 | } |
| 525 | 527 | ||
| 526 | /* | 528 | /* |
| 527 | * Grab our output buffer. | 529 | * Grab our output buffer. |
| 528 | */ | 530 | */ |
| 529 | nl = get_result_buffer(param, param_size, &len); | 531 | nl = orig_nl = get_result_buffer(param, param_size, &len); |
| 530 | if (len < needed) { | 532 | if (len < needed) { |
| 531 | param->flags |= DM_BUFFER_FULL_FLAG; | 533 | param->flags |= DM_BUFFER_FULL_FLAG; |
| 532 | goto out; | 534 | goto out; |
| @@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_ | |||
| 549 | strcpy(nl->name, hc->name); | 551 | strcpy(nl->name, hc->name); |
| 550 | 552 | ||
| 551 | old_nl = nl; | 553 | old_nl = nl; |
| 552 | event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1); | 554 | event_nr = align_ptr(nl->name + strlen(hc->name) + 1); |
| 553 | *event_nr = dm_get_event_nr(hc->md); | 555 | *event_nr = dm_get_event_nr(hc->md); |
| 554 | nl = align_ptr(event_nr + 1); | 556 | nl = align_ptr(event_nr + 1); |
| 555 | } | 557 | } |
| 556 | } | 558 | } |
| 559 | /* | ||
| 560 | * If mismatch happens, security may be compromised due to buffer | ||
| 561 | * overflow, so it's better to crash. | ||
| 562 | */ | ||
| 563 | BUG_ON((char *)nl - (char *)orig_nl != needed); | ||
| 557 | 564 | ||
| 558 | out: | 565 | out: |
| 559 | up_write(&_hash_lock); | 566 | up_write(&_hash_lock); |
| @@ -1621,7 +1628,8 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para | |||
| 1621 | * which has a variable size, is not used by the function processing | 1628 | * which has a variable size, is not used by the function processing |
| 1622 | * the ioctl. | 1629 | * the ioctl. |
| 1623 | */ | 1630 | */ |
| 1624 | #define IOCTL_FLAGS_NO_PARAMS 1 | 1631 | #define IOCTL_FLAGS_NO_PARAMS 1 |
| 1632 | #define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2 | ||
| 1625 | 1633 | ||
| 1626 | /*----------------------------------------------------------------- | 1634 | /*----------------------------------------------------------------- |
| 1627 | * Implementation of open/close/ioctl on the special char | 1635 | * Implementation of open/close/ioctl on the special char |
| @@ -1635,12 +1643,12 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags) | |||
| 1635 | ioctl_fn fn; | 1643 | ioctl_fn fn; |
| 1636 | } _ioctls[] = { | 1644 | } _ioctls[] = { |
| 1637 | {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */ | 1645 | {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */ |
| 1638 | {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all}, | 1646 | {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all}, |
| 1639 | {DM_LIST_DEVICES_CMD, 0, list_devices}, | 1647 | {DM_LIST_DEVICES_CMD, 0, list_devices}, |
| 1640 | 1648 | ||
| 1641 | {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create}, | 1649 | {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create}, |
| 1642 | {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove}, | 1650 | {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove}, |
| 1643 | {DM_DEV_RENAME_CMD, 0, dev_rename}, | 1651 | {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename}, |
| 1644 | {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend}, | 1652 | {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend}, |
| 1645 | {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status}, | 1653 | {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status}, |
| 1646 | {DM_DEV_WAIT_CMD, 0, dev_wait}, | 1654 | {DM_DEV_WAIT_CMD, 0, dev_wait}, |
| @@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us | |||
| 1869 | unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS)) | 1877 | unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS)) |
| 1870 | DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd); | 1878 | DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd); |
| 1871 | 1879 | ||
| 1880 | if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT) | ||
| 1881 | dm_issue_global_event(); | ||
| 1882 | |||
| 1872 | /* | 1883 | /* |
| 1873 | * Copy the results back to userland. | 1884 | * Copy the results back to userland. |
| 1874 | */ | 1885 | */ |
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 1ac58c5651b7..2245d06d2045 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
| @@ -3297,11 +3297,10 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev, | |||
| 3297 | static sector_t rs_get_progress(struct raid_set *rs, | 3297 | static sector_t rs_get_progress(struct raid_set *rs, |
| 3298 | sector_t resync_max_sectors, bool *array_in_sync) | 3298 | sector_t resync_max_sectors, bool *array_in_sync) |
| 3299 | { | 3299 | { |
| 3300 | sector_t r, recovery_cp, curr_resync_completed; | 3300 | sector_t r, curr_resync_completed; |
| 3301 | struct mddev *mddev = &rs->md; | 3301 | struct mddev *mddev = &rs->md; |
| 3302 | 3302 | ||
| 3303 | curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp; | 3303 | curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp; |
| 3304 | recovery_cp = mddev->recovery_cp; | ||
| 3305 | *array_in_sync = false; | 3304 | *array_in_sync = false; |
| 3306 | 3305 | ||
| 3307 | if (rs_is_raid0(rs)) { | 3306 | if (rs_is_raid0(rs)) { |
| @@ -3330,9 +3329,11 @@ static sector_t rs_get_progress(struct raid_set *rs, | |||
| 3330 | } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) | 3329 | } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) |
| 3331 | r = curr_resync_completed; | 3330 | r = curr_resync_completed; |
| 3332 | else | 3331 | else |
| 3333 | r = recovery_cp; | 3332 | r = mddev->recovery_cp; |
| 3334 | 3333 | ||
| 3335 | if (r == MaxSector) { | 3334 | if ((r == MaxSector) || |
| 3335 | (test_bit(MD_RECOVERY_DONE, &mddev->recovery) && | ||
| 3336 | (mddev->curr_resync_completed == resync_max_sectors))) { | ||
| 3336 | /* | 3337 | /* |
| 3337 | * Sync complete. | 3338 | * Sync complete. |
| 3338 | */ | 3339 | */ |
| @@ -3892,7 +3893,7 @@ static void raid_resume(struct dm_target *ti) | |||
| 3892 | 3893 | ||
| 3893 | static struct target_type raid_target = { | 3894 | static struct target_type raid_target = { |
| 3894 | .name = "raid", | 3895 | .name = "raid", |
| 3895 | .version = {1, 12, 1}, | 3896 | .version = {1, 13, 0}, |
| 3896 | .module = THIS_MODULE, | 3897 | .module = THIS_MODULE, |
| 3897 | .ctr = raid_ctr, | 3898 | .ctr = raid_ctr, |
| 3898 | .dtr = raid_dtr, | 3899 | .dtr = raid_dtr, |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6e54145969c5..4be85324f44d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -52,6 +52,12 @@ static struct workqueue_struct *deferred_remove_workqueue; | |||
| 52 | atomic_t dm_global_event_nr = ATOMIC_INIT(0); | 52 | atomic_t dm_global_event_nr = ATOMIC_INIT(0); |
| 53 | DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); | 53 | DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq); |
| 54 | 54 | ||
| 55 | void dm_issue_global_event(void) | ||
| 56 | { | ||
| 57 | atomic_inc(&dm_global_event_nr); | ||
| 58 | wake_up(&dm_global_eventq); | ||
| 59 | } | ||
| 60 | |||
| 55 | /* | 61 | /* |
| 56 | * One of these is allocated per bio. | 62 | * One of these is allocated per bio. |
| 57 | */ | 63 | */ |
| @@ -1865,9 +1871,8 @@ static void event_callback(void *context) | |||
| 1865 | dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); | 1871 | dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); |
| 1866 | 1872 | ||
| 1867 | atomic_inc(&md->event_nr); | 1873 | atomic_inc(&md->event_nr); |
| 1868 | atomic_inc(&dm_global_event_nr); | ||
| 1869 | wake_up(&md->eventq); | 1874 | wake_up(&md->eventq); |
| 1870 | wake_up(&dm_global_eventq); | 1875 | dm_issue_global_event(); |
| 1871 | } | 1876 | } |
| 1872 | 1877 | ||
| 1873 | /* | 1878 | /* |
| @@ -2283,6 +2288,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) | |||
| 2283 | } | 2288 | } |
| 2284 | 2289 | ||
| 2285 | map = __bind(md, table, &limits); | 2290 | map = __bind(md, table, &limits); |
| 2291 | dm_issue_global_event(); | ||
| 2286 | 2292 | ||
| 2287 | out: | 2293 | out: |
| 2288 | mutex_unlock(&md->suspend_lock); | 2294 | mutex_unlock(&md->suspend_lock); |
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c index ed43a4212479..129b558acc92 100644 --- a/drivers/media/rc/ir-sharp-decoder.c +++ b/drivers/media/rc/ir-sharp-decoder.c | |||
| @@ -245,5 +245,5 @@ module_init(ir_sharp_decode_init); | |||
| 245 | module_exit(ir_sharp_decode_exit); | 245 | module_exit(ir_sharp_decode_exit); |
| 246 | 246 | ||
| 247 | MODULE_LICENSE("GPL"); | 247 | MODULE_LICENSE("GPL"); |
| 248 | MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>"); | 248 | MODULE_AUTHOR("James Hogan <jhogan@kernel.org>"); |
| 249 | MODULE_DESCRIPTION("Sharp IR protocol decoder"); | 249 | MODULE_DESCRIPTION("Sharp IR protocol decoder"); |
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c index 5dba23ca2e5f..dc9bc1807fdf 100644 --- a/drivers/misc/cxl/cxllib.c +++ b/drivers/misc/cxl/cxllib.c | |||
| @@ -219,8 +219,17 @@ int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags) | |||
| 219 | 219 | ||
| 220 | down_read(&mm->mmap_sem); | 220 | down_read(&mm->mmap_sem); |
| 221 | 221 | ||
| 222 | for (dar = addr; dar < addr + size; dar += page_size) { | 222 | vma = find_vma(mm, addr); |
| 223 | if (!vma || dar < vma->vm_start || dar > vma->vm_end) { | 223 | if (!vma) { |
| 224 | pr_err("Can't find vma for addr %016llx\n", addr); | ||
| 225 | rc = -EFAULT; | ||
| 226 | goto out; | ||
| 227 | } | ||
| 228 | /* get the size of the pages allocated */ | ||
| 229 | page_size = vma_kernel_pagesize(vma); | ||
| 230 | |||
| 231 | for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) { | ||
| 232 | if (dar < vma->vm_start || dar >= vma->vm_end) { | ||
| 224 | vma = find_vma(mm, addr); | 233 | vma = find_vma(mm, addr); |
| 225 | if (!vma) { | 234 | if (!vma) { |
| 226 | pr_err("Can't find vma for addr %016llx\n", addr); | 235 | pr_err("Can't find vma for addr %016llx\n", addr); |
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 29fc1e662891..2ad7b5c69156 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c | |||
| @@ -1634,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, | |||
| 1634 | } | 1634 | } |
| 1635 | 1635 | ||
| 1636 | mqrq->areq.mrq = &brq->mrq; | 1636 | mqrq->areq.mrq = &brq->mrq; |
| 1637 | |||
| 1638 | mmc_queue_bounce_pre(mqrq); | ||
| 1639 | } | 1637 | } |
| 1640 | 1638 | ||
| 1641 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, | 1639 | static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, |
| @@ -1829,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) | |||
| 1829 | brq = &mq_rq->brq; | 1827 | brq = &mq_rq->brq; |
| 1830 | old_req = mmc_queue_req_to_req(mq_rq); | 1828 | old_req = mmc_queue_req_to_req(mq_rq); |
| 1831 | type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; | 1829 | type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; |
| 1832 | mmc_queue_bounce_post(mq_rq); | ||
| 1833 | 1830 | ||
| 1834 | switch (status) { | 1831 | switch (status) { |
| 1835 | case MMC_BLK_SUCCESS: | 1832 | case MMC_BLK_SUCCESS: |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index a7eb623f8daa..36217ad5e9b1 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
| @@ -1286,6 +1286,23 @@ out_err: | |||
| 1286 | return err; | 1286 | return err; |
| 1287 | } | 1287 | } |
| 1288 | 1288 | ||
| 1289 | static void mmc_select_driver_type(struct mmc_card *card) | ||
| 1290 | { | ||
| 1291 | int card_drv_type, drive_strength, drv_type; | ||
| 1292 | |||
| 1293 | card_drv_type = card->ext_csd.raw_driver_strength | | ||
| 1294 | mmc_driver_type_mask(0); | ||
| 1295 | |||
| 1296 | drive_strength = mmc_select_drive_strength(card, | ||
| 1297 | card->ext_csd.hs200_max_dtr, | ||
| 1298 | card_drv_type, &drv_type); | ||
| 1299 | |||
| 1300 | card->drive_strength = drive_strength; | ||
| 1301 | |||
| 1302 | if (drv_type) | ||
| 1303 | mmc_set_driver_type(card->host, drv_type); | ||
| 1304 | } | ||
| 1305 | |||
| 1289 | static int mmc_select_hs400es(struct mmc_card *card) | 1306 | static int mmc_select_hs400es(struct mmc_card *card) |
| 1290 | { | 1307 | { |
| 1291 | struct mmc_host *host = card->host; | 1308 | struct mmc_host *host = card->host; |
| @@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card) | |||
| 1341 | goto out_err; | 1358 | goto out_err; |
| 1342 | } | 1359 | } |
| 1343 | 1360 | ||
| 1361 | mmc_select_driver_type(card); | ||
| 1362 | |||
| 1344 | /* Switch card to HS400 */ | 1363 | /* Switch card to HS400 */ |
| 1345 | val = EXT_CSD_TIMING_HS400 | | 1364 | val = EXT_CSD_TIMING_HS400 | |
| 1346 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; | 1365 | card->drive_strength << EXT_CSD_DRV_STR_SHIFT; |
| @@ -1374,23 +1393,6 @@ out_err: | |||
| 1374 | return err; | 1393 | return err; |
| 1375 | } | 1394 | } |
| 1376 | 1395 | ||
| 1377 | static void mmc_select_driver_type(struct mmc_card *card) | ||
| 1378 | { | ||
| 1379 | int card_drv_type, drive_strength, drv_type; | ||
| 1380 | |||
| 1381 | card_drv_type = card->ext_csd.raw_driver_strength | | ||
| 1382 | mmc_driver_type_mask(0); | ||
| 1383 | |||
| 1384 | drive_strength = mmc_select_drive_strength(card, | ||
| 1385 | card->ext_csd.hs200_max_dtr, | ||
| 1386 | card_drv_type, &drv_type); | ||
| 1387 | |||
| 1388 | card->drive_strength = drive_strength; | ||
| 1389 | |||
| 1390 | if (drv_type) | ||
| 1391 | mmc_set_driver_type(card->host, drv_type); | ||
| 1392 | } | ||
| 1393 | |||
| 1394 | /* | 1396 | /* |
| 1395 | * For device supporting HS200 mode, the following sequence | 1397 | * For device supporting HS200 mode, the following sequence |
| 1396 | * should be done before executing the tuning process. | 1398 | * should be done before executing the tuning process. |
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 74c663b1c0a7..0a4e77a5ba33 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c | |||
| @@ -23,8 +23,6 @@ | |||
| 23 | #include "core.h" | 23 | #include "core.h" |
| 24 | #include "card.h" | 24 | #include "card.h" |
| 25 | 25 | ||
| 26 | #define MMC_QUEUE_BOUNCESZ 65536 | ||
| 27 | |||
| 28 | /* | 26 | /* |
| 29 | * Prepare a MMC request. This just filters out odd stuff. | 27 | * Prepare a MMC request. This just filters out odd stuff. |
| 30 | */ | 28 | */ |
| @@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q, | |||
| 150 | queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q); | 148 | queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q); |
| 151 | } | 149 | } |
| 152 | 150 | ||
| 153 | static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host) | ||
| 154 | { | ||
| 155 | unsigned int bouncesz = MMC_QUEUE_BOUNCESZ; | ||
| 156 | |||
| 157 | if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF)) | ||
| 158 | return 0; | ||
| 159 | |||
| 160 | if (bouncesz > host->max_req_size) | ||
| 161 | bouncesz = host->max_req_size; | ||
| 162 | if (bouncesz > host->max_seg_size) | ||
| 163 | bouncesz = host->max_seg_size; | ||
| 164 | if (bouncesz > host->max_blk_count * 512) | ||
| 165 | bouncesz = host->max_blk_count * 512; | ||
| 166 | |||
| 167 | if (bouncesz <= 512) | ||
| 168 | return 0; | ||
| 169 | |||
| 170 | return bouncesz; | ||
| 171 | } | ||
| 172 | |||
| 173 | /** | 151 | /** |
| 174 | * mmc_init_request() - initialize the MMC-specific per-request data | 152 | * mmc_init_request() - initialize the MMC-specific per-request data |
| 175 | * @q: the request queue | 153 | * @q: the request queue |
| @@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req, | |||
| 184 | struct mmc_card *card = mq->card; | 162 | struct mmc_card *card = mq->card; |
| 185 | struct mmc_host *host = card->host; | 163 | struct mmc_host *host = card->host; |
| 186 | 164 | ||
| 187 | if (card->bouncesz) { | 165 | mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp); |
| 188 | mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp); | 166 | if (!mq_rq->sg) |
| 189 | if (!mq_rq->bounce_buf) | 167 | return -ENOMEM; |
| 190 | return -ENOMEM; | ||
| 191 | if (card->bouncesz > 512) { | ||
| 192 | mq_rq->sg = mmc_alloc_sg(1, gfp); | ||
| 193 | if (!mq_rq->sg) | ||
| 194 | return -ENOMEM; | ||
| 195 | mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512, | ||
| 196 | gfp); | ||
| 197 | if (!mq_rq->bounce_sg) | ||
| 198 | return -ENOMEM; | ||
| 199 | } | ||
| 200 | } else { | ||
| 201 | mq_rq->bounce_buf = NULL; | ||
| 202 | mq_rq->bounce_sg = NULL; | ||
| 203 | mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp); | ||
| 204 | if (!mq_rq->sg) | ||
| 205 | return -ENOMEM; | ||
| 206 | } | ||
| 207 | 168 | ||
| 208 | return 0; | 169 | return 0; |
| 209 | } | 170 | } |
| @@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req) | |||
| 212 | { | 173 | { |
| 213 | struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); | 174 | struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req); |
| 214 | 175 | ||
| 215 | /* It is OK to kfree(NULL) so this will be smooth */ | ||
| 216 | kfree(mq_rq->bounce_sg); | ||
| 217 | mq_rq->bounce_sg = NULL; | ||
| 218 | |||
| 219 | kfree(mq_rq->bounce_buf); | ||
| 220 | mq_rq->bounce_buf = NULL; | ||
| 221 | |||
| 222 | kfree(mq_rq->sg); | 176 | kfree(mq_rq->sg); |
| 223 | mq_rq->sg = NULL; | 177 | mq_rq->sg = NULL; |
| 224 | } | 178 | } |
| @@ -242,12 +196,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, | |||
| 242 | if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) | 196 | if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) |
| 243 | limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; | 197 | limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; |
| 244 | 198 | ||
| 245 | /* | ||
| 246 | * mmc_init_request() depends on card->bouncesz so it must be calculated | ||
| 247 | * before blk_init_allocated_queue() starts allocating requests. | ||
| 248 | */ | ||
| 249 | card->bouncesz = mmc_queue_calc_bouncesz(host); | ||
| 250 | |||
| 251 | mq->card = card; | 199 | mq->card = card; |
| 252 | mq->queue = blk_alloc_queue(GFP_KERNEL); | 200 | mq->queue = blk_alloc_queue(GFP_KERNEL); |
| 253 | if (!mq->queue) | 201 | if (!mq->queue) |
| @@ -271,17 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, | |||
| 271 | if (mmc_can_erase(card)) | 219 | if (mmc_can_erase(card)) |
| 272 | mmc_queue_setup_discard(mq->queue, card); | 220 | mmc_queue_setup_discard(mq->queue, card); |
| 273 | 221 | ||
| 274 | if (card->bouncesz) { | 222 | blk_queue_bounce_limit(mq->queue, limit); |
| 275 | blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512); | 223 | blk_queue_max_hw_sectors(mq->queue, |
| 276 | blk_queue_max_segments(mq->queue, card->bouncesz / 512); | 224 | min(host->max_blk_count, host->max_req_size / 512)); |
| 277 | blk_queue_max_segment_size(mq->queue, card->bouncesz); | 225 | blk_queue_max_segments(mq->queue, host->max_segs); |
| 278 | } else { | 226 | blk_queue_max_segment_size(mq->queue, host->max_seg_size); |
| 279 | blk_queue_bounce_limit(mq->queue, limit); | ||
| 280 | blk_queue_max_hw_sectors(mq->queue, | ||
| 281 | min(host->max_blk_count, host->max_req_size / 512)); | ||
| 282 | blk_queue_max_segments(mq->queue, host->max_segs); | ||
| 283 | blk_queue_max_segment_size(mq->queue, host->max_seg_size); | ||
| 284 | } | ||
| 285 | 227 | ||
| 286 | sema_init(&mq->thread_sem, 1); | 228 | sema_init(&mq->thread_sem, 1); |
| 287 | 229 | ||
| @@ -370,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq) | |||
| 370 | */ | 312 | */ |
| 371 | unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) | 313 | unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) |
| 372 | { | 314 | { |
| 373 | unsigned int sg_len; | ||
| 374 | size_t buflen; | ||
| 375 | struct scatterlist *sg; | ||
| 376 | struct request *req = mmc_queue_req_to_req(mqrq); | 315 | struct request *req = mmc_queue_req_to_req(mqrq); |
| 377 | int i; | ||
| 378 | |||
| 379 | if (!mqrq->bounce_buf) | ||
| 380 | return blk_rq_map_sg(mq->queue, req, mqrq->sg); | ||
| 381 | |||
| 382 | sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg); | ||
| 383 | |||
| 384 | mqrq->bounce_sg_len = sg_len; | ||
| 385 | |||
| 386 | buflen = 0; | ||
| 387 | for_each_sg(mqrq->bounce_sg, sg, sg_len, i) | ||
| 388 | buflen += sg->length; | ||
| 389 | |||
| 390 | sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen); | ||
| 391 | |||
| 392 | return 1; | ||
| 393 | } | ||
| 394 | |||
| 395 | /* | ||
| 396 | * If writing, bounce the data to the buffer before the request | ||
| 397 | * is sent to the host driver | ||
| 398 | */ | ||
| 399 | void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq) | ||
| 400 | { | ||
| 401 | if (!mqrq->bounce_buf) | ||
| 402 | return; | ||
| 403 | |||
| 404 | if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE) | ||
| 405 | return; | ||
| 406 | |||
| 407 | sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, | ||
| 408 | mqrq->bounce_buf, mqrq->sg[0].length); | ||
| 409 | } | ||
| 410 | |||
| 411 | /* | ||
| 412 | * If reading, bounce the data from the buffer after the request | ||
| 413 | * has been handled by the host driver | ||
| 414 | */ | ||
| 415 | void mmc_queue_bounce_post(struct mmc_queue_req *mqrq) | ||
| 416 | { | ||
| 417 | if (!mqrq->bounce_buf) | ||
| 418 | return; | ||
| 419 | |||
| 420 | if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ) | ||
| 421 | return; | ||
| 422 | 316 | ||
| 423 | sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, | 317 | return blk_rq_map_sg(mq->queue, req, mqrq->sg); |
| 424 | mqrq->bounce_buf, mqrq->sg[0].length); | ||
| 425 | } | 318 | } |
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 04fc89360a7a..f18d3f656baa 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h | |||
| @@ -49,9 +49,6 @@ enum mmc_drv_op { | |||
| 49 | struct mmc_queue_req { | 49 | struct mmc_queue_req { |
| 50 | struct mmc_blk_request brq; | 50 | struct mmc_blk_request brq; |
| 51 | struct scatterlist *sg; | 51 | struct scatterlist *sg; |
| 52 | char *bounce_buf; | ||
| 53 | struct scatterlist *bounce_sg; | ||
| 54 | unsigned int bounce_sg_len; | ||
| 55 | struct mmc_async_req areq; | 52 | struct mmc_async_req areq; |
| 56 | enum mmc_drv_op drv_op; | 53 | enum mmc_drv_op drv_op; |
| 57 | int drv_op_result; | 54 | int drv_op_result; |
| @@ -81,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, | |||
| 81 | extern void mmc_cleanup_queue(struct mmc_queue *); | 78 | extern void mmc_cleanup_queue(struct mmc_queue *); |
| 82 | extern void mmc_queue_suspend(struct mmc_queue *); | 79 | extern void mmc_queue_suspend(struct mmc_queue *); |
| 83 | extern void mmc_queue_resume(struct mmc_queue *); | 80 | extern void mmc_queue_resume(struct mmc_queue *); |
| 84 | |||
| 85 | extern unsigned int mmc_queue_map_sg(struct mmc_queue *, | 81 | extern unsigned int mmc_queue_map_sg(struct mmc_queue *, |
| 86 | struct mmc_queue_req *); | 82 | struct mmc_queue_req *); |
| 87 | extern void mmc_queue_bounce_pre(struct mmc_queue_req *); | ||
| 88 | extern void mmc_queue_bounce_post(struct mmc_queue_req *); | ||
| 89 | 83 | ||
| 90 | extern int mmc_access_rpmb(struct mmc_queue *); | 84 | extern int mmc_access_rpmb(struct mmc_queue *); |
| 91 | 85 | ||
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c index 27fb625cbcf3..fbd29f00fca0 100644 --- a/drivers/mmc/host/cavium.c +++ b/drivers/mmc/host/cavium.c | |||
| @@ -1038,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host) | |||
| 1038 | */ | 1038 | */ |
| 1039 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | | 1039 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | |
| 1040 | MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | | 1040 | MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD | |
| 1041 | MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF; | 1041 | MMC_CAP_3_3V_DDR; |
| 1042 | 1042 | ||
| 1043 | if (host->use_sg) | 1043 | if (host->use_sg) |
| 1044 | mmc->max_segs = 16; | 1044 | mmc->max_segs = 16; |
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index c885c2d4b904..85745ef179e2 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c | |||
| @@ -531,8 +531,7 @@ static int meson_mmc_clk_init(struct meson_host *host) | |||
| 531 | div->shift = __ffs(CLK_DIV_MASK); | 531 | div->shift = __ffs(CLK_DIV_MASK); |
| 532 | div->width = __builtin_popcountl(CLK_DIV_MASK); | 532 | div->width = __builtin_popcountl(CLK_DIV_MASK); |
| 533 | div->hw.init = &init; | 533 | div->hw.init = &init; |
| 534 | div->flags = (CLK_DIVIDER_ONE_BASED | | 534 | div->flags = CLK_DIVIDER_ONE_BASED; |
| 535 | CLK_DIVIDER_ROUND_CLOSEST); | ||
| 536 | 535 | ||
| 537 | clk = devm_clk_register(host->dev, &div->hw); | 536 | clk = devm_clk_register(host->dev, &div->hw); |
| 538 | if (WARN_ON(IS_ERR(clk))) | 537 | if (WARN_ON(IS_ERR(clk))) |
| @@ -717,6 +716,22 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode, | |||
| 717 | static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) | 716 | static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode) |
| 718 | { | 717 | { |
| 719 | struct meson_host *host = mmc_priv(mmc); | 718 | struct meson_host *host = mmc_priv(mmc); |
| 719 | int ret; | ||
| 720 | |||
| 721 | /* | ||
| 722 | * If this is the initial tuning, try to get a sane Rx starting | ||
| 723 | * phase before doing the actual tuning. | ||
| 724 | */ | ||
| 725 | if (!mmc->doing_retune) { | ||
| 726 | ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); | ||
| 727 | |||
| 728 | if (ret) | ||
| 729 | return ret; | ||
| 730 | } | ||
| 731 | |||
| 732 | ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk); | ||
| 733 | if (ret) | ||
| 734 | return ret; | ||
| 720 | 735 | ||
| 721 | return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); | 736 | return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk); |
| 722 | } | 737 | } |
| @@ -746,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 746 | case MMC_POWER_UP: | 761 | case MMC_POWER_UP: |
| 747 | if (!IS_ERR(mmc->supply.vmmc)) | 762 | if (!IS_ERR(mmc->supply.vmmc)) |
| 748 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); | 763 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); |
| 764 | |||
| 765 | /* Reset phases */ | ||
| 766 | clk_set_phase(host->rx_clk, 0); | ||
| 767 | clk_set_phase(host->tx_clk, 270); | ||
| 768 | |||
| 749 | break; | 769 | break; |
| 750 | 770 | ||
| 751 | case MMC_POWER_ON: | 771 | case MMC_POWER_ON: |
| @@ -759,8 +779,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
| 759 | host->vqmmc_enabled = true; | 779 | host->vqmmc_enabled = true; |
| 760 | } | 780 | } |
| 761 | 781 | ||
| 762 | /* Reset rx phase */ | ||
| 763 | clk_set_phase(host->rx_clk, 0); | ||
| 764 | break; | 782 | break; |
| 765 | } | 783 | } |
| 766 | 784 | ||
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 59ab194cb009..c763b404510f 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c | |||
| @@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev) | |||
| 702 | 702 | ||
| 703 | pxamci_init_ocr(host); | 703 | pxamci_init_ocr(host); |
| 704 | 704 | ||
| 705 | /* | 705 | mmc->caps = 0; |
| 706 | * This architecture used to disable bounce buffers through its | ||
| 707 | * defconfig, now it is done at runtime as a host property. | ||
| 708 | */ | ||
| 709 | mmc->caps = MMC_CAP_NO_BOUNCE_BUFF; | ||
| 710 | host->cmdat = 0; | 706 | host->cmdat = 0; |
| 711 | if (!cpu_is_pxa25x()) { | 707 | if (!cpu_is_pxa25x()) { |
| 712 | mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; | 708 | mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; |
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index 2eec2e652c53..0842bbc2d7ad 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c | |||
| @@ -466,6 +466,7 @@ static int xenon_probe(struct platform_device *pdev) | |||
| 466 | { | 466 | { |
| 467 | struct sdhci_pltfm_host *pltfm_host; | 467 | struct sdhci_pltfm_host *pltfm_host; |
| 468 | struct sdhci_host *host; | 468 | struct sdhci_host *host; |
| 469 | struct xenon_priv *priv; | ||
| 469 | int err; | 470 | int err; |
| 470 | 471 | ||
| 471 | host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata, | 472 | host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata, |
| @@ -474,6 +475,7 @@ static int xenon_probe(struct platform_device *pdev) | |||
| 474 | return PTR_ERR(host); | 475 | return PTR_ERR(host); |
| 475 | 476 | ||
| 476 | pltfm_host = sdhci_priv(host); | 477 | pltfm_host = sdhci_priv(host); |
| 478 | priv = sdhci_pltfm_priv(pltfm_host); | ||
| 477 | 479 | ||
| 478 | /* | 480 | /* |
| 479 | * Link Xenon specific mmc_host_ops function, | 481 | * Link Xenon specific mmc_host_ops function, |
| @@ -491,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev) | |||
| 491 | if (err) | 493 | if (err) |
| 492 | goto free_pltfm; | 494 | goto free_pltfm; |
| 493 | 495 | ||
| 496 | priv->axi_clk = devm_clk_get(&pdev->dev, "axi"); | ||
| 497 | if (IS_ERR(priv->axi_clk)) { | ||
| 498 | err = PTR_ERR(priv->axi_clk); | ||
| 499 | if (err == -EPROBE_DEFER) | ||
| 500 | goto err_clk; | ||
| 501 | } else { | ||
| 502 | err = clk_prepare_enable(priv->axi_clk); | ||
| 503 | if (err) | ||
| 504 | goto err_clk; | ||
| 505 | } | ||
| 506 | |||
| 494 | err = mmc_of_parse(host->mmc); | 507 | err = mmc_of_parse(host->mmc); |
| 495 | if (err) | 508 | if (err) |
| 496 | goto err_clk; | 509 | goto err_clk_axi; |
| 497 | 510 | ||
| 498 | sdhci_get_of_property(pdev); | 511 | sdhci_get_of_property(pdev); |
| 499 | 512 | ||
| @@ -502,11 +515,11 @@ static int xenon_probe(struct platform_device *pdev) | |||
| 502 | /* Xenon specific dt parse */ | 515 | /* Xenon specific dt parse */ |
| 503 | err = xenon_probe_dt(pdev); | 516 | err = xenon_probe_dt(pdev); |
| 504 | if (err) | 517 | if (err) |
| 505 | goto err_clk; | 518 | goto err_clk_axi; |
| 506 | 519 | ||
| 507 | err = xenon_sdhc_prepare(host); | 520 | err = xenon_sdhc_prepare(host); |
| 508 | if (err) | 521 | if (err) |
| 509 | goto err_clk; | 522 | goto err_clk_axi; |
| 510 | 523 | ||
| 511 | pm_runtime_get_noresume(&pdev->dev); | 524 | pm_runtime_get_noresume(&pdev->dev); |
| 512 | pm_runtime_set_active(&pdev->dev); | 525 | pm_runtime_set_active(&pdev->dev); |
| @@ -527,6 +540,8 @@ remove_sdhc: | |||
| 527 | pm_runtime_disable(&pdev->dev); | 540 | pm_runtime_disable(&pdev->dev); |
| 528 | pm_runtime_put_noidle(&pdev->dev); | 541 | pm_runtime_put_noidle(&pdev->dev); |
| 529 | xenon_sdhc_unprepare(host); | 542 | xenon_sdhc_unprepare(host); |
| 543 | err_clk_axi: | ||
| 544 | clk_disable_unprepare(priv->axi_clk); | ||
| 530 | err_clk: | 545 | err_clk: |
| 531 | clk_disable_unprepare(pltfm_host->clk); | 546 | clk_disable_unprepare(pltfm_host->clk); |
| 532 | free_pltfm: | 547 | free_pltfm: |
| @@ -538,6 +553,7 @@ static int xenon_remove(struct platform_device *pdev) | |||
| 538 | { | 553 | { |
| 539 | struct sdhci_host *host = platform_get_drvdata(pdev); | 554 | struct sdhci_host *host = platform_get_drvdata(pdev); |
| 540 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 555 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
| 556 | struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host); | ||
| 541 | 557 | ||
| 542 | pm_runtime_get_sync(&pdev->dev); | 558 | pm_runtime_get_sync(&pdev->dev); |
| 543 | pm_runtime_disable(&pdev->dev); | 559 | pm_runtime_disable(&pdev->dev); |
| @@ -546,7 +562,7 @@ static int xenon_remove(struct platform_device *pdev) | |||
| 546 | sdhci_remove_host(host, 0); | 562 | sdhci_remove_host(host, 0); |
| 547 | 563 | ||
| 548 | xenon_sdhc_unprepare(host); | 564 | xenon_sdhc_unprepare(host); |
| 549 | 565 | clk_disable_unprepare(priv->axi_clk); | |
| 550 | clk_disable_unprepare(pltfm_host->clk); | 566 | clk_disable_unprepare(pltfm_host->clk); |
| 551 | 567 | ||
| 552 | sdhci_pltfm_free(pdev); | 568 | sdhci_pltfm_free(pdev); |
diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h index 2bc0510c0769..9994995c7c56 100644 --- a/drivers/mmc/host/sdhci-xenon.h +++ b/drivers/mmc/host/sdhci-xenon.h | |||
| @@ -83,6 +83,7 @@ struct xenon_priv { | |||
| 83 | unsigned char bus_width; | 83 | unsigned char bus_width; |
| 84 | unsigned char timing; | 84 | unsigned char timing; |
| 85 | unsigned int clock; | 85 | unsigned int clock; |
| 86 | struct clk *axi_clk; | ||
| 86 | 87 | ||
| 87 | int phy_type; | 88 | int phy_type; |
| 88 | /* | 89 | /* |
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index c6678aa9b4ef..d74c7335c512 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c | |||
| @@ -1100,6 +1100,10 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port, | |||
| 1100 | }; | 1100 | }; |
| 1101 | int i, err; | 1101 | int i, err; |
| 1102 | 1102 | ||
| 1103 | /* DSA and CPU ports have to be members of multiple vlans */ | ||
| 1104 | if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) | ||
| 1105 | return 0; | ||
| 1106 | |||
| 1103 | if (!vid_begin) | 1107 | if (!vid_begin) |
| 1104 | return -EOPNOTSUPP; | 1108 | return -EOPNOTSUPP; |
| 1105 | 1109 | ||
| @@ -3947,7 +3951,9 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev) | |||
| 3947 | if (chip->irq > 0) { | 3951 | if (chip->irq > 0) { |
| 3948 | if (chip->info->g2_irqs > 0) | 3952 | if (chip->info->g2_irqs > 0) |
| 3949 | mv88e6xxx_g2_irq_free(chip); | 3953 | mv88e6xxx_g2_irq_free(chip); |
| 3954 | mutex_lock(&chip->reg_lock); | ||
| 3950 | mv88e6xxx_g1_irq_free(chip); | 3955 | mv88e6xxx_g1_irq_free(chip); |
| 3956 | mutex_unlock(&chip->reg_lock); | ||
| 3951 | } | 3957 | } |
| 3952 | } | 3958 | } |
| 3953 | 3959 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 214986436ece..0fdaaa643073 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | |||
| @@ -51,6 +51,10 @@ | |||
| 51 | 51 | ||
| 52 | #define AQ_CFG_SKB_FRAGS_MAX 32U | 52 | #define AQ_CFG_SKB_FRAGS_MAX 32U |
| 53 | 53 | ||
| 54 | /* Number of descriptors available in one ring to resume this ring queue | ||
| 55 | */ | ||
| 56 | #define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2) | ||
| 57 | |||
| 54 | #define AQ_CFG_NAPI_WEIGHT 64U | 58 | #define AQ_CFG_NAPI_WEIGHT 64U |
| 55 | 59 | ||
| 56 | #define AQ_CFG_MULTICAST_ADDRESS_MAX 32U | 60 | #define AQ_CFG_MULTICAST_ADDRESS_MAX 32U |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 6ac9e2602d6d..0a5bb4114eb4 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c | |||
| @@ -119,6 +119,35 @@ int aq_nic_cfg_start(struct aq_nic_s *self) | |||
| 119 | return 0; | 119 | return 0; |
| 120 | } | 120 | } |
| 121 | 121 | ||
| 122 | static int aq_nic_update_link_status(struct aq_nic_s *self) | ||
| 123 | { | ||
| 124 | int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw); | ||
| 125 | |||
| 126 | if (err) | ||
| 127 | return err; | ||
| 128 | |||
| 129 | if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) | ||
| 130 | pr_info("%s: link change old %d new %d\n", | ||
| 131 | AQ_CFG_DRV_NAME, self->link_status.mbps, | ||
| 132 | self->aq_hw->aq_link_status.mbps); | ||
| 133 | |||
| 134 | self->link_status = self->aq_hw->aq_link_status; | ||
| 135 | if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) { | ||
| 136 | aq_utils_obj_set(&self->header.flags, | ||
| 137 | AQ_NIC_FLAG_STARTED); | ||
| 138 | aq_utils_obj_clear(&self->header.flags, | ||
| 139 | AQ_NIC_LINK_DOWN); | ||
| 140 | netif_carrier_on(self->ndev); | ||
| 141 | netif_tx_wake_all_queues(self->ndev); | ||
| 142 | } | ||
| 143 | if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) { | ||
| 144 | netif_carrier_off(self->ndev); | ||
| 145 | netif_tx_disable(self->ndev); | ||
| 146 | aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); | ||
| 147 | } | ||
| 148 | return 0; | ||
| 149 | } | ||
| 150 | |||
| 122 | static void aq_nic_service_timer_cb(unsigned long param) | 151 | static void aq_nic_service_timer_cb(unsigned long param) |
| 123 | { | 152 | { |
| 124 | struct aq_nic_s *self = (struct aq_nic_s *)param; | 153 | struct aq_nic_s *self = (struct aq_nic_s *)param; |
| @@ -131,26 +160,13 @@ static void aq_nic_service_timer_cb(unsigned long param) | |||
| 131 | if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) | 160 | if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) |
| 132 | goto err_exit; | 161 | goto err_exit; |
| 133 | 162 | ||
| 134 | err = self->aq_hw_ops.hw_get_link_status(self->aq_hw); | 163 | err = aq_nic_update_link_status(self); |
| 135 | if (err < 0) | 164 | if (err) |
| 136 | goto err_exit; | 165 | goto err_exit; |
| 137 | 166 | ||
| 138 | self->link_status = self->aq_hw->aq_link_status; | ||
| 139 | |||
| 140 | self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, | 167 | self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, |
| 141 | self->aq_nic_cfg.is_interrupt_moderation); | 168 | self->aq_nic_cfg.is_interrupt_moderation); |
| 142 | 169 | ||
| 143 | if (self->link_status.mbps) { | ||
| 144 | aq_utils_obj_set(&self->header.flags, | ||
| 145 | AQ_NIC_FLAG_STARTED); | ||
| 146 | aq_utils_obj_clear(&self->header.flags, | ||
| 147 | AQ_NIC_LINK_DOWN); | ||
| 148 | netif_carrier_on(self->ndev); | ||
| 149 | } else { | ||
| 150 | netif_carrier_off(self->ndev); | ||
| 151 | aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN); | ||
| 152 | } | ||
| 153 | |||
| 154 | memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); | 170 | memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); |
| 155 | memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); | 171 | memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); |
| 156 | for (i = AQ_DIMOF(self->aq_vec); i--;) { | 172 | for (i = AQ_DIMOF(self->aq_vec); i--;) { |
| @@ -214,7 +230,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, | |||
| 214 | SET_NETDEV_DEV(ndev, dev); | 230 | SET_NETDEV_DEV(ndev, dev); |
| 215 | 231 | ||
| 216 | ndev->if_port = port; | 232 | ndev->if_port = port; |
| 217 | ndev->min_mtu = ETH_MIN_MTU; | ||
| 218 | self->ndev = ndev; | 233 | self->ndev = ndev; |
| 219 | 234 | ||
| 220 | self->aq_pci_func = aq_pci_func; | 235 | self->aq_pci_func = aq_pci_func; |
| @@ -241,7 +256,6 @@ err_exit: | |||
| 241 | int aq_nic_ndev_register(struct aq_nic_s *self) | 256 | int aq_nic_ndev_register(struct aq_nic_s *self) |
| 242 | { | 257 | { |
| 243 | int err = 0; | 258 | int err = 0; |
| 244 | unsigned int i = 0U; | ||
| 245 | 259 | ||
| 246 | if (!self->ndev) { | 260 | if (!self->ndev) { |
| 247 | err = -EINVAL; | 261 | err = -EINVAL; |
| @@ -263,8 +277,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self) | |||
| 263 | 277 | ||
| 264 | netif_carrier_off(self->ndev); | 278 | netif_carrier_off(self->ndev); |
| 265 | 279 | ||
| 266 | for (i = AQ_CFG_VECS_MAX; i--;) | 280 | netif_tx_disable(self->ndev); |
| 267 | aq_nic_ndev_queue_stop(self, i); | ||
| 268 | 281 | ||
| 269 | err = register_netdev(self->ndev); | 282 | err = register_netdev(self->ndev); |
| 270 | if (err < 0) | 283 | if (err < 0) |
| @@ -283,6 +296,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self) | |||
| 283 | self->ndev->features = aq_hw_caps->hw_features; | 296 | self->ndev->features = aq_hw_caps->hw_features; |
| 284 | self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; | 297 | self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; |
| 285 | self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; | 298 | self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; |
| 299 | self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN; | ||
| 286 | 300 | ||
| 287 | return 0; | 301 | return 0; |
| 288 | } | 302 | } |
| @@ -318,12 +332,8 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev) | |||
| 318 | err = -EINVAL; | 332 | err = -EINVAL; |
| 319 | goto err_exit; | 333 | goto err_exit; |
| 320 | } | 334 | } |
| 321 | if (netif_running(ndev)) { | 335 | if (netif_running(ndev)) |
| 322 | unsigned int i; | 336 | netif_tx_disable(ndev); |
| 323 | |||
| 324 | for (i = AQ_CFG_VECS_MAX; i--;) | ||
| 325 | netif_stop_subqueue(ndev, i); | ||
| 326 | } | ||
| 327 | 337 | ||
| 328 | for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; | 338 | for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; |
| 329 | self->aq_vecs++) { | 339 | self->aq_vecs++) { |
| @@ -383,16 +393,6 @@ err_exit: | |||
| 383 | return err; | 393 | return err; |
| 384 | } | 394 | } |
| 385 | 395 | ||
| 386 | void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx) | ||
| 387 | { | ||
| 388 | netif_start_subqueue(self->ndev, idx); | ||
| 389 | } | ||
| 390 | |||
| 391 | void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx) | ||
| 392 | { | ||
| 393 | netif_stop_subqueue(self->ndev, idx); | ||
| 394 | } | ||
| 395 | |||
| 396 | int aq_nic_start(struct aq_nic_s *self) | 396 | int aq_nic_start(struct aq_nic_s *self) |
| 397 | { | 397 | { |
| 398 | struct aq_vec_s *aq_vec = NULL; | 398 | struct aq_vec_s *aq_vec = NULL; |
| @@ -451,10 +451,6 @@ int aq_nic_start(struct aq_nic_s *self) | |||
| 451 | goto err_exit; | 451 | goto err_exit; |
| 452 | } | 452 | } |
| 453 | 453 | ||
| 454 | for (i = 0U, aq_vec = self->aq_vec[0]; | ||
| 455 | self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) | ||
| 456 | aq_nic_ndev_queue_start(self, i); | ||
| 457 | |||
| 458 | err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); | 454 | err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); |
| 459 | if (err < 0) | 455 | if (err < 0) |
| 460 | goto err_exit; | 456 | goto err_exit; |
| @@ -463,6 +459,8 @@ int aq_nic_start(struct aq_nic_s *self) | |||
| 463 | if (err < 0) | 459 | if (err < 0) |
| 464 | goto err_exit; | 460 | goto err_exit; |
| 465 | 461 | ||
| 462 | netif_tx_start_all_queues(self->ndev); | ||
| 463 | |||
| 466 | err_exit: | 464 | err_exit: |
| 467 | return err; | 465 | return err; |
| 468 | } | 466 | } |
| @@ -475,6 +473,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, | |||
| 475 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; | 473 | unsigned int nr_frags = skb_shinfo(skb)->nr_frags; |
| 476 | unsigned int frag_count = 0U; | 474 | unsigned int frag_count = 0U; |
| 477 | unsigned int dx = ring->sw_tail; | 475 | unsigned int dx = ring->sw_tail; |
| 476 | struct aq_ring_buff_s *first = NULL; | ||
| 478 | struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx]; | 477 | struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx]; |
| 479 | 478 | ||
| 480 | if (unlikely(skb_is_gso(skb))) { | 479 | if (unlikely(skb_is_gso(skb))) { |
| @@ -485,6 +484,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, | |||
| 485 | dx_buff->len_l4 = tcp_hdrlen(skb); | 484 | dx_buff->len_l4 = tcp_hdrlen(skb); |
| 486 | dx_buff->mss = skb_shinfo(skb)->gso_size; | 485 | dx_buff->mss = skb_shinfo(skb)->gso_size; |
| 487 | dx_buff->is_txc = 1U; | 486 | dx_buff->is_txc = 1U; |
| 487 | dx_buff->eop_index = 0xffffU; | ||
| 488 | 488 | ||
| 489 | dx_buff->is_ipv6 = | 489 | dx_buff->is_ipv6 = |
| 490 | (ip_hdr(skb)->version == 6) ? 1U : 0U; | 490 | (ip_hdr(skb)->version == 6) ? 1U : 0U; |
| @@ -504,6 +504,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, | |||
| 504 | if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) | 504 | if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) |
| 505 | goto exit; | 505 | goto exit; |
| 506 | 506 | ||
| 507 | first = dx_buff; | ||
| 507 | dx_buff->len_pkt = skb->len; | 508 | dx_buff->len_pkt = skb->len; |
| 508 | dx_buff->is_sop = 1U; | 509 | dx_buff->is_sop = 1U; |
| 509 | dx_buff->is_mapped = 1U; | 510 | dx_buff->is_mapped = 1U; |
| @@ -532,40 +533,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self, | |||
| 532 | 533 | ||
| 533 | for (; nr_frags--; ++frag_count) { | 534 | for (; nr_frags--; ++frag_count) { |
| 534 | unsigned int frag_len = 0U; | 535 | unsigned int frag_len = 0U; |
| 536 | unsigned int buff_offset = 0U; | ||
| 537 | unsigned int buff_size = 0U; | ||
| 535 | dma_addr_t frag_pa; | 538 | dma_addr_t frag_pa; |
| 536 | skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; | 539 | skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; |
| 537 | 540 | ||
| 538 | frag_len = skb_frag_size(frag); | 541 | frag_len = skb_frag_size(frag); |
| 539 | frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0, | ||
| 540 | frag_len, DMA_TO_DEVICE); | ||
| 541 | 542 | ||
| 542 | if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa))) | 543 | while (frag_len) { |
| 543 | goto mapping_error; | 544 | if (frag_len > AQ_CFG_TX_FRAME_MAX) |
| 545 | buff_size = AQ_CFG_TX_FRAME_MAX; | ||
| 546 | else | ||
| 547 | buff_size = frag_len; | ||
| 548 | |||
| 549 | frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), | ||
| 550 | frag, | ||
| 551 | buff_offset, | ||
| 552 | buff_size, | ||
| 553 | DMA_TO_DEVICE); | ||
| 554 | |||
| 555 | if (unlikely(dma_mapping_error(aq_nic_get_dev(self), | ||
| 556 | frag_pa))) | ||
| 557 | goto mapping_error; | ||
| 544 | 558 | ||
| 545 | while (frag_len > AQ_CFG_TX_FRAME_MAX) { | ||
| 546 | dx = aq_ring_next_dx(ring, dx); | 559 | dx = aq_ring_next_dx(ring, dx); |
| 547 | dx_buff = &ring->buff_ring[dx]; | 560 | dx_buff = &ring->buff_ring[dx]; |
| 548 | 561 | ||
| 549 | dx_buff->flags = 0U; | 562 | dx_buff->flags = 0U; |
| 550 | dx_buff->len = AQ_CFG_TX_FRAME_MAX; | 563 | dx_buff->len = buff_size; |
| 551 | dx_buff->pa = frag_pa; | 564 | dx_buff->pa = frag_pa; |
| 552 | dx_buff->is_mapped = 1U; | 565 | dx_buff->is_mapped = 1U; |
| 566 | dx_buff->eop_index = 0xffffU; | ||
| 567 | |||
| 568 | frag_len -= buff_size; | ||
| 569 | buff_offset += buff_size; | ||
| 553 | 570 | ||
| 554 | frag_len -= AQ_CFG_TX_FRAME_MAX; | ||
| 555 | frag_pa += AQ_CFG_TX_FRAME_MAX; | ||
| 556 | ++ret; | 571 | ++ret; |
| 557 | } | 572 | } |
| 558 | |||
| 559 | dx = aq_ring_next_dx(ring, dx); | ||
| 560 | dx_buff = &ring->buff_ring[dx]; | ||
| 561 | |||
| 562 | dx_buff->flags = 0U; | ||
| 563 | dx_buff->len = frag_len; | ||
| 564 | dx_buff->pa = frag_pa; | ||
| 565 | dx_buff->is_mapped = 1U; | ||
| 566 | ++ret; | ||
| 567 | } | 573 | } |
| 568 | 574 | ||
| 575 | first->eop_index = dx; | ||
| 569 | dx_buff->is_eop = 1U; | 576 | dx_buff->is_eop = 1U; |
| 570 | dx_buff->skb = skb; | 577 | dx_buff->skb = skb; |
| 571 | goto exit; | 578 | goto exit; |
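
The reworked mapping loop above splits each skb fragment into chunks of at most AQ_CFG_TX_FRAME_MAX bytes and maps every chunk separately with skb_frag_dma_map() at a running offset, instead of mapping the whole fragment once and slicing the returned bus address. Below is a minimal standalone sketch of the same chunking arithmetic; the frame-size constant and fragment length are assumed example values, not the driver's.

    /* Illustration only: walk a fragment of frag_len bytes in chunks of at
     * most TX_FRAME_MAX, advancing buff_offset the way the hunk above does.
     */
    #include <stdio.h>

    #define TX_FRAME_MAX 16384u            /* assumed limit, not the driver's value */

    int main(void)
    {
        unsigned int frag_len = 40000u;    /* assumed fragment size */
        unsigned int buff_offset = 0u;
        unsigned int buff_size;

        while (frag_len) {
            buff_size = frag_len > TX_FRAME_MAX ? TX_FRAME_MAX : frag_len;
            /* the driver would skb_frag_dma_map(frag, buff_offset, buff_size)
             * here and fill one tx descriptor with the returned address */
            printf("descriptor: offset=%u len=%u\n", buff_offset, buff_size);
            frag_len -= buff_size;
            buff_offset += buff_size;
        }
        return 0;
    }

For a 40000-byte fragment this yields three descriptors (16384, 16384, 7232 bytes), each with its own DMA mapping.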
| @@ -602,7 +609,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) | |||
| 602 | unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; | 609 | unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; |
| 603 | unsigned int tc = 0U; | 610 | unsigned int tc = 0U; |
| 604 | int err = NETDEV_TX_OK; | 611 | int err = NETDEV_TX_OK; |
| 605 | bool is_nic_in_bad_state; | ||
| 606 | 612 | ||
| 607 | frags = skb_shinfo(skb)->nr_frags + 1; | 613 | frags = skb_shinfo(skb)->nr_frags + 1; |
| 608 | 614 | ||
| @@ -613,13 +619,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) | |||
| 613 | goto err_exit; | 619 | goto err_exit; |
| 614 | } | 620 | } |
| 615 | 621 | ||
| 616 | is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags, | 622 | aq_ring_update_queue_state(ring); |
| 617 | AQ_NIC_FLAGS_IS_NOT_TX_READY) || | ||
| 618 | (aq_ring_avail_dx(ring) < | ||
| 619 | AQ_CFG_SKB_FRAGS_MAX); | ||
| 620 | 623 | ||
| 621 | if (is_nic_in_bad_state) { | 624 | /* Above status update may stop the queue. Check this. */ |
| 622 | aq_nic_ndev_queue_stop(self, ring->idx); | 625 | if (__netif_subqueue_stopped(self->ndev, ring->idx)) { |
| 623 | err = NETDEV_TX_BUSY; | 626 | err = NETDEV_TX_BUSY; |
| 624 | goto err_exit; | 627 | goto err_exit; |
| 625 | } | 628 | } |
| @@ -631,9 +634,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb) | |||
| 631 | ring, | 634 | ring, |
| 632 | frags); | 635 | frags); |
| 633 | if (err >= 0) { | 636 | if (err >= 0) { |
| 634 | if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1) | ||
| 635 | aq_nic_ndev_queue_stop(self, ring->idx); | ||
| 636 | |||
| 637 | ++ring->stats.tx.packets; | 637 | ++ring->stats.tx.packets; |
| 638 | ring->stats.tx.bytes += skb->len; | 638 | ring->stats.tx.bytes += skb->len; |
| 639 | } | 639 | } |
| @@ -693,16 +693,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) | |||
| 693 | 693 | ||
| 694 | int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) | 694 | int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) |
| 695 | { | 695 | { |
| 696 | int err = 0; | ||
| 697 | |||
| 698 | if (new_mtu > self->aq_hw_caps.mtu) { | ||
| 699 | err = -EINVAL; | ||
| 700 | goto err_exit; | ||
| 701 | } | ||
| 702 | self->aq_nic_cfg.mtu = new_mtu; | 696 | self->aq_nic_cfg.mtu = new_mtu; |
| 703 | 697 | ||
| 704 | err_exit: | 698 | return 0; |
| 705 | return err; | ||
| 706 | } | 699 | } |
| 707 | 700 | ||
| 708 | int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) | 701 | int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) |
| @@ -905,9 +898,7 @@ int aq_nic_stop(struct aq_nic_s *self) | |||
| 905 | struct aq_vec_s *aq_vec = NULL; | 898 | struct aq_vec_s *aq_vec = NULL; |
| 906 | unsigned int i = 0U; | 899 | unsigned int i = 0U; |
| 907 | 900 | ||
| 908 | for (i = 0U, aq_vec = self->aq_vec[0]; | 901 | netif_tx_disable(self->ndev); |
| 909 | self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) | ||
| 910 | aq_nic_ndev_queue_stop(self, i); | ||
| 911 | 902 | ||
| 912 | del_timer_sync(&self->service_timer); | 903 | del_timer_sync(&self->service_timer); |
| 913 | 904 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 7fc2a5ecb2b7..0ddd556ff901 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h | |||
| @@ -83,8 +83,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self); | |||
| 83 | int aq_nic_init(struct aq_nic_s *self); | 83 | int aq_nic_init(struct aq_nic_s *self); |
| 84 | int aq_nic_cfg_start(struct aq_nic_s *self); | 84 | int aq_nic_cfg_start(struct aq_nic_s *self); |
| 85 | int aq_nic_ndev_register(struct aq_nic_s *self); | 85 | int aq_nic_ndev_register(struct aq_nic_s *self); |
| 86 | void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx); | ||
| 87 | void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx); | ||
| 88 | void aq_nic_ndev_free(struct aq_nic_s *self); | 86 | void aq_nic_ndev_free(struct aq_nic_s *self); |
| 89 | int aq_nic_start(struct aq_nic_s *self); | 87 | int aq_nic_start(struct aq_nic_s *self); |
| 90 | int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); | 88 | int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 4eee1996a825..0654e0c76bc2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c | |||
| @@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self) | |||
| 104 | return 0; | 104 | return 0; |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i, | ||
| 108 | unsigned int t) | ||
| 109 | { | ||
| 110 | return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t)); | ||
| 111 | } | ||
| 112 | |||
| 113 | void aq_ring_update_queue_state(struct aq_ring_s *ring) | ||
| 114 | { | ||
| 115 | if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX) | ||
| 116 | aq_ring_queue_stop(ring); | ||
| 117 | else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES) | ||
| 118 | aq_ring_queue_wake(ring); | ||
| 119 | } | ||
| 120 | |||
| 121 | void aq_ring_queue_wake(struct aq_ring_s *ring) | ||
| 122 | { | ||
| 123 | struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic); | ||
| 124 | |||
| 125 | if (__netif_subqueue_stopped(ndev, ring->idx)) { | ||
| 126 | netif_wake_subqueue(ndev, ring->idx); | ||
| 127 | ring->stats.tx.queue_restarts++; | ||
| 128 | } | ||
| 129 | } | ||
| 130 | |||
| 131 | void aq_ring_queue_stop(struct aq_ring_s *ring) | ||
| 132 | { | ||
| 133 | struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic); | ||
| 134 | |||
| 135 | if (!__netif_subqueue_stopped(ndev, ring->idx)) | ||
| 136 | netif_stop_subqueue(ndev, ring->idx); | ||
| 137 | } | ||
| 138 | |||
| 107 | void aq_ring_tx_clean(struct aq_ring_s *self) | 139 | void aq_ring_tx_clean(struct aq_ring_s *self) |
| 108 | { | 140 | { |
| 109 | struct device *dev = aq_nic_get_dev(self->aq_nic); | 141 | struct device *dev = aq_nic_get_dev(self->aq_nic); |
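
The first aq_ring.c hunk moves aq_ring_dx_in_range() up and adds the stop/wake helpers that gate the TX subqueue on descriptor availability (stop when fewer than AQ_CFG_SKB_FRAGS_MAX descriptors remain, wake only once availability exceeds AQ_CFG_RESTART_DESC_THRES, presumably to avoid rapid stop/wake toggling). The range helper answers whether index i lies strictly between head h and tail t on a circular ring, including the case where the tail has wrapped past the end of the ring. A small standalone sketch exercising the same expression, with assumed sample indices:

    /* Standalone illustration of the circular-range test used by
     * aq_ring_dx_in_range(); the sample indices are assumed.
     */
    #include <stdbool.h>
    #include <stdio.h>

    static bool dx_in_range(unsigned int h, unsigned int i, unsigned int t)
    {
        return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
    }

    int main(void)
    {
        /* no wrap: head 2, tail 7 -> indices 3..6 are in flight */
        printf("%d\n", dx_in_range(2, 5, 7));   /* prints 1 */
        /* wrapped: head 6, tail 2 -> indices 7.. and ..1 are in flight */
        printf("%d\n", dx_in_range(6, 0, 2));   /* prints 1 */
        printf("%d\n", dx_in_range(6, 4, 2));   /* prints 0 */
        return 0;
    }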
| @@ -113,23 +145,28 @@ void aq_ring_tx_clean(struct aq_ring_s *self) | |||
| 113 | struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; | 145 | struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; |
| 114 | 146 | ||
| 115 | if (likely(buff->is_mapped)) { | 147 | if (likely(buff->is_mapped)) { |
| 116 | if (unlikely(buff->is_sop)) | 148 | if (unlikely(buff->is_sop)) { |
| 149 | if (!buff->is_eop && | ||
| 150 | buff->eop_index != 0xffffU && | ||
| 151 | (!aq_ring_dx_in_range(self->sw_head, | ||
| 152 | buff->eop_index, | ||
| 153 | self->hw_head))) | ||
| 154 | break; | ||
| 155 | |||
| 117 | dma_unmap_single(dev, buff->pa, buff->len, | 156 | dma_unmap_single(dev, buff->pa, buff->len, |
| 118 | DMA_TO_DEVICE); | 157 | DMA_TO_DEVICE); |
| 119 | else | 158 | } else { |
| 120 | dma_unmap_page(dev, buff->pa, buff->len, | 159 | dma_unmap_page(dev, buff->pa, buff->len, |
| 121 | DMA_TO_DEVICE); | 160 | DMA_TO_DEVICE); |
| 161 | } | ||
| 122 | } | 162 | } |
| 123 | 163 | ||
| 124 | if (unlikely(buff->is_eop)) | 164 | if (unlikely(buff->is_eop)) |
| 125 | dev_kfree_skb_any(buff->skb); | 165 | dev_kfree_skb_any(buff->skb); |
| 126 | } | ||
| 127 | } | ||
| 128 | 166 | ||
| 129 | static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, | 167 | buff->pa = 0U; |
| 130 | unsigned int t) | 168 | buff->eop_index = 0xffffU; |
| 131 | { | 169 | } |
| 132 | return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t)); | ||
| 133 | } | 170 | } |
| 134 | 171 | ||
| 135 | #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) | 172 | #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h index 782176c5f4f8..5844078764bd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h | |||
| @@ -65,7 +65,7 @@ struct __packed aq_ring_buff_s { | |||
| 65 | }; | 65 | }; |
| 66 | union { | 66 | union { |
| 67 | struct { | 67 | struct { |
| 68 | u32 len:16; | 68 | u16 len; |
| 69 | u32 is_ip_cso:1; | 69 | u32 is_ip_cso:1; |
| 70 | u32 is_udp_cso:1; | 70 | u32 is_udp_cso:1; |
| 71 | u32 is_tcp_cso:1; | 71 | u32 is_tcp_cso:1; |
| @@ -77,8 +77,10 @@ struct __packed aq_ring_buff_s { | |||
| 77 | u32 is_cleaned:1; | 77 | u32 is_cleaned:1; |
| 78 | u32 is_error:1; | 78 | u32 is_error:1; |
| 79 | u32 rsvd3:6; | 79 | u32 rsvd3:6; |
| 80 | u16 eop_index; | ||
| 81 | u16 rsvd4; | ||
| 80 | }; | 82 | }; |
| 81 | u32 flags; | 83 | u64 flags; |
| 82 | }; | 84 | }; |
| 83 | }; | 85 | }; |
| 84 | 86 | ||
| @@ -94,6 +96,7 @@ struct aq_ring_stats_tx_s { | |||
| 94 | u64 errors; | 96 | u64 errors; |
| 95 | u64 packets; | 97 | u64 packets; |
| 96 | u64 bytes; | 98 | u64 bytes; |
| 99 | u64 queue_restarts; | ||
| 97 | }; | 100 | }; |
| 98 | 101 | ||
| 99 | union aq_ring_stats_s { | 102 | union aq_ring_stats_s { |
| @@ -147,6 +150,9 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self, | |||
| 147 | int aq_ring_init(struct aq_ring_s *self); | 150 | int aq_ring_init(struct aq_ring_s *self); |
| 148 | void aq_ring_rx_deinit(struct aq_ring_s *self); | 151 | void aq_ring_rx_deinit(struct aq_ring_s *self); |
| 149 | void aq_ring_free(struct aq_ring_s *self); | 152 | void aq_ring_free(struct aq_ring_s *self); |
| 153 | void aq_ring_update_queue_state(struct aq_ring_s *ring); | ||
| 154 | void aq_ring_queue_wake(struct aq_ring_s *ring); | ||
| 155 | void aq_ring_queue_stop(struct aq_ring_s *ring); | ||
| 150 | void aq_ring_tx_clean(struct aq_ring_s *self); | 156 | void aq_ring_tx_clean(struct aq_ring_s *self); |
| 151 | int aq_ring_rx_clean(struct aq_ring_s *self, | 157 | int aq_ring_rx_clean(struct aq_ring_s *self, |
| 152 | struct napi_struct *napi, | 158 | struct napi_struct *napi, |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index ebf588004c46..305ff8ffac2c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c | |||
| @@ -59,12 +59,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) | |||
| 59 | if (ring[AQ_VEC_TX_ID].sw_head != | 59 | if (ring[AQ_VEC_TX_ID].sw_head != |
| 60 | ring[AQ_VEC_TX_ID].hw_head) { | 60 | ring[AQ_VEC_TX_ID].hw_head) { |
| 61 | aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); | 61 | aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); |
| 62 | 62 | aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]); | |
| 63 | if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) > | ||
| 64 | AQ_CFG_SKB_FRAGS_MAX) { | ||
| 65 | aq_nic_ndev_queue_start(self->aq_nic, | ||
| 66 | ring[AQ_VEC_TX_ID].idx); | ||
| 67 | } | ||
| 68 | was_tx_cleaned = true; | 63 | was_tx_cleaned = true; |
| 69 | } | 64 | } |
| 70 | 65 | ||
| @@ -364,6 +359,7 @@ void aq_vec_add_stats(struct aq_vec_s *self, | |||
| 364 | stats_tx->packets += tx->packets; | 359 | stats_tx->packets += tx->packets; |
| 365 | stats_tx->bytes += tx->bytes; | 360 | stats_tx->bytes += tx->bytes; |
| 366 | stats_tx->errors += tx->errors; | 361 | stats_tx->errors += tx->errors; |
| 362 | stats_tx->queue_restarts += tx->queue_restarts; | ||
| 367 | } | 363 | } |
| 368 | } | 364 | } |
| 369 | 365 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index f3957e930340..fcf89e25a773 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | |||
| @@ -16,7 +16,7 @@ | |||
| 16 | 16 | ||
| 17 | #include "../aq_common.h" | 17 | #include "../aq_common.h" |
| 18 | 18 | ||
| 19 | #define HW_ATL_B0_MTU_JUMBO (16000U) | 19 | #define HW_ATL_B0_MTU_JUMBO 16352U |
| 20 | #define HW_ATL_B0_MTU 1514U | 20 | #define HW_ATL_B0_MTU 1514U |
| 21 | 21 | ||
| 22 | #define HW_ATL_B0_TX_RINGS 4U | 22 | #define HW_ATL_B0_TX_RINGS 4U |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 4f5ec9a0fbfb..bf734b32e44b 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | |||
| @@ -351,8 +351,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self) | |||
| 351 | break; | 351 | break; |
| 352 | 352 | ||
| 353 | default: | 353 | default: |
| 354 | link_status->mbps = 0U; | 354 | return -EBUSY; |
| 355 | break; | ||
| 356 | } | 355 | } |
| 357 | } | 356 | } |
| 358 | 357 | ||
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index cec94bbb2ea5..8bc126a156e8 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c | |||
| @@ -1278,7 +1278,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev) | |||
| 1278 | 1278 | ||
| 1279 | ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); | 1279 | ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); |
| 1280 | if (ret) | 1280 | if (ret) |
| 1281 | return -ENOMEM; | 1281 | goto error; |
| 1282 | 1282 | ||
| 1283 | n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; | 1283 | n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; |
| 1284 | for (i = 0, j = 0; i < cp->max_cid_space; i++) { | 1284 | for (i = 0, j = 0; i < cp->max_cid_space; i++) { |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 49b80da51ba7..805ab45e9b5a 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
| @@ -565,8 +565,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, | |||
| 565 | return true; | 565 | return true; |
| 566 | default: | 566 | default: |
| 567 | bpf_warn_invalid_xdp_action(action); | 567 | bpf_warn_invalid_xdp_action(action); |
| 568 | /* fall through */ | ||
| 568 | case XDP_ABORTED: | 569 | case XDP_ABORTED: |
| 569 | trace_xdp_exception(nic->netdev, prog, action); | 570 | trace_xdp_exception(nic->netdev, prog, action); |
| 571 | /* fall through */ | ||
| 570 | case XDP_DROP: | 572 | case XDP_DROP: |
| 571 | /* Check if it's a recycled page, if not | 573 | /* Check if it's a recycled page, if not |
| 572 | * unmap the DMA mapping. | 574 | * unmap the DMA mapping. |
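
The nicvf change above adds /* fall through */ annotations directly before the case labels that are reached by falling through, the placement recognized by fall-through checkers such as gcc's -Wimplicit-fallthrough. A generic, self-contained sketch of the annotated pattern; the enum values and messages are invented for illustration:

    /* Illustration of comment-annotated switch fall-through; values invented. */
    #include <stdio.h>

    enum act { ACT_OK, ACT_ABORTED, ACT_DROP, ACT_BOGUS };

    static void handle(enum act a)
    {
        switch (a) {
        case ACT_OK:
            puts("pass");
            break;
        default:
            puts("invalid action");
            /* fall through */
        case ACT_ABORTED:
            puts("trace exception");
            /* fall through */
        case ACT_DROP:
            puts("drop");
            break;
        }
    }

    int main(void)
    {
        handle(ACT_BOGUS);   /* invalid action, trace exception, drop */
        handle(ACT_ABORTED); /* trace exception, drop */
        return 0;
    }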
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index e0685e630afe..c1cdbfd83bdb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | |||
| @@ -2652,7 +2652,8 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev) | |||
| 2652 | dev_err(&hdev->pdev->dev, | 2652 | dev_err(&hdev->pdev->dev, |
| 2653 | "Configure rss tc size failed, invalid TC_SIZE = %d\n", | 2653 | "Configure rss tc size failed, invalid TC_SIZE = %d\n", |
| 2654 | rss_size); | 2654 | rss_size); |
| 2655 | return -EINVAL; | 2655 | ret = -EINVAL; |
| 2656 | goto err; | ||
| 2656 | } | 2657 | } |
| 2657 | 2658 | ||
| 2658 | roundup_size = roundup_pow_of_two(rss_size); | 2659 | roundup_size = roundup_pow_of_two(rss_size); |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 523f9d05a810..8a32eb7d47b9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | |||
| @@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) | |||
| 175 | **/ | 175 | **/ |
| 176 | static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) | 176 | static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) |
| 177 | { | 177 | { |
| 178 | #ifndef CONFIG_SPARC | ||
| 179 | u32 regval; | ||
| 180 | u32 i; | ||
| 181 | #endif | ||
| 182 | s32 ret_val; | 178 | s32 ret_val; |
| 183 | 179 | ||
| 184 | ret_val = ixgbe_start_hw_generic(hw); | 180 | ret_val = ixgbe_start_hw_generic(hw); |
| 185 | |||
| 186 | #ifndef CONFIG_SPARC | ||
| 187 | /* Disable relaxed ordering */ | ||
| 188 | for (i = 0; ((i < hw->mac.max_tx_queues) && | ||
| 189 | (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { | ||
| 190 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); | ||
| 191 | regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; | ||
| 192 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); | ||
| 193 | } | ||
| 194 | |||
| 195 | for (i = 0; ((i < hw->mac.max_rx_queues) && | ||
| 196 | (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { | ||
| 197 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); | ||
| 198 | regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | | ||
| 199 | IXGBE_DCA_RXCTRL_HEAD_WRO_EN); | ||
| 200 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); | ||
| 201 | } | ||
| 202 | #endif | ||
| 203 | if (ret_val) | 181 | if (ret_val) |
| 204 | return ret_val; | 182 | return ret_val; |
| 205 | 183 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 2c19070d2a0b..6e6ab6f6875e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | |||
| @@ -366,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) | |||
| 366 | } | 366 | } |
| 367 | IXGBE_WRITE_FLUSH(hw); | 367 | IXGBE_WRITE_FLUSH(hw); |
| 368 | 368 | ||
| 369 | #ifndef CONFIG_ARCH_WANT_RELAX_ORDER | ||
| 370 | /* Disable relaxed ordering */ | ||
| 371 | for (i = 0; i < hw->mac.max_tx_queues; i++) { | ||
| 372 | u32 regval; | ||
| 373 | |||
| 374 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); | ||
| 375 | regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; | ||
| 376 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); | ||
| 377 | } | ||
| 378 | |||
| 379 | for (i = 0; i < hw->mac.max_rx_queues; i++) { | ||
| 380 | u32 regval; | ||
| 381 | |||
| 382 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); | ||
| 383 | regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | | ||
| 384 | IXGBE_DCA_RXCTRL_HEAD_WRO_EN); | ||
| 385 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); | ||
| 386 | } | ||
| 387 | #endif | ||
| 388 | return 0; | 369 | return 0; |
| 389 | } | 370 | } |
| 390 | 371 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 72c565712a5f..c3e7a8191128 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | |||
| @@ -1048,7 +1048,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
| 1048 | { | 1048 | { |
| 1049 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 1049 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
| 1050 | struct ixgbe_ring *temp_ring; | 1050 | struct ixgbe_ring *temp_ring; |
| 1051 | int i, err = 0; | 1051 | int i, j, err = 0; |
| 1052 | u32 new_rx_count, new_tx_count; | 1052 | u32 new_rx_count, new_tx_count; |
| 1053 | 1053 | ||
| 1054 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | 1054 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) |
| @@ -1085,8 +1085,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
| 1085 | } | 1085 | } |
| 1086 | 1086 | ||
| 1087 | /* allocate temporary buffer to store rings in */ | 1087 | /* allocate temporary buffer to store rings in */ |
| 1088 | i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); | 1088 | i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues, |
| 1089 | i = max_t(int, i, adapter->num_xdp_queues); | 1089 | adapter->num_rx_queues); |
| 1090 | temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); | 1090 | temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); |
| 1091 | 1091 | ||
| 1092 | if (!temp_ring) { | 1092 | if (!temp_ring) { |
| @@ -1118,8 +1118,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
| 1118 | } | 1118 | } |
| 1119 | } | 1119 | } |
| 1120 | 1120 | ||
| 1121 | for (i = 0; i < adapter->num_xdp_queues; i++) { | 1121 | for (j = 0; j < adapter->num_xdp_queues; j++, i++) { |
| 1122 | memcpy(&temp_ring[i], adapter->xdp_ring[i], | 1122 | memcpy(&temp_ring[i], adapter->xdp_ring[j], |
| 1123 | sizeof(struct ixgbe_ring)); | 1123 | sizeof(struct ixgbe_ring)); |
| 1124 | 1124 | ||
| 1125 | temp_ring[i].count = new_tx_count; | 1125 | temp_ring[i].count = new_tx_count; |
| @@ -1139,10 +1139,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
| 1139 | memcpy(adapter->tx_ring[i], &temp_ring[i], | 1139 | memcpy(adapter->tx_ring[i], &temp_ring[i], |
| 1140 | sizeof(struct ixgbe_ring)); | 1140 | sizeof(struct ixgbe_ring)); |
| 1141 | } | 1141 | } |
| 1142 | for (i = 0; i < adapter->num_xdp_queues; i++) { | 1142 | for (j = 0; j < adapter->num_xdp_queues; j++, i++) { |
| 1143 | ixgbe_free_tx_resources(adapter->xdp_ring[i]); | 1143 | ixgbe_free_tx_resources(adapter->xdp_ring[j]); |
| 1144 | 1144 | ||
| 1145 | memcpy(adapter->xdp_ring[i], &temp_ring[i], | 1145 | memcpy(adapter->xdp_ring[j], &temp_ring[i], |
| 1146 | sizeof(struct ixgbe_ring)); | 1146 | sizeof(struct ixgbe_ring)); |
| 1147 | } | 1147 | } |
| 1148 | 1148 | ||
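
In the ethtool hunk above, the temporary buffer is now sized for the Tx and XDP rings laid out back to back (or the Rx ring count, whichever is larger), and the XDP copy loops keep i running as the position in temp_ring while j indexes the XDP ring array, so the staged Tx rings are no longer overwritten. A small sketch of the index bookkeeping with made-up queue counts:

    /* Index layout sketch only; the queue counts are assumed for illustration. */
    #include <stdio.h>

    int main(void)
    {
        int num_tx = 4, num_xdp = 2, num_rx = 8;
        int n = (num_tx + num_xdp > num_rx) ? num_tx + num_xdp : num_rx;
        int i, j;

        printf("temp buffer holds %d rings\n", n);   /* max(4 + 2, 8) = 8 */

        for (i = 0; i < num_tx; i++)
            printf("temp[%d] <- tx_ring[%d]\n", i, i);
        for (j = 0; j < num_xdp; j++, i++)
            printf("temp[%d] <- xdp_ring[%d]\n", i, j);   /* temp[4], temp[5] */
        return 0;
    }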
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d962368d08d0..4d76afd13868 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
| @@ -4881,7 +4881,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) | |||
| 4881 | IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))) | 4881 | IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))) |
| 4882 | return; | 4882 | return; |
| 4883 | 4883 | ||
| 4884 | vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask; | 4884 | vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask; |
| 4885 | IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl); | 4885 | IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl); |
| 4886 | 4886 | ||
| 4887 | if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) | 4887 | if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) |
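
The one-character ixgbe fix above replaces a logical && with a bitwise & when clearing bits in IXGBE_VXLANCTRL: with &&, the read register value collapsed to 0 or 1 instead of having only the bits in mask cleared. A standalone arithmetic illustration with assumed register and mask values:

    /* Demonstrates why "reg && ~mask" is wrong for clearing bits;
     * the register value and mask below are assumed examples.
     */
    #include <stdio.h>

    int main(void)
    {
        unsigned int reg  = 0x0000d684u;   /* assumed current register value */
        unsigned int mask = 0x00000004u;   /* bit to clear */

        printf("bitwise : 0x%08x\n", reg & ~mask);                   /* 0x0000d680 */
        printf("logical : 0x%08x\n", (unsigned int)(reg && ~mask));  /* 0x00000001 */
        return 0;
    }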
| @@ -8529,6 +8529,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) | |||
| 8529 | return ixgbe_ptp_set_ts_config(adapter, req); | 8529 | return ixgbe_ptp_set_ts_config(adapter, req); |
| 8530 | case SIOCGHWTSTAMP: | 8530 | case SIOCGHWTSTAMP: |
| 8531 | return ixgbe_ptp_get_ts_config(adapter, req); | 8531 | return ixgbe_ptp_get_ts_config(adapter, req); |
| 8532 | case SIOCGMIIPHY: | ||
| 8533 | if (!adapter->hw.phy.ops.read_reg) | ||
| 8534 | return -EOPNOTSUPP; | ||
| 8535 | /* fall through */ | ||
| 8532 | default: | 8536 | default: |
| 8533 | return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); | 8537 | return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); |
| 8534 | } | 8538 | } |
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index dd0ee2691c86..9c86cb7cb988 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
| @@ -333,7 +333,7 @@ | |||
| 333 | #define MVPP2_GMAC_INBAND_AN_MASK BIT(0) | 333 | #define MVPP2_GMAC_INBAND_AN_MASK BIT(0) |
| 334 | #define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) | 334 | #define MVPP2_GMAC_FLOW_CTRL_MASK GENMASK(2, 1) |
| 335 | #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) | 335 | #define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) |
| 336 | #define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) | 336 | #define MVPP2_GMAC_INTERNAL_CLK_MASK BIT(4) |
| 337 | #define MVPP2_GMAC_DISABLE_PADDING BIT(5) | 337 | #define MVPP2_GMAC_DISABLE_PADDING BIT(5) |
| 338 | #define MVPP2_GMAC_PORT_RESET_MASK BIT(6) | 338 | #define MVPP2_GMAC_PORT_RESET_MASK BIT(6) |
| 339 | #define MVPP2_GMAC_AUTONEG_CONFIG 0xc | 339 | #define MVPP2_GMAC_AUTONEG_CONFIG 0xc |
| @@ -676,6 +676,7 @@ enum mvpp2_tag_type { | |||
| 676 | #define MVPP2_PRS_RI_L3_MCAST BIT(15) | 676 | #define MVPP2_PRS_RI_L3_MCAST BIT(15) |
| 677 | #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) | 677 | #define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) |
| 678 | #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 | 678 | #define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 |
| 679 | #define MVPP2_PRS_RI_IP_FRAG_TRUE BIT(17) | ||
| 679 | #define MVPP2_PRS_RI_UDF3_MASK 0x300000 | 680 | #define MVPP2_PRS_RI_UDF3_MASK 0x300000 |
| 680 | #define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) | 681 | #define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) |
| 681 | #define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 | 682 | #define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 |
| @@ -792,6 +793,7 @@ struct mvpp2 { | |||
| 792 | struct clk *pp_clk; | 793 | struct clk *pp_clk; |
| 793 | struct clk *gop_clk; | 794 | struct clk *gop_clk; |
| 794 | struct clk *mg_clk; | 795 | struct clk *mg_clk; |
| 796 | struct clk *axi_clk; | ||
| 795 | 797 | ||
| 796 | /* List of pointers to port structures */ | 798 | /* List of pointers to port structures */ |
| 797 | struct mvpp2_port **port_list; | 799 | struct mvpp2_port **port_list; |
| @@ -2315,7 +2317,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, | |||
| 2315 | (proto != IPPROTO_IGMP)) | 2317 | (proto != IPPROTO_IGMP)) |
| 2316 | return -EINVAL; | 2318 | return -EINVAL; |
| 2317 | 2319 | ||
| 2318 | /* Fragmented packet */ | 2320 | /* Not fragmented packet */ |
| 2319 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | 2321 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, |
| 2320 | MVPP2_PE_LAST_FREE_TID); | 2322 | MVPP2_PE_LAST_FREE_TID); |
| 2321 | if (tid < 0) | 2323 | if (tid < 0) |
| @@ -2334,8 +2336,12 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, | |||
| 2334 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); | 2336 | MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); |
| 2335 | mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, | 2337 | mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, |
| 2336 | MVPP2_PRS_IPV4_DIP_AI_BIT); | 2338 | MVPP2_PRS_IPV4_DIP_AI_BIT); |
| 2337 | mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK, | 2339 | mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); |
| 2338 | ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); | 2340 | |
| 2341 | mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, | ||
| 2342 | MVPP2_PRS_TCAM_PROTO_MASK_L); | ||
| 2343 | mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, | ||
| 2344 | MVPP2_PRS_TCAM_PROTO_MASK); | ||
| 2339 | 2345 | ||
| 2340 | mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); | 2346 | mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK); |
| 2341 | mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); | 2347 | mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); |
| @@ -2346,7 +2352,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, | |||
| 2346 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); | 2352 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); |
| 2347 | mvpp2_prs_hw_write(priv, &pe); | 2353 | mvpp2_prs_hw_write(priv, &pe); |
| 2348 | 2354 | ||
| 2349 | /* Not fragmented packet */ | 2355 | /* Fragmented packet */ |
| 2350 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, | 2356 | tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, |
| 2351 | MVPP2_PE_LAST_FREE_TID); | 2357 | MVPP2_PE_LAST_FREE_TID); |
| 2352 | if (tid < 0) | 2358 | if (tid < 0) |
| @@ -2358,8 +2364,11 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto, | |||
| 2358 | pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; | 2364 | pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; |
| 2359 | mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); | 2365 | mvpp2_prs_sram_ri_update(&pe, ri, ri_mask); |
| 2360 | 2366 | ||
| 2361 | mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L); | 2367 | mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE, |
| 2362 | mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK); | 2368 | ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK); |
| 2369 | |||
| 2370 | mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0); | ||
| 2371 | mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0); | ||
| 2363 | 2372 | ||
| 2364 | /* Update shadow table and hw entry */ | 2373 | /* Update shadow table and hw entry */ |
| 2365 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); | 2374 | mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); |
| @@ -4591,7 +4600,6 @@ static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port) | |||
| 4591 | val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; | 4600 | val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK; |
| 4592 | } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { | 4601 | } else if (phy_interface_mode_is_rgmii(port->phy_interface)) { |
| 4593 | val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; | 4602 | val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; |
| 4594 | val |= MVPP2_GMAC_PORT_RGMII_MASK; | ||
| 4595 | } | 4603 | } |
| 4596 | writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); | 4604 | writel(val, port->base + MVPP2_GMAC_CTRL_2_REG); |
| 4597 | 4605 | ||
| @@ -7496,7 +7504,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv, | |||
| 7496 | /* Ports initialization */ | 7504 | /* Ports initialization */ |
| 7497 | static int mvpp2_port_probe(struct platform_device *pdev, | 7505 | static int mvpp2_port_probe(struct platform_device *pdev, |
| 7498 | struct device_node *port_node, | 7506 | struct device_node *port_node, |
| 7499 | struct mvpp2 *priv) | 7507 | struct mvpp2 *priv, int index) |
| 7500 | { | 7508 | { |
| 7501 | struct device_node *phy_node; | 7509 | struct device_node *phy_node; |
| 7502 | struct phy *comphy; | 7510 | struct phy *comphy; |
| @@ -7670,7 +7678,7 @@ static int mvpp2_port_probe(struct platform_device *pdev, | |||
| 7670 | } | 7678 | } |
| 7671 | netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); | 7679 | netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); |
| 7672 | 7680 | ||
| 7673 | priv->port_list[id] = port; | 7681 | priv->port_list[index] = port; |
| 7674 | return 0; | 7682 | return 0; |
| 7675 | 7683 | ||
| 7676 | err_free_port_pcpu: | 7684 | err_free_port_pcpu: |
| @@ -7963,6 +7971,18 @@ static int mvpp2_probe(struct platform_device *pdev) | |||
| 7963 | err = clk_prepare_enable(priv->mg_clk); | 7971 | err = clk_prepare_enable(priv->mg_clk); |
| 7964 | if (err < 0) | 7972 | if (err < 0) |
| 7965 | goto err_gop_clk; | 7973 | goto err_gop_clk; |
| 7974 | |||
| 7975 | priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk"); | ||
| 7976 | if (IS_ERR(priv->axi_clk)) { | ||
| 7977 | err = PTR_ERR(priv->axi_clk); | ||
| 7978 | if (err == -EPROBE_DEFER) | ||
| 7979 | goto err_gop_clk; | ||
| 7980 | priv->axi_clk = NULL; | ||
| 7981 | } else { | ||
| 7982 | err = clk_prepare_enable(priv->axi_clk); | ||
| 7983 | if (err < 0) | ||
| 7984 | goto err_gop_clk; | ||
| 7985 | } | ||
| 7966 | } | 7986 | } |
| 7967 | 7987 | ||
| 7968 | /* Get system's tclk rate */ | 7988 | /* Get system's tclk rate */ |
| @@ -8005,16 +8025,19 @@ static int mvpp2_probe(struct platform_device *pdev) | |||
| 8005 | } | 8025 | } |
| 8006 | 8026 | ||
| 8007 | /* Initialize ports */ | 8027 | /* Initialize ports */ |
| 8028 | i = 0; | ||
| 8008 | for_each_available_child_of_node(dn, port_node) { | 8029 | for_each_available_child_of_node(dn, port_node) { |
| 8009 | err = mvpp2_port_probe(pdev, port_node, priv); | 8030 | err = mvpp2_port_probe(pdev, port_node, priv, i); |
| 8010 | if (err < 0) | 8031 | if (err < 0) |
| 8011 | goto err_mg_clk; | 8032 | goto err_mg_clk; |
| 8033 | i++; | ||
| 8012 | } | 8034 | } |
| 8013 | 8035 | ||
| 8014 | platform_set_drvdata(pdev, priv); | 8036 | platform_set_drvdata(pdev, priv); |
| 8015 | return 0; | 8037 | return 0; |
| 8016 | 8038 | ||
| 8017 | err_mg_clk: | 8039 | err_mg_clk: |
| 8040 | clk_disable_unprepare(priv->axi_clk); | ||
| 8018 | if (priv->hw_version == MVPP22) | 8041 | if (priv->hw_version == MVPP22) |
| 8019 | clk_disable_unprepare(priv->mg_clk); | 8042 | clk_disable_unprepare(priv->mg_clk); |
| 8020 | err_gop_clk: | 8043 | err_gop_clk: |
| @@ -8052,6 +8075,7 @@ static int mvpp2_remove(struct platform_device *pdev) | |||
| 8052 | aggr_txq->descs_dma); | 8075 | aggr_txq->descs_dma); |
| 8053 | } | 8076 | } |
| 8054 | 8077 | ||
| 8078 | clk_disable_unprepare(priv->axi_clk); | ||
| 8055 | clk_disable_unprepare(priv->mg_clk); | 8079 | clk_disable_unprepare(priv->mg_clk); |
| 8056 | clk_disable_unprepare(priv->pp_clk); | 8080 | clk_disable_unprepare(priv->pp_clk); |
| 8057 | clk_disable_unprepare(priv->gop_clk); | 8081 | clk_disable_unprepare(priv->gop_clk); |
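
The mvpp2 probe hunk treats the new "axi_clk" as optional: if the clock is absent, the driver carries on with priv->axi_clk left NULL (clk_disable_unprepare() ignores a NULL clock), but -EPROBE_DEFER is still propagated so the probe is retried once the clock provider appears. A hedged sketch of that optional-clock pattern; the function name, device pointer and clock name below are placeholders, not mvpp2's:

    /* Sketch of the optional-clock probe pattern shown above. */
    #include <linux/clk.h>
    #include <linux/err.h>

    static int example_get_optional_clk(struct device *dev, struct clk **out)
    {
        struct clk *clk;
        int err;

        clk = devm_clk_get(dev, "bus");
        if (IS_ERR(clk)) {
            err = PTR_ERR(clk);
            if (err == -EPROBE_DEFER)
                return err;        /* provider not ready yet: retry probe later */
            clk = NULL;            /* clock really absent: carry on without it */
        } else {
            err = clk_prepare_enable(clk);
            if (err < 0)
                return err;
        }

        *out = clk;                /* clk_disable_unprepare(NULL) is a no-op */
        return 0;
    }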
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h index 1e3a6c3e4132..80eef4163f52 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h | |||
| @@ -139,7 +139,7 @@ TRACE_EVENT(mlx5_fs_del_fg, | |||
| 139 | {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"} | 139 | {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"} |
| 140 | 140 | ||
| 141 | TRACE_EVENT(mlx5_fs_set_fte, | 141 | TRACE_EVENT(mlx5_fs_set_fte, |
| 142 | TP_PROTO(const struct fs_fte *fte, bool new_fte), | 142 | TP_PROTO(const struct fs_fte *fte, int new_fte), |
| 143 | TP_ARGS(fte, new_fte), | 143 | TP_ARGS(fte, new_fte), |
| 144 | TP_STRUCT__entry( | 144 | TP_STRUCT__entry( |
| 145 | __field(const struct fs_fte *, fte) | 145 | __field(const struct fs_fte *, fte) |
| @@ -149,7 +149,7 @@ TRACE_EVENT(mlx5_fs_set_fte, | |||
| 149 | __field(u32, action) | 149 | __field(u32, action) |
| 150 | __field(u32, flow_tag) | 150 | __field(u32, flow_tag) |
| 151 | __field(u8, mask_enable) | 151 | __field(u8, mask_enable) |
| 152 | __field(bool, new_fte) | 152 | __field(int, new_fte) |
| 153 | __array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) | 153 | __array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) |
| 154 | __array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) | 154 | __array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4)) |
| 155 | __array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc)) | 155 | __array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc)) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index f11fd07ac4dd..850cdc980ab5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | |||
| @@ -291,7 +291,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv) | |||
| 291 | priv->fs.vlan.filter_disabled = false; | 291 | priv->fs.vlan.filter_disabled = false; |
| 292 | if (priv->netdev->flags & IFF_PROMISC) | 292 | if (priv->netdev->flags & IFF_PROMISC) |
| 293 | return; | 293 | return; |
| 294 | mlx5e_del_any_vid_rules(priv); | 294 | mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); |
| 295 | } | 295 | } |
| 296 | 296 | ||
| 297 | void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) | 297 | void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) |
| @@ -302,7 +302,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv) | |||
| 302 | priv->fs.vlan.filter_disabled = true; | 302 | priv->fs.vlan.filter_disabled = true; |
| 303 | if (priv->netdev->flags & IFF_PROMISC) | 303 | if (priv->netdev->flags & IFF_PROMISC) |
| 304 | return; | 304 | return; |
| 305 | mlx5e_add_any_vid_rules(priv); | 305 | mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); |
| 306 | } | 306 | } |
| 307 | 307 | ||
| 308 | int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, | 308 | int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index dfc29720ab77..cc11bbbd0309 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -184,7 +184,6 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) | |||
| 184 | struct mlx5e_sw_stats temp, *s = &temp; | 184 | struct mlx5e_sw_stats temp, *s = &temp; |
| 185 | struct mlx5e_rq_stats *rq_stats; | 185 | struct mlx5e_rq_stats *rq_stats; |
| 186 | struct mlx5e_sq_stats *sq_stats; | 186 | struct mlx5e_sq_stats *sq_stats; |
| 187 | u64 tx_offload_none = 0; | ||
| 188 | int i, j; | 187 | int i, j; |
| 189 | 188 | ||
| 190 | memset(s, 0, sizeof(*s)); | 189 | memset(s, 0, sizeof(*s)); |
| @@ -199,6 +198,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) | |||
| 199 | s->rx_lro_bytes += rq_stats->lro_bytes; | 198 | s->rx_lro_bytes += rq_stats->lro_bytes; |
| 200 | s->rx_csum_none += rq_stats->csum_none; | 199 | s->rx_csum_none += rq_stats->csum_none; |
| 201 | s->rx_csum_complete += rq_stats->csum_complete; | 200 | s->rx_csum_complete += rq_stats->csum_complete; |
| 201 | s->rx_csum_unnecessary += rq_stats->csum_unnecessary; | ||
| 202 | s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; | 202 | s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; |
| 203 | s->rx_xdp_drop += rq_stats->xdp_drop; | 203 | s->rx_xdp_drop += rq_stats->xdp_drop; |
| 204 | s->rx_xdp_tx += rq_stats->xdp_tx; | 204 | s->rx_xdp_tx += rq_stats->xdp_tx; |
| @@ -229,14 +229,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv) | |||
| 229 | s->tx_queue_dropped += sq_stats->dropped; | 229 | s->tx_queue_dropped += sq_stats->dropped; |
| 230 | s->tx_xmit_more += sq_stats->xmit_more; | 230 | s->tx_xmit_more += sq_stats->xmit_more; |
| 231 | s->tx_csum_partial_inner += sq_stats->csum_partial_inner; | 231 | s->tx_csum_partial_inner += sq_stats->csum_partial_inner; |
| 232 | tx_offload_none += sq_stats->csum_none; | 232 | s->tx_csum_none += sq_stats->csum_none; |
| 233 | s->tx_csum_partial += sq_stats->csum_partial; | ||
| 233 | } | 234 | } |
| 234 | } | 235 | } |
| 235 | 236 | ||
| 236 | /* Update calculated offload counters */ | ||
| 237 | s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner; | ||
| 238 | s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete; | ||
| 239 | |||
| 240 | s->link_down_events_phy = MLX5_GET(ppcnt_reg, | 237 | s->link_down_events_phy = MLX5_GET(ppcnt_reg, |
| 241 | priv->stats.pport.phy_counters, | 238 | priv->stats.pport.phy_counters, |
| 242 | counter_set.phys_layer_cntrs.link_down_events); | 239 | counter_set.phys_layer_cntrs.link_down_events); |
| @@ -3333,8 +3330,8 @@ static int mlx5e_handle_feature(struct net_device *netdev, | |||
| 3333 | 3330 | ||
| 3334 | err = feature_handler(netdev, enable); | 3331 | err = feature_handler(netdev, enable); |
| 3335 | if (err) { | 3332 | if (err) { |
| 3336 | netdev_err(netdev, "%s feature 0x%llx failed err %d\n", | 3333 | netdev_err(netdev, "%s feature %pNF failed, err %d\n", |
| 3337 | enable ? "Enable" : "Disable", feature, err); | 3334 | enable ? "Enable" : "Disable", &feature, err); |
| 3338 | return err; | 3335 | return err; |
| 3339 | } | 3336 | } |
| 3340 | 3337 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index f1dd638384d3..15a1687483cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
| @@ -627,6 +627,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, | |||
| 627 | 627 | ||
| 628 | if (lro) { | 628 | if (lro) { |
| 629 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 629 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 630 | rq->stats.csum_unnecessary++; | ||
| 630 | return; | 631 | return; |
| 631 | } | 632 | } |
| 632 | 633 | ||
| @@ -644,7 +645,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev, | |||
| 644 | skb->csum_level = 1; | 645 | skb->csum_level = 1; |
| 645 | skb->encapsulation = 1; | 646 | skb->encapsulation = 1; |
| 646 | rq->stats.csum_unnecessary_inner++; | 647 | rq->stats.csum_unnecessary_inner++; |
| 648 | return; | ||
| 647 | } | 649 | } |
| 650 | rq->stats.csum_unnecessary++; | ||
| 648 | return; | 651 | return; |
| 649 | } | 652 | } |
| 650 | csum_none: | 653 | csum_none: |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 6d199ffb1c0b..f8637213afc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | |||
| @@ -68,6 +68,7 @@ struct mlx5e_sw_stats { | |||
| 68 | u64 rx_xdp_drop; | 68 | u64 rx_xdp_drop; |
| 69 | u64 rx_xdp_tx; | 69 | u64 rx_xdp_tx; |
| 70 | u64 rx_xdp_tx_full; | 70 | u64 rx_xdp_tx_full; |
| 71 | u64 tx_csum_none; | ||
| 71 | u64 tx_csum_partial; | 72 | u64 tx_csum_partial; |
| 72 | u64 tx_csum_partial_inner; | 73 | u64 tx_csum_partial_inner; |
| 73 | u64 tx_queue_stopped; | 74 | u64 tx_queue_stopped; |
| @@ -108,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = { | |||
| 108 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, | 109 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, |
| 109 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, | 110 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, |
| 110 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, | 111 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, |
| 112 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) }, | ||
| 111 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, | 113 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, |
| 112 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, | 114 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, |
| 113 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, | 115 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) }, |
| @@ -339,6 +341,7 @@ struct mlx5e_rq_stats { | |||
| 339 | u64 packets; | 341 | u64 packets; |
| 340 | u64 bytes; | 342 | u64 bytes; |
| 341 | u64 csum_complete; | 343 | u64 csum_complete; |
| 344 | u64 csum_unnecessary; | ||
| 342 | u64 csum_unnecessary_inner; | 345 | u64 csum_unnecessary_inner; |
| 343 | u64 csum_none; | 346 | u64 csum_none; |
| 344 | u64 lro_packets; | 347 | u64 lro_packets; |
| @@ -363,6 +366,7 @@ static const struct counter_desc rq_stats_desc[] = { | |||
| 363 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, | 366 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) }, |
| 364 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, | 367 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) }, |
| 365 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, | 368 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) }, |
| 369 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, | ||
| 366 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, | 370 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, |
| 367 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, | 371 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, |
| 368 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, | 372 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, |
| @@ -392,6 +396,7 @@ struct mlx5e_sq_stats { | |||
| 392 | u64 tso_bytes; | 396 | u64 tso_bytes; |
| 393 | u64 tso_inner_packets; | 397 | u64 tso_inner_packets; |
| 394 | u64 tso_inner_bytes; | 398 | u64 tso_inner_bytes; |
| 399 | u64 csum_partial; | ||
| 395 | u64 csum_partial_inner; | 400 | u64 csum_partial_inner; |
| 396 | u64 nop; | 401 | u64 nop; |
| 397 | /* less likely accessed in data path */ | 402 | /* less likely accessed in data path */ |
| @@ -408,6 +413,7 @@ static const struct counter_desc sq_stats_desc[] = { | |||
| 408 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) }, | 413 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) }, |
| 409 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, | 414 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) }, |
| 410 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, | 415 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) }, |
| 416 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) }, | ||
| 411 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, | 417 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) }, |
| 412 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, | 418 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) }, |
| 413 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, | 419 | { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) }, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index da503e6411da..1aa2028ed995 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
| @@ -1317,6 +1317,69 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda | |||
| 1317 | return true; | 1317 | return true; |
| 1318 | } | 1318 | } |
| 1319 | 1319 | ||
| 1320 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | ||
| 1321 | struct tcf_exts *exts) | ||
| 1322 | { | ||
| 1323 | const struct tc_action *a; | ||
| 1324 | bool modify_ip_header; | ||
| 1325 | LIST_HEAD(actions); | ||
| 1326 | u8 htype, ip_proto; | ||
| 1327 | void *headers_v; | ||
| 1328 | u16 ethertype; | ||
| 1329 | int nkeys, i; | ||
| 1330 | |||
| 1331 | headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); | ||
| 1332 | ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); | ||
| 1333 | |||
| 1334 | /* for non-IP we only re-write MACs, so we're okay */ | ||
| 1335 | if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6) | ||
| 1336 | goto out_ok; | ||
| 1337 | |||
| 1338 | modify_ip_header = false; | ||
| 1339 | tcf_exts_to_list(exts, &actions); | ||
| 1340 | list_for_each_entry(a, &actions, list) { | ||
| 1341 | if (!is_tcf_pedit(a)) | ||
| 1342 | continue; | ||
| 1343 | |||
| 1344 | nkeys = tcf_pedit_nkeys(a); | ||
| 1345 | for (i = 0; i < nkeys; i++) { | ||
| 1346 | htype = tcf_pedit_htype(a, i); | ||
| 1347 | if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 || | ||
| 1348 | htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) { | ||
| 1349 | modify_ip_header = true; | ||
| 1350 | break; | ||
| 1351 | } | ||
| 1352 | } | ||
| 1353 | } | ||
| 1354 | |||
| 1355 | ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol); | ||
| 1356 | if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) { | ||
| 1357 | pr_info("can't offload re-write of ip proto %d\n", ip_proto); | ||
| 1358 | return false; | ||
| 1359 | } | ||
| 1360 | |||
| 1361 | out_ok: | ||
| 1362 | return true; | ||
| 1363 | } | ||
| 1364 | |||
| 1365 | static bool actions_match_supported(struct mlx5e_priv *priv, | ||
| 1366 | struct tcf_exts *exts, | ||
| 1367 | struct mlx5e_tc_flow_parse_attr *parse_attr, | ||
| 1368 | struct mlx5e_tc_flow *flow) | ||
| 1369 | { | ||
| 1370 | u32 actions; | ||
| 1371 | |||
| 1372 | if (flow->flags & MLX5E_TC_FLOW_ESWITCH) | ||
| 1373 | actions = flow->esw_attr->action; | ||
| 1374 | else | ||
| 1375 | actions = flow->nic_attr->action; | ||
| 1376 | |||
| 1377 | if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) | ||
| 1378 | return modify_header_match_supported(&parse_attr->spec, exts); | ||
| 1379 | |||
| 1380 | return true; | ||
| 1381 | } | ||
| 1382 | |||
| 1320 | static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | 1383 | static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, |
| 1321 | struct mlx5e_tc_flow_parse_attr *parse_attr, | 1384 | struct mlx5e_tc_flow_parse_attr *parse_attr, |
| 1322 | struct mlx5e_tc_flow *flow) | 1385 | struct mlx5e_tc_flow *flow) |
| @@ -1378,6 +1441,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
| 1378 | return -EINVAL; | 1441 | return -EINVAL; |
| 1379 | } | 1442 | } |
| 1380 | 1443 | ||
| 1444 | if (!actions_match_supported(priv, exts, parse_attr, flow)) | ||
| 1445 | return -EOPNOTSUPP; | ||
| 1446 | |||
| 1381 | return 0; | 1447 | return 0; |
| 1382 | } | 1448 | } |
| 1383 | 1449 | ||
| @@ -1564,7 +1630,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, | |||
| 1564 | break; | 1630 | break; |
| 1565 | default: | 1631 | default: |
| 1566 | err = -EOPNOTSUPP; | 1632 | err = -EOPNOTSUPP; |
| 1567 | goto out; | 1633 | goto free_encap; |
| 1568 | } | 1634 | } |
| 1569 | fl4.flowi4_tos = tun_key->tos; | 1635 | fl4.flowi4_tos = tun_key->tos; |
| 1570 | fl4.daddr = tun_key->u.ipv4.dst; | 1636 | fl4.daddr = tun_key->u.ipv4.dst; |
| @@ -1573,7 +1639,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, | |||
| 1573 | err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, | 1639 | err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, |
| 1574 | &fl4, &n, &ttl); | 1640 | &fl4, &n, &ttl); |
| 1575 | if (err) | 1641 | if (err) |
| 1576 | goto out; | 1642 | goto free_encap; |
| 1577 | 1643 | ||
| 1578 | /* used by mlx5e_detach_encap to lookup a neigh hash table | 1644 | /* used by mlx5e_detach_encap to lookup a neigh hash table |
| 1579 | * entry in the neigh hash table when a user deletes a rule | 1645 | * entry in the neigh hash table when a user deletes a rule |
| @@ -1590,7 +1656,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, | |||
| 1590 | */ | 1656 | */ |
| 1591 | err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); | 1657 | err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); |
| 1592 | if (err) | 1658 | if (err) |
| 1593 | goto out; | 1659 | goto free_encap; |
| 1594 | 1660 | ||
| 1595 | read_lock_bh(&n->lock); | 1661 | read_lock_bh(&n->lock); |
| 1596 | nud_state = n->nud_state; | 1662 | nud_state = n->nud_state; |
| @@ -1630,8 +1696,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, | |||
| 1630 | 1696 | ||
| 1631 | destroy_neigh_entry: | 1697 | destroy_neigh_entry: |
| 1632 | mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); | 1698 | mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); |
| 1633 | out: | 1699 | free_encap: |
| 1634 | kfree(encap_header); | 1700 | kfree(encap_header); |
| 1701 | out: | ||
| 1635 | if (n) | 1702 | if (n) |
| 1636 | neigh_release(n); | 1703 | neigh_release(n); |
| 1637 | return err; | 1704 | return err; |
| @@ -1668,7 +1735,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, | |||
| 1668 | break; | 1735 | break; |
| 1669 | default: | 1736 | default: |
| 1670 | err = -EOPNOTSUPP; | 1737 | err = -EOPNOTSUPP; |
| 1671 | goto out; | 1738 | goto free_encap; |
| 1672 | } | 1739 | } |
| 1673 | 1740 | ||
| 1674 | fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); | 1741 | fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); |
| @@ -1678,7 +1745,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, | |||
| 1678 | err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, | 1745 | err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, |
| 1679 | &fl6, &n, &ttl); | 1746 | &fl6, &n, &ttl); |
| 1680 | if (err) | 1747 | if (err) |
| 1681 | goto out; | 1748 | goto free_encap; |
| 1682 | 1749 | ||
| 1683 | /* used by mlx5e_detach_encap to lookup a neigh hash table | 1750 | /* used by mlx5e_detach_encap to lookup a neigh hash table |
| 1684 | * entry in the neigh hash table when a user deletes a rule | 1751 | * entry in the neigh hash table when a user deletes a rule |
| @@ -1695,7 +1762,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, | |||
| 1695 | */ | 1762 | */ |
| 1696 | err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); | 1763 | err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); |
| 1697 | if (err) | 1764 | if (err) |
| 1698 | goto out; | 1765 | goto free_encap; |
| 1699 | 1766 | ||
| 1700 | read_lock_bh(&n->lock); | 1767 | read_lock_bh(&n->lock); |
| 1701 | nud_state = n->nud_state; | 1768 | nud_state = n->nud_state; |
| @@ -1736,8 +1803,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, | |||
| 1736 | 1803 | ||
| 1737 | destroy_neigh_entry: | 1804 | destroy_neigh_entry: |
| 1738 | mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); | 1805 | mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); |
| 1739 | out: | 1806 | free_encap: |
| 1740 | kfree(encap_header); | 1807 | kfree(encap_header); |
| 1808 | out: | ||
| 1741 | if (n) | 1809 | if (n) |
| 1742 | neigh_release(n); | 1810 | neigh_release(n); |
| 1743 | return err; | 1811 | return err; |
| @@ -1791,6 +1859,7 @@ vxlan_encap_offload_err: | |||
| 1791 | } | 1859 | } |
| 1792 | } | 1860 | } |
| 1793 | 1861 | ||
| 1862 | /* must verify if encap is valid or not */ | ||
| 1794 | if (found) | 1863 | if (found) |
| 1795 | goto attach_flow; | 1864 | goto attach_flow; |
| 1796 | 1865 | ||
| @@ -1817,6 +1886,8 @@ attach_flow: | |||
| 1817 | *encap_dev = e->out_dev; | 1886 | *encap_dev = e->out_dev; |
| 1818 | if (e->flags & MLX5_ENCAP_ENTRY_VALID) | 1887 | if (e->flags & MLX5_ENCAP_ENTRY_VALID) |
| 1819 | attr->encap_id = e->encap_id; | 1888 | attr->encap_id = e->encap_id; |
| 1889 | else | ||
| 1890 | err = -EAGAIN; | ||
| 1820 | 1891 | ||
| 1821 | return err; | 1892 | return err; |
| 1822 | 1893 | ||
| @@ -1934,6 +2005,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
| 1934 | 2005 | ||
| 1935 | return -EINVAL; | 2006 | return -EINVAL; |
| 1936 | } | 2007 | } |
| 2008 | |||
| 2009 | if (!actions_match_supported(priv, exts, parse_attr, flow)) | ||
| 2010 | return -EOPNOTSUPP; | ||
| 2011 | |||
| 1937 | return err; | 2012 | return err; |
| 1938 | } | 2013 | } |
| 1939 | 2014 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index fee43e40fa16..1d6925d4369a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
| @@ -193,6 +193,7 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct | |||
| 193 | sq->stats.csum_partial_inner++; | 193 | sq->stats.csum_partial_inner++; |
| 194 | } else { | 194 | } else { |
| 195 | eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; | 195 | eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; |
| 196 | sq->stats.csum_partial++; | ||
| 196 | } | 197 | } |
| 197 | } else | 198 | } else |
| 198 | sq->stats.csum_none++; | 199 | sq->stats.csum_none++; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c index e37453d838db..c0fd2212e890 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c | |||
| @@ -71,11 +71,11 @@ int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr, | |||
| 71 | return 0; | 71 | return 0; |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps) | 74 | int mlx5_fpga_caps(struct mlx5_core_dev *dev) |
| 75 | { | 75 | { |
| 76 | u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0}; | 76 | u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0}; |
| 77 | 77 | ||
| 78 | return mlx5_core_access_reg(dev, in, sizeof(in), caps, | 78 | return mlx5_core_access_reg(dev, in, sizeof(in), dev->caps.fpga, |
| 79 | MLX5_ST_SZ_BYTES(fpga_cap), | 79 | MLX5_ST_SZ_BYTES(fpga_cap), |
| 80 | MLX5_REG_FPGA_CAP, 0, 0); | 80 | MLX5_REG_FPGA_CAP, 0, 0); |
| 81 | } | 81 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h index 94bdfd47c3f0..d05233c9b4f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h | |||
| @@ -65,7 +65,7 @@ struct mlx5_fpga_qp_counters { | |||
| 65 | u64 rx_total_drop; | 65 | u64 rx_total_drop; |
| 66 | }; | 66 | }; |
| 67 | 67 | ||
| 68 | int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps); | 68 | int mlx5_fpga_caps(struct mlx5_core_dev *dev); |
| 69 | int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query); | 69 | int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query); |
| 70 | int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op); | 70 | int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op); |
| 71 | int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr, | 71 | int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index 9034e9960a76..dc8970346521 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c | |||
| @@ -139,8 +139,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) | |||
| 139 | if (err) | 139 | if (err) |
| 140 | goto out; | 140 | goto out; |
| 141 | 141 | ||
| 142 | err = mlx5_fpga_caps(fdev->mdev, | 142 | err = mlx5_fpga_caps(fdev->mdev); |
| 143 | fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]); | ||
| 144 | if (err) | 143 | if (err) |
| 145 | goto out; | 144 | goto out; |
| 146 | 145 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index e0d0efd903bc..36ecc2b2e187 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | |||
| @@ -293,6 +293,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, | |||
| 293 | } | 293 | } |
| 294 | 294 | ||
| 295 | if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { | 295 | if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { |
| 296 | int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev, | ||
| 297 | log_max_flow_counter, | ||
| 298 | ft->type)); | ||
| 296 | int list_size = 0; | 299 | int list_size = 0; |
| 297 | 300 | ||
| 298 | list_for_each_entry(dst, &fte->node.children, node.list) { | 301 | list_for_each_entry(dst, &fte->node.children, node.list) { |
| @@ -305,12 +308,17 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, | |||
| 305 | in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); | 308 | in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); |
| 306 | list_size++; | 309 | list_size++; |
| 307 | } | 310 | } |
| 311 | if (list_size > max_list_size) { | ||
| 312 | err = -EINVAL; | ||
| 313 | goto err_out; | ||
| 314 | } | ||
| 308 | 315 | ||
| 309 | MLX5_SET(flow_context, in_flow_context, flow_counter_list_size, | 316 | MLX5_SET(flow_context, in_flow_context, flow_counter_list_size, |
| 310 | list_size); | 317 | list_size); |
| 311 | } | 318 | } |
| 312 | 319 | ||
| 313 | err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); | 320 | err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); |
| 321 | err_out: | ||
| 314 | kvfree(in); | 322 | kvfree(in); |
| 315 | return err; | 323 | return err; |
| 316 | } | 324 | } |
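The fs_cmd.c change above rejects a rule whose flow-counter list exceeds 1 << log_max_flow_counter before the firmware command is ever built. Below is a small stand-alone sketch of the same "validate against a log2-encoded capability" check; check_counter_list and the constants are illustrative, not the mlx5 API.

#include <errno.h>
#include <stdio.h>

/* Capability encoded as a log2 value, as log_max_flow_counter is. */
static int check_counter_list(unsigned int log_max, unsigned int requested)
{
	unsigned int max_list_size = 1u << log_max;

	if (requested > max_list_size)
		return -EINVAL;	/* reject before touching firmware */
	return 0;
}

int main(void)
{
	/* log_max = 3 -> at most 8 counters per rule in this sketch */
	printf("8 ok? %d\n", check_counter_list(3, 8));	/* 0 */
	printf("9 ok? %d\n", check_counter_list(3, 9));	/* -EINVAL */
	return 0;
}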
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 5509a752f98e..48dd78975062 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | |||
| @@ -52,6 +52,7 @@ enum fs_flow_table_type { | |||
| 52 | FS_FT_FDB = 0X4, | 52 | FS_FT_FDB = 0X4, |
| 53 | FS_FT_SNIFFER_RX = 0X5, | 53 | FS_FT_SNIFFER_RX = 0X5, |
| 54 | FS_FT_SNIFFER_TX = 0X6, | 54 | FS_FT_SNIFFER_TX = 0X6, |
| 55 | FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX, | ||
| 55 | }; | 56 | }; |
| 56 | 57 | ||
| 57 | enum fs_flow_table_op_mod { | 58 | enum fs_flow_table_op_mod { |
| @@ -260,4 +261,14 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev); | |||
| 260 | #define fs_for_each_dst(pos, fte) \ | 261 | #define fs_for_each_dst(pos, fte) \ |
| 261 | fs_list_for_each_entry(pos, &(fte)->node.children) | 262 | fs_list_for_each_entry(pos, &(fte)->node.children) |
| 262 | 263 | ||
| 264 | #define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) ( \ | ||
| 265 | (type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) : \ | ||
| 266 | (type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) : \ | ||
| 267 | (type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) : \ | ||
| 268 | (type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) : \ | ||
| 269 | (type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) : \ | ||
| 270 | (type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \ | ||
| 271 | (BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\ | ||
| 272 | ) | ||
| 273 | |||
| 263 | #endif | 274 | #endif |
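The new MLX5_CAP_FLOWTABLE_TYPE macro ends its ternary chain with BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE), so extending the flow-table enum without extending the macro fails at compile time instead of silently reading the wrong capability. Below is a minimal stand-alone version of that guard; the enum, the TABLE_CAP macro and the user-space BUILD_BUG_ON_ZERO stand-in (which approximates the kernel's GCC-specific definition) are all illustrative.

#include <stdio.h>

/* User-space stand-in for the kernel's BUILD_BUG_ON_ZERO(): a negative
 * bit-field width is a compile error when the condition is true (GCC). */
#define BUILD_BUG_ON_ZERO(e) ((int)sizeof(struct { int : (-!!(e)); }))

enum table_type { TT_RX, TT_TX, TT_MAX_TYPE = TT_TX };

/* The last arm breaks the build if TT_MAX_TYPE gains a member that the
 * chain above does not handle. */
#define TABLE_CAP(type, rx_cap, tx_cap) (		\
	(type) == TT_RX ? (rx_cap) :			\
	(type) == TT_TX ? (tx_cap) :			\
	BUILD_BUG_ON_ZERO(TT_TX != TT_MAX_TYPE))

int main(void)
{
	printf("%d\n", TABLE_CAP(TT_RX, 16, 8));	/* prints 16 */
	return 0;
}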
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 85298051a3e4..145e392ab849 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | |||
| @@ -572,12 +572,13 @@ void mlx5_rdma_netdev_free(struct net_device *netdev) | |||
| 572 | { | 572 | { |
| 573 | struct mlx5e_priv *priv = mlx5i_epriv(netdev); | 573 | struct mlx5e_priv *priv = mlx5i_epriv(netdev); |
| 574 | const struct mlx5e_profile *profile = priv->profile; | 574 | const struct mlx5e_profile *profile = priv->profile; |
| 575 | struct mlx5_core_dev *mdev = priv->mdev; | ||
| 575 | 576 | ||
| 576 | mlx5e_detach_netdev(priv); | 577 | mlx5e_detach_netdev(priv); |
| 577 | profile->cleanup(priv); | 578 | profile->cleanup(priv); |
| 578 | destroy_workqueue(priv->wq); | 579 | destroy_workqueue(priv->wq); |
| 579 | free_netdev(netdev); | 580 | free_netdev(netdev); |
| 580 | 581 | ||
| 581 | mlx5e_destroy_mdev_resources(priv->mdev); | 582 | mlx5e_destroy_mdev_resources(mdev); |
| 582 | } | 583 | } |
| 583 | EXPORT_SYMBOL(mlx5_rdma_netdev_free); | 584 | EXPORT_SYMBOL(mlx5_rdma_netdev_free); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index 6c48e9959b65..2a8b529ce6dd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c | |||
| @@ -109,7 +109,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs) | |||
| 109 | mlx5_core_warn(dev, | 109 | mlx5_core_warn(dev, |
| 110 | "failed to restore VF %d settings, err %d\n", | 110 | "failed to restore VF %d settings, err %d\n", |
| 111 | vf, err); | 111 | vf, err); |
| 112 | continue; | 112 | continue; |
| 113 | } | 113 | } |
| 114 | } | 114 | } |
| 115 | mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf); | 115 | mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 2cfb3f5d092d..c16718d296d3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
| @@ -2723,6 +2723,7 @@ static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp, | |||
| 2723 | mlxsw_sp_nexthop_rif_fini(nh); | 2723 | mlxsw_sp_nexthop_rif_fini(nh); |
| 2724 | break; | 2724 | break; |
| 2725 | case MLXSW_SP_NEXTHOP_TYPE_IPIP: | 2725 | case MLXSW_SP_NEXTHOP_TYPE_IPIP: |
| 2726 | mlxsw_sp_nexthop_rif_fini(nh); | ||
| 2726 | mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh); | 2727 | mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh); |
| 2727 | break; | 2728 | break; |
| 2728 | } | 2729 | } |
| @@ -2742,7 +2743,11 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp, | |||
| 2742 | router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, | 2743 | router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, |
| 2743 | MLXSW_SP_L3_PROTO_IPV4)) { | 2744 | MLXSW_SP_L3_PROTO_IPV4)) { |
| 2744 | nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; | 2745 | nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; |
| 2745 | return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); | 2746 | err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); |
| 2747 | if (err) | ||
| 2748 | return err; | ||
| 2749 | mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common); | ||
| 2750 | return 0; | ||
| 2746 | } | 2751 | } |
| 2747 | 2752 | ||
| 2748 | nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; | 2753 | nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; |
| @@ -3500,20 +3505,6 @@ static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp, | |||
| 3500 | static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp, | 3505 | static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp, |
| 3501 | struct mlxsw_sp_fib *fib) | 3506 | struct mlxsw_sp_fib *fib) |
| 3502 | { | 3507 | { |
| 3503 | struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } }; | ||
| 3504 | struct mlxsw_sp_lpm_tree *lpm_tree; | ||
| 3505 | |||
| 3506 | /* Aggregate prefix lengths across all virtual routers to make | ||
| 3507 | * sure we only have used prefix lengths in the LPM tree. | ||
| 3508 | */ | ||
| 3509 | mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage); | ||
| 3510 | lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, | ||
| 3511 | fib->proto); | ||
| 3512 | if (IS_ERR(lpm_tree)) | ||
| 3513 | goto err_tree_get; | ||
| 3514 | mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree); | ||
| 3515 | |||
| 3516 | err_tree_get: | ||
| 3517 | if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) | 3508 | if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) |
| 3518 | return; | 3509 | return; |
| 3519 | mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib); | 3510 | mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib); |
| @@ -4009,7 +4000,11 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp, | |||
| 4009 | router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, | 4000 | router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev, |
| 4010 | MLXSW_SP_L3_PROTO_IPV6)) { | 4001 | MLXSW_SP_L3_PROTO_IPV6)) { |
| 4011 | nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; | 4002 | nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP; |
| 4012 | return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); | 4003 | err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev); |
| 4004 | if (err) | ||
| 4005 | return err; | ||
| 4006 | mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common); | ||
| 4007 | return 0; | ||
| 4013 | } | 4008 | } |
| 4014 | 4009 | ||
| 4015 | nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; | 4010 | nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH; |
| @@ -5068,6 +5063,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, | |||
| 5068 | vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN); | 5063 | vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN); |
| 5069 | if (IS_ERR(vr)) | 5064 | if (IS_ERR(vr)) |
| 5070 | return ERR_CAST(vr); | 5065 | return ERR_CAST(vr); |
| 5066 | vr->rif_count++; | ||
| 5071 | 5067 | ||
| 5072 | err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index); | 5068 | err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index); |
| 5073 | if (err) | 5069 | if (err) |
| @@ -5099,7 +5095,6 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, | |||
| 5099 | 5095 | ||
| 5100 | mlxsw_sp_rif_counters_alloc(rif); | 5096 | mlxsw_sp_rif_counters_alloc(rif); |
| 5101 | mlxsw_sp->router->rifs[rif_index] = rif; | 5097 | mlxsw_sp->router->rifs[rif_index] = rif; |
| 5102 | vr->rif_count++; | ||
| 5103 | 5098 | ||
| 5104 | return rif; | 5099 | return rif; |
| 5105 | 5100 | ||
| @@ -5110,6 +5105,7 @@ err_fid_get: | |||
| 5110 | kfree(rif); | 5105 | kfree(rif); |
| 5111 | err_rif_alloc: | 5106 | err_rif_alloc: |
| 5112 | err_rif_index_alloc: | 5107 | err_rif_index_alloc: |
| 5108 | vr->rif_count--; | ||
| 5113 | mlxsw_sp_vr_put(vr); | 5109 | mlxsw_sp_vr_put(vr); |
| 5114 | return ERR_PTR(err); | 5110 | return ERR_PTR(err); |
| 5115 | } | 5111 | } |
| @@ -5124,7 +5120,6 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) | |||
| 5124 | mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); | 5120 | mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif); |
| 5125 | vr = &mlxsw_sp->router->vrs[rif->vr_id]; | 5121 | vr = &mlxsw_sp->router->vrs[rif->vr_id]; |
| 5126 | 5122 | ||
| 5127 | vr->rif_count--; | ||
| 5128 | mlxsw_sp->router->rifs[rif->rif_index] = NULL; | 5123 | mlxsw_sp->router->rifs[rif->rif_index] = NULL; |
| 5129 | mlxsw_sp_rif_counters_free(rif); | 5124 | mlxsw_sp_rif_counters_free(rif); |
| 5130 | ops->deconfigure(rif); | 5125 | ops->deconfigure(rif); |
| @@ -5132,6 +5127,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) | |||
| 5132 | /* Loopback RIFs are not associated with a FID. */ | 5127 | /* Loopback RIFs are not associated with a FID. */ |
| 5133 | mlxsw_sp_fid_put(fid); | 5128 | mlxsw_sp_fid_put(fid); |
| 5134 | kfree(rif); | 5129 | kfree(rif); |
| 5130 | vr->rif_count--; | ||
| 5135 | mlxsw_sp_vr_put(vr); | 5131 | mlxsw_sp_vr_put(vr); |
| 5136 | } | 5132 | } |
| 5137 | 5133 | ||
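The spectrum_router.c hunk moves vr->rif_count++ to right after mlxsw_sp_vr_get() and the matching decrement to just before mlxsw_sp_vr_put(), keeping the counter symmetric with the reference across both the error path of creation and the destroy path. A toy sketch of that pairing follows; struct vr and rif_create are made-up names, not the mlxsw types.

#include <errno.h>
#include <stdio.h>

struct vr { int refcnt; int rif_count; };

/* Bump the companion counter immediately after taking the reference and
 * drop it immediately before releasing it, so every exit path balances. */
static int rif_create(struct vr *vr, int fail)
{
	vr->refcnt++;			/* stands in for mlxsw_sp_vr_get() */
	vr->rif_count++;

	if (fail)
		goto err_put;
	return 0;

err_put:
	vr->rif_count--;
	vr->refcnt--;			/* stands in for mlxsw_sp_vr_put() */
	return -ENOMEM;
}

int main(void)
{
	struct vr vr = { 0, 0 };

	rif_create(&vr, 1);
	printf("%d %d\n", vr.refcnt, vr.rif_count);	/* 0 0 */
	return 0;
}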
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 0ea3ca09c689..3ed9033e56db 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c | |||
| @@ -898,7 +898,8 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt, | |||
| 898 | 898 | ||
| 899 | curr_rxbuf->dma_addr = | 899 | curr_rxbuf->dma_addr = |
| 900 | dma_map_single(adpt->netdev->dev.parent, skb->data, | 900 | dma_map_single(adpt->netdev->dev.parent, skb->data, |
| 901 | curr_rxbuf->length, DMA_FROM_DEVICE); | 901 | adpt->rxbuf_size, DMA_FROM_DEVICE); |
| 902 | |||
| 902 | ret = dma_mapping_error(adpt->netdev->dev.parent, | 903 | ret = dma_mapping_error(adpt->netdev->dev.parent, |
| 903 | curr_rxbuf->dma_addr); | 904 | curr_rxbuf->dma_addr); |
| 904 | if (ret) { | 905 | if (ret) { |
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 98f22551eb45..1e33aea59f50 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | |||
| @@ -51,10 +51,7 @@ struct rmnet_walk_data { | |||
| 51 | 51 | ||
| 52 | static int rmnet_is_real_dev_registered(const struct net_device *real_dev) | 52 | static int rmnet_is_real_dev_registered(const struct net_device *real_dev) |
| 53 | { | 53 | { |
| 54 | rx_handler_func_t *rx_handler; | 54 | return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler; |
| 55 | |||
| 56 | rx_handler = rcu_dereference(real_dev->rx_handler); | ||
| 57 | return (rx_handler == rmnet_rx_handler); | ||
| 58 | } | 55 | } |
| 59 | 56 | ||
| 60 | /* Needs rtnl lock */ | 57 | /* Needs rtnl lock */ |
diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h index a63ef82e7c72..dfae3c9d57c6 100644 --- a/drivers/net/ethernet/rocker/rocker_tlv.h +++ b/drivers/net/ethernet/rocker/rocker_tlv.h | |||
| @@ -139,40 +139,52 @@ rocker_tlv_start(struct rocker_desc_info *desc_info) | |||
| 139 | int rocker_tlv_put(struct rocker_desc_info *desc_info, | 139 | int rocker_tlv_put(struct rocker_desc_info *desc_info, |
| 140 | int attrtype, int attrlen, const void *data); | 140 | int attrtype, int attrlen, const void *data); |
| 141 | 141 | ||
| 142 | static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info, | 142 | static inline int |
| 143 | int attrtype, u8 value) | 143 | rocker_tlv_put_u8(struct rocker_desc_info *desc_info, int attrtype, u8 value) |
| 144 | { | 144 | { |
| 145 | return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value); | 145 | u8 tmp = value; /* work around GCC PR81715 */ |
| 146 | |||
| 147 | return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp); | ||
| 146 | } | 148 | } |
| 147 | 149 | ||
| 148 | static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info, | 150 | static inline int |
| 149 | int attrtype, u16 value) | 151 | rocker_tlv_put_u16(struct rocker_desc_info *desc_info, int attrtype, u16 value) |
| 150 | { | 152 | { |
| 151 | return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value); | 153 | u16 tmp = value; |
| 154 | |||
| 155 | return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp); | ||
| 152 | } | 156 | } |
| 153 | 157 | ||
| 154 | static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info, | 158 | static inline int |
| 155 | int attrtype, __be16 value) | 159 | rocker_tlv_put_be16(struct rocker_desc_info *desc_info, int attrtype, __be16 value) |
| 156 | { | 160 | { |
| 157 | return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value); | 161 | __be16 tmp = value; |
| 162 | |||
| 163 | return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp); | ||
| 158 | } | 164 | } |
| 159 | 165 | ||
| 160 | static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info, | 166 | static inline int |
| 161 | int attrtype, u32 value) | 167 | rocker_tlv_put_u32(struct rocker_desc_info *desc_info, int attrtype, u32 value) |
| 162 | { | 168 | { |
| 163 | return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value); | 169 | u32 tmp = value; |
| 170 | |||
| 171 | return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp); | ||
| 164 | } | 172 | } |
| 165 | 173 | ||
| 166 | static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info, | 174 | static inline int |
| 167 | int attrtype, __be32 value) | 175 | rocker_tlv_put_be32(struct rocker_desc_info *desc_info, int attrtype, __be32 value) |
| 168 | { | 176 | { |
| 169 | return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value); | 177 | __be32 tmp = value; |
| 178 | |||
| 179 | return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp); | ||
| 170 | } | 180 | } |
| 171 | 181 | ||
| 172 | static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info, | 182 | static inline int |
| 173 | int attrtype, u64 value) | 183 | rocker_tlv_put_u64(struct rocker_desc_info *desc_info, int attrtype, u64 value) |
| 174 | { | 184 | { |
| 175 | return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value); | 185 | u64 tmp = value; |
| 186 | |||
| 187 | return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp); | ||
| 176 | } | 188 | } |
| 177 | 189 | ||
| 178 | static inline struct rocker_tlv * | 190 | static inline struct rocker_tlv * |
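The rocker_tlv.h hunk copies each by-value parameter into a local (u8 tmp = value; and so on) and passes the local's address instead of the parameter's, citing GCC PR81715 in its comment. A user-space sketch of the same pattern; put_attr and put_u16 are invented stand-ins for rocker_tlv_put() and its wrappers.

#include <stdio.h>
#include <string.h>

static int put_attr(void *dst, const void *data, size_t len)
{
	memcpy(dst, data, len);
	return 0;
}

/* Copy the parameter into a local and pass the local's address, mirroring
 * the rocker change that works around the GCC issue noted in the hunk. */
static inline int put_u16(void *dst, unsigned short value)
{
	unsigned short tmp = value;

	return put_attr(dst, &tmp, sizeof(tmp));
}

int main(void)
{
	unsigned short buf;

	put_u16(&buf, 0x1234);
	printf("0x%x\n", buf);	/* 0x1234 */
	return 0;
}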
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index dd6a2f9791cc..5efef8001edf 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | |||
| @@ -511,6 +511,7 @@ static struct platform_driver dwc_eth_dwmac_driver = { | |||
| 511 | .remove = dwc_eth_dwmac_remove, | 511 | .remove = dwc_eth_dwmac_remove, |
| 512 | .driver = { | 512 | .driver = { |
| 513 | .name = "dwc-eth-dwmac", | 513 | .name = "dwc-eth-dwmac", |
| 514 | .pm = &stmmac_pltfr_pm_ops, | ||
| 514 | .of_match_table = dwc_eth_dwmac_match, | 515 | .of_match_table = dwc_eth_dwmac_match, |
| 515 | }, | 516 | }, |
| 516 | }; | 517 | }; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 99823f54696a..13133b30b575 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | |||
| @@ -83,6 +83,117 @@ struct rk_priv_data { | |||
| 83 | (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ | 83 | (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \ |
| 84 | ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) | 84 | ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE)) |
| 85 | 85 | ||
| 86 | #define RK3128_GRF_MAC_CON0 0x0168 | ||
| 87 | #define RK3128_GRF_MAC_CON1 0x016c | ||
| 88 | |||
| 89 | /* RK3128_GRF_MAC_CON0 */ | ||
| 90 | #define RK3128_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14) | ||
| 91 | #define RK3128_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14) | ||
| 92 | #define RK3128_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15) | ||
| 93 | #define RK3128_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15) | ||
| 94 | #define RK3128_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7) | ||
| 95 | #define RK3128_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0) | ||
| 96 | |||
| 97 | /* RK3128_GRF_MAC_CON1 */ | ||
| 98 | #define RK3128_GMAC_PHY_INTF_SEL_RGMII \ | ||
| 99 | (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8)) | ||
| 100 | #define RK3128_GMAC_PHY_INTF_SEL_RMII \ | ||
| 101 | (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8)) | ||
| 102 | #define RK3128_GMAC_FLOW_CTRL GRF_BIT(9) | ||
| 103 | #define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9) | ||
| 104 | #define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10) | ||
| 105 | #define RK3128_GMAC_SPEED_100M GRF_BIT(10) | ||
| 106 | #define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11) | ||
| 107 | #define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11) | ||
| 108 | #define RK3128_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13)) | ||
| 109 | #define RK3128_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13)) | ||
| 110 | #define RK3128_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13)) | ||
| 111 | #define RK3128_GMAC_RMII_MODE GRF_BIT(14) | ||
| 112 | #define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14) | ||
| 113 | |||
| 114 | static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv, | ||
| 115 | int tx_delay, int rx_delay) | ||
| 116 | { | ||
| 117 | struct device *dev = &bsp_priv->pdev->dev; | ||
| 118 | |||
| 119 | if (IS_ERR(bsp_priv->grf)) { | ||
| 120 | dev_err(dev, "Missing rockchip,grf property\n"); | ||
| 121 | return; | ||
| 122 | } | ||
| 123 | |||
| 124 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
| 125 | RK3128_GMAC_PHY_INTF_SEL_RGMII | | ||
| 126 | RK3128_GMAC_RMII_MODE_CLR); | ||
| 127 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0, | ||
| 128 | DELAY_ENABLE(RK3128, tx_delay, rx_delay) | | ||
| 129 | RK3128_GMAC_CLK_RX_DL_CFG(rx_delay) | | ||
| 130 | RK3128_GMAC_CLK_TX_DL_CFG(tx_delay)); | ||
| 131 | } | ||
| 132 | |||
| 133 | static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv) | ||
| 134 | { | ||
| 135 | struct device *dev = &bsp_priv->pdev->dev; | ||
| 136 | |||
| 137 | if (IS_ERR(bsp_priv->grf)) { | ||
| 138 | dev_err(dev, "Missing rockchip,grf property\n"); | ||
| 139 | return; | ||
| 140 | } | ||
| 141 | |||
| 142 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
| 143 | RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE); | ||
| 144 | } | ||
| 145 | |||
| 146 | static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed) | ||
| 147 | { | ||
| 148 | struct device *dev = &bsp_priv->pdev->dev; | ||
| 149 | |||
| 150 | if (IS_ERR(bsp_priv->grf)) { | ||
| 151 | dev_err(dev, "Missing rockchip,grf property\n"); | ||
| 152 | return; | ||
| 153 | } | ||
| 154 | |||
| 155 | if (speed == 10) | ||
| 156 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
| 157 | RK3128_GMAC_CLK_2_5M); | ||
| 158 | else if (speed == 100) | ||
| 159 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
| 160 | RK3128_GMAC_CLK_25M); | ||
| 161 | else if (speed == 1000) | ||
| 162 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
| 163 | RK3128_GMAC_CLK_125M); | ||
| 164 | else | ||
| 165 | dev_err(dev, "unknown speed value for RGMII! speed=%d", speed); | ||
| 166 | } | ||
| 167 | |||
| 168 | static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed) | ||
| 169 | { | ||
| 170 | struct device *dev = &bsp_priv->pdev->dev; | ||
| 171 | |||
| 172 | if (IS_ERR(bsp_priv->grf)) { | ||
| 173 | dev_err(dev, "Missing rockchip,grf property\n"); | ||
| 174 | return; | ||
| 175 | } | ||
| 176 | |||
| 177 | if (speed == 10) { | ||
| 178 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
| 179 | RK3128_GMAC_RMII_CLK_2_5M | | ||
| 180 | RK3128_GMAC_SPEED_10M); | ||
| 181 | } else if (speed == 100) { | ||
| 182 | regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1, | ||
| 183 | RK3128_GMAC_RMII_CLK_25M | | ||
| 184 | RK3128_GMAC_SPEED_100M); | ||
| 185 | } else { | ||
| 186 | dev_err(dev, "unknown speed value for RMII! speed=%d", speed); | ||
| 187 | } | ||
| 188 | } | ||
| 189 | |||
| 190 | static const struct rk_gmac_ops rk3128_ops = { | ||
| 191 | .set_to_rgmii = rk3128_set_to_rgmii, | ||
| 192 | .set_to_rmii = rk3128_set_to_rmii, | ||
| 193 | .set_rgmii_speed = rk3128_set_rgmii_speed, | ||
| 194 | .set_rmii_speed = rk3128_set_rmii_speed, | ||
| 195 | }; | ||
| 196 | |||
| 86 | #define RK3228_GRF_MAC_CON0 0x0900 | 197 | #define RK3228_GRF_MAC_CON0 0x0900 |
| 87 | #define RK3228_GRF_MAC_CON1 0x0904 | 198 | #define RK3228_GRF_MAC_CON1 0x0904 |
| 88 | 199 | ||
| @@ -1313,6 +1424,7 @@ static int rk_gmac_resume(struct device *dev) | |||
| 1313 | static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); | 1424 | static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume); |
| 1314 | 1425 | ||
| 1315 | static const struct of_device_id rk_gmac_dwmac_match[] = { | 1426 | static const struct of_device_id rk_gmac_dwmac_match[] = { |
| 1427 | { .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops }, | ||
| 1316 | { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, | 1428 | { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops }, |
| 1317 | { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, | 1429 | { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops }, |
| 1318 | { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops }, | 1430 | { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops }, |
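The new RK3128 glue programs the GRF through regmap_write() with values built from GRF_BIT/GRF_CLR_BIT/HIWORD_UPDATE, relying on the Rockchip convention that the upper 16 bits of a GRF register act as a write-enable mask for the lower 16, so fields can be updated without a read-modify-write. A small sketch simulating that register semantics; the register model and the exact HIWORD_UPDATE layout here are my reading of the convention, not the driver's code.

#include <stdint.h>
#include <stdio.h>

/* Bits [31:16] select which of bits [15:0] the write is allowed to touch. */
#define HIWORD_UPDATE(val, mask, shift) \
	(((uint32_t)(val) << (shift)) | ((uint32_t)(mask) << ((shift) + 16)))

static void grf_write(uint32_t *reg, uint32_t hiword_val)
{
	uint32_t mask = hiword_val >> 16;

	*reg = (*reg & ~mask) | (hiword_val & mask);
}

int main(void)
{
	uint32_t mac_con1 = 0;

	/* e.g. set a 2-bit clock-select field at bits [13:12] to 0x2 */
	grf_write(&mac_con1, HIWORD_UPDATE(0x2, 0x3, 12));
	printf("0x%08x\n", mac_con1);	/* 0x00002000 */
	return 0;
}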
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index c4407e8e39a3..2f7d7ec59962 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | |||
| @@ -296,6 +296,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) | |||
| 296 | { | 296 | { |
| 297 | void __iomem *ioaddr = hw->pcsr; | 297 | void __iomem *ioaddr = hw->pcsr; |
| 298 | unsigned int pmt = 0; | 298 | unsigned int pmt = 0; |
| 299 | u32 config; | ||
| 299 | 300 | ||
| 300 | if (mode & WAKE_MAGIC) { | 301 | if (mode & WAKE_MAGIC) { |
| 301 | pr_debug("GMAC: WOL Magic frame\n"); | 302 | pr_debug("GMAC: WOL Magic frame\n"); |
| @@ -306,6 +307,12 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode) | |||
| 306 | pmt |= power_down | global_unicast | wake_up_frame_en; | 307 | pmt |= power_down | global_unicast | wake_up_frame_en; |
| 307 | } | 308 | } |
| 308 | 309 | ||
| 310 | if (pmt) { | ||
| 311 | /* The receiver must be enabled for WOL before powering down */ | ||
| 312 | config = readl(ioaddr + GMAC_CONFIG); | ||
| 313 | config |= GMAC_CONFIG_RE; | ||
| 314 | writel(config, ioaddr + GMAC_CONFIG); | ||
| 315 | } | ||
| 309 | writel(pmt, ioaddr + GMAC_PMT); | 316 | writel(pmt, ioaddr + GMAC_PMT); |
| 310 | } | 317 | } |
| 311 | 318 | ||
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index a404552555d4..e365866600ba 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
| @@ -120,7 +120,7 @@ struct ppp { | |||
| 120 | int n_channels; /* how many channels are attached 54 */ | 120 | int n_channels; /* how many channels are attached 54 */ |
| 121 | spinlock_t rlock; /* lock for receive side 58 */ | 121 | spinlock_t rlock; /* lock for receive side 58 */ |
| 122 | spinlock_t wlock; /* lock for transmit side 5c */ | 122 | spinlock_t wlock; /* lock for transmit side 5c */ |
| 123 | int *xmit_recursion __percpu; /* xmit recursion detect */ | 123 | int __percpu *xmit_recursion; /* xmit recursion detect */ |
| 124 | int mru; /* max receive unit 60 */ | 124 | int mru; /* max receive unit 60 */ |
| 125 | unsigned int flags; /* control bits 64 */ | 125 | unsigned int flags; /* control bits 64 */ |
| 126 | unsigned int xstate; /* transmit state bits 68 */ | 126 | unsigned int xstate; /* transmit state bits 68 */ |
| @@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) | |||
| 1339 | 1339 | ||
| 1340 | static int ppp_dev_init(struct net_device *dev) | 1340 | static int ppp_dev_init(struct net_device *dev) |
| 1341 | { | 1341 | { |
| 1342 | struct ppp *ppp; | ||
| 1343 | |||
| 1342 | netdev_lockdep_set_classes(dev); | 1344 | netdev_lockdep_set_classes(dev); |
| 1345 | |||
| 1346 | ppp = netdev_priv(dev); | ||
| 1347 | /* Let the netdevice take a reference on the ppp file. This ensures | ||
| 1348 | * that ppp_destroy_interface() won't run before the device gets | ||
| 1349 | * unregistered. | ||
| 1350 | */ | ||
| 1351 | atomic_inc(&ppp->file.refcnt); | ||
| 1352 | |||
| 1343 | return 0; | 1353 | return 0; |
| 1344 | } | 1354 | } |
| 1345 | 1355 | ||
| @@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev) | |||
| 1362 | wake_up_interruptible(&ppp->file.rwait); | 1372 | wake_up_interruptible(&ppp->file.rwait); |
| 1363 | } | 1373 | } |
| 1364 | 1374 | ||
| 1375 | static void ppp_dev_priv_destructor(struct net_device *dev) | ||
| 1376 | { | ||
| 1377 | struct ppp *ppp; | ||
| 1378 | |||
| 1379 | ppp = netdev_priv(dev); | ||
| 1380 | if (atomic_dec_and_test(&ppp->file.refcnt)) | ||
| 1381 | ppp_destroy_interface(ppp); | ||
| 1382 | } | ||
| 1383 | |||
| 1365 | static const struct net_device_ops ppp_netdev_ops = { | 1384 | static const struct net_device_ops ppp_netdev_ops = { |
| 1366 | .ndo_init = ppp_dev_init, | 1385 | .ndo_init = ppp_dev_init, |
| 1367 | .ndo_uninit = ppp_dev_uninit, | 1386 | .ndo_uninit = ppp_dev_uninit, |
| @@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev) | |||
| 1387 | dev->tx_queue_len = 3; | 1406 | dev->tx_queue_len = 3; |
| 1388 | dev->type = ARPHRD_PPP; | 1407 | dev->type = ARPHRD_PPP; |
| 1389 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; | 1408 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; |
| 1409 | dev->priv_destructor = ppp_dev_priv_destructor; | ||
| 1390 | netif_keep_dst(dev); | 1410 | netif_keep_dst(dev); |
| 1391 | } | 1411 | } |
| 1392 | 1412 | ||
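The ppp_generic.c change has ndo_init take an extra reference on the ppp file and a new priv_destructor drop it, so ppp_destroy_interface() cannot run while the net_device still points at the ppp structure. A toy user-space model of that two-owner refcount; the structures and the ppp_put helper are simplified stand-ins, not the driver's atomics.

#include <stdio.h>
#include <stdlib.h>

struct ppp_file { int refcnt; };
struct ppp { struct ppp_file file; };

/* The file side and the netdev side each hold one reference; whichever
 * drops last frees the structure (mirrors ppp_dev_init/priv_destructor). */
static void ppp_put(struct ppp *ppp)
{
	if (--ppp->file.refcnt == 0) {
		printf("destroying ppp\n");
		free(ppp);
	}
}

int main(void)
{
	struct ppp *ppp = calloc(1, sizeof(*ppp));

	ppp->file.refcnt = 1;	/* reference held by the ppp file */
	ppp->file.refcnt++;	/* netdev takes its own reference in ndo_init */

	ppp_put(ppp);	/* netdev unregistered: priv_destructor path */
	ppp_put(ppp);	/* file released: now the struct is actually freed */
	return 0;
}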
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 3c9985f29950..5ce580f413b9 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -1496,11 +1496,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
| 1496 | switch (tun->flags & TUN_TYPE_MASK) { | 1496 | switch (tun->flags & TUN_TYPE_MASK) { |
| 1497 | case IFF_TUN: | 1497 | case IFF_TUN: |
| 1498 | if (tun->flags & IFF_NO_PI) { | 1498 | if (tun->flags & IFF_NO_PI) { |
| 1499 | switch (skb->data[0] & 0xf0) { | 1499 | u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0; |
| 1500 | case 0x40: | 1500 | |
| 1501 | switch (ip_version) { | ||
| 1502 | case 4: | ||
| 1501 | pi.proto = htons(ETH_P_IP); | 1503 | pi.proto = htons(ETH_P_IP); |
| 1502 | break; | 1504 | break; |
| 1503 | case 0x60: | 1505 | case 6: |
| 1504 | pi.proto = htons(ETH_P_IPV6); | 1506 | pi.proto = htons(ETH_P_IPV6); |
| 1505 | break; | 1507 | break; |
| 1506 | default: | 1508 | default: |
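The tun.c fix reads the IP version from the top nibble of the first byte only when the frame is non-empty, instead of switching on skb->data[0] of a possibly zero-length skb. A stand-alone sketch of that guarded protocol guess; guess_proto is an illustrative helper and the ETH_P_* values are just the standard EtherType numbers written out.

#include <stdint.h>
#include <stdio.h>

/* With IFF_NO_PI the protocol must be inferred from the packet itself;
 * an empty frame yields version 0 and falls through to "unknown". */
static uint16_t guess_proto(const uint8_t *data, size_t len)
{
	uint8_t ip_version = len ? data[0] >> 4 : 0;

	switch (ip_version) {
	case 4:
		return 0x0800;	/* ETH_P_IP */
	case 6:
		return 0x86DD;	/* ETH_P_IPV6 */
	default:
		return 0;	/* unknown / drop */
	}
}

int main(void)
{
	uint8_t v4hdr[] = { 0x45, 0x00 };

	printf("0x%04x\n", guess_proto(v4hdr, sizeof(v4hdr)));	/* 0x0800 */
	printf("0x%04x\n", guess_proto(v4hdr, 0));		/* 0x0000 */
	return 0;
}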
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 8ab281b478f2..52ea80bcd639 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
| @@ -54,11 +54,19 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc) | |||
| 54 | desc->bInterfaceProtocol == 3); | 54 | desc->bInterfaceProtocol == 3); |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | static int is_novatel_rndis(struct usb_interface_descriptor *desc) | ||
| 58 | { | ||
| 59 | return (desc->bInterfaceClass == USB_CLASS_MISC && | ||
| 60 | desc->bInterfaceSubClass == 4 && | ||
| 61 | desc->bInterfaceProtocol == 1); | ||
| 62 | } | ||
| 63 | |||
| 57 | #else | 64 | #else |
| 58 | 65 | ||
| 59 | #define is_rndis(desc) 0 | 66 | #define is_rndis(desc) 0 |
| 60 | #define is_activesync(desc) 0 | 67 | #define is_activesync(desc) 0 |
| 61 | #define is_wireless_rndis(desc) 0 | 68 | #define is_wireless_rndis(desc) 0 |
| 69 | #define is_novatel_rndis(desc) 0 | ||
| 62 | 70 | ||
| 63 | #endif | 71 | #endif |
| 64 | 72 | ||
| @@ -150,7 +158,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf) | |||
| 150 | */ | 158 | */ |
| 151 | rndis = (is_rndis(&intf->cur_altsetting->desc) || | 159 | rndis = (is_rndis(&intf->cur_altsetting->desc) || |
| 152 | is_activesync(&intf->cur_altsetting->desc) || | 160 | is_activesync(&intf->cur_altsetting->desc) || |
| 153 | is_wireless_rndis(&intf->cur_altsetting->desc)); | 161 | is_wireless_rndis(&intf->cur_altsetting->desc) || |
| 162 | is_novatel_rndis(&intf->cur_altsetting->desc)); | ||
| 154 | 163 | ||
| 155 | memset(info, 0, sizeof(*info)); | 164 | memset(info, 0, sizeof(*info)); |
| 156 | info->control = intf; | 165 | info->control = intf; |
| @@ -547,9 +556,11 @@ static const struct driver_info wwan_info = { | |||
| 547 | #define REALTEK_VENDOR_ID 0x0bda | 556 | #define REALTEK_VENDOR_ID 0x0bda |
| 548 | #define SAMSUNG_VENDOR_ID 0x04e8 | 557 | #define SAMSUNG_VENDOR_ID 0x04e8 |
| 549 | #define LENOVO_VENDOR_ID 0x17ef | 558 | #define LENOVO_VENDOR_ID 0x17ef |
| 559 | #define LINKSYS_VENDOR_ID 0x13b1 | ||
| 550 | #define NVIDIA_VENDOR_ID 0x0955 | 560 | #define NVIDIA_VENDOR_ID 0x0955 |
| 551 | #define HP_VENDOR_ID 0x03f0 | 561 | #define HP_VENDOR_ID 0x03f0 |
| 552 | #define MICROSOFT_VENDOR_ID 0x045e | 562 | #define MICROSOFT_VENDOR_ID 0x045e |
| 563 | #define UBLOX_VENDOR_ID 0x1546 | ||
| 553 | 564 | ||
| 554 | static const struct usb_device_id products[] = { | 565 | static const struct usb_device_id products[] = { |
| 555 | /* BLACKLIST !! | 566 | /* BLACKLIST !! |
| @@ -737,6 +748,15 @@ static const struct usb_device_id products[] = { | |||
| 737 | .driver_info = 0, | 748 | .driver_info = 0, |
| 738 | }, | 749 | }, |
| 739 | 750 | ||
| 751 | #if IS_ENABLED(CONFIG_USB_RTL8152) | ||
| 752 | /* Linksys USB3GIGV1 Ethernet Adapter */ | ||
| 753 | { | ||
| 754 | USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM, | ||
| 755 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
| 756 | .driver_info = 0, | ||
| 757 | }, | ||
| 758 | #endif | ||
| 759 | |||
| 740 | /* ThinkPad USB-C Dock (based on Realtek RTL8153) */ | 760 | /* ThinkPad USB-C Dock (based on Realtek RTL8153) */ |
| 741 | { | 761 | { |
| 742 | USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM, | 762 | USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM, |
| @@ -850,6 +870,18 @@ static const struct usb_device_id products[] = { | |||
| 850 | USB_CDC_PROTO_NONE), | 870 | USB_CDC_PROTO_NONE), |
| 851 | .driver_info = (unsigned long)&zte_cdc_info, | 871 | .driver_info = (unsigned long)&zte_cdc_info, |
| 852 | }, { | 872 | }, { |
| 873 | /* U-blox TOBY-L2 */ | ||
| 874 | USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM, | ||
| 875 | USB_CDC_SUBCLASS_ETHERNET, | ||
| 876 | USB_CDC_PROTO_NONE), | ||
| 877 | .driver_info = (unsigned long)&wwan_info, | ||
| 878 | }, { | ||
| 879 | /* U-blox SARA-U2 */ | ||
| 880 | USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM, | ||
| 881 | USB_CDC_SUBCLASS_ETHERNET, | ||
| 882 | USB_CDC_PROTO_NONE), | ||
| 883 | .driver_info = (unsigned long)&wwan_info, | ||
| 884 | }, { | ||
| 853 | USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, | 885 | USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, |
| 854 | USB_CDC_PROTO_NONE), | 886 | USB_CDC_PROTO_NONE), |
| 855 | .driver_info = (unsigned long) &cdc_info, | 887 | .driver_info = (unsigned long) &cdc_info, |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ceb78e2ea4f0..941ece08ba78 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -613,6 +613,7 @@ enum rtl8152_flags { | |||
| 613 | #define VENDOR_ID_MICROSOFT 0x045e | 613 | #define VENDOR_ID_MICROSOFT 0x045e |
| 614 | #define VENDOR_ID_SAMSUNG 0x04e8 | 614 | #define VENDOR_ID_SAMSUNG 0x04e8 |
| 615 | #define VENDOR_ID_LENOVO 0x17ef | 615 | #define VENDOR_ID_LENOVO 0x17ef |
| 616 | #define VENDOR_ID_LINKSYS 0x13b1 | ||
| 616 | #define VENDOR_ID_NVIDIA 0x0955 | 617 | #define VENDOR_ID_NVIDIA 0x0955 |
| 617 | 618 | ||
| 618 | #define MCU_TYPE_PLA 0x0100 | 619 | #define MCU_TYPE_PLA 0x0100 |
| @@ -5316,6 +5317,7 @@ static const struct usb_device_id rtl8152_table[] = { | |||
| 5316 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, | 5317 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)}, |
| 5317 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)}, | 5318 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)}, |
| 5318 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, | 5319 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, |
| 5320 | {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)}, | ||
| 5319 | {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, | 5321 | {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, |
| 5320 | {} | 5322 | {} |
| 5321 | }; | 5323 | }; |
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index a151f267aebb..b807c91abe1d 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c | |||
| @@ -632,6 +632,10 @@ static const struct usb_device_id products [] = { | |||
| 632 | /* RNDIS for tethering */ | 632 | /* RNDIS for tethering */ |
| 633 | USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), | 633 | USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3), |
| 634 | .driver_info = (unsigned long) &rndis_info, | 634 | .driver_info = (unsigned long) &rndis_info, |
| 635 | }, { | ||
| 636 | /* Novatel Verizon USB730L */ | ||
| 637 | USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1), | ||
| 638 | .driver_info = (unsigned long) &rndis_info, | ||
| 635 | }, | 639 | }, |
| 636 | { }, // END | 640 | { }, // END |
| 637 | }; | 641 | }; |
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c index bc1633945a56..195dafb98131 100644 --- a/drivers/net/wireless/ath/ath10k/pci.c +++ b/drivers/net/wireless/ath/ath10k/pci.c | |||
| @@ -3396,9 +3396,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev) | |||
| 3396 | 3396 | ||
| 3397 | MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); | 3397 | MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table); |
| 3398 | 3398 | ||
| 3399 | #ifdef CONFIG_PM | 3399 | static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev) |
| 3400 | |||
| 3401 | static int ath10k_pci_pm_suspend(struct device *dev) | ||
| 3402 | { | 3400 | { |
| 3403 | struct ath10k *ar = dev_get_drvdata(dev); | 3401 | struct ath10k *ar = dev_get_drvdata(dev); |
| 3404 | int ret; | 3402 | int ret; |
| @@ -3414,7 +3412,7 @@ static int ath10k_pci_pm_suspend(struct device *dev) | |||
| 3414 | return ret; | 3412 | return ret; |
| 3415 | } | 3413 | } |
| 3416 | 3414 | ||
| 3417 | static int ath10k_pci_pm_resume(struct device *dev) | 3415 | static __maybe_unused int ath10k_pci_pm_resume(struct device *dev) |
| 3418 | { | 3416 | { |
| 3419 | struct ath10k *ar = dev_get_drvdata(dev); | 3417 | struct ath10k *ar = dev_get_drvdata(dev); |
| 3420 | int ret; | 3418 | int ret; |
| @@ -3433,7 +3431,6 @@ static int ath10k_pci_pm_resume(struct device *dev) | |||
| 3433 | static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops, | 3431 | static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops, |
| 3434 | ath10k_pci_pm_suspend, | 3432 | ath10k_pci_pm_suspend, |
| 3435 | ath10k_pci_pm_resume); | 3433 | ath10k_pci_pm_resume); |
| 3436 | #endif | ||
| 3437 | 3434 | ||
| 3438 | static struct pci_driver ath10k_pci_driver = { | 3435 | static struct pci_driver ath10k_pci_driver = { |
| 3439 | .name = "ath10k_pci", | 3436 | .name = "ath10k_pci", |
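The ath10k change drops the #ifdef CONFIG_PM block and marks the suspend/resume callbacks __maybe_unused, so they are always compile-tested but produce no "defined but not used" warning when PM is configured out. A tiny sketch of that attribute pattern; the macro definition below approximates the kernel's and pm_suspend_cb is a made-up callback.

#include <stdio.h>

#define __maybe_unused __attribute__((unused))

/* Stays in the build in every configuration; the attribute silences the
 * unused-function warning when nothing references it. */
static __maybe_unused int pm_suspend_cb(void *dev)
{
	(void)dev;
	return 0;
}

int main(void)
{
	(void)pm_suspend_cb;	/* in the driver, SIMPLE_DEV_PM_OPS picks it up */
	printf("builds cleanly whether or not the callback is referenced\n");
	return 0;
}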
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index aaed4ab503ad..4157c90ad973 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
| @@ -980,7 +980,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, | |||
| 980 | 980 | ||
| 981 | eth_broadcast_addr(params_le->bssid); | 981 | eth_broadcast_addr(params_le->bssid); |
| 982 | params_le->bss_type = DOT11_BSSTYPE_ANY; | 982 | params_le->bss_type = DOT11_BSSTYPE_ANY; |
| 983 | params_le->scan_type = 0; | 983 | params_le->scan_type = BRCMF_SCANTYPE_ACTIVE; |
| 984 | params_le->channel_num = 0; | 984 | params_le->channel_num = 0; |
| 985 | params_le->nprobes = cpu_to_le32(-1); | 985 | params_le->nprobes = cpu_to_le32(-1); |
| 986 | params_le->active_time = cpu_to_le32(-1); | 986 | params_le->active_time = cpu_to_le32(-1); |
| @@ -988,12 +988,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, | |||
| 988 | params_le->home_time = cpu_to_le32(-1); | 988 | params_le->home_time = cpu_to_le32(-1); |
| 989 | memset(¶ms_le->ssid_le, 0, sizeof(params_le->ssid_le)); | 989 | memset(¶ms_le->ssid_le, 0, sizeof(params_le->ssid_le)); |
| 990 | 990 | ||
| 991 | /* if request is null exit so it will be all channel broadcast scan */ | ||
| 992 | if (!request) | ||
| 993 | return; | ||
| 994 | |||
| 995 | n_ssids = request->n_ssids; | 991 | n_ssids = request->n_ssids; |
| 996 | n_channels = request->n_channels; | 992 | n_channels = request->n_channels; |
| 993 | |||
| 997 | /* Copy channel array if applicable */ | 994 | /* Copy channel array if applicable */ |
| 998 | brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n", | 995 | brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n", |
| 999 | n_channels); | 996 | n_channels); |
| @@ -1030,16 +1027,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg, | |||
| 1030 | ptr += sizeof(ssid_le); | 1027 | ptr += sizeof(ssid_le); |
| 1031 | } | 1028 | } |
| 1032 | } else { | 1029 | } else { |
| 1033 | brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids); | 1030 | brcmf_dbg(SCAN, "Performing passive scan\n"); |
| 1034 | if ((request->ssids) && request->ssids->ssid_len) { | 1031 | params_le->scan_type = BRCMF_SCANTYPE_PASSIVE; |
| 1035 | brcmf_dbg(SCAN, "SSID %s len=%d\n", | ||
| 1036 | params_le->ssid_le.SSID, | ||
| 1037 | request->ssids->ssid_len); | ||
| 1038 | params_le->ssid_le.SSID_len = | ||
| 1039 | cpu_to_le32(request->ssids->ssid_len); | ||
| 1040 | memcpy(¶ms_le->ssid_le.SSID, request->ssids->ssid, | ||
| 1041 | request->ssids->ssid_len); | ||
| 1042 | } | ||
| 1043 | } | 1032 | } |
| 1044 | /* Adding mask to channel numbers */ | 1033 | /* Adding mask to channel numbers */ |
| 1045 | params_le->channel_num = | 1034 | params_le->channel_num = |
| @@ -3162,6 +3151,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, | |||
| 3162 | struct brcmf_cfg80211_info *cfg = ifp->drvr->config; | 3151 | struct brcmf_cfg80211_info *cfg = ifp->drvr->config; |
| 3163 | s32 status; | 3152 | s32 status; |
| 3164 | struct brcmf_escan_result_le *escan_result_le; | 3153 | struct brcmf_escan_result_le *escan_result_le; |
| 3154 | u32 escan_buflen; | ||
| 3165 | struct brcmf_bss_info_le *bss_info_le; | 3155 | struct brcmf_bss_info_le *bss_info_le; |
| 3166 | struct brcmf_bss_info_le *bss = NULL; | 3156 | struct brcmf_bss_info_le *bss = NULL; |
| 3167 | u32 bi_length; | 3157 | u32 bi_length; |
| @@ -3181,11 +3171,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, | |||
| 3181 | 3171 | ||
| 3182 | if (status == BRCMF_E_STATUS_PARTIAL) { | 3172 | if (status == BRCMF_E_STATUS_PARTIAL) { |
| 3183 | brcmf_dbg(SCAN, "ESCAN Partial result\n"); | 3173 | brcmf_dbg(SCAN, "ESCAN Partial result\n"); |
| 3174 | if (e->datalen < sizeof(*escan_result_le)) { | ||
| 3175 | brcmf_err("invalid event data length\n"); | ||
| 3176 | goto exit; | ||
| 3177 | } | ||
| 3184 | escan_result_le = (struct brcmf_escan_result_le *) data; | 3178 | escan_result_le = (struct brcmf_escan_result_le *) data; |
| 3185 | if (!escan_result_le) { | 3179 | if (!escan_result_le) { |
| 3186 | brcmf_err("Invalid escan result (NULL pointer)\n"); | 3180 | brcmf_err("Invalid escan result (NULL pointer)\n"); |
| 3187 | goto exit; | 3181 | goto exit; |
| 3188 | } | 3182 | } |
| 3183 | escan_buflen = le32_to_cpu(escan_result_le->buflen); | ||
| 3184 | if (escan_buflen > BRCMF_ESCAN_BUF_SIZE || | ||
| 3185 | escan_buflen > e->datalen || | ||
| 3186 | escan_buflen < sizeof(*escan_result_le)) { | ||
| 3187 | brcmf_err("Invalid escan buffer length: %d\n", | ||
| 3188 | escan_buflen); | ||
| 3189 | goto exit; | ||
| 3190 | } | ||
| 3189 | if (le16_to_cpu(escan_result_le->bss_count) != 1) { | 3191 | if (le16_to_cpu(escan_result_le->bss_count) != 1) { |
| 3190 | brcmf_err("Invalid bss_count %d: ignoring\n", | 3192 | brcmf_err("Invalid bss_count %d: ignoring\n", |
| 3191 | escan_result_le->bss_count); | 3193 | escan_result_le->bss_count); |
| @@ -3202,9 +3204,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp, | |||
| 3202 | } | 3204 | } |
| 3203 | 3205 | ||
| 3204 | bi_length = le32_to_cpu(bss_info_le->length); | 3206 | bi_length = le32_to_cpu(bss_info_le->length); |
| 3205 | if (bi_length != (le32_to_cpu(escan_result_le->buflen) - | 3207 | if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) { |
| 3206 | WL_ESCAN_RESULTS_FIXED_SIZE)) { | 3208 | brcmf_err("Ignoring invalid bss_info length: %d\n", |
| 3207 | brcmf_err("Invalid bss_info length %d: ignoring\n", | ||
| 3208 | bi_length); | 3209 | bi_length); |
| 3209 | goto exit; | 3210 | goto exit; |
| 3210 | } | 3211 | } |
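The escan handler now validates the firmware-supplied buflen three ways: it must cover the fixed result header, must not exceed the event payload that actually arrived, and must not exceed the driver's escan buffer. A stand-alone sketch of that untrusted-length check; struct escan_result, ESCAN_BUF_SIZE and check_escan are simplified illustrations, not the brcmfmac definitions.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ESCAN_BUF_SIZE 2048	/* illustrative cap, not the driver constant */

struct escan_result { uint32_t buflen; uint16_t bss_count; };

/* The length reported by the device is untrusted and must be bounded by
 * the header size, the received payload, and the local buffer. */
static int check_escan(const void *data, size_t datalen)
{
	struct escan_result res;

	if (datalen < sizeof(res))
		return -EINVAL;
	memcpy(&res, data, sizeof(res));

	if (res.buflen > ESCAN_BUF_SIZE ||
	    res.buflen > datalen ||
	    res.buflen < sizeof(res))
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct escan_result ok = { .buflen = sizeof(ok), .bss_count = 1 };
	struct escan_result bad = { .buflen = 4096, .bss_count = 1 };

	printf("%d %d\n", check_escan(&ok, sizeof(ok)),
	       check_escan(&bad, sizeof(bad)));	/* 0 -22 */
	return 0;
}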
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index 8391989b1882..e0d22fedb2b4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h | |||
| @@ -45,6 +45,11 @@ | |||
| 45 | #define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff | 45 | #define BRCMF_SCAN_PARAMS_COUNT_MASK 0x0000ffff |
| 46 | #define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16 | 46 | #define BRCMF_SCAN_PARAMS_NSSID_SHIFT 16 |
| 47 | 47 | ||
| 48 | /* scan type definitions */ | ||
| 49 | #define BRCMF_SCANTYPE_DEFAULT 0xFF | ||
| 50 | #define BRCMF_SCANTYPE_ACTIVE 0 | ||
| 51 | #define BRCMF_SCANTYPE_PASSIVE 1 | ||
| 52 | |||
| 48 | #define BRCMF_WSEC_MAX_PSK_LEN 32 | 53 | #define BRCMF_WSEC_MAX_PSK_LEN 32 |
| 49 | #define BRCMF_WSEC_PASSPHRASE BIT(0) | 54 | #define BRCMF_WSEC_PASSPHRASE BIT(0) |
| 50 | 55 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 5de19ea10575..b205a7bfb828 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c | |||
| @@ -2167,7 +2167,7 @@ out: | |||
| 2167 | * 1. We are not using a unified image | 2167 | * 1. We are not using a unified image |
| 2168 | * 2. We are using a unified image but had an error while exiting D3 | 2168 | * 2. We are using a unified image but had an error while exiting D3 |
| 2169 | */ | 2169 | */ |
| 2170 | set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); | 2170 | set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); |
| 2171 | set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); | 2171 | set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status); |
| 2172 | /* | 2172 | /* |
| 2173 | * When switching images we return 1, which causes mac80211 | 2173 | * When switching images we return 1, which causes mac80211 |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 15f2d826bb4b..3bcaa82f59b2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
| @@ -1546,6 +1546,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, | |||
| 1546 | struct iwl_mvm_mc_iter_data *data = _data; | 1546 | struct iwl_mvm_mc_iter_data *data = _data; |
| 1547 | struct iwl_mvm *mvm = data->mvm; | 1547 | struct iwl_mvm *mvm = data->mvm; |
| 1548 | struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; | 1548 | struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; |
| 1549 | struct iwl_host_cmd hcmd = { | ||
| 1550 | .id = MCAST_FILTER_CMD, | ||
| 1551 | .flags = CMD_ASYNC, | ||
| 1552 | .dataflags[0] = IWL_HCMD_DFL_NOCOPY, | ||
| 1553 | }; | ||
| 1549 | int ret, len; | 1554 | int ret, len; |
| 1550 | 1555 | ||
| 1551 | /* if we don't have free ports, mcast frames will be dropped */ | 1556 | /* if we don't have free ports, mcast frames will be dropped */ |
| @@ -1560,7 +1565,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, | |||
| 1560 | memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); | 1565 | memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); |
| 1561 | len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); | 1566 | len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); |
| 1562 | 1567 | ||
| 1563 | ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd); | 1568 | hcmd.len[0] = len; |
| 1569 | hcmd.data[0] = cmd; | ||
| 1570 | |||
| 1571 | ret = iwl_mvm_send_cmd(mvm, &hcmd); | ||
| 1564 | if (ret) | 1572 | if (ret) |
| 1565 | IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); | 1573 | IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); |
| 1566 | } | 1574 | } |
| @@ -1635,6 +1643,12 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, | |||
| 1635 | if (!cmd) | 1643 | if (!cmd) |
| 1636 | goto out; | 1644 | goto out; |
| 1637 | 1645 | ||
| 1646 | if (changed_flags & FIF_ALLMULTI) | ||
| 1647 | cmd->pass_all = !!(*total_flags & FIF_ALLMULTI); | ||
| 1648 | |||
| 1649 | if (cmd->pass_all) | ||
| 1650 | cmd->count = 0; | ||
| 1651 | |||
| 1638 | iwl_mvm_recalc_multicast(mvm); | 1652 | iwl_mvm_recalc_multicast(mvm); |
| 1639 | out: | 1653 | out: |
| 1640 | mutex_unlock(&mvm->mutex); | 1654 | mutex_unlock(&mvm->mutex); |
| @@ -2563,7 +2577,7 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm, | |||
| 2563 | * queues, so we should never get a second deferred | 2577 | * queues, so we should never get a second deferred |
| 2564 | * frame for the RA/TID. | 2578 | * frame for the RA/TID. |
| 2565 | */ | 2579 | */ |
| 2566 | iwl_mvm_start_mac_queues(mvm, info->hw_queue); | 2580 | iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue)); |
| 2567 | ieee80211_free_txskb(mvm->hw, skb); | 2581 | ieee80211_free_txskb(mvm->hw, skb); |
| 2568 | } | 2582 | } |
| 2569 | } | 2583 | } |
| @@ -3975,6 +3989,43 @@ out_unlock: | |||
| 3975 | return ret; | 3989 | return ret; |
| 3976 | } | 3990 | } |
| 3977 | 3991 | ||
| 3992 | static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) | ||
| 3993 | { | ||
| 3994 | if (drop) { | ||
| 3995 | if (iwl_mvm_has_new_tx_api(mvm)) | ||
| 3996 | /* TODO new tx api */ | ||
| 3997 | WARN_ONCE(1, | ||
| 3998 | "Need to implement flush TX queue\n"); | ||
| 3999 | else | ||
| 4000 | iwl_mvm_flush_tx_path(mvm, | ||
| 4001 | iwl_mvm_flushable_queues(mvm) & queues, | ||
| 4002 | 0); | ||
| 4003 | } else { | ||
| 4004 | if (iwl_mvm_has_new_tx_api(mvm)) { | ||
| 4005 | struct ieee80211_sta *sta; | ||
| 4006 | int i; | ||
| 4007 | |||
| 4008 | mutex_lock(&mvm->mutex); | ||
| 4009 | |||
| 4010 | for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { | ||
| 4011 | sta = rcu_dereference_protected( | ||
| 4012 | mvm->fw_id_to_mac_id[i], | ||
| 4013 | lockdep_is_held(&mvm->mutex)); | ||
| 4014 | if (IS_ERR_OR_NULL(sta)) | ||
| 4015 | continue; | ||
| 4016 | |||
| 4017 | iwl_mvm_wait_sta_queues_empty(mvm, | ||
| 4018 | iwl_mvm_sta_from_mac80211(sta)); | ||
| 4019 | } | ||
| 4020 | |||
| 4021 | mutex_unlock(&mvm->mutex); | ||
| 4022 | } else { | ||
| 4023 | iwl_trans_wait_tx_queues_empty(mvm->trans, | ||
| 4024 | queues); | ||
| 4025 | } | ||
| 4026 | } | ||
| 4027 | } | ||
| 4028 | |||
| 3978 | static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, | 4029 | static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, |
| 3979 | struct ieee80211_vif *vif, u32 queues, bool drop) | 4030 | struct ieee80211_vif *vif, u32 queues, bool drop) |
| 3980 | { | 4031 | { |
| @@ -3985,7 +4036,12 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, | |||
| 3985 | int i; | 4036 | int i; |
| 3986 | u32 msk = 0; | 4037 | u32 msk = 0; |
| 3987 | 4038 | ||
| 3988 | if (!vif || vif->type != NL80211_IFTYPE_STATION) | 4039 | if (!vif) { |
| 4040 | iwl_mvm_flush_no_vif(mvm, queues, drop); | ||
| 4041 | return; | ||
| 4042 | } | ||
| 4043 | |||
| 4044 | if (vif->type != NL80211_IFTYPE_STATION) | ||
| 3989 | return; | 4045 | return; |
| 3990 | 4046 | ||
| 3991 | /* Make sure we're done with the deferred traffic before flushing */ | 4047 | /* Make sure we're done with the deferred traffic before flushing */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index ba7bd049d3d4..0fe723ca844e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c | |||
| @@ -661,7 +661,8 @@ static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, | |||
| 661 | (lq_sta->tx_agg_tid_en & BIT(tid)) && | 661 | (lq_sta->tx_agg_tid_en & BIT(tid)) && |
| 662 | (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) { | 662 | (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) { |
| 663 | IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid); | 663 | IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid); |
| 664 | rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta); | 664 | if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0) |
| 665 | tid_data->state = IWL_AGG_QUEUED; | ||
| 665 | } | 666 | } |
| 666 | } | 667 | } |
| 667 | 668 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 67ffd9774712..77f77bc5d083 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | |||
| @@ -672,11 +672,12 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, | |||
| 672 | * If there was a significant jump in the nssn - adjust. | 672 | * If there was a significant jump in the nssn - adjust. |
| 673 | * If the SN is smaller than the NSSN it might need to first go into | 673 | * If the SN is smaller than the NSSN it might need to first go into |
| 674 | * the reorder buffer, in which case we just release up to it and the | 674 | * the reorder buffer, in which case we just release up to it and the |
| 675 | * rest of the function will take of storing it and releasing up to the | 675 | * rest of the function will take care of storing it and releasing up to |
| 676 | * nssn | 676 | * the nssn |
| 677 | */ | 677 | */ |
| 678 | if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, | 678 | if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size, |
| 679 | buffer->buf_size)) { | 679 | buffer->buf_size) || |
| 680 | !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) { | ||
| 680 | u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn; | 681 | u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn; |
| 681 | 682 | ||
| 682 | iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn); | 683 | iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index 50983615dce6..774122fed454 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c | |||
| @@ -555,7 +555,7 @@ static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm) | |||
| 555 | struct iwl_host_cmd cmd = { | 555 | struct iwl_host_cmd cmd = { |
| 556 | .id = SCAN_OFFLOAD_ABORT_CMD, | 556 | .id = SCAN_OFFLOAD_ABORT_CMD, |
| 557 | }; | 557 | }; |
| 558 | u32 status; | 558 | u32 status = CAN_ABORT_STATUS; |
| 559 | 559 | ||
| 560 | ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); | 560 | ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status); |
| 561 | if (ret) | 561 | if (ret) |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 411a2055dc45..c4a343534c5e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c | |||
| @@ -1285,7 +1285,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, | |||
| 1285 | { | 1285 | { |
| 1286 | struct iwl_mvm_add_sta_cmd cmd; | 1286 | struct iwl_mvm_add_sta_cmd cmd; |
| 1287 | int ret; | 1287 | int ret; |
| 1288 | u32 status; | 1288 | u32 status = ADD_STA_SUCCESS; |
| 1289 | 1289 | ||
| 1290 | lockdep_assert_held(&mvm->mutex); | 1290 | lockdep_assert_held(&mvm->mutex); |
| 1291 | 1291 | ||
| @@ -2385,8 +2385,10 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, | |||
| 2385 | if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) | 2385 | if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) |
| 2386 | return -EINVAL; | 2386 | return -EINVAL; |
| 2387 | 2387 | ||
| 2388 | if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) { | 2388 | if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED && |
| 2389 | IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n", | 2389 | mvmsta->tid_data[tid].state != IWL_AGG_OFF) { |
| 2390 | IWL_ERR(mvm, | ||
| 2391 | "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n", | ||
| 2390 | mvmsta->tid_data[tid].state); | 2392 | mvmsta->tid_data[tid].state); |
| 2391 | return -ENXIO; | 2393 | return -ENXIO; |
| 2392 | } | 2394 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index d13893806513..aedabe101cf0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h | |||
| @@ -281,6 +281,7 @@ struct iwl_mvm_vif; | |||
| 281 | * These states relate to a specific RA / TID. | 281 | * These states relate to a specific RA / TID. |
| 282 | * | 282 | * |
| 283 | * @IWL_AGG_OFF: aggregation is not used | 283 | * @IWL_AGG_OFF: aggregation is not used |
| 284 | * @IWL_AGG_QUEUED: aggregation start work has been queued | ||
| 284 | * @IWL_AGG_STARTING: aggregation are starting (between start and oper) | 285 | * @IWL_AGG_STARTING: aggregation are starting (between start and oper) |
| 285 | * @IWL_AGG_ON: aggregation session is up | 286 | * @IWL_AGG_ON: aggregation session is up |
| 286 | * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the | 287 | * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the |
| @@ -290,6 +291,7 @@ struct iwl_mvm_vif; | |||
| 290 | */ | 291 | */ |
| 291 | enum iwl_mvm_agg_state { | 292 | enum iwl_mvm_agg_state { |
| 292 | IWL_AGG_OFF = 0, | 293 | IWL_AGG_OFF = 0, |
| 294 | IWL_AGG_QUEUED, | ||
| 293 | IWL_AGG_STARTING, | 295 | IWL_AGG_STARTING, |
| 294 | IWL_AGG_ON, | 296 | IWL_AGG_ON, |
| 295 | IWL_EMPTYING_HW_QUEUE_ADDBA, | 297 | IWL_EMPTYING_HW_QUEUE_ADDBA, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 8876c2abc440..4d907f60bce9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c | |||
| @@ -529,6 +529,7 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state) | |||
| 529 | 529 | ||
| 530 | lockdep_assert_held(&mvm->mutex); | 530 | lockdep_assert_held(&mvm->mutex); |
| 531 | 531 | ||
| 532 | status = 0; | ||
| 532 | ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, | 533 | ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, |
| 533 | CTDP_CONFIG_CMD), | 534 | CTDP_CONFIG_CMD), |
| 534 | sizeof(cmd), &cmd, &status); | 535 | sizeof(cmd), &cmd, &status); |
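The scan.c, sta.c and tt.c hunks above share one pattern: the status out-parameter handed to iwl_mvm_send_cmd_status()/iwl_mvm_send_cmd_pdu_status() now gets a sane default, because the send path can fail without the firmware ever filling it in while callers go on to compare the stale value. A minimal user-space sketch of that failure mode, assuming a hypothetical send_cmd_status() helper and CMD_OK value rather than the iwlwifi API:

#include <stdio.h>

#define CMD_OK 0u

/* Hypothetical stand-in for the send helper: on failure it returns an
 * error without ever writing to the out-parameter. */
static int send_cmd_status(int fail, unsigned int *status)
{
	if (fail)
		return -1;	/* early exit: *status left untouched */
	*status = CMD_OK;
	return 0;
}

int main(void)
{
	unsigned int status = CMD_OK;	/* the fix: defined value up front */
	int ret = send_cmd_status(1, &status);

	/* Without the initializer this would read an uninitialized value. */
	if (ret || status != CMD_OK)
		printf("command failed, ret=%d status=%u\n", ret, status);
	return 0;
}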
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 172b5e63d3fb..6f2e2af23219 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c | |||
| @@ -564,8 +564,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, | |||
| 564 | case NL80211_IFTYPE_AP: | 564 | case NL80211_IFTYPE_AP: |
| 565 | case NL80211_IFTYPE_ADHOC: | 565 | case NL80211_IFTYPE_ADHOC: |
| 566 | /* | 566 | /* |
| 567 | * Handle legacy hostapd as well, where station will be added | 567 | * Non-bufferable frames use the broadcast station, thus they |
| 568 | * only just before sending the association response. | 568 | * use the probe queue. |
| 569 | * Also take care of the case where we send a deauth to a | 569 | * Also take care of the case where we send a deauth to a |
| 570 | * station that we don't have, or similarly an association | 570 | * station that we don't have, or similarly an association |
| 571 | * response (with non-success status) for a station we can't | 571 | * response (with non-success status) for a station we can't |
| @@ -573,9 +573,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm, | |||
| 573 | * Also, disassociate frames might happen, particular with | 573 | * Also, disassociate frames might happen, particular with |
| 574 | * reason 7 ("Class 3 frame received from nonassociated STA"). | 574 | * reason 7 ("Class 3 frame received from nonassociated STA"). |
| 575 | */ | 575 | */ |
| 576 | if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) || | 576 | if (ieee80211_is_mgmt(fc) && |
| 577 | ieee80211_is_deauth(fc) || ieee80211_is_assoc_resp(fc) || | 577 | (!ieee80211_is_bufferable_mmpdu(fc) || |
| 578 | ieee80211_is_disassoc(fc)) | 578 | ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc))) |
| 579 | return mvm->probe_queue; | 579 | return mvm->probe_queue; |
| 580 | if (info->hw_queue == info->control.vif->cab_queue) | 580 | if (info->hw_queue == info->control.vif->cab_queue) |
| 581 | return mvmvif->cab_queue; | 581 | return mvmvif->cab_queue; |
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c index 856fa6e8327e..a450bc6bc774 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | |||
| @@ -115,6 +115,8 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) | |||
| 115 | 115 | ||
| 116 | vif = qtnf_netdev_get_priv(wdev->netdev); | 116 | vif = qtnf_netdev_get_priv(wdev->netdev); |
| 117 | 117 | ||
| 118 | qtnf_scan_done(vif->mac, true); | ||
| 119 | |||
| 118 | if (qtnf_cmd_send_del_intf(vif)) | 120 | if (qtnf_cmd_send_del_intf(vif)) |
| 119 | pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid, | 121 | pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid, |
| 120 | vif->vifid); | 122 | vif->vifid); |
| @@ -335,6 +337,8 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev) | |||
| 335 | struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); | 337 | struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); |
| 336 | int ret; | 338 | int ret; |
| 337 | 339 | ||
| 340 | qtnf_scan_done(vif->mac, true); | ||
| 341 | |||
| 338 | ret = qtnf_cmd_send_stop_ap(vif); | 342 | ret = qtnf_cmd_send_stop_ap(vif); |
| 339 | if (ret) { | 343 | if (ret) { |
| 340 | pr_err("VIF%u.%u: failed to stop AP operation in FW\n", | 344 | pr_err("VIF%u.%u: failed to stop AP operation in FW\n", |
| @@ -570,8 +574,6 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev, | |||
| 570 | !qtnf_sta_list_lookup(&vif->sta_list, params->mac)) | 574 | !qtnf_sta_list_lookup(&vif->sta_list, params->mac)) |
| 571 | return 0; | 575 | return 0; |
| 572 | 576 | ||
| 573 | qtnf_scan_done(vif->mac, true); | ||
| 574 | |||
| 575 | ret = qtnf_cmd_send_del_sta(vif, params); | 577 | ret = qtnf_cmd_send_del_sta(vif, params); |
| 576 | if (ret) | 578 | if (ret) |
| 577 | pr_err("VIF%u.%u: failed to delete STA %pM\n", | 579 | pr_err("VIF%u.%u: failed to delete STA %pM\n", |
| @@ -1134,8 +1136,9 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev) | |||
| 1134 | } | 1136 | } |
| 1135 | 1137 | ||
| 1136 | vif->sta_state = QTNF_STA_DISCONNECTED; | 1138 | vif->sta_state = QTNF_STA_DISCONNECTED; |
| 1137 | qtnf_scan_done(mac, true); | ||
| 1138 | } | 1139 | } |
| 1140 | |||
| 1141 | qtnf_scan_done(mac, true); | ||
| 1139 | } | 1142 | } |
| 1140 | 1143 | ||
| 1141 | void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif) | 1144 | void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif) |
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h index 6a4af52522b8..66db26613b1f 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h +++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h | |||
| @@ -34,6 +34,9 @@ static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted) | |||
| 34 | .aborted = aborted, | 34 | .aborted = aborted, |
| 35 | }; | 35 | }; |
| 36 | 36 | ||
| 37 | if (timer_pending(&mac->scan_timeout)) | ||
| 38 | del_timer_sync(&mac->scan_timeout); | ||
| 39 | |||
| 37 | mutex_lock(&mac->mac_lock); | 40 | mutex_lock(&mac->mac_lock); |
| 38 | 41 | ||
| 39 | if (mac->scan_req) { | 42 | if (mac->scan_req) { |
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c index 0fc2814eafad..43d2e7fd6e02 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/event.c +++ b/drivers/net/wireless/quantenna/qtnfmac/event.c | |||
| @@ -345,8 +345,6 @@ qtnf_event_handle_scan_complete(struct qtnf_wmac *mac, | |||
| 345 | return -EINVAL; | 345 | return -EINVAL; |
| 346 | } | 346 | } |
| 347 | 347 | ||
| 348 | if (timer_pending(&mac->scan_timeout)) | ||
| 349 | del_timer_sync(&mac->scan_timeout); | ||
| 350 | qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED); | 348 | qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED); |
| 351 | 349 | ||
| 352 | return 0; | 350 | return 0; |
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c index 502e72b7cdcc..69131965a298 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | |||
| @@ -661,14 +661,18 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) | |||
| 661 | struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); | 661 | struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); |
| 662 | dma_addr_t txbd_paddr, skb_paddr; | 662 | dma_addr_t txbd_paddr, skb_paddr; |
| 663 | struct qtnf_tx_bd *txbd; | 663 | struct qtnf_tx_bd *txbd; |
| 664 | unsigned long flags; | ||
| 664 | int len, i; | 665 | int len, i; |
| 665 | u32 info; | 666 | u32 info; |
| 666 | int ret = 0; | 667 | int ret = 0; |
| 667 | 668 | ||
| 669 | spin_lock_irqsave(&priv->tx0_lock, flags); | ||
| 670 | |||
| 668 | if (!qtnf_tx_queue_ready(priv)) { | 671 | if (!qtnf_tx_queue_ready(priv)) { |
| 669 | if (skb->dev) | 672 | if (skb->dev) |
| 670 | netif_stop_queue(skb->dev); | 673 | netif_stop_queue(skb->dev); |
| 671 | 674 | ||
| 675 | spin_unlock_irqrestore(&priv->tx0_lock, flags); | ||
| 672 | return NETDEV_TX_BUSY; | 676 | return NETDEV_TX_BUSY; |
| 673 | } | 677 | } |
| 674 | 678 | ||
| @@ -717,8 +721,10 @@ tx_done: | |||
| 717 | dev_kfree_skb_any(skb); | 721 | dev_kfree_skb_any(skb); |
| 718 | } | 722 | } |
| 719 | 723 | ||
| 720 | qtnf_pcie_data_tx_reclaim(priv); | ||
| 721 | priv->tx_done_count++; | 724 | priv->tx_done_count++; |
| 725 | spin_unlock_irqrestore(&priv->tx0_lock, flags); | ||
| 726 | |||
| 727 | qtnf_pcie_data_tx_reclaim(priv); | ||
| 722 | 728 | ||
| 723 | return NETDEV_TX_OK; | 729 | return NETDEV_TX_OK; |
| 724 | } | 730 | } |
| @@ -1247,6 +1253,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1247 | strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME); | 1253 | strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME); |
| 1248 | init_completion(&bus->request_firmware_complete); | 1254 | init_completion(&bus->request_firmware_complete); |
| 1249 | mutex_init(&bus->bus_lock); | 1255 | mutex_init(&bus->bus_lock); |
| 1256 | spin_lock_init(&pcie_priv->tx0_lock); | ||
| 1250 | spin_lock_init(&pcie_priv->irq_lock); | 1257 | spin_lock_init(&pcie_priv->irq_lock); |
| 1251 | spin_lock_init(&pcie_priv->tx_reclaim_lock); | 1258 | spin_lock_init(&pcie_priv->tx_reclaim_lock); |
| 1252 | 1259 | ||
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h index e76a23716ee0..86ac1ccedb52 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h | |||
| @@ -34,6 +34,8 @@ struct qtnf_pcie_bus_priv { | |||
| 34 | 34 | ||
| 35 | /* lock for tx reclaim operations */ | 35 | /* lock for tx reclaim operations */ |
| 36 | spinlock_t tx_reclaim_lock; | 36 | spinlock_t tx_reclaim_lock; |
| 37 | /* lock for tx0 operations */ | ||
| 38 | spinlock_t tx0_lock; | ||
| 37 | u8 msi_enabled; | 39 | u8 msi_enabled; |
| 38 | int mps; | 40 | int mps; |
| 39 | 41 | ||
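The pcie.c and pcie_bus_priv.h hunks above serialize the TX descriptor path with the new tx0_lock, drop the lock on the early NETDEV_TX_BUSY exit, and move the reclaim call outside the critical section (it takes its own tx_reclaim_lock). A rough sketch of the same shape, using pthread mutexes as a stand-in for spin_lock_irqsave() and a toy four-entry ring instead of the real descriptor queue:

#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 4

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;
static int ring[RING_SIZE];
static int head;

static int queue_ready(void)
{
	return head < RING_SIZE;
}

static void reclaim(void)
{
	/* runs outside tx_lock, like the reclaim call after the patch */
}

static int data_tx(int pkt)
{
	pthread_mutex_lock(&tx_lock);

	if (!queue_ready()) {
		pthread_mutex_unlock(&tx_lock);	/* unlock on every early exit */
		return -1;			/* NETDEV_TX_BUSY analogue */
	}

	ring[head++] = pkt;
	pthread_mutex_unlock(&tx_lock);

	reclaim();
	return 0;
}

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("tx %d -> %d\n", i, data_tx(i));
	return 0;
}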
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index bb2aad078637..5a14cc7f28ee 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
| @@ -2136,7 +2136,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj, | |||
| 2136 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); | 2136 | struct nvme_ns *ns = nvme_get_ns_from_dev(dev); |
| 2137 | 2137 | ||
| 2138 | if (a == &dev_attr_uuid.attr) { | 2138 | if (a == &dev_attr_uuid.attr) { |
| 2139 | if (uuid_is_null(&ns->uuid) || | 2139 | if (uuid_is_null(&ns->uuid) && |
| 2140 | !memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) | 2140 | !memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) |
| 2141 | return 0; | 2141 | return 0; |
| 2142 | } | 2142 | } |
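The one-character nvme change above flips the visibility test: the uuid attribute is now hidden only when both the UUID is null and the NGUID is all zeros, instead of whenever either one is missing. Reduced to plain C, with a local all_zero() helper standing in for uuid_is_null()/memchr_inv():

#include <stdbool.h>
#include <stdio.h>

static bool all_zero(const unsigned char *buf, unsigned int len)
{
	while (len--)
		if (*buf++)
			return false;
	return true;
}

/* After the fix: hide the attribute only when neither identifier exists. */
static bool uuid_attr_hidden(const unsigned char uuid[16],
			     const unsigned char nguid[16])
{
	return all_zero(uuid, 16) && all_zero(nguid, 16);
}

int main(void)
{
	unsigned char null_id[16] = { 0 };
	unsigned char real_id[16] = { 1 };

	printf("uuid only:  hidden=%d\n", uuid_attr_hidden(real_id, null_id));
	printf("nguid only: hidden=%d\n", uuid_attr_hidden(null_id, real_id));
	printf("neither:    hidden=%d\n", uuid_attr_hidden(null_id, null_id));
	return 0;
}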
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index cb73bc8cad3b..3f5a04c586ce 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
| @@ -94,7 +94,7 @@ struct nvme_dev { | |||
| 94 | struct mutex shutdown_lock; | 94 | struct mutex shutdown_lock; |
| 95 | bool subsystem; | 95 | bool subsystem; |
| 96 | void __iomem *cmb; | 96 | void __iomem *cmb; |
| 97 | dma_addr_t cmb_dma_addr; | 97 | pci_bus_addr_t cmb_bus_addr; |
| 98 | u64 cmb_size; | 98 | u64 cmb_size; |
| 99 | u32 cmbsz; | 99 | u32 cmbsz; |
| 100 | u32 cmbloc; | 100 | u32 cmbloc; |
| @@ -1226,7 +1226,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq, | |||
| 1226 | if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { | 1226 | if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { |
| 1227 | unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), | 1227 | unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), |
| 1228 | dev->ctrl.page_size); | 1228 | dev->ctrl.page_size); |
| 1229 | nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset; | 1229 | nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset; |
| 1230 | nvmeq->sq_cmds_io = dev->cmb + offset; | 1230 | nvmeq->sq_cmds_io = dev->cmb + offset; |
| 1231 | } else { | 1231 | } else { |
| 1232 | nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), | 1232 | nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), |
| @@ -1527,7 +1527,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev) | |||
| 1527 | resource_size_t bar_size; | 1527 | resource_size_t bar_size; |
| 1528 | struct pci_dev *pdev = to_pci_dev(dev->dev); | 1528 | struct pci_dev *pdev = to_pci_dev(dev->dev); |
| 1529 | void __iomem *cmb; | 1529 | void __iomem *cmb; |
| 1530 | dma_addr_t dma_addr; | 1530 | int bar; |
| 1531 | 1531 | ||
| 1532 | dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); | 1532 | dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); |
| 1533 | if (!(NVME_CMB_SZ(dev->cmbsz))) | 1533 | if (!(NVME_CMB_SZ(dev->cmbsz))) |
| @@ -1540,7 +1540,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev) | |||
| 1540 | szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); | 1540 | szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); |
| 1541 | size = szu * NVME_CMB_SZ(dev->cmbsz); | 1541 | size = szu * NVME_CMB_SZ(dev->cmbsz); |
| 1542 | offset = szu * NVME_CMB_OFST(dev->cmbloc); | 1542 | offset = szu * NVME_CMB_OFST(dev->cmbloc); |
| 1543 | bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc)); | 1543 | bar = NVME_CMB_BIR(dev->cmbloc); |
| 1544 | bar_size = pci_resource_len(pdev, bar); | ||
| 1544 | 1545 | ||
| 1545 | if (offset > bar_size) | 1546 | if (offset > bar_size) |
| 1546 | return NULL; | 1547 | return NULL; |
| @@ -1553,12 +1554,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev) | |||
| 1553 | if (size > bar_size - offset) | 1554 | if (size > bar_size - offset) |
| 1554 | size = bar_size - offset; | 1555 | size = bar_size - offset; |
| 1555 | 1556 | ||
| 1556 | dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset; | 1557 | cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size); |
| 1557 | cmb = ioremap_wc(dma_addr, size); | ||
| 1558 | if (!cmb) | 1558 | if (!cmb) |
| 1559 | return NULL; | 1559 | return NULL; |
| 1560 | 1560 | ||
| 1561 | dev->cmb_dma_addr = dma_addr; | 1561 | dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset; |
| 1562 | dev->cmb_size = size; | 1562 | dev->cmb_size = size; |
| 1563 | return cmb; | 1563 | return cmb; |
| 1564 | } | 1564 | } |
diff --git a/drivers/of/base.c b/drivers/of/base.c index 260d33c0f26c..63897531cd75 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
| @@ -1781,8 +1781,12 @@ bool of_console_check(struct device_node *dn, char *name, int index) | |||
| 1781 | { | 1781 | { |
| 1782 | if (!dn || dn != of_stdout || console_set_on_cmdline) | 1782 | if (!dn || dn != of_stdout || console_set_on_cmdline) |
| 1783 | return false; | 1783 | return false; |
| 1784 | return !add_preferred_console(name, index, | 1784 | |
| 1785 | kstrdup(of_stdout_options, GFP_KERNEL)); | 1785 | /* |
| 1786 | * XXX: cast `options' to char pointer to suppress complication | ||
| 1787 | * warnings: printk, UART and console drivers expect char pointer. | ||
| 1788 | */ | ||
| 1789 | return !add_preferred_console(name, index, (char *)of_stdout_options); | ||
| 1786 | } | 1790 | } |
| 1787 | EXPORT_SYMBOL_GPL(of_console_check); | 1791 | EXPORT_SYMBOL_GPL(of_console_check); |
| 1788 | 1792 | ||
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index d507c3569a88..32771c2ced7b 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c | |||
| @@ -25,7 +25,7 @@ | |||
| 25 | #include <linux/sort.h> | 25 | #include <linux/sort.h> |
| 26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 27 | 27 | ||
| 28 | #define MAX_RESERVED_REGIONS 16 | 28 | #define MAX_RESERVED_REGIONS 32 |
| 29 | static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; | 29 | static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; |
| 30 | static int reserved_mem_count; | 30 | static int reserved_mem_count; |
| 31 | 31 | ||
diff --git a/drivers/of/property.c b/drivers/of/property.c index fbb72116e9d4..264c355ba1ff 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c | |||
| @@ -954,7 +954,7 @@ of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode) | |||
| 954 | struct device_node *np; | 954 | struct device_node *np; |
| 955 | 955 | ||
| 956 | /* Get the parent of the port */ | 956 | /* Get the parent of the port */ |
| 957 | np = of_get_next_parent(to_of_node(fwnode)); | 957 | np = of_get_parent(to_of_node(fwnode)); |
| 958 | if (!np) | 958 | if (!np) |
| 959 | return NULL; | 959 | return NULL; |
| 960 | 960 | ||
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index 89f4e3d072d7..26ed0c08f209 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c | |||
| @@ -935,6 +935,8 @@ static int advk_pcie_probe(struct platform_device *pdev) | |||
| 935 | bridge->sysdata = pcie; | 935 | bridge->sysdata = pcie; |
| 936 | bridge->busnr = 0; | 936 | bridge->busnr = 0; |
| 937 | bridge->ops = &advk_pcie_ops; | 937 | bridge->ops = &advk_pcie_ops; |
| 938 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
| 939 | bridge->swizzle_irq = pci_common_swizzle; | ||
| 938 | 940 | ||
| 939 | ret = pci_scan_root_bus_bridge(bridge); | 941 | ret = pci_scan_root_bus_bridge(bridge); |
| 940 | if (ret < 0) { | 942 | if (ret < 0) { |
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 9c40da54f88a..1987fec1f126 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c | |||
| @@ -233,6 +233,7 @@ struct tegra_msi { | |||
| 233 | struct msi_controller chip; | 233 | struct msi_controller chip; |
| 234 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); | 234 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); |
| 235 | struct irq_domain *domain; | 235 | struct irq_domain *domain; |
| 236 | unsigned long pages; | ||
| 236 | struct mutex lock; | 237 | struct mutex lock; |
| 237 | u64 phys; | 238 | u64 phys; |
| 238 | int irq; | 239 | int irq; |
| @@ -1529,22 +1530,9 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) | |||
| 1529 | goto err; | 1530 | goto err; |
| 1530 | } | 1531 | } |
| 1531 | 1532 | ||
| 1532 | /* | 1533 | /* setup AFI/FPCI range */ |
| 1533 | * The PCI host bridge on Tegra contains some logic that intercepts | 1534 | msi->pages = __get_free_pages(GFP_KERNEL, 0); |
| 1534 | * MSI writes, which means that the MSI target address doesn't have | 1535 | msi->phys = virt_to_phys((void *)msi->pages); |
| 1535 | * to point to actual physical memory. Rather than allocating one 4 | ||
| 1536 | * KiB page of system memory that's never used, we can simply pick | ||
| 1537 | * an arbitrary address within an area reserved for system memory | ||
| 1538 | * in the FPCI address map. | ||
| 1539 | * | ||
| 1540 | * However, in order to avoid confusion, we pick an address that | ||
| 1541 | * doesn't map to physical memory. The FPCI address map reserves a | ||
| 1542 | * 1012 GiB region for system memory and memory-mapped I/O. Since | ||
| 1543 | * none of the Tegra SoCs that contain this PCI host bridge can | ||
| 1544 | * address more than 16 GiB of system memory, the last 4 KiB of | ||
| 1545 | * these 1012 GiB is a good candidate. | ||
| 1546 | */ | ||
| 1547 | msi->phys = 0xfcfffff000; | ||
| 1548 | 1536 | ||
| 1549 | afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); | 1537 | afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); |
| 1550 | afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); | 1538 | afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); |
| @@ -1596,6 +1584,8 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) | |||
| 1596 | afi_writel(pcie, 0, AFI_MSI_EN_VEC6); | 1584 | afi_writel(pcie, 0, AFI_MSI_EN_VEC6); |
| 1597 | afi_writel(pcie, 0, AFI_MSI_EN_VEC7); | 1585 | afi_writel(pcie, 0, AFI_MSI_EN_VEC7); |
| 1598 | 1586 | ||
| 1587 | free_pages(msi->pages, 0); | ||
| 1588 | |||
| 1599 | if (msi->irq > 0) | 1589 | if (msi->irq > 0) |
| 1600 | free_irq(msi->irq, pcie); | 1590 | free_irq(msi->irq, pcie); |
| 1601 | 1591 | ||
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 1778cf4f81c7..82cd8b08d71f 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig | |||
| @@ -100,6 +100,7 @@ config PINCTRL_AMD | |||
| 100 | tristate "AMD GPIO pin control" | 100 | tristate "AMD GPIO pin control" |
| 101 | depends on GPIOLIB | 101 | depends on GPIOLIB |
| 102 | select GPIOLIB_IRQCHIP | 102 | select GPIOLIB_IRQCHIP |
| 103 | select PINMUX | ||
| 103 | select PINCONF | 104 | select PINCONF |
| 104 | select GENERIC_PINCONF | 105 | select GENERIC_PINCONF |
| 105 | help | 106 | help |
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index 0944310225db..ff782445dfb7 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c | |||
| @@ -373,16 +373,12 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc, | |||
| 373 | unsigned long events; | 373 | unsigned long events; |
| 374 | unsigned offset; | 374 | unsigned offset; |
| 375 | unsigned gpio; | 375 | unsigned gpio; |
| 376 | unsigned int type; | ||
| 377 | 376 | ||
| 378 | events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4); | 377 | events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4); |
| 379 | events &= mask; | 378 | events &= mask; |
| 380 | events &= pc->enabled_irq_map[bank]; | 379 | events &= pc->enabled_irq_map[bank]; |
| 381 | for_each_set_bit(offset, &events, 32) { | 380 | for_each_set_bit(offset, &events, 32) { |
| 382 | gpio = (32 * bank) + offset; | 381 | gpio = (32 * bank) + offset; |
| 383 | /* FIXME: no clue why the code looks up the type here */ | ||
| 384 | type = pc->irq_type[gpio]; | ||
| 385 | |||
| 386 | generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain, | 382 | generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain, |
| 387 | gpio)); | 383 | gpio)); |
| 388 | } | 384 | } |
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 04e929fd0ffe..fadbca907c7c 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
| @@ -1577,6 +1577,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) | |||
| 1577 | struct gpio_chip *chip = &pctrl->chip; | 1577 | struct gpio_chip *chip = &pctrl->chip; |
| 1578 | bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); | 1578 | bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); |
| 1579 | int ret, i, offset; | 1579 | int ret, i, offset; |
| 1580 | int irq_base; | ||
| 1580 | 1581 | ||
| 1581 | *chip = chv_gpio_chip; | 1582 | *chip = chv_gpio_chip; |
| 1582 | 1583 | ||
| @@ -1622,7 +1623,18 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) | |||
| 1622 | /* Clear all interrupts */ | 1623 | /* Clear all interrupts */ |
| 1623 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); | 1624 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); |
| 1624 | 1625 | ||
| 1625 | ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, | 1626 | if (!need_valid_mask) { |
| 1627 | irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0, | ||
| 1628 | chip->ngpio, NUMA_NO_NODE); | ||
| 1629 | if (irq_base < 0) { | ||
| 1630 | dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n"); | ||
| 1631 | return irq_base; | ||
| 1632 | } | ||
| 1633 | } else { | ||
| 1634 | irq_base = 0; | ||
| 1635 | } | ||
| 1636 | |||
| 1637 | ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base, | ||
| 1626 | handle_bad_irq, IRQ_TYPE_NONE); | 1638 | handle_bad_irq, IRQ_TYPE_NONE); |
| 1627 | if (ret) { | 1639 | if (ret) { |
| 1628 | dev_err(pctrl->dev, "failed to add IRQ chip\n"); | 1640 | dev_err(pctrl->dev, "failed to add IRQ chip\n"); |
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c index 315a4be8dc1e..9a68914100ad 100644 --- a/drivers/rapidio/devices/tsi721.c +++ b/drivers/rapidio/devices/tsi721.c | |||
| @@ -51,6 +51,8 @@ module_param(mbox_sel, byte, S_IRUGO); | |||
| 51 | MODULE_PARM_DESC(mbox_sel, | 51 | MODULE_PARM_DESC(mbox_sel, |
| 52 | "RIO Messaging MBOX Selection Mask (default: 0x0f = all)"); | 52 | "RIO Messaging MBOX Selection Mask (default: 0x0f = all)"); |
| 53 | 53 | ||
| 54 | static DEFINE_SPINLOCK(tsi721_maint_lock); | ||
| 55 | |||
| 54 | static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); | 56 | static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); |
| 55 | static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); | 57 | static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); |
| 56 | 58 | ||
| @@ -124,12 +126,15 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, | |||
| 124 | void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id); | 126 | void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id); |
| 125 | struct tsi721_dma_desc *bd_ptr; | 127 | struct tsi721_dma_desc *bd_ptr; |
| 126 | u32 rd_count, swr_ptr, ch_stat; | 128 | u32 rd_count, swr_ptr, ch_stat; |
| 129 | unsigned long flags; | ||
| 127 | int i, err = 0; | 130 | int i, err = 0; |
| 128 | u32 op = do_wr ? MAINT_WR : MAINT_RD; | 131 | u32 op = do_wr ? MAINT_WR : MAINT_RD; |
| 129 | 132 | ||
| 130 | if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) | 133 | if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) |
| 131 | return -EINVAL; | 134 | return -EINVAL; |
| 132 | 135 | ||
| 136 | spin_lock_irqsave(&tsi721_maint_lock, flags); | ||
| 137 | |||
| 133 | bd_ptr = priv->mdma.bd_base; | 138 | bd_ptr = priv->mdma.bd_base; |
| 134 | 139 | ||
| 135 | rd_count = ioread32(regs + TSI721_DMAC_DRDCNT); | 140 | rd_count = ioread32(regs + TSI721_DMAC_DRDCNT); |
| @@ -197,7 +202,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size, | |||
| 197 | */ | 202 | */ |
| 198 | swr_ptr = ioread32(regs + TSI721_DMAC_DSWP); | 203 | swr_ptr = ioread32(regs + TSI721_DMAC_DSWP); |
| 199 | iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP); | 204 | iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP); |
| 205 | |||
| 200 | err_out: | 206 | err_out: |
| 207 | spin_unlock_irqrestore(&tsi721_maint_lock, flags); | ||
| 201 | 208 | ||
| 202 | return err; | 209 | return err; |
| 203 | } | 210 | } |
diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c index a3824baca2e5..3ee9af83b638 100644 --- a/drivers/rapidio/rio-access.c +++ b/drivers/rapidio/rio-access.c | |||
| @@ -14,16 +14,8 @@ | |||
| 14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 15 | 15 | ||
| 16 | /* | 16 | /* |
| 17 | * These interrupt-safe spinlocks protect all accesses to RIO | ||
| 18 | * configuration space and doorbell access. | ||
| 19 | */ | ||
| 20 | static DEFINE_SPINLOCK(rio_config_lock); | ||
| 21 | static DEFINE_SPINLOCK(rio_doorbell_lock); | ||
| 22 | |||
| 23 | /* | ||
| 24 | * Wrappers for all RIO configuration access functions. They just check | 17 | * Wrappers for all RIO configuration access functions. They just check |
| 25 | * alignment, do locking and call the low-level functions pointed to | 18 | * alignment and call the low-level functions pointed to by rio_mport->ops. |
| 26 | * by rio_mport->ops. | ||
| 27 | */ | 19 | */ |
| 28 | 20 | ||
| 29 | #define RIO_8_BAD 0 | 21 | #define RIO_8_BAD 0 |
| @@ -44,13 +36,10 @@ int __rio_local_read_config_##size \ | |||
| 44 | (struct rio_mport *mport, u32 offset, type *value) \ | 36 | (struct rio_mport *mport, u32 offset, type *value) \ |
| 45 | { \ | 37 | { \ |
| 46 | int res; \ | 38 | int res; \ |
| 47 | unsigned long flags; \ | ||
| 48 | u32 data = 0; \ | 39 | u32 data = 0; \ |
| 49 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ | 40 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ |
| 50 | spin_lock_irqsave(&rio_config_lock, flags); \ | ||
| 51 | res = mport->ops->lcread(mport, mport->id, offset, len, &data); \ | 41 | res = mport->ops->lcread(mport, mport->id, offset, len, &data); \ |
| 52 | *value = (type)data; \ | 42 | *value = (type)data; \ |
| 53 | spin_unlock_irqrestore(&rio_config_lock, flags); \ | ||
| 54 | return res; \ | 43 | return res; \ |
| 55 | } | 44 | } |
| 56 | 45 | ||
| @@ -67,13 +56,8 @@ int __rio_local_read_config_##size \ | |||
| 67 | int __rio_local_write_config_##size \ | 56 | int __rio_local_write_config_##size \ |
| 68 | (struct rio_mport *mport, u32 offset, type value) \ | 57 | (struct rio_mport *mport, u32 offset, type value) \ |
| 69 | { \ | 58 | { \ |
| 70 | int res; \ | ||
| 71 | unsigned long flags; \ | ||
| 72 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ | 59 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ |
| 73 | spin_lock_irqsave(&rio_config_lock, flags); \ | 60 | return mport->ops->lcwrite(mport, mport->id, offset, len, value);\ |
| 74 | res = mport->ops->lcwrite(mport, mport->id, offset, len, value);\ | ||
| 75 | spin_unlock_irqrestore(&rio_config_lock, flags); \ | ||
| 76 | return res; \ | ||
| 77 | } | 61 | } |
| 78 | 62 | ||
| 79 | RIO_LOP_READ(8, u8, 1) | 63 | RIO_LOP_READ(8, u8, 1) |
| @@ -104,13 +88,10 @@ int rio_mport_read_config_##size \ | |||
| 104 | (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \ | 88 | (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \ |
| 105 | { \ | 89 | { \ |
| 106 | int res; \ | 90 | int res; \ |
| 107 | unsigned long flags; \ | ||
| 108 | u32 data = 0; \ | 91 | u32 data = 0; \ |
| 109 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ | 92 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ |
| 110 | spin_lock_irqsave(&rio_config_lock, flags); \ | ||
| 111 | res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \ | 93 | res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \ |
| 112 | *value = (type)data; \ | 94 | *value = (type)data; \ |
| 113 | spin_unlock_irqrestore(&rio_config_lock, flags); \ | ||
| 114 | return res; \ | 95 | return res; \ |
| 115 | } | 96 | } |
| 116 | 97 | ||
| @@ -127,13 +108,9 @@ int rio_mport_read_config_##size \ | |||
| 127 | int rio_mport_write_config_##size \ | 108 | int rio_mport_write_config_##size \ |
| 128 | (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \ | 109 | (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \ |
| 129 | { \ | 110 | { \ |
| 130 | int res; \ | ||
| 131 | unsigned long flags; \ | ||
| 132 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ | 111 | if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ |
| 133 | spin_lock_irqsave(&rio_config_lock, flags); \ | 112 | return mport->ops->cwrite(mport, mport->id, destid, hopcount, \ |
| 134 | res = mport->ops->cwrite(mport, mport->id, destid, hopcount, offset, len, value); \ | 113 | offset, len, value); \ |
| 135 | spin_unlock_irqrestore(&rio_config_lock, flags); \ | ||
| 136 | return res; \ | ||
| 137 | } | 114 | } |
| 138 | 115 | ||
| 139 | RIO_OP_READ(8, u8, 1) | 116 | RIO_OP_READ(8, u8, 1) |
| @@ -162,14 +139,7 @@ EXPORT_SYMBOL_GPL(rio_mport_write_config_32); | |||
| 162 | */ | 139 | */ |
| 163 | int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data) | 140 | int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data) |
| 164 | { | 141 | { |
| 165 | int res; | 142 | return mport->ops->dsend(mport, mport->id, destid, data); |
| 166 | unsigned long flags; | ||
| 167 | |||
| 168 | spin_lock_irqsave(&rio_doorbell_lock, flags); | ||
| 169 | res = mport->ops->dsend(mport, mport->id, destid, data); | ||
| 170 | spin_unlock_irqrestore(&rio_doorbell_lock, flags); | ||
| 171 | |||
| 172 | return res; | ||
| 173 | } | 143 | } |
| 174 | 144 | ||
| 175 | EXPORT_SYMBOL_GPL(rio_mport_send_doorbell); | 145 | EXPORT_SYMBOL_GPL(rio_mport_send_doorbell); |
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index d0e5d6ee882c..e2c1988cd7c0 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c | |||
| @@ -523,7 +523,7 @@ int __init parse_cec_param(char *str) | |||
| 523 | if (*str == '=') | 523 | if (*str == '=') |
| 524 | str++; | 524 | str++; |
| 525 | 525 | ||
| 526 | if (!strncmp(str, "cec_disable", 7)) | 526 | if (!strcmp(str, "cec_disable")) |
| 527 | ce_arr.disabled = 1; | 527 | ce_arr.disabled = 1; |
| 528 | else | 528 | else |
| 529 | return 0; | 529 | return 0; |
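The cec.c hunk above replaces strncmp(str, "cec_disable", 7) with strcmp(): with a length of 7 the old test only compared the "cec_dis" prefix, so any parameter starting with those seven characters would have disabled the collector. A quick user-space illustration of the difference:

#include <stdio.h>
#include <string.h>

static int old_match(const char *str)
{
	return !strncmp(str, "cec_disable", 7);	/* compares "cec_dis" only */
}

static int new_match(const char *str)
{
	return !strcmp(str, "cec_disable");	/* full, exact match */
}

int main(void)
{
	const char *inputs[] = { "cec_disable", "cec_dissolve", "cec_dis" };

	for (int i = 0; i < 3; i++)
		printf("%-12s old:%d new:%d\n", inputs[i],
		       old_match(inputs[i]), new_match(inputs[i]));
	return 0;
}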
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index df63e44526ac..bf04479456a0 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig | |||
| @@ -109,6 +109,7 @@ config QCOM_Q6V5_PIL | |||
| 109 | depends on OF && ARCH_QCOM | 109 | depends on OF && ARCH_QCOM |
| 110 | depends on QCOM_SMEM | 110 | depends on QCOM_SMEM |
| 111 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) | 111 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) |
| 112 | depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n | ||
| 112 | select MFD_SYSCON | 113 | select MFD_SYSCON |
| 113 | select QCOM_RPROC_COMMON | 114 | select QCOM_RPROC_COMMON |
| 114 | select QCOM_SCM | 115 | select QCOM_SCM |
| @@ -120,6 +121,7 @@ config QCOM_WCNSS_PIL | |||
| 120 | tristate "Qualcomm WCNSS Peripheral Image Loader" | 121 | tristate "Qualcomm WCNSS Peripheral Image Loader" |
| 121 | depends on OF && ARCH_QCOM | 122 | depends on OF && ARCH_QCOM |
| 122 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) | 123 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) |
| 124 | depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n | ||
| 123 | depends on QCOM_SMEM | 125 | depends on QCOM_SMEM |
| 124 | select QCOM_MDT_LOADER | 126 | select QCOM_MDT_LOADER |
| 125 | select QCOM_RPROC_COMMON | 127 | select QCOM_RPROC_COMMON |
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 612d91403341..633268e9d550 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c | |||
| @@ -264,15 +264,14 @@ static int imx_rproc_addr_init(struct imx_rproc *priv, | |||
| 264 | if (!(att->flags & ATT_OWN)) | 264 | if (!(att->flags & ATT_OWN)) |
| 265 | continue; | 265 | continue; |
| 266 | 266 | ||
| 267 | if (b > IMX7D_RPROC_MEM_MAX) | 267 | if (b >= IMX7D_RPROC_MEM_MAX) |
| 268 | break; | 268 | break; |
| 269 | 269 | ||
| 270 | priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, | 270 | priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, |
| 271 | att->sa, att->size); | 271 | att->sa, att->size); |
| 272 | if (IS_ERR(priv->mem[b].cpu_addr)) { | 272 | if (!priv->mem[b].cpu_addr) { |
| 273 | dev_err(dev, "devm_ioremap_resource failed\n"); | 273 | dev_err(dev, "devm_ioremap_resource failed\n"); |
| 274 | err = PTR_ERR(priv->mem[b].cpu_addr); | 274 | return -ENOMEM; |
| 275 | return err; | ||
| 276 | } | 275 | } |
| 277 | priv->mem[b].sys_addr = att->sa; | 276 | priv->mem[b].sys_addr = att->sa; |
| 278 | priv->mem[b].size = att->size; | 277 | priv->mem[b].size = att->size; |
| @@ -296,7 +295,7 @@ static int imx_rproc_addr_init(struct imx_rproc *priv, | |||
| 296 | return err; | 295 | return err; |
| 297 | } | 296 | } |
| 298 | 297 | ||
| 299 | if (b > IMX7D_RPROC_MEM_MAX) | 298 | if (b >= IMX7D_RPROC_MEM_MAX) |
| 300 | break; | 299 | break; |
| 301 | 300 | ||
| 302 | priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res); | 301 | priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res); |
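The imx_rproc hunk above fixes two things: the bounds check b > IMX7D_RPROC_MEM_MAX still admitted the index IMX7D_RPROC_MEM_MAX itself, one slot past the end of priv->mem[], and devm_ioremap() reports failure with NULL rather than an ERR_PTR value. The off-by-one half in a self-contained sketch; only the constant name is taken from the driver, the array size and store() helper are stand-ins:

#include <stdio.h>

#define IMX7D_RPROC_MEM_MAX 8	/* assumed size, used only for illustration */

static int store(int *mem, int b, int value)
{
	/* old test: b > IMX7D_RPROC_MEM_MAX, which lets b == 8 through and
	 * writes one element past the end of an 8-entry array */
	if (b >= IMX7D_RPROC_MEM_MAX)
		return -1;
	mem[b] = value;
	return 0;
}

int main(void)
{
	int mem[IMX7D_RPROC_MEM_MAX] = { 0 };

	for (int b = 0; b <= IMX7D_RPROC_MEM_MAX; b++)
		if (store(mem, b, b) < 0)
			printf("index %d rejected\n", b);
	return 0;
}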
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index e0c393214264..e2baecbb9dd3 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig | |||
| @@ -34,11 +34,12 @@ config RESET_BERLIN | |||
| 34 | help | 34 | help |
| 35 | This enables the reset controller driver for Marvell Berlin SoCs. | 35 | This enables the reset controller driver for Marvell Berlin SoCs. |
| 36 | 36 | ||
| 37 | config RESET_HSDK_V1 | 37 | config RESET_HSDK |
| 38 | bool "HSDK v1 Reset Driver" | 38 | bool "Synopsys HSDK Reset Driver" |
| 39 | default n | 39 | depends on HAS_IOMEM |
| 40 | depends on ARC_SOC_HSDK || COMPILE_TEST | ||
| 40 | help | 41 | help |
| 41 | This enables the reset controller driver for HSDK v1. | 42 | This enables the reset controller driver for HSDK board. |
| 42 | 43 | ||
| 43 | config RESET_IMX7 | 44 | config RESET_IMX7 |
| 44 | bool "i.MX7 Reset Driver" if COMPILE_TEST | 45 | bool "i.MX7 Reset Driver" if COMPILE_TEST |
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index d368367110e5..af1c15c330b3 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile | |||
| @@ -5,7 +5,7 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/ | |||
| 5 | obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o | 5 | obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o |
| 6 | obj-$(CONFIG_RESET_ATH79) += reset-ath79.o | 6 | obj-$(CONFIG_RESET_ATH79) += reset-ath79.o |
| 7 | obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o | 7 | obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o |
| 8 | obj-$(CONFIG_RESET_HSDK_V1) += reset-hsdk-v1.o | 8 | obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o |
| 9 | obj-$(CONFIG_RESET_IMX7) += reset-imx7.o | 9 | obj-$(CONFIG_RESET_IMX7) += reset-imx7.o |
| 10 | obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o | 10 | obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o |
| 11 | obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o | 11 | obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o |
diff --git a/drivers/reset/reset-hsdk-v1.c b/drivers/reset/reset-hsdk.c index bca13e4bf622..8bce391c6943 100644 --- a/drivers/reset/reset-hsdk-v1.c +++ b/drivers/reset/reset-hsdk.c | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 2017 Synopsys. | 2 | * Copyright (C) 2017 Synopsys. |
| 3 | * | 3 | * |
| 4 | * Synopsys HSDKv1 SDP reset driver. | 4 | * Synopsys HSDK Development platform reset driver. |
| 5 | * | 5 | * |
| 6 | * This file is licensed under the terms of the GNU General Public | 6 | * This file is licensed under the terms of the GNU General Public |
| 7 | * License version 2. This program is licensed "as is" without any | 7 | * License version 2. This program is licensed "as is" without any |
| @@ -18,9 +18,9 @@ | |||
| 18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
| 19 | #include <linux/types.h> | 19 | #include <linux/types.h> |
| 20 | 20 | ||
| 21 | #define to_hsdkv1_rst(p) container_of((p), struct hsdkv1_rst, rcdev) | 21 | #define to_hsdk_rst(p) container_of((p), struct hsdk_rst, rcdev) |
| 22 | 22 | ||
| 23 | struct hsdkv1_rst { | 23 | struct hsdk_rst { |
| 24 | void __iomem *regs_ctl; | 24 | void __iomem *regs_ctl; |
| 25 | void __iomem *regs_rst; | 25 | void __iomem *regs_rst; |
| 26 | spinlock_t lock; | 26 | spinlock_t lock; |
| @@ -49,12 +49,12 @@ static const u32 rst_map[] = { | |||
| 49 | #define CGU_IP_SW_RESET_RESET BIT(0) | 49 | #define CGU_IP_SW_RESET_RESET BIT(0) |
| 50 | #define SW_RESET_TIMEOUT 10000 | 50 | #define SW_RESET_TIMEOUT 10000 |
| 51 | 51 | ||
| 52 | static void hsdkv1_reset_config(struct hsdkv1_rst *rst, unsigned long id) | 52 | static void hsdk_reset_config(struct hsdk_rst *rst, unsigned long id) |
| 53 | { | 53 | { |
| 54 | writel(rst_map[id], rst->regs_ctl + CGU_SYS_RST_CTRL); | 54 | writel(rst_map[id], rst->regs_ctl + CGU_SYS_RST_CTRL); |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | static int hsdkv1_reset_do(struct hsdkv1_rst *rst) | 57 | static int hsdk_reset_do(struct hsdk_rst *rst) |
| 58 | { | 58 | { |
| 59 | u32 reg; | 59 | u32 reg; |
| 60 | 60 | ||
| @@ -69,28 +69,28 @@ static int hsdkv1_reset_do(struct hsdkv1_rst *rst) | |||
| 69 | !(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT); | 69 | !(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT); |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | static int hsdkv1_reset_reset(struct reset_controller_dev *rcdev, | 72 | static int hsdk_reset_reset(struct reset_controller_dev *rcdev, |
| 73 | unsigned long id) | 73 | unsigned long id) |
| 74 | { | 74 | { |
| 75 | struct hsdkv1_rst *rst = to_hsdkv1_rst(rcdev); | 75 | struct hsdk_rst *rst = to_hsdk_rst(rcdev); |
| 76 | unsigned long flags; | 76 | unsigned long flags; |
| 77 | int ret; | 77 | int ret; |
| 78 | 78 | ||
| 79 | spin_lock_irqsave(&rst->lock, flags); | 79 | spin_lock_irqsave(&rst->lock, flags); |
| 80 | hsdkv1_reset_config(rst, id); | 80 | hsdk_reset_config(rst, id); |
| 81 | ret = hsdkv1_reset_do(rst); | 81 | ret = hsdk_reset_do(rst); |
| 82 | spin_unlock_irqrestore(&rst->lock, flags); | 82 | spin_unlock_irqrestore(&rst->lock, flags); |
| 83 | 83 | ||
| 84 | return ret; | 84 | return ret; |
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | static const struct reset_control_ops hsdkv1_reset_ops = { | 87 | static const struct reset_control_ops hsdk_reset_ops = { |
| 88 | .reset = hsdkv1_reset_reset, | 88 | .reset = hsdk_reset_reset, |
| 89 | }; | 89 | }; |
| 90 | 90 | ||
| 91 | static int hsdkv1_reset_probe(struct platform_device *pdev) | 91 | static int hsdk_reset_probe(struct platform_device *pdev) |
| 92 | { | 92 | { |
| 93 | struct hsdkv1_rst *rst; | 93 | struct hsdk_rst *rst; |
| 94 | struct resource *mem; | 94 | struct resource *mem; |
| 95 | 95 | ||
| 96 | rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL); | 96 | rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL); |
| @@ -110,7 +110,7 @@ static int hsdkv1_reset_probe(struct platform_device *pdev) | |||
| 110 | spin_lock_init(&rst->lock); | 110 | spin_lock_init(&rst->lock); |
| 111 | 111 | ||
| 112 | rst->rcdev.owner = THIS_MODULE; | 112 | rst->rcdev.owner = THIS_MODULE; |
| 113 | rst->rcdev.ops = &hsdkv1_reset_ops; | 113 | rst->rcdev.ops = &hsdk_reset_ops; |
| 114 | rst->rcdev.of_node = pdev->dev.of_node; | 114 | rst->rcdev.of_node = pdev->dev.of_node; |
| 115 | rst->rcdev.nr_resets = HSDK_MAX_RESETS; | 115 | rst->rcdev.nr_resets = HSDK_MAX_RESETS; |
| 116 | rst->rcdev.of_reset_n_cells = 1; | 116 | rst->rcdev.of_reset_n_cells = 1; |
| @@ -118,20 +118,20 @@ static int hsdkv1_reset_probe(struct platform_device *pdev) | |||
| 118 | return reset_controller_register(&rst->rcdev); | 118 | return reset_controller_register(&rst->rcdev); |
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | static const struct of_device_id hsdkv1_reset_dt_match[] = { | 121 | static const struct of_device_id hsdk_reset_dt_match[] = { |
| 122 | { .compatible = "snps,hsdk-v1.0-reset" }, | 122 | { .compatible = "snps,hsdk-reset" }, |
| 123 | { }, | 123 | { }, |
| 124 | }; | 124 | }; |
| 125 | 125 | ||
| 126 | static struct platform_driver hsdkv1_reset_driver = { | 126 | static struct platform_driver hsdk_reset_driver = { |
| 127 | .probe = hsdkv1_reset_probe, | 127 | .probe = hsdk_reset_probe, |
| 128 | .driver = { | 128 | .driver = { |
| 129 | .name = "hsdk-v1.0-reset", | 129 | .name = "hsdk-reset", |
| 130 | .of_match_table = hsdkv1_reset_dt_match, | 130 | .of_match_table = hsdk_reset_dt_match, |
| 131 | }, | 131 | }, |
| 132 | }; | 132 | }; |
| 133 | builtin_platform_driver(hsdkv1_reset_driver); | 133 | builtin_platform_driver(hsdk_reset_driver); |
| 134 | 134 | ||
| 135 | MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>"); | 135 | MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>"); |
| 136 | MODULE_DESCRIPTION("Synopsys HSDKv1 SDP reset driver"); | 136 | MODULE_DESCRIPTION("Synopsys HSDK SDP reset driver"); |
| 137 | MODULE_LICENSE("GPL v2"); | 137 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 5a5e927ea50f..5dcc9bf1c5bc 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c | |||
| @@ -635,19 +635,18 @@ qcom_glink_alloc_intent(struct qcom_glink *glink, | |||
| 635 | unsigned long flags; | 635 | unsigned long flags; |
| 636 | 636 | ||
| 637 | intent = kzalloc(sizeof(*intent), GFP_KERNEL); | 637 | intent = kzalloc(sizeof(*intent), GFP_KERNEL); |
| 638 | |||
| 639 | if (!intent) | 638 | if (!intent) |
| 640 | return NULL; | 639 | return NULL; |
| 641 | 640 | ||
| 642 | intent->data = kzalloc(size, GFP_KERNEL); | 641 | intent->data = kzalloc(size, GFP_KERNEL); |
| 643 | if (!intent->data) | 642 | if (!intent->data) |
| 644 | return NULL; | 643 | goto free_intent; |
| 645 | 644 | ||
| 646 | spin_lock_irqsave(&channel->intent_lock, flags); | 645 | spin_lock_irqsave(&channel->intent_lock, flags); |
| 647 | ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC); | 646 | ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC); |
| 648 | if (ret < 0) { | 647 | if (ret < 0) { |
| 649 | spin_unlock_irqrestore(&channel->intent_lock, flags); | 648 | spin_unlock_irqrestore(&channel->intent_lock, flags); |
| 650 | return NULL; | 649 | goto free_data; |
| 651 | } | 650 | } |
| 652 | spin_unlock_irqrestore(&channel->intent_lock, flags); | 651 | spin_unlock_irqrestore(&channel->intent_lock, flags); |
| 653 | 652 | ||
| @@ -656,6 +655,12 @@ qcom_glink_alloc_intent(struct qcom_glink *glink, | |||
| 656 | intent->reuse = reuseable; | 655 | intent->reuse = reuseable; |
| 657 | 656 | ||
| 658 | return intent; | 657 | return intent; |
| 658 | |||
| 659 | free_data: | ||
| 660 | kfree(intent->data); | ||
| 661 | free_intent: | ||
| 662 | kfree(intent); | ||
| 663 | return NULL; | ||
| 659 | } | 664 | } |
| 660 | 665 | ||
| 661 | static void qcom_glink_handle_rx_done(struct qcom_glink *glink, | 666 | static void qcom_glink_handle_rx_done(struct qcom_glink *glink, |
| @@ -1197,7 +1202,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, | |||
| 1197 | 1202 | ||
| 1198 | ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); | 1203 | ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); |
| 1199 | if (ret) | 1204 | if (ret) |
| 1200 | return ret; | 1205 | goto unlock; |
| 1201 | 1206 | ||
| 1202 | ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); | 1207 | ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); |
| 1203 | if (!ret) { | 1208 | if (!ret) { |
| @@ -1207,6 +1212,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, | |||
| 1207 | ret = channel->intent_req_result ? 0 : -ECANCELED; | 1212 | ret = channel->intent_req_result ? 0 : -ECANCELED; |
| 1208 | } | 1213 | } |
| 1209 | 1214 | ||
| 1215 | unlock: | ||
| 1210 | mutex_unlock(&channel->intent_req_lock); | 1216 | mutex_unlock(&channel->intent_req_lock); |
| 1211 | return ret; | 1217 | return ret; |
| 1212 | } | 1218 | } |
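The qcom_glink_alloc_intent() hunk above replaces bare return NULL statements with goto-based unwinding: previously a failed data allocation leaked the intent, and a failed idr allocation leaked both the intent and its buffer. The same pattern in a self-contained sketch, with calloc/free in place of kzalloc/kfree and a fail_late flag standing in for the idr_alloc_cyclic() error:

#include <stdlib.h>

struct intent {
	void *data;
	unsigned long size;
};

static struct intent *alloc_intent(unsigned long size, int fail_late)
{
	struct intent *intent = calloc(1, sizeof(*intent));
	if (!intent)
		return NULL;

	intent->data = calloc(1, size);
	if (!intent->data)
		goto free_intent;	/* was: return NULL (leaked intent) */

	if (fail_late)			/* stands in for the idr failure */
		goto free_data;

	intent->size = size;
	return intent;

free_data:
	free(intent->data);
free_intent:
	free(intent);
	return NULL;
}

int main(void)
{
	struct intent *ok = alloc_intent(64, 0);
	struct intent *bad = alloc_intent(64, 1);	/* cleaned up, not leaked */

	if (ok) {
		free(ok->data);
		free(ok);
	}
	return bad ? 1 : 0;
}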
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 785fb42f6650..2799a6b08f73 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | |||
| @@ -3767,7 +3767,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd) | |||
| 3767 | */ | 3767 | */ |
| 3768 | if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) { | 3768 | if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) { |
| 3769 | pr_err("write_pending failed since: %d\n", vscsi->flags); | 3769 | pr_err("write_pending failed since: %d\n", vscsi->flags); |
| 3770 | return 0; | 3770 | return -EIO; |
| 3771 | } | 3771 | } |
| 3772 | 3772 | ||
| 3773 | rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, | 3773 | rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index bd4605a34f54..c62e8d111fd9 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
| @@ -2851,9 +2851,6 @@ EXPORT_SYMBOL_GPL(iscsi_session_setup); | |||
| 2851 | /** | 2851 | /** |
| 2852 | * iscsi_session_teardown - destroy session, host, and cls_session | 2852 | * iscsi_session_teardown - destroy session, host, and cls_session |
| 2853 | * @cls_session: iscsi session | 2853 | * @cls_session: iscsi session |
| 2854 | * | ||
| 2855 | * The driver must have called iscsi_remove_session before | ||
| 2856 | * calling this. | ||
| 2857 | */ | 2854 | */ |
| 2858 | void iscsi_session_teardown(struct iscsi_cls_session *cls_session) | 2855 | void iscsi_session_teardown(struct iscsi_cls_session *cls_session) |
| 2859 | { | 2856 | { |
| @@ -2863,6 +2860,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) | |||
| 2863 | 2860 | ||
| 2864 | iscsi_pool_free(&session->cmdpool); | 2861 | iscsi_pool_free(&session->cmdpool); |
| 2865 | 2862 | ||
| 2863 | iscsi_remove_session(cls_session); | ||
| 2864 | |||
| 2866 | kfree(session->password); | 2865 | kfree(session->password); |
| 2867 | kfree(session->password_in); | 2866 | kfree(session->password_in); |
| 2868 | kfree(session->username); | 2867 | kfree(session->username); |
| @@ -2877,7 +2876,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session) | |||
| 2877 | kfree(session->portal_type); | 2876 | kfree(session->portal_type); |
| 2878 | kfree(session->discovery_parent_type); | 2877 | kfree(session->discovery_parent_type); |
| 2879 | 2878 | ||
| 2880 | iscsi_destroy_session(cls_session); | 2879 | iscsi_free_session(cls_session); |
| 2880 | |||
| 2881 | iscsi_host_dec_session_cnt(shost); | 2881 | iscsi_host_dec_session_cnt(shost); |
| 2882 | module_put(owner); | 2882 | module_put(owner); |
| 2883 | } | 2883 | } |
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index e7818afeda2b..15590a063ad9 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
| @@ -956,6 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
| 956 | if (*bflags & BLIST_NO_DIF) | 956 | if (*bflags & BLIST_NO_DIF) |
| 957 | sdev->no_dif = 1; | 957 | sdev->no_dif = 1; |
| 958 | 958 | ||
| 959 | if (*bflags & BLIST_UNMAP_LIMIT_WS) | ||
| 960 | sdev->unmap_limit_for_ws = 1; | ||
| 961 | |||
| 959 | sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; | 962 | sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; |
| 960 | 963 | ||
| 961 | if (*bflags & BLIST_TRY_VPD_PAGES) | 964 | if (*bflags & BLIST_TRY_VPD_PAGES) |
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 0190aeff5f7f..7404d26895f5 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
| @@ -2211,22 +2211,6 @@ void iscsi_free_session(struct iscsi_cls_session *session) | |||
| 2211 | EXPORT_SYMBOL_GPL(iscsi_free_session); | 2211 | EXPORT_SYMBOL_GPL(iscsi_free_session); |
| 2212 | 2212 | ||
| 2213 | /** | 2213 | /** |
| 2214 | * iscsi_destroy_session - destroy iscsi session | ||
| 2215 | * @session: iscsi_session | ||
| 2216 | * | ||
| 2217 | * Can be called by a LLD or iscsi_transport. There must not be | ||
| 2218 | * any running connections. | ||
| 2219 | */ | ||
| 2220 | int iscsi_destroy_session(struct iscsi_cls_session *session) | ||
| 2221 | { | ||
| 2222 | iscsi_remove_session(session); | ||
| 2223 | ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n"); | ||
| 2224 | iscsi_free_session(session); | ||
| 2225 | return 0; | ||
| 2226 | } | ||
| 2227 | EXPORT_SYMBOL_GPL(iscsi_destroy_session); | ||
| 2228 | |||
| 2229 | /** | ||
| 2230 | * iscsi_create_conn - create iscsi class connection | 2214 | * iscsi_create_conn - create iscsi class connection |
| 2231 | * @session: iscsi cls session | 2215 | * @session: iscsi cls session |
| 2232 | * @dd_size: private driver data size | 2216 | * @dd_size: private driver data size |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index fb9f8b5f4673..d175c5c5ccf8 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -715,13 +715,21 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) | |||
| 715 | break; | 715 | break; |
| 716 | 716 | ||
| 717 | case SD_LBP_WS16: | 717 | case SD_LBP_WS16: |
| 718 | max_blocks = min_not_zero(sdkp->max_ws_blocks, | 718 | if (sdkp->device->unmap_limit_for_ws) |
| 719 | (u32)SD_MAX_WS16_BLOCKS); | 719 | max_blocks = sdkp->max_unmap_blocks; |
| 720 | else | ||
| 721 | max_blocks = sdkp->max_ws_blocks; | ||
| 722 | |||
| 723 | max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS); | ||
| 720 | break; | 724 | break; |
| 721 | 725 | ||
| 722 | case SD_LBP_WS10: | 726 | case SD_LBP_WS10: |
| 723 | max_blocks = min_not_zero(sdkp->max_ws_blocks, | 727 | if (sdkp->device->unmap_limit_for_ws) |
| 724 | (u32)SD_MAX_WS10_BLOCKS); | 728 | max_blocks = sdkp->max_unmap_blocks; |
| 729 | else | ||
| 730 | max_blocks = sdkp->max_ws_blocks; | ||
| 731 | |||
| 732 | max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS); | ||
| 725 | break; | 733 | break; |
| 726 | 734 | ||
| 727 | case SD_LBP_ZERO: | 735 | case SD_LBP_ZERO: |
| @@ -3099,8 +3107,6 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
| 3099 | sd_read_security(sdkp, buffer); | 3107 | sd_read_security(sdkp, buffer); |
| 3100 | } | 3108 | } |
| 3101 | 3109 | ||
| 3102 | sdkp->first_scan = 0; | ||
| 3103 | |||
| 3104 | /* | 3110 | /* |
| 3105 | * We now have all cache related info, determine how we deal | 3111 | * We now have all cache related info, determine how we deal |
| 3106 | * with flush requests. | 3112 | * with flush requests. |
| @@ -3115,7 +3121,7 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
| 3115 | q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); | 3121 | q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); |
| 3116 | 3122 | ||
| 3117 | /* | 3123 | /* |
| 3118 | * Use the device's preferred I/O size for reads and writes | 3124 | * Determine the device's preferred I/O size for reads and writes |
| 3119 | * unless the reported value is unreasonably small, large, or | 3125 | * unless the reported value is unreasonably small, large, or |
| 3120 | * garbage. | 3126 | * garbage. |
| 3121 | */ | 3127 | */ |
| @@ -3129,8 +3135,19 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
| 3129 | rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), | 3135 | rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), |
| 3130 | (sector_t)BLK_DEF_MAX_SECTORS); | 3136 | (sector_t)BLK_DEF_MAX_SECTORS); |
| 3131 | 3137 | ||
| 3132 | /* Combine with controller limits */ | 3138 | /* Do not exceed controller limit */ |
| 3133 | q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); | 3139 | rw_max = min(rw_max, queue_max_hw_sectors(q)); |
| 3140 | |||
| 3141 | /* | ||
| 3142 | * Only update max_sectors if previously unset or if the current value | ||
| 3143 | * exceeds the capabilities of the hardware. | ||
| 3144 | */ | ||
| 3145 | if (sdkp->first_scan || | ||
| 3146 | q->limits.max_sectors > q->limits.max_dev_sectors || | ||
| 3147 | q->limits.max_sectors > q->limits.max_hw_sectors) | ||
| 3148 | q->limits.max_sectors = rw_max; | ||
| 3149 | |||
| 3150 | sdkp->first_scan = 0; | ||
| 3134 | 3151 | ||
| 3135 | set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity)); | 3152 | set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity)); |
| 3136 | sd_config_write_same(sdkp); | 3153 | sd_config_write_same(sdkp); |
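
The sd.c hunks above do two things: WRITE SAME(16)/WRITE SAME(10) discard payloads can now be capped by the device's UNMAP limit when unmap_limit_for_ws is set, and max_sectors is only rewritten on the first scan or when the current value exceeds the device/hardware limits, so a value lowered from userspace survives revalidation. Below is a minimal userspace sketch of the WS16 clamp only; min_not_zero_u32() and the example ceiling value are illustrative stand-ins, not the kernel code:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's min_not_zero(): take the smaller
 * of two limits, treating zero as "unset". */
static uint32_t min_not_zero_u32(uint32_t a, uint32_t b)
{
	if (a == 0)
		return b;
	if (b == 0)
		return a;
	return a < b ? a : b;
}

/* Sketch of the SD_LBP_WS16 branch after the patch: start from the UNMAP
 * limit when the device asks for UNMAP-sized WRITE SAME payloads, otherwise
 * from the WRITE SAME limit, then clamp to the WS16 ceiling. */
static uint32_t ws16_max_blocks(int unmap_limit_for_ws,
				uint32_t max_unmap_blocks,
				uint32_t max_ws_blocks,
				uint32_t ws16_ceiling)
{
	uint32_t max_blocks = unmap_limit_for_ws ? max_unmap_blocks
						 : max_ws_blocks;

	return min_not_zero_u32(max_blocks, ws16_ceiling);
}

int main(void)
{
	/* example values only */
	printf("%u\n", ws16_max_blocks(1, 4096, 65535, 0xffff));
	return 0;
}
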
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 2fe216b276e2..84a8ac2a779f 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c | |||
| @@ -694,10 +694,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) | |||
| 694 | tty_set_termios_ldisc(tty, disc); | 694 | tty_set_termios_ldisc(tty, disc); |
| 695 | retval = tty_ldisc_open(tty, tty->ldisc); | 695 | retval = tty_ldisc_open(tty, tty->ldisc); |
| 696 | if (retval) { | 696 | if (retval) { |
| 697 | if (!WARN_ON(disc == N_TTY)) { | 697 | tty_ldisc_put(tty->ldisc); |
| 698 | tty_ldisc_put(tty->ldisc); | 698 | tty->ldisc = NULL; |
| 699 | tty->ldisc = NULL; | ||
| 700 | } | ||
| 701 | } | 699 | } |
| 702 | return retval; | 700 | return retval; |
| 703 | } | 701 | } |
| @@ -752,8 +750,9 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit) | |||
| 752 | 750 | ||
| 753 | if (tty->ldisc) { | 751 | if (tty->ldisc) { |
| 754 | if (reinit) { | 752 | if (reinit) { |
| 755 | if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0) | 753 | if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0 && |
| 756 | tty_ldisc_reinit(tty, N_TTY); | 754 | tty_ldisc_reinit(tty, N_TTY) < 0) |
| 755 | WARN_ON(tty_ldisc_reinit(tty, N_NULL) < 0); | ||
| 757 | } else | 756 | } else |
| 758 | tty_ldisc_kill(tty); | 757 | tty_ldisc_kill(tty); |
| 759 | } | 758 | } |
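
The tty_ldisc change above stops assuming that reinstating N_TTY can never fail and instead walks a fallback chain on hangup: the termios-selected discipline, then N_TTY, then N_NULL as the discipline of last resort. A minimal sketch of that chain, with a stub try_set_ldisc() standing in for tty_ldisc_reinit() (the names here are illustrative, not kernel APIs):

#include <errno.h>
#include <stdio.h>

enum { N_NULL_ID = 0, N_TTY_ID = 1 };

/* Stub standing in for tty_ldisc_reinit(); here only N_NULL ever succeeds,
 * so the full fallback chain is exercised. */
static int try_set_ldisc(int disc)
{
	return disc == N_NULL_ID ? 0 : -EINVAL;
}

/* Post-patch fallback order: the termios-selected discipline first, then
 * N_TTY, then N_NULL. */
static void reinit_with_fallback(int preferred)
{
	if (try_set_ldisc(preferred) < 0 &&
	    try_set_ldisc(N_TTY_ID) < 0 &&
	    try_set_ldisc(N_NULL_ID) < 0)
		fprintf(stderr, "no usable line discipline\n"); /* WARN_ON() in the kernel */
}

int main(void)
{
	reinit_with_fallback(7);	/* 7 is just an arbitrary discipline number */
	return 0;
}
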
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index dd74c99d6ce1..5d061b3d8224 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
| @@ -2026,6 +2026,8 @@ static DEVICE_ATTR_RO(suspended); | |||
| 2026 | static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver) | 2026 | static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver) |
| 2027 | { | 2027 | { |
| 2028 | struct usb_composite_dev *cdev = get_gadget_data(gadget); | 2028 | struct usb_composite_dev *cdev = get_gadget_data(gadget); |
| 2029 | struct usb_gadget_strings *gstr = cdev->driver->strings[0]; | ||
| 2030 | struct usb_string *dev_str = gstr->strings; | ||
| 2029 | 2031 | ||
| 2030 | /* composite_disconnect() must already have been called | 2032 | /* composite_disconnect() must already have been called |
| 2031 | * by the underlying peripheral controller driver! | 2033 | * by the underlying peripheral controller driver! |
| @@ -2045,6 +2047,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver) | |||
| 2045 | 2047 | ||
| 2046 | composite_dev_cleanup(cdev); | 2048 | composite_dev_cleanup(cdev); |
| 2047 | 2049 | ||
| 2050 | if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer) | ||
| 2051 | dev_str[USB_GADGET_MANUFACTURER_IDX].s = ""; | ||
| 2052 | |||
| 2048 | kfree(cdev->def_manufacturer); | 2053 | kfree(cdev->def_manufacturer); |
| 2049 | kfree(cdev); | 2054 | kfree(cdev); |
| 2050 | set_gadget_data(gadget, NULL); | 2055 | set_gadget_data(gadget, NULL); |
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index a22a892de7b7..aeb9f3c40521 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c | |||
| @@ -1143,11 +1143,12 @@ static struct configfs_attribute *interf_grp_attrs[] = { | |||
| 1143 | NULL | 1143 | NULL |
| 1144 | }; | 1144 | }; |
| 1145 | 1145 | ||
| 1146 | int usb_os_desc_prepare_interf_dir(struct config_group *parent, | 1146 | struct config_group *usb_os_desc_prepare_interf_dir( |
| 1147 | int n_interf, | 1147 | struct config_group *parent, |
| 1148 | struct usb_os_desc **desc, | 1148 | int n_interf, |
| 1149 | char **names, | 1149 | struct usb_os_desc **desc, |
| 1150 | struct module *owner) | 1150 | char **names, |
| 1151 | struct module *owner) | ||
| 1151 | { | 1152 | { |
| 1152 | struct config_group *os_desc_group; | 1153 | struct config_group *os_desc_group; |
| 1153 | struct config_item_type *os_desc_type, *interface_type; | 1154 | struct config_item_type *os_desc_type, *interface_type; |
| @@ -1159,7 +1160,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent, | |||
| 1159 | 1160 | ||
| 1160 | char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL); | 1161 | char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL); |
| 1161 | if (!vlabuf) | 1162 | if (!vlabuf) |
| 1162 | return -ENOMEM; | 1163 | return ERR_PTR(-ENOMEM); |
| 1163 | 1164 | ||
| 1164 | os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group); | 1165 | os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group); |
| 1165 | os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type); | 1166 | os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type); |
| @@ -1184,7 +1185,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent, | |||
| 1184 | configfs_add_default_group(&d->group, os_desc_group); | 1185 | configfs_add_default_group(&d->group, os_desc_group); |
| 1185 | } | 1186 | } |
| 1186 | 1187 | ||
| 1187 | return 0; | 1188 | return os_desc_group; |
| 1188 | } | 1189 | } |
| 1189 | EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir); | 1190 | EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir); |
| 1190 | 1191 | ||
diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h index 36c468c4f5e9..540d5e92ed22 100644 --- a/drivers/usb/gadget/configfs.h +++ b/drivers/usb/gadget/configfs.h | |||
| @@ -5,11 +5,12 @@ | |||
| 5 | 5 | ||
| 6 | void unregister_gadget_item(struct config_item *item); | 6 | void unregister_gadget_item(struct config_item *item); |
| 7 | 7 | ||
| 8 | int usb_os_desc_prepare_interf_dir(struct config_group *parent, | 8 | struct config_group *usb_os_desc_prepare_interf_dir( |
| 9 | int n_interf, | 9 | struct config_group *parent, |
| 10 | struct usb_os_desc **desc, | 10 | int n_interf, |
| 11 | char **names, | 11 | struct usb_os_desc **desc, |
| 12 | struct module *owner); | 12 | char **names, |
| 13 | struct module *owner); | ||
| 13 | 14 | ||
| 14 | static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item) | 15 | static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item) |
| 15 | { | 16 | { |
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index e1d5853ef1e4..c7c5b3ce1d98 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c | |||
| @@ -908,6 +908,7 @@ static void rndis_free_inst(struct usb_function_instance *f) | |||
| 908 | free_netdev(opts->net); | 908 | free_netdev(opts->net); |
| 909 | } | 909 | } |
| 910 | 910 | ||
| 911 | kfree(opts->rndis_interf_group); /* single VLA chunk */ | ||
| 911 | kfree(opts); | 912 | kfree(opts); |
| 912 | } | 913 | } |
| 913 | 914 | ||
| @@ -916,6 +917,7 @@ static struct usb_function_instance *rndis_alloc_inst(void) | |||
| 916 | struct f_rndis_opts *opts; | 917 | struct f_rndis_opts *opts; |
| 917 | struct usb_os_desc *descs[1]; | 918 | struct usb_os_desc *descs[1]; |
| 918 | char *names[1]; | 919 | char *names[1]; |
| 920 | struct config_group *rndis_interf_group; | ||
| 919 | 921 | ||
| 920 | opts = kzalloc(sizeof(*opts), GFP_KERNEL); | 922 | opts = kzalloc(sizeof(*opts), GFP_KERNEL); |
| 921 | if (!opts) | 923 | if (!opts) |
| @@ -940,8 +942,14 @@ static struct usb_function_instance *rndis_alloc_inst(void) | |||
| 940 | names[0] = "rndis"; | 942 | names[0] = "rndis"; |
| 941 | config_group_init_type_name(&opts->func_inst.group, "", | 943 | config_group_init_type_name(&opts->func_inst.group, "", |
| 942 | &rndis_func_type); | 944 | &rndis_func_type); |
| 943 | usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, | 945 | rndis_interf_group = |
| 944 | names, THIS_MODULE); | 946 | usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, |
| 947 | names, THIS_MODULE); | ||
| 948 | if (IS_ERR(rndis_interf_group)) { | ||
| 949 | rndis_free_inst(&opts->func_inst); | ||
| 950 | return ERR_CAST(rndis_interf_group); | ||
| 951 | } | ||
| 952 | opts->rndis_interf_group = rndis_interf_group; | ||
| 945 | 953 | ||
| 946 | return &opts->func_inst; | 954 | return &opts->func_inst; |
| 947 | } | 955 | } |
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h index a35ee3c2545d..efdb7ac381d9 100644 --- a/drivers/usb/gadget/function/u_rndis.h +++ b/drivers/usb/gadget/function/u_rndis.h | |||
| @@ -26,6 +26,7 @@ struct f_rndis_opts { | |||
| 26 | bool bound; | 26 | bool bound; |
| 27 | bool borrowed_net; | 27 | bool borrowed_net; |
| 28 | 28 | ||
| 29 | struct config_group *rndis_interf_group; | ||
| 29 | struct usb_os_desc rndis_os_desc; | 30 | struct usb_os_desc rndis_os_desc; |
| 30 | char rndis_ext_compat_id[16]; | 31 | char rndis_ext_compat_id[16]; |
| 31 | 32 | ||
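
The configfs and f_rndis hunks above convert usb_os_desc_prepare_interf_dir() from returning an int to returning the created config_group or an ERR_PTR-encoded error, so the rndis code can store the group in f_rndis_opts and free it in rndis_free_inst(). A hedged userspace sketch of that calling convention; err_ptr()/is_err()/ptr_err() are simplified stand-ins for the kernel macros:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR(); 4095 mirrors the
 * kernel's MAX_ERRNO. */
static void *err_ptr(long err)      { return (void *)(intptr_t)err; }
static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }
static long  ptr_err(const void *p) { return (long)(intptr_t)p; }

struct config_group { int unused; };

/* Sketch of the new convention: return the created group on success or an
 * encoded error pointer on failure, so the caller can keep the pointer. */
static struct config_group *prepare_interf_dir(int simulate_oom)
{
	if (simulate_oom)
		return err_ptr(-ENOMEM);
	return calloc(1, sizeof(struct config_group));
}

int main(void)
{
	struct config_group *grp = prepare_interf_dir(0);

	if (is_err(grp)) {
		fprintf(stderr, "prepare failed: %ld\n", ptr_err(grp));
		return 1;
	}
	free(grp);	/* the caller owns the group, as f_rndis_opts now records */
	return 0;
}
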
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index b17618a55f1b..f04e91ef9e7c 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c | |||
| @@ -419,6 +419,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd) | |||
| 419 | static void set_link_state(struct dummy_hcd *dum_hcd) | 419 | static void set_link_state(struct dummy_hcd *dum_hcd) |
| 420 | { | 420 | { |
| 421 | struct dummy *dum = dum_hcd->dum; | 421 | struct dummy *dum = dum_hcd->dum; |
| 422 | unsigned int power_bit; | ||
| 422 | 423 | ||
| 423 | dum_hcd->active = 0; | 424 | dum_hcd->active = 0; |
| 424 | if (dum->pullup) | 425 | if (dum->pullup) |
| @@ -429,17 +430,19 @@ static void set_link_state(struct dummy_hcd *dum_hcd) | |||
| 429 | return; | 430 | return; |
| 430 | 431 | ||
| 431 | set_link_state_by_speed(dum_hcd); | 432 | set_link_state_by_speed(dum_hcd); |
| 433 | power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ? | ||
| 434 | USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER); | ||
| 432 | 435 | ||
| 433 | if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 || | 436 | if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 || |
| 434 | dum_hcd->active) | 437 | dum_hcd->active) |
| 435 | dum_hcd->resuming = 0; | 438 | dum_hcd->resuming = 0; |
| 436 | 439 | ||
| 437 | /* Currently !connected or in reset */ | 440 | /* Currently !connected or in reset */ |
| 438 | if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 || | 441 | if ((dum_hcd->port_status & power_bit) == 0 || |
| 439 | (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) { | 442 | (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) { |
| 440 | unsigned disconnect = USB_PORT_STAT_CONNECTION & | 443 | unsigned int disconnect = power_bit & |
| 441 | dum_hcd->old_status & (~dum_hcd->port_status); | 444 | dum_hcd->old_status & (~dum_hcd->port_status); |
| 442 | unsigned reset = USB_PORT_STAT_RESET & | 445 | unsigned int reset = USB_PORT_STAT_RESET & |
| 443 | (~dum_hcd->old_status) & dum_hcd->port_status; | 446 | (~dum_hcd->old_status) & dum_hcd->port_status; |
| 444 | 447 | ||
| 445 | /* Report reset and disconnect events to the driver */ | 448 | /* Report reset and disconnect events to the driver */ |
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index eee82ca55b7b..b3fc602b2e24 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c | |||
| @@ -202,12 +202,13 @@ found: | |||
| 202 | return tmp; | 202 | return tmp; |
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | if (in) { | 205 | if (in) |
| 206 | dev->in_pipe = usb_rcvbulkpipe(udev, | 206 | dev->in_pipe = usb_rcvbulkpipe(udev, |
| 207 | in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); | 207 | in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); |
| 208 | if (out) | ||
| 208 | dev->out_pipe = usb_sndbulkpipe(udev, | 209 | dev->out_pipe = usb_sndbulkpipe(udev, |
| 209 | out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); | 210 | out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); |
| 210 | } | 211 | |
| 211 | if (iso_in) { | 212 | if (iso_in) { |
| 212 | dev->iso_in = &iso_in->desc; | 213 | dev->iso_in = &iso_in->desc; |
| 213 | dev->in_iso_pipe = usb_rcvisocpipe(udev, | 214 | dev->in_iso_pipe = usb_rcvisocpipe(udev, |
| @@ -1964,6 +1965,9 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param, | |||
| 1964 | int status = 0; | 1965 | int status = 0; |
| 1965 | struct urb *urbs[param->sglen]; | 1966 | struct urb *urbs[param->sglen]; |
| 1966 | 1967 | ||
| 1968 | if (!param->sglen || param->iterations > UINT_MAX / param->sglen) | ||
| 1969 | return -EINVAL; | ||
| 1970 | |||
| 1967 | memset(&context, 0, sizeof(context)); | 1971 | memset(&context, 0, sizeof(context)); |
| 1968 | context.count = param->iterations * param->sglen; | 1972 | context.count = param->iterations * param->sglen; |
| 1969 | context.dev = dev; | 1973 | context.dev = dev; |
| @@ -2087,6 +2091,8 @@ usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param) | |||
| 2087 | 2091 | ||
| 2088 | if (param->iterations <= 0) | 2092 | if (param->iterations <= 0) |
| 2089 | return -EINVAL; | 2093 | return -EINVAL; |
| 2094 | if (param->sglen > MAX_SGLEN) | ||
| 2095 | return -EINVAL; | ||
| 2090 | /* | 2096 | /* |
| 2091 | * Just a bunch of test cases that every HCD is expected to handle. | 2097 | * Just a bunch of test cases that every HCD is expected to handle. |
| 2092 | * | 2098 | * |
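
usbtest_do_ioctl() now bounds sglen by MAX_SGLEN, and test_queue() rejects a zero sglen or an iterations*sglen product that would overflow the 32-bit completion count, checking by division before multiplying. A small userspace sketch of that overflow guard, for illustration only:

#include <limits.h>
#include <stdio.h>

/* Sketch of the guard added to test_queue(): validate by division before
 * multiplying so iterations * sglen can never wrap a 32-bit count. */
static int checked_count(unsigned int iterations, unsigned int sglen,
			 unsigned int *count)
{
	if (!sglen || iterations > UINT_MAX / sglen)
		return -1;	/* -EINVAL in the kernel */
	*count = iterations * sglen;
	return 0;
}

int main(void)
{
	unsigned int count;

	if (checked_count(3, 8, &count) == 0)
		printf("count = %u\n", count);		/* 24 */
	if (checked_count(UINT_MAX, 2, &count) != 0)
		printf("rejected: product would overflow\n");
	return 0;
}
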
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index 5fe4a5704bde..ccc2bf5274b4 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c | |||
| @@ -329,6 +329,14 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy) | |||
| 329 | unsigned long val; | 329 | unsigned long val; |
| 330 | void __iomem *base = phy->regs; | 330 | void __iomem *base = phy->regs; |
| 331 | 331 | ||
| 332 | /* | ||
| 333 | * The USB driver may have already initiated the phy clock | ||
| 334 | * disable so wait to see if the clock turns off and if not | ||
| 335 | * then proceed with gating the clock. | ||
| 336 | */ | ||
| 337 | if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) == 0) | ||
| 338 | return; | ||
| 339 | |||
| 332 | if (phy->is_legacy_phy) { | 340 | if (phy->is_legacy_phy) { |
| 333 | val = readl(base + USB_SUSP_CTRL); | 341 | val = readl(base + USB_SUSP_CTRL); |
| 334 | val |= USB_SUSP_SET; | 342 | val |= USB_SUSP_SET; |
| @@ -351,6 +359,15 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy) | |||
| 351 | unsigned long val; | 359 | unsigned long val; |
| 352 | void __iomem *base = phy->regs; | 360 | void __iomem *base = phy->regs; |
| 353 | 361 | ||
| 362 | /* | ||
| 363 | * The USB driver may have already initiated the phy clock | ||
| 364 | * enable so wait to see if the clock turns on and if not | ||
| 365 | * then proceed with ungating the clock. | ||
| 366 | */ | ||
| 367 | if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, | ||
| 368 | USB_PHY_CLK_VALID) == 0) | ||
| 369 | return; | ||
| 370 | |||
| 354 | if (phy->is_legacy_phy) { | 371 | if (phy->is_legacy_phy) { |
| 355 | val = readl(base + USB_SUSP_CTRL); | 372 | val = readl(base + USB_SUSP_CTRL); |
| 356 | val |= USB_SUSP_CLR; | 373 | val |= USB_SUSP_CLR; |
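
The phy-tegra-usb.c hunks poll USB_PHY_CLK_VALID before gating or ungating the PHY clock, so a transition the USB driver has already initiated is not repeated. A minimal sketch of the wait-then-act shape, assuming a hypothetical poll_bit() helper in place of utmi_wait_register():

#include <stdio.h>

#define PHY_CLK_VALID	0x1u

/* Hypothetical poll helper standing in for utmi_wait_register(): returns 0
 * once (reg & mask) == want, or -1 if the retries run out. */
static int poll_bit(const unsigned int *reg, unsigned int mask,
		    unsigned int want, int tries)
{
	while (tries--) {
		if ((*reg & mask) == want)
			return 0;
	}
	return -1;	/* timed out */
}

/* Post-patch shape of utmi_phy_clk_disable(): if the clock already reads as
 * stopped, someone else initiated the transition and there is nothing to do. */
static void phy_clk_disable(const unsigned int *susp_ctrl)
{
	if (poll_bit(susp_ctrl, PHY_CLK_VALID, 0, 1000) == 0)
		return;

	printf("gating PHY clock\n");	/* the register writes are elided */
}

int main(void)
{
	unsigned int susp_ctrl = PHY_CLK_VALID;	/* clock still running */

	phy_clk_disable(&susp_ctrl);
	return 0;
}
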
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 68f26904c316..50285b01da92 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c | |||
| @@ -857,9 +857,9 @@ static void xfer_work(struct work_struct *work) | |||
| 857 | fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); | 857 | fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); |
| 858 | 858 | ||
| 859 | usbhs_pipe_running(pipe, 1); | 859 | usbhs_pipe_running(pipe, 1); |
| 860 | usbhsf_dma_start(pipe, fifo); | ||
| 861 | usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); | 860 | usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); |
| 862 | dma_async_issue_pending(chan); | 861 | dma_async_issue_pending(chan); |
| 862 | usbhsf_dma_start(pipe, fifo); | ||
| 863 | usbhs_pipe_enable(pipe); | 863 | usbhs_pipe_enable(pipe); |
| 864 | 864 | ||
| 865 | xfer_work_end: | 865 | xfer_work_end: |
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index fdf89800ebc3..43a862a90a77 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c | |||
| @@ -186,6 +186,7 @@ static int usb_console_setup(struct console *co, char *options) | |||
| 186 | tty_kref_put(tty); | 186 | tty_kref_put(tty); |
| 187 | reset_open_count: | 187 | reset_open_count: |
| 188 | port->port.count = 0; | 188 | port->port.count = 0; |
| 189 | info->port = NULL; | ||
| 189 | usb_autopm_put_interface(serial->interface); | 190 | usb_autopm_put_interface(serial->interface); |
| 190 | error_get_interface: | 191 | error_get_interface: |
| 191 | usb_serial_put(serial); | 192 | usb_serial_put(serial); |
| @@ -265,7 +266,7 @@ static struct console usbcons = { | |||
| 265 | 266 | ||
| 266 | void usb_serial_console_disconnect(struct usb_serial *serial) | 267 | void usb_serial_console_disconnect(struct usb_serial *serial) |
| 267 | { | 268 | { |
| 268 | if (serial->port[0] == usbcons_info.port) { | 269 | if (serial->port[0] && serial->port[0] == usbcons_info.port) { |
| 269 | usb_serial_console_exit(); | 270 | usb_serial_console_exit(); |
| 270 | usb_serial_put(serial); | 271 | usb_serial_put(serial); |
| 271 | } | 272 | } |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 2d945c9f975c..412f812522ee 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
| @@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = { | |||
| 177 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ | 177 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ |
| 178 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ | 178 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ |
| 179 | { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ | 179 | { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ |
| 180 | { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ | ||
| 180 | { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ | 181 | { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ |
| 181 | { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ | 182 | { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ |
| 182 | { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ | 183 | { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ |
| @@ -352,6 +353,7 @@ static struct usb_serial_driver * const serial_drivers[] = { | |||
| 352 | #define CP210X_PARTNUM_CP2104 0x04 | 353 | #define CP210X_PARTNUM_CP2104 0x04 |
| 353 | #define CP210X_PARTNUM_CP2105 0x05 | 354 | #define CP210X_PARTNUM_CP2105 0x05 |
| 354 | #define CP210X_PARTNUM_CP2108 0x08 | 355 | #define CP210X_PARTNUM_CP2108 0x08 |
| 356 | #define CP210X_PARTNUM_UNKNOWN 0xFF | ||
| 355 | 357 | ||
| 356 | /* CP210X_GET_COMM_STATUS returns these 0x13 bytes */ | 358 | /* CP210X_GET_COMM_STATUS returns these 0x13 bytes */ |
| 357 | struct cp210x_comm_status { | 359 | struct cp210x_comm_status { |
| @@ -1491,8 +1493,11 @@ static int cp210x_attach(struct usb_serial *serial) | |||
| 1491 | result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, | 1493 | result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, |
| 1492 | CP210X_GET_PARTNUM, &priv->partnum, | 1494 | CP210X_GET_PARTNUM, &priv->partnum, |
| 1493 | sizeof(priv->partnum)); | 1495 | sizeof(priv->partnum)); |
| 1494 | if (result < 0) | 1496 | if (result < 0) { |
| 1495 | goto err_free_priv; | 1497 | dev_warn(&serial->interface->dev, |
| 1498 | "querying part number failed\n"); | ||
| 1499 | priv->partnum = CP210X_PARTNUM_UNKNOWN; | ||
| 1500 | } | ||
| 1496 | 1501 | ||
| 1497 | usb_set_serial_data(serial, priv); | 1502 | usb_set_serial_data(serial, priv); |
| 1498 | 1503 | ||
| @@ -1505,10 +1510,6 @@ static int cp210x_attach(struct usb_serial *serial) | |||
| 1505 | } | 1510 | } |
| 1506 | 1511 | ||
| 1507 | return 0; | 1512 | return 0; |
| 1508 | err_free_priv: | ||
| 1509 | kfree(priv); | ||
| 1510 | |||
| 1511 | return result; | ||
| 1512 | } | 1513 | } |
| 1513 | 1514 | ||
| 1514 | static void cp210x_disconnect(struct usb_serial *serial) | 1515 | static void cp210x_disconnect(struct usb_serial *serial) |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 1cec03799cdf..49d1b2d4606d 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
| @@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = { | |||
| 1015 | { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, | 1015 | { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, |
| 1016 | { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), | 1016 | { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), |
| 1017 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 1017 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
| 1018 | { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) }, | ||
| 1019 | { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) }, | ||
| 1018 | { } /* Terminating entry */ | 1020 | { } /* Terminating entry */ |
| 1019 | }; | 1021 | }; |
| 1020 | 1022 | ||
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 4fcf1cecb6d7..f9d15bd62785 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
| @@ -610,6 +610,13 @@ | |||
| 610 | #define ADI_GNICEPLUS_PID 0xF001 | 610 | #define ADI_GNICEPLUS_PID 0xF001 |
| 611 | 611 | ||
| 612 | /* | 612 | /* |
| 613 | * Cypress WICED USB UART | ||
| 614 | */ | ||
| 615 | #define CYPRESS_VID 0x04B4 | ||
| 616 | #define CYPRESS_WICED_BT_USB_PID 0x009B | ||
| 617 | #define CYPRESS_WICED_WL_USB_PID 0xF900 | ||
| 618 | |||
| 619 | /* | ||
| 613 | * Microchip Technology, Inc. | 620 | * Microchip Technology, Inc. |
| 614 | * | 621 | * |
| 615 | * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are | 622 | * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 54bfef13966a..ba672cf4e888 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -522,6 +522,7 @@ static void option_instat_callback(struct urb *urb); | |||
| 522 | 522 | ||
| 523 | /* TP-LINK Incorporated products */ | 523 | /* TP-LINK Incorporated products */ |
| 524 | #define TPLINK_VENDOR_ID 0x2357 | 524 | #define TPLINK_VENDOR_ID 0x2357 |
| 525 | #define TPLINK_PRODUCT_LTE 0x000D | ||
| 525 | #define TPLINK_PRODUCT_MA180 0x0201 | 526 | #define TPLINK_PRODUCT_MA180 0x0201 |
| 526 | 527 | ||
| 527 | /* Changhong products */ | 528 | /* Changhong products */ |
| @@ -2011,6 +2012,7 @@ static const struct usb_device_id option_ids[] = { | |||
| 2011 | { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, | 2012 | { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, |
| 2012 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, | 2013 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, |
| 2013 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, | 2014 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, |
| 2015 | { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */ | ||
| 2014 | { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), | 2016 | { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), |
| 2015 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 2017 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
| 2016 | { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */ | 2018 | { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */ |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index ebc0beea69d6..eb9928963a53 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
| @@ -174,6 +174,10 @@ static const struct usb_device_id id_table[] = { | |||
| 174 | {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ | 174 | {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ |
| 175 | {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ | 175 | {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ |
| 176 | {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ | 176 | {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ |
| 177 | {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ | ||
| 178 | {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ | ||
| 179 | {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */ | ||
| 180 | {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */ | ||
| 177 | 181 | ||
| 178 | /* Huawei devices */ | 182 | /* Huawei devices */ |
| 179 | {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ | 183 | {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ |
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index adaf6f6dd858..e1cbdfdb7c68 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c | |||
| @@ -310,9 +310,13 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping, | |||
| 310 | 310 | ||
| 311 | p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); | 311 | p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); |
| 312 | 312 | ||
| 313 | if (unlikely(copied < len && !PageUptodate(page))) { | 313 | if (!PageUptodate(page)) { |
| 314 | copied = 0; | 314 | if (unlikely(copied < len)) { |
| 315 | goto out; | 315 | copied = 0; |
| 316 | goto out; | ||
| 317 | } else if (len == PAGE_SIZE) { | ||
| 318 | SetPageUptodate(page); | ||
| 319 | } | ||
| 316 | } | 320 | } |
| 317 | /* | 321 | /* |
| 318 | * No need to use i_size_read() here, the i_size | 322 | * No need to use i_size_read() here, the i_size |
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index ce7181ea60fa..a7c5a9861bef 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c | |||
| @@ -54,7 +54,7 @@ typedef struct { | |||
| 54 | int size; /* size of magic/mask */ | 54 | int size; /* size of magic/mask */ |
| 55 | char *magic; /* magic or filename extension */ | 55 | char *magic; /* magic or filename extension */ |
| 56 | char *mask; /* mask, NULL for exact match */ | 56 | char *mask; /* mask, NULL for exact match */ |
| 57 | char *interpreter; /* filename of interpreter */ | 57 | const char *interpreter; /* filename of interpreter */ |
| 58 | char *name; | 58 | char *name; |
| 59 | struct dentry *dentry; | 59 | struct dentry *dentry; |
| 60 | struct file *interp_file; | 60 | struct file *interp_file; |
| @@ -131,27 +131,26 @@ static int load_misc_binary(struct linux_binprm *bprm) | |||
| 131 | { | 131 | { |
| 132 | Node *fmt; | 132 | Node *fmt; |
| 133 | struct file *interp_file = NULL; | 133 | struct file *interp_file = NULL; |
| 134 | char iname[BINPRM_BUF_SIZE]; | ||
| 135 | const char *iname_addr = iname; | ||
| 136 | int retval; | 134 | int retval; |
| 137 | int fd_binary = -1; | 135 | int fd_binary = -1; |
| 138 | 136 | ||
| 139 | retval = -ENOEXEC; | 137 | retval = -ENOEXEC; |
| 140 | if (!enabled) | 138 | if (!enabled) |
| 141 | goto ret; | 139 | return retval; |
| 142 | 140 | ||
| 143 | /* to keep locking time low, we copy the interpreter string */ | 141 | /* to keep locking time low, we copy the interpreter string */ |
| 144 | read_lock(&entries_lock); | 142 | read_lock(&entries_lock); |
| 145 | fmt = check_file(bprm); | 143 | fmt = check_file(bprm); |
| 146 | if (fmt) | 144 | if (fmt) |
| 147 | strlcpy(iname, fmt->interpreter, BINPRM_BUF_SIZE); | 145 | dget(fmt->dentry); |
| 148 | read_unlock(&entries_lock); | 146 | read_unlock(&entries_lock); |
| 149 | if (!fmt) | 147 | if (!fmt) |
| 150 | goto ret; | 148 | return retval; |
| 151 | 149 | ||
| 152 | /* Need to be able to load the file after exec */ | 150 | /* Need to be able to load the file after exec */ |
| 151 | retval = -ENOENT; | ||
| 153 | if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) | 152 | if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) |
| 154 | return -ENOENT; | 153 | goto ret; |
| 155 | 154 | ||
| 156 | if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) { | 155 | if (!(fmt->flags & MISC_FMT_PRESERVE_ARGV0)) { |
| 157 | retval = remove_arg_zero(bprm); | 156 | retval = remove_arg_zero(bprm); |
| @@ -195,22 +194,22 @@ static int load_misc_binary(struct linux_binprm *bprm) | |||
| 195 | bprm->argc++; | 194 | bprm->argc++; |
| 196 | 195 | ||
| 197 | /* add the interp as argv[0] */ | 196 | /* add the interp as argv[0] */ |
| 198 | retval = copy_strings_kernel(1, &iname_addr, bprm); | 197 | retval = copy_strings_kernel(1, &fmt->interpreter, bprm); |
| 199 | if (retval < 0) | 198 | if (retval < 0) |
| 200 | goto error; | 199 | goto error; |
| 201 | bprm->argc++; | 200 | bprm->argc++; |
| 202 | 201 | ||
| 203 | /* Update interp in case binfmt_script needs it. */ | 202 | /* Update interp in case binfmt_script needs it. */ |
| 204 | retval = bprm_change_interp(iname, bprm); | 203 | retval = bprm_change_interp(fmt->interpreter, bprm); |
| 205 | if (retval < 0) | 204 | if (retval < 0) |
| 206 | goto error; | 205 | goto error; |
| 207 | 206 | ||
| 208 | if (fmt->flags & MISC_FMT_OPEN_FILE && fmt->interp_file) { | 207 | if (fmt->flags & MISC_FMT_OPEN_FILE) { |
| 209 | interp_file = filp_clone_open(fmt->interp_file); | 208 | interp_file = filp_clone_open(fmt->interp_file); |
| 210 | if (!IS_ERR(interp_file)) | 209 | if (!IS_ERR(interp_file)) |
| 211 | deny_write_access(interp_file); | 210 | deny_write_access(interp_file); |
| 212 | } else { | 211 | } else { |
| 213 | interp_file = open_exec(iname); | 212 | interp_file = open_exec(fmt->interpreter); |
| 214 | } | 213 | } |
| 215 | retval = PTR_ERR(interp_file); | 214 | retval = PTR_ERR(interp_file); |
| 216 | if (IS_ERR(interp_file)) | 215 | if (IS_ERR(interp_file)) |
| @@ -238,6 +237,7 @@ static int load_misc_binary(struct linux_binprm *bprm) | |||
| 238 | goto error; | 237 | goto error; |
| 239 | 238 | ||
| 240 | ret: | 239 | ret: |
| 240 | dput(fmt->dentry); | ||
| 241 | return retval; | 241 | return retval; |
| 242 | error: | 242 | error: |
| 243 | if (fd_binary > 0) | 243 | if (fd_binary > 0) |
| @@ -594,8 +594,13 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode) | |||
| 594 | 594 | ||
| 595 | static void bm_evict_inode(struct inode *inode) | 595 | static void bm_evict_inode(struct inode *inode) |
| 596 | { | 596 | { |
| 597 | Node *e = inode->i_private; | ||
| 598 | |||
| 599 | if (e && e->flags & MISC_FMT_OPEN_FILE) | ||
| 600 | filp_close(e->interp_file, NULL); | ||
| 601 | |||
| 597 | clear_inode(inode); | 602 | clear_inode(inode); |
| 598 | kfree(inode->i_private); | 603 | kfree(e); |
| 599 | } | 604 | } |
| 600 | 605 | ||
| 601 | static void kill_node(Node *e) | 606 | static void kill_node(Node *e) |
| @@ -603,24 +608,14 @@ static void kill_node(Node *e) | |||
| 603 | struct dentry *dentry; | 608 | struct dentry *dentry; |
| 604 | 609 | ||
| 605 | write_lock(&entries_lock); | 610 | write_lock(&entries_lock); |
| 606 | dentry = e->dentry; | 611 | list_del_init(&e->list); |
| 607 | if (dentry) { | ||
| 608 | list_del_init(&e->list); | ||
| 609 | e->dentry = NULL; | ||
| 610 | } | ||
| 611 | write_unlock(&entries_lock); | 612 | write_unlock(&entries_lock); |
| 612 | 613 | ||
| 613 | if ((e->flags & MISC_FMT_OPEN_FILE) && e->interp_file) { | 614 | dentry = e->dentry; |
| 614 | filp_close(e->interp_file, NULL); | 615 | drop_nlink(d_inode(dentry)); |
| 615 | e->interp_file = NULL; | 616 | d_drop(dentry); |
| 616 | } | 617 | dput(dentry); |
| 617 | 618 | simple_release_fs(&bm_mnt, &entry_count); | |
| 618 | if (dentry) { | ||
| 619 | drop_nlink(d_inode(dentry)); | ||
| 620 | d_drop(dentry); | ||
| 621 | dput(dentry); | ||
| 622 | simple_release_fs(&bm_mnt, &entry_count); | ||
| 623 | } | ||
| 624 | } | 619 | } |
| 625 | 620 | ||
| 626 | /* /<entry> */ | 621 | /* /<entry> */ |
| @@ -665,7 +660,8 @@ static ssize_t bm_entry_write(struct file *file, const char __user *buffer, | |||
| 665 | root = file_inode(file)->i_sb->s_root; | 660 | root = file_inode(file)->i_sb->s_root; |
| 666 | inode_lock(d_inode(root)); | 661 | inode_lock(d_inode(root)); |
| 667 | 662 | ||
| 668 | kill_node(e); | 663 | if (!list_empty(&e->list)) |
| 664 | kill_node(e); | ||
| 669 | 665 | ||
| 670 | inode_unlock(d_inode(root)); | 666 | inode_unlock(d_inode(root)); |
| 671 | break; | 667 | break; |
| @@ -794,7 +790,7 @@ static ssize_t bm_status_write(struct file *file, const char __user *buffer, | |||
| 794 | inode_lock(d_inode(root)); | 790 | inode_lock(d_inode(root)); |
| 795 | 791 | ||
| 796 | while (!list_empty(&entries)) | 792 | while (!list_empty(&entries)) |
| 797 | kill_node(list_entry(entries.next, Node, list)); | 793 | kill_node(list_first_entry(&entries, Node, list)); |
| 798 | 794 | ||
| 799 | inode_unlock(d_inode(root)); | 795 | inode_unlock(d_inode(root)); |
| 800 | break; | 796 | break; |
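
The binfmt_misc rework above stops copying the interpreter path into a BINPRM_BUF_SIZE stack buffer; instead it takes a dentry reference under entries_lock so the Node and its interpreter string stay valid after the lock is dropped, and releases the reference with dput() at the ret label. A rough userspace sketch of that pin-under-lock pattern, using a plain counter instead of a dentry refcount:

#include <pthread.h>
#include <stdio.h>

struct node {
	int refcount;			/* stands in for the dentry refcount */
	const char *interpreter;
};

static pthread_rwlock_t entries_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Sketch: look the node up under a read lock, pin it, then keep using
 * node->interpreter after the lock is dropped; drop the pin when done. */
static void use_interpreter(struct node *n)
{
	pthread_rwlock_rdlock(&entries_lock);
	n->refcount++;			/* dget(fmt->dentry) in the kernel */
	pthread_rwlock_unlock(&entries_lock);

	printf("exec via %s\n", n->interpreter);

	n->refcount--;			/* dput(fmt->dentry) in the kernel */
}

int main(void)
{
	struct node n = { 0, "/usr/bin/some-interpreter" };	/* example path */

	use_interpreter(&n);
	return 0;
}
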
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index afdf4e3cafc2..7cde3f46ad26 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c | |||
| @@ -19,7 +19,6 @@ static int load_script(struct linux_binprm *bprm) | |||
| 19 | const char *i_arg, *i_name; | 19 | const char *i_arg, *i_name; |
| 20 | char *cp; | 20 | char *cp; |
| 21 | struct file *file; | 21 | struct file *file; |
| 22 | char interp[BINPRM_BUF_SIZE]; | ||
| 23 | int retval; | 22 | int retval; |
| 24 | 23 | ||
| 25 | if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) | 24 | if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) |
| @@ -55,7 +54,7 @@ static int load_script(struct linux_binprm *bprm) | |||
| 55 | break; | 54 | break; |
| 56 | } | 55 | } |
| 57 | for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++); | 56 | for (cp = bprm->buf+2; (*cp == ' ') || (*cp == '\t'); cp++); |
| 58 | if (*cp == '\0') | 57 | if (*cp == '\0') |
| 59 | return -ENOEXEC; /* No interpreter name found */ | 58 | return -ENOEXEC; /* No interpreter name found */ |
| 60 | i_name = cp; | 59 | i_name = cp; |
| 61 | i_arg = NULL; | 60 | i_arg = NULL; |
| @@ -65,7 +64,6 @@ static int load_script(struct linux_binprm *bprm) | |||
| 65 | *cp++ = '\0'; | 64 | *cp++ = '\0'; |
| 66 | if (*cp) | 65 | if (*cp) |
| 67 | i_arg = cp; | 66 | i_arg = cp; |
| 68 | strcpy (interp, i_name); | ||
| 69 | /* | 67 | /* |
| 70 | * OK, we've parsed out the interpreter name and | 68 | * OK, we've parsed out the interpreter name and |
| 71 | * (optional) argument. | 69 | * (optional) argument. |
| @@ -80,24 +78,27 @@ static int load_script(struct linux_binprm *bprm) | |||
| 80 | if (retval) | 78 | if (retval) |
| 81 | return retval; | 79 | return retval; |
| 82 | retval = copy_strings_kernel(1, &bprm->interp, bprm); | 80 | retval = copy_strings_kernel(1, &bprm->interp, bprm); |
| 83 | if (retval < 0) return retval; | 81 | if (retval < 0) |
| 82 | return retval; | ||
| 84 | bprm->argc++; | 83 | bprm->argc++; |
| 85 | if (i_arg) { | 84 | if (i_arg) { |
| 86 | retval = copy_strings_kernel(1, &i_arg, bprm); | 85 | retval = copy_strings_kernel(1, &i_arg, bprm); |
| 87 | if (retval < 0) return retval; | 86 | if (retval < 0) |
| 87 | return retval; | ||
| 88 | bprm->argc++; | 88 | bprm->argc++; |
| 89 | } | 89 | } |
| 90 | retval = copy_strings_kernel(1, &i_name, bprm); | 90 | retval = copy_strings_kernel(1, &i_name, bprm); |
| 91 | if (retval) return retval; | 91 | if (retval) |
| 92 | return retval; | ||
| 92 | bprm->argc++; | 93 | bprm->argc++; |
| 93 | retval = bprm_change_interp(interp, bprm); | 94 | retval = bprm_change_interp(i_name, bprm); |
| 94 | if (retval < 0) | 95 | if (retval < 0) |
| 95 | return retval; | 96 | return retval; |
| 96 | 97 | ||
| 97 | /* | 98 | /* |
| 98 | * OK, now restart the process with the interpreter's dentry. | 99 | * OK, now restart the process with the interpreter's dentry. |
| 99 | */ | 100 | */ |
| 100 | file = open_exec(interp); | 101 | file = open_exec(i_name); |
| 101 | if (IS_ERR(file)) | 102 | if (IS_ERR(file)) |
| 102 | return PTR_ERR(file); | 103 | return PTR_ERR(file); |
| 103 | 104 | ||
diff --git a/fs/block_dev.c b/fs/block_dev.c index 93d088ffc05c..789f55e851ae 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -716,10 +716,12 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, | |||
| 716 | 716 | ||
| 717 | set_page_writeback(page); | 717 | set_page_writeback(page); |
| 718 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true); | 718 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true); |
| 719 | if (result) | 719 | if (result) { |
| 720 | end_page_writeback(page); | 720 | end_page_writeback(page); |
| 721 | else | 721 | } else { |
| 722 | clean_page_buffers(page); | ||
| 722 | unlock_page(page); | 723 | unlock_page(page); |
| 724 | } | ||
| 723 | blk_queue_exit(bdev->bd_queue); | 725 | blk_queue_exit(bdev->bd_queue); |
| 724 | return result; | 726 | return result; |
| 725 | } | 727 | } |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 899ddaeeacec..8fc690384c58 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
| @@ -722,7 +722,7 @@ struct btrfs_delayed_root; | |||
| 722 | * Indicate that a whole-filesystem exclusive operation is running | 722 | * Indicate that a whole-filesystem exclusive operation is running |
| 723 | * (device replace, resize, device add/delete, balance) | 723 | * (device replace, resize, device add/delete, balance) |
| 724 | */ | 724 | */ |
| 725 | #define BTRFS_FS_EXCL_OP 14 | 725 | #define BTRFS_FS_EXCL_OP 16 |
| 726 | 726 | ||
| 727 | struct btrfs_fs_info { | 727 | struct btrfs_fs_info { |
| 728 | u8 fsid[BTRFS_FSID_SIZE]; | 728 | u8 fsid[BTRFS_FSID_SIZE]; |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 12ab19a4b93e..970190cd347e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
| @@ -2801,7 +2801,7 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree, | |||
| 2801 | } | 2801 | } |
| 2802 | } | 2802 | } |
| 2803 | 2803 | ||
| 2804 | bio = btrfs_bio_alloc(bdev, sector << 9); | 2804 | bio = btrfs_bio_alloc(bdev, (u64)sector << 9); |
| 2805 | bio_add_page(bio, page, page_size, offset); | 2805 | bio_add_page(bio, page, page_size, offset); |
| 2806 | bio->bi_end_io = end_io_func; | 2806 | bio->bi_end_io = end_io_func; |
| 2807 | bio->bi_private = tree; | 2807 | bio->bi_private = tree; |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 84edfc60d87a..f23c820daaed 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
| @@ -734,12 +734,13 @@ static int __choose_mds(struct ceph_mds_client *mdsc, | |||
| 734 | inode = req->r_inode; | 734 | inode = req->r_inode; |
| 735 | ihold(inode); | 735 | ihold(inode); |
| 736 | } else { | 736 | } else { |
| 737 | /* req->r_dentry is non-null for LSSNAP request. | 737 | /* req->r_dentry is non-null for LSSNAP request */ |
| 738 | * fall-thru */ | 738 | rcu_read_lock(); |
| 739 | WARN_ON_ONCE(!req->r_dentry); | 739 | inode = get_nonsnap_parent(req->r_dentry); |
| 740 | rcu_read_unlock(); | ||
| 741 | dout("__choose_mds using snapdir's parent %p\n", inode); | ||
| 740 | } | 742 | } |
| 741 | } | 743 | } else if (req->r_dentry) { |
| 742 | if (!inode && req->r_dentry) { | ||
| 743 | /* ignore race with rename; old or new d_parent is okay */ | 744 | /* ignore race with rename; old or new d_parent is okay */ |
| 744 | struct dentry *parent; | 745 | struct dentry *parent; |
| 745 | struct inode *dir; | 746 | struct inode *dir; |
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 1ffc8b426c1c..7fc0b850c352 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
| @@ -374,12 +374,10 @@ static int build_snap_context(struct ceph_snap_realm *realm, | |||
| 374 | realm->ino, realm, snapc, snapc->seq, | 374 | realm->ino, realm, snapc, snapc->seq, |
| 375 | (unsigned int) snapc->num_snaps); | 375 | (unsigned int) snapc->num_snaps); |
| 376 | 376 | ||
| 377 | if (realm->cached_context) { | 377 | ceph_put_snap_context(realm->cached_context); |
| 378 | ceph_put_snap_context(realm->cached_context); | ||
| 379 | /* queue realm for cap_snap creation */ | ||
| 380 | list_add_tail(&realm->dirty_item, dirty_realms); | ||
| 381 | } | ||
| 382 | realm->cached_context = snapc; | 378 | realm->cached_context = snapc; |
| 379 | /* queue realm for cap_snap creation */ | ||
| 380 | list_add_tail(&realm->dirty_item, dirty_realms); | ||
| 383 | return 0; | 381 | return 0; |
| 384 | 382 | ||
| 385 | fail: | 383 | fail: |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 62cf812ed0e5..96415c65bbdc 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
| @@ -866,7 +866,8 @@ out: | |||
| 866 | */ | 866 | */ |
| 867 | if (sdio->boundary) { | 867 | if (sdio->boundary) { |
| 868 | ret = dio_send_cur_page(dio, sdio, map_bh); | 868 | ret = dio_send_cur_page(dio, sdio, map_bh); |
| 869 | dio_bio_submit(dio, sdio); | 869 | if (sdio->bio) |
| 870 | dio_bio_submit(dio, sdio); | ||
| 870 | put_page(sdio->cur_page); | 871 | put_page(sdio->cur_page); |
| 871 | sdio->cur_page = NULL; | 872 | sdio->cur_page = NULL; |
| 872 | } | 873 | } |
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
| @@ -1410,7 +1410,7 @@ static void free_bprm(struct linux_binprm *bprm) | |||
| 1410 | kfree(bprm); | 1410 | kfree(bprm); |
| 1411 | } | 1411 | } |
| 1412 | 1412 | ||
| 1413 | int bprm_change_interp(char *interp, struct linux_binprm *bprm) | 1413 | int bprm_change_interp(const char *interp, struct linux_binprm *bprm) |
| 1414 | { | 1414 | { |
| 1415 | /* If a binfmt changed the interp, free it first. */ | 1415 | /* If a binfmt changed the interp, free it first. */ |
| 1416 | if (bprm->interp != bprm->filename) | 1416 | if (bprm->interp != bprm->filename) |
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 9a7c90386947..4b4a72f392be 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
| @@ -2525,7 +2525,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); | |||
| 2525 | bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); | 2525 | bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); |
| 2526 | void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new); | 2526 | void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new); |
| 2527 | void stop_discard_thread(struct f2fs_sb_info *sbi); | 2527 | void stop_discard_thread(struct f2fs_sb_info *sbi); |
| 2528 | void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi); | 2528 | void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount); |
| 2529 | void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); | 2529 | void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); |
| 2530 | void release_discard_addrs(struct f2fs_sb_info *sbi); | 2530 | void release_discard_addrs(struct f2fs_sb_info *sbi); |
| 2531 | int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); | 2531 | int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); |
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 621b9b3d320b..c695ff462ee6 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c | |||
| @@ -1210,11 +1210,11 @@ void stop_discard_thread(struct f2fs_sb_info *sbi) | |||
| 1210 | } | 1210 | } |
| 1211 | 1211 | ||
| 1212 | /* This comes from f2fs_put_super and f2fs_trim_fs */ | 1212 | /* This comes from f2fs_put_super and f2fs_trim_fs */ |
| 1213 | void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi) | 1213 | void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount) |
| 1214 | { | 1214 | { |
| 1215 | __issue_discard_cmd(sbi, false); | 1215 | __issue_discard_cmd(sbi, false); |
| 1216 | __drop_discard_cmd(sbi); | 1216 | __drop_discard_cmd(sbi); |
| 1217 | __wait_discard_cmd(sbi, false); | 1217 | __wait_discard_cmd(sbi, !umount); |
| 1218 | } | 1218 | } |
| 1219 | 1219 | ||
| 1220 | static void mark_discard_range_all(struct f2fs_sb_info *sbi) | 1220 | static void mark_discard_range_all(struct f2fs_sb_info *sbi) |
| @@ -2244,7 +2244,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) | |||
| 2244 | } | 2244 | } |
| 2245 | /* It's time to issue all the filed discards */ | 2245 | /* It's time to issue all the filed discards */ |
| 2246 | mark_discard_range_all(sbi); | 2246 | mark_discard_range_all(sbi); |
| 2247 | f2fs_wait_discard_bios(sbi); | 2247 | f2fs_wait_discard_bios(sbi, false); |
| 2248 | out: | 2248 | out: |
| 2249 | range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); | 2249 | range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); |
| 2250 | return err; | 2250 | return err; |
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 89f61eb3d167..933c3d529e65 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c | |||
| @@ -801,7 +801,7 @@ static void f2fs_put_super(struct super_block *sb) | |||
| 801 | } | 801 | } |
| 802 | 802 | ||
| 803 | /* be sure to wait for any on-going discard commands */ | 803 | /* be sure to wait for any on-going discard commands */ |
| 804 | f2fs_wait_discard_bios(sbi); | 804 | f2fs_wait_discard_bios(sbi, true); |
| 805 | 805 | ||
| 806 | if (f2fs_discard_en(sbi) && !sbi->discard_blks) { | 806 | if (f2fs_discard_en(sbi) && !sbi->discard_blks) { |
| 807 | struct cp_control cpc = { | 807 | struct cp_control cpc = { |
diff --git a/fs/mpage.c b/fs/mpage.c index 37bb77c1302c..c991faec70b9 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
| @@ -468,6 +468,16 @@ static void clean_buffers(struct page *page, unsigned first_unmapped) | |||
| 468 | try_to_free_buffers(page); | 468 | try_to_free_buffers(page); |
| 469 | } | 469 | } |
| 470 | 470 | ||
| 471 | /* | ||
| 472 | * For situations where we want to clean all buffers attached to a page. | ||
| 473 | * We don't need to calculate how many buffers are attached to the page, | ||
| 474 | * we just need to specify a number larger than the maximum number of buffers. | ||
| 475 | */ | ||
| 476 | void clean_page_buffers(struct page *page) | ||
| 477 | { | ||
| 478 | clean_buffers(page, ~0U); | ||
| 479 | } | ||
| 480 | |||
| 471 | static int __mpage_writepage(struct page *page, struct writeback_control *wbc, | 481 | static int __mpage_writepage(struct page *page, struct writeback_control *wbc, |
| 472 | void *data) | 482 | void *data) |
| 473 | { | 483 | { |
| @@ -605,10 +615,8 @@ alloc_new: | |||
| 605 | if (bio == NULL) { | 615 | if (bio == NULL) { |
| 606 | if (first_unmapped == blocks_per_page) { | 616 | if (first_unmapped == blocks_per_page) { |
| 607 | if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9), | 617 | if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9), |
| 608 | page, wbc)) { | 618 | page, wbc)) |
| 609 | clean_buffers(page, first_unmapped); | ||
| 610 | goto out; | 619 | goto out; |
| 611 | } | ||
| 612 | } | 620 | } |
| 613 | bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), | 621 | bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), |
| 614 | BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH); | 622 | BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH); |
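
Taken together, the block_dev.c and mpage.c hunks move buffer cleanup into bdev_write_page(): on success it calls the new clean_page_buffers() helper and unlocks the page, so __mpage_writepage() no longer cleans the buffers itself. A toy sketch of the "clean them all by passing an oversized count" idea behind clean_page_buffers():

#include <stdio.h>

struct buffer { int dirty; struct buffer *next; };

/* Sketch of clean_buffers(): clear the dirty bit on the first
 * 'first_unmapped' buffers hanging off a page. */
static void clean_buffers(struct buffer *head, unsigned int first_unmapped)
{
	unsigned int i = 0;
	struct buffer *b;

	for (b = head; b != NULL && i < first_unmapped; b = b->next, i++)
		b->dirty = 0;
}

/* clean_page_buffers(): pass a count larger than any possible buffer chain
 * so every buffer on the page gets cleaned. */
static void clean_page_buffers(struct buffer *head)
{
	clean_buffers(head, ~0U);
}

int main(void)
{
	struct buffer b2 = { 1, NULL };
	struct buffer b1 = { 1, &b2 };

	clean_page_buffers(&b1);
	printf("dirty flags: %d %d\n", b1.dirty, b2.dirty);	/* 0 0 */
	return 0;
}
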
diff --git a/fs/namespace.c b/fs/namespace.c index 54059b142d6b..3b601f115b6c 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
| @@ -468,7 +468,9 @@ static inline int may_write_real(struct file *file) | |||
| 468 | 468 | ||
| 469 | /* File refers to upper, writable layer? */ | 469 | /* File refers to upper, writable layer? */ |
| 470 | upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER); | 470 | upperdentry = d_real(dentry, NULL, 0, D_REAL_UPPER); |
| 471 | if (upperdentry && file_inode(file) == d_inode(upperdentry)) | 471 | if (upperdentry && |
| 472 | (file_inode(file) == d_inode(upperdentry) || | ||
| 473 | file_inode(file) == d_inode(dentry))) | ||
| 472 | return 0; | 474 | return 0; |
| 473 | 475 | ||
| 474 | /* Lower layer: can't write to real file, sorry... */ | 476 | /* Lower layer: can't write to real file, sorry... */ |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index efebe6cf4378..22880ef6d8dd 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
| @@ -218,7 +218,6 @@ static void nfs_cb_idr_remove_locked(struct nfs_client *clp) | |||
| 218 | static void pnfs_init_server(struct nfs_server *server) | 218 | static void pnfs_init_server(struct nfs_server *server) |
| 219 | { | 219 | { |
| 220 | rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC"); | 220 | rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC"); |
| 221 | rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC"); | ||
| 222 | } | 221 | } |
| 223 | 222 | ||
| 224 | #else | 223 | #else |
| @@ -888,6 +887,7 @@ struct nfs_server *nfs_alloc_server(void) | |||
| 888 | ida_init(&server->openowner_id); | 887 | ida_init(&server->openowner_id); |
| 889 | ida_init(&server->lockowner_id); | 888 | ida_init(&server->lockowner_id); |
| 890 | pnfs_init_server(server); | 889 | pnfs_init_server(server); |
| 890 | rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC"); | ||
| 891 | 891 | ||
| 892 | return server; | 892 | return server; |
| 893 | } | 893 | } |
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 44c638b7876c..508126eb49f9 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c | |||
| @@ -745,7 +745,8 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg) | |||
| 745 | struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); | 745 | struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); |
| 746 | 746 | ||
| 747 | dprintk("--> %s\n", __func__); | 747 | dprintk("--> %s\n", __func__); |
| 748 | nfs4_fl_put_deviceid(fl->dsaddr); | 748 | if (fl->dsaddr != NULL) |
| 749 | nfs4_fl_put_deviceid(fl->dsaddr); | ||
| 749 | /* This assumes a single RW lseg */ | 750 | /* This assumes a single RW lseg */ |
| 750 | if (lseg->pls_range.iomode == IOMODE_RW) { | 751 | if (lseg->pls_range.iomode == IOMODE_RW) { |
| 751 | struct nfs4_filelayout *flo; | 752 | struct nfs4_filelayout *flo; |
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index dd5d27da8c0c..30426c1a1bbd 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c | |||
| @@ -274,7 +274,7 @@ static struct key *nfs_idmap_request_key(const char *name, size_t namelen, | |||
| 274 | ssize_t ret; | 274 | ssize_t ret; |
| 275 | 275 | ||
| 276 | ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc); | 276 | ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc); |
| 277 | if (ret <= 0) | 277 | if (ret < 0) |
| 278 | return ERR_PTR(ret); | 278 | return ERR_PTR(ret); |
| 279 | 279 | ||
| 280 | rkey = request_key(&key_type_id_resolver, desc, ""); | 280 | rkey = request_key(&key_type_id_resolver, desc, ""); |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 6c61e2b99635..f90090e8c959 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -8399,8 +8399,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task, | |||
| 8399 | lo = NFS_I(inode)->layout; | 8399 | lo = NFS_I(inode)->layout; |
| 8400 | /* If the open stateid was bad, then recover it. */ | 8400 | /* If the open stateid was bad, then recover it. */ |
| 8401 | if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || | 8401 | if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || |
| 8402 | nfs4_stateid_match_other(&lgp->args.stateid, | 8402 | !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) { |
| 8403 | &lgp->args.ctx->state->stateid)) { | ||
| 8404 | spin_unlock(&inode->i_lock); | 8403 | spin_unlock(&inode->i_lock); |
| 8405 | exception->state = lgp->args.ctx->state; | 8404 | exception->state = lgp->args.ctx->state; |
| 8406 | exception->stateid = &lgp->args.stateid; | 8405 | exception->stateid = &lgp->args.stateid; |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 37c8af003275..14ed9791ec9c 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
| @@ -1842,8 +1842,8 @@ static void encode_create_session(struct xdr_stream *xdr, | |||
| 1842 | * Assumes OPEN is the biggest non-idempotent compound. | 1842 | * Assumes OPEN is the biggest non-idempotent compound. |
| 1843 | * 2 is the verifier. | 1843 | * 2 is the verifier. |
| 1844 | */ | 1844 | */ |
| 1845 | max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + | 1845 | max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + 2) |
| 1846 | RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT; | 1846 | * XDR_UNIT + RPC_MAX_AUTH_SIZE; |
| 1847 | 1847 | ||
| 1848 | encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr); | 1848 | encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr); |
| 1849 | p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12); | 1849 | p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12); |
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 3c69db7d4905..8487486ec496 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c | |||
| @@ -927,6 +927,13 @@ nfsd4_secinfo_release(union nfsd4_op_u *u) | |||
| 927 | exp_put(u->secinfo.si_exp); | 927 | exp_put(u->secinfo.si_exp); |
| 928 | } | 928 | } |
| 929 | 929 | ||
| 930 | static void | ||
| 931 | nfsd4_secinfo_no_name_release(union nfsd4_op_u *u) | ||
| 932 | { | ||
| 933 | if (u->secinfo_no_name.sin_exp) | ||
| 934 | exp_put(u->secinfo_no_name.sin_exp); | ||
| 935 | } | ||
| 936 | |||
| 930 | static __be32 | 937 | static __be32 |
| 931 | nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | 938 | nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
| 932 | union nfsd4_op_u *u) | 939 | union nfsd4_op_u *u) |
| @@ -2375,7 +2382,7 @@ static const struct nfsd4_operation nfsd4_ops[] = { | |||
| 2375 | }, | 2382 | }, |
| 2376 | [OP_SECINFO_NO_NAME] = { | 2383 | [OP_SECINFO_NO_NAME] = { |
| 2377 | .op_func = nfsd4_secinfo_no_name, | 2384 | .op_func = nfsd4_secinfo_no_name, |
| 2378 | .op_release = nfsd4_secinfo_release, | 2385 | .op_release = nfsd4_secinfo_no_name_release, |
| 2379 | .op_flags = OP_HANDLES_WRONGSEC, | 2386 | .op_flags = OP_HANDLES_WRONGSEC, |
| 2380 | .op_name = "OP_SECINFO_NO_NAME", | 2387 | .op_name = "OP_SECINFO_NO_NAME", |
| 2381 | .op_rsize_bop = nfsd4_secinfo_rsize, | 2388 | .op_rsize_bop = nfsd4_secinfo_rsize, |
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index aad97b30d5e6..c441f9387a1b 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c | |||
| @@ -561,10 +561,8 @@ static int ovl_do_copy_up(struct ovl_copy_up_ctx *c) | |||
| 561 | c->tmpfile = true; | 561 | c->tmpfile = true; |
| 562 | err = ovl_copy_up_locked(c); | 562 | err = ovl_copy_up_locked(c); |
| 563 | } else { | 563 | } else { |
| 564 | err = -EIO; | 564 | err = ovl_lock_rename_workdir(c->workdir, c->destdir); |
| 565 | if (lock_rename(c->workdir, c->destdir) != NULL) { | 565 | if (!err) { |
| 566 | pr_err("overlayfs: failed to lock workdir+upperdir\n"); | ||
| 567 | } else { | ||
| 568 | err = ovl_copy_up_locked(c); | 566 | err = ovl_copy_up_locked(c); |
| 569 | unlock_rename(c->workdir, c->destdir); | 567 | unlock_rename(c->workdir, c->destdir); |
| 570 | } | 568 | } |
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 3309b1912241..cc961a3bd3bd 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c | |||
| @@ -216,26 +216,6 @@ out_unlock: | |||
| 216 | return err; | 216 | return err; |
| 217 | } | 217 | } |
| 218 | 218 | ||
| 219 | static int ovl_lock_rename_workdir(struct dentry *workdir, | ||
| 220 | struct dentry *upperdir) | ||
| 221 | { | ||
| 222 | /* Workdir should not be the same as upperdir */ | ||
| 223 | if (workdir == upperdir) | ||
| 224 | goto err; | ||
| 225 | |||
| 226 | /* Workdir should not be subdir of upperdir and vice versa */ | ||
| 227 | if (lock_rename(workdir, upperdir) != NULL) | ||
| 228 | goto err_unlock; | ||
| 229 | |||
| 230 | return 0; | ||
| 231 | |||
| 232 | err_unlock: | ||
| 233 | unlock_rename(workdir, upperdir); | ||
| 234 | err: | ||
| 235 | pr_err("overlayfs: failed to lock workdir+upperdir\n"); | ||
| 236 | return -EIO; | ||
| 237 | } | ||
| 238 | |||
| 239 | static struct dentry *ovl_clear_empty(struct dentry *dentry, | 219 | static struct dentry *ovl_clear_empty(struct dentry *dentry, |
| 240 | struct list_head *list) | 220 | struct list_head *list) |
| 241 | { | 221 | { |
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index c3addd1114f1..654bea1a5ac9 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c | |||
| @@ -506,6 +506,7 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry, | |||
| 506 | 506 | ||
| 507 | index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len); | 507 | index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len); |
| 508 | if (IS_ERR(index)) { | 508 | if (IS_ERR(index)) { |
| 509 | err = PTR_ERR(index); | ||
| 509 | pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n" | 510 | pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n" |
| 510 | "overlayfs: mount with '-o index=off' to disable inodes index.\n", | 511 | "overlayfs: mount with '-o index=off' to disable inodes index.\n", |
| 511 | d_inode(origin)->i_ino, name.len, name.name, | 512 | d_inode(origin)->i_ino, name.len, name.name, |
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index d4e8c1a08fb0..c706a6f99928 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h | |||
| @@ -235,6 +235,7 @@ bool ovl_inuse_trylock(struct dentry *dentry); | |||
| 235 | void ovl_inuse_unlock(struct dentry *dentry); | 235 | void ovl_inuse_unlock(struct dentry *dentry); |
| 236 | int ovl_nlink_start(struct dentry *dentry, bool *locked); | 236 | int ovl_nlink_start(struct dentry *dentry, bool *locked); |
| 237 | void ovl_nlink_end(struct dentry *dentry, bool locked); | 237 | void ovl_nlink_end(struct dentry *dentry, bool locked); |
| 238 | int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir); | ||
| 238 | 239 | ||
| 239 | static inline bool ovl_is_impuredir(struct dentry *dentry) | 240 | static inline bool ovl_is_impuredir(struct dentry *dentry) |
| 240 | { | 241 | { |
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index 878a750986dd..25d9b5adcd42 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h | |||
| @@ -37,6 +37,9 @@ struct ovl_fs { | |||
| 37 | bool noxattr; | 37 | bool noxattr; |
| 38 | /* sb common to all layers */ | 38 | /* sb common to all layers */ |
| 39 | struct super_block *same_sb; | 39 | struct super_block *same_sb; |
| 40 | /* Did we take the inuse lock? */ | ||
| 41 | bool upperdir_locked; | ||
| 42 | bool workdir_locked; | ||
| 40 | }; | 43 | }; |
| 41 | 44 | ||
| 42 | /* private information held for every overlayfs dentry */ | 45 | /* private information held for every overlayfs dentry */ |
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 62e9b22a2077..0f85ee9c3268 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c | |||
| @@ -988,6 +988,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt, | |||
| 988 | struct path *lowerstack, unsigned int numlower) | 988 | struct path *lowerstack, unsigned int numlower) |
| 989 | { | 989 | { |
| 990 | int err; | 990 | int err; |
| 991 | struct dentry *index = NULL; | ||
| 991 | struct inode *dir = dentry->d_inode; | 992 | struct inode *dir = dentry->d_inode; |
| 992 | struct path path = { .mnt = mnt, .dentry = dentry }; | 993 | struct path path = { .mnt = mnt, .dentry = dentry }; |
| 993 | LIST_HEAD(list); | 994 | LIST_HEAD(list); |
| @@ -1007,8 +1008,6 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt, | |||
| 1007 | 1008 | ||
| 1008 | inode_lock_nested(dir, I_MUTEX_PARENT); | 1009 | inode_lock_nested(dir, I_MUTEX_PARENT); |
| 1009 | list_for_each_entry(p, &list, l_node) { | 1010 | list_for_each_entry(p, &list, l_node) { |
| 1010 | struct dentry *index; | ||
| 1011 | |||
| 1012 | if (p->name[0] == '.') { | 1011 | if (p->name[0] == '.') { |
| 1013 | if (p->len == 1) | 1012 | if (p->len == 1) |
| 1014 | continue; | 1013 | continue; |
| @@ -1018,6 +1017,7 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt, | |||
| 1018 | index = lookup_one_len(p->name, dentry, p->len); | 1017 | index = lookup_one_len(p->name, dentry, p->len); |
| 1019 | if (IS_ERR(index)) { | 1018 | if (IS_ERR(index)) { |
| 1020 | err = PTR_ERR(index); | 1019 | err = PTR_ERR(index); |
| 1020 | index = NULL; | ||
| 1021 | break; | 1021 | break; |
| 1022 | } | 1022 | } |
| 1023 | err = ovl_verify_index(index, lowerstack, numlower); | 1023 | err = ovl_verify_index(index, lowerstack, numlower); |
| @@ -1029,7 +1029,9 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt, | |||
| 1029 | break; | 1029 | break; |
| 1030 | } | 1030 | } |
| 1031 | dput(index); | 1031 | dput(index); |
| 1032 | index = NULL; | ||
| 1032 | } | 1033 | } |
| 1034 | dput(index); | ||
| 1033 | inode_unlock(dir); | 1035 | inode_unlock(dir); |
| 1034 | out: | 1036 | out: |
| 1035 | ovl_cache_free(&list); | 1037 | ovl_cache_free(&list); |
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index fd5ea4facc62..092d150643c1 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c | |||
| @@ -211,9 +211,10 @@ static void ovl_put_super(struct super_block *sb) | |||
| 211 | 211 | ||
| 212 | dput(ufs->indexdir); | 212 | dput(ufs->indexdir); |
| 213 | dput(ufs->workdir); | 213 | dput(ufs->workdir); |
| 214 | ovl_inuse_unlock(ufs->workbasedir); | 214 | if (ufs->workdir_locked) |
| 215 | ovl_inuse_unlock(ufs->workbasedir); | ||
| 215 | dput(ufs->workbasedir); | 216 | dput(ufs->workbasedir); |
| 216 | if (ufs->upper_mnt) | 217 | if (ufs->upper_mnt && ufs->upperdir_locked) |
| 217 | ovl_inuse_unlock(ufs->upper_mnt->mnt_root); | 218 | ovl_inuse_unlock(ufs->upper_mnt->mnt_root); |
| 218 | mntput(ufs->upper_mnt); | 219 | mntput(ufs->upper_mnt); |
| 219 | for (i = 0; i < ufs->numlower; i++) | 220 | for (i = 0; i < ufs->numlower; i++) |
| @@ -881,9 +882,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
| 881 | goto out_put_upperpath; | 882 | goto out_put_upperpath; |
| 882 | 883 | ||
| 883 | err = -EBUSY; | 884 | err = -EBUSY; |
| 884 | if (!ovl_inuse_trylock(upperpath.dentry)) { | 885 | if (ovl_inuse_trylock(upperpath.dentry)) { |
| 885 | pr_err("overlayfs: upperdir is in-use by another mount\n"); | 886 | ufs->upperdir_locked = true; |
| 887 | } else if (ufs->config.index) { | ||
| 888 | pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n"); | ||
| 886 | goto out_put_upperpath; | 889 | goto out_put_upperpath; |
| 890 | } else { | ||
| 891 | pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); | ||
| 887 | } | 892 | } |
| 888 | 893 | ||
| 889 | err = ovl_mount_dir(ufs->config.workdir, &workpath); | 894 | err = ovl_mount_dir(ufs->config.workdir, &workpath); |
| @@ -901,9 +906,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
| 901 | } | 906 | } |
| 902 | 907 | ||
| 903 | err = -EBUSY; | 908 | err = -EBUSY; |
| 904 | if (!ovl_inuse_trylock(workpath.dentry)) { | 909 | if (ovl_inuse_trylock(workpath.dentry)) { |
| 905 | pr_err("overlayfs: workdir is in-use by another mount\n"); | 910 | ufs->workdir_locked = true; |
| 911 | } else if (ufs->config.index) { | ||
| 912 | pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n"); | ||
| 906 | goto out_put_workpath; | 913 | goto out_put_workpath; |
| 914 | } else { | ||
| 915 | pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); | ||
| 907 | } | 916 | } |
| 908 | 917 | ||
| 909 | ufs->workbasedir = workpath.dentry; | 918 | ufs->workbasedir = workpath.dentry; |
| @@ -1156,11 +1165,13 @@ out_put_lowerpath: | |||
| 1156 | out_free_lowertmp: | 1165 | out_free_lowertmp: |
| 1157 | kfree(lowertmp); | 1166 | kfree(lowertmp); |
| 1158 | out_unlock_workdentry: | 1167 | out_unlock_workdentry: |
| 1159 | ovl_inuse_unlock(workpath.dentry); | 1168 | if (ufs->workdir_locked) |
| 1169 | ovl_inuse_unlock(workpath.dentry); | ||
| 1160 | out_put_workpath: | 1170 | out_put_workpath: |
| 1161 | path_put(&workpath); | 1171 | path_put(&workpath); |
| 1162 | out_unlock_upperdentry: | 1172 | out_unlock_upperdentry: |
| 1163 | ovl_inuse_unlock(upperpath.dentry); | 1173 | if (ufs->upperdir_locked) |
| 1174 | ovl_inuse_unlock(upperpath.dentry); | ||
| 1164 | out_put_upperpath: | 1175 | out_put_upperpath: |
| 1165 | path_put(&upperpath); | 1176 | path_put(&upperpath); |
| 1166 | out_free_config: | 1177 | out_free_config: |
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index 117794582f9f..b9b239fa5cfd 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c | |||
| @@ -430,7 +430,7 @@ void ovl_inuse_unlock(struct dentry *dentry) | |||
| 430 | } | 430 | } |
| 431 | } | 431 | } |
| 432 | 432 | ||
| 433 | /* Called must hold OVL_I(inode)->oi_lock */ | 433 | /* Caller must hold OVL_I(inode)->lock */ |
| 434 | static void ovl_cleanup_index(struct dentry *dentry) | 434 | static void ovl_cleanup_index(struct dentry *dentry) |
| 435 | { | 435 | { |
| 436 | struct inode *dir = ovl_indexdir(dentry->d_sb)->d_inode; | 436 | struct inode *dir = ovl_indexdir(dentry->d_sb)->d_inode; |
| @@ -469,6 +469,9 @@ static void ovl_cleanup_index(struct dentry *dentry) | |||
| 469 | err = PTR_ERR(index); | 469 | err = PTR_ERR(index); |
| 470 | if (!IS_ERR(index)) | 470 | if (!IS_ERR(index)) |
| 471 | err = ovl_cleanup(dir, index); | 471 | err = ovl_cleanup(dir, index); |
| 472 | else | ||
| 473 | index = NULL; | ||
| 474 | |||
| 472 | inode_unlock(dir); | 475 | inode_unlock(dir); |
| 473 | if (err) | 476 | if (err) |
| 474 | goto fail; | 477 | goto fail; |
| @@ -557,3 +560,22 @@ void ovl_nlink_end(struct dentry *dentry, bool locked) | |||
| 557 | mutex_unlock(&OVL_I(d_inode(dentry))->lock); | 560 | mutex_unlock(&OVL_I(d_inode(dentry))->lock); |
| 558 | } | 561 | } |
| 559 | } | 562 | } |
| 563 | |||
| 564 | int ovl_lock_rename_workdir(struct dentry *workdir, struct dentry *upperdir) | ||
| 565 | { | ||
| 566 | /* Workdir should not be the same as upperdir */ | ||
| 567 | if (workdir == upperdir) | ||
| 568 | goto err; | ||
| 569 | |||
| 570 | /* Workdir should not be subdir of upperdir and vice versa */ | ||
| 571 | if (lock_rename(workdir, upperdir) != NULL) | ||
| 572 | goto err_unlock; | ||
| 573 | |||
| 574 | return 0; | ||
| 575 | |||
| 576 | err_unlock: | ||
| 577 | unlock_rename(workdir, upperdir); | ||
| 578 | err: | ||
| 579 | pr_err("overlayfs: failed to lock workdir+upperdir\n"); | ||
| 580 | return -EIO; | ||
| 581 | } | ||
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 50b0556a124f..52ad15192e72 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
| @@ -1297,21 +1297,18 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space, | |||
| 1297 | spin_lock(&dquot->dq_dqb_lock); | 1297 | spin_lock(&dquot->dq_dqb_lock); |
| 1298 | if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) || | 1298 | if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) || |
| 1299 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) | 1299 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) |
| 1300 | goto add; | 1300 | goto finish; |
| 1301 | 1301 | ||
| 1302 | tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace | 1302 | tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace |
| 1303 | + space + rsv_space; | 1303 | + space + rsv_space; |
| 1304 | 1304 | ||
| 1305 | if (flags & DQUOT_SPACE_NOFAIL) | ||
| 1306 | goto add; | ||
| 1307 | |||
| 1308 | if (dquot->dq_dqb.dqb_bhardlimit && | 1305 | if (dquot->dq_dqb.dqb_bhardlimit && |
| 1309 | tspace > dquot->dq_dqb.dqb_bhardlimit && | 1306 | tspace > dquot->dq_dqb.dqb_bhardlimit && |
| 1310 | !ignore_hardlimit(dquot)) { | 1307 | !ignore_hardlimit(dquot)) { |
| 1311 | if (flags & DQUOT_SPACE_WARN) | 1308 | if (flags & DQUOT_SPACE_WARN) |
| 1312 | prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN); | 1309 | prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN); |
| 1313 | ret = -EDQUOT; | 1310 | ret = -EDQUOT; |
| 1314 | goto out; | 1311 | goto finish; |
| 1315 | } | 1312 | } |
| 1316 | 1313 | ||
| 1317 | if (dquot->dq_dqb.dqb_bsoftlimit && | 1314 | if (dquot->dq_dqb.dqb_bsoftlimit && |
| @@ -1322,7 +1319,7 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space, | |||
| 1322 | if (flags & DQUOT_SPACE_WARN) | 1319 | if (flags & DQUOT_SPACE_WARN) |
| 1323 | prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN); | 1320 | prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN); |
| 1324 | ret = -EDQUOT; | 1321 | ret = -EDQUOT; |
| 1325 | goto out; | 1322 | goto finish; |
| 1326 | } | 1323 | } |
| 1327 | 1324 | ||
| 1328 | if (dquot->dq_dqb.dqb_bsoftlimit && | 1325 | if (dquot->dq_dqb.dqb_bsoftlimit && |
| @@ -1338,13 +1335,21 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space, | |||
| 1338 | * be always printed | 1335 | * be always printed |
| 1339 | */ | 1336 | */ |
| 1340 | ret = -EDQUOT; | 1337 | ret = -EDQUOT; |
| 1341 | goto out; | 1338 | goto finish; |
| 1342 | } | 1339 | } |
| 1343 | } | 1340 | } |
| 1344 | add: | 1341 | finish: |
| 1345 | dquot->dq_dqb.dqb_rsvspace += rsv_space; | 1342 | /* |
| 1346 | dquot->dq_dqb.dqb_curspace += space; | 1343 | * We have to be careful and go through warning generation & grace time |
| 1347 | out: | 1344 | * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it |
| 1345 | * only here... | ||
| 1346 | */ | ||
| 1347 | if (flags & DQUOT_SPACE_NOFAIL) | ||
| 1348 | ret = 0; | ||
| 1349 | if (!ret) { | ||
| 1350 | dquot->dq_dqb.dqb_rsvspace += rsv_space; | ||
| 1351 | dquot->dq_dqb.dqb_curspace += space; | ||
| 1352 | } | ||
| 1348 | spin_unlock(&dquot->dq_dqb_lock); | 1353 | spin_unlock(&dquot->dq_dqb_lock); |
| 1349 | return ret; | 1354 | return ret; |
| 1350 | } | 1355 | } |
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index ef4b48d1ea42..1c713fd5b3e6 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c | |||
| @@ -588,6 +588,12 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, | |||
| 588 | break; | 588 | break; |
| 589 | if (ACCESS_ONCE(ctx->released) || | 589 | if (ACCESS_ONCE(ctx->released) || |
| 590 | fatal_signal_pending(current)) { | 590 | fatal_signal_pending(current)) { |
| 591 | /* | ||
| 592 | * &ewq->wq may be queued in fork_event, but | ||
| 593 | * __remove_wait_queue ignores the head | ||
| 594 | * parameter. It would be a problem if it | ||
| 595 | * didn't. | ||
| 596 | */ | ||
| 591 | __remove_wait_queue(&ctx->event_wqh, &ewq->wq); | 597 | __remove_wait_queue(&ctx->event_wqh, &ewq->wq); |
| 592 | if (ewq->msg.event == UFFD_EVENT_FORK) { | 598 | if (ewq->msg.event == UFFD_EVENT_FORK) { |
| 593 | struct userfaultfd_ctx *new; | 599 | struct userfaultfd_ctx *new; |
| @@ -1061,6 +1067,12 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, | |||
| 1061 | (unsigned long) | 1067 | (unsigned long) |
| 1062 | uwq->msg.arg.reserved.reserved1; | 1068 | uwq->msg.arg.reserved.reserved1; |
| 1063 | list_move(&uwq->wq.entry, &fork_event); | 1069 | list_move(&uwq->wq.entry, &fork_event); |
| 1070 | /* | ||
| 1071 | * fork_nctx can be freed as soon as | ||
| 1072 | * we drop the lock, unless we take a | ||
| 1073 | * reference on it. | ||
| 1074 | */ | ||
| 1075 | userfaultfd_ctx_get(fork_nctx); | ||
| 1064 | spin_unlock(&ctx->event_wqh.lock); | 1076 | spin_unlock(&ctx->event_wqh.lock); |
| 1065 | ret = 0; | 1077 | ret = 0; |
| 1066 | break; | 1078 | break; |
| @@ -1091,19 +1103,53 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, | |||
| 1091 | 1103 | ||
| 1092 | if (!ret && msg->event == UFFD_EVENT_FORK) { | 1104 | if (!ret && msg->event == UFFD_EVENT_FORK) { |
| 1093 | ret = resolve_userfault_fork(ctx, fork_nctx, msg); | 1105 | ret = resolve_userfault_fork(ctx, fork_nctx, msg); |
| 1106 | spin_lock(&ctx->event_wqh.lock); | ||
| 1107 | if (!list_empty(&fork_event)) { | ||
| 1108 | /* | ||
| 1109 | * The fork thread didn't abort, so we can | ||
| 1110 | * drop the temporary refcount. | ||
| 1111 | */ | ||
| 1112 | userfaultfd_ctx_put(fork_nctx); | ||
| 1113 | |||
| 1114 | uwq = list_first_entry(&fork_event, | ||
| 1115 | typeof(*uwq), | ||
| 1116 | wq.entry); | ||
| 1117 | /* | ||
| 1118 | * If fork_event list wasn't empty and in turn | ||
| 1119 | * the event wasn't already released by fork | ||
| 1120 | * (the event is allocated on fork kernel | ||
| 1121 | * stack), put the event back to its place in | ||
| 1122 | * the event_wq. fork_event head will be freed | ||
| 1123 | * as soon as we return so the event cannot | ||
| 1124 | * stay queued there no matter the current | ||
| 1125 | * "ret" value. | ||
| 1126 | */ | ||
| 1127 | list_del(&uwq->wq.entry); | ||
| 1128 | __add_wait_queue(&ctx->event_wqh, &uwq->wq); | ||
| 1094 | 1129 | ||
| 1095 | if (!ret) { | 1130 | /* |
| 1096 | spin_lock(&ctx->event_wqh.lock); | 1131 | * Leave the event in the waitqueue and report |
| 1097 | if (!list_empty(&fork_event)) { | 1132 | * error to userland if we failed to resolve |
| 1098 | uwq = list_first_entry(&fork_event, | 1133 | * the userfault fork. |
| 1099 | typeof(*uwq), | 1134 | */ |
| 1100 | wq.entry); | 1135 | if (likely(!ret)) |
| 1101 | list_del(&uwq->wq.entry); | ||
| 1102 | __add_wait_queue(&ctx->event_wqh, &uwq->wq); | ||
| 1103 | userfaultfd_event_complete(ctx, uwq); | 1136 | userfaultfd_event_complete(ctx, uwq); |
| 1104 | } | 1137 | } else { |
| 1105 | spin_unlock(&ctx->event_wqh.lock); | 1138 | /* |
| 1139 | * Here the fork thread aborted and the | ||
| 1140 | * refcount from the fork thread on fork_nctx | ||
| 1141 | * has already been released. We still hold | ||
| 1142 | * the reference we took before releasing the | ||
| 1143 | * lock above. If resolve_userfault_fork | ||
| 1144 | * failed we have to drop it because the | ||
| 1145 | * fork_nctx has to be freed in that case. If | ||
| 1146 | * it succeeded we'll hold it because the new | ||
| 1147 | * uffd references it. | ||
| 1148 | */ | ||
| 1149 | if (ret) | ||
| 1150 | userfaultfd_ctx_put(fork_nctx); | ||
| 1106 | } | 1151 | } |
| 1152 | spin_unlock(&ctx->event_wqh.lock); | ||
| 1107 | } | 1153 | } |
| 1108 | 1154 | ||
| 1109 | return ret; | 1155 | return ret; |
diff --git a/fs/xattr.c b/fs/xattr.c index 4424f7fecf14..61cd28ba25f3 100644 --- a/fs/xattr.c +++ b/fs/xattr.c | |||
| @@ -250,7 +250,7 @@ xattr_getsecurity(struct inode *inode, const char *name, void *value, | |||
| 250 | } | 250 | } |
| 251 | memcpy(value, buffer, len); | 251 | memcpy(value, buffer, len); |
| 252 | out: | 252 | out: |
| 253 | security_release_secctx(buffer, len); | 253 | kfree(buffer); |
| 254 | out_noalloc: | 254 | out_noalloc: |
| 255 | return len; | 255 | return len; |
| 256 | } | 256 | } |
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 744dcaec34cc..f965ce832bc0 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c | |||
| @@ -1584,6 +1584,10 @@ xfs_alloc_ag_vextent_small( | |||
| 1584 | 1584 | ||
| 1585 | bp = xfs_btree_get_bufs(args->mp, args->tp, | 1585 | bp = xfs_btree_get_bufs(args->mp, args->tp, |
| 1586 | args->agno, fbno, 0); | 1586 | args->agno, fbno, 0); |
| 1587 | if (!bp) { | ||
| 1588 | error = -EFSCORRUPTED; | ||
| 1589 | goto error0; | ||
| 1590 | } | ||
| 1587 | xfs_trans_binval(args->tp, bp); | 1591 | xfs_trans_binval(args->tp, bp); |
| 1588 | } | 1592 | } |
| 1589 | args->len = 1; | 1593 | args->len = 1; |
| @@ -2141,6 +2145,10 @@ xfs_alloc_fix_freelist( | |||
| 2141 | if (error) | 2145 | if (error) |
| 2142 | goto out_agbp_relse; | 2146 | goto out_agbp_relse; |
| 2143 | bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0); | 2147 | bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0); |
| 2148 | if (!bp) { | ||
| 2149 | error = -EFSCORRUPTED; | ||
| 2150 | goto out_agbp_relse; | ||
| 2151 | } | ||
| 2144 | xfs_trans_binval(tp, bp); | 2152 | xfs_trans_binval(tp, bp); |
| 2145 | } | 2153 | } |
| 2146 | 2154 | ||
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 044a363119be..def32fa1c225 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c | |||
| @@ -1477,14 +1477,14 @@ xfs_bmap_isaeof( | |||
| 1477 | int is_empty; | 1477 | int is_empty; |
| 1478 | int error; | 1478 | int error; |
| 1479 | 1479 | ||
| 1480 | bma->aeof = 0; | 1480 | bma->aeof = false; |
| 1481 | error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, | 1481 | error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, |
| 1482 | &is_empty); | 1482 | &is_empty); |
| 1483 | if (error) | 1483 | if (error) |
| 1484 | return error; | 1484 | return error; |
| 1485 | 1485 | ||
| 1486 | if (is_empty) { | 1486 | if (is_empty) { |
| 1487 | bma->aeof = 1; | 1487 | bma->aeof = true; |
| 1488 | return 0; | 1488 | return 0; |
| 1489 | } | 1489 | } |
| 1490 | 1490 | ||
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 988bb3f31446..dfd643909f85 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c | |||
| @@ -1962,7 +1962,7 @@ xfs_difree_inobt( | |||
| 1962 | if (!(mp->m_flags & XFS_MOUNT_IKEEP) && | 1962 | if (!(mp->m_flags & XFS_MOUNT_IKEEP) && |
| 1963 | rec.ir_free == XFS_INOBT_ALL_FREE && | 1963 | rec.ir_free == XFS_INOBT_ALL_FREE && |
| 1964 | mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { | 1964 | mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { |
| 1965 | xic->deleted = 1; | 1965 | xic->deleted = true; |
| 1966 | xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); | 1966 | xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); |
| 1967 | xic->alloc = xfs_inobt_irec_to_allocmask(&rec); | 1967 | xic->alloc = xfs_inobt_irec_to_allocmask(&rec); |
| 1968 | 1968 | ||
| @@ -1989,7 +1989,7 @@ xfs_difree_inobt( | |||
| 1989 | 1989 | ||
| 1990 | xfs_difree_inode_chunk(mp, agno, &rec, dfops); | 1990 | xfs_difree_inode_chunk(mp, agno, &rec, dfops); |
| 1991 | } else { | 1991 | } else { |
| 1992 | xic->deleted = 0; | 1992 | xic->deleted = false; |
| 1993 | 1993 | ||
| 1994 | error = xfs_inobt_update(cur, &rec); | 1994 | error = xfs_inobt_update(cur, &rec); |
| 1995 | if (error) { | 1995 | if (error) { |
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h index 8372e9bcd7b6..71de185735e0 100644 --- a/fs/xfs/libxfs/xfs_log_format.h +++ b/fs/xfs/libxfs/xfs_log_format.h | |||
| @@ -270,6 +270,7 @@ typedef struct xfs_inode_log_format { | |||
| 270 | uint32_t ilf_fields; /* flags for fields logged */ | 270 | uint32_t ilf_fields; /* flags for fields logged */ |
| 271 | uint16_t ilf_asize; /* size of attr d/ext/root */ | 271 | uint16_t ilf_asize; /* size of attr d/ext/root */ |
| 272 | uint16_t ilf_dsize; /* size of data/ext/root */ | 272 | uint16_t ilf_dsize; /* size of data/ext/root */ |
| 273 | uint32_t ilf_pad; /* pad for 64 bit boundary */ | ||
| 273 | uint64_t ilf_ino; /* inode number */ | 274 | uint64_t ilf_ino; /* inode number */ |
| 274 | union { | 275 | union { |
| 275 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ | 276 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ |
| @@ -280,29 +281,17 @@ typedef struct xfs_inode_log_format { | |||
| 280 | int32_t ilf_boffset; /* off of inode in buffer */ | 281 | int32_t ilf_boffset; /* off of inode in buffer */ |
| 281 | } xfs_inode_log_format_t; | 282 | } xfs_inode_log_format_t; |
| 282 | 283 | ||
| 283 | typedef struct xfs_inode_log_format_32 { | 284 | /* |
| 284 | uint16_t ilf_type; /* inode log item type */ | 285 | * Old 32 bit systems will log in this format without the 64 bit |
| 285 | uint16_t ilf_size; /* size of this item */ | 286 | * alignment padding. Recovery will detect this and convert it to the |
| 286 | uint32_t ilf_fields; /* flags for fields logged */ | 287 | * correct format. |
| 287 | uint16_t ilf_asize; /* size of attr d/ext/root */ | 288 | */ |
| 288 | uint16_t ilf_dsize; /* size of data/ext/root */ | 289 | struct xfs_inode_log_format_32 { |
| 289 | uint64_t ilf_ino; /* inode number */ | ||
| 290 | union { | ||
| 291 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ | ||
| 292 | uuid_t ilfu_uuid; /* mount point value */ | ||
| 293 | } ilf_u; | ||
| 294 | int64_t ilf_blkno; /* blkno of inode buffer */ | ||
| 295 | int32_t ilf_len; /* len of inode buffer */ | ||
| 296 | int32_t ilf_boffset; /* off of inode in buffer */ | ||
| 297 | } __attribute__((packed)) xfs_inode_log_format_32_t; | ||
| 298 | |||
| 299 | typedef struct xfs_inode_log_format_64 { | ||
| 300 | uint16_t ilf_type; /* inode log item type */ | 290 | uint16_t ilf_type; /* inode log item type */ |
| 301 | uint16_t ilf_size; /* size of this item */ | 291 | uint16_t ilf_size; /* size of this item */ |
| 302 | uint32_t ilf_fields; /* flags for fields logged */ | 292 | uint32_t ilf_fields; /* flags for fields logged */ |
| 303 | uint16_t ilf_asize; /* size of attr d/ext/root */ | 293 | uint16_t ilf_asize; /* size of attr d/ext/root */ |
| 304 | uint16_t ilf_dsize; /* size of data/ext/root */ | 294 | uint16_t ilf_dsize; /* size of data/ext/root */ |
| 305 | uint32_t ilf_pad; /* pad for 64 bit boundary */ | ||
| 306 | uint64_t ilf_ino; /* inode number */ | 295 | uint64_t ilf_ino; /* inode number */ |
| 307 | union { | 296 | union { |
| 308 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ | 297 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ |
| @@ -311,7 +300,7 @@ typedef struct xfs_inode_log_format_64 { | |||
| 311 | int64_t ilf_blkno; /* blkno of inode buffer */ | 300 | int64_t ilf_blkno; /* blkno of inode buffer */ |
| 312 | int32_t ilf_len; /* len of inode buffer */ | 301 | int32_t ilf_len; /* len of inode buffer */ |
| 313 | int32_t ilf_boffset; /* off of inode in buffer */ | 302 | int32_t ilf_boffset; /* off of inode in buffer */ |
| 314 | } xfs_inode_log_format_64_t; | 303 | } __attribute__((packed)); |
| 315 | 304 | ||
| 316 | 305 | ||
| 317 | /* | 306 | /* |
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index 7034e17535de..3354140de07e 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c | |||
| @@ -247,6 +247,8 @@ xfs_set_mode(struct inode *inode, umode_t mode) | |||
| 247 | int | 247 | int |
| 248 | xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) | 248 | xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
| 249 | { | 249 | { |
| 250 | umode_t mode; | ||
| 251 | bool set_mode = false; | ||
| 250 | int error = 0; | 252 | int error = 0; |
| 251 | 253 | ||
| 252 | if (!acl) | 254 | if (!acl) |
| @@ -257,16 +259,24 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) | |||
| 257 | return error; | 259 | return error; |
| 258 | 260 | ||
| 259 | if (type == ACL_TYPE_ACCESS) { | 261 | if (type == ACL_TYPE_ACCESS) { |
| 260 | umode_t mode; | ||
| 261 | |||
| 262 | error = posix_acl_update_mode(inode, &mode, &acl); | 262 | error = posix_acl_update_mode(inode, &mode, &acl); |
| 263 | if (error) | 263 | if (error) |
| 264 | return error; | 264 | return error; |
| 265 | error = xfs_set_mode(inode, mode); | 265 | set_mode = true; |
| 266 | if (error) | ||
| 267 | return error; | ||
| 268 | } | 266 | } |
| 269 | 267 | ||
| 270 | set_acl: | 268 | set_acl: |
| 271 | return __xfs_set_acl(inode, acl, type); | 269 | error = __xfs_set_acl(inode, acl, type); |
| 270 | if (error) | ||
| 271 | return error; | ||
| 272 | |||
| 273 | /* | ||
| 274 | * We set the mode after successfully updating the ACL xattr because the | ||
| 275 | * xattr update can fail at ENOSPC and we don't want to change the mode | ||
| 276 | * if the ACL update hasn't been applied. | ||
| 277 | */ | ||
| 278 | if (set_mode) | ||
| 279 | error = xfs_set_mode(inode, mode); | ||
| 280 | |||
| 281 | return error; | ||
| 272 | } | 282 | } |
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c index ebd66b19fbfc..e3a950ed35a8 100644 --- a/fs/xfs/xfs_attr_inactive.c +++ b/fs/xfs/xfs_attr_inactive.c | |||
| @@ -302,6 +302,8 @@ xfs_attr3_node_inactive( | |||
| 302 | &bp, XFS_ATTR_FORK); | 302 | &bp, XFS_ATTR_FORK); |
| 303 | if (error) | 303 | if (error) |
| 304 | return error; | 304 | return error; |
| 305 | node = bp->b_addr; | ||
| 306 | btree = dp->d_ops->node_tree_p(node); | ||
| 305 | child_fsb = be32_to_cpu(btree[i + 1].before); | 307 | child_fsb = be32_to_cpu(btree[i + 1].before); |
| 306 | xfs_trans_brelse(*trans, bp); | 308 | xfs_trans_brelse(*trans, bp); |
| 307 | } | 309 | } |
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index bc6c6e10a969..6503cfa44262 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
| @@ -84,6 +84,7 @@ xfs_zero_extent( | |||
| 84 | GFP_NOFS, 0); | 84 | GFP_NOFS, 0); |
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | #ifdef CONFIG_XFS_RT | ||
| 87 | int | 88 | int |
| 88 | xfs_bmap_rtalloc( | 89 | xfs_bmap_rtalloc( |
| 89 | struct xfs_bmalloca *ap) /* bmap alloc argument struct */ | 90 | struct xfs_bmalloca *ap) /* bmap alloc argument struct */ |
| @@ -190,6 +191,7 @@ xfs_bmap_rtalloc( | |||
| 190 | } | 191 | } |
| 191 | return 0; | 192 | return 0; |
| 192 | } | 193 | } |
| 194 | #endif /* CONFIG_XFS_RT */ | ||
| 193 | 195 | ||
| 194 | /* | 196 | /* |
| 195 | * Check if the endoff is outside the last extent. If so the caller will grow | 197 | * Check if the endoff is outside the last extent. If so the caller will grow |
| @@ -2122,11 +2124,31 @@ xfs_swap_extents( | |||
| 2122 | ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK; | 2124 | ip->i_d.di_flags2 |= tip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK; |
| 2123 | tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; | 2125 | tip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK; |
| 2124 | tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK; | 2126 | tip->i_d.di_flags2 |= f & XFS_DIFLAG2_REFLINK; |
| 2127 | } | ||
| 2128 | |||
| 2129 | /* Swap the cow forks. */ | ||
| 2130 | if (xfs_sb_version_hasreflink(&mp->m_sb)) { | ||
| 2131 | xfs_extnum_t extnum; | ||
| 2132 | |||
| 2133 | ASSERT(ip->i_cformat == XFS_DINODE_FMT_EXTENTS); | ||
| 2134 | ASSERT(tip->i_cformat == XFS_DINODE_FMT_EXTENTS); | ||
| 2135 | |||
| 2136 | extnum = ip->i_cnextents; | ||
| 2137 | ip->i_cnextents = tip->i_cnextents; | ||
| 2138 | tip->i_cnextents = extnum; | ||
| 2139 | |||
| 2125 | cowfp = ip->i_cowfp; | 2140 | cowfp = ip->i_cowfp; |
| 2126 | ip->i_cowfp = tip->i_cowfp; | 2141 | ip->i_cowfp = tip->i_cowfp; |
| 2127 | tip->i_cowfp = cowfp; | 2142 | tip->i_cowfp = cowfp; |
| 2128 | xfs_inode_set_cowblocks_tag(ip); | 2143 | |
| 2129 | xfs_inode_set_cowblocks_tag(tip); | 2144 | if (ip->i_cowfp && ip->i_cnextents) |
| 2145 | xfs_inode_set_cowblocks_tag(ip); | ||
| 2146 | else | ||
| 2147 | xfs_inode_clear_cowblocks_tag(ip); | ||
| 2148 | if (tip->i_cowfp && tip->i_cnextents) | ||
| 2149 | xfs_inode_set_cowblocks_tag(tip); | ||
| 2150 | else | ||
| 2151 | xfs_inode_clear_cowblocks_tag(tip); | ||
| 2130 | } | 2152 | } |
| 2131 | 2153 | ||
| 2132 | xfs_trans_log_inode(tp, ip, src_log_flags); | 2154 | xfs_trans_log_inode(tp, ip, src_log_flags); |
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h index 0eaa81dc49be..7d330b3c77c3 100644 --- a/fs/xfs/xfs_bmap_util.h +++ b/fs/xfs/xfs_bmap_util.h | |||
| @@ -28,7 +28,20 @@ struct xfs_mount; | |||
| 28 | struct xfs_trans; | 28 | struct xfs_trans; |
| 29 | struct xfs_bmalloca; | 29 | struct xfs_bmalloca; |
| 30 | 30 | ||
| 31 | #ifdef CONFIG_XFS_RT | ||
| 31 | int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); | 32 | int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); |
| 33 | #else /* !CONFIG_XFS_RT */ | ||
| 34 | /* | ||
| 35 | * Attempts to allocate RT extents when RT is disabled indicate corruption and | ||
| 36 | * should trigger a shutdown. | ||
| 37 | */ | ||
| 38 | static inline int | ||
| 39 | xfs_bmap_rtalloc(struct xfs_bmalloca *ap) | ||
| 40 | { | ||
| 41 | return -EFSCORRUPTED; | ||
| 42 | } | ||
| 43 | #endif /* CONFIG_XFS_RT */ | ||
| 44 | |||
| 32 | int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, | 45 | int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, |
| 33 | int whichfork, int *eof); | 46 | int whichfork, int *eof); |
| 34 | int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip, | 47 | int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip, |
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 309e26c9dddb..56d0e526870c 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
| @@ -764,7 +764,7 @@ xfs_file_fallocate( | |||
| 764 | enum xfs_prealloc_flags flags = 0; | 764 | enum xfs_prealloc_flags flags = 0; |
| 765 | uint iolock = XFS_IOLOCK_EXCL; | 765 | uint iolock = XFS_IOLOCK_EXCL; |
| 766 | loff_t new_size = 0; | 766 | loff_t new_size = 0; |
| 767 | bool do_file_insert = 0; | 767 | bool do_file_insert = false; |
| 768 | 768 | ||
| 769 | if (!S_ISREG(inode->i_mode)) | 769 | if (!S_ISREG(inode->i_mode)) |
| 770 | return -EINVAL; | 770 | return -EINVAL; |
| @@ -825,7 +825,7 @@ xfs_file_fallocate( | |||
| 825 | error = -EINVAL; | 825 | error = -EINVAL; |
| 826 | goto out_unlock; | 826 | goto out_unlock; |
| 827 | } | 827 | } |
| 828 | do_file_insert = 1; | 828 | do_file_insert = true; |
| 829 | } else { | 829 | } else { |
| 830 | flags |= XFS_PREALLOC_SET; | 830 | flags |= XFS_PREALLOC_SET; |
| 831 | 831 | ||
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index 814ed729881d..560e0b40ac1b 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c | |||
| @@ -521,6 +521,7 @@ __xfs_getfsmap_rtdev( | |||
| 521 | return query_fn(tp, info); | 521 | return query_fn(tp, info); |
| 522 | } | 522 | } |
| 523 | 523 | ||
| 524 | #ifdef CONFIG_XFS_RT | ||
| 524 | /* Actually query the realtime bitmap. */ | 525 | /* Actually query the realtime bitmap. */ |
| 525 | STATIC int | 526 | STATIC int |
| 526 | xfs_getfsmap_rtdev_rtbitmap_query( | 527 | xfs_getfsmap_rtdev_rtbitmap_query( |
| @@ -561,6 +562,7 @@ xfs_getfsmap_rtdev_rtbitmap( | |||
| 561 | return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query, | 562 | return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query, |
| 562 | info); | 563 | info); |
| 563 | } | 564 | } |
| 565 | #endif /* CONFIG_XFS_RT */ | ||
| 564 | 566 | ||
| 565 | /* Execute a getfsmap query against the regular data device. */ | 567 | /* Execute a getfsmap query against the regular data device. */ |
| 566 | STATIC int | 568 | STATIC int |
| @@ -795,7 +797,15 @@ xfs_getfsmap_check_keys( | |||
| 795 | return false; | 797 | return false; |
| 796 | } | 798 | } |
| 797 | 799 | ||
| 800 | /* | ||
| 801 | * There are only two devices if we didn't configure RT devices at build time. | ||
| 802 | */ | ||
| 803 | #ifdef CONFIG_XFS_RT | ||
| 798 | #define XFS_GETFSMAP_DEVS 3 | 804 | #define XFS_GETFSMAP_DEVS 3 |
| 805 | #else | ||
| 806 | #define XFS_GETFSMAP_DEVS 2 | ||
| 807 | #endif /* CONFIG_XFS_RT */ | ||
| 808 | |||
| 799 | /* | 809 | /* |
| 800 | * Get filesystem's extents as described in head, and format for | 810 | * Get filesystem's extents as described in head, and format for |
| 801 | * output. Calls formatter to fill the user's buffer until all | 811 | * output. Calls formatter to fill the user's buffer until all |
| @@ -853,10 +863,12 @@ xfs_getfsmap( | |||
| 853 | handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev); | 863 | handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev); |
| 854 | handlers[1].fn = xfs_getfsmap_logdev; | 864 | handlers[1].fn = xfs_getfsmap_logdev; |
| 855 | } | 865 | } |
| 866 | #ifdef CONFIG_XFS_RT | ||
| 856 | if (mp->m_rtdev_targp) { | 867 | if (mp->m_rtdev_targp) { |
| 857 | handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev); | 868 | handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev); |
| 858 | handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap; | 869 | handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap; |
| 859 | } | 870 | } |
| 871 | #endif /* CONFIG_XFS_RT */ | ||
| 860 | 872 | ||
| 861 | xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev), | 873 | xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev), |
| 862 | xfs_getfsmap_dev_compare); | 874 | xfs_getfsmap_dev_compare); |
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index a705f34b58fa..9bbc2d7cc8cb 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
| @@ -364,6 +364,9 @@ xfs_inode_to_log_dinode( | |||
| 364 | to->di_dmstate = from->di_dmstate; | 364 | to->di_dmstate = from->di_dmstate; |
| 365 | to->di_flags = from->di_flags; | 365 | to->di_flags = from->di_flags; |
| 366 | 366 | ||
| 367 | /* log a dummy value to ensure log structure is fully initialised */ | ||
| 368 | to->di_next_unlinked = NULLAGINO; | ||
| 369 | |||
| 367 | if (from->di_version == 3) { | 370 | if (from->di_version == 3) { |
| 368 | to->di_changecount = inode->i_version; | 371 | to->di_changecount = inode->i_version; |
| 369 | to->di_crtime.t_sec = from->di_crtime.t_sec; | 372 | to->di_crtime.t_sec = from->di_crtime.t_sec; |
| @@ -404,6 +407,11 @@ xfs_inode_item_format_core( | |||
| 404 | * the second with the on-disk inode structure, and a possible third and/or | 407 | * the second with the on-disk inode structure, and a possible third and/or |
| 405 | * fourth with the inode data/extents/b-tree root and inode attributes | 408 | * fourth with the inode data/extents/b-tree root and inode attributes |
| 406 | * data/extents/b-tree root. | 409 | * data/extents/b-tree root. |
| 410 | * | ||
| 411 | * Note: Always use the 64 bit inode log format structure so we don't | ||
| 412 | * leave an uninitialised hole in the format item on 64 bit systems. Log | ||
| 413 | * recovery on 32 bit systems handles this just fine, so there's no reason | ||
| 414 | * for not using and initialising the properly padded structure all the time. | ||
| 407 | */ | 415 | */ |
| 408 | STATIC void | 416 | STATIC void |
| 409 | xfs_inode_item_format( | 417 | xfs_inode_item_format( |
| @@ -412,8 +420,8 @@ xfs_inode_item_format( | |||
| 412 | { | 420 | { |
| 413 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); | 421 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
| 414 | struct xfs_inode *ip = iip->ili_inode; | 422 | struct xfs_inode *ip = iip->ili_inode; |
| 415 | struct xfs_inode_log_format *ilf; | ||
| 416 | struct xfs_log_iovec *vecp = NULL; | 423 | struct xfs_log_iovec *vecp = NULL; |
| 424 | struct xfs_inode_log_format *ilf; | ||
| 417 | 425 | ||
| 418 | ASSERT(ip->i_d.di_version > 1); | 426 | ASSERT(ip->i_d.di_version > 1); |
| 419 | 427 | ||
| @@ -425,7 +433,17 @@ xfs_inode_item_format( | |||
| 425 | ilf->ilf_boffset = ip->i_imap.im_boffset; | 433 | ilf->ilf_boffset = ip->i_imap.im_boffset; |
| 426 | ilf->ilf_fields = XFS_ILOG_CORE; | 434 | ilf->ilf_fields = XFS_ILOG_CORE; |
| 427 | ilf->ilf_size = 2; /* format + core */ | 435 | ilf->ilf_size = 2; /* format + core */ |
| 428 | xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format)); | 436 | |
| 437 | /* | ||
| 438 | * make sure we don't leak uninitialised data into the log in the case | ||
| 439 | * when we don't log every field in the inode. | ||
| 440 | */ | ||
| 441 | ilf->ilf_dsize = 0; | ||
| 442 | ilf->ilf_asize = 0; | ||
| 443 | ilf->ilf_pad = 0; | ||
| 444 | uuid_copy(&ilf->ilf_u.ilfu_uuid, &uuid_null); | ||
| 445 | |||
| 446 | xlog_finish_iovec(lv, vecp, sizeof(*ilf)); | ||
| 429 | 447 | ||
| 430 | xfs_inode_item_format_core(ip, lv, &vecp); | 448 | xfs_inode_item_format_core(ip, lv, &vecp); |
| 431 | xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp); | 449 | xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp); |
| @@ -855,44 +873,29 @@ xfs_istale_done( | |||
| 855 | } | 873 | } |
| 856 | 874 | ||
| 857 | /* | 875 | /* |
| 858 | * convert an xfs_inode_log_format struct from either 32 or 64 bit versions | 876 | * convert an xfs_inode_log_format struct from the old 32 bit version |
| 859 | * (which can have different field alignments) to the native version | 877 | * (which can have different field alignments) to the native 64 bit version |
| 860 | */ | 878 | */ |
| 861 | int | 879 | int |
| 862 | xfs_inode_item_format_convert( | 880 | xfs_inode_item_format_convert( |
| 863 | xfs_log_iovec_t *buf, | 881 | struct xfs_log_iovec *buf, |
| 864 | xfs_inode_log_format_t *in_f) | 882 | struct xfs_inode_log_format *in_f) |
| 865 | { | 883 | { |
| 866 | if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) { | 884 | struct xfs_inode_log_format_32 *in_f32 = buf->i_addr; |
| 867 | xfs_inode_log_format_32_t *in_f32 = buf->i_addr; | 885 | |
| 868 | 886 | if (buf->i_len != sizeof(*in_f32)) | |
| 869 | in_f->ilf_type = in_f32->ilf_type; | 887 | return -EFSCORRUPTED; |
| 870 | in_f->ilf_size = in_f32->ilf_size; | 888 | |
| 871 | in_f->ilf_fields = in_f32->ilf_fields; | 889 | in_f->ilf_type = in_f32->ilf_type; |
| 872 | in_f->ilf_asize = in_f32->ilf_asize; | 890 | in_f->ilf_size = in_f32->ilf_size; |
| 873 | in_f->ilf_dsize = in_f32->ilf_dsize; | 891 | in_f->ilf_fields = in_f32->ilf_fields; |
| 874 | in_f->ilf_ino = in_f32->ilf_ino; | 892 | in_f->ilf_asize = in_f32->ilf_asize; |
| 875 | /* copy biggest field of ilf_u */ | 893 | in_f->ilf_dsize = in_f32->ilf_dsize; |
| 876 | uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid); | 894 | in_f->ilf_ino = in_f32->ilf_ino; |
| 877 | in_f->ilf_blkno = in_f32->ilf_blkno; | 895 | /* copy biggest field of ilf_u */ |
| 878 | in_f->ilf_len = in_f32->ilf_len; | 896 | uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid); |
| 879 | in_f->ilf_boffset = in_f32->ilf_boffset; | 897 | in_f->ilf_blkno = in_f32->ilf_blkno; |
| 880 | return 0; | 898 | in_f->ilf_len = in_f32->ilf_len; |
| 881 | } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){ | 899 | in_f->ilf_boffset = in_f32->ilf_boffset; |
| 882 | xfs_inode_log_format_64_t *in_f64 = buf->i_addr; | 900 | return 0; |
| 883 | |||
| 884 | in_f->ilf_type = in_f64->ilf_type; | ||
| 885 | in_f->ilf_size = in_f64->ilf_size; | ||
| 886 | in_f->ilf_fields = in_f64->ilf_fields; | ||
| 887 | in_f->ilf_asize = in_f64->ilf_asize; | ||
| 888 | in_f->ilf_dsize = in_f64->ilf_dsize; | ||
| 889 | in_f->ilf_ino = in_f64->ilf_ino; | ||
| 890 | /* copy biggest field of ilf_u */ | ||
| 891 | uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid); | ||
| 892 | in_f->ilf_blkno = in_f64->ilf_blkno; | ||
| 893 | in_f->ilf_len = in_f64->ilf_len; | ||
| 894 | in_f->ilf_boffset = in_f64->ilf_boffset; | ||
| 895 | return 0; | ||
| 896 | } | ||
| 897 | return -EFSCORRUPTED; | ||
| 898 | } | 901 | } |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index c5107c7bc4bf..dc95a49d62e7 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
| @@ -2515,7 +2515,7 @@ next_lv: | |||
| 2515 | if (lv) | 2515 | if (lv) |
| 2516 | vecp = lv->lv_iovecp; | 2516 | vecp = lv->lv_iovecp; |
| 2517 | } | 2517 | } |
| 2518 | if (record_cnt == 0 && ordered == false) { | 2518 | if (record_cnt == 0 && !ordered) { |
| 2519 | if (!lv) | 2519 | if (!lv) |
| 2520 | return 0; | 2520 | return 0; |
| 2521 | break; | 2521 | break; |
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index ea7d4b4e50d0..e9727d0a541a 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
| @@ -704,7 +704,7 @@ xfs_mountfs( | |||
| 704 | xfs_set_maxicount(mp); | 704 | xfs_set_maxicount(mp); |
| 705 | 705 | ||
| 706 | /* enable fail_at_unmount as default */ | 706 | /* enable fail_at_unmount as default */ |
| 707 | mp->m_fail_unmount = 1; | 707 | mp->m_fail_unmount = true; |
| 708 | 708 | ||
| 709 | error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname); | 709 | error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname); |
| 710 | if (error) | 710 | if (error) |
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h index 0c381d71b242..0492436a053f 100644 --- a/fs/xfs/xfs_ondisk.h +++ b/fs/xfs/xfs_ondisk.h | |||
| @@ -134,7 +134,7 @@ xfs_check_ondisk_structs(void) | |||
| 134 | XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28); | 134 | XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28); |
| 135 | XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8); | 135 | XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8); |
| 136 | XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52); | 136 | XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52); |
| 137 | XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_64, 56); | 137 | XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format, 56); |
| 138 | XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20); | 138 | XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20); |
| 139 | XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16); | 139 | XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16); |
| 140 | } | 140 | } |
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 3246815c24d6..37e603bf1591 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c | |||
| @@ -736,7 +736,13 @@ xfs_reflink_end_cow( | |||
| 736 | /* If there is a hole at end_fsb - 1 go to the previous extent */ | 736 | /* If there is a hole at end_fsb - 1 go to the previous extent */ |
| 737 | if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) || | 737 | if (!xfs_iext_lookup_extent(ip, ifp, end_fsb - 1, &idx, &got) || |
| 738 | got.br_startoff > end_fsb) { | 738 | got.br_startoff > end_fsb) { |
| 739 | ASSERT(idx > 0); | 739 | /* |
| 740 | * In case of racing, overlapping AIO writes, no COW extents | ||
| 741 | * might be left by the time I/O completes for the loser of | ||
| 742 | * the race. In that case we are done. | ||
| 743 | */ | ||
| 744 | if (idx <= 0) | ||
| 745 | goto out_cancel; | ||
| 740 | xfs_iext_get_extent(ifp, --idx, &got); | 746 | xfs_iext_get_extent(ifp, --idx, &got); |
| 741 | } | 747 | } |
| 742 | 748 | ||
| @@ -809,6 +815,7 @@ next_extent: | |||
| 809 | 815 | ||
| 810 | out_defer: | 816 | out_defer: |
| 811 | xfs_defer_cancel(&dfops); | 817 | xfs_defer_cancel(&dfops); |
| 818 | out_cancel: | ||
| 812 | xfs_trans_cancel(tp); | 819 | xfs_trans_cancel(tp); |
| 813 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | 820 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
| 814 | out: | 821 | out: |
diff --git a/include/dt-bindings/reset/snps,hsdk-reset.h b/include/dt-bindings/reset/snps,hsdk-reset.h new file mode 100644 index 000000000000..e1a643e4bc91 --- /dev/null +++ b/include/dt-bindings/reset/snps,hsdk-reset.h | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | /** | ||
| 2 | * This header provides the indexes for the HSDK reset controller. | ||
| 3 | */ | ||
| 4 | #ifndef _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK | ||
| 5 | #define _DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK | ||
| 6 | |||
| 7 | #define HSDK_APB_RESET 0 | ||
| 8 | #define HSDK_AXI_RESET 1 | ||
| 9 | #define HSDK_ETH_RESET 2 | ||
| 10 | #define HSDK_USB_RESET 3 | ||
| 11 | #define HSDK_SDIO_RESET 4 | ||
| 12 | #define HSDK_HDMI_RESET 5 | ||
| 13 | #define HSDK_GFX_RESET 6 | ||
| 14 | #define HSDK_DMAC_RESET 7 | ||
| 15 | #define HSDK_EBI_RESET 8 | ||
| 16 | |||
| 17 | #endif /*_DT_BINDINGS_RESET_CONTROLLER_SNPS_HSDK*/ | ||
diff --git a/include/dt-bindings/reset/snps,hsdk-v1-reset.h b/include/dt-bindings/reset/snps,hsdk-v1-reset.h deleted file mode 100644 index d898c89b7123..000000000000 --- a/include/dt-bindings/reset/snps,hsdk-v1-reset.h +++ /dev/null | |||
| @@ -1,17 +0,0 @@ | |||
| 1 | /** | ||
| 2 | * This header provides index for the HSDK v1 reset controller. | ||
| 3 | */ | ||
| 4 | #ifndef _DT_BINDINGS_RESET_CONTROLLER_HSDK_V1 | ||
| 5 | #define _DT_BINDINGS_RESET_CONTROLLER_HSDK_V1 | ||
| 6 | |||
| 7 | #define HSDK_V1_APB_RESET 0 | ||
| 8 | #define HSDK_V1_AXI_RESET 1 | ||
| 9 | #define HSDK_V1_ETH_RESET 2 | ||
| 10 | #define HSDK_V1_USB_RESET 3 | ||
| 11 | #define HSDK_V1_SDIO_RESET 4 | ||
| 12 | #define HSDK_V1_HDMI_RESET 5 | ||
| 13 | #define HSDK_V1_GFX_RESET 6 | ||
| 14 | #define HSDK_V1_DMAC_RESET 7 | ||
| 15 | #define HSDK_V1_EBI_RESET 8 | ||
| 16 | |||
| 17 | #endif /*_DT_BINDINGS_RESET_CONTROLLER_HSDK_V1*/ | ||
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index fb44d6180ca0..18d05b5491f3 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h | |||
| @@ -131,7 +131,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm, | |||
| 131 | int executable_stack); | 131 | int executable_stack); |
| 132 | extern int transfer_args_to_stack(struct linux_binprm *bprm, | 132 | extern int transfer_args_to_stack(struct linux_binprm *bprm, |
| 133 | unsigned long *sp_location); | 133 | unsigned long *sp_location); |
| 134 | extern int bprm_change_interp(char *interp, struct linux_binprm *bprm); | 134 | extern int bprm_change_interp(const char *interp, struct linux_binprm *bprm); |
| 135 | extern int copy_strings_kernel(int argc, const char *const *argv, | 135 | extern int copy_strings_kernel(int argc, const char *const *argv, |
| 136 | struct linux_binprm *bprm); | 136 | struct linux_binprm *bprm); |
| 137 | extern int prepare_bprm_creds(struct linux_binprm *bprm); | 137 | extern int prepare_bprm_creds(struct linux_binprm *bprm); |
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 8b9d6fff002d..f2deb71958b2 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h | |||
| @@ -92,7 +92,7 @@ | |||
| 92 | /** | 92 | /** |
| 93 | * FIELD_GET() - extract a bitfield element | 93 | * FIELD_GET() - extract a bitfield element |
| 94 | * @_mask: shifted mask defining the field's length and position | 94 | * @_mask: shifted mask defining the field's length and position |
| 95 | * @_reg: 32bit value of entire bitfield | 95 | * @_reg: value of entire bitfield |
| 96 | * | 96 | * |
| 97 | * FIELD_GET() extracts the field specified by @_mask from the | 97 | * FIELD_GET() extracts the field specified by @_mask from the |
| 98 | * bitfield passed in as @_reg by masking and shifting it down. | 98 | * bitfield passed in as @_reg by masking and shifting it down. |
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8390859e79e7..f1af7d63d678 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
| @@ -368,6 +368,11 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) | |||
| 368 | { | 368 | { |
| 369 | } | 369 | } |
| 370 | 370 | ||
| 371 | static inline int bpf_obj_get_user(const char __user *pathname) | ||
| 372 | { | ||
| 373 | return -EOPNOTSUPP; | ||
| 374 | } | ||
| 375 | |||
| 371 | static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, | 376 | static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, |
| 372 | u32 key) | 377 | u32 key) |
| 373 | { | 378 | { |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index c8dae555eccf..446b24cac67d 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
| @@ -232,6 +232,7 @@ int generic_write_end(struct file *, struct address_space *, | |||
| 232 | loff_t, unsigned, unsigned, | 232 | loff_t, unsigned, unsigned, |
| 233 | struct page *, void *); | 233 | struct page *, void *); |
| 234 | void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); | 234 | void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); |
| 235 | void clean_page_buffers(struct page *page); | ||
| 235 | int cont_write_begin(struct file *, struct address_space *, loff_t, | 236 | int cont_write_begin(struct file *, struct address_space *, loff_t, |
| 236 | unsigned, unsigned, struct page **, void **, | 237 | unsigned, unsigned, struct page **, void **, |
| 237 | get_block_t *, loff_t *); | 238 | get_block_t *, loff_t *); |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 339e73742e73..13dab191a23e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -403,7 +403,7 @@ struct address_space { | |||
| 403 | unsigned long flags; /* error bits */ | 403 | unsigned long flags; /* error bits */ |
| 404 | spinlock_t private_lock; /* for use by the address_space */ | 404 | spinlock_t private_lock; /* for use by the address_space */ |
| 405 | gfp_t gfp_mask; /* implicit gfp mask for allocations */ | 405 | gfp_t gfp_mask; /* implicit gfp mask for allocations */ |
| 406 | struct list_head private_list; /* ditto */ | 406 | struct list_head private_list; /* for use by the address_space */ |
| 407 | void *private_data; /* ditto */ | 407 | void *private_data; /* ditto */ |
| 408 | errseq_t wb_err; | 408 | errseq_t wb_err; |
| 409 | } __attribute__((aligned(sizeof(long)))) __randomize_layout; | 409 | } __attribute__((aligned(sizeof(long)))) __randomize_layout; |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 0ad4c3044cf9..91189bb0c818 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -44,6 +44,12 @@ | |||
| 44 | 44 | ||
| 45 | #define STACK_MAGIC 0xdeadbeef | 45 | #define STACK_MAGIC 0xdeadbeef |
| 46 | 46 | ||
| 47 | /** | ||
| 48 | * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value | ||
| 49 | * @x: value to repeat | ||
| 50 | * | ||
| 51 | * NOTE: @x is not checked for > 0xff; larger values produce odd results. | ||
| 52 | */ | ||
| 47 | #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) | 53 | #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) |
| 48 | 54 | ||
| 49 | /* @a is a power of 2 value */ | 55 | /* @a is a power of 2 value */ |
| @@ -57,6 +63,10 @@ | |||
| 57 | #define READ 0 | 63 | #define READ 0 |
| 58 | #define WRITE 1 | 64 | #define WRITE 1 |
| 59 | 65 | ||
| 66 | /** | ||
| 67 | * ARRAY_SIZE - get the number of elements in array @arr | ||
| 68 | * @arr: array to be sized | ||
| 69 | */ | ||
| 60 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) | 70 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) |
| 61 | 71 | ||
| 62 | #define u64_to_user_ptr(x) ( \ | 72 | #define u64_to_user_ptr(x) ( \ |
| @@ -76,7 +86,15 @@ | |||
| 76 | #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) | 86 | #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) |
| 77 | #define round_down(x, y) ((x) & ~__round_mask(x, y)) | 87 | #define round_down(x, y) ((x) & ~__round_mask(x, y)) |
| 78 | 88 | ||
| 89 | /** | ||
| 90 | * FIELD_SIZEOF - get the size of a struct's field | ||
| 91 | * @t: the target struct | ||
| 92 | * @f: the target struct's field | ||
| 93 | * Return: the size of @f in the struct definition without having a | ||
| 94 | * declared instance of @t. | ||
| 95 | */ | ||
| 79 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) | 96 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) |
| 97 | |||
| 80 | #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP | 98 | #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP |
| 81 | 99 | ||
| 82 | #define DIV_ROUND_DOWN_ULL(ll, d) \ | 100 | #define DIV_ROUND_DOWN_ULL(ll, d) \ |
| @@ -107,7 +125,7 @@ | |||
| 107 | /* | 125 | /* |
| 108 | * Divide positive or negative dividend by positive or negative divisor | 126 | * Divide positive or negative dividend by positive or negative divisor |
| 109 | * and round to closest integer. Result is undefined for negative | 127 | * and round to closest integer. Result is undefined for negative |
| 110 | * divisors if he dividend variable type is unsigned and for negative | 128 | * divisors if the dividend variable type is unsigned and for negative |
| 111 | * dividends if the divisor variable type is unsigned. | 129 | * dividends if the divisor variable type is unsigned. |
| 112 | */ | 130 | */ |
| 113 | #define DIV_ROUND_CLOSEST(x, divisor)( \ | 131 | #define DIV_ROUND_CLOSEST(x, divisor)( \ |
| @@ -247,13 +265,13 @@ extern int _cond_resched(void); | |||
| 247 | * @ep_ro: right open interval endpoint | 265 | * @ep_ro: right open interval endpoint |
| 248 | * | 266 | * |
| 249 | * Perform a "reciprocal multiplication" in order to "scale" a value into | 267 | * Perform a "reciprocal multiplication" in order to "scale" a value into |
| 250 | * range [0, ep_ro), where the upper interval endpoint is right-open. | 268 | * range [0, @ep_ro), where the upper interval endpoint is right-open. |
| 251 | * This is useful, e.g. for accessing an index of an array containing | 269 | * This is useful, e.g. for accessing an index of an array containing |
| 252 | * ep_ro elements, for example. Think of it as sort of modulus, only that | 270 | * @ep_ro elements, for example. Think of it as sort of modulus, only that |
| 253 | * the result isn't that of modulo. ;) Note that if initial input is a | 271 | * the result isn't that of modulo. ;) Note that if initial input is a |
| 254 | * small value, then result will return 0. | 272 | * small value, then result will return 0. |
| 255 | * | 273 | * |
| 256 | * Return: a result based on val in interval [0, ep_ro). | 274 | * Return: a result based on @val in interval [0, @ep_ro). |
| 257 | */ | 275 | */ |
| 258 | static inline u32 reciprocal_scale(u32 val, u32 ep_ro) | 276 | static inline u32 reciprocal_scale(u32 val, u32 ep_ro) |
| 259 | { | 277 | { |
| @@ -618,8 +636,8 @@ do { \ | |||
| 618 | * trace_printk - printf formatting in the ftrace buffer | 636 | * trace_printk - printf formatting in the ftrace buffer |
| 619 | * @fmt: the printf format for printing | 637 | * @fmt: the printf format for printing |
| 620 | * | 638 | * |
| 621 | * Note: __trace_printk is an internal function for trace_printk and | 639 | * Note: __trace_printk is an internal function for trace_printk() and |
| 622 | * the @ip is passed in via the trace_printk macro. | 640 | * the @ip is passed in via the trace_printk() macro. |
| 623 | * | 641 | * |
| 624 | * This function allows a kernel developer to debug fast path sections | 642 | * This function allows a kernel developer to debug fast path sections |
| 625 | * that printk is not appropriate for. By scattering in various | 643 | * that printk is not appropriate for. By scattering in various |
| @@ -629,7 +647,7 @@ do { \ | |||
| 629 | * This is intended as a debugging tool for the developer only. | 647 | * This is intended as a debugging tool for the developer only. |
| 630 | * Please refrain from leaving trace_printks scattered around in | 648 | * Please refrain from leaving trace_printks scattered around in |
| 631 | * your code. (Extra memory is used for special buffers that are | 649 | * your code. (Extra memory is used for special buffers that are |
| 632 | * allocated when trace_printk() is used) | 650 | * allocated when trace_printk() is used.) |
| 633 | * | 651 | * |
| 634 | * A little optization trick is done here. If there's only one | 652 | * A little optization trick is done here. If there's only one |
| 635 | * argument, there's no need to scan the string for printf formats. | 653 | * argument, there's no need to scan the string for printf formats. |
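A hedged example of a temporary trace_printk() call of the kind the comment describes ('skb' assumed from the surrounding fast path):

    static void debug_xmit(struct sk_buff *skb)   /* hypothetical debug hook */
    {
            trace_printk("xmit len=%u on cpu %d\n", skb->len, smp_processor_id());
    }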
| @@ -681,7 +699,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...); | |||
| 681 | * the @ip is passed in via the trace_puts macro. | 699 | * the @ip is passed in via the trace_puts macro. |
| 682 | * | 700 | * |
| 683 | * This is similar to trace_printk() but is made for those really fast | 701 | * This is similar to trace_printk() but is made for those really fast |
| 684 | * paths that a developer wants the least amount of "Heisenbug" affects, | 702 | * paths that a developer wants the least amount of "Heisenbug" effects, |
| 685 | * where the processing of the print format is still too much. | 703 | * where the processing of the print format is still too much. |
| 686 | * | 704 | * |
| 687 | * This function allows a kernel developer to debug fast path sections | 705 | * This function allows a kernel developer to debug fast path sections |
| @@ -692,7 +710,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...); | |||
| 692 | * This is intended as a debugging tool for the developer only. | 710 | * This is intended as a debugging tool for the developer only. |
| 693 | * Please refrain from leaving trace_puts scattered around in | 711 | * Please refrain from leaving trace_puts scattered around in |
| 694 | * your code. (Extra memory is used for special buffers that are | 712 | * your code. (Extra memory is used for special buffers that are |
| 695 | * allocated when trace_puts() is used) | 713 | * allocated when trace_puts() is used.) |
| 696 | * | 714 | * |
| 697 | * Returns: 0 if nothing was written, positive # if string was. | 715 | * Returns: 0 if nothing was written, positive # if string was. |
| 698 | * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) | 716 | * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) |
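For illustration: trace_puts() takes only a constant string, so nothing is scanned for format specifiers on the fast path:

    trace_puts("entered slow path\n");   /* returns strlen() or 1, 0 if nothing written */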
| @@ -771,6 +789,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 771 | t2 min2 = (y); \ | 789 | t2 min2 = (y); \ |
| 772 | (void) (&min1 == &min2); \ | 790 | (void) (&min1 == &min2); \ |
| 773 | min1 < min2 ? min1 : min2; }) | 791 | min1 < min2 ? min1 : min2; }) |
| 792 | |||
| 793 | /** | ||
| 794 | * min - return minimum of two values of the same or compatible types | ||
| 795 | * @x: first value | ||
| 796 | * @y: second value | ||
| 797 | */ | ||
| 774 | #define min(x, y) \ | 798 | #define min(x, y) \ |
| 775 | __min(typeof(x), typeof(y), \ | 799 | __min(typeof(x), typeof(y), \ |
| 776 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ | 800 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ |
| @@ -781,12 +805,31 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 781 | t2 max2 = (y); \ | 805 | t2 max2 = (y); \ |
| 782 | (void) (&max1 == &max2); \ | 806 | (void) (&max1 == &max2); \ |
| 783 | max1 > max2 ? max1 : max2; }) | 807 | max1 > max2 ? max1 : max2; }) |
| 808 | |||
| 809 | /** | ||
| 810 | * max - return maximum of two values of the same or compatible types | ||
| 811 | * @x: first value | ||
| 812 | * @y: second value | ||
| 813 | */ | ||
| 784 | #define max(x, y) \ | 814 | #define max(x, y) \ |
| 785 | __max(typeof(x), typeof(y), \ | 815 | __max(typeof(x), typeof(y), \ |
| 786 | __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \ | 816 | __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \ |
| 787 | x, y) | 817 | x, y) |
| 788 | 818 | ||
| 819 | /** | ||
| 820 | * min3 - return minimum of three values | ||
| 821 | * @x: first value | ||
| 822 | * @y: second value | ||
| 823 | * @z: third value | ||
| 824 | */ | ||
| 789 | #define min3(x, y, z) min((typeof(x))min(x, y), z) | 825 | #define min3(x, y, z) min((typeof(x))min(x, y), z) |
| 826 | |||
| 827 | /** | ||
| 828 | * max3 - return maximum of three values | ||
| 829 | * @x: first value | ||
| 830 | * @y: second value | ||
| 831 | * @z: third value | ||
| 832 | */ | ||
| 790 | #define max3(x, y, z) max((typeof(x))max(x, y), z) | 833 | #define max3(x, y, z) max((typeof(x))max(x, y), z) |
| 791 | 834 | ||
| 792 | /** | 835 | /** |
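A short illustration (not part of the patch) of the type rule documented for min()/max()/min3()/max3():

    static u32 pick(u32 a, u32 b, u32 c)
    {
            u32 lo  = min(a, b);        /* same type on both sides: no warning */
            u32 hi3 = max3(a, b, c);

            /* min(a, -1) would warn: comparison of distinct pointer types */
            return hi3 - lo;
    }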
| @@ -805,8 +848,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 805 | * @lo: lowest allowable value | 848 | * @lo: lowest allowable value |
| 806 | * @hi: highest allowable value | 849 | * @hi: highest allowable value |
| 807 | * | 850 | * |
| 808 | * This macro does strict typechecking of lo/hi to make sure they are of the | 851 | * This macro does strict typechecking of @lo/@hi to make sure they are of the |
| 809 | * same type as val. See the unnecessary pointer comparisons. | 852 | * same type as @val. See the unnecessary pointer comparisons. |
| 810 | */ | 853 | */ |
| 811 | #define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) | 854 | #define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) |
| 812 | 855 | ||
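A hedged example of clamp(): all three operands must share one type, here u32:

    static u32 scale_level(u32 raw)
    {
            return clamp(raw, 10U, 1000U);   /* result stays within [10, 1000] */
    }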
| @@ -816,11 +859,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 816 | * | 859 | * |
| 817 | * Or not use min/max/clamp at all, of course. | 860 | * Or not use min/max/clamp at all, of course. |
| 818 | */ | 861 | */ |
| 862 | |||
| 863 | /** | ||
| 864 | * min_t - return minimum of two values, using the specified type | ||
| 865 | * @type: data type to use | ||
| 866 | * @x: first value | ||
| 867 | * @y: second value | ||
| 868 | */ | ||
| 819 | #define min_t(type, x, y) \ | 869 | #define min_t(type, x, y) \ |
| 820 | __min(type, type, \ | 870 | __min(type, type, \ |
| 821 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ | 871 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ |
| 822 | x, y) | 872 | x, y) |
| 823 | 873 | ||
| 874 | /** | ||
| 875 | * max_t - return maximum of two values, using the specified type | ||
| 876 | * @type: data type to use | ||
| 877 | * @x: first value | ||
| 878 | * @y: second value | ||
| 879 | */ | ||
| 824 | #define max_t(type, x, y) \ | 880 | #define max_t(type, x, y) \ |
| 825 | __max(type, type, \ | 881 | __max(type, type, \ |
| 826 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ | 882 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ |
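For illustration: when the operands have different types, min_t()/max_t() force both into a caller-chosen type instead of emitting the pointer-comparison warning:

    static size_t copy_len(size_t avail, int want)
    {
            /* assumes want >= 0; both values are compared as size_t */
            return min_t(size_t, avail, want);
    }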
| @@ -834,7 +890,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 834 | * @hi: maximum allowable value | 890 | * @hi: maximum allowable value |
| 835 | * | 891 | * |
| 836 | * This macro does no typechecking and uses temporary variables of type | 892 | * This macro does no typechecking and uses temporary variables of type |
| 837 | * 'type' to make all the comparisons. | 893 | * @type to make all the comparisons. |
| 838 | */ | 894 | */ |
| 839 | #define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) | 895 | #define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) |
| 840 | 896 | ||
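A small sketch of clamp_t() ('raw_offset' is hypothetical): the comparisons are done in the given type, here signed int, regardless of the arguments' own types:

    static int clamp_offset(long raw_offset)
    {
            return clamp_t(int, raw_offset, -128, 127);
    }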
| @@ -845,15 +901,17 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
| 845 | * @hi: maximum allowable value | 901 | * @hi: maximum allowable value |
| 846 | * | 902 | * |
| 847 | * This macro does no typechecking and uses temporary variables of whatever | 903 | * This macro does no typechecking and uses temporary variables of whatever |
| 848 | * type the input argument 'val' is. This is useful when val is an unsigned | 904 | * type the input argument @val is. This is useful when @val is an unsigned |
| 849 | * type and min and max are literals that will otherwise be assigned a signed | 905 | * type and @lo and @hi are literals that will otherwise be assigned a signed |
| 850 | * integer type. | 906 | * integer type. |
| 851 | */ | 907 | */ |
| 852 | #define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) | 908 | #define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) |
| 853 | 909 | ||
| 854 | 910 | ||
| 855 | /* | 911 | /** |
| 856 | * swap - swap value of @a and @b | 912 | * swap - swap values of @a and @b |
| 913 | * @a: first value | ||
| 914 | * @b: second value | ||
| 857 | */ | 915 | */ |
| 858 | #define swap(a, b) \ | 916 | #define swap(a, b) \ |
| 859 | do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) | 917 | do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) |
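Hedged illustrations of the two remaining helpers in this hunk, clamp_val() and swap():

    static u8 clamp_duty(u8 duty)
    {
            /* comparisons stay in u8; the literal bounds are not treated as signed int */
            return clamp_val(duty, 0, 100);
    }

    static void order(int *a, int *b)
    {
            if (*a > *b)
                    swap(*a, *b);   /* *a and *b exchange values */
    }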
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index eaf4ad209c8f..e32dbc4934db 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
| @@ -980,7 +980,6 @@ enum mlx5_cap_type { | |||
| 980 | MLX5_CAP_RESERVED, | 980 | MLX5_CAP_RESERVED, |
| 981 | MLX5_CAP_VECTOR_CALC, | 981 | MLX5_CAP_VECTOR_CALC, |
| 982 | MLX5_CAP_QOS, | 982 | MLX5_CAP_QOS, |
| 983 | MLX5_CAP_FPGA, | ||
| 984 | /* NUM OF CAP Types */ | 983 | /* NUM OF CAP Types */ |
| 985 | MLX5_CAP_NUM | 984 | MLX5_CAP_NUM |
| 986 | }; | 985 | }; |
| @@ -1110,10 +1109,10 @@ enum mlx5_mcam_feature_groups { | |||
| 1110 | MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) | 1109 | MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) |
| 1111 | 1110 | ||
| 1112 | #define MLX5_CAP_FPGA(mdev, cap) \ | 1111 | #define MLX5_CAP_FPGA(mdev, cap) \ |
| 1113 | MLX5_GET(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap) | 1112 | MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap) |
| 1114 | 1113 | ||
| 1115 | #define MLX5_CAP64_FPGA(mdev, cap) \ | 1114 | #define MLX5_CAP64_FPGA(mdev, cap) \ |
| 1116 | MLX5_GET64(fpga_cap, (mdev)->caps.hca_cur[MLX5_CAP_FPGA], cap) | 1115 | MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap) |
| 1117 | 1116 | ||
| 1118 | enum { | 1117 | enum { |
| 1119 | MLX5_CMD_STAT_OK = 0x0, | 1118 | MLX5_CMD_STAT_OK = 0x0, |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 02ff700e4f30..401c8972cc3a 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -774,6 +774,7 @@ struct mlx5_core_dev { | |||
| 774 | u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; | 774 | u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; |
| 775 | u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; | 775 | u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; |
| 776 | u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; | 776 | u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; |
| 777 | u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; | ||
| 777 | } caps; | 778 | } caps; |
| 778 | phys_addr_t iseg_base; | 779 | phys_addr_t iseg_base; |
| 779 | struct mlx5_init_seg __iomem *iseg; | 780 | struct mlx5_init_seg __iomem *iseg; |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index a528b35a022e..69772347f866 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
| @@ -327,7 +327,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits { | |||
| 327 | u8 reserved_at_80[0x18]; | 327 | u8 reserved_at_80[0x18]; |
| 328 | u8 log_max_destination[0x8]; | 328 | u8 log_max_destination[0x8]; |
| 329 | 329 | ||
| 330 | u8 reserved_at_a0[0x18]; | 330 | u8 log_max_flow_counter[0x8]; |
| 331 | u8 reserved_at_a8[0x10]; | ||
| 331 | u8 log_max_flow[0x8]; | 332 | u8 log_max_flow[0x8]; |
| 332 | 333 | ||
| 333 | u8 reserved_at_c0[0x40]; | 334 | u8 reserved_at_c0[0x40]; |
diff --git a/include/linux/mm.h b/include/linux/mm.h index f8c10d336e42..065d99deb847 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -240,7 +240,7 @@ extern unsigned int kobjsize(const void *objp); | |||
| 240 | 240 | ||
| 241 | #if defined(CONFIG_X86_INTEL_MPX) | 241 | #if defined(CONFIG_X86_INTEL_MPX) |
| 242 | /* MPX specific bounds table or bounds directory */ | 242 | /* MPX specific bounds table or bounds directory */ |
| 243 | # define VM_MPX VM_HIGH_ARCH_BIT_4 | 243 | # define VM_MPX VM_HIGH_ARCH_4 |
| 244 | #else | 244 | #else |
| 245 | # define VM_MPX VM_NONE | 245 | # define VM_MPX VM_NONE |
| 246 | #endif | 246 | #endif |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index f3f2d07feb2a..9a43763a68ad 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
| @@ -316,7 +316,7 @@ struct mmc_host { | |||
| 316 | #define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */ | 316 | #define MMC_CAP_UHS_SDR50 (1 << 18) /* Host supports UHS SDR50 mode */ |
| 317 | #define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */ | 317 | #define MMC_CAP_UHS_SDR104 (1 << 19) /* Host supports UHS SDR104 mode */ |
| 318 | #define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */ | 318 | #define MMC_CAP_UHS_DDR50 (1 << 20) /* Host supports UHS DDR50 mode */ |
| 319 | #define MMC_CAP_NO_BOUNCE_BUFF (1 << 21) /* Disable bounce buffers on host */ | 319 | /* (1 << 21) is free for reuse */ |
| 320 | #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ | 320 | #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ |
| 321 | #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ | 321 | #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ |
| 322 | #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ | 322 | #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ |
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index 7b2e31b1745a..6866e8126982 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h | |||
| @@ -400,6 +400,11 @@ extern void mmu_notifier_synchronize(void); | |||
| 400 | 400 | ||
| 401 | #else /* CONFIG_MMU_NOTIFIER */ | 401 | #else /* CONFIG_MMU_NOTIFIER */ |
| 402 | 402 | ||
| 403 | static inline int mm_has_notifiers(struct mm_struct *mm) | ||
| 404 | { | ||
| 405 | return 0; | ||
| 406 | } | ||
| 407 | |||
| 403 | static inline void mmu_notifier_release(struct mm_struct *mm) | 408 | static inline void mmu_notifier_release(struct mm_struct *mm) |
| 404 | { | 409 | { |
| 405 | } | 410 | } |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 356a814e7c8e..c8f89417740b 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -1094,8 +1094,14 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn) | |||
| 1094 | #error Allocator MAX_ORDER exceeds SECTION_SIZE | 1094 | #error Allocator MAX_ORDER exceeds SECTION_SIZE |
| 1095 | #endif | 1095 | #endif |
| 1096 | 1096 | ||
| 1097 | #define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT) | 1097 | static inline unsigned long pfn_to_section_nr(unsigned long pfn) |
| 1098 | #define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT) | 1098 | { |
| 1099 | return pfn >> PFN_SECTION_SHIFT; | ||
| 1100 | } | ||
| 1101 | static inline unsigned long section_nr_to_pfn(unsigned long sec) | ||
| 1102 | { | ||
| 1103 | return sec << PFN_SECTION_SHIFT; | ||
| 1104 | } | ||
| 1099 | 1105 | ||
| 1100 | #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) | 1106 | #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK) |
| 1101 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) | 1107 | #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) |
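For illustration: converting the two helpers to static inlines keeps their behaviour while adding type checking; note that the round trip is only section-aligned:

    static unsigned long section_base(unsigned long pfn)
    {
            /* yields the first pfn of pfn's section, not necessarily pfn itself */
            return section_nr_to_pfn(pfn_to_section_nr(pfn));
    }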
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 2c2a5514b0df..528b24c78308 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h | |||
| @@ -108,9 +108,10 @@ struct ebt_table { | |||
| 108 | 108 | ||
| 109 | #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \ | 109 | #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \ |
| 110 | ~(__alignof__(struct _xt_align)-1)) | 110 | ~(__alignof__(struct _xt_align)-1)) |
| 111 | extern struct ebt_table *ebt_register_table(struct net *net, | 111 | extern int ebt_register_table(struct net *net, |
| 112 | const struct ebt_table *table, | 112 | const struct ebt_table *table, |
| 113 | const struct nf_hook_ops *); | 113 | const struct nf_hook_ops *ops, |
| 114 | struct ebt_table **res); | ||
| 114 | extern void ebt_unregister_table(struct net *net, struct ebt_table *table, | 115 | extern void ebt_unregister_table(struct net *net, struct ebt_table *table, |
| 115 | const struct nf_hook_ops *); | 116 | const struct nf_hook_ops *); |
| 116 | extern unsigned int ebt_do_table(struct sk_buff *skb, | 117 | extern unsigned int ebt_do_table(struct sk_buff *skb, |
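A hedged sketch of the new ebt_register_table() calling convention ('frame_filter', 'my_ops' and the storage pointer are assumed to exist elsewhere): the live table now comes back through the extra argument and errors through the return value:

    static struct ebt_table *installed;   /* where the registered table is kept */

    static int __net_init my_ebt_net_init(struct net *net)
    {
            return ebt_register_table(net, &frame_filter, my_ops, &installed);
    }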
diff --git a/include/linux/nmi.h b/include/linux/nmi.h index a36abe2da13e..27e249ed7c5c 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h | |||
| @@ -12,11 +12,31 @@ | |||
| 12 | 12 | ||
| 13 | #ifdef CONFIG_LOCKUP_DETECTOR | 13 | #ifdef CONFIG_LOCKUP_DETECTOR |
| 14 | void lockup_detector_init(void); | 14 | void lockup_detector_init(void); |
| 15 | void lockup_detector_soft_poweroff(void); | ||
| 16 | void lockup_detector_cleanup(void); | ||
| 17 | bool is_hardlockup(void); | ||
| 18 | |||
| 19 | extern int watchdog_user_enabled; | ||
| 20 | extern int nmi_watchdog_user_enabled; | ||
| 21 | extern int soft_watchdog_user_enabled; | ||
| 22 | extern int watchdog_thresh; | ||
| 23 | extern unsigned long watchdog_enabled; | ||
| 24 | |||
| 25 | extern struct cpumask watchdog_cpumask; | ||
| 26 | extern unsigned long *watchdog_cpumask_bits; | ||
| 27 | #ifdef CONFIG_SMP | ||
| 28 | extern int sysctl_softlockup_all_cpu_backtrace; | ||
| 29 | extern int sysctl_hardlockup_all_cpu_backtrace; | ||
| 15 | #else | 30 | #else |
| 16 | static inline void lockup_detector_init(void) | 31 | #define sysctl_softlockup_all_cpu_backtrace 0 |
| 17 | { | 32 | #define sysctl_hardlockup_all_cpu_backtrace 0 |
| 18 | } | 33 | #endif /* !CONFIG_SMP */ |
| 19 | #endif | 34 | |
| 35 | #else /* CONFIG_LOCKUP_DETECTOR */ | ||
| 36 | static inline void lockup_detector_init(void) { } | ||
| 37 | static inline void lockup_detector_soft_poweroff(void) { } | ||
| 38 | static inline void lockup_detector_cleanup(void) { } | ||
| 39 | #endif /* !CONFIG_LOCKUP_DETECTOR */ | ||
| 20 | 40 | ||
| 21 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR | 41 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR |
| 22 | extern void touch_softlockup_watchdog_sched(void); | 42 | extern void touch_softlockup_watchdog_sched(void); |
| @@ -24,29 +44,17 @@ extern void touch_softlockup_watchdog(void); | |||
| 24 | extern void touch_softlockup_watchdog_sync(void); | 44 | extern void touch_softlockup_watchdog_sync(void); |
| 25 | extern void touch_all_softlockup_watchdogs(void); | 45 | extern void touch_all_softlockup_watchdogs(void); |
| 26 | extern unsigned int softlockup_panic; | 46 | extern unsigned int softlockup_panic; |
| 27 | extern int soft_watchdog_enabled; | ||
| 28 | extern atomic_t watchdog_park_in_progress; | ||
| 29 | #else | 47 | #else |
| 30 | static inline void touch_softlockup_watchdog_sched(void) | 48 | static inline void touch_softlockup_watchdog_sched(void) { } |
| 31 | { | 49 | static inline void touch_softlockup_watchdog(void) { } |
| 32 | } | 50 | static inline void touch_softlockup_watchdog_sync(void) { } |
| 33 | static inline void touch_softlockup_watchdog(void) | 51 | static inline void touch_all_softlockup_watchdogs(void) { } |
| 34 | { | ||
| 35 | } | ||
| 36 | static inline void touch_softlockup_watchdog_sync(void) | ||
| 37 | { | ||
| 38 | } | ||
| 39 | static inline void touch_all_softlockup_watchdogs(void) | ||
| 40 | { | ||
| 41 | } | ||
| 42 | #endif | 52 | #endif |
| 43 | 53 | ||
| 44 | #ifdef CONFIG_DETECT_HUNG_TASK | 54 | #ifdef CONFIG_DETECT_HUNG_TASK |
| 45 | void reset_hung_task_detector(void); | 55 | void reset_hung_task_detector(void); |
| 46 | #else | 56 | #else |
| 47 | static inline void reset_hung_task_detector(void) | 57 | static inline void reset_hung_task_detector(void) { } |
| 48 | { | ||
| 49 | } | ||
| 50 | #endif | 58 | #endif |
| 51 | 59 | ||
| 52 | /* | 60 | /* |
| @@ -54,12 +62,12 @@ static inline void reset_hung_task_detector(void) | |||
| 54 | * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - | 62 | * 'watchdog_enabled' variable. Each lockup detector has its dedicated bit - |
| 55 | * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. | 63 | * bit 0 for the hard lockup detector and bit 1 for the soft lockup detector. |
| 56 | * | 64 | * |
| 57 | * 'watchdog_user_enabled', 'nmi_watchdog_enabled' and 'soft_watchdog_enabled' | 65 | * 'watchdog_user_enabled', 'nmi_watchdog_user_enabled' and |
| 58 | * are variables that are only used as an 'interface' between the parameters | 66 | * 'soft_watchdog_user_enabled' are variables that are only used as an |
| 59 | * in /proc/sys/kernel and the internal state bits in 'watchdog_enabled'. The | 67 | * 'interface' between the parameters in /proc/sys/kernel and the internal |
| 60 | * 'watchdog_thresh' variable is handled differently because its value is not | 68 | * state bits in 'watchdog_enabled'. The 'watchdog_thresh' variable is |
| 61 | * boolean, and the lockup detectors are 'suspended' while 'watchdog_thresh' | 69 | * handled differently because its value is not boolean, and the lockup |
| 62 | * is equal zero. | 70 | * detectors are 'suspended' while 'watchdog_thresh' is equal zero. |
| 63 | */ | 71 | */ |
| 64 | #define NMI_WATCHDOG_ENABLED_BIT 0 | 72 | #define NMI_WATCHDOG_ENABLED_BIT 0 |
| 65 | #define SOFT_WATCHDOG_ENABLED_BIT 1 | 73 | #define SOFT_WATCHDOG_ENABLED_BIT 1 |
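A hypothetical helper, for illustration of how the *_ENABLED_BIT values index into 'watchdog_enabled' as described in the comment above:

    static inline bool soft_watchdog_active(void)
    {
            return watchdog_enabled & (1UL << SOFT_WATCHDOG_ENABLED_BIT);
    }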
| @@ -73,17 +81,41 @@ extern unsigned int hardlockup_panic; | |||
| 73 | static inline void hardlockup_detector_disable(void) {} | 81 | static inline void hardlockup_detector_disable(void) {} |
| 74 | #endif | 82 | #endif |
| 75 | 83 | ||
| 84 | #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) | ||
| 85 | # define NMI_WATCHDOG_SYSCTL_PERM 0644 | ||
| 86 | #else | ||
| 87 | # define NMI_WATCHDOG_SYSCTL_PERM 0444 | ||
| 88 | #endif | ||
| 89 | |||
| 76 | #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) | 90 | #if defined(CONFIG_HARDLOCKUP_DETECTOR_PERF) |
| 77 | extern void arch_touch_nmi_watchdog(void); | 91 | extern void arch_touch_nmi_watchdog(void); |
| 92 | extern void hardlockup_detector_perf_stop(void); | ||
| 93 | extern void hardlockup_detector_perf_restart(void); | ||
| 94 | extern void hardlockup_detector_perf_disable(void); | ||
| 95 | extern void hardlockup_detector_perf_enable(void); | ||
| 96 | extern void hardlockup_detector_perf_cleanup(void); | ||
| 97 | extern int hardlockup_detector_perf_init(void); | ||
| 78 | #else | 98 | #else |
| 79 | #if !defined(CONFIG_HAVE_NMI_WATCHDOG) | 99 | static inline void hardlockup_detector_perf_stop(void) { } |
| 100 | static inline void hardlockup_detector_perf_restart(void) { } | ||
| 101 | static inline void hardlockup_detector_perf_disable(void) { } | ||
| 102 | static inline void hardlockup_detector_perf_enable(void) { } | ||
| 103 | static inline void hardlockup_detector_perf_cleanup(void) { } | ||
| 104 | # if !defined(CONFIG_HAVE_NMI_WATCHDOG) | ||
| 105 | static inline int hardlockup_detector_perf_init(void) { return -ENODEV; } | ||
| 80 | static inline void arch_touch_nmi_watchdog(void) {} | 106 | static inline void arch_touch_nmi_watchdog(void) {} |
| 107 | # else | ||
| 108 | static inline int hardlockup_detector_perf_init(void) { return 0; } | ||
| 109 | # endif | ||
| 81 | #endif | 110 | #endif |
| 82 | #endif | 111 | |
| 112 | void watchdog_nmi_stop(void); | ||
| 113 | void watchdog_nmi_start(void); | ||
| 114 | int watchdog_nmi_probe(void); | ||
| 83 | 115 | ||
| 84 | /** | 116 | /** |
| 85 | * touch_nmi_watchdog - restart NMI watchdog timeout. | 117 | * touch_nmi_watchdog - restart NMI watchdog timeout. |
| 86 | * | 118 | * |
| 87 | * If the architecture supports the NMI watchdog, touch_nmi_watchdog() | 119 | * If the architecture supports the NMI watchdog, touch_nmi_watchdog() |
| 88 | * may be used to reset the timeout - for code which intentionally | 120 | * may be used to reset the timeout - for code which intentionally |
| 89 | * disables interrupts for a long time. This call is stateless. | 121 | * disables interrupts for a long time. This call is stateless. |
| @@ -153,22 +185,6 @@ static inline bool trigger_single_cpu_backtrace(int cpu) | |||
| 153 | u64 hw_nmi_get_sample_period(int watchdog_thresh); | 185 | u64 hw_nmi_get_sample_period(int watchdog_thresh); |
| 154 | #endif | 186 | #endif |
| 155 | 187 | ||
| 156 | #ifdef CONFIG_LOCKUP_DETECTOR | ||
| 157 | extern int nmi_watchdog_enabled; | ||
| 158 | extern int watchdog_user_enabled; | ||
| 159 | extern int watchdog_thresh; | ||
| 160 | extern unsigned long watchdog_enabled; | ||
| 161 | extern struct cpumask watchdog_cpumask; | ||
| 162 | extern unsigned long *watchdog_cpumask_bits; | ||
| 163 | extern int __read_mostly watchdog_suspended; | ||
| 164 | #ifdef CONFIG_SMP | ||
| 165 | extern int sysctl_softlockup_all_cpu_backtrace; | ||
| 166 | extern int sysctl_hardlockup_all_cpu_backtrace; | ||
| 167 | #else | ||
| 168 | #define sysctl_softlockup_all_cpu_backtrace 0 | ||
| 169 | #define sysctl_hardlockup_all_cpu_backtrace 0 | ||
| 170 | #endif | ||
| 171 | |||
| 172 | #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ | 188 | #if defined(CONFIG_HARDLOCKUP_CHECK_TIMESTAMP) && \ |
| 173 | defined(CONFIG_HARDLOCKUP_DETECTOR) | 189 | defined(CONFIG_HARDLOCKUP_DETECTOR) |
| 174 | void watchdog_update_hrtimer_threshold(u64 period); | 190 | void watchdog_update_hrtimer_threshold(u64 period); |
| @@ -176,7 +192,6 @@ void watchdog_update_hrtimer_threshold(u64 period); | |||
| 176 | static inline void watchdog_update_hrtimer_threshold(u64 period) { } | 192 | static inline void watchdog_update_hrtimer_threshold(u64 period) { } |
| 177 | #endif | 193 | #endif |
| 178 | 194 | ||
| 179 | extern bool is_hardlockup(void); | ||
| 180 | struct ctl_table; | 195 | struct ctl_table; |
| 181 | extern int proc_watchdog(struct ctl_table *, int , | 196 | extern int proc_watchdog(struct ctl_table *, int , |
| 182 | void __user *, size_t *, loff_t *); | 197 | void __user *, size_t *, loff_t *); |
| @@ -188,18 +203,6 @@ extern int proc_watchdog_thresh(struct ctl_table *, int , | |||
| 188 | void __user *, size_t *, loff_t *); | 203 | void __user *, size_t *, loff_t *); |
| 189 | extern int proc_watchdog_cpumask(struct ctl_table *, int, | 204 | extern int proc_watchdog_cpumask(struct ctl_table *, int, |
| 190 | void __user *, size_t *, loff_t *); | 205 | void __user *, size_t *, loff_t *); |
| 191 | extern int lockup_detector_suspend(void); | ||
| 192 | extern void lockup_detector_resume(void); | ||
| 193 | #else | ||
| 194 | static inline int lockup_detector_suspend(void) | ||
| 195 | { | ||
| 196 | return 0; | ||
| 197 | } | ||
| 198 | |||
| 199 | static inline void lockup_detector_resume(void) | ||
| 200 | { | ||
| 201 | } | ||
| 202 | #endif | ||
| 203 | 206 | ||
| 204 | #ifdef CONFIG_HAVE_ACPI_APEI_NMI | 207 | #ifdef CONFIG_HAVE_ACPI_APEI_NMI |
| 205 | #include <asm/nmi.h> | 208 | #include <asm/nmi.h> |
diff --git a/include/linux/of.h b/include/linux/of.h index cfc34117fc92..b240ed69dc96 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
| @@ -734,6 +734,16 @@ static inline struct device_node *of_get_cpu_node(int cpu, | |||
| 734 | return NULL; | 734 | return NULL; |
| 735 | } | 735 | } |
| 736 | 736 | ||
| 737 | static inline int of_n_addr_cells(struct device_node *np) | ||
| 738 | { | ||
| 739 | return 0; | ||
| 740 | |||
| 741 | } | ||
| 742 | static inline int of_n_size_cells(struct device_node *np) | ||
| 743 | { | ||
| 744 | return 0; | ||
| 745 | } | ||
| 746 | |||
| 737 | static inline int of_property_read_u64(const struct device_node *np, | 747 | static inline int of_property_read_u64(const struct device_node *np, |
| 738 | const char *propname, u64 *out_value) | 748 | const char *propname, u64 *out_value) |
| 739 | { | 749 | { |
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 3a19c253bdb1..ae53e413fb13 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h | |||
| @@ -84,6 +84,12 @@ static inline bool mmget_not_zero(struct mm_struct *mm) | |||
| 84 | 84 | ||
| 85 | /* mmput gets rid of the mappings and all user-space */ | 85 | /* mmput gets rid of the mappings and all user-space */ |
| 86 | extern void mmput(struct mm_struct *); | 86 | extern void mmput(struct mm_struct *); |
| 87 | #ifdef CONFIG_MMU | ||
| 88 | /* same as above but performs the slow path from the async context. Can | ||
| 89 | * be called from the atomic context as well | ||
| 90 | */ | ||
| 91 | void mmput_async(struct mm_struct *); | ||
| 92 | #endif | ||
| 87 | 93 | ||
| 88 | /* Grab a reference to a task's mm, if it is not already going away */ | 94 | /* Grab a reference to a task's mm, if it is not already going away */ |
| 89 | extern struct mm_struct *get_task_mm(struct task_struct *task); | 95 | extern struct mm_struct *get_task_mm(struct task_struct *task); |
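A minimal sketch of the new mmput_async() ('mm' obtained elsewhere, e.g. via get_task_mm()): usable where sleeping is not allowed, with the final teardown deferred:

    static void drop_mm_atomic(struct mm_struct *mm)
    {
            mmput_async(mm);   /* final __mmput() runs later from a workqueue */
    }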
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index d7b6dab956ec..7d065abc7a47 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h | |||
| @@ -71,14 +71,6 @@ struct sched_domain_shared { | |||
| 71 | atomic_t ref; | 71 | atomic_t ref; |
| 72 | atomic_t nr_busy_cpus; | 72 | atomic_t nr_busy_cpus; |
| 73 | int has_idle_cores; | 73 | int has_idle_cores; |
| 74 | |||
| 75 | /* | ||
| 76 | * Some variables from the most recent sd_lb_stats for this domain, | ||
| 77 | * used by wake_affine(). | ||
| 78 | */ | ||
| 79 | unsigned long nr_running; | ||
| 80 | unsigned long load; | ||
| 81 | unsigned long capacity; | ||
| 82 | }; | 74 | }; |
| 83 | 75 | ||
| 84 | struct sched_domain { | 76 | struct sched_domain { |
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h index 12910cf19869..c149aa7bedf3 100644 --- a/include/linux/smpboot.h +++ b/include/linux/smpboot.h | |||
| @@ -55,7 +55,7 @@ smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread) | |||
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); | 57 | void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); |
| 58 | int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, | 58 | void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, |
| 59 | const struct cpumask *); | 59 | const struct cpumask *); |
| 60 | 60 | ||
| 61 | #endif | 61 | #endif |
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 905d769d8ddc..5f7eeab990fe 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
| @@ -42,7 +42,7 @@ enum { | |||
| 42 | #define THREAD_ALIGN THREAD_SIZE | 42 | #define THREAD_ALIGN THREAD_SIZE |
| 43 | #endif | 43 | #endif |
| 44 | 44 | ||
| 45 | #ifdef CONFIG_DEBUG_STACK_USAGE | 45 | #if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK) |
| 46 | # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \ | 46 | # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \ |
| 47 | __GFP_ZERO) | 47 | __GFP_ZERO) |
| 48 | #else | 48 | #else |
diff --git a/include/net/netlink.h b/include/net/netlink.h index e51cf5f81597..14c289393071 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h | |||
| @@ -773,7 +773,10 @@ static inline int nla_parse_nested(struct nlattr *tb[], int maxtype, | |||
| 773 | */ | 773 | */ |
| 774 | static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value) | 774 | static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value) |
| 775 | { | 775 | { |
| 776 | return nla_put(skb, attrtype, sizeof(u8), &value); | 776 | /* temporary variables to work around GCC PR81715 with asan-stack=1 */ |
| 777 | u8 tmp = value; | ||
| 778 | |||
| 779 | return nla_put(skb, attrtype, sizeof(u8), &tmp); | ||
| 777 | } | 780 | } |
| 778 | 781 | ||
| 779 | /** | 782 | /** |
| @@ -784,7 +787,9 @@ static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value) | |||
| 784 | */ | 787 | */ |
| 785 | static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value) | 788 | static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value) |
| 786 | { | 789 | { |
| 787 | return nla_put(skb, attrtype, sizeof(u16), &value); | 790 | u16 tmp = value; |
| 791 | |||
| 792 | return nla_put(skb, attrtype, sizeof(u16), &tmp); | ||
| 788 | } | 793 | } |
| 789 | 794 | ||
| 790 | /** | 795 | /** |
| @@ -795,7 +800,9 @@ static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value) | |||
| 795 | */ | 800 | */ |
| 796 | static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value) | 801 | static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value) |
| 797 | { | 802 | { |
| 798 | return nla_put(skb, attrtype, sizeof(__be16), &value); | 803 | __be16 tmp = value; |
| 804 | |||
| 805 | return nla_put(skb, attrtype, sizeof(__be16), &tmp); | ||
| 799 | } | 806 | } |
| 800 | 807 | ||
| 801 | /** | 808 | /** |
| @@ -806,7 +813,9 @@ static inline int nla_put_be16(struct sk_buff *skb, int attrtype, __be16 value) | |||
| 806 | */ | 813 | */ |
| 807 | static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value) | 814 | static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value) |
| 808 | { | 815 | { |
| 809 | return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, value); | 816 | __be16 tmp = value; |
| 817 | |||
| 818 | return nla_put_be16(skb, attrtype | NLA_F_NET_BYTEORDER, tmp); | ||
| 810 | } | 819 | } |
| 811 | 820 | ||
| 812 | /** | 821 | /** |
| @@ -817,7 +826,9 @@ static inline int nla_put_net16(struct sk_buff *skb, int attrtype, __be16 value) | |||
| 817 | */ | 826 | */ |
| 818 | static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value) | 827 | static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value) |
| 819 | { | 828 | { |
| 820 | return nla_put(skb, attrtype, sizeof(__le16), &value); | 829 | __le16 tmp = value; |
| 830 | |||
| 831 | return nla_put(skb, attrtype, sizeof(__le16), &tmp); | ||
| 821 | } | 832 | } |
| 822 | 833 | ||
| 823 | /** | 834 | /** |
| @@ -828,7 +839,9 @@ static inline int nla_put_le16(struct sk_buff *skb, int attrtype, __le16 value) | |||
| 828 | */ | 839 | */ |
| 829 | static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) | 840 | static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) |
| 830 | { | 841 | { |
| 831 | return nla_put(skb, attrtype, sizeof(u32), &value); | 842 | u32 tmp = value; |
| 843 | |||
| 844 | return nla_put(skb, attrtype, sizeof(u32), &tmp); | ||
| 832 | } | 845 | } |
| 833 | 846 | ||
| 834 | /** | 847 | /** |
| @@ -839,7 +852,9 @@ static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value) | |||
| 839 | */ | 852 | */ |
| 840 | static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value) | 853 | static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value) |
| 841 | { | 854 | { |
| 842 | return nla_put(skb, attrtype, sizeof(__be32), &value); | 855 | __be32 tmp = value; |
| 856 | |||
| 857 | return nla_put(skb, attrtype, sizeof(__be32), &tmp); | ||
| 843 | } | 858 | } |
| 844 | 859 | ||
| 845 | /** | 860 | /** |
| @@ -850,7 +865,9 @@ static inline int nla_put_be32(struct sk_buff *skb, int attrtype, __be32 value) | |||
| 850 | */ | 865 | */ |
| 851 | static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value) | 866 | static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value) |
| 852 | { | 867 | { |
| 853 | return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, value); | 868 | __be32 tmp = value; |
| 869 | |||
| 870 | return nla_put_be32(skb, attrtype | NLA_F_NET_BYTEORDER, tmp); | ||
| 854 | } | 871 | } |
| 855 | 872 | ||
| 856 | /** | 873 | /** |
| @@ -861,7 +878,9 @@ static inline int nla_put_net32(struct sk_buff *skb, int attrtype, __be32 value) | |||
| 861 | */ | 878 | */ |
| 862 | static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value) | 879 | static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value) |
| 863 | { | 880 | { |
| 864 | return nla_put(skb, attrtype, sizeof(__le32), &value); | 881 | __le32 tmp = value; |
| 882 | |||
| 883 | return nla_put(skb, attrtype, sizeof(__le32), &tmp); | ||
| 865 | } | 884 | } |
| 866 | 885 | ||
| 867 | /** | 886 | /** |
| @@ -874,7 +893,9 @@ static inline int nla_put_le32(struct sk_buff *skb, int attrtype, __le32 value) | |||
| 874 | static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype, | 893 | static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype, |
| 875 | u64 value, int padattr) | 894 | u64 value, int padattr) |
| 876 | { | 895 | { |
| 877 | return nla_put_64bit(skb, attrtype, sizeof(u64), &value, padattr); | 896 | u64 tmp = value; |
| 897 | |||
| 898 | return nla_put_64bit(skb, attrtype, sizeof(u64), &tmp, padattr); | ||
| 878 | } | 899 | } |
| 879 | 900 | ||
| 880 | /** | 901 | /** |
| @@ -887,7 +908,9 @@ static inline int nla_put_u64_64bit(struct sk_buff *skb, int attrtype, | |||
| 887 | static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value, | 908 | static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value, |
| 888 | int padattr) | 909 | int padattr) |
| 889 | { | 910 | { |
| 890 | return nla_put_64bit(skb, attrtype, sizeof(__be64), &value, padattr); | 911 | __be64 tmp = value; |
| 912 | |||
| 913 | return nla_put_64bit(skb, attrtype, sizeof(__be64), &tmp, padattr); | ||
| 891 | } | 914 | } |
| 892 | 915 | ||
| 893 | /** | 916 | /** |
| @@ -900,7 +923,9 @@ static inline int nla_put_be64(struct sk_buff *skb, int attrtype, __be64 value, | |||
| 900 | static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value, | 923 | static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value, |
| 901 | int padattr) | 924 | int padattr) |
| 902 | { | 925 | { |
| 903 | return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, value, | 926 | __be64 tmp = value; |
| 927 | |||
| 928 | return nla_put_be64(skb, attrtype | NLA_F_NET_BYTEORDER, tmp, | ||
| 904 | padattr); | 929 | padattr); |
| 905 | } | 930 | } |
| 906 | 931 | ||
| @@ -914,7 +939,9 @@ static inline int nla_put_net64(struct sk_buff *skb, int attrtype, __be64 value, | |||
| 914 | static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value, | 939 | static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value, |
| 915 | int padattr) | 940 | int padattr) |
| 916 | { | 941 | { |
| 917 | return nla_put_64bit(skb, attrtype, sizeof(__le64), &value, padattr); | 942 | __le64 tmp = value; |
| 943 | |||
| 944 | return nla_put_64bit(skb, attrtype, sizeof(__le64), &tmp, padattr); | ||
| 918 | } | 945 | } |
| 919 | 946 | ||
| 920 | /** | 947 | /** |
| @@ -925,7 +952,9 @@ static inline int nla_put_le64(struct sk_buff *skb, int attrtype, __le64 value, | |||
| 925 | */ | 952 | */ |
| 926 | static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value) | 953 | static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value) |
| 927 | { | 954 | { |
| 928 | return nla_put(skb, attrtype, sizeof(s8), &value); | 955 | s8 tmp = value; |
| 956 | |||
| 957 | return nla_put(skb, attrtype, sizeof(s8), &tmp); | ||
| 929 | } | 958 | } |
| 930 | 959 | ||
| 931 | /** | 960 | /** |
| @@ -936,7 +965,9 @@ static inline int nla_put_s8(struct sk_buff *skb, int attrtype, s8 value) | |||
| 936 | */ | 965 | */ |
| 937 | static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value) | 966 | static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value) |
| 938 | { | 967 | { |
| 939 | return nla_put(skb, attrtype, sizeof(s16), &value); | 968 | s16 tmp = value; |
| 969 | |||
| 970 | return nla_put(skb, attrtype, sizeof(s16), &tmp); | ||
| 940 | } | 971 | } |
| 941 | 972 | ||
| 942 | /** | 973 | /** |
| @@ -947,7 +978,9 @@ static inline int nla_put_s16(struct sk_buff *skb, int attrtype, s16 value) | |||
| 947 | */ | 978 | */ |
| 948 | static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value) | 979 | static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value) |
| 949 | { | 980 | { |
| 950 | return nla_put(skb, attrtype, sizeof(s32), &value); | 981 | s32 tmp = value; |
| 982 | |||
| 983 | return nla_put(skb, attrtype, sizeof(s32), &tmp); | ||
| 951 | } | 984 | } |
| 952 | 985 | ||
| 953 | /** | 986 | /** |
| @@ -960,7 +993,9 @@ static inline int nla_put_s32(struct sk_buff *skb, int attrtype, s32 value) | |||
| 960 | static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value, | 993 | static inline int nla_put_s64(struct sk_buff *skb, int attrtype, s64 value, |
| 961 | int padattr) | 994 | int padattr) |
| 962 | { | 995 | { |
| 963 | return nla_put_64bit(skb, attrtype, sizeof(s64), &value, padattr); | 996 | s64 tmp = value; |
| 997 | |||
| 998 | return nla_put_64bit(skb, attrtype, sizeof(s64), &tmp, padattr); | ||
| 964 | } | 999 | } |
| 965 | 1000 | ||
| 966 | /** | 1001 | /** |
| @@ -1010,7 +1045,9 @@ static inline int nla_put_msecs(struct sk_buff *skb, int attrtype, | |||
| 1010 | static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype, | 1045 | static inline int nla_put_in_addr(struct sk_buff *skb, int attrtype, |
| 1011 | __be32 addr) | 1046 | __be32 addr) |
| 1012 | { | 1047 | { |
| 1013 | return nla_put_be32(skb, attrtype, addr); | 1048 | __be32 tmp = addr; |
| 1049 | |||
| 1050 | return nla_put_be32(skb, attrtype, tmp); | ||
| 1014 | } | 1051 | } |
| 1015 | 1052 | ||
| 1016 | /** | 1053 | /** |
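Callers are unaffected by the temporary-variable workaround above; for illustration, a typical attribute emit ('skb' and 'operstate' assumed from the enclosing fill function):

    if (nla_put_u8(skb, IFLA_OPERSTATE, operstate))
            goto nla_put_failure;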
diff --git a/include/net/protocol.h b/include/net/protocol.h index 65ba335b0e7e..4fc75f7ae23b 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h | |||
| @@ -39,8 +39,8 @@ | |||
| 39 | 39 | ||
| 40 | /* This is used to register protocols. */ | 40 | /* This is used to register protocols. */ |
| 41 | struct net_protocol { | 41 | struct net_protocol { |
| 42 | void (*early_demux)(struct sk_buff *skb); | 42 | int (*early_demux)(struct sk_buff *skb); |
| 43 | void (*early_demux_handler)(struct sk_buff *skb); | 43 | int (*early_demux_handler)(struct sk_buff *skb); |
| 44 | int (*handler)(struct sk_buff *skb); | 44 | int (*handler)(struct sk_buff *skb); |
| 45 | void (*err_handler)(struct sk_buff *skb, u32 info); | 45 | void (*err_handler)(struct sk_buff *skb, u32 info); |
| 46 | unsigned int no_policy:1, | 46 | unsigned int no_policy:1, |
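A minimal stub conforming to the new int-returning early_demux prototype (the meaning of the return value is defined by the IPv4 input-path callers, which are not part of this hunk):

    static int my_early_demux(struct sk_buff *skb)
    {
            return 0;   /* placeholder; real handlers perform the demux work */
    }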
diff --git a/include/net/route.h b/include/net/route.h index 57dfc6850d37..d538e6db1afe 100644 --- a/include/net/route.h +++ b/include/net/route.h | |||
| @@ -175,7 +175,9 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 | |||
| 175 | fl4->fl4_gre_key = gre_key; | 175 | fl4->fl4_gre_key = gre_key; |
| 176 | return ip_route_output_key(net, fl4); | 176 | return ip_route_output_key(net, fl4); |
| 177 | } | 177 | } |
| 178 | 178 | int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |
| 179 | u8 tos, struct net_device *dev, | ||
| 180 | struct in_device *in_dev, u32 *itag); | ||
| 179 | int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src, | 181 | int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src, |
| 180 | u8 tos, struct net_device *devin); | 182 | u8 tos, struct net_device *devin); |
| 181 | int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src, | 183 | int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src, |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 3bc910a9bfc6..89974c5286d8 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
| @@ -345,7 +345,7 @@ void tcp_v4_err(struct sk_buff *skb, u32); | |||
| 345 | 345 | ||
| 346 | void tcp_shutdown(struct sock *sk, int how); | 346 | void tcp_shutdown(struct sock *sk, int how); |
| 347 | 347 | ||
| 348 | void tcp_v4_early_demux(struct sk_buff *skb); | 348 | int tcp_v4_early_demux(struct sk_buff *skb); |
| 349 | int tcp_v4_rcv(struct sk_buff *skb); | 349 | int tcp_v4_rcv(struct sk_buff *skb); |
| 350 | 350 | ||
| 351 | int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); | 351 | int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw); |
diff --git a/include/net/udp.h b/include/net/udp.h index 12dfbfe2e2d7..6c759c8594e2 100644 --- a/include/net/udp.h +++ b/include/net/udp.h | |||
| @@ -259,7 +259,7 @@ static inline struct sk_buff *skb_recv_udp(struct sock *sk, unsigned int flags, | |||
| 259 | return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err); | 259 | return __skb_recv_udp(sk, flags, noblock, &peeked, &off, err); |
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | void udp_v4_early_demux(struct sk_buff *skb); | 262 | int udp_v4_early_demux(struct sk_buff *skb); |
| 263 | bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); | 263 | bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst); |
| 264 | int udp_get_port(struct sock *sk, unsigned short snum, | 264 | int udp_get_port(struct sock *sk, unsigned short snum, |
| 265 | int (*saddr_cmp)(const struct sock *, | 265 | int (*saddr_cmp)(const struct sock *, |
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 82e93ee94708..67c5a9f223f7 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h | |||
| @@ -192,6 +192,7 @@ struct scsi_device { | |||
| 192 | unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ | 192 | unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ |
| 193 | unsigned broken_fua:1; /* Don't set FUA bit */ | 193 | unsigned broken_fua:1; /* Don't set FUA bit */ |
| 194 | unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */ | 194 | unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */ |
| 195 | unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */ | ||
| 195 | 196 | ||
| 196 | atomic_t disk_events_disable_depth; /* disable depth for disk events */ | 197 | atomic_t disk_events_disable_depth; /* disable depth for disk events */ |
| 197 | 198 | ||
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h index 9592570e092a..36b03013d629 100644 --- a/include/scsi/scsi_devinfo.h +++ b/include/scsi/scsi_devinfo.h | |||
| @@ -29,5 +29,6 @@ | |||
| 29 | #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ | 29 | #define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */ |
| 30 | #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ | 30 | #define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */ |
| 31 | #define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */ | 31 | #define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */ |
| 32 | #define BLIST_UNMAP_LIMIT_WS 0x80000000 /* Use UNMAP limit for WRITE SAME */ | ||
| 32 | 33 | ||
| 33 | #endif | 34 | #endif |
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index 6183d20a01fb..b266d2a3bcb1 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h | |||
| @@ -434,7 +434,6 @@ extern struct iscsi_cls_session *iscsi_create_session(struct Scsi_Host *shost, | |||
| 434 | unsigned int target_id); | 434 | unsigned int target_id); |
| 435 | extern void iscsi_remove_session(struct iscsi_cls_session *session); | 435 | extern void iscsi_remove_session(struct iscsi_cls_session *session); |
| 436 | extern void iscsi_free_session(struct iscsi_cls_session *session); | 436 | extern void iscsi_free_session(struct iscsi_cls_session *session); |
| 437 | extern int iscsi_destroy_session(struct iscsi_cls_session *session); | ||
| 438 | extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess, | 437 | extern struct iscsi_cls_conn *iscsi_create_conn(struct iscsi_cls_session *sess, |
| 439 | int dd_size, uint32_t cid); | 438 | int dd_size, uint32_t cid); |
| 440 | extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn); | 439 | extern int iscsi_destroy_conn(struct iscsi_cls_conn *conn); |
diff --git a/include/sound/hda_verbs.h b/include/sound/hda_verbs.h index d0509db6d0ec..f89cd5ee1c7a 100644 --- a/include/sound/hda_verbs.h +++ b/include/sound/hda_verbs.h | |||
| @@ -95,6 +95,7 @@ enum { | |||
| 95 | #define AC_VERB_SET_EAPD_BTLENABLE 0x70c | 95 | #define AC_VERB_SET_EAPD_BTLENABLE 0x70c |
| 96 | #define AC_VERB_SET_DIGI_CONVERT_1 0x70d | 96 | #define AC_VERB_SET_DIGI_CONVERT_1 0x70d |
| 97 | #define AC_VERB_SET_DIGI_CONVERT_2 0x70e | 97 | #define AC_VERB_SET_DIGI_CONVERT_2 0x70e |
| 98 | #define AC_VERB_SET_DIGI_CONVERT_3 0x73e | ||
| 98 | #define AC_VERB_SET_VOLUME_KNOB_CONTROL 0x70f | 99 | #define AC_VERB_SET_VOLUME_KNOB_CONTROL 0x70f |
| 99 | #define AC_VERB_SET_GPIO_DATA 0x715 | 100 | #define AC_VERB_SET_GPIO_DATA 0x715 |
| 100 | #define AC_VERB_SET_GPIO_MASK 0x716 | 101 | #define AC_VERB_SET_GPIO_MASK 0x716 |
diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h index a03acd0d398a..695257ae64ac 100644 --- a/include/sound/seq_virmidi.h +++ b/include/sound/seq_virmidi.h | |||
| @@ -60,6 +60,7 @@ struct snd_virmidi_dev { | |||
| 60 | int port; /* created/attached port */ | 60 | int port; /* created/attached port */ |
| 61 | unsigned int flags; /* SNDRV_VIRMIDI_* */ | 61 | unsigned int flags; /* SNDRV_VIRMIDI_* */ |
| 62 | rwlock_t filelist_lock; | 62 | rwlock_t filelist_lock; |
| 63 | struct rw_semaphore filelist_sem; | ||
| 63 | struct list_head filelist; | 64 | struct list_head filelist; |
| 64 | }; | 65 | }; |
| 65 | 66 | ||
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 43ab5c402f98..f90860d1f897 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
| @@ -312,7 +312,7 @@ union bpf_attr { | |||
| 312 | * jump into another BPF program | 312 | * jump into another BPF program |
| 313 | * @ctx: context pointer passed to next program | 313 | * @ctx: context pointer passed to next program |
| 314 | * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY | 314 | * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY |
| 315 | * @index: index inside array that selects specific program to run | 315 | * @index: 32-bit index inside array that selects specific program to run |
| 316 | * Return: 0 on success or negative error | 316 | * Return: 0 on success or negative error |
| 317 | * | 317 | * |
| 318 | * int bpf_clone_redirect(skb, ifindex, flags) | 318 | * int bpf_clone_redirect(skb, ifindex, flags) |
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h index 412c06a624c8..ccaea525340b 100644 --- a/include/uapi/linux/dm-ioctl.h +++ b/include/uapi/linux/dm-ioctl.h | |||
| @@ -269,9 +269,9 @@ enum { | |||
| 269 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) | 269 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) |
| 270 | 270 | ||
| 271 | #define DM_VERSION_MAJOR 4 | 271 | #define DM_VERSION_MAJOR 4 |
| 272 | #define DM_VERSION_MINOR 36 | 272 | #define DM_VERSION_MINOR 37 |
| 273 | #define DM_VERSION_PATCHLEVEL 0 | 273 | #define DM_VERSION_PATCHLEVEL 0 |
| 274 | #define DM_VERSION_EXTRA "-ioctl (2017-06-09)" | 274 | #define DM_VERSION_EXTRA "-ioctl (2017-09-20)" |
| 275 | 275 | ||
| 276 | /* Status bits */ | 276 | /* Status bits */ |
| 277 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ | 277 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ |
diff --git a/include/uapi/linux/netfilter/xt_bpf.h b/include/uapi/linux/netfilter/xt_bpf.h index b97725af2ac0..da161b56c79e 100644 --- a/include/uapi/linux/netfilter/xt_bpf.h +++ b/include/uapi/linux/netfilter/xt_bpf.h | |||
| @@ -23,6 +23,7 @@ enum xt_bpf_modes { | |||
| 23 | XT_BPF_MODE_FD_PINNED, | 23 | XT_BPF_MODE_FD_PINNED, |
| 24 | XT_BPF_MODE_FD_ELF, | 24 | XT_BPF_MODE_FD_ELF, |
| 25 | }; | 25 | }; |
| 26 | #define XT_BPF_MODE_PATH_PINNED XT_BPF_MODE_FD_PINNED | ||
| 26 | 27 | ||
| 27 | struct xt_bpf_info_v1 { | 28 | struct xt_bpf_info_v1 { |
| 28 | __u16 mode; | 29 | __u16 mode; |
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 917cc04a0a94..7b62df86be1d 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c | |||
| @@ -1022,7 +1022,7 @@ select_insn: | |||
| 1022 | struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; | 1022 | struct bpf_map *map = (struct bpf_map *) (unsigned long) BPF_R2; |
| 1023 | struct bpf_array *array = container_of(map, struct bpf_array, map); | 1023 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
| 1024 | struct bpf_prog *prog; | 1024 | struct bpf_prog *prog; |
| 1025 | u64 index = BPF_R3; | 1025 | u32 index = BPF_R3; |
| 1026 | 1026 | ||
| 1027 | if (unlikely(index >= array->map.max_entries)) | 1027 | if (unlikely(index >= array->map.max_entries)) |
| 1028 | goto out; | 1028 | goto out; |
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index e833ed914358..be1dde967208 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c | |||
| @@ -363,6 +363,7 @@ out: | |||
| 363 | putname(pname); | 363 | putname(pname); |
| 364 | return ret; | 364 | return ret; |
| 365 | } | 365 | } |
| 366 | EXPORT_SYMBOL_GPL(bpf_obj_get_user); | ||
| 366 | 367 | ||
| 367 | static void bpf_evict_inode(struct inode *inode) | 368 | static void bpf_evict_inode(struct inode *inode) |
| 368 | { | 369 | { |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index b914fbe1383e..8b8d6ba39e23 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
| @@ -653,6 +653,10 @@ static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno) | |||
| 653 | { | 653 | { |
| 654 | struct bpf_verifier_state *parent = state->parent; | 654 | struct bpf_verifier_state *parent = state->parent; |
| 655 | 655 | ||
| 656 | if (regno == BPF_REG_FP) | ||
| 657 | /* We don't need to worry about FP liveness because it's read-only */ | ||
| 658 | return; | ||
| 659 | |||
| 656 | while (parent) { | 660 | while (parent) { |
| 657 | /* if read wasn't screened by an earlier write ... */ | 661 | /* if read wasn't screened by an earlier write ... */ |
| 658 | if (state->regs[regno].live & REG_LIVE_WRITTEN) | 662 | if (state->regs[regno].live & REG_LIVE_WRITTEN) |
| @@ -2345,6 +2349,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) | |||
| 2345 | * copy register state to dest reg | 2349 | * copy register state to dest reg |
| 2346 | */ | 2350 | */ |
| 2347 | regs[insn->dst_reg] = regs[insn->src_reg]; | 2351 | regs[insn->dst_reg] = regs[insn->src_reg]; |
| 2352 | regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; | ||
| 2348 | } else { | 2353 | } else { |
| 2349 | /* R1 = (u32) R2 */ | 2354 | /* R1 = (u32) R2 */ |
| 2350 | if (is_pointer_value(env, insn->src_reg)) { | 2355 | if (is_pointer_value(env, insn->src_reg)) { |
diff --git a/kernel/cpu.c b/kernel/cpu.c index 8de11a29e495..d851df22f5c5 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/lockdep.h> | 24 | #include <linux/lockdep.h> |
| 25 | #include <linux/tick.h> | 25 | #include <linux/tick.h> |
| 26 | #include <linux/irq.h> | 26 | #include <linux/irq.h> |
| 27 | #include <linux/nmi.h> | ||
| 27 | #include <linux/smpboot.h> | 28 | #include <linux/smpboot.h> |
| 28 | #include <linux/relay.h> | 29 | #include <linux/relay.h> |
| 29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
| @@ -897,6 +898,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, | |||
| 897 | 898 | ||
| 898 | out: | 899 | out: |
| 899 | cpus_write_unlock(); | 900 | cpus_write_unlock(); |
| 901 | /* | ||
| 902 | * Do post unplug cleanup. This is still protected against | ||
| 903 | * concurrent CPU hotplug via cpu_add_remove_lock. | ||
| 904 | */ | ||
| 905 | lockup_detector_cleanup(); | ||
| 900 | return ret; | 906 | return ret; |
| 901 | } | 907 | } |
| 902 | 908 | ||
diff --git a/kernel/events/core.c b/kernel/events/core.c index 6bc21e202ae4..9d93db81fa36 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
| @@ -662,7 +662,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event) | |||
| 662 | /* | 662 | /* |
| 663 | * Do not update time when cgroup is not active | 663 | * Do not update time when cgroup is not active |
| 664 | */ | 664 | */ |
| 665 | if (cgrp == event->cgrp) | 665 | if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) |
| 666 | __update_cgrp_time(event->cgrp); | 666 | __update_cgrp_time(event->cgrp); |
| 667 | } | 667 | } |
| 668 | 668 | ||
| @@ -8955,6 +8955,14 @@ static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) | |||
| 8955 | 8955 | ||
| 8956 | static void free_pmu_context(struct pmu *pmu) | 8956 | static void free_pmu_context(struct pmu *pmu) |
| 8957 | { | 8957 | { |
| 8958 | /* | ||
| 8959 | * Static contexts such as perf_sw_context have a global lifetime | ||
| 8960 | * and may be shared between different PMUs. Avoid freeing them | ||
| 8961 | * when a single PMU is going away. | ||
| 8962 | */ | ||
| 8963 | if (pmu->task_ctx_nr > perf_invalid_context) | ||
| 8964 | return; | ||
| 8965 | |||
| 8958 | mutex_lock(&pmus_lock); | 8966 | mutex_lock(&pmus_lock); |
| 8959 | free_percpu(pmu->pmu_cpu_context); | 8967 | free_percpu(pmu->pmu_cpu_context); |
| 8960 | mutex_unlock(&pmus_lock); | 8968 | mutex_unlock(&pmus_lock); |
diff --git a/kernel/exit.c b/kernel/exit.c index f2cd53e92147..cf28528842bc 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
| @@ -1610,6 +1610,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, | |||
| 1610 | if (!infop) | 1610 | if (!infop) |
| 1611 | return err; | 1611 | return err; |
| 1612 | 1612 | ||
| 1613 | if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) | ||
| 1614 | goto Efault; | ||
| 1615 | |||
| 1613 | user_access_begin(); | 1616 | user_access_begin(); |
| 1614 | unsafe_put_user(signo, &infop->si_signo, Efault); | 1617 | unsafe_put_user(signo, &infop->si_signo, Efault); |
| 1615 | unsafe_put_user(0, &infop->si_errno, Efault); | 1618 | unsafe_put_user(0, &infop->si_errno, Efault); |
| @@ -1735,6 +1738,9 @@ COMPAT_SYSCALL_DEFINE5(waitid, | |||
| 1735 | if (!infop) | 1738 | if (!infop) |
| 1736 | return err; | 1739 | return err; |
| 1737 | 1740 | ||
| 1741 | if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) | ||
| 1742 | goto Efault; | ||
| 1743 | |||
| 1738 | user_access_begin(); | 1744 | user_access_begin(); |
| 1739 | unsafe_put_user(signo, &infop->si_signo, Efault); | 1745 | unsafe_put_user(signo, &infop->si_signo, Efault); |
| 1740 | unsafe_put_user(0, &infop->si_errno, Efault); | 1746 | unsafe_put_user(0, &infop->si_errno, Efault); |
diff --git a/kernel/fork.c b/kernel/fork.c index 10646182440f..07cc743698d3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -215,6 +215,10 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) | |||
| 215 | if (!s) | 215 | if (!s) |
| 216 | continue; | 216 | continue; |
| 217 | 217 | ||
| 218 | #ifdef CONFIG_DEBUG_KMEMLEAK | ||
| 219 | /* Clear stale pointers from reused stack. */ | ||
| 220 | memset(s->addr, 0, THREAD_SIZE); | ||
| 221 | #endif | ||
| 218 | tsk->stack_vm_area = s; | 222 | tsk->stack_vm_area = s; |
| 219 | return s->addr; | 223 | return s->addr; |
| 220 | } | 224 | } |
| @@ -946,6 +950,24 @@ void mmput(struct mm_struct *mm) | |||
| 946 | } | 950 | } |
| 947 | EXPORT_SYMBOL_GPL(mmput); | 951 | EXPORT_SYMBOL_GPL(mmput); |
| 948 | 952 | ||
| 953 | #ifdef CONFIG_MMU | ||
| 954 | static void mmput_async_fn(struct work_struct *work) | ||
| 955 | { | ||
| 956 | struct mm_struct *mm = container_of(work, struct mm_struct, | ||
| 957 | async_put_work); | ||
| 958 | |||
| 959 | __mmput(mm); | ||
| 960 | } | ||
| 961 | |||
| 962 | void mmput_async(struct mm_struct *mm) | ||
| 963 | { | ||
| 964 | if (atomic_dec_and_test(&mm->mm_users)) { | ||
| 965 | INIT_WORK(&mm->async_put_work, mmput_async_fn); | ||
| 966 | schedule_work(&mm->async_put_work); | ||
| 967 | } | ||
| 968 | } | ||
| 969 | #endif | ||
| 970 | |||
| 949 | /** | 971 | /** |
| 950 | * set_mm_exe_file - change a reference to the mm's executable file | 972 | * set_mm_exe_file - change a reference to the mm's executable file |
| 951 | * | 973 | * |
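A note on the deferred-release idiom in the new mmput_async() above: the last reference holder may be running in a context where the heavy, possibly sleeping teardown cannot be done inline, so the final put queues the real release on a workqueue instead. Below is a minimal, hypothetical sketch of the same pattern for a generic refcounted object; struct foo, foo_release() and foo_put_async() are invented names, and only the atomic_dec_and_test()/INIT_WORK()/schedule_work() combination mirrors the hunk above (headers assumed: <linux/atomic.h>, <linux/workqueue.h>, <linux/kernel.h>).

	/* Sketch only: defer heavy teardown of a refcounted object to a worker. */
	struct foo {
		atomic_t users;
		struct work_struct async_put_work;
	};

	static void foo_release(struct foo *f);		/* heavy teardown, may sleep */

	static void foo_put_async_fn(struct work_struct *work)
	{
		struct foo *f = container_of(work, struct foo, async_put_work);

		foo_release(f);				/* runs later, in process context */
	}

	static void foo_put_async(struct foo *f)
	{
		if (atomic_dec_and_test(&f->users)) {	/* last reference dropped */
			INIT_WORK(&f->async_put_work, foo_put_async_fn);
			schedule_work(&f->async_put_work);
		}
	}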
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 6fc89fd93824..5a2ef92c2782 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
| @@ -265,8 +265,8 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force) | |||
| 265 | irq_setup_affinity(desc); | 265 | irq_setup_affinity(desc); |
| 266 | break; | 266 | break; |
| 267 | case IRQ_STARTUP_MANAGED: | 267 | case IRQ_STARTUP_MANAGED: |
| 268 | irq_do_set_affinity(d, aff, false); | ||
| 268 | ret = __irq_startup(desc); | 269 | ret = __irq_startup(desc); |
| 269 | irq_set_affinity_locked(d, aff, false); | ||
| 270 | break; | 270 | break; |
| 271 | case IRQ_STARTUP_ABORT: | 271 | case IRQ_STARTUP_ABORT: |
| 272 | return 0; | 272 | return 0; |
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c index 638eb9c83d9f..9eb09aef0313 100644 --- a/kernel/irq/cpuhotplug.c +++ b/kernel/irq/cpuhotplug.c | |||
| @@ -18,8 +18,34 @@ | |||
| 18 | static inline bool irq_needs_fixup(struct irq_data *d) | 18 | static inline bool irq_needs_fixup(struct irq_data *d) |
| 19 | { | 19 | { |
| 20 | const struct cpumask *m = irq_data_get_effective_affinity_mask(d); | 20 | const struct cpumask *m = irq_data_get_effective_affinity_mask(d); |
| 21 | unsigned int cpu = smp_processor_id(); | ||
| 21 | 22 | ||
| 22 | return cpumask_test_cpu(smp_processor_id(), m); | 23 | #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK |
| 24 | /* | ||
| 25 | * The cpumask_empty() check is a workaround for interrupt chips | ||
| 26 | * that do not implement effective affinity, but the architecture has | ||
| 27 | * enabled the config switch. Use the general affinity mask instead. | ||
| 28 | */ | ||
| 29 | if (cpumask_empty(m)) | ||
| 30 | m = irq_data_get_affinity_mask(d); | ||
| 31 | |||
| 32 | /* | ||
| 33 | * Sanity check. If the mask is not empty when excluding the outgoing | ||
| 34 | * CPU then it must contain at least one online CPU. The outgoing CPU | ||
| 35 | * has been removed from the online mask already. | ||
| 36 | */ | ||
| 37 | if (cpumask_any_but(m, cpu) < nr_cpu_ids && | ||
| 38 | cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) { | ||
| 39 | /* | ||
| 40 | * If this happens then there was a missed IRQ fixup at some | ||
| 41 | * point. Warn about it and enforce fixup. | ||
| 42 | */ | ||
| 43 | pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n", | ||
| 44 | cpumask_pr_args(m), d->irq, cpu); | ||
| 45 | return true; | ||
| 46 | } | ||
| 47 | #endif | ||
| 48 | return cpumask_test_cpu(cpu, m); | ||
| 23 | } | 49 | } |
| 24 | 50 | ||
| 25 | static bool migrate_one_irq(struct irq_desc *desc) | 51 | static bool migrate_one_irq(struct irq_desc *desc) |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index d00132b5c325..4bff6a10ae8e 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -168,6 +168,19 @@ void irq_set_thread_affinity(struct irq_desc *desc) | |||
| 168 | set_bit(IRQTF_AFFINITY, &action->thread_flags); | 168 | set_bit(IRQTF_AFFINITY, &action->thread_flags); |
| 169 | } | 169 | } |
| 170 | 170 | ||
| 171 | static void irq_validate_effective_affinity(struct irq_data *data) | ||
| 172 | { | ||
| 173 | #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK | ||
| 174 | const struct cpumask *m = irq_data_get_effective_affinity_mask(data); | ||
| 175 | struct irq_chip *chip = irq_data_get_irq_chip(data); | ||
| 176 | |||
| 177 | if (!cpumask_empty(m)) | ||
| 178 | return; | ||
| 179 | pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n", | ||
| 180 | chip->name, data->irq); | ||
| 181 | #endif | ||
| 182 | } | ||
| 183 | |||
| 171 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, | 184 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, |
| 172 | bool force) | 185 | bool force) |
| 173 | { | 186 | { |
| @@ -175,12 +188,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, | |||
| 175 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 188 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
| 176 | int ret; | 189 | int ret; |
| 177 | 190 | ||
| 191 | if (!chip || !chip->irq_set_affinity) | ||
| 192 | return -EINVAL; | ||
| 193 | |||
| 178 | ret = chip->irq_set_affinity(data, mask, force); | 194 | ret = chip->irq_set_affinity(data, mask, force); |
| 179 | switch (ret) { | 195 | switch (ret) { |
| 180 | case IRQ_SET_MASK_OK: | 196 | case IRQ_SET_MASK_OK: |
| 181 | case IRQ_SET_MASK_OK_DONE: | 197 | case IRQ_SET_MASK_OK_DONE: |
| 182 | cpumask_copy(desc->irq_common_data.affinity, mask); | 198 | cpumask_copy(desc->irq_common_data.affinity, mask); |
| 183 | case IRQ_SET_MASK_OK_NOCOPY: | 199 | case IRQ_SET_MASK_OK_NOCOPY: |
| 200 | irq_validate_effective_affinity(data); | ||
| 184 | irq_set_thread_affinity(desc); | 201 | irq_set_thread_affinity(desc); |
| 185 | ret = 0; | 202 | ret = 0; |
| 186 | } | 203 | } |
diff --git a/kernel/kcmp.c b/kernel/kcmp.c index ea34ed8bb952..055bb2962a0b 100644 --- a/kernel/kcmp.c +++ b/kernel/kcmp.c | |||
| @@ -131,7 +131,7 @@ static int kcmp_epoll_target(struct task_struct *task1, | |||
| 131 | if (filp_epoll) { | 131 | if (filp_epoll) { |
| 132 | filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff); | 132 | filp_tgt = get_epoll_tfile_raw_ptr(filp_epoll, slot.tfd, slot.toff); |
| 133 | fput(filp_epoll); | 133 | fput(filp_epoll); |
| 134 | } else | 134 | } |
| 135 | 135 | ||
| 136 | if (IS_ERR(filp_tgt)) | 136 | if (IS_ERR(filp_tgt)) |
| 137 | return PTR_ERR(filp_tgt); | 137 | return PTR_ERR(filp_tgt); |
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index b9628e43c78f..bf8c8fd72589 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c | |||
| @@ -830,6 +830,41 @@ int klp_register_patch(struct klp_patch *patch) | |||
| 830 | } | 830 | } |
| 831 | EXPORT_SYMBOL_GPL(klp_register_patch); | 831 | EXPORT_SYMBOL_GPL(klp_register_patch); |
| 832 | 832 | ||
| 833 | /* | ||
| 834 | * Remove parts of patches that touch a given kernel module. The list of | ||
| 835 | * patches processed might be limited. When limit is NULL, all patches | ||
| 836 | * will be handled. | ||
| 837 | */ | ||
| 838 | static void klp_cleanup_module_patches_limited(struct module *mod, | ||
| 839 | struct klp_patch *limit) | ||
| 840 | { | ||
| 841 | struct klp_patch *patch; | ||
| 842 | struct klp_object *obj; | ||
| 843 | |||
| 844 | list_for_each_entry(patch, &klp_patches, list) { | ||
| 845 | if (patch == limit) | ||
| 846 | break; | ||
| 847 | |||
| 848 | klp_for_each_object(patch, obj) { | ||
| 849 | if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) | ||
| 850 | continue; | ||
| 851 | |||
| 852 | /* | ||
| 853 | * Only unpatch the module if the patch is enabled or | ||
| 854 | * is in transition. | ||
| 855 | */ | ||
| 856 | if (patch->enabled || patch == klp_transition_patch) { | ||
| 857 | pr_notice("reverting patch '%s' on unloading module '%s'\n", | ||
| 858 | patch->mod->name, obj->mod->name); | ||
| 859 | klp_unpatch_object(obj); | ||
| 860 | } | ||
| 861 | |||
| 862 | klp_free_object_loaded(obj); | ||
| 863 | break; | ||
| 864 | } | ||
| 865 | } | ||
| 866 | } | ||
| 867 | |||
| 833 | int klp_module_coming(struct module *mod) | 868 | int klp_module_coming(struct module *mod) |
| 834 | { | 869 | { |
| 835 | int ret; | 870 | int ret; |
| @@ -894,7 +929,7 @@ err: | |||
| 894 | pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n", | 929 | pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n", |
| 895 | patch->mod->name, obj->mod->name, obj->mod->name); | 930 | patch->mod->name, obj->mod->name, obj->mod->name); |
| 896 | mod->klp_alive = false; | 931 | mod->klp_alive = false; |
| 897 | klp_free_object_loaded(obj); | 932 | klp_cleanup_module_patches_limited(mod, patch); |
| 898 | mutex_unlock(&klp_mutex); | 933 | mutex_unlock(&klp_mutex); |
| 899 | 934 | ||
| 900 | return ret; | 935 | return ret; |
| @@ -902,9 +937,6 @@ err: | |||
| 902 | 937 | ||
| 903 | void klp_module_going(struct module *mod) | 938 | void klp_module_going(struct module *mod) |
| 904 | { | 939 | { |
| 905 | struct klp_patch *patch; | ||
| 906 | struct klp_object *obj; | ||
| 907 | |||
| 908 | if (WARN_ON(mod->state != MODULE_STATE_GOING && | 940 | if (WARN_ON(mod->state != MODULE_STATE_GOING && |
| 909 | mod->state != MODULE_STATE_COMING)) | 941 | mod->state != MODULE_STATE_COMING)) |
| 910 | return; | 942 | return; |
| @@ -917,25 +949,7 @@ void klp_module_going(struct module *mod) | |||
| 917 | */ | 949 | */ |
| 918 | mod->klp_alive = false; | 950 | mod->klp_alive = false; |
| 919 | 951 | ||
| 920 | list_for_each_entry(patch, &klp_patches, list) { | 952 | klp_cleanup_module_patches_limited(mod, NULL); |
| 921 | klp_for_each_object(patch, obj) { | ||
| 922 | if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) | ||
| 923 | continue; | ||
| 924 | |||
| 925 | /* | ||
| 926 | * Only unpatch the module if the patch is enabled or | ||
| 927 | * is in transition. | ||
| 928 | */ | ||
| 929 | if (patch->enabled || patch == klp_transition_patch) { | ||
| 930 | pr_notice("reverting patch '%s' on unloading module '%s'\n", | ||
| 931 | patch->mod->name, obj->mod->name); | ||
| 932 | klp_unpatch_object(obj); | ||
| 933 | } | ||
| 934 | |||
| 935 | klp_free_object_loaded(obj); | ||
| 936 | break; | ||
| 937 | } | ||
| 938 | } | ||
| 939 | 953 | ||
| 940 | mutex_unlock(&klp_mutex); | 954 | mutex_unlock(&klp_mutex); |
| 941 | } | 955 | } |
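The klp_cleanup_module_patches_limited() helper added above is an instance of the common "unwind everything processed before the failing element" idiom: walk the list in apply order, stop at the element whose processing failed, and revert only what was actually applied; passing a NULL limit degenerates to reverting everything (as klp_module_going() now does). A stripped-down, hypothetical sketch of that idiom, with invented struct item/undo() names and <linux/list.h> assumed:

	struct item {
		struct list_head list;
	};

	static void undo(struct item *it);	/* revert whatever apply() did */

	/* Undo every item that comes before 'limit'; NULL means undo them all. */
	static void cleanup_limited(struct list_head *head, struct item *limit)
	{
		struct item *it;

		list_for_each_entry(it, head, list) {
			if (it == limit)
				break;
			undo(it);
		}
	}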
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 44c8d0d17170..e36e652d996f 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
| @@ -1873,10 +1873,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
| 1873 | struct held_lock *next, int distance, struct stack_trace *trace, | 1873 | struct held_lock *next, int distance, struct stack_trace *trace, |
| 1874 | int (*save)(struct stack_trace *trace)) | 1874 | int (*save)(struct stack_trace *trace)) |
| 1875 | { | 1875 | { |
| 1876 | struct lock_list *uninitialized_var(target_entry); | ||
| 1876 | struct lock_list *entry; | 1877 | struct lock_list *entry; |
| 1877 | int ret; | ||
| 1878 | struct lock_list this; | 1878 | struct lock_list this; |
| 1879 | struct lock_list *uninitialized_var(target_entry); | 1879 | int ret; |
| 1880 | 1880 | ||
| 1881 | /* | 1881 | /* |
| 1882 | * Prove that the new <prev> -> <next> dependency would not | 1882 | * Prove that the new <prev> -> <next> dependency would not |
| @@ -1890,8 +1890,17 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
| 1890 | this.class = hlock_class(next); | 1890 | this.class = hlock_class(next); |
| 1891 | this.parent = NULL; | 1891 | this.parent = NULL; |
| 1892 | ret = check_noncircular(&this, hlock_class(prev), &target_entry); | 1892 | ret = check_noncircular(&this, hlock_class(prev), &target_entry); |
| 1893 | if (unlikely(!ret)) | 1893 | if (unlikely(!ret)) { |
| 1894 | if (!trace->entries) { | ||
| 1895 | /* | ||
| 1896 | * If @save fails here, the printing might trigger | ||
| 1897 | * a WARN but because of the !nr_entries it should | ||
| 1898 | * not do bad things. | ||
| 1899 | */ | ||
| 1900 | save(trace); | ||
| 1901 | } | ||
| 1894 | return print_circular_bug(&this, target_entry, next, prev, trace); | 1902 | return print_circular_bug(&this, target_entry, next, prev, trace); |
| 1903 | } | ||
| 1895 | else if (unlikely(ret < 0)) | 1904 | else if (unlikely(ret < 0)) |
| 1896 | return print_bfs_bug(ret); | 1905 | return print_bfs_bug(ret); |
| 1897 | 1906 | ||
| @@ -1938,7 +1947,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
| 1938 | return print_bfs_bug(ret); | 1947 | return print_bfs_bug(ret); |
| 1939 | 1948 | ||
| 1940 | 1949 | ||
| 1941 | if (save && !save(trace)) | 1950 | if (!trace->entries && !save(trace)) |
| 1942 | return 0; | 1951 | return 0; |
| 1943 | 1952 | ||
| 1944 | /* | 1953 | /* |
| @@ -1958,20 +1967,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
| 1958 | if (!ret) | 1967 | if (!ret) |
| 1959 | return 0; | 1968 | return 0; |
| 1960 | 1969 | ||
| 1961 | /* | ||
| 1962 | * Debugging printouts: | ||
| 1963 | */ | ||
| 1964 | if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) { | ||
| 1965 | graph_unlock(); | ||
| 1966 | printk("\n new dependency: "); | ||
| 1967 | print_lock_name(hlock_class(prev)); | ||
| 1968 | printk(KERN_CONT " => "); | ||
| 1969 | print_lock_name(hlock_class(next)); | ||
| 1970 | printk(KERN_CONT "\n"); | ||
| 1971 | dump_stack(); | ||
| 1972 | if (!graph_lock()) | ||
| 1973 | return 0; | ||
| 1974 | } | ||
| 1975 | return 2; | 1970 | return 2; |
| 1976 | } | 1971 | } |
| 1977 | 1972 | ||
| @@ -1986,8 +1981,12 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next) | |||
| 1986 | { | 1981 | { |
| 1987 | int depth = curr->lockdep_depth; | 1982 | int depth = curr->lockdep_depth; |
| 1988 | struct held_lock *hlock; | 1983 | struct held_lock *hlock; |
| 1989 | struct stack_trace trace; | 1984 | struct stack_trace trace = { |
| 1990 | int (*save)(struct stack_trace *trace) = save_trace; | 1985 | .nr_entries = 0, |
| 1986 | .max_entries = 0, | ||
| 1987 | .entries = NULL, | ||
| 1988 | .skip = 0, | ||
| 1989 | }; | ||
| 1991 | 1990 | ||
| 1992 | /* | 1991 | /* |
| 1993 | * Debugging checks. | 1992 | * Debugging checks. |
| @@ -2018,18 +2017,11 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next) | |||
| 2018 | */ | 2017 | */ |
| 2019 | if (hlock->read != 2 && hlock->check) { | 2018 | if (hlock->read != 2 && hlock->check) { |
| 2020 | int ret = check_prev_add(curr, hlock, next, | 2019 | int ret = check_prev_add(curr, hlock, next, |
| 2021 | distance, &trace, save); | 2020 | distance, &trace, save_trace); |
| 2022 | if (!ret) | 2021 | if (!ret) |
| 2023 | return 0; | 2022 | return 0; |
| 2024 | 2023 | ||
| 2025 | /* | 2024 | /* |
| 2026 | * Stop saving stack_trace if save_trace() was | ||
| 2027 | * called at least once: | ||
| 2028 | */ | ||
| 2029 | if (save && ret == 2) | ||
| 2030 | save = NULL; | ||
| 2031 | |||
| 2032 | /* | ||
| 2033 | * Stop after the first non-trylock entry, | 2025 | * Stop after the first non-trylock entry, |
| 2034 | * as non-trylock entries have added their | 2026 | * as non-trylock entries have added their |
| 2035 | * own direct dependencies already, so this | 2027 | * own direct dependencies already, so this |
diff --git a/kernel/memremap.c b/kernel/memremap.c index 6bcbfbf1a8fd..403ab9cdb949 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c | |||
| @@ -350,7 +350,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, | |||
| 350 | pgprot_t pgprot = PAGE_KERNEL; | 350 | pgprot_t pgprot = PAGE_KERNEL; |
| 351 | struct dev_pagemap *pgmap; | 351 | struct dev_pagemap *pgmap; |
| 352 | struct page_map *page_map; | 352 | struct page_map *page_map; |
| 353 | int error, nid, is_ram; | 353 | int error, nid, is_ram, i = 0; |
| 354 | 354 | ||
| 355 | align_start = res->start & ~(SECTION_SIZE - 1); | 355 | align_start = res->start & ~(SECTION_SIZE - 1); |
| 356 | align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) | 356 | align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE) |
| @@ -448,6 +448,8 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, | |||
| 448 | list_del(&page->lru); | 448 | list_del(&page->lru); |
| 449 | page->pgmap = pgmap; | 449 | page->pgmap = pgmap; |
| 450 | percpu_ref_get(ref); | 450 | percpu_ref_get(ref); |
| 451 | if (!(++i % 1024)) | ||
| 452 | cond_resched(); | ||
| 451 | } | 453 | } |
| 452 | devres_add(dev, page_map); | 454 | devres_add(dev, page_map); |
| 453 | return __va(res->start); | 455 | return __va(res->start); |
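The cond_resched() added above keeps a very long page-initialisation loop from hogging the CPU by offering to reschedule once every 1024 iterations instead of on every pass, which is why the counter i was added to the declaration. A hypothetical sketch of the same periodic-yield idiom (struct item and process_one() are invented; <linux/sched.h> assumed):

	static void process_many(struct item *items, unsigned long count)
	{
		unsigned long i;

		for (i = 0; i < count; i++) {
			process_one(&items[i]);
			if (!((i + 1) % 1024))	/* offer to reschedule every 1024th item */
				cond_resched();
		}
	}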
diff --git a/kernel/params.c b/kernel/params.c index 60b2d8101355..cc9108c2a1fd 100644 --- a/kernel/params.c +++ b/kernel/params.c | |||
| @@ -224,7 +224,7 @@ char *parse_args(const char *doing, | |||
| 224 | } \ | 224 | } \ |
| 225 | int param_get_##name(char *buffer, const struct kernel_param *kp) \ | 225 | int param_get_##name(char *buffer, const struct kernel_param *kp) \ |
| 226 | { \ | 226 | { \ |
| 227 | return scnprintf(buffer, PAGE_SIZE, format, \ | 227 | return scnprintf(buffer, PAGE_SIZE, format "\n", \ |
| 228 | *((type *)kp->arg)); \ | 228 | *((type *)kp->arg)); \ |
| 229 | } \ | 229 | } \ |
| 230 | const struct kernel_param_ops param_ops_##name = { \ | 230 | const struct kernel_param_ops param_ops_##name = { \ |
| @@ -236,14 +236,14 @@ char *parse_args(const char *doing, | |||
| 236 | EXPORT_SYMBOL(param_ops_##name) | 236 | EXPORT_SYMBOL(param_ops_##name) |
| 237 | 237 | ||
| 238 | 238 | ||
| 239 | STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", kstrtou8); | 239 | STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", kstrtou8); |
| 240 | STANDARD_PARAM_DEF(short, short, "%hi", kstrtos16); | 240 | STANDARD_PARAM_DEF(short, short, "%hi", kstrtos16); |
| 241 | STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", kstrtou16); | 241 | STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", kstrtou16); |
| 242 | STANDARD_PARAM_DEF(int, int, "%i", kstrtoint); | 242 | STANDARD_PARAM_DEF(int, int, "%i", kstrtoint); |
| 243 | STANDARD_PARAM_DEF(uint, unsigned int, "%u", kstrtouint); | 243 | STANDARD_PARAM_DEF(uint, unsigned int, "%u", kstrtouint); |
| 244 | STANDARD_PARAM_DEF(long, long, "%li", kstrtol); | 244 | STANDARD_PARAM_DEF(long, long, "%li", kstrtol); |
| 245 | STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", kstrtoul); | 245 | STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", kstrtoul); |
| 246 | STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull); | 246 | STANDARD_PARAM_DEF(ullong, unsigned long long, "%llu", kstrtoull); |
| 247 | 247 | ||
| 248 | int param_set_charp(const char *val, const struct kernel_param *kp) | 248 | int param_set_charp(const char *val, const struct kernel_param *kp) |
| 249 | { | 249 | { |
| @@ -270,7 +270,7 @@ EXPORT_SYMBOL(param_set_charp); | |||
| 270 | 270 | ||
| 271 | int param_get_charp(char *buffer, const struct kernel_param *kp) | 271 | int param_get_charp(char *buffer, const struct kernel_param *kp) |
| 272 | { | 272 | { |
| 273 | return scnprintf(buffer, PAGE_SIZE, "%s", *((char **)kp->arg)); | 273 | return scnprintf(buffer, PAGE_SIZE, "%s\n", *((char **)kp->arg)); |
| 274 | } | 274 | } |
| 275 | EXPORT_SYMBOL(param_get_charp); | 275 | EXPORT_SYMBOL(param_get_charp); |
| 276 | 276 | ||
| @@ -301,7 +301,7 @@ EXPORT_SYMBOL(param_set_bool); | |||
| 301 | int param_get_bool(char *buffer, const struct kernel_param *kp) | 301 | int param_get_bool(char *buffer, const struct kernel_param *kp) |
| 302 | { | 302 | { |
| 303 | /* Y and N chosen as being relatively non-coder friendly */ | 303 | /* Y and N chosen as being relatively non-coder friendly */ |
| 304 | return sprintf(buffer, "%c", *(bool *)kp->arg ? 'Y' : 'N'); | 304 | return sprintf(buffer, "%c\n", *(bool *)kp->arg ? 'Y' : 'N'); |
| 305 | } | 305 | } |
| 306 | EXPORT_SYMBOL(param_get_bool); | 306 | EXPORT_SYMBOL(param_get_bool); |
| 307 | 307 | ||
| @@ -360,7 +360,7 @@ EXPORT_SYMBOL(param_set_invbool); | |||
| 360 | 360 | ||
| 361 | int param_get_invbool(char *buffer, const struct kernel_param *kp) | 361 | int param_get_invbool(char *buffer, const struct kernel_param *kp) |
| 362 | { | 362 | { |
| 363 | return sprintf(buffer, "%c", (*(bool *)kp->arg) ? 'N' : 'Y'); | 363 | return sprintf(buffer, "%c\n", (*(bool *)kp->arg) ? 'N' : 'Y'); |
| 364 | } | 364 | } |
| 365 | EXPORT_SYMBOL(param_get_invbool); | 365 | EXPORT_SYMBOL(param_get_invbool); |
| 366 | 366 | ||
| @@ -460,8 +460,9 @@ static int param_array_get(char *buffer, const struct kernel_param *kp) | |||
| 460 | struct kernel_param p = *kp; | 460 | struct kernel_param p = *kp; |
| 461 | 461 | ||
| 462 | for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) { | 462 | for (i = off = 0; i < (arr->num ? *arr->num : arr->max); i++) { |
| 463 | /* Replace \n with comma */ | ||
| 463 | if (i) | 464 | if (i) |
| 464 | buffer[off++] = ','; | 465 | buffer[off - 1] = ','; |
| 465 | p.arg = arr->elem + arr->elemsize * i; | 466 | p.arg = arr->elem + arr->elemsize * i; |
| 466 | check_kparam_locked(p.mod); | 467 | check_kparam_locked(p.mod); |
| 467 | ret = arr->ops->get(buffer + off, &p); | 468 | ret = arr->ops->get(buffer + off, &p); |
| @@ -507,7 +508,7 @@ EXPORT_SYMBOL(param_set_copystring); | |||
| 507 | int param_get_string(char *buffer, const struct kernel_param *kp) | 508 | int param_get_string(char *buffer, const struct kernel_param *kp) |
| 508 | { | 509 | { |
| 509 | const struct kparam_string *kps = kp->str; | 510 | const struct kparam_string *kps = kp->str; |
| 510 | return strlcpy(buffer, kps->string, kps->maxlen); | 511 | return scnprintf(buffer, PAGE_SIZE, "%s\n", kps->string); |
| 511 | } | 512 | } |
| 512 | EXPORT_SYMBOL(param_get_string); | 513 | EXPORT_SYMBOL(param_get_string); |
| 513 | 514 | ||
| @@ -549,10 +550,6 @@ static ssize_t param_attr_show(struct module_attribute *mattr, | |||
| 549 | kernel_param_lock(mk->mod); | 550 | kernel_param_lock(mk->mod); |
| 550 | count = attribute->param->ops->get(buf, attribute->param); | 551 | count = attribute->param->ops->get(buf, attribute->param); |
| 551 | kernel_param_unlock(mk->mod); | 552 | kernel_param_unlock(mk->mod); |
| 552 | if (count > 0) { | ||
| 553 | strcat(buf, "\n"); | ||
| 554 | ++count; | ||
| 555 | } | ||
| 556 | return count; | 553 | return count; |
| 557 | } | 554 | } |
| 558 | 555 | ||
| @@ -600,7 +597,7 @@ EXPORT_SYMBOL(kernel_param_unlock); | |||
| 600 | /* | 597 | /* |
| 601 | * add_sysfs_param - add a parameter to sysfs | 598 | * add_sysfs_param - add a parameter to sysfs |
| 602 | * @mk: struct module_kobject | 599 | * @mk: struct module_kobject |
| 603 | * @kparam: the actual parameter definition to add to sysfs | 600 | * @kp: the actual parameter definition to add to sysfs |
| 604 | * @name: name of parameter | 601 | * @name: name of parameter |
| 605 | * | 602 | * |
| 606 | * Create a kobject for a (per-module) parameter if mp is NULL, and | 603 | * Create a kobject for a (per-module) parameter if mp is NULL, and |
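Two details of the sysfs output change above are easy to miss: the getters now emit their own trailing newline via C's adjacent-string-literal concatenation (format "\n" folds into a single format string at compile time), and param_array_get() then rewrites the previous element's trailing newline into a comma (buffer[off - 1] = ',') so only the last element keeps its newline. A tiny hypothetical sketch of the concatenation part, with an invented function name and scnprintf() assumed from <linux/kernel.h>:

	/* Sketch only: "%d" "\n" is concatenated by the compiler into "%d\n",
	 * so the getter terminates its own output with a newline. */
	static int show_int(char *buf, size_t size, const int *val)
	{
		return scnprintf(buf, size, "%d" "\n", *val);
	}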
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 3e2b4f519009..ccd2d20e6b06 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
| @@ -120,22 +120,26 @@ static void s2idle_loop(void) | |||
| 120 | * frozen processes + suspended devices + idle processors. | 120 | * frozen processes + suspended devices + idle processors. |
| 121 | * Thus s2idle_enter() should be called right after | 121 | * Thus s2idle_enter() should be called right after |
| 122 | * all devices have been suspended. | 122 | * all devices have been suspended. |
| 123 | * | ||
| 124 | * Wakeups during the noirq suspend of devices may be spurious, | ||
| 125 | * so prevent them from terminating the loop right away. | ||
| 123 | */ | 126 | */ |
| 124 | error = dpm_noirq_suspend_devices(PMSG_SUSPEND); | 127 | error = dpm_noirq_suspend_devices(PMSG_SUSPEND); |
| 125 | if (!error) | 128 | if (!error) |
| 126 | s2idle_enter(); | 129 | s2idle_enter(); |
| 130 | else if (error == -EBUSY && pm_wakeup_pending()) | ||
| 131 | error = 0; | ||
| 127 | 132 | ||
| 128 | dpm_noirq_resume_devices(PMSG_RESUME); | 133 | if (!error && s2idle_ops && s2idle_ops->wake) |
| 129 | if (error && (error != -EBUSY || !pm_wakeup_pending())) { | ||
| 130 | dpm_noirq_end(); | ||
| 131 | break; | ||
| 132 | } | ||
| 133 | |||
| 134 | if (s2idle_ops && s2idle_ops->wake) | ||
| 135 | s2idle_ops->wake(); | 134 | s2idle_ops->wake(); |
| 136 | 135 | ||
| 136 | dpm_noirq_resume_devices(PMSG_RESUME); | ||
| 137 | |||
| 137 | dpm_noirq_end(); | 138 | dpm_noirq_end(); |
| 138 | 139 | ||
| 140 | if (error) | ||
| 141 | break; | ||
| 142 | |||
| 139 | if (s2idle_ops && s2idle_ops->sync) | 143 | if (s2idle_ops && s2idle_ops->sync) |
| 140 | s2idle_ops->sync(); | 144 | s2idle_ops->sync(); |
| 141 | 145 | ||
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 0c44c7b42e6d..b0ad62b0e7b8 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
| @@ -884,7 +884,7 @@ void rcu_irq_exit(void) | |||
| 884 | rdtp = this_cpu_ptr(&rcu_dynticks); | 884 | rdtp = this_cpu_ptr(&rcu_dynticks); |
| 885 | 885 | ||
| 886 | /* Page faults can happen in NMI handlers, so check... */ | 886 | /* Page faults can happen in NMI handlers, so check... */ |
| 887 | if (READ_ONCE(rdtp->dynticks_nmi_nesting)) | 887 | if (rdtp->dynticks_nmi_nesting) |
| 888 | return; | 888 | return; |
| 889 | 889 | ||
| 890 | WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && | 890 | WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && |
| @@ -1022,7 +1022,7 @@ void rcu_irq_enter(void) | |||
| 1022 | rdtp = this_cpu_ptr(&rcu_dynticks); | 1022 | rdtp = this_cpu_ptr(&rcu_dynticks); |
| 1023 | 1023 | ||
| 1024 | /* Page faults can happen in NMI handlers, so check... */ | 1024 | /* Page faults can happen in NMI handlers, so check... */ |
| 1025 | if (READ_ONCE(rdtp->dynticks_nmi_nesting)) | 1025 | if (rdtp->dynticks_nmi_nesting) |
| 1026 | return; | 1026 | return; |
| 1027 | 1027 | ||
| 1028 | oldval = rdtp->dynticks_nesting; | 1028 | oldval = rdtp->dynticks_nesting; |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 70ba32e08a23..d3f3094856fe 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
| @@ -5356,91 +5356,62 @@ static int wake_wide(struct task_struct *p) | |||
| 5356 | return 1; | 5356 | return 1; |
| 5357 | } | 5357 | } |
| 5358 | 5358 | ||
| 5359 | struct llc_stats { | 5359 | /* |
| 5360 | unsigned long nr_running; | 5360 | * The purpose of wake_affine() is to quickly determine on which CPU we can run |
| 5361 | unsigned long load; | 5361 | * soonest. For the purpose of speed we only consider the waking and previous |
| 5362 | unsigned long capacity; | 5362 | * CPU. |
| 5363 | int has_capacity; | 5363 | * |
| 5364 | }; | 5364 | * wake_affine_idle() - only considers 'now', it check if the waking CPU is (or |
| 5365 | * will be) idle. | ||
| 5366 | * | ||
| 5367 | * wake_affine_weight() - considers the weight to reflect the average | ||
| 5368 | * scheduling latency of the CPUs. This seems to work | ||
| 5369 | * for the overloaded case. | ||
| 5370 | */ | ||
| 5365 | 5371 | ||
| 5366 | static bool get_llc_stats(struct llc_stats *stats, int cpu) | 5372 | static bool |
| 5373 | wake_affine_idle(struct sched_domain *sd, struct task_struct *p, | ||
| 5374 | int this_cpu, int prev_cpu, int sync) | ||
| 5367 | { | 5375 | { |
| 5368 | struct sched_domain_shared *sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); | 5376 | if (idle_cpu(this_cpu)) |
| 5369 | 5377 | return true; | |
| 5370 | if (!sds) | ||
| 5371 | return false; | ||
| 5372 | 5378 | ||
| 5373 | stats->nr_running = READ_ONCE(sds->nr_running); | 5379 | if (sync && cpu_rq(this_cpu)->nr_running == 1) |
| 5374 | stats->load = READ_ONCE(sds->load); | 5380 | return true; |
| 5375 | stats->capacity = READ_ONCE(sds->capacity); | ||
| 5376 | stats->has_capacity = stats->nr_running < per_cpu(sd_llc_size, cpu); | ||
| 5377 | 5381 | ||
| 5378 | return true; | 5382 | return false; |
| 5379 | } | 5383 | } |
| 5380 | 5384 | ||
| 5381 | /* | ||
| 5382 | * Can a task be moved from prev_cpu to this_cpu without causing a load | ||
| 5383 | * imbalance that would trigger the load balancer? | ||
| 5384 | * | ||
| 5385 | * Since we're running on 'stale' values, we might in fact create an imbalance | ||
| 5386 | * but recomputing these values is expensive, as that'd mean iteration 2 cache | ||
| 5387 | * domains worth of CPUs. | ||
| 5388 | */ | ||
| 5389 | static bool | 5385 | static bool |
| 5390 | wake_affine_llc(struct sched_domain *sd, struct task_struct *p, | 5386 | wake_affine_weight(struct sched_domain *sd, struct task_struct *p, |
| 5391 | int this_cpu, int prev_cpu, int sync) | 5387 | int this_cpu, int prev_cpu, int sync) |
| 5392 | { | 5388 | { |
| 5393 | struct llc_stats prev_stats, this_stats; | ||
| 5394 | s64 this_eff_load, prev_eff_load; | 5389 | s64 this_eff_load, prev_eff_load; |
| 5395 | unsigned long task_load; | 5390 | unsigned long task_load; |
| 5396 | 5391 | ||
| 5397 | if (!get_llc_stats(&prev_stats, prev_cpu) || | 5392 | this_eff_load = target_load(this_cpu, sd->wake_idx); |
| 5398 | !get_llc_stats(&this_stats, this_cpu)) | 5393 | prev_eff_load = source_load(prev_cpu, sd->wake_idx); |
| 5399 | return false; | ||
| 5400 | 5394 | ||
| 5401 | /* | ||
| 5402 | * If sync wakeup then subtract the (maximum possible) | ||
| 5403 | * effect of the currently running task from the load | ||
| 5404 | * of the current LLC. | ||
| 5405 | */ | ||
| 5406 | if (sync) { | 5395 | if (sync) { |
| 5407 | unsigned long current_load = task_h_load(current); | 5396 | unsigned long current_load = task_h_load(current); |
| 5408 | 5397 | ||
| 5409 | /* in this case load hits 0 and this LLC is considered 'idle' */ | 5398 | if (current_load > this_eff_load) |
| 5410 | if (current_load > this_stats.load) | ||
| 5411 | return true; | 5399 | return true; |
| 5412 | 5400 | ||
| 5413 | this_stats.load -= current_load; | 5401 | this_eff_load -= current_load; |
| 5414 | } | 5402 | } |
| 5415 | 5403 | ||
| 5416 | /* | ||
| 5417 | * The has_capacity stuff is not SMT aware, but by trying to balance | ||
| 5418 | * the nr_running on both ends we try and fill the domain at equal | ||
| 5419 | * rates, thereby first consuming cores before siblings. | ||
| 5420 | */ | ||
| 5421 | |||
| 5422 | /* if the old cache has capacity, stay there */ | ||
| 5423 | if (prev_stats.has_capacity && prev_stats.nr_running < this_stats.nr_running+1) | ||
| 5424 | return false; | ||
| 5425 | |||
| 5426 | /* if this cache has capacity, come here */ | ||
| 5427 | if (this_stats.has_capacity && this_stats.nr_running+1 < prev_stats.nr_running) | ||
| 5428 | return true; | ||
| 5429 | |||
| 5430 | /* | ||
| 5431 | * Check to see if we can move the load without causing too much | ||
| 5432 | * imbalance. | ||
| 5433 | */ | ||
| 5434 | task_load = task_h_load(p); | 5404 | task_load = task_h_load(p); |
| 5435 | 5405 | ||
| 5436 | this_eff_load = 100; | 5406 | this_eff_load += task_load; |
| 5437 | this_eff_load *= prev_stats.capacity; | 5407 | if (sched_feat(WA_BIAS)) |
| 5438 | 5408 | this_eff_load *= 100; | |
| 5439 | prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; | 5409 | this_eff_load *= capacity_of(prev_cpu); |
| 5440 | prev_eff_load *= this_stats.capacity; | ||
| 5441 | 5410 | ||
| 5442 | this_eff_load *= this_stats.load + task_load; | 5411 | prev_eff_load -= task_load; |
| 5443 | prev_eff_load *= prev_stats.load - task_load; | 5412 | if (sched_feat(WA_BIAS)) |
| 5413 | prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; | ||
| 5414 | prev_eff_load *= capacity_of(this_cpu); | ||
| 5444 | 5415 | ||
| 5445 | return this_eff_load <= prev_eff_load; | 5416 | return this_eff_load <= prev_eff_load; |
| 5446 | } | 5417 | } |
| @@ -5449,22 +5420,13 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, | |||
| 5449 | int prev_cpu, int sync) | 5420 | int prev_cpu, int sync) |
| 5450 | { | 5421 | { |
| 5451 | int this_cpu = smp_processor_id(); | 5422 | int this_cpu = smp_processor_id(); |
| 5452 | bool affine; | 5423 | bool affine = false; |
| 5453 | 5424 | ||
| 5454 | /* | 5425 | if (sched_feat(WA_IDLE) && !affine) |
| 5455 | * Default to no affine wakeups; wake_affine() should not effect a task | 5426 | affine = wake_affine_idle(sd, p, this_cpu, prev_cpu, sync); |
| 5456 | * placement the load-balancer feels inclined to undo. The conservative | ||
| 5457 | * option is therefore to not move tasks when they wake up. | ||
| 5458 | */ | ||
| 5459 | affine = false; | ||
| 5460 | 5427 | ||
| 5461 | /* | 5428 | if (sched_feat(WA_WEIGHT) && !affine) |
| 5462 | * If the wakeup is across cache domains, try to evaluate if movement | 5429 | affine = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); |
| 5463 | * makes sense, otherwise rely on select_idle_siblings() to do | ||
| 5464 | * placement inside the cache domain. | ||
| 5465 | */ | ||
| 5466 | if (!cpus_share_cache(prev_cpu, this_cpu)) | ||
| 5467 | affine = wake_affine_llc(sd, p, this_cpu, prev_cpu, sync); | ||
| 5468 | 5430 | ||
| 5469 | schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); | 5431 | schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); |
| 5470 | if (affine) { | 5432 | if (affine) { |
| @@ -7600,7 +7562,6 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq) | |||
| 7600 | */ | 7562 | */ |
| 7601 | static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) | 7563 | static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) |
| 7602 | { | 7564 | { |
| 7603 | struct sched_domain_shared *shared = env->sd->shared; | ||
| 7604 | struct sched_domain *child = env->sd->child; | 7565 | struct sched_domain *child = env->sd->child; |
| 7605 | struct sched_group *sg = env->sd->groups; | 7566 | struct sched_group *sg = env->sd->groups; |
| 7606 | struct sg_lb_stats *local = &sds->local_stat; | 7567 | struct sg_lb_stats *local = &sds->local_stat; |
| @@ -7672,22 +7633,6 @@ next_group: | |||
| 7672 | if (env->dst_rq->rd->overload != overload) | 7633 | if (env->dst_rq->rd->overload != overload) |
| 7673 | env->dst_rq->rd->overload = overload; | 7634 | env->dst_rq->rd->overload = overload; |
| 7674 | } | 7635 | } |
| 7675 | |||
| 7676 | if (!shared) | ||
| 7677 | return; | ||
| 7678 | |||
| 7679 | /* | ||
| 7680 | * Since these are sums over groups they can contain some CPUs | ||
| 7681 | * multiple times for the NUMA domains. | ||
| 7682 | * | ||
| 7683 | * Currently only wake_affine_llc() and find_busiest_group() | ||
| 7684 | * uses these numbers, only the last is affected by this problem. | ||
| 7685 | * | ||
| 7686 | * XXX fix that. | ||
| 7687 | */ | ||
| 7688 | WRITE_ONCE(shared->nr_running, sds->total_running); | ||
| 7689 | WRITE_ONCE(shared->load, sds->total_load); | ||
| 7690 | WRITE_ONCE(shared->capacity, sds->total_capacity); | ||
| 7691 | } | 7636 | } |
| 7692 | 7637 | ||
| 7693 | /** | 7638 | /** |
| @@ -8098,6 +8043,13 @@ static int should_we_balance(struct lb_env *env) | |||
| 8098 | int cpu, balance_cpu = -1; | 8043 | int cpu, balance_cpu = -1; |
| 8099 | 8044 | ||
| 8100 | /* | 8045 | /* |
| 8046 | * Ensure the balancing environment is consistent; inconsistency can | ||
| 8047 | * happen when the softirq triggers 'during' hotplug. | ||
| 8048 | */ | ||
| 8049 | if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) | ||
| 8050 | return 0; | ||
| 8051 | |||
| 8052 | /* | ||
| 8101 | * In the newly idle case, we will allow all the cpu's | 8053 | * In the newly idle case, we will allow all the cpu's |
| 8102 | * to do the newly idle load balance. | 8054 | * to do the newly idle load balance. |
| 8103 | */ | 8055 | */ |
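Read as a formula, the new wake_affine_weight() above pulls the wakee to the waking CPU only when (target_load(this_cpu) + task_h_load(p)) * bias_this * capacity(prev_cpu) <= (source_load(prev_cpu) - task_h_load(p)) * bias_prev * capacity(this_cpu), where with the WA_BIAS feature enabled bias_this = 100 and bias_prev = 100 + (imbalance_pct - 100) / 2; a sync wakeup first subtracts the waker's own task_h_load(current) from the waking side (or returns true outright if that exceeds it). A worked example with invented numbers: loads this = 800 and prev = 1200, task_h_load = 300, imbalance_pct = 117 and equal capacities of 1024 give (800 + 300) * 100 * 1024 = 112,640,000 for the waking side versus (1200 - 300) * 108 * 1024 = 99,532,800 for the previous side, so the comparison fails and the task stays on prev_cpu.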
diff --git a/kernel/sched/features.h b/kernel/sched/features.h index d3fb15555291..319ed0e8a347 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h | |||
| @@ -81,3 +81,6 @@ SCHED_FEAT(RT_RUNTIME_SHARE, true) | |||
| 81 | SCHED_FEAT(LB_MIN, false) | 81 | SCHED_FEAT(LB_MIN, false) |
| 82 | SCHED_FEAT(ATTACH_AGE_LOAD, true) | 82 | SCHED_FEAT(ATTACH_AGE_LOAD, true) |
| 83 | 83 | ||
| 84 | SCHED_FEAT(WA_IDLE, true) | ||
| 85 | SCHED_FEAT(WA_WEIGHT, true) | ||
| 86 | SCHED_FEAT(WA_BIAS, true) | ||
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index bb3a38005b9c..0ae832e13b97 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
| @@ -473,7 +473,7 @@ static long seccomp_attach_filter(unsigned int flags, | |||
| 473 | return 0; | 473 | return 0; |
| 474 | } | 474 | } |
| 475 | 475 | ||
| 476 | void __get_seccomp_filter(struct seccomp_filter *filter) | 476 | static void __get_seccomp_filter(struct seccomp_filter *filter) |
| 477 | { | 477 | { |
| 478 | /* Reference count is bounded by the number of total processes. */ | 478 | /* Reference count is bounded by the number of total processes. */ |
| 479 | refcount_inc(&filter->usage); | 479 | refcount_inc(&filter->usage); |
diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 1d71c051a951..5043e7433f4b 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c | |||
| @@ -344,39 +344,30 @@ EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread); | |||
| 344 | * by the client, but only by calling this function. | 344 | * by the client, but only by calling this function. |
| 345 | * This function can only be called on a registered smp_hotplug_thread. | 345 | * This function can only be called on a registered smp_hotplug_thread. |
| 346 | */ | 346 | */ |
| 347 | int smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, | 347 | void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread, |
| 348 | const struct cpumask *new) | 348 | const struct cpumask *new) |
| 349 | { | 349 | { |
| 350 | struct cpumask *old = plug_thread->cpumask; | 350 | struct cpumask *old = plug_thread->cpumask; |
| 351 | cpumask_var_t tmp; | 351 | static struct cpumask tmp; |
| 352 | unsigned int cpu; | 352 | unsigned int cpu; |
| 353 | 353 | ||
| 354 | if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) | 354 | lockdep_assert_cpus_held(); |
| 355 | return -ENOMEM; | ||
| 356 | |||
| 357 | get_online_cpus(); | ||
| 358 | mutex_lock(&smpboot_threads_lock); | 355 | mutex_lock(&smpboot_threads_lock); |
| 359 | 356 | ||
| 360 | /* Park threads that were exclusively enabled on the old mask. */ | 357 | /* Park threads that were exclusively enabled on the old mask. */ |
| 361 | cpumask_andnot(tmp, old, new); | 358 | cpumask_andnot(&tmp, old, new); |
| 362 | for_each_cpu_and(cpu, tmp, cpu_online_mask) | 359 | for_each_cpu_and(cpu, &tmp, cpu_online_mask) |
| 363 | smpboot_park_thread(plug_thread, cpu); | 360 | smpboot_park_thread(plug_thread, cpu); |
| 364 | 361 | ||
| 365 | /* Unpark threads that are exclusively enabled on the new mask. */ | 362 | /* Unpark threads that are exclusively enabled on the new mask. */ |
| 366 | cpumask_andnot(tmp, new, old); | 363 | cpumask_andnot(&tmp, new, old); |
| 367 | for_each_cpu_and(cpu, tmp, cpu_online_mask) | 364 | for_each_cpu_and(cpu, &tmp, cpu_online_mask) |
| 368 | smpboot_unpark_thread(plug_thread, cpu); | 365 | smpboot_unpark_thread(plug_thread, cpu); |
| 369 | 366 | ||
| 370 | cpumask_copy(old, new); | 367 | cpumask_copy(old, new); |
| 371 | 368 | ||
| 372 | mutex_unlock(&smpboot_threads_lock); | 369 | mutex_unlock(&smpboot_threads_lock); |
| 373 | put_online_cpus(); | ||
| 374 | |||
| 375 | free_cpumask_var(tmp); | ||
| 376 | |||
| 377 | return 0; | ||
| 378 | } | 370 | } |
| 379 | EXPORT_SYMBOL_GPL(smpboot_update_cpumask_percpu_thread); | ||
| 380 | 371 | ||
| 381 | static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD); | 372 | static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) = ATOMIC_INIT(CPU_POST_DEAD); |
| 382 | 373 | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 423554ad3610..d9c31bc2eaea 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -872,9 +872,9 @@ static struct ctl_table kern_table[] = { | |||
| 872 | #if defined(CONFIG_LOCKUP_DETECTOR) | 872 | #if defined(CONFIG_LOCKUP_DETECTOR) |
| 873 | { | 873 | { |
| 874 | .procname = "watchdog", | 874 | .procname = "watchdog", |
| 875 | .data = &watchdog_user_enabled, | 875 | .data = &watchdog_user_enabled, |
| 876 | .maxlen = sizeof (int), | 876 | .maxlen = sizeof(int), |
| 877 | .mode = 0644, | 877 | .mode = 0644, |
| 878 | .proc_handler = proc_watchdog, | 878 | .proc_handler = proc_watchdog, |
| 879 | .extra1 = &zero, | 879 | .extra1 = &zero, |
| 880 | .extra2 = &one, | 880 | .extra2 = &one, |
| @@ -890,16 +890,12 @@ static struct ctl_table kern_table[] = { | |||
| 890 | }, | 890 | }, |
| 891 | { | 891 | { |
| 892 | .procname = "nmi_watchdog", | 892 | .procname = "nmi_watchdog", |
| 893 | .data = &nmi_watchdog_enabled, | 893 | .data = &nmi_watchdog_user_enabled, |
| 894 | .maxlen = sizeof (int), | 894 | .maxlen = sizeof(int), |
| 895 | .mode = 0644, | 895 | .mode = NMI_WATCHDOG_SYSCTL_PERM, |
| 896 | .proc_handler = proc_nmi_watchdog, | 896 | .proc_handler = proc_nmi_watchdog, |
| 897 | .extra1 = &zero, | 897 | .extra1 = &zero, |
| 898 | #if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR) | ||
| 899 | .extra2 = &one, | 898 | .extra2 = &one, |
| 900 | #else | ||
| 901 | .extra2 = &zero, | ||
| 902 | #endif | ||
| 903 | }, | 899 | }, |
| 904 | { | 900 | { |
| 905 | .procname = "watchdog_cpumask", | 901 | .procname = "watchdog_cpumask", |
| @@ -911,9 +907,9 @@ static struct ctl_table kern_table[] = { | |||
| 911 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR | 907 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR |
| 912 | { | 908 | { |
| 913 | .procname = "soft_watchdog", | 909 | .procname = "soft_watchdog", |
| 914 | .data = &soft_watchdog_enabled, | 910 | .data = &soft_watchdog_user_enabled, |
| 915 | .maxlen = sizeof (int), | 911 | .maxlen = sizeof(int), |
| 916 | .mode = 0644, | 912 | .mode = 0644, |
| 917 | .proc_handler = proc_soft_watchdog, | 913 | .proc_handler = proc_soft_watchdog, |
| 918 | .extra1 = &zero, | 914 | .extra1 = &zero, |
| 919 | .extra2 = &one, | 915 | .extra2 = &one, |
| @@ -2188,8 +2184,6 @@ static int do_proc_douintvec_conv(unsigned long *lvalp, | |||
| 2188 | if (write) { | 2184 | if (write) { |
| 2189 | if (*lvalp > UINT_MAX) | 2185 | if (*lvalp > UINT_MAX) |
| 2190 | return -EINVAL; | 2186 | return -EINVAL; |
| 2191 | if (*lvalp > UINT_MAX) | ||
| 2192 | return -EINVAL; | ||
| 2193 | *valp = *lvalp; | 2187 | *valp = *lvalp; |
| 2194 | } else { | 2188 | } else { |
| 2195 | unsigned int val = *valp; | 2189 | unsigned int val = *valp; |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 6abfafd7f173..8319e09e15b9 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -4954,9 +4954,6 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; | |||
| 4954 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; | 4954 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; |
| 4955 | static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); | 4955 | static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer); |
| 4956 | 4956 | ||
| 4957 | static unsigned long save_global_trampoline; | ||
| 4958 | static unsigned long save_global_flags; | ||
| 4959 | |||
| 4960 | static int __init set_graph_function(char *str) | 4957 | static int __init set_graph_function(char *str) |
| 4961 | { | 4958 | { |
| 4962 | strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); | 4959 | strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); |
| @@ -6808,17 +6805,6 @@ void unregister_ftrace_graph(void) | |||
| 6808 | unregister_pm_notifier(&ftrace_suspend_notifier); | 6805 | unregister_pm_notifier(&ftrace_suspend_notifier); |
| 6809 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); | 6806 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
| 6810 | 6807 | ||
| 6811 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
| 6812 | /* | ||
| 6813 | * Function graph does not allocate the trampoline, but | ||
| 6814 | * other global_ops do. We need to reset the ALLOC_TRAMP flag | ||
| 6815 | * if one was used. | ||
| 6816 | */ | ||
| 6817 | global_ops.trampoline = save_global_trampoline; | ||
| 6818 | if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP) | ||
| 6819 | global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP; | ||
| 6820 | #endif | ||
| 6821 | |||
| 6822 | out: | 6808 | out: |
| 6823 | mutex_unlock(&ftrace_lock); | 6809 | mutex_unlock(&ftrace_lock); |
| 6824 | } | 6810 | } |
diff --git a/kernel/watchdog.c b/kernel/watchdog.c index f5d52024f6b7..6bcb854909c0 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c | |||
| @@ -29,20 +29,29 @@ | |||
| 29 | #include <linux/kvm_para.h> | 29 | #include <linux/kvm_para.h> |
| 30 | #include <linux/kthread.h> | 30 | #include <linux/kthread.h> |
| 31 | 31 | ||
| 32 | /* Watchdog configuration */ | 32 | static DEFINE_MUTEX(watchdog_mutex); |
| 33 | static DEFINE_MUTEX(watchdog_proc_mutex); | ||
| 34 | |||
| 35 | int __read_mostly nmi_watchdog_enabled; | ||
| 36 | 33 | ||
| 37 | #if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG) | 34 | #if defined(CONFIG_HARDLOCKUP_DETECTOR) || defined(CONFIG_HAVE_NMI_WATCHDOG) |
| 38 | unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED | | 35 | # define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED | NMI_WATCHDOG_ENABLED) |
| 39 | NMI_WATCHDOG_ENABLED; | 36 | # define NMI_WATCHDOG_DEFAULT 1 |
| 40 | #else | 37 | #else |
| 41 | unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED; | 38 | # define WATCHDOG_DEFAULT (SOFT_WATCHDOG_ENABLED) |
| 39 | # define NMI_WATCHDOG_DEFAULT 0 | ||
| 42 | #endif | 40 | #endif |
| 43 | 41 | ||
| 42 | unsigned long __read_mostly watchdog_enabled; | ||
| 43 | int __read_mostly watchdog_user_enabled = 1; | ||
| 44 | int __read_mostly nmi_watchdog_user_enabled = NMI_WATCHDOG_DEFAULT; | ||
| 45 | int __read_mostly soft_watchdog_user_enabled = 1; | ||
| 46 | int __read_mostly watchdog_thresh = 10; | ||
| 47 | int __read_mostly nmi_watchdog_available; | ||
| 48 | |||
| 49 | struct cpumask watchdog_allowed_mask __read_mostly; | ||
| 50 | |||
| 51 | struct cpumask watchdog_cpumask __read_mostly; | ||
| 52 | unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); | ||
| 53 | |||
| 44 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | 54 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
| 45 | /* boot commands */ | ||
| 46 | /* | 55 | /* |
| 47 | * Should we panic when a soft-lockup or hard-lockup occurs: | 56 | * Should we panic when a soft-lockup or hard-lockup occurs: |
| 48 | */ | 57 | */ |
| @@ -56,9 +65,9 @@ unsigned int __read_mostly hardlockup_panic = | |||
| 56 | * kernel command line parameters are parsed, because otherwise it is not | 65 | * kernel command line parameters are parsed, because otherwise it is not |
| 57 | * possible to override this in hardlockup_panic_setup(). | 66 | * possible to override this in hardlockup_panic_setup(). |
| 58 | */ | 67 | */ |
| 59 | void hardlockup_detector_disable(void) | 68 | void __init hardlockup_detector_disable(void) |
| 60 | { | 69 | { |
| 61 | watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; | 70 | nmi_watchdog_user_enabled = 0; |
| 62 | } | 71 | } |
| 63 | 72 | ||
| 64 | static int __init hardlockup_panic_setup(char *str) | 73 | static int __init hardlockup_panic_setup(char *str) |
| @@ -68,48 +77,24 @@ static int __init hardlockup_panic_setup(char *str) | |||
| 68 | else if (!strncmp(str, "nopanic", 7)) | 77 | else if (!strncmp(str, "nopanic", 7)) |
| 69 | hardlockup_panic = 0; | 78 | hardlockup_panic = 0; |
| 70 | else if (!strncmp(str, "0", 1)) | 79 | else if (!strncmp(str, "0", 1)) |
| 71 | watchdog_enabled &= ~NMI_WATCHDOG_ENABLED; | 80 | nmi_watchdog_user_enabled = 0; |
| 72 | else if (!strncmp(str, "1", 1)) | 81 | else if (!strncmp(str, "1", 1)) |
| 73 | watchdog_enabled |= NMI_WATCHDOG_ENABLED; | 82 | nmi_watchdog_user_enabled = 1; |
| 74 | return 1; | 83 | return 1; |
| 75 | } | 84 | } |
| 76 | __setup("nmi_watchdog=", hardlockup_panic_setup); | 85 | __setup("nmi_watchdog=", hardlockup_panic_setup); |
| 77 | 86 | ||
| 78 | #endif | 87 | # ifdef CONFIG_SMP |
| 79 | |||
| 80 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR | ||
| 81 | int __read_mostly soft_watchdog_enabled; | ||
| 82 | #endif | ||
| 83 | |||
| 84 | int __read_mostly watchdog_user_enabled; | ||
| 85 | int __read_mostly watchdog_thresh = 10; | ||
| 86 | |||
| 87 | #ifdef CONFIG_SMP | ||
| 88 | int __read_mostly sysctl_softlockup_all_cpu_backtrace; | ||
| 89 | int __read_mostly sysctl_hardlockup_all_cpu_backtrace; | 88 | int __read_mostly sysctl_hardlockup_all_cpu_backtrace; |
| 90 | #endif | ||
| 91 | struct cpumask watchdog_cpumask __read_mostly; | ||
| 92 | unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask); | ||
| 93 | 89 | ||
| 94 | /* | 90 | static int __init hardlockup_all_cpu_backtrace_setup(char *str) |
| 95 | * The 'watchdog_running' variable is set to 1 when the watchdog threads | 91 | { |
| 96 | * are registered/started and is set to 0 when the watchdog threads are | 92 | sysctl_hardlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0); |
| 97 | * unregistered/stopped, so it is an indicator whether the threads exist. | 93 | return 1; |
| 98 | */ | 94 | } |
| 99 | static int __read_mostly watchdog_running; | 95 | __setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup); |
| 100 | /* | 96 | # endif /* CONFIG_SMP */ |
| 101 | * If a subsystem has a need to deactivate the watchdog temporarily, it | 97 | #endif /* CONFIG_HARDLOCKUP_DETECTOR */ |
| 102 | * can use the suspend/resume interface to achieve this. The content of | ||
| 103 | * the 'watchdog_suspended' variable reflects this state. Existing threads | ||
| 104 | * are parked/unparked by the lockup_detector_{suspend|resume} functions | ||
| 105 | * (see comment blocks pertaining to those functions for further details). | ||
| 106 | * | ||
| 107 | * 'watchdog_suspended' also prevents threads from being registered/started | ||
| 108 | * or unregistered/stopped via parameters in /proc/sys/kernel, so the state | ||
| 109 | * of 'watchdog_running' cannot change while the watchdog is deactivated | ||
| 110 | * temporarily (see related code in 'proc' handlers). | ||
| 111 | */ | ||
| 112 | int __read_mostly watchdog_suspended; | ||
| 113 | 98 | ||
| 114 | /* | 99 | /* |
| 115 | * These functions can be overridden if an architecture implements its | 100 | * These functions can be overridden if an architecture implements its |
| @@ -121,36 +106,68 @@ int __read_mostly watchdog_suspended; | |||
| 121 | */ | 106 | */ |
| 122 | int __weak watchdog_nmi_enable(unsigned int cpu) | 107 | int __weak watchdog_nmi_enable(unsigned int cpu) |
| 123 | { | 108 | { |
| 109 | hardlockup_detector_perf_enable(); | ||
| 124 | return 0; | 110 | return 0; |
| 125 | } | 111 | } |
| 112 | |||
| 126 | void __weak watchdog_nmi_disable(unsigned int cpu) | 113 | void __weak watchdog_nmi_disable(unsigned int cpu) |
| 127 | { | 114 | { |
| 115 | hardlockup_detector_perf_disable(); | ||
| 128 | } | 116 | } |
| 129 | 117 | ||
| 130 | /* | 118 | /* Return 0, if a NMI watchdog is available. Error code otherwise */ |
| 131 | * watchdog_nmi_reconfigure can be implemented to be notified after any | 119 | int __weak __init watchdog_nmi_probe(void) |
| 132 | * watchdog configuration change. The arch hardlockup watchdog should | 120 | { |
| 133 | * respond to the following variables: | 121 | return hardlockup_detector_perf_init(); |
| 134 | * - nmi_watchdog_enabled | 122 | } |
| 123 | |||
| 124 | /** | ||
| 125 | * watchdog_nmi_stop - Stop the watchdog for reconfiguration | ||
| 126 | * | ||
| 127 | * The reconfiguration steps are: | ||
| 128 | * watchdog_nmi_stop(); | ||
| 129 | * update_variables(); | ||
| 130 | * watchdog_nmi_start(); | ||
| 131 | */ | ||
| 132 | void __weak watchdog_nmi_stop(void) { } | ||
| 133 | |||
| 134 | /** | ||
| 135 | * watchdog_nmi_start - Start the watchdog after reconfiguration | ||
| 136 | * | ||
| 137 | * Counterpart to watchdog_nmi_stop(). | ||
| 138 | * | ||
| 139 | * The following variables have been updated in update_variables() and | ||
| 140 | * contain the currently valid configuration: | ||
| 141 | * - watchdog_enabled | ||
| 135 | * - watchdog_thresh | 142 | * - watchdog_thresh |
| 136 | * - watchdog_cpumask | 143 | * - watchdog_cpumask |
| 137 | * - sysctl_hardlockup_all_cpu_backtrace | ||
| 138 | * - hardlockup_panic | ||
| 139 | * - watchdog_suspended | ||
| 140 | */ | 144 | */ |
| 141 | void __weak watchdog_nmi_reconfigure(void) | 145 | void __weak watchdog_nmi_start(void) { } |
| 146 | |||
| 147 | /** | ||
| 148 | * lockup_detector_update_enable - Update the sysctl enable bit | ||
| 149 | * | ||
| 150 | * Caller needs to make sure that the NMI/perf watchdogs are off, so this | ||
| 151 | * can't race with watchdog_nmi_disable(). | ||
| 152 | */ | ||
| 153 | static void lockup_detector_update_enable(void) | ||
| 142 | { | 154 | { |
| 155 | watchdog_enabled = 0; | ||
| 156 | if (!watchdog_user_enabled) | ||
| 157 | return; | ||
| 158 | if (nmi_watchdog_available && nmi_watchdog_user_enabled) | ||
| 159 | watchdog_enabled |= NMI_WATCHDOG_ENABLED; | ||
| 160 | if (soft_watchdog_user_enabled) | ||
| 161 | watchdog_enabled |= SOFT_WATCHDOG_ENABLED; | ||
| 143 | } | 162 | } |
| 144 | 163 | ||
| 145 | |||
| 146 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR | 164 | #ifdef CONFIG_SOFTLOCKUP_DETECTOR |
| 147 | 165 | ||
| 148 | /* Helper for online, unparked cpus. */ | 166 | /* Global variables, exported for sysctl */ |
| 149 | #define for_each_watchdog_cpu(cpu) \ | 167 | unsigned int __read_mostly softlockup_panic = |
| 150 | for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask) | 168 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; |
| 151 | |||
| 152 | atomic_t watchdog_park_in_progress = ATOMIC_INIT(0); | ||
| 153 | 169 | ||
| 170 | static bool softlockup_threads_initialized __read_mostly; | ||
| 154 | static u64 __read_mostly sample_period; | 171 | static u64 __read_mostly sample_period; |
| 155 | 172 | ||
| 156 | static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); | 173 | static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); |
| @@ -164,50 +181,40 @@ static DEFINE_PER_CPU(struct task_struct *, softlockup_task_ptr_saved); | |||
| 164 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); | 181 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); |
| 165 | static unsigned long soft_lockup_nmi_warn; | 182 | static unsigned long soft_lockup_nmi_warn; |
| 166 | 183 | ||
| 167 | unsigned int __read_mostly softlockup_panic = | ||
| 168 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; | ||
| 169 | |||
| 170 | static int __init softlockup_panic_setup(char *str) | 184 | static int __init softlockup_panic_setup(char *str) |
| 171 | { | 185 | { |
| 172 | softlockup_panic = simple_strtoul(str, NULL, 0); | 186 | softlockup_panic = simple_strtoul(str, NULL, 0); |
| 173 | |||
| 174 | return 1; | 187 | return 1; |
| 175 | } | 188 | } |
| 176 | __setup("softlockup_panic=", softlockup_panic_setup); | 189 | __setup("softlockup_panic=", softlockup_panic_setup); |
| 177 | 190 | ||
| 178 | static int __init nowatchdog_setup(char *str) | 191 | static int __init nowatchdog_setup(char *str) |
| 179 | { | 192 | { |
| 180 | watchdog_enabled = 0; | 193 | watchdog_user_enabled = 0; |
| 181 | return 1; | 194 | return 1; |
| 182 | } | 195 | } |
| 183 | __setup("nowatchdog", nowatchdog_setup); | 196 | __setup("nowatchdog", nowatchdog_setup); |
| 184 | 197 | ||
| 185 | static int __init nosoftlockup_setup(char *str) | 198 | static int __init nosoftlockup_setup(char *str) |
| 186 | { | 199 | { |
| 187 | watchdog_enabled &= ~SOFT_WATCHDOG_ENABLED; | 200 | soft_watchdog_user_enabled = 0; |
| 188 | return 1; | 201 | return 1; |
| 189 | } | 202 | } |
| 190 | __setup("nosoftlockup", nosoftlockup_setup); | 203 | __setup("nosoftlockup", nosoftlockup_setup); |
| 191 | 204 | ||
| 192 | #ifdef CONFIG_SMP | 205 | #ifdef CONFIG_SMP |
| 206 | int __read_mostly sysctl_softlockup_all_cpu_backtrace; | ||
| 207 | |||
| 193 | static int __init softlockup_all_cpu_backtrace_setup(char *str) | 208 | static int __init softlockup_all_cpu_backtrace_setup(char *str) |
| 194 | { | 209 | { |
| 195 | sysctl_softlockup_all_cpu_backtrace = | 210 | sysctl_softlockup_all_cpu_backtrace = !!simple_strtol(str, NULL, 0); |
| 196 | !!simple_strtol(str, NULL, 0); | ||
| 197 | return 1; | 211 | return 1; |
| 198 | } | 212 | } |
| 199 | __setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup); | 213 | __setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup); |
| 200 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
| 201 | static int __init hardlockup_all_cpu_backtrace_setup(char *str) | ||
| 202 | { | ||
| 203 | sysctl_hardlockup_all_cpu_backtrace = | ||
| 204 | !!simple_strtol(str, NULL, 0); | ||
| 205 | return 1; | ||
| 206 | } | ||
| 207 | __setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup); | ||
| 208 | #endif | ||
| 209 | #endif | 214 | #endif |
| 210 | 215 | ||
| 216 | static void __lockup_detector_cleanup(void); | ||
| 217 | |||
| 211 | /* | 218 | /* |
| 212 | * Hard-lockup warnings should be triggered after just a few seconds. Soft- | 219 | * Hard-lockup warnings should be triggered after just a few seconds. Soft- |
| 213 | * lockups can have false positives under extreme conditions. So we generally | 220 | * lockups can have false positives under extreme conditions. So we generally |
| @@ -278,11 +285,15 @@ void touch_all_softlockup_watchdogs(void) | |||
| 278 | int cpu; | 285 | int cpu; |
| 279 | 286 | ||
| 280 | /* | 287 | /* |
| 281 | * this is done lockless | 288 | * watchdog_mutex cannot be taken here, as this might be called |
| 282 | * do we care if a 0 races with a timestamp? | 289 | * from (soft)interrupt context, so the access to |
| 283 | * all it means is the softlock check starts one cycle later | 290 | * watchdog_allowed_mask might race with a concurrent update. |
| 291 | * | ||
| 292 | * The watchdog time stamp can race against a concurrent real | ||
| 293 | * update as well, the only side effect might be a cycle delay for | ||
| 294 | * the softlockup check. | ||
| 284 | */ | 295 | */ |
| 285 | for_each_watchdog_cpu(cpu) | 296 | for_each_cpu(cpu, &watchdog_allowed_mask) |
| 286 | per_cpu(watchdog_touch_ts, cpu) = 0; | 297 | per_cpu(watchdog_touch_ts, cpu) = 0; |
| 287 | wq_watchdog_touch(-1); | 298 | wq_watchdog_touch(-1); |
| 288 | } | 299 | } |
| @@ -322,9 +333,6 @@ static void watchdog_interrupt_count(void) | |||
| 322 | __this_cpu_inc(hrtimer_interrupts); | 333 | __this_cpu_inc(hrtimer_interrupts); |
| 323 | } | 334 | } |
| 324 | 335 | ||
| 325 | static int watchdog_enable_all_cpus(void); | ||
| 326 | static void watchdog_disable_all_cpus(void); | ||
| 327 | |||
| 328 | /* watchdog kicker functions */ | 336 | /* watchdog kicker functions */ |
| 329 | static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | 337 | static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) |
| 330 | { | 338 | { |
| @@ -333,7 +341,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | |||
| 333 | int duration; | 341 | int duration; |
| 334 | int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; | 342 | int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace; |
| 335 | 343 | ||
| 336 | if (atomic_read(&watchdog_park_in_progress) != 0) | 344 | if (!watchdog_enabled) |
| 337 | return HRTIMER_NORESTART; | 345 | return HRTIMER_NORESTART; |
| 338 | 346 | ||
| 339 | /* kick the hardlockup detector */ | 347 | /* kick the hardlockup detector */ |
| @@ -447,32 +455,38 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio) | |||
| 447 | 455 | ||
| 448 | static void watchdog_enable(unsigned int cpu) | 456 | static void watchdog_enable(unsigned int cpu) |
| 449 | { | 457 | { |
| 450 | struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); | 458 | struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer); |
| 451 | 459 | ||
| 452 | /* kick off the timer for the hardlockup detector */ | 460 | /* |
| 461 | * Start the timer first to prevent the NMI watchdog triggering | ||
| 462 | * before the timer has a chance to fire. | ||
| 463 | */ | ||
| 453 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 464 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
| 454 | hrtimer->function = watchdog_timer_fn; | 465 | hrtimer->function = watchdog_timer_fn; |
| 455 | |||
| 456 | /* Enable the perf event */ | ||
| 457 | watchdog_nmi_enable(cpu); | ||
| 458 | |||
| 459 | /* done here because hrtimer_start can only pin to smp_processor_id() */ | ||
| 460 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), | 466 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), |
| 461 | HRTIMER_MODE_REL_PINNED); | 467 | HRTIMER_MODE_REL_PINNED); |
| 462 | 468 | ||
| 463 | /* initialize timestamp */ | 469 | /* Initialize timestamp */ |
| 464 | watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1); | ||
| 465 | __touch_watchdog(); | 470 | __touch_watchdog(); |
| 471 | /* Enable the perf event */ | ||
| 472 | if (watchdog_enabled & NMI_WATCHDOG_ENABLED) | ||
| 473 | watchdog_nmi_enable(cpu); | ||
| 474 | |||
| 475 | watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1); | ||
| 466 | } | 476 | } |
| 467 | 477 | ||
| 468 | static void watchdog_disable(unsigned int cpu) | 478 | static void watchdog_disable(unsigned int cpu) |
| 469 | { | 479 | { |
| 470 | struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer); | 480 | struct hrtimer *hrtimer = this_cpu_ptr(&watchdog_hrtimer); |
| 471 | 481 | ||
| 472 | watchdog_set_prio(SCHED_NORMAL, 0); | 482 | watchdog_set_prio(SCHED_NORMAL, 0); |
| 473 | hrtimer_cancel(hrtimer); | 483 | /* |
| 474 | /* disable the perf event */ | 484 | * Disable the perf event first. That prevents that a large delay |
| 485 | * between disabling the timer and disabling the perf event causes | ||
| 486 | * the perf NMI to detect a false positive. | ||
| 487 | */ | ||
| 475 | watchdog_nmi_disable(cpu); | 488 | watchdog_nmi_disable(cpu); |
| 489 | hrtimer_cancel(hrtimer); | ||
| 476 | } | 490 | } |
| 477 | 491 | ||
| 478 | static void watchdog_cleanup(unsigned int cpu, bool online) | 492 | static void watchdog_cleanup(unsigned int cpu, bool online) |
| @@ -499,21 +513,6 @@ static void watchdog(unsigned int cpu) | |||
| 499 | __this_cpu_write(soft_lockup_hrtimer_cnt, | 513 | __this_cpu_write(soft_lockup_hrtimer_cnt, |
| 500 | __this_cpu_read(hrtimer_interrupts)); | 514 | __this_cpu_read(hrtimer_interrupts)); |
| 501 | __touch_watchdog(); | 515 | __touch_watchdog(); |
| 502 | |||
| 503 | /* | ||
| 504 | * watchdog_nmi_enable() clears the NMI_WATCHDOG_ENABLED bit in the | ||
| 505 | * failure path. Check for failures that can occur asynchronously - | ||
| 506 | * for example, when CPUs are on-lined - and shut down the hardware | ||
| 507 | * perf event on each CPU accordingly. | ||
| 508 | * | ||
| 509 | * The only non-obvious place this bit can be cleared is through | ||
| 510 | * watchdog_nmi_enable(), so a pr_info() is placed there. Placing a | ||
| 511 | * pr_info here would be too noisy as it would result in a message | ||
| 512 | * every few seconds if the hardlockup was disabled but the softlockup | ||
| 513 | * enabled. | ||
| 514 | */ | ||
| 515 | if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) | ||
| 516 | watchdog_nmi_disable(cpu); | ||
| 517 | } | 516 | } |
| 518 | 517 | ||
| 519 | static struct smp_hotplug_thread watchdog_threads = { | 518 | static struct smp_hotplug_thread watchdog_threads = { |
| @@ -527,295 +526,174 @@ static struct smp_hotplug_thread watchdog_threads = { | |||
| 527 | .unpark = watchdog_enable, | 526 | .unpark = watchdog_enable, |
| 528 | }; | 527 | }; |
| 529 | 528 | ||
| 530 | /* | 529 | static void softlockup_update_smpboot_threads(void) |
| 531 | * park all watchdog threads that are specified in 'watchdog_cpumask' | ||
| 532 | * | ||
| 533 | * This function returns an error if kthread_park() of a watchdog thread | ||
| 534 | * fails. In this situation, the watchdog threads of some CPUs can already | ||
| 535 | * be parked and the watchdog threads of other CPUs can still be runnable. | ||
| 536 | * Callers are expected to handle this special condition as appropriate in | ||
| 537 | * their context. | ||
| 538 | * | ||
| 539 | * This function may only be called in a context that is protected against | ||
| 540 | * races with CPU hotplug - for example, via get_online_cpus(). | ||
| 541 | */ | ||
| 542 | static int watchdog_park_threads(void) | ||
| 543 | { | 530 | { |
| 544 | int cpu, ret = 0; | 531 | lockdep_assert_held(&watchdog_mutex); |
| 545 | 532 | ||
| 546 | atomic_set(&watchdog_park_in_progress, 1); | 533 | if (!softlockup_threads_initialized) |
| 534 | return; | ||
| 547 | 535 | ||
| 548 | for_each_watchdog_cpu(cpu) { | 536 | smpboot_update_cpumask_percpu_thread(&watchdog_threads, |
| 549 | ret = kthread_park(per_cpu(softlockup_watchdog, cpu)); | 537 | &watchdog_allowed_mask); |
| 550 | if (ret) | ||
| 551 | break; | ||
| 552 | } | ||
| 553 | |||
| 554 | atomic_set(&watchdog_park_in_progress, 0); | ||
| 555 | |||
| 556 | return ret; | ||
| 557 | } | 538 | } |
| 558 | 539 | ||
| 559 | /* | 540 | /* Temporarily park all watchdog threads */ |
| 560 | * unpark all watchdog threads that are specified in 'watchdog_cpumask' | 541 | static void softlockup_park_all_threads(void) |
| 561 | * | ||
| 562 | * This function may only be called in a context that is protected against | ||
| 563 | * races with CPU hotplug - for example, via get_online_cpus(). | ||
| 564 | */ | ||
| 565 | static void watchdog_unpark_threads(void) | ||
| 566 | { | 542 | { |
| 567 | int cpu; | 543 | cpumask_clear(&watchdog_allowed_mask); |
| 568 | 544 | softlockup_update_smpboot_threads(); | |
| 569 | for_each_watchdog_cpu(cpu) | ||
| 570 | kthread_unpark(per_cpu(softlockup_watchdog, cpu)); | ||
| 571 | } | 545 | } |
| 572 | 546 | ||
| 573 | static int update_watchdog_all_cpus(void) | 547 | /* Unpark enabled threads */ |
| 548 | static void softlockup_unpark_threads(void) | ||
| 574 | { | 549 | { |
| 575 | int ret; | 550 | cpumask_copy(&watchdog_allowed_mask, &watchdog_cpumask); |
| 576 | 551 | softlockup_update_smpboot_threads(); | |
| 577 | ret = watchdog_park_threads(); | ||
| 578 | if (ret) | ||
| 579 | return ret; | ||
| 580 | |||
| 581 | watchdog_unpark_threads(); | ||
| 582 | |||
| 583 | return 0; | ||
| 584 | } | 552 | } |
| 585 | 553 | ||
| 586 | static int watchdog_enable_all_cpus(void) | 554 | static void lockup_detector_reconfigure(void) |
| 587 | { | 555 | { |
| 588 | int err = 0; | 556 | cpus_read_lock(); |
| 589 | 557 | watchdog_nmi_stop(); | |
| 590 | if (!watchdog_running) { | 558 | softlockup_park_all_threads(); |
| 591 | err = smpboot_register_percpu_thread_cpumask(&watchdog_threads, | 559 | set_sample_period(); |
| 592 | &watchdog_cpumask); | 560 | lockup_detector_update_enable(); |
| 593 | if (err) | 561 | if (watchdog_enabled && watchdog_thresh) |
| 594 | pr_err("Failed to create watchdog threads, disabled\n"); | 562 | softlockup_unpark_threads(); |
| 595 | else | 563 | watchdog_nmi_start(); |
| 596 | watchdog_running = 1; | 564 | cpus_read_unlock(); |
| 597 | } else { | 565 | /* |
| 598 | /* | 566 | * Must be called outside the cpus locked section to prevent |
| 599 | * Enable/disable the lockup detectors or | 567 | * recursive locking in the perf code. |
| 600 | * change the sample period 'on the fly'. | 568 | */ |
| 601 | */ | 569 | __lockup_detector_cleanup(); |
| 602 | err = update_watchdog_all_cpus(); | ||
| 603 | |||
| 604 | if (err) { | ||
| 605 | watchdog_disable_all_cpus(); | ||
| 606 | pr_err("Failed to update lockup detectors, disabled\n"); | ||
| 607 | } | ||
| 608 | } | ||
| 609 | |||
| 610 | if (err) | ||
| 611 | watchdog_enabled = 0; | ||
| 612 | |||
| 613 | return err; | ||
| 614 | } | 570 | } |
| 615 | 571 | ||
| 616 | static void watchdog_disable_all_cpus(void) | 572 | /* |
| 573 | * Create the watchdog thread infrastructure and configure the detector(s). | ||
| 574 | * | ||
| 575 | * The threads are not unparked as watchdog_allowed_mask is empty. When | ||
| 576 | * the threads are successfully initialized, take the proper locks and | ||
| 577 | * unpark the threads in the watchdog_cpumask if the watchdog is enabled. | ||
| 578 | */ | ||
| 579 | static __init void lockup_detector_setup(void) | ||
| 617 | { | 580 | { |
| 618 | if (watchdog_running) { | 581 | int ret; |
| 619 | watchdog_running = 0; | ||
| 620 | smpboot_unregister_percpu_thread(&watchdog_threads); | ||
| 621 | } | ||
| 622 | } | ||
| 623 | 582 | ||
| 624 | #ifdef CONFIG_SYSCTL | 583 | /* |
| 625 | static int watchdog_update_cpus(void) | 584 | * If sysctl is off and watchdog got disabled on the command line, |
| 626 | { | 585 | * nothing to do here. |
| 627 | return smpboot_update_cpumask_percpu_thread( | 586 | */ |
| 628 | &watchdog_threads, &watchdog_cpumask); | 587 | lockup_detector_update_enable(); |
| 629 | } | ||
| 630 | #endif | ||
| 631 | 588 | ||
| 632 | #else /* SOFTLOCKUP */ | 589 | if (!IS_ENABLED(CONFIG_SYSCTL) && |
| 633 | static int watchdog_park_threads(void) | 590 | !(watchdog_enabled && watchdog_thresh)) |
| 634 | { | 591 | return; |
| 635 | return 0; | ||
| 636 | } | ||
| 637 | 592 | ||
| 638 | static void watchdog_unpark_threads(void) | 593 | ret = smpboot_register_percpu_thread_cpumask(&watchdog_threads, |
| 639 | { | 594 | &watchdog_allowed_mask); |
| 640 | } | 595 | if (ret) { |
| 596 | pr_err("Failed to initialize soft lockup detector threads\n"); | ||
| 597 | return; | ||
| 598 | } | ||
| 641 | 599 | ||
| 642 | static int watchdog_enable_all_cpus(void) | 600 | mutex_lock(&watchdog_mutex); |
| 643 | { | 601 | softlockup_threads_initialized = true; |
| 644 | return 0; | 602 | lockup_detector_reconfigure(); |
| 603 | mutex_unlock(&watchdog_mutex); | ||
| 645 | } | 604 | } |
| 646 | 605 | ||
| 647 | static void watchdog_disable_all_cpus(void) | 606 | #else /* CONFIG_SOFTLOCKUP_DETECTOR */ |
| 607 | static inline int watchdog_park_threads(void) { return 0; } | ||
| 608 | static inline void watchdog_unpark_threads(void) { } | ||
| 609 | static inline int watchdog_enable_all_cpus(void) { return 0; } | ||
| 610 | static inline void watchdog_disable_all_cpus(void) { } | ||
| 611 | static void lockup_detector_reconfigure(void) | ||
| 648 | { | 612 | { |
| 613 | cpus_read_lock(); | ||
| 614 | watchdog_nmi_stop(); | ||
| 615 | lockup_detector_update_enable(); | ||
| 616 | watchdog_nmi_start(); | ||
| 617 | cpus_read_unlock(); | ||
| 649 | } | 618 | } |
| 650 | 619 | static inline void lockup_detector_setup(void) | |
| 651 | #ifdef CONFIG_SYSCTL | ||
| 652 | static int watchdog_update_cpus(void) | ||
| 653 | { | 620 | { |
| 654 | return 0; | 621 | lockup_detector_reconfigure(); |
| 655 | } | 622 | } |
| 656 | #endif | 623 | #endif /* !CONFIG_SOFTLOCKUP_DETECTOR */ |
| 657 | 624 | ||
| 658 | static void set_sample_period(void) | 625 | static void __lockup_detector_cleanup(void) |
| 659 | { | 626 | { |
| 627 | lockdep_assert_held(&watchdog_mutex); | ||
| 628 | hardlockup_detector_perf_cleanup(); | ||
| 660 | } | 629 | } |
| 661 | #endif /* SOFTLOCKUP */ | ||
| 662 | 630 | ||
| 663 | /* | 631 | /** |
| 664 | * Suspend the hard and soft lockup detector by parking the watchdog threads. | 632 | * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes |
| 633 | * | ||
| 634 | * Caller must not hold the cpu hotplug rwsem. | ||
| 665 | */ | 635 | */ |
| 666 | int lockup_detector_suspend(void) | 636 | void lockup_detector_cleanup(void) |
| 667 | { | 637 | { |
| 668 | int ret = 0; | 638 | mutex_lock(&watchdog_mutex); |
| 669 | 639 | __lockup_detector_cleanup(); | |
| 670 | get_online_cpus(); | 640 | mutex_unlock(&watchdog_mutex); |
| 671 | mutex_lock(&watchdog_proc_mutex); | ||
| 672 | /* | ||
| 673 | * Multiple suspend requests can be active in parallel (counted by | ||
| 674 | * the 'watchdog_suspended' variable). If the watchdog threads are | ||
| 675 | * running, the first caller takes care that they will be parked. | ||
| 676 | * The state of 'watchdog_running' cannot change while a suspend | ||
| 677 | * request is active (see related code in 'proc' handlers). | ||
| 678 | */ | ||
| 679 | if (watchdog_running && !watchdog_suspended) | ||
| 680 | ret = watchdog_park_threads(); | ||
| 681 | |||
| 682 | if (ret == 0) | ||
| 683 | watchdog_suspended++; | ||
| 684 | else { | ||
| 685 | watchdog_disable_all_cpus(); | ||
| 686 | pr_err("Failed to suspend lockup detectors, disabled\n"); | ||
| 687 | watchdog_enabled = 0; | ||
| 688 | } | ||
| 689 | |||
| 690 | watchdog_nmi_reconfigure(); | ||
| 691 | |||
| 692 | mutex_unlock(&watchdog_proc_mutex); | ||
| 693 | |||
| 694 | return ret; | ||
| 695 | } | 641 | } |
| 696 | 642 | ||
| 697 | /* | 643 | /** |
| 698 | * Resume the hard and soft lockup detector by unparking the watchdog threads. | 644 | * lockup_detector_soft_poweroff - Interface to stop lockup detector(s) |
| 645 | * | ||
| 646 | * Special interface for parisc. It prevents lockup detector warnings from | ||
| 647 | * the default pm_poweroff() function which busy loops forever. | ||
| 699 | */ | 648 | */ |
| 700 | void lockup_detector_resume(void) | 649 | void lockup_detector_soft_poweroff(void) |
| 701 | { | 650 | { |
| 702 | mutex_lock(&watchdog_proc_mutex); | 651 | watchdog_enabled = 0; |
| 703 | |||
| 704 | watchdog_suspended--; | ||
| 705 | /* | ||
| 706 | * The watchdog threads are unparked if they were previously running | ||
| 707 | * and if there is no more active suspend request. | ||
| 708 | */ | ||
| 709 | if (watchdog_running && !watchdog_suspended) | ||
| 710 | watchdog_unpark_threads(); | ||
| 711 | |||
| 712 | watchdog_nmi_reconfigure(); | ||
| 713 | |||
| 714 | mutex_unlock(&watchdog_proc_mutex); | ||
| 715 | put_online_cpus(); | ||
| 716 | } | 652 | } |
| 717 | 653 | ||
| 718 | #ifdef CONFIG_SYSCTL | 654 | #ifdef CONFIG_SYSCTL |
| 719 | 655 | ||
| 720 | /* | 656 | /* Propagate any changes to the watchdog threads */ |
| 721 | * Update the run state of the lockup detectors. | 657 | static void proc_watchdog_update(void) |
| 722 | */ | ||
| 723 | static int proc_watchdog_update(void) | ||
| 724 | { | 658 | { |
| 725 | int err = 0; | 659 | /* Remove impossible cpus to keep sysctl output clean. */ |
| 726 | 660 | cpumask_and(&watchdog_cpumask, &watchdog_cpumask, cpu_possible_mask); | |
| 727 | /* | 661 | lockup_detector_reconfigure(); |
| 728 | * Watchdog threads won't be started if they are already active. | ||
| 729 | * The 'watchdog_running' variable in watchdog_*_all_cpus() takes | ||
| 730 | * care of this. If those threads are already active, the sample | ||
| 731 | * period will be updated and the lockup detectors will be enabled | ||
| 732 | * or disabled 'on the fly'. | ||
| 733 | */ | ||
| 734 | if (watchdog_enabled && watchdog_thresh) | ||
| 735 | err = watchdog_enable_all_cpus(); | ||
| 736 | else | ||
| 737 | watchdog_disable_all_cpus(); | ||
| 738 | |||
| 739 | watchdog_nmi_reconfigure(); | ||
| 740 | |||
| 741 | return err; | ||
| 742 | |||
| 743 | } | 662 | } |
| 744 | 663 | ||
| 745 | /* | 664 | /* |
| 746 | * common function for watchdog, nmi_watchdog and soft_watchdog parameter | 665 | * common function for watchdog, nmi_watchdog and soft_watchdog parameter |
| 747 | * | 666 | * |
| 748 | * caller | table->data points to | 'which' contains the flag(s) | 667 | * caller | table->data points to | 'which' |
| 749 | * -------------------|-----------------------|----------------------------- | 668 | * -------------------|----------------------------|-------------------------- |
| 750 | * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED or'ed | 669 | * proc_watchdog | watchdog_user_enabled | NMI_WATCHDOG_ENABLED | |
| 751 | * | | with SOFT_WATCHDOG_ENABLED | 670 | * | | SOFT_WATCHDOG_ENABLED |
| 752 | * -------------------|-----------------------|----------------------------- | 671 | * -------------------|----------------------------|-------------------------- |
| 753 | * proc_nmi_watchdog | nmi_watchdog_enabled | NMI_WATCHDOG_ENABLED | 672 | * proc_nmi_watchdog | nmi_watchdog_user_enabled | NMI_WATCHDOG_ENABLED |
| 754 | * -------------------|-----------------------|----------------------------- | 673 | * -------------------|----------------------------|-------------------------- |
| 755 | * proc_soft_watchdog | soft_watchdog_enabled | SOFT_WATCHDOG_ENABLED | 674 | * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED |
| 756 | */ | 675 | */ |
| 757 | static int proc_watchdog_common(int which, struct ctl_table *table, int write, | 676 | static int proc_watchdog_common(int which, struct ctl_table *table, int write, |
| 758 | void __user *buffer, size_t *lenp, loff_t *ppos) | 677 | void __user *buffer, size_t *lenp, loff_t *ppos) |
| 759 | { | 678 | { |
| 760 | int err, old, new; | 679 | int err, old, *param = table->data; |
| 761 | int *watchdog_param = (int *)table->data; | ||
| 762 | 680 | ||
| 763 | get_online_cpus(); | 681 | mutex_lock(&watchdog_mutex); |
| 764 | mutex_lock(&watchdog_proc_mutex); | ||
| 765 | 682 | ||
| 766 | if (watchdog_suspended) { | ||
| 767 | /* no parameter changes allowed while watchdog is suspended */ | ||
| 768 | err = -EAGAIN; | ||
| 769 | goto out; | ||
| 770 | } | ||
| 771 | |||
| 772 | /* | ||
| 773 | * If the parameter is being read return the state of the corresponding | ||
| 774 | * bit(s) in 'watchdog_enabled', else update 'watchdog_enabled' and the | ||
| 775 | * run state of the lockup detectors. | ||
| 776 | */ | ||
| 777 | if (!write) { | 683 | if (!write) { |
| 778 | *watchdog_param = (watchdog_enabled & which) != 0; | 684 | /* |
| 685 | * On read synchronize the userspace interface. This is a | ||
| 686 | * racy snapshot. | ||
| 687 | */ | ||
| 688 | *param = (watchdog_enabled & which) != 0; | ||
| 779 | err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | 689 | err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
| 780 | } else { | 690 | } else { |
| 691 | old = READ_ONCE(*param); | ||
| 781 | err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | 692 | err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
| 782 | if (err) | 693 | if (!err && old != READ_ONCE(*param)) |
| 783 | goto out; | 694 | proc_watchdog_update(); |
| 784 | |||
| 785 | /* | ||
| 786 | * There is a race window between fetching the current value | ||
| 787 | * from 'watchdog_enabled' and storing the new value. During | ||
| 788 | * this race window, watchdog_nmi_enable() can sneak in and | ||
| 789 | * clear the NMI_WATCHDOG_ENABLED bit in 'watchdog_enabled'. | ||
| 790 | * The 'cmpxchg' detects this race and the loop retries. | ||
| 791 | */ | ||
| 792 | do { | ||
| 793 | old = watchdog_enabled; | ||
| 794 | /* | ||
| 795 | * If the parameter value is not zero set the | ||
| 796 | * corresponding bit(s), else clear it(them). | ||
| 797 | */ | ||
| 798 | if (*watchdog_param) | ||
| 799 | new = old | which; | ||
| 800 | else | ||
| 801 | new = old & ~which; | ||
| 802 | } while (cmpxchg(&watchdog_enabled, old, new) != old); | ||
| 803 | |||
| 804 | /* | ||
| 805 | * Update the run state of the lockup detectors. There is _no_ | ||
| 806 | * need to check the value returned by proc_watchdog_update() | ||
| 807 | * and to restore the previous value of 'watchdog_enabled' as | ||
| 808 | * both lockup detectors are disabled if proc_watchdog_update() | ||
| 809 | * returns an error. | ||
| 810 | */ | ||
| 811 | if (old == new) | ||
| 812 | goto out; | ||
| 813 | |||
| 814 | err = proc_watchdog_update(); | ||
| 815 | } | 695 | } |
| 816 | out: | 696 | mutex_unlock(&watchdog_mutex); |
| 817 | mutex_unlock(&watchdog_proc_mutex); | ||
| 818 | put_online_cpus(); | ||
| 819 | return err; | 697 | return err; |
| 820 | } | 698 | } |
| 821 | 699 | ||
| @@ -835,6 +713,8 @@ int proc_watchdog(struct ctl_table *table, int write, | |||
| 835 | int proc_nmi_watchdog(struct ctl_table *table, int write, | 713 | int proc_nmi_watchdog(struct ctl_table *table, int write, |
| 836 | void __user *buffer, size_t *lenp, loff_t *ppos) | 714 | void __user *buffer, size_t *lenp, loff_t *ppos) |
| 837 | { | 715 | { |
| 716 | if (!nmi_watchdog_available && write) | ||
| 717 | return -ENOTSUPP; | ||
| 838 | return proc_watchdog_common(NMI_WATCHDOG_ENABLED, | 718 | return proc_watchdog_common(NMI_WATCHDOG_ENABLED, |
| 839 | table, write, buffer, lenp, ppos); | 719 | table, write, buffer, lenp, ppos); |
| 840 | } | 720 | } |
| @@ -855,39 +735,17 @@ int proc_soft_watchdog(struct ctl_table *table, int write, | |||
| 855 | int proc_watchdog_thresh(struct ctl_table *table, int write, | 735 | int proc_watchdog_thresh(struct ctl_table *table, int write, |
| 856 | void __user *buffer, size_t *lenp, loff_t *ppos) | 736 | void __user *buffer, size_t *lenp, loff_t *ppos) |
| 857 | { | 737 | { |
| 858 | int err, old, new; | 738 | int err, old; |
| 859 | |||
| 860 | get_online_cpus(); | ||
| 861 | mutex_lock(&watchdog_proc_mutex); | ||
| 862 | 739 | ||
| 863 | if (watchdog_suspended) { | 740 | mutex_lock(&watchdog_mutex); |
| 864 | /* no parameter changes allowed while watchdog is suspended */ | ||
| 865 | err = -EAGAIN; | ||
| 866 | goto out; | ||
| 867 | } | ||
| 868 | 741 | ||
| 869 | old = ACCESS_ONCE(watchdog_thresh); | 742 | old = READ_ONCE(watchdog_thresh); |
| 870 | err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | 743 | err = proc_dointvec_minmax(table, write, buffer, lenp, ppos); |
| 871 | 744 | ||
| 872 | if (err || !write) | 745 | if (!err && write && old != READ_ONCE(watchdog_thresh)) |
| 873 | goto out; | 746 | proc_watchdog_update(); |
| 874 | |||
| 875 | /* | ||
| 876 | * Update the sample period. Restore on failure. | ||
| 877 | */ | ||
| 878 | new = ACCESS_ONCE(watchdog_thresh); | ||
| 879 | if (old == new) | ||
| 880 | goto out; | ||
| 881 | 747 | ||
| 882 | set_sample_period(); | 748 | mutex_unlock(&watchdog_mutex); |
| 883 | err = proc_watchdog_update(); | ||
| 884 | if (err) { | ||
| 885 | watchdog_thresh = old; | ||
| 886 | set_sample_period(); | ||
| 887 | } | ||
| 888 | out: | ||
| 889 | mutex_unlock(&watchdog_proc_mutex); | ||
| 890 | put_online_cpus(); | ||
| 891 | return err; | 749 | return err; |
| 892 | } | 750 | } |
| 893 | 751 | ||
| @@ -902,45 +760,19 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write, | |||
| 902 | { | 760 | { |
| 903 | int err; | 761 | int err; |
| 904 | 762 | ||
| 905 | get_online_cpus(); | 763 | mutex_lock(&watchdog_mutex); |
| 906 | mutex_lock(&watchdog_proc_mutex); | ||
| 907 | |||
| 908 | if (watchdog_suspended) { | ||
| 909 | /* no parameter changes allowed while watchdog is suspended */ | ||
| 910 | err = -EAGAIN; | ||
| 911 | goto out; | ||
| 912 | } | ||
| 913 | 764 | ||
| 914 | err = proc_do_large_bitmap(table, write, buffer, lenp, ppos); | 765 | err = proc_do_large_bitmap(table, write, buffer, lenp, ppos); |
| 915 | if (!err && write) { | 766 | if (!err && write) |
| 916 | /* Remove impossible cpus to keep sysctl output cleaner. */ | 767 | proc_watchdog_update(); |
| 917 | cpumask_and(&watchdog_cpumask, &watchdog_cpumask, | ||
| 918 | cpu_possible_mask); | ||
| 919 | |||
| 920 | if (watchdog_running) { | ||
| 921 | /* | ||
| 922 | * Failure would be due to being unable to allocate | ||
| 923 | * a temporary cpumask, so we are likely not in a | ||
| 924 | * position to do much else to make things better. | ||
| 925 | */ | ||
| 926 | if (watchdog_update_cpus() != 0) | ||
| 927 | pr_err("cpumask update failed\n"); | ||
| 928 | } | ||
| 929 | 768 | ||
| 930 | watchdog_nmi_reconfigure(); | 769 | mutex_unlock(&watchdog_mutex); |
| 931 | } | ||
| 932 | out: | ||
| 933 | mutex_unlock(&watchdog_proc_mutex); | ||
| 934 | put_online_cpus(); | ||
| 935 | return err; | 770 | return err; |
| 936 | } | 771 | } |
| 937 | |||
| 938 | #endif /* CONFIG_SYSCTL */ | 772 | #endif /* CONFIG_SYSCTL */ |
| 939 | 773 | ||
| 940 | void __init lockup_detector_init(void) | 774 | void __init lockup_detector_init(void) |
| 941 | { | 775 | { |
| 942 | set_sample_period(); | ||
| 943 | |||
| 944 | #ifdef CONFIG_NO_HZ_FULL | 776 | #ifdef CONFIG_NO_HZ_FULL |
| 945 | if (tick_nohz_full_enabled()) { | 777 | if (tick_nohz_full_enabled()) { |
| 946 | pr_info("Disabling watchdog on nohz_full cores by default\n"); | 778 | pr_info("Disabling watchdog on nohz_full cores by default\n"); |
| @@ -951,6 +783,7 @@ void __init lockup_detector_init(void) | |||
| 951 | cpumask_copy(&watchdog_cpumask, cpu_possible_mask); | 783 | cpumask_copy(&watchdog_cpumask, cpu_possible_mask); |
| 952 | #endif | 784 | #endif |
| 953 | 785 | ||
| 954 | if (watchdog_enabled) | 786 | if (!watchdog_nmi_probe()) |
| 955 | watchdog_enable_all_cpus(); | 787 | nmi_watchdog_available = true; |
| 788 | lockup_detector_setup(); | ||
| 956 | } | 789 | } |
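
The reworked core drives every configuration change through the stop/update/start protocol and the __weak hooks shown above. As a rough sketch only (not taken from any in-tree architecture), an arch that has its own NMI source instead of the perf-based detector could override the hooks along these lines; arch_nmi_available(), arch_nmi_quiesce() and arch_nmi_arm() are hypothetical helpers:

	/* Sketch only: the arch_nmi_*() helpers are hypothetical. */
	int __init watchdog_nmi_probe(void)
	{
		return arch_nmi_available() ? 0 : -ENODEV;
	}

	void watchdog_nmi_stop(void)
	{
		/* Quiesce the NMI source while the core updates its variables. */
		arch_nmi_quiesce();
	}

	void watchdog_nmi_start(void)
	{
		/* watchdog_enabled, watchdog_thresh and watchdog_cpumask are valid here. */
		if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
			arch_nmi_arm(watchdog_thresh);
	}

With these overrides in place, lockup_detector_reconfigure() handles sysctl writes, threshold changes and cpumask updates without the arch code having to track the individual knobs.
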
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 3a09ea1b1d3d..71a62ceacdc8 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c | |||
| @@ -21,8 +21,10 @@ | |||
| 21 | static DEFINE_PER_CPU(bool, hard_watchdog_warn); | 21 | static DEFINE_PER_CPU(bool, hard_watchdog_warn); |
| 22 | static DEFINE_PER_CPU(bool, watchdog_nmi_touch); | 22 | static DEFINE_PER_CPU(bool, watchdog_nmi_touch); |
| 23 | static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); | 23 | static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); |
| 24 | static struct cpumask dead_events_mask; | ||
| 24 | 25 | ||
| 25 | static unsigned long hardlockup_allcpu_dumped; | 26 | static unsigned long hardlockup_allcpu_dumped; |
| 27 | static unsigned int watchdog_cpus; | ||
| 26 | 28 | ||
| 27 | void arch_touch_nmi_watchdog(void) | 29 | void arch_touch_nmi_watchdog(void) |
| 28 | { | 30 | { |
| @@ -103,15 +105,12 @@ static struct perf_event_attr wd_hw_attr = { | |||
| 103 | 105 | ||
| 104 | /* Callback function for perf event subsystem */ | 106 | /* Callback function for perf event subsystem */ |
| 105 | static void watchdog_overflow_callback(struct perf_event *event, | 107 | static void watchdog_overflow_callback(struct perf_event *event, |
| 106 | struct perf_sample_data *data, | 108 | struct perf_sample_data *data, |
| 107 | struct pt_regs *regs) | 109 | struct pt_regs *regs) |
| 108 | { | 110 | { |
| 109 | /* Ensure the watchdog never gets throttled */ | 111 | /* Ensure the watchdog never gets throttled */ |
| 110 | event->hw.interrupts = 0; | 112 | event->hw.interrupts = 0; |
| 111 | 113 | ||
| 112 | if (atomic_read(&watchdog_park_in_progress) != 0) | ||
| 113 | return; | ||
| 114 | |||
| 115 | if (__this_cpu_read(watchdog_nmi_touch) == true) { | 114 | if (__this_cpu_read(watchdog_nmi_touch) == true) { |
| 116 | __this_cpu_write(watchdog_nmi_touch, false); | 115 | __this_cpu_write(watchdog_nmi_touch, false); |
| 117 | return; | 116 | return; |
| @@ -160,104 +159,131 @@ static void watchdog_overflow_callback(struct perf_event *event, | |||
| 160 | return; | 159 | return; |
| 161 | } | 160 | } |
| 162 | 161 | ||
| 163 | /* | 162 | static int hardlockup_detector_event_create(void) |
| 164 | * People like the simple clean cpu node info on boot. | ||
| 165 | * Reduce the watchdog noise by only printing messages | ||
| 166 | * that are different from what cpu0 displayed. | ||
| 167 | */ | ||
| 168 | static unsigned long firstcpu_err; | ||
| 169 | static atomic_t watchdog_cpus; | ||
| 170 | |||
| 171 | int watchdog_nmi_enable(unsigned int cpu) | ||
| 172 | { | 163 | { |
| 164 | unsigned int cpu = smp_processor_id(); | ||
| 173 | struct perf_event_attr *wd_attr; | 165 | struct perf_event_attr *wd_attr; |
| 174 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | 166 | struct perf_event *evt; |
| 175 | int firstcpu = 0; | ||
| 176 | |||
| 177 | /* nothing to do if the hard lockup detector is disabled */ | ||
| 178 | if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) | ||
| 179 | goto out; | ||
| 180 | |||
| 181 | /* is it already setup and enabled? */ | ||
| 182 | if (event && event->state > PERF_EVENT_STATE_OFF) | ||
| 183 | goto out; | ||
| 184 | |||
| 185 | /* it is setup but not enabled */ | ||
| 186 | if (event != NULL) | ||
| 187 | goto out_enable; | ||
| 188 | |||
| 189 | if (atomic_inc_return(&watchdog_cpus) == 1) | ||
| 190 | firstcpu = 1; | ||
| 191 | 167 | ||
| 192 | wd_attr = &wd_hw_attr; | 168 | wd_attr = &wd_hw_attr; |
| 193 | wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); | 169 | wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); |
| 194 | 170 | ||
| 195 | /* Try to register using hardware perf events */ | 171 | /* Try to register using hardware perf events */ |
| 196 | event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); | 172 | evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL, |
| 173 | watchdog_overflow_callback, NULL); | ||
| 174 | if (IS_ERR(evt)) { | ||
| 175 | pr_info("Perf event create on CPU %d failed with %ld\n", cpu, | ||
| 176 | PTR_ERR(evt)); | ||
| 177 | return PTR_ERR(evt); | ||
| 178 | } | ||
| 179 | this_cpu_write(watchdog_ev, evt); | ||
| 180 | return 0; | ||
| 181 | } | ||
| 197 | 182 | ||
| 198 | /* save the first cpu's error for future comparision */ | 183 | /** |
| 199 | if (firstcpu && IS_ERR(event)) | 184 | * hardlockup_detector_perf_enable - Enable the local event |
| 200 | firstcpu_err = PTR_ERR(event); | 185 | */ |
| 186 | void hardlockup_detector_perf_enable(void) | ||
| 187 | { | ||
| 188 | if (hardlockup_detector_event_create()) | ||
| 189 | return; | ||
| 201 | 190 | ||
| 202 | if (!IS_ERR(event)) { | 191 | if (!watchdog_cpus++) |
| 203 | /* only print for the first cpu initialized */ | 192 | pr_info("Enabled. Permanently consumes one hw-PMU counter.\n"); |
| 204 | if (firstcpu || firstcpu_err) | ||
| 205 | pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n"); | ||
| 206 | goto out_save; | ||
| 207 | } | ||
| 208 | 193 | ||
| 209 | /* | 194 | perf_event_enable(this_cpu_read(watchdog_ev)); |
| 210 | * Disable the hard lockup detector if _any_ CPU fails to set up | ||
| 211 | * set up the hardware perf event. The watchdog() function checks | ||
| 212 | * the NMI_WATCHDOG_ENABLED bit periodically. | ||
| 213 | * | ||
| 214 | * The barriers are for syncing up watchdog_enabled across all the | ||
| 215 | * cpus, as clear_bit() does not use barriers. | ||
| 216 | */ | ||
| 217 | smp_mb__before_atomic(); | ||
| 218 | clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled); | ||
| 219 | smp_mb__after_atomic(); | ||
| 220 | |||
| 221 | /* skip displaying the same error again */ | ||
| 222 | if (!firstcpu && (PTR_ERR(event) == firstcpu_err)) | ||
| 223 | return PTR_ERR(event); | ||
| 224 | |||
| 225 | /* vary the KERN level based on the returned errno */ | ||
| 226 | if (PTR_ERR(event) == -EOPNOTSUPP) | ||
| 227 | pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu); | ||
| 228 | else if (PTR_ERR(event) == -ENOENT) | ||
| 229 | pr_warn("disabled (cpu%i): hardware events not enabled\n", | ||
| 230 | cpu); | ||
| 231 | else | ||
| 232 | pr_err("disabled (cpu%i): unable to create perf event: %ld\n", | ||
| 233 | cpu, PTR_ERR(event)); | ||
| 234 | |||
| 235 | pr_info("Shutting down hard lockup detector on all cpus\n"); | ||
| 236 | |||
| 237 | return PTR_ERR(event); | ||
| 238 | |||
| 239 | /* success path */ | ||
| 240 | out_save: | ||
| 241 | per_cpu(watchdog_ev, cpu) = event; | ||
| 242 | out_enable: | ||
| 243 | perf_event_enable(per_cpu(watchdog_ev, cpu)); | ||
| 244 | out: | ||
| 245 | return 0; | ||
| 246 | } | 195 | } |
| 247 | 196 | ||
| 248 | void watchdog_nmi_disable(unsigned int cpu) | 197 | /** |
| 198 | * hardlockup_detector_perf_disable - Disable the local event | ||
| 199 | */ | ||
| 200 | void hardlockup_detector_perf_disable(void) | ||
| 249 | { | 201 | { |
| 250 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | 202 | struct perf_event *event = this_cpu_read(watchdog_ev); |
| 251 | 203 | ||
| 252 | if (event) { | 204 | if (event) { |
| 253 | perf_event_disable(event); | 205 | perf_event_disable(event); |
| 206 | cpumask_set_cpu(smp_processor_id(), &dead_events_mask); | ||
| 207 | watchdog_cpus--; | ||
| 208 | } | ||
| 209 | } | ||
| 210 | |||
| 211 | /** | ||
| 212 | * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them | ||
| 213 | * | ||
| 214 | * Called from lockup_detector_cleanup(). Serialized by the caller. | ||
| 215 | */ | ||
| 216 | void hardlockup_detector_perf_cleanup(void) | ||
| 217 | { | ||
| 218 | int cpu; | ||
| 219 | |||
| 220 | for_each_cpu(cpu, &dead_events_mask) { | ||
| 221 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | ||
| 222 | |||
| 223 | /* | ||
| 224 | * Required because for_each_cpu() reports unconditionally | ||
| 225 | * CPU0 as set on UP kernels. Sigh. | ||
| 226 | */ | ||
| 227 | if (event) | ||
| 228 | perf_event_release_kernel(event); | ||
| 254 | per_cpu(watchdog_ev, cpu) = NULL; | 229 | per_cpu(watchdog_ev, cpu) = NULL; |
| 230 | } | ||
| 231 | cpumask_clear(&dead_events_mask); | ||
| 232 | } | ||
| 233 | |||
| 234 | /** | ||
| 235 | * hardlockup_detector_perf_stop - Globally stop watchdog events | ||
| 236 | * | ||
| 237 | * Special interface for x86 to handle the perf HT bug. | ||
| 238 | */ | ||
| 239 | void __init hardlockup_detector_perf_stop(void) | ||
| 240 | { | ||
| 241 | int cpu; | ||
| 242 | |||
| 243 | lockdep_assert_cpus_held(); | ||
| 244 | |||
| 245 | for_each_online_cpu(cpu) { | ||
| 246 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | ||
| 247 | |||
| 248 | if (event) | ||
| 249 | perf_event_disable(event); | ||
| 250 | } | ||
| 251 | } | ||
| 255 | 252 | ||
| 256 | /* should be in cleanup, but blocks oprofile */ | 253 | /** |
| 257 | perf_event_release_kernel(event); | 254 | * hardlockup_detector_perf_restart - Globally restart watchdog events |
| 255 | * | ||
| 256 | * Special interface for x86 to handle the perf HT bug. | ||
| 257 | */ | ||
| 258 | void __init hardlockup_detector_perf_restart(void) | ||
| 259 | { | ||
| 260 | int cpu; | ||
| 261 | |||
| 262 | lockdep_assert_cpus_held(); | ||
| 263 | |||
| 264 | if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED)) | ||
| 265 | return; | ||
| 266 | |||
| 267 | for_each_online_cpu(cpu) { | ||
| 268 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | ||
| 269 | |||
| 270 | if (event) | ||
| 271 | perf_event_enable(event); | ||
| 272 | } | ||
| 273 | } | ||
| 274 | |||
| 275 | /** | ||
| 276 | * hardlockup_detector_perf_init - Probe whether NMI event is available at all | ||
| 277 | */ | ||
| 278 | int __init hardlockup_detector_perf_init(void) | ||
| 279 | { | ||
| 280 | int ret = hardlockup_detector_event_create(); | ||
| 258 | 281 | ||
| 259 | /* watchdog_nmi_enable() expects this to be zero initially. */ | 282 | if (ret) { |
| 260 | if (atomic_dec_and_test(&watchdog_cpus)) | 283 | pr_info("Perf NMI watchdog permanently disabled\n"); |
| 261 | firstcpu_err = 0; | 284 | } else { |
| 285 | perf_event_release_kernel(this_cpu_read(watchdog_ev)); | ||
| 286 | this_cpu_write(watchdog_ev, NULL); | ||
| 262 | } | 287 | } |
| 288 | return ret; | ||
| 263 | } | 289 | } |
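
On the perf side, the per-CPU event handling is now split into explicit enable/disable/cleanup/stop/restart helpers instead of being buried in watchdog_nmi_enable(). A hedged sketch of how an init-time PMU quirk is expected to bracket its work with the stop/restart pair; do_pmu_quirk() is a placeholder, not a real function:

	static void __init apply_pmu_quirk(void)
	{
		/* Both helpers assert that the CPU hotplug lock is held. */
		cpus_read_lock();
		hardlockup_detector_perf_stop();
		do_pmu_quirk();
		hardlockup_detector_perf_restart();
		cpus_read_unlock();
	}
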
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2689b7c50c52..dfdad67d8f6c 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -1092,8 +1092,8 @@ config PROVE_LOCKING | |||
| 1092 | select DEBUG_MUTEXES | 1092 | select DEBUG_MUTEXES |
| 1093 | select DEBUG_RT_MUTEXES if RT_MUTEXES | 1093 | select DEBUG_RT_MUTEXES if RT_MUTEXES |
| 1094 | select DEBUG_LOCK_ALLOC | 1094 | select DEBUG_LOCK_ALLOC |
| 1095 | select LOCKDEP_CROSSRELEASE | 1095 | select LOCKDEP_CROSSRELEASE if BROKEN |
| 1096 | select LOCKDEP_COMPLETIONS | 1096 | select LOCKDEP_COMPLETIONS if BROKEN |
| 1097 | select TRACE_IRQFLAGS | 1097 | select TRACE_IRQFLAGS |
| 1098 | default n | 1098 | default n |
| 1099 | help | 1099 | help |
| @@ -1590,6 +1590,54 @@ config LATENCYTOP | |||
| 1590 | 1590 | ||
| 1591 | source kernel/trace/Kconfig | 1591 | source kernel/trace/Kconfig |
| 1592 | 1592 | ||
| 1593 | config PROVIDE_OHCI1394_DMA_INIT | ||
| 1594 | bool "Remote debugging over FireWire early on boot" | ||
| 1595 | depends on PCI && X86 | ||
| 1596 | help | ||
| 1597 | If you want to debug problems which hang or crash the kernel early | ||
| 1598 | on boot and the crashing machine has a FireWire port, you can use | ||
| 1599 | this feature to remotely access the memory of the crashed machine | ||
| 1600 | over FireWire. This employs remote DMA as part of the OHCI1394 | ||
| 1601 | specification which is now the standard for FireWire controllers. | ||
| 1602 | |||
| 1603 | With remote DMA, you can monitor the printk buffer remotely using | ||
| 1604 | firescope and access all memory below 4GB using fireproxy from gdb. | ||
| 1605 | Even controlling a kernel debugger is possible using remote DMA. | ||
| 1606 | |||
| 1607 | Usage: | ||
| 1608 | |||
| 1609 | If ohci1394_dma=early is used as boot parameter, it will initialize | ||
| 1610 | all OHCI1394 controllers which are found in the PCI config space. | ||
| 1611 | |||
| 1612 | As all changes to the FireWire bus such as enabling and disabling | ||
| 1613 | devices cause a bus reset and thereby disable remote DMA for all | ||
| 1614 | devices, be sure to have the cable plugged and FireWire enabled on | ||
| 1615 | the debugging host before booting the debug target for debugging. | ||
| 1616 | |||
| 1617 | This code (~1k) is freed after boot. By then, the firewire stack | ||
| 1618 | in charge of the OHCI-1394 controllers should be used instead. | ||
| 1619 | |||
| 1620 | See Documentation/debugging-via-ohci1394.txt for more information. | ||
| 1621 | |||
| 1622 | config DMA_API_DEBUG | ||
| 1623 | bool "Enable debugging of DMA-API usage" | ||
| 1624 | depends on HAVE_DMA_API_DEBUG | ||
| 1625 | help | ||
| 1626 | Enable this option to debug the use of the DMA API by device drivers. | ||
| 1627 | With this option you will be able to detect common bugs in device | ||
| 1628 | drivers like double-freeing of DMA mappings or freeing mappings that | ||
| 1629 | were never allocated. | ||
| 1630 | |||
| 1631 | This also attempts to catch cases where a page owned by DMA is | ||
| 1632 | accessed by the cpu in a way that could cause data corruption. For | ||
| 1633 | example, this enables cow_user_page() to check that the source page is | ||
| 1634 | not undergoing DMA. | ||
| 1635 | |||
| 1636 | This option causes a performance degradation. Use only if you want to | ||
| 1637 | debug device drivers and dma interactions. | ||
| 1638 | |||
| 1639 | If unsure, say N. | ||
| 1640 | |||
| 1593 | menu "Runtime Testing" | 1641 | menu "Runtime Testing" |
| 1594 | 1642 | ||
| 1595 | config LKDTM | 1643 | config LKDTM |
| @@ -1749,56 +1797,6 @@ config TEST_PARMAN | |||
| 1749 | 1797 | ||
| 1750 | If unsure, say N. | 1798 | If unsure, say N. |
| 1751 | 1799 | ||
| 1752 | endmenu # runtime tests | ||
| 1753 | |||
| 1754 | config PROVIDE_OHCI1394_DMA_INIT | ||
| 1755 | bool "Remote debugging over FireWire early on boot" | ||
| 1756 | depends on PCI && X86 | ||
| 1757 | help | ||
| 1758 | If you want to debug problems which hang or crash the kernel early | ||
| 1759 | on boot and the crashing machine has a FireWire port, you can use | ||
| 1760 | this feature to remotely access the memory of the crashed machine | ||
| 1761 | over FireWire. This employs remote DMA as part of the OHCI1394 | ||
| 1762 | specification which is now the standard for FireWire controllers. | ||
| 1763 | |||
| 1764 | With remote DMA, you can monitor the printk buffer remotely using | ||
| 1765 | firescope and access all memory below 4GB using fireproxy from gdb. | ||
| 1766 | Even controlling a kernel debugger is possible using remote DMA. | ||
| 1767 | |||
| 1768 | Usage: | ||
| 1769 | |||
| 1770 | If ohci1394_dma=early is used as boot parameter, it will initialize | ||
| 1771 | all OHCI1394 controllers which are found in the PCI config space. | ||
| 1772 | |||
| 1773 | As all changes to the FireWire bus such as enabling and disabling | ||
| 1774 | devices cause a bus reset and thereby disable remote DMA for all | ||
| 1775 | devices, be sure to have the cable plugged and FireWire enabled on | ||
| 1776 | the debugging host before booting the debug target for debugging. | ||
| 1777 | |||
| 1778 | This code (~1k) is freed after boot. By then, the firewire stack | ||
| 1779 | in charge of the OHCI-1394 controllers should be used instead. | ||
| 1780 | |||
| 1781 | See Documentation/debugging-via-ohci1394.txt for more information. | ||
| 1782 | |||
| 1783 | config DMA_API_DEBUG | ||
| 1784 | bool "Enable debugging of DMA-API usage" | ||
| 1785 | depends on HAVE_DMA_API_DEBUG | ||
| 1786 | help | ||
| 1787 | Enable this option to debug the use of the DMA API by device drivers. | ||
| 1788 | With this option you will be able to detect common bugs in device | ||
| 1789 | drivers like double-freeing of DMA mappings or freeing mappings that | ||
| 1790 | were never allocated. | ||
| 1791 | |||
| 1792 | This also attempts to catch cases where a page owned by DMA is | ||
| 1793 | accessed by the cpu in a way that could cause data corruption. For | ||
| 1794 | example, this enables cow_user_page() to check that the source page is | ||
| 1795 | not undergoing DMA. | ||
| 1796 | |||
| 1797 | This option causes a performance degradation. Use only if you want to | ||
| 1798 | debug device drivers and dma interactions. | ||
| 1799 | |||
| 1800 | If unsure, say N. | ||
| 1801 | |||
| 1802 | config TEST_LKM | 1800 | config TEST_LKM |
| 1803 | tristate "Test module loading with 'hello world' module" | 1801 | tristate "Test module loading with 'hello world' module" |
| 1804 | default n | 1802 | default n |
| @@ -1873,18 +1871,6 @@ config TEST_UDELAY | |||
| 1873 | 1871 | ||
| 1874 | If unsure, say N. | 1872 | If unsure, say N. |
| 1875 | 1873 | ||
| 1876 | config MEMTEST | ||
| 1877 | bool "Memtest" | ||
| 1878 | depends on HAVE_MEMBLOCK | ||
| 1879 | ---help--- | ||
| 1880 | This option adds a kernel parameter 'memtest', which allows memtest | ||
| 1881 | to be set. | ||
| 1882 | memtest=0, mean disabled; -- default | ||
| 1883 | memtest=1, mean do 1 test pattern; | ||
| 1884 | ... | ||
| 1885 | memtest=17, mean do 17 test patterns. | ||
| 1886 | If you are unsure how to answer this question, answer N. | ||
| 1887 | |||
| 1888 | config TEST_STATIC_KEYS | 1874 | config TEST_STATIC_KEYS |
| 1889 | tristate "Test static keys" | 1875 | tristate "Test static keys" |
| 1890 | default n | 1876 | default n |
| @@ -1894,16 +1880,6 @@ config TEST_STATIC_KEYS | |||
| 1894 | 1880 | ||
| 1895 | If unsure, say N. | 1881 | If unsure, say N. |
| 1896 | 1882 | ||
| 1897 | config BUG_ON_DATA_CORRUPTION | ||
| 1898 | bool "Trigger a BUG when data corruption is detected" | ||
| 1899 | select DEBUG_LIST | ||
| 1900 | help | ||
| 1901 | Select this option if the kernel should BUG when it encounters | ||
| 1902 | data corruption in kernel memory structures when they get checked | ||
| 1903 | for validity. | ||
| 1904 | |||
| 1905 | If unsure, say N. | ||
| 1906 | |||
| 1907 | config TEST_KMOD | 1883 | config TEST_KMOD |
| 1908 | tristate "kmod stress tester" | 1884 | tristate "kmod stress tester" |
| 1909 | default n | 1885 | default n |
| @@ -1941,6 +1917,29 @@ config TEST_DEBUG_VIRTUAL | |||
| 1941 | 1917 | ||
| 1942 | If unsure, say N. | 1918 | If unsure, say N. |
| 1943 | 1919 | ||
| 1920 | endmenu # runtime tests | ||
| 1921 | |||
| 1922 | config MEMTEST | ||
| 1923 | bool "Memtest" | ||
| 1924 | depends on HAVE_MEMBLOCK | ||
| 1925 | ---help--- | ||
| 1926 | This option adds a kernel parameter 'memtest', which allows memtest | ||
| 1927 | to be set. | ||
| 1928 | memtest=0, mean disabled; -- default | ||
| 1929 | memtest=1, mean do 1 test pattern; | ||
| 1930 | ... | ||
| 1931 | memtest=17, mean do 17 test patterns. | ||
| 1932 | If you are unsure how to answer this question, answer N. | ||
| 1933 | |||
| 1934 | config BUG_ON_DATA_CORRUPTION | ||
| 1935 | bool "Trigger a BUG when data corruption is detected" | ||
| 1936 | select DEBUG_LIST | ||
| 1937 | help | ||
| 1938 | Select this option if the kernel should BUG when it encounters | ||
| 1939 | data corruption in kernel memory structures when they get checked | ||
| 1940 | for validity. | ||
| 1941 | |||
| 1942 | If unsure, say N. | ||
| 1944 | 1943 | ||
| 1945 | source "samples/Kconfig" | 1944 | source "samples/Kconfig" |
| 1946 | 1945 | ||
| @@ -146,8 +146,8 @@ EXPORT_SYMBOL(idr_get_next_ext); | |||
| 146 | * idr_alloc() and idr_remove() (as long as the ID being removed is not | 146 | * idr_alloc() and idr_remove() (as long as the ID being removed is not |
| 147 | * the one being replaced!). | 147 | * the one being replaced!). |
| 148 | * | 148 | * |
| 149 | * Returns: 0 on success. %-ENOENT indicates that @id was not found. | 149 | * Returns: the old value on success. %-ENOENT indicates that @id was not |
| 150 | * %-EINVAL indicates that @id or @ptr were not valid. | 150 | * found. %-EINVAL indicates that @id or @ptr were not valid. |
| 151 | */ | 151 | */ |
| 152 | void *idr_replace(struct idr *idr, void *ptr, int id) | 152 | void *idr_replace(struct idr *idr, void *ptr, int id) |
| 153 | { | 153 | { |
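
With the corrected kernel-doc, the return value is the old pointer (or an ERR_PTR() code), so callers are expected to check it with IS_ERR() rather than treating it as an integer status. A minimal hedged sketch; struct my_obj is illustrative, not part of the IDR API:

	/* Sketch: replace the entry at @id and free the object it used to hold. */
	static int swap_entry(struct idr *idr, int id, struct my_obj *new_obj)
	{
		struct my_obj *old;

		old = idr_replace(idr, new_obj, id);	/* returns the old pointer */
		if (IS_ERR(old))
			return PTR_ERR(old);		/* -ENOENT or -EINVAL */

		kfree(old);				/* caller still owns the replaced object */
		return 0;
	}
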
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index cd0b5c964bd0..2b827b8a1d8c 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c | |||
| @@ -2031,11 +2031,13 @@ void locking_selftest(void) | |||
| 2031 | print_testname("mixed read-lock/lock-write ABBA"); | 2031 | print_testname("mixed read-lock/lock-write ABBA"); |
| 2032 | pr_cont(" |"); | 2032 | pr_cont(" |"); |
| 2033 | dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK); | 2033 | dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK); |
| 2034 | #ifdef CONFIG_PROVE_LOCKING | ||
| 2034 | /* | 2035 | /* |
| 2035 | * Lockdep does indeed fail here, but there's nothing we can do about | 2036 | * Lockdep does indeed fail here, but there's nothing we can do about |
| 2036 | * that now. Don't kill lockdep for it. | 2037 | * that now. Don't kill lockdep for it. |
| 2037 | */ | 2038 | */ |
| 2038 | unexpected_testcase_failures--; | 2039 | unexpected_testcase_failures--; |
| 2040 | #endif | ||
| 2039 | 2041 | ||
| 2040 | pr_cont(" |"); | 2042 | pr_cont(" |"); |
| 2041 | dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM); | 2043 | dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM); |
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c index bd3574312b82..141734d255e4 100644 --- a/lib/lz4/lz4_decompress.c +++ b/lib/lz4/lz4_decompress.c | |||
| @@ -85,8 +85,8 @@ static FORCE_INLINE int LZ4_decompress_generic( | |||
| 85 | const BYTE * const lowLimit = lowPrefix - dictSize; | 85 | const BYTE * const lowLimit = lowPrefix - dictSize; |
| 86 | 86 | ||
| 87 | const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize; | 87 | const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize; |
| 88 | const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; | 88 | static const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; |
| 89 | const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 }; | 89 | static const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 }; |
| 90 | 90 | ||
| 91 | const int safeDecode = (endOnInput == endOnInputSize); | 91 | const int safeDecode = (endOnInput == endOnInputSize); |
| 92 | const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB))); | 92 | const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB))); |
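
Marking the two decode tables static const keeps a single shared copy in .rodata instead of re-initializing them on the stack each time the hot decompression path is entered. A small illustration of the difference, not part of the LZ4 code:

	/* One shared instance in .rodata, no per-call initialization. */
	static int lookup_fast(unsigned int idx)
	{
		static const unsigned int table[8] = { 0, 1, 2, 1, 4, 4, 4, 4 };

		return table[idx & 7];
	}

	/* Without 'static', the compiler may rebuild the array on every call. */
	static int lookup_slow(unsigned int idx)
	{
		const unsigned int table[8] = { 0, 1, 2, 1, 4, 4, 4, 4 };

		return table[idx & 7];
	}
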
diff --git a/lib/ratelimit.c b/lib/ratelimit.c index 08f8043cac61..d01f47135239 100644 --- a/lib/ratelimit.c +++ b/lib/ratelimit.c | |||
| @@ -48,7 +48,9 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func) | |||
| 48 | if (time_is_before_jiffies(rs->begin + rs->interval)) { | 48 | if (time_is_before_jiffies(rs->begin + rs->interval)) { |
| 49 | if (rs->missed) { | 49 | if (rs->missed) { |
| 50 | if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) { | 50 | if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) { |
| 51 | pr_warn("%s: %d callbacks suppressed\n", func, rs->missed); | 51 | printk_deferred(KERN_WARNING |
| 52 | "%s: %d callbacks suppressed\n", | ||
| 53 | func, rs->missed); | ||
| 52 | rs->missed = 0; | 54 | rs->missed = 0; |
| 53 | } | 55 | } |
| 54 | } | 56 | } |
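
___ratelimit() can be reached from paths that already hold printk or scheduler locks, which is why the "callbacks suppressed" notice now goes through printk_deferred(). The consumer side is unchanged; a minimal hedged sketch of a rate-limited call site (the names are illustrative):

	/* Sketch: allow at most 10 messages every 5 seconds from this call site. */
	static DEFINE_RATELIMIT_STATE(my_ratelimit, 5 * HZ, 10);

	static void report_event(int err)
	{
		if (__ratelimit(&my_ratelimit))
			pr_warn("device error %d\n", err);
	}
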
| @@ -460,7 +460,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, | |||
| 460 | 460 | ||
| 461 | trace_cma_alloc(pfn, page, count, align); | 461 | trace_cma_alloc(pfn, page, count, align); |
| 462 | 462 | ||
| 463 | if (ret) { | 463 | if (ret && !(gfp_mask & __GFP_NOWARN)) { |
| 464 | pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n", | 464 | pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n", |
| 465 | __func__, count, ret); | 465 | __func__, count, ret); |
| 466 | cma_debug_show_areas(cma); | 466 | cma_debug_show_areas(cma); |
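
With the __GFP_NOWARN check, a caller that treats CMA as a best-effort pool can opt out of the failure message and debug dump. A short hedged sketch; the cma region and page count are assumed to come from the caller:

	/* Sketch: try an optional CMA allocation without cluttering the log. */
	static struct page *try_optional_cma_alloc(struct cma *cma, size_t nr_pages)
	{
		return cma_alloc(cma, nr_pages, 0, GFP_KERNEL | __GFP_NOWARN);
	}
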
diff --git a/mm/compaction.c b/mm/compaction.c index fb548e4c7bd4..03d31a875341 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
| @@ -1999,17 +1999,14 @@ void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx) | |||
| 1999 | if (pgdat->kcompactd_max_order < order) | 1999 | if (pgdat->kcompactd_max_order < order) |
| 2000 | pgdat->kcompactd_max_order = order; | 2000 | pgdat->kcompactd_max_order = order; |
| 2001 | 2001 | ||
| 2002 | /* | ||
| 2003 | * Pairs with implicit barrier in wait_event_freezable() | ||
| 2004 | * such that wakeups are not missed in the lockless | ||
| 2005 | * waitqueue_active() call. | ||
| 2006 | */ | ||
| 2007 | smp_acquire__after_ctrl_dep(); | ||
| 2008 | |||
| 2009 | if (pgdat->kcompactd_classzone_idx > classzone_idx) | 2002 | if (pgdat->kcompactd_classzone_idx > classzone_idx) |
| 2010 | pgdat->kcompactd_classzone_idx = classzone_idx; | 2003 | pgdat->kcompactd_classzone_idx = classzone_idx; |
| 2011 | 2004 | ||
| 2012 | if (!waitqueue_active(&pgdat->kcompactd_wait)) | 2005 | /* |
| 2006 | * Pairs with implicit barrier in wait_event_freezable() | ||
| 2007 | * such that wakeups are not missed. | ||
| 2008 | */ | ||
| 2009 | if (!wq_has_sleeper(&pgdat->kcompactd_wait)) | ||
| 2013 | return; | 2010 | return; |
| 2014 | 2011 | ||
| 2015 | if (!kcompactd_node_suitable(pgdat)) | 2012 | if (!kcompactd_node_suitable(pgdat)) |
diff --git a/mm/filemap.c b/mm/filemap.c index db250d0e0565..594d73fef8b4 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
| @@ -620,6 +620,14 @@ int file_check_and_advance_wb_err(struct file *file) | |||
| 620 | trace_file_check_and_advance_wb_err(file, old); | 620 | trace_file_check_and_advance_wb_err(file, old); |
| 621 | spin_unlock(&file->f_lock); | 621 | spin_unlock(&file->f_lock); |
| 622 | } | 622 | } |
| 623 | |||
| 624 | /* | ||
| 625 | * We're mostly using this function as a drop in replacement for | ||
| 626 | * filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect | ||
| 627 | * that the legacy code would have had on these flags. | ||
| 628 | */ | ||
| 629 | clear_bit(AS_EIO, &mapping->flags); | ||
| 630 | clear_bit(AS_ENOSPC, &mapping->flags); | ||
| 623 | return err; | 631 | return err; |
| 624 | } | 632 | } |
| 625 | EXPORT_SYMBOL(file_check_and_advance_wb_err); | 633 | EXPORT_SYMBOL(file_check_and_advance_wb_err); |
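Clearing AS_EIO/AS_ENOSPC here keeps legacy filemap_check_errors() users from seeing a writeback error that a caller has already consumed through the errseq_t path, emulating what the old helpers did to those flags. A hedged sketch of the usual consumer, an ->fsync() implementation built on the wrapper that ends in file_check_and_advance_wb_err() (filesystem-specific flushing omitted):

    #include <linux/fs.h>

    static int example_fsync(struct file *file, loff_t start, loff_t end,
                             int datasync)
    {
            /* writes back the range, then reports any errseq_t error */
            return file_write_and_wait_range(file, start, end);
    }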
diff --git a/mm/ksm.c b/mm/ksm.c --- a/mm/ksm.c +++ b/mm/ksm.c | |||
| @@ -1990,6 +1990,7 @@ static void stable_tree_append(struct rmap_item *rmap_item, | |||
| 1990 | */ | 1990 | */ |
| 1991 | static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) | 1991 | static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) |
| 1992 | { | 1992 | { |
| 1993 | struct mm_struct *mm = rmap_item->mm; | ||
| 1993 | struct rmap_item *tree_rmap_item; | 1994 | struct rmap_item *tree_rmap_item; |
| 1994 | struct page *tree_page = NULL; | 1995 | struct page *tree_page = NULL; |
| 1995 | struct stable_node *stable_node; | 1996 | struct stable_node *stable_node; |
| @@ -2062,9 +2063,11 @@ static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) | |||
| 2062 | if (ksm_use_zero_pages && (checksum == zero_checksum)) { | 2063 | if (ksm_use_zero_pages && (checksum == zero_checksum)) { |
| 2063 | struct vm_area_struct *vma; | 2064 | struct vm_area_struct *vma; |
| 2064 | 2065 | ||
| 2065 | vma = find_mergeable_vma(rmap_item->mm, rmap_item->address); | 2066 | down_read(&mm->mmap_sem); |
| 2067 | vma = find_mergeable_vma(mm, rmap_item->address); | ||
| 2066 | err = try_to_merge_one_page(vma, page, | 2068 | err = try_to_merge_one_page(vma, page, |
| 2067 | ZERO_PAGE(rmap_item->address)); | 2069 | ZERO_PAGE(rmap_item->address)); |
| 2070 | up_read(&mm->mmap_sem); | ||
| 2068 | /* | 2071 | /* |
| 2069 | * In case of failure, the page was not really empty, so we | 2072 | * In case of failure, the page was not really empty, so we |
| 2070 | * need to continue. Otherwise we're done. | 2073 | * need to continue. Otherwise we're done. |
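The hunk above pins mm->mmap_sem for reading around find_mergeable_vma(); without it the VMA returned for the zero-page merge could be unmapped and freed while try_to_merge_one_page() is still using it. The general rule it restores, as a stand-alone sketch:

    #include <linux/mm.h>

    /* Any find_vma()-style lookup is only valid under mmap_sem. */
    static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
    {
            struct vm_area_struct *vma;
            bool mapped;

            down_read(&mm->mmap_sem);
            vma = find_vma(mm, addr);
            mapped = vma && vma->vm_start <= addr;
            up_read(&mm->mmap_sem);

            return mapped;
    }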
diff --git a/mm/list_lru.c b/mm/list_lru.c index 7a40fa2be858..f141f0c80ff3 100644 --- a/mm/list_lru.c +++ b/mm/list_lru.c | |||
| @@ -325,12 +325,12 @@ static int memcg_init_list_lru_node(struct list_lru_node *nlru) | |||
| 325 | { | 325 | { |
| 326 | int size = memcg_nr_cache_ids; | 326 | int size = memcg_nr_cache_ids; |
| 327 | 327 | ||
| 328 | nlru->memcg_lrus = kmalloc(size * sizeof(void *), GFP_KERNEL); | 328 | nlru->memcg_lrus = kvmalloc(size * sizeof(void *), GFP_KERNEL); |
| 329 | if (!nlru->memcg_lrus) | 329 | if (!nlru->memcg_lrus) |
| 330 | return -ENOMEM; | 330 | return -ENOMEM; |
| 331 | 331 | ||
| 332 | if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) { | 332 | if (__memcg_init_list_lru_node(nlru->memcg_lrus, 0, size)) { |
| 333 | kfree(nlru->memcg_lrus); | 333 | kvfree(nlru->memcg_lrus); |
| 334 | return -ENOMEM; | 334 | return -ENOMEM; |
| 335 | } | 335 | } |
| 336 | 336 | ||
| @@ -340,7 +340,7 @@ static int memcg_init_list_lru_node(struct list_lru_node *nlru) | |||
| 340 | static void memcg_destroy_list_lru_node(struct list_lru_node *nlru) | 340 | static void memcg_destroy_list_lru_node(struct list_lru_node *nlru) |
| 341 | { | 341 | { |
| 342 | __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids); | 342 | __memcg_destroy_list_lru_node(nlru->memcg_lrus, 0, memcg_nr_cache_ids); |
| 343 | kfree(nlru->memcg_lrus); | 343 | kvfree(nlru->memcg_lrus); |
| 344 | } | 344 | } |
| 345 | 345 | ||
| 346 | static int memcg_update_list_lru_node(struct list_lru_node *nlru, | 346 | static int memcg_update_list_lru_node(struct list_lru_node *nlru, |
| @@ -351,12 +351,12 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru, | |||
| 351 | BUG_ON(old_size > new_size); | 351 | BUG_ON(old_size > new_size); |
| 352 | 352 | ||
| 353 | old = nlru->memcg_lrus; | 353 | old = nlru->memcg_lrus; |
| 354 | new = kmalloc(new_size * sizeof(void *), GFP_KERNEL); | 354 | new = kvmalloc(new_size * sizeof(void *), GFP_KERNEL); |
| 355 | if (!new) | 355 | if (!new) |
| 356 | return -ENOMEM; | 356 | return -ENOMEM; |
| 357 | 357 | ||
| 358 | if (__memcg_init_list_lru_node(new, old_size, new_size)) { | 358 | if (__memcg_init_list_lru_node(new, old_size, new_size)) { |
| 359 | kfree(new); | 359 | kvfree(new); |
| 360 | return -ENOMEM; | 360 | return -ENOMEM; |
| 361 | } | 361 | } |
| 362 | 362 | ||
| @@ -373,7 +373,7 @@ static int memcg_update_list_lru_node(struct list_lru_node *nlru, | |||
| 373 | nlru->memcg_lrus = new; | 373 | nlru->memcg_lrus = new; |
| 374 | spin_unlock_irq(&nlru->lock); | 374 | spin_unlock_irq(&nlru->lock); |
| 375 | 375 | ||
| 376 | kfree(old); | 376 | kvfree(old); |
| 377 | return 0; | 377 | return 0; |
| 378 | } | 378 | } |
| 379 | 379 | ||
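memcg_nr_cache_ids grows with the number of cgroups, so the per-node array can become larger than kmalloc() will reliably satisfy; kvmalloc() keeps the fast physically-contiguous attempt but falls back to vmalloc. The one rule the conversion has to respect is that such memory must always be released with kvfree(), sketched here with hypothetical helpers:

    #include <linux/mm.h>
    #include <linux/slab.h>

    static void **alloc_slots(int nr)
    {
            return kvmalloc(nr * sizeof(void *), GFP_KERNEL);
    }

    static void free_slots(void **slots)
    {
            kvfree(slots);  /* handles both kmalloc- and vmalloc-backed memory */
    }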
diff --git a/mm/madvise.c b/mm/madvise.c index 21261ff0466f..fd70d6aabc3e 100644 --- a/mm/madvise.c +++ b/mm/madvise.c | |||
| @@ -625,18 +625,26 @@ static int madvise_inject_error(int behavior, | |||
| 625 | { | 625 | { |
| 626 | struct page *page; | 626 | struct page *page; |
| 627 | struct zone *zone; | 627 | struct zone *zone; |
| 628 | unsigned int order; | ||
| 628 | 629 | ||
| 629 | if (!capable(CAP_SYS_ADMIN)) | 630 | if (!capable(CAP_SYS_ADMIN)) |
| 630 | return -EPERM; | 631 | return -EPERM; |
| 631 | 632 | ||
| 632 | for (; start < end; start += PAGE_SIZE << | 633 | |
| 633 | compound_order(compound_head(page))) { | 634 | for (; start < end; start += PAGE_SIZE << order) { |
| 634 | int ret; | 635 | int ret; |
| 635 | 636 | ||
| 636 | ret = get_user_pages_fast(start, 1, 0, &page); | 637 | ret = get_user_pages_fast(start, 1, 0, &page); |
| 637 | if (ret != 1) | 638 | if (ret != 1) |
| 638 | return ret; | 639 | return ret; |
| 639 | 640 | ||
| 641 | /* | ||
| 642 | * When soft offlining hugepages, after migrating the page | ||
| 643 | * we dissolve it, therefore in the second loop "page" will | ||
| 644 | * no longer be a compound page, and order will be 0. | ||
| 645 | */ | ||
| 646 | order = compound_order(compound_head(page)); | ||
| 647 | |||
| 640 | if (PageHWPoison(page)) { | 648 | if (PageHWPoison(page)) { |
| 641 | put_page(page); | 649 | put_page(page); |
| 642 | continue; | 650 | continue; |
| @@ -749,6 +757,9 @@ madvise_behavior_valid(int behavior) | |||
| 749 | * MADV_DONTFORK - omit this area from child's address space when forking: | 757 | * MADV_DONTFORK - omit this area from child's address space when forking: |
| 750 | * typically, to avoid COWing pages pinned by get_user_pages(). | 758 | * typically, to avoid COWing pages pinned by get_user_pages(). |
| 751 | * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. | 759 | * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. |
| 760 | * MADV_WIPEONFORK - present the child process with zero-filled memory in this | ||
| 761 | * range after a fork. | ||
| 762 | * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK | ||
| 752 | * MADV_HWPOISON - trigger memory error handler as if the given memory range | 763 | * MADV_HWPOISON - trigger memory error handler as if the given memory range |
| 753 | * were corrupted by unrecoverable hardware memory failure. | 764 | * were corrupted by unrecoverable hardware memory failure. |
| 754 | * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory. | 765 | * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory. |
| @@ -769,7 +780,9 @@ madvise_behavior_valid(int behavior) | |||
| 769 | * zero - success | 780 | * zero - success |
| 770 | * -EINVAL - start + len < 0, start is not page-aligned, | 781 | * -EINVAL - start + len < 0, start is not page-aligned, |
| 771 | * "behavior" is not a valid value, or application | 782 | * "behavior" is not a valid value, or application |
| 772 | * is attempting to release locked or shared pages. | 783 | * is attempting to release locked or shared pages, |
| 784 | * or the specified address range includes file, Huge TLB, | ||
| 785 | * MAP_SHARED or VMPFNMAP range. | ||
| 773 | * -ENOMEM - addresses in the specified range are not currently | 786 | * -ENOMEM - addresses in the specified range are not currently |
| 774 | * mapped, or are outside the AS of the process. | 787 | * mapped, or are outside the AS of the process. |
| 775 | * -EIO - an I/O error occurred while paging in data. | 788 | * -EIO - an I/O error occurred while paging in data. |
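The new MADV_WIPEONFORK/MADV_KEEPONFORK lines document flags that only make sense on private anonymous memory, which is why the -EINVAL text now also lists file-backed, Huge TLB, MAP_SHARED and pfn-mapped ranges. A userspace sketch of the intended use, assuming a libc that already exposes the constant:

    #include <sys/mman.h>

    /* Ask the kernel to give a forked child zero-filled pages for a
     * private anonymous buffer holding secrets. */
    static int wipe_on_fork(void *buf, size_t len)
    {
            return madvise(buf, len, MADV_WIPEONFORK);
    }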
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 15af3da5af02..d5f3a62887cf 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
| @@ -1777,6 +1777,10 @@ static void drain_local_stock(struct work_struct *dummy) | |||
| 1777 | struct memcg_stock_pcp *stock; | 1777 | struct memcg_stock_pcp *stock; |
| 1778 | unsigned long flags; | 1778 | unsigned long flags; |
| 1779 | 1779 | ||
| 1780 | /* | ||
| 1781 | * The only protection from memory hotplug vs. drain_stock races is | ||
| 1782 | * that we always operate on local CPU stock here with IRQ disabled | ||
| 1783 | */ | ||
| 1780 | local_irq_save(flags); | 1784 | local_irq_save(flags); |
| 1781 | 1785 | ||
| 1782 | stock = this_cpu_ptr(&memcg_stock); | 1786 | stock = this_cpu_ptr(&memcg_stock); |
| @@ -1821,27 +1825,33 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) | |||
| 1821 | /* If someone's already draining, avoid adding running more workers. */ | 1825 | /* If someone's already draining, avoid adding running more workers. */ |
| 1822 | if (!mutex_trylock(&percpu_charge_mutex)) | 1826 | if (!mutex_trylock(&percpu_charge_mutex)) |
| 1823 | return; | 1827 | return; |
| 1824 | /* Notify other cpus that system-wide "drain" is running */ | 1828 | /* |
| 1825 | get_online_cpus(); | 1829 | * Notify other cpus that system-wide "drain" is running |
| 1830 | * We do not care about races with the cpu hotplug because cpu down | ||
| 1831 | * as well as workers from this path always operate on the local | ||
| 1832 | * per-cpu data. CPU up doesn't touch memcg_stock at all. | ||
| 1833 | */ | ||
| 1826 | curcpu = get_cpu(); | 1834 | curcpu = get_cpu(); |
| 1827 | for_each_online_cpu(cpu) { | 1835 | for_each_online_cpu(cpu) { |
| 1828 | struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); | 1836 | struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); |
| 1829 | struct mem_cgroup *memcg; | 1837 | struct mem_cgroup *memcg; |
| 1830 | 1838 | ||
| 1831 | memcg = stock->cached; | 1839 | memcg = stock->cached; |
| 1832 | if (!memcg || !stock->nr_pages) | 1840 | if (!memcg || !stock->nr_pages || !css_tryget(&memcg->css)) |
| 1833 | continue; | 1841 | continue; |
| 1834 | if (!mem_cgroup_is_descendant(memcg, root_memcg)) | 1842 | if (!mem_cgroup_is_descendant(memcg, root_memcg)) { |
| 1843 | css_put(&memcg->css); | ||
| 1835 | continue; | 1844 | continue; |
| 1845 | } | ||
| 1836 | if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { | 1846 | if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { |
| 1837 | if (cpu == curcpu) | 1847 | if (cpu == curcpu) |
| 1838 | drain_local_stock(&stock->work); | 1848 | drain_local_stock(&stock->work); |
| 1839 | else | 1849 | else |
| 1840 | schedule_work_on(cpu, &stock->work); | 1850 | schedule_work_on(cpu, &stock->work); |
| 1841 | } | 1851 | } |
| 1852 | css_put(&memcg->css); | ||
| 1842 | } | 1853 | } |
| 1843 | put_cpu(); | 1854 | put_cpu(); |
| 1844 | put_online_cpus(); | ||
| 1845 | mutex_unlock(&percpu_charge_mutex); | 1855 | mutex_unlock(&percpu_charge_mutex); |
| 1846 | } | 1856 | } |
| 1847 | 1857 | ||
| @@ -5648,7 +5658,8 @@ static void uncharge_batch(const struct uncharge_gather *ug) | |||
| 5648 | static void uncharge_page(struct page *page, struct uncharge_gather *ug) | 5658 | static void uncharge_page(struct page *page, struct uncharge_gather *ug) |
| 5649 | { | 5659 | { |
| 5650 | VM_BUG_ON_PAGE(PageLRU(page), page); | 5660 | VM_BUG_ON_PAGE(PageLRU(page), page); |
| 5651 | VM_BUG_ON_PAGE(!PageHWPoison(page) && page_count(page), page); | 5661 | VM_BUG_ON_PAGE(page_count(page) && !is_zone_device_page(page) && |
| 5662 | !PageHWPoison(page) , page); | ||
| 5652 | 5663 | ||
| 5653 | if (!page->mem_cgroup) | 5664 | if (!page->mem_cgroup) |
| 5654 | return; | 5665 | return; |
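In drain_all_stock() the cpu-hotplug lock is gone because, per the new comment, both hotplug callbacks and this path only ever touch the local CPU's stock; what still needs protection is the memcg itself, so each cached memcg is pinned with css_tryget() for as long as work referencing it may be scheduled. The pinning pattern in isolation (names hypothetical):

    #include <linux/memcontrol.h>

    static void visit_if_alive(struct mem_cgroup *memcg)
    {
            if (!memcg || !css_tryget(&memcg->css))
                    return;                 /* being destroyed: skip it */
            /* ... schedule or run work that dereferences memcg ... */
            css_put(&memcg->css);
    }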
diff --git a/mm/memory.c b/mm/memory.c index ec4e15494901..a728bed16c20 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -845,7 +845,7 @@ struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr, | |||
| 845 | * vm_normal_page() so that we do not have to special case all | 845 | * vm_normal_page() so that we do not have to special case all |
| 846 | * call site of vm_normal_page(). | 846 | * call site of vm_normal_page(). |
| 847 | */ | 847 | */ |
| 848 | if (likely(pfn < highest_memmap_pfn)) { | 848 | if (likely(pfn <= highest_memmap_pfn)) { |
| 849 | struct page *page = pfn_to_page(pfn); | 849 | struct page *page = pfn_to_page(pfn); |
| 850 | 850 | ||
| 851 | if (is_device_public_page(page)) { | 851 | if (is_device_public_page(page)) { |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index e882cb6da994..d4b5f29906b9 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
| @@ -328,6 +328,7 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn, | |||
| 328 | if (err && (err != -EEXIST)) | 328 | if (err && (err != -EEXIST)) |
| 329 | break; | 329 | break; |
| 330 | err = 0; | 330 | err = 0; |
| 331 | cond_resched(); | ||
| 331 | } | 332 | } |
| 332 | vmemmap_populate_print_last(); | 333 | vmemmap_populate_print_last(); |
| 333 | out: | 334 | out: |
| @@ -337,7 +338,7 @@ EXPORT_SYMBOL_GPL(__add_pages); | |||
| 337 | 338 | ||
| 338 | #ifdef CONFIG_MEMORY_HOTREMOVE | 339 | #ifdef CONFIG_MEMORY_HOTREMOVE |
| 339 | /* find the smallest valid pfn in the range [start_pfn, end_pfn) */ | 340 | /* find the smallest valid pfn in the range [start_pfn, end_pfn) */ |
| 340 | static int find_smallest_section_pfn(int nid, struct zone *zone, | 341 | static unsigned long find_smallest_section_pfn(int nid, struct zone *zone, |
| 341 | unsigned long start_pfn, | 342 | unsigned long start_pfn, |
| 342 | unsigned long end_pfn) | 343 | unsigned long end_pfn) |
| 343 | { | 344 | { |
| @@ -362,7 +363,7 @@ static int find_smallest_section_pfn(int nid, struct zone *zone, | |||
| 362 | } | 363 | } |
| 363 | 364 | ||
| 364 | /* find the biggest valid pfn in the range [start_pfn, end_pfn). */ | 365 | /* find the biggest valid pfn in the range [start_pfn, end_pfn). */ |
| 365 | static int find_biggest_section_pfn(int nid, struct zone *zone, | 366 | static unsigned long find_biggest_section_pfn(int nid, struct zone *zone, |
| 366 | unsigned long start_pfn, | 367 | unsigned long start_pfn, |
| 367 | unsigned long end_pfn) | 368 | unsigned long end_pfn) |
| 368 | { | 369 | { |
| @@ -550,7 +551,7 @@ static int __remove_section(struct zone *zone, struct mem_section *ms, | |||
| 550 | return ret; | 551 | return ret; |
| 551 | 552 | ||
| 552 | scn_nr = __section_nr(ms); | 553 | scn_nr = __section_nr(ms); |
| 553 | start_pfn = section_nr_to_pfn(scn_nr); | 554 | start_pfn = section_nr_to_pfn((unsigned long)scn_nr); |
| 554 | __remove_zone(zone, start_pfn); | 555 | __remove_zone(zone, start_pfn); |
| 555 | 556 | ||
| 556 | sparse_remove_one_section(zone, ms, map_offset); | 557 | sparse_remove_one_section(zone, ms, map_offset); |
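Widening find_smallest_section_pfn()/find_biggest_section_pfn() to return unsigned long, and casting the section number before section_nr_to_pfn(), both guard against 32-bit truncation of page frame numbers. The arithmetic that makes this bite on large machines: with 4 KiB pages,

    2^31 pfns * 4 KiB/page = 2^43 bytes = 8 TiB

so any system with physical addresses above 8 TiB has pfns that no longer fit in an int, and a section number shifted left inside section_nr_to_pfn() can likewise overflow if the shift is done in 32-bit arithmetic.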
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 006ba625c0b8..a2af6d58a68f 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
| @@ -1920,8 +1920,11 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, | |||
| 1920 | struct page *page; | 1920 | struct page *page; |
| 1921 | 1921 | ||
| 1922 | page = __alloc_pages(gfp, order, nid); | 1922 | page = __alloc_pages(gfp, order, nid); |
| 1923 | if (page && page_to_nid(page) == nid) | 1923 | if (page && page_to_nid(page) == nid) { |
| 1924 | inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); | 1924 | preempt_disable(); |
| 1925 | __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT); | ||
| 1926 | preempt_enable(); | ||
| 1927 | } | ||
| 1925 | return page; | 1928 | return page; |
| 1926 | } | 1929 | } |
| 1927 | 1930 | ||
diff --git a/mm/migrate.c b/mm/migrate.c index 6954c1435833..e00814ca390e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
| @@ -2146,8 +2146,9 @@ static int migrate_vma_collect_hole(unsigned long start, | |||
| 2146 | unsigned long addr; | 2146 | unsigned long addr; |
| 2147 | 2147 | ||
| 2148 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { | 2148 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { |
| 2149 | migrate->src[migrate->npages++] = MIGRATE_PFN_MIGRATE; | 2149 | migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; |
| 2150 | migrate->dst[migrate->npages] = 0; | 2150 | migrate->dst[migrate->npages] = 0; |
| 2151 | migrate->npages++; | ||
| 2151 | migrate->cpages++; | 2152 | migrate->cpages++; |
| 2152 | } | 2153 | } |
| 2153 | 2154 | ||
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 99736e026712..dee0f75c3013 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
| @@ -40,6 +40,7 @@ | |||
| 40 | #include <linux/ratelimit.h> | 40 | #include <linux/ratelimit.h> |
| 41 | #include <linux/kthread.h> | 41 | #include <linux/kthread.h> |
| 42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
| 43 | #include <linux/mmu_notifier.h> | ||
| 43 | 44 | ||
| 44 | #include <asm/tlb.h> | 45 | #include <asm/tlb.h> |
| 45 | #include "internal.h" | 46 | #include "internal.h" |
| @@ -495,6 +496,21 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) | |||
| 495 | } | 496 | } |
| 496 | 497 | ||
| 497 | /* | 498 | /* |
| 499 | * If the mm has notifiers then we would need to invalidate them around | ||
| 500 | * unmap_page_range and that is risky because notifiers can sleep and | ||
| 501 | * what they do is basically undeterministic. So let's have a short | ||
| 502 | * sleep to give the oom victim some more time. | ||
| 503 | * TODO: we really want to get rid of this ugly hack and make sure that | ||
| 504 | * notifiers cannot block for unbounded amount of time and add | ||
| 505 | * mmu_notifier_invalidate_range_{start,end} around unmap_page_range | ||
| 506 | */ | ||
| 507 | if (mm_has_notifiers(mm)) { | ||
| 508 | up_read(&mm->mmap_sem); | ||
| 509 | schedule_timeout_idle(HZ); | ||
| 510 | goto unlock_oom; | ||
| 511 | } | ||
| 512 | |||
| 513 | /* | ||
| 498 | * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't | 514 | * MMF_OOM_SKIP is set by exit_mmap when the OOM reaper can't |
| 499 | * work on the mm anymore. The check for MMF_OOM_SKIP must run | 515 | * work on the mm anymore. The check for MMF_OOM_SKIP must run |
| 500 | * under mmap_sem for reading because it serializes against the | 516 | * under mmap_sem for reading because it serializes against the |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c841af88836a..77e4d3c5c57b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -1190,7 +1190,7 @@ static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone, | |||
| 1190 | } | 1190 | } |
| 1191 | 1191 | ||
| 1192 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT | 1192 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
| 1193 | static void init_reserved_page(unsigned long pfn) | 1193 | static void __meminit init_reserved_page(unsigned long pfn) |
| 1194 | { | 1194 | { |
| 1195 | pg_data_t *pgdat; | 1195 | pg_data_t *pgdat; |
| 1196 | int nid, zid; | 1196 | int nid, zid; |
| @@ -5367,6 +5367,7 @@ not_early: | |||
| 5367 | 5367 | ||
| 5368 | __init_single_page(page, pfn, zone, nid); | 5368 | __init_single_page(page, pfn, zone, nid); |
| 5369 | set_pageblock_migratetype(page, MIGRATE_MOVABLE); | 5369 | set_pageblock_migratetype(page, MIGRATE_MOVABLE); |
| 5370 | cond_resched(); | ||
| 5370 | } else { | 5371 | } else { |
| 5371 | __init_single_pfn(pfn, zone, nid); | 5372 | __init_single_pfn(pfn, zone, nid); |
| 5372 | } | 5373 | } |
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 6a03946469a9..53afbb919a1c 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c | |||
| @@ -6,17 +6,6 @@ | |||
| 6 | 6 | ||
| 7 | #include "internal.h" | 7 | #include "internal.h" |
| 8 | 8 | ||
| 9 | static inline bool check_pmd(struct page_vma_mapped_walk *pvmw) | ||
| 10 | { | ||
| 11 | pmd_t pmde; | ||
| 12 | /* | ||
| 13 | * Make sure we don't re-load pmd between present and !trans_huge check. | ||
| 14 | * We need a consistent view. | ||
| 15 | */ | ||
| 16 | pmde = READ_ONCE(*pvmw->pmd); | ||
| 17 | return pmd_present(pmde) && !pmd_trans_huge(pmde); | ||
| 18 | } | ||
| 19 | |||
| 20 | static inline bool not_found(struct page_vma_mapped_walk *pvmw) | 9 | static inline bool not_found(struct page_vma_mapped_walk *pvmw) |
| 21 | { | 10 | { |
| 22 | page_vma_mapped_walk_done(pvmw); | 11 | page_vma_mapped_walk_done(pvmw); |
| @@ -116,6 +105,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) | |||
| 116 | pgd_t *pgd; | 105 | pgd_t *pgd; |
| 117 | p4d_t *p4d; | 106 | p4d_t *p4d; |
| 118 | pud_t *pud; | 107 | pud_t *pud; |
| 108 | pmd_t pmde; | ||
| 119 | 109 | ||
| 120 | /* The only possible pmd mapping has been handled on last iteration */ | 110 | /* The only possible pmd mapping has been handled on last iteration */ |
| 121 | if (pvmw->pmd && !pvmw->pte) | 111 | if (pvmw->pmd && !pvmw->pte) |
| @@ -148,7 +138,13 @@ restart: | |||
| 148 | if (!pud_present(*pud)) | 138 | if (!pud_present(*pud)) |
| 149 | return false; | 139 | return false; |
| 150 | pvmw->pmd = pmd_offset(pud, pvmw->address); | 140 | pvmw->pmd = pmd_offset(pud, pvmw->address); |
| 151 | if (pmd_trans_huge(*pvmw->pmd) || is_pmd_migration_entry(*pvmw->pmd)) { | 141 | /* |
| 142 | * Make sure the pmd value isn't cached in a register by the | ||
| 143 | * compiler and used as a stale value after we've observed a | ||
| 144 | * subsequent update. | ||
| 145 | */ | ||
| 146 | pmde = READ_ONCE(*pvmw->pmd); | ||
| 147 | if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) { | ||
| 152 | pvmw->ptl = pmd_lock(mm, pvmw->pmd); | 148 | pvmw->ptl = pmd_lock(mm, pvmw->pmd); |
| 153 | if (likely(pmd_trans_huge(*pvmw->pmd))) { | 149 | if (likely(pmd_trans_huge(*pvmw->pmd))) { |
| 154 | if (pvmw->flags & PVMW_MIGRATION) | 150 | if (pvmw->flags & PVMW_MIGRATION) |
| @@ -167,17 +163,15 @@ restart: | |||
| 167 | return not_found(pvmw); | 163 | return not_found(pvmw); |
| 168 | return true; | 164 | return true; |
| 169 | } | 165 | } |
| 170 | } else | 166 | } |
| 171 | WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); | ||
| 172 | return not_found(pvmw); | 167 | return not_found(pvmw); |
| 173 | } else { | 168 | } else { |
| 174 | /* THP pmd was split under us: handle on pte level */ | 169 | /* THP pmd was split under us: handle on pte level */ |
| 175 | spin_unlock(pvmw->ptl); | 170 | spin_unlock(pvmw->ptl); |
| 176 | pvmw->ptl = NULL; | 171 | pvmw->ptl = NULL; |
| 177 | } | 172 | } |
| 178 | } else { | 173 | } else if (!pmd_present(pmde)) { |
| 179 | if (!check_pmd(pvmw)) | 174 | return false; |
| 180 | return false; | ||
| 181 | } | 175 | } |
| 182 | if (!map_pte(pvmw)) | 176 | if (!map_pte(pvmw)) |
| 183 | goto next_pte; | 177 | goto next_pte; |
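The open-coded READ_ONCE() above replaces the removed check_pmd() helper while keeping its one important property: the pmd is read exactly once, and every subsequent test (trans-huge, migration entry, present) is made against that snapshot, so a concurrent THP split cannot make the checks disagree with each other. The removed helper, restated as a sketch of that idiom:

    static bool pmd_maps_pte_table(pmd_t *pmdp)
    {
            pmd_t pmde = READ_ONCE(*pmdp);  /* one consistent view */

            return pmd_present(pmde) && !pmd_trans_huge(pmde);
    }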
diff --git a/mm/rodata_test.c b/mm/rodata_test.c index 6bb4deb12e78..d908c8769b48 100644 --- a/mm/rodata_test.c +++ b/mm/rodata_test.c | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
| 15 | #include <asm/sections.h> | 15 | #include <asm/sections.h> |
| 16 | 16 | ||
| 17 | const int rodata_test_data = 0xC3; | 17 | static const int rodata_test_data = 0xC3; |
| 18 | 18 | ||
| 19 | void rodata_test(void) | 19 | void rodata_test(void) |
| 20 | { | 20 | { |
diff --git a/mm/slab_common.c b/mm/slab_common.c index 904a83be82de..80164599ca5d 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
| @@ -165,9 +165,9 @@ static int init_memcg_params(struct kmem_cache *s, | |||
| 165 | if (!memcg_nr_cache_ids) | 165 | if (!memcg_nr_cache_ids) |
| 166 | return 0; | 166 | return 0; |
| 167 | 167 | ||
| 168 | arr = kzalloc(sizeof(struct memcg_cache_array) + | 168 | arr = kvzalloc(sizeof(struct memcg_cache_array) + |
| 169 | memcg_nr_cache_ids * sizeof(void *), | 169 | memcg_nr_cache_ids * sizeof(void *), |
| 170 | GFP_KERNEL); | 170 | GFP_KERNEL); |
| 171 | if (!arr) | 171 | if (!arr) |
| 172 | return -ENOMEM; | 172 | return -ENOMEM; |
| 173 | 173 | ||
| @@ -178,15 +178,23 @@ static int init_memcg_params(struct kmem_cache *s, | |||
| 178 | static void destroy_memcg_params(struct kmem_cache *s) | 178 | static void destroy_memcg_params(struct kmem_cache *s) |
| 179 | { | 179 | { |
| 180 | if (is_root_cache(s)) | 180 | if (is_root_cache(s)) |
| 181 | kfree(rcu_access_pointer(s->memcg_params.memcg_caches)); | 181 | kvfree(rcu_access_pointer(s->memcg_params.memcg_caches)); |
| 182 | } | ||
| 183 | |||
| 184 | static void free_memcg_params(struct rcu_head *rcu) | ||
| 185 | { | ||
| 186 | struct memcg_cache_array *old; | ||
| 187 | |||
| 188 | old = container_of(rcu, struct memcg_cache_array, rcu); | ||
| 189 | kvfree(old); | ||
| 182 | } | 190 | } |
| 183 | 191 | ||
| 184 | static int update_memcg_params(struct kmem_cache *s, int new_array_size) | 192 | static int update_memcg_params(struct kmem_cache *s, int new_array_size) |
| 185 | { | 193 | { |
| 186 | struct memcg_cache_array *old, *new; | 194 | struct memcg_cache_array *old, *new; |
| 187 | 195 | ||
| 188 | new = kzalloc(sizeof(struct memcg_cache_array) + | 196 | new = kvzalloc(sizeof(struct memcg_cache_array) + |
| 189 | new_array_size * sizeof(void *), GFP_KERNEL); | 197 | new_array_size * sizeof(void *), GFP_KERNEL); |
| 190 | if (!new) | 198 | if (!new) |
| 191 | return -ENOMEM; | 199 | return -ENOMEM; |
| 192 | 200 | ||
| @@ -198,7 +206,7 @@ static int update_memcg_params(struct kmem_cache *s, int new_array_size) | |||
| 198 | 206 | ||
| 199 | rcu_assign_pointer(s->memcg_params.memcg_caches, new); | 207 | rcu_assign_pointer(s->memcg_params.memcg_caches, new); |
| 200 | if (old) | 208 | if (old) |
| 201 | kfree_rcu(old, rcu); | 209 | call_rcu(&old->rcu, free_memcg_params); |
| 202 | return 0; | 210 | return 0; |
| 203 | } | 211 | } |
| 204 | 212 | ||
diff --git a/mm/swap.c b/mm/swap.c --- a/mm/swap.c +++ b/mm/swap.c | |||
| @@ -575,7 +575,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, | |||
| 575 | void *arg) | 575 | void *arg) |
| 576 | { | 576 | { |
| 577 | if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && | 577 | if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && |
| 578 | !PageUnevictable(page)) { | 578 | !PageSwapCache(page) && !PageUnevictable(page)) { |
| 579 | bool active = PageActive(page); | 579 | bool active = PageActive(page); |
| 580 | 580 | ||
| 581 | del_page_from_lru_list(page, lruvec, | 581 | del_page_from_lru_list(page, lruvec, |
| @@ -665,7 +665,7 @@ void deactivate_file_page(struct page *page) | |||
| 665 | void mark_page_lazyfree(struct page *page) | 665 | void mark_page_lazyfree(struct page *page) |
| 666 | { | 666 | { |
| 667 | if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && | 667 | if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && |
| 668 | !PageUnevictable(page)) { | 668 | !PageSwapCache(page) && !PageUnevictable(page)) { |
| 669 | struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs); | 669 | struct pagevec *pvec = &get_cpu_var(lru_lazyfree_pvecs); |
| 670 | 670 | ||
| 671 | get_page(page); | 671 | get_page(page); |
diff --git a/mm/swap_state.c b/mm/swap_state.c index 71ce2d1ccbf7..05b6803f0cce 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
| @@ -39,10 +39,6 @@ struct address_space *swapper_spaces[MAX_SWAPFILES]; | |||
| 39 | static unsigned int nr_swapper_spaces[MAX_SWAPFILES]; | 39 | static unsigned int nr_swapper_spaces[MAX_SWAPFILES]; |
| 40 | bool swap_vma_readahead = true; | 40 | bool swap_vma_readahead = true; |
| 41 | 41 | ||
| 42 | #define SWAP_RA_MAX_ORDER_DEFAULT 3 | ||
| 43 | |||
| 44 | static int swap_ra_max_order = SWAP_RA_MAX_ORDER_DEFAULT; | ||
| 45 | |||
| 46 | #define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2) | 42 | #define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2) |
| 47 | #define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1) | 43 | #define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1) |
| 48 | #define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK | 44 | #define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK |
| @@ -242,6 +238,17 @@ int add_to_swap(struct page *page) | |||
| 242 | * clear SWAP_HAS_CACHE flag. | 238 | * clear SWAP_HAS_CACHE flag. |
| 243 | */ | 239 | */ |
| 244 | goto fail; | 240 | goto fail; |
| 241 | /* | ||
| 242 | * Normally the page will be dirtied in unmap because its pte should be | ||
| 243 | * dirty. A special case is MADV_FREE page. The page's pte could have | ||
| 244 | * dirty bit cleared but the page's SwapBacked bit is still set because | ||
| 245 | * clearing the dirty bit and SwapBacked bit has no lock protected. For | ||
| 246 | * such page, unmap will not set dirty bit for it, so page reclaim will | ||
| 247 | * not write the page out. This can cause data corruption when the page | ||
| 248 | * is swap in later. Always setting the dirty bit for the page solves | ||
| 249 | * the problem. | ||
| 250 | */ | ||
| 251 | set_page_dirty(page); | ||
| 245 | 252 | ||
| 246 | return 1; | 253 | return 1; |
| 247 | 254 | ||
| @@ -653,6 +660,13 @@ struct page *swap_readahead_detect(struct vm_fault *vmf, | |||
| 653 | pte_t *tpte; | 660 | pte_t *tpte; |
| 654 | #endif | 661 | #endif |
| 655 | 662 | ||
| 663 | max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster), | ||
| 664 | SWAP_RA_ORDER_CEILING); | ||
| 665 | if (max_win == 1) { | ||
| 666 | swap_ra->win = 1; | ||
| 667 | return NULL; | ||
| 668 | } | ||
| 669 | |||
| 656 | faddr = vmf->address; | 670 | faddr = vmf->address; |
| 657 | entry = pte_to_swp_entry(vmf->orig_pte); | 671 | entry = pte_to_swp_entry(vmf->orig_pte); |
| 658 | if ((unlikely(non_swap_entry(entry)))) | 672 | if ((unlikely(non_swap_entry(entry)))) |
| @@ -661,12 +675,6 @@ struct page *swap_readahead_detect(struct vm_fault *vmf, | |||
| 661 | if (page) | 675 | if (page) |
| 662 | return page; | 676 | return page; |
| 663 | 677 | ||
| 664 | max_win = 1 << READ_ONCE(swap_ra_max_order); | ||
| 665 | if (max_win == 1) { | ||
| 666 | swap_ra->win = 1; | ||
| 667 | return NULL; | ||
| 668 | } | ||
| 669 | |||
| 670 | fpfn = PFN_DOWN(faddr); | 678 | fpfn = PFN_DOWN(faddr); |
| 671 | swap_ra_info = GET_SWAP_RA_VAL(vma); | 679 | swap_ra_info = GET_SWAP_RA_VAL(vma); |
| 672 | pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info)); | 680 | pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info)); |
| @@ -775,32 +783,8 @@ static struct kobj_attribute vma_ra_enabled_attr = | |||
| 775 | __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show, | 783 | __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show, |
| 776 | vma_ra_enabled_store); | 784 | vma_ra_enabled_store); |
| 777 | 785 | ||
| 778 | static ssize_t vma_ra_max_order_show(struct kobject *kobj, | ||
| 779 | struct kobj_attribute *attr, char *buf) | ||
| 780 | { | ||
| 781 | return sprintf(buf, "%d\n", swap_ra_max_order); | ||
| 782 | } | ||
| 783 | static ssize_t vma_ra_max_order_store(struct kobject *kobj, | ||
| 784 | struct kobj_attribute *attr, | ||
| 785 | const char *buf, size_t count) | ||
| 786 | { | ||
| 787 | int err, v; | ||
| 788 | |||
| 789 | err = kstrtoint(buf, 10, &v); | ||
| 790 | if (err || v > SWAP_RA_ORDER_CEILING || v <= 0) | ||
| 791 | return -EINVAL; | ||
| 792 | |||
| 793 | swap_ra_max_order = v; | ||
| 794 | |||
| 795 | return count; | ||
| 796 | } | ||
| 797 | static struct kobj_attribute vma_ra_max_order_attr = | ||
| 798 | __ATTR(vma_ra_max_order, 0644, vma_ra_max_order_show, | ||
| 799 | vma_ra_max_order_store); | ||
| 800 | |||
| 801 | static struct attribute *swap_attrs[] = { | 786 | static struct attribute *swap_attrs[] = { |
| 802 | &vma_ra_enabled_attr.attr, | 787 | &vma_ra_enabled_attr.attr, |
| 803 | &vma_ra_max_order_attr.attr, | ||
| 804 | NULL, | 788 | NULL, |
| 805 | }; | 789 | }; |
| 806 | 790 | ||
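With the vma_ra_max_order knob removed, the VMA readahead window is bounded by the existing /proc/sys/vm/page-cluster setting instead: max_win = 1 << min(page_cluster, SWAP_RA_ORDER_CEILING). For example, with the usual default of page_cluster = 3 the window is at most 1 << 3 = 8 pages, and writing 0 to page-cluster makes max_win = 1, which takes the early return added above and disables VMA-based readahead for that fault.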
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 8a43db6284eb..673942094328 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
| @@ -1695,11 +1695,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, | |||
| 1695 | for (i = 0; i < area->nr_pages; i++) { | 1695 | for (i = 0; i < area->nr_pages; i++) { |
| 1696 | struct page *page; | 1696 | struct page *page; |
| 1697 | 1697 | ||
| 1698 | if (fatal_signal_pending(current)) { | ||
| 1699 | area->nr_pages = i; | ||
| 1700 | goto fail_no_warn; | ||
| 1701 | } | ||
| 1702 | |||
| 1703 | if (node == NUMA_NO_NODE) | 1698 | if (node == NUMA_NO_NODE) |
| 1704 | page = alloc_page(alloc_mask|highmem_mask); | 1699 | page = alloc_page(alloc_mask|highmem_mask); |
| 1705 | else | 1700 | else |
| @@ -1723,7 +1718,6 @@ fail: | |||
| 1723 | warn_alloc(gfp_mask, NULL, | 1718 | warn_alloc(gfp_mask, NULL, |
| 1724 | "vmalloc: allocation failure, allocated %ld of %ld bytes", | 1719 | "vmalloc: allocation failure, allocated %ld of %ld bytes", |
| 1725 | (area->nr_pages*PAGE_SIZE), area->size); | 1720 | (area->nr_pages*PAGE_SIZE), area->size); |
| 1726 | fail_no_warn: | ||
| 1727 | vfree(area->addr); | 1721 | vfree(area->addr); |
| 1728 | return NULL; | 1722 | return NULL; |
| 1729 | } | 1723 | } |
diff --git a/mm/z3fold.c b/mm/z3fold.c index 486550df32be..b2ba2ba585f3 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c | |||
| @@ -250,6 +250,7 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) | |||
| 250 | 250 | ||
| 251 | WARN_ON(!list_empty(&zhdr->buddy)); | 251 | WARN_ON(!list_empty(&zhdr->buddy)); |
| 252 | set_bit(PAGE_STALE, &page->private); | 252 | set_bit(PAGE_STALE, &page->private); |
| 253 | clear_bit(NEEDS_COMPACTING, &page->private); | ||
| 253 | spin_lock(&pool->lock); | 254 | spin_lock(&pool->lock); |
| 254 | if (!list_empty(&page->lru)) | 255 | if (!list_empty(&page->lru)) |
| 255 | list_del(&page->lru); | 256 | list_del(&page->lru); |
| @@ -303,7 +304,6 @@ static void free_pages_work(struct work_struct *w) | |||
| 303 | list_del(&zhdr->buddy); | 304 | list_del(&zhdr->buddy); |
| 304 | if (WARN_ON(!test_bit(PAGE_STALE, &page->private))) | 305 | if (WARN_ON(!test_bit(PAGE_STALE, &page->private))) |
| 305 | continue; | 306 | continue; |
| 306 | clear_bit(NEEDS_COMPACTING, &page->private); | ||
| 307 | spin_unlock(&pool->stale_lock); | 307 | spin_unlock(&pool->stale_lock); |
| 308 | cancel_work_sync(&zhdr->work); | 308 | cancel_work_sync(&zhdr->work); |
| 309 | free_z3fold_page(page); | 309 | free_z3fold_page(page); |
| @@ -624,10 +624,8 @@ lookup: | |||
| 624 | * stale pages list. cancel_work_sync() can sleep so we must make | 624 | * stale pages list. cancel_work_sync() can sleep so we must make |
| 625 | * sure it won't be called in case we're in atomic context. | 625 | * sure it won't be called in case we're in atomic context. |
| 626 | */ | 626 | */ |
| 627 | if (zhdr && (can_sleep || !work_pending(&zhdr->work) || | 627 | if (zhdr && (can_sleep || !work_pending(&zhdr->work))) { |
| 628 | !unlikely(work_busy(&zhdr->work)))) { | ||
| 629 | list_del(&zhdr->buddy); | 628 | list_del(&zhdr->buddy); |
| 630 | clear_bit(NEEDS_COMPACTING, &page->private); | ||
| 631 | spin_unlock(&pool->stale_lock); | 629 | spin_unlock(&pool->stale_lock); |
| 632 | if (can_sleep) | 630 | if (can_sleep) |
| 633 | cancel_work_sync(&zhdr->work); | 631 | cancel_work_sync(&zhdr->work); |
| @@ -875,16 +873,18 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) | |||
| 875 | goto next; | 873 | goto next; |
| 876 | } | 874 | } |
| 877 | next: | 875 | next: |
| 876 | spin_lock(&pool->lock); | ||
| 878 | if (test_bit(PAGE_HEADLESS, &page->private)) { | 877 | if (test_bit(PAGE_HEADLESS, &page->private)) { |
| 879 | if (ret == 0) { | 878 | if (ret == 0) { |
| 879 | spin_unlock(&pool->lock); | ||
| 880 | free_z3fold_page(page); | 880 | free_z3fold_page(page); |
| 881 | return 0; | 881 | return 0; |
| 882 | } | 882 | } |
| 883 | } else if (kref_put(&zhdr->refcount, release_z3fold_page)) { | 883 | } else if (kref_put(&zhdr->refcount, release_z3fold_page)) { |
| 884 | atomic64_dec(&pool->pages_nr); | 884 | atomic64_dec(&pool->pages_nr); |
| 885 | spin_unlock(&pool->lock); | ||
| 885 | return 0; | 886 | return 0; |
| 886 | } | 887 | } |
| 887 | spin_lock(&pool->lock); | ||
| 888 | 888 | ||
| 889 | /* | 889 | /* |
| 890 | * Add to the beginning of LRU. | 890 | * Add to the beginning of LRU. |
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index e2ed69850489..0bc31de9071a 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
| @@ -21,6 +21,12 @@ bool vlan_do_receive(struct sk_buff **skbp) | |||
| 21 | if (unlikely(!skb)) | 21 | if (unlikely(!skb)) |
| 22 | return false; | 22 | return false; |
| 23 | 23 | ||
| 24 | if (unlikely(!(vlan_dev->flags & IFF_UP))) { | ||
| 25 | kfree_skb(skb); | ||
| 26 | *skbp = NULL; | ||
| 27 | return false; | ||
| 28 | } | ||
| 29 | |||
| 24 | skb->dev = vlan_dev; | 30 | skb->dev = vlan_dev; |
| 25 | if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) { | 31 | if (unlikely(skb->pkt_type == PACKET_OTHERHOST)) { |
| 26 | /* Our lower layer thinks this is not local, let's make sure. | 32 | /* Our lower layer thinks this is not local, let's make sure. |
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index 2585b100ebbb..276b60262981 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c | |||
| @@ -65,8 +65,8 @@ static int ebt_broute(struct sk_buff *skb) | |||
| 65 | 65 | ||
| 66 | static int __net_init broute_net_init(struct net *net) | 66 | static int __net_init broute_net_init(struct net *net) |
| 67 | { | 67 | { |
| 68 | net->xt.broute_table = ebt_register_table(net, &broute_table, NULL); | 68 | return ebt_register_table(net, &broute_table, NULL, |
| 69 | return PTR_ERR_OR_ZERO(net->xt.broute_table); | 69 | &net->xt.broute_table); |
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | static void __net_exit broute_net_exit(struct net *net) | 72 | static void __net_exit broute_net_exit(struct net *net) |
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index 45a00dbdbcad..c41da5fac84f 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c | |||
| @@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_filter[] = { | |||
| 93 | 93 | ||
| 94 | static int __net_init frame_filter_net_init(struct net *net) | 94 | static int __net_init frame_filter_net_init(struct net *net) |
| 95 | { | 95 | { |
| 96 | net->xt.frame_filter = ebt_register_table(net, &frame_filter, ebt_ops_filter); | 96 | return ebt_register_table(net, &frame_filter, ebt_ops_filter, |
| 97 | return PTR_ERR_OR_ZERO(net->xt.frame_filter); | 97 | &net->xt.frame_filter); |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | static void __net_exit frame_filter_net_exit(struct net *net) | 100 | static void __net_exit frame_filter_net_exit(struct net *net) |
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index 57cd5bb154e7..08df7406ecb3 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c | |||
| @@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_nat[] = { | |||
| 93 | 93 | ||
| 94 | static int __net_init frame_nat_net_init(struct net *net) | 94 | static int __net_init frame_nat_net_init(struct net *net) |
| 95 | { | 95 | { |
| 96 | net->xt.frame_nat = ebt_register_table(net, &frame_nat, ebt_ops_nat); | 96 | return ebt_register_table(net, &frame_nat, ebt_ops_nat, |
| 97 | return PTR_ERR_OR_ZERO(net->xt.frame_nat); | 97 | &net->xt.frame_nat); |
| 98 | } | 98 | } |
| 99 | 99 | ||
| 100 | static void __net_exit frame_nat_net_exit(struct net *net) | 100 | static void __net_exit frame_nat_net_exit(struct net *net) |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 83951f978445..3b3dcf719e07 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
| @@ -1169,9 +1169,8 @@ static void __ebt_unregister_table(struct net *net, struct ebt_table *table) | |||
| 1169 | kfree(table); | 1169 | kfree(table); |
| 1170 | } | 1170 | } |
| 1171 | 1171 | ||
| 1172 | struct ebt_table * | 1172 | int ebt_register_table(struct net *net, const struct ebt_table *input_table, |
| 1173 | ebt_register_table(struct net *net, const struct ebt_table *input_table, | 1173 | const struct nf_hook_ops *ops, struct ebt_table **res) |
| 1174 | const struct nf_hook_ops *ops) | ||
| 1175 | { | 1174 | { |
| 1176 | struct ebt_table_info *newinfo; | 1175 | struct ebt_table_info *newinfo; |
| 1177 | struct ebt_table *t, *table; | 1176 | struct ebt_table *t, *table; |
| @@ -1183,7 +1182,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table, | |||
| 1183 | repl->entries == NULL || repl->entries_size == 0 || | 1182 | repl->entries == NULL || repl->entries_size == 0 || |
| 1184 | repl->counters != NULL || input_table->private != NULL) { | 1183 | repl->counters != NULL || input_table->private != NULL) { |
| 1185 | BUGPRINT("Bad table data for ebt_register_table!!!\n"); | 1184 | BUGPRINT("Bad table data for ebt_register_table!!!\n"); |
| 1186 | return ERR_PTR(-EINVAL); | 1185 | return -EINVAL; |
| 1187 | } | 1186 | } |
| 1188 | 1187 | ||
| 1189 | /* Don't add one table to multiple lists. */ | 1188 | /* Don't add one table to multiple lists. */ |
| @@ -1252,16 +1251,18 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table, | |||
| 1252 | list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]); | 1251 | list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]); |
| 1253 | mutex_unlock(&ebt_mutex); | 1252 | mutex_unlock(&ebt_mutex); |
| 1254 | 1253 | ||
| 1254 | WRITE_ONCE(*res, table); | ||
| 1255 | |||
| 1255 | if (!ops) | 1256 | if (!ops) |
| 1256 | return table; | 1257 | return 0; |
| 1257 | 1258 | ||
| 1258 | ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); | 1259 | ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); |
| 1259 | if (ret) { | 1260 | if (ret) { |
| 1260 | __ebt_unregister_table(net, table); | 1261 | __ebt_unregister_table(net, table); |
| 1261 | return ERR_PTR(ret); | 1262 | *res = NULL; |
| 1262 | } | 1263 | } |
| 1263 | 1264 | ||
| 1264 | return table; | 1265 | return ret; |
| 1265 | free_unlock: | 1266 | free_unlock: |
| 1266 | mutex_unlock(&ebt_mutex); | 1267 | mutex_unlock(&ebt_mutex); |
| 1267 | free_chainstack: | 1268 | free_chainstack: |
| @@ -1276,7 +1277,7 @@ free_newinfo: | |||
| 1276 | free_table: | 1277 | free_table: |
| 1277 | kfree(table); | 1278 | kfree(table); |
| 1278 | out: | 1279 | out: |
| 1279 | return ERR_PTR(ret); | 1280 | return ret; |
| 1280 | } | 1281 | } |
| 1281 | 1282 | ||
| 1282 | void ebt_unregister_table(struct net *net, struct ebt_table *table, | 1283 | void ebt_unregister_table(struct net *net, struct ebt_table *table, |
diff --git a/net/core/filter.c b/net/core/filter.c index 82edad58d066..74b8c91fb5f4 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
| @@ -989,10 +989,14 @@ static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp) | |||
| 989 | 989 | ||
| 990 | bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) | 990 | bool sk_filter_charge(struct sock *sk, struct sk_filter *fp) |
| 991 | { | 991 | { |
| 992 | bool ret = __sk_filter_charge(sk, fp); | 992 | if (!refcount_inc_not_zero(&fp->refcnt)) |
| 993 | if (ret) | 993 | return false; |
| 994 | refcount_inc(&fp->refcnt); | 994 | |
| 995 | return ret; | 995 | if (!__sk_filter_charge(sk, fp)) { |
| 996 | sk_filter_release(fp); | ||
| 997 | return false; | ||
| 998 | } | ||
| 999 | return true; | ||
| 996 | } | 1000 | } |
| 997 | 1001 | ||
| 998 | static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) | 1002 | static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp) |
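sk_filter_charge() now refuses to charge a filter it cannot take a reference on: refcount_inc_not_zero() fails once the last reference is gone, closing the race with a concurrent release of the parent socket's filter (the sk_clone_lock() side of the same fix appears further down in net/core/sock.c). The take-a-reference-only-if-still-alive idiom on its own, with an illustrative struct:

    #include <linux/refcount.h>

    struct obj {
            refcount_t refcnt;
    };

    static bool obj_get_unless_zero(struct obj *o)
    {
            /* fails, rather than resurrects, if refcnt already hit zero */
            return refcount_inc_not_zero(&o->refcnt);
    }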
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index a78fd61da0ec..d4bcdcc68e92 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
| @@ -3854,6 +3854,9 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, | |||
| 3854 | return -EMSGSIZE; | 3854 | return -EMSGSIZE; |
| 3855 | 3855 | ||
| 3856 | ifsm = nlmsg_data(nlh); | 3856 | ifsm = nlmsg_data(nlh); |
| 3857 | ifsm->family = PF_UNSPEC; | ||
| 3858 | ifsm->pad1 = 0; | ||
| 3859 | ifsm->pad2 = 0; | ||
| 3857 | ifsm->ifindex = dev->ifindex; | 3860 | ifsm->ifindex = dev->ifindex; |
| 3858 | ifsm->filter_mask = filter_mask; | 3861 | ifsm->filter_mask = filter_mask; |
| 3859 | 3862 | ||
diff --git a/net/core/sock.c b/net/core/sock.c index 9b7b6bbb2a23..23953b741a41 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -1654,6 +1654,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
| 1654 | 1654 | ||
| 1655 | sock_copy(newsk, sk); | 1655 | sock_copy(newsk, sk); |
| 1656 | 1656 | ||
| 1657 | newsk->sk_prot_creator = sk->sk_prot; | ||
| 1658 | |||
| 1657 | /* SANITY */ | 1659 | /* SANITY */ |
| 1658 | if (likely(newsk->sk_net_refcnt)) | 1660 | if (likely(newsk->sk_net_refcnt)) |
| 1659 | get_net(sock_net(newsk)); | 1661 | get_net(sock_net(newsk)); |
| @@ -1682,13 +1684,16 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
| 1682 | 1684 | ||
| 1683 | sock_reset_flag(newsk, SOCK_DONE); | 1685 | sock_reset_flag(newsk, SOCK_DONE); |
| 1684 | 1686 | ||
| 1685 | filter = rcu_dereference_protected(newsk->sk_filter, 1); | 1687 | rcu_read_lock(); |
| 1688 | filter = rcu_dereference(sk->sk_filter); | ||
| 1686 | if (filter != NULL) | 1689 | if (filter != NULL) |
| 1687 | /* though it's an empty new sock, the charging may fail | 1690 | /* though it's an empty new sock, the charging may fail |
| 1688 | * if sysctl_optmem_max was changed between creation of | 1691 | * if sysctl_optmem_max was changed between creation of |
| 1689 | * original socket and cloning | 1692 | * original socket and cloning |
| 1690 | */ | 1693 | */ |
| 1691 | is_charged = sk_filter_charge(newsk, filter); | 1694 | is_charged = sk_filter_charge(newsk, filter); |
| 1695 | RCU_INIT_POINTER(newsk->sk_filter, filter); | ||
| 1696 | rcu_read_unlock(); | ||
| 1692 | 1697 | ||
| 1693 | if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { | 1698 | if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { |
| 1694 | /* We need to make sure that we don't uncharge the new | 1699 | /* We need to make sure that we don't uncharge the new |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 2afa99506f8b..865e29e62bad 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
| @@ -1301,28 +1301,33 @@ int dsa_slave_create(struct dsa_port *port, const char *name) | |||
| 1301 | p->old_duplex = -1; | 1301 | p->old_duplex = -1; |
| 1302 | 1302 | ||
| 1303 | port->netdev = slave_dev; | 1303 | port->netdev = slave_dev; |
| 1304 | ret = register_netdev(slave_dev); | ||
| 1305 | if (ret) { | ||
| 1306 | netdev_err(master, "error %d registering interface %s\n", | ||
| 1307 | ret, slave_dev->name); | ||
| 1308 | port->netdev = NULL; | ||
| 1309 | free_percpu(p->stats64); | ||
| 1310 | free_netdev(slave_dev); | ||
| 1311 | return ret; | ||
| 1312 | } | ||
| 1313 | 1304 | ||
| 1314 | netif_carrier_off(slave_dev); | 1305 | netif_carrier_off(slave_dev); |
| 1315 | 1306 | ||
| 1316 | ret = dsa_slave_phy_setup(p, slave_dev); | 1307 | ret = dsa_slave_phy_setup(p, slave_dev); |
| 1317 | if (ret) { | 1308 | if (ret) { |
| 1318 | netdev_err(master, "error %d setting up slave phy\n", ret); | 1309 | netdev_err(master, "error %d setting up slave phy\n", ret); |
| 1319 | unregister_netdev(slave_dev); | 1310 | goto out_free; |
| 1320 | free_percpu(p->stats64); | 1311 | } |
| 1321 | free_netdev(slave_dev); | 1312 | |
| 1322 | return ret; | 1313 | ret = register_netdev(slave_dev); |
| 1314 | if (ret) { | ||
| 1315 | netdev_err(master, "error %d registering interface %s\n", | ||
| 1316 | ret, slave_dev->name); | ||
| 1317 | goto out_phy; | ||
| 1323 | } | 1318 | } |
| 1324 | 1319 | ||
| 1325 | return 0; | 1320 | return 0; |
| 1321 | |||
| 1322 | out_phy: | ||
| 1323 | phy_disconnect(p->phy); | ||
| 1324 | if (of_phy_is_fixed_link(p->dp->dn)) | ||
| 1325 | of_phy_deregister_fixed_link(p->dp->dn); | ||
| 1326 | out_free: | ||
| 1327 | free_percpu(p->stats64); | ||
| 1328 | free_netdev(slave_dev); | ||
| 1329 | port->netdev = NULL; | ||
| 1330 | return ret; | ||
| 1326 | } | 1331 | } |
| 1327 | 1332 | ||
| 1328 | void dsa_slave_destroy(struct net_device *slave_dev) | 1333 | void dsa_slave_destroy(struct net_device *slave_dev) |
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 416bb304a281..1859c473b21a 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c | |||
| @@ -86,7 +86,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, | |||
| 86 | greh = (struct gre_base_hdr *)skb_transport_header(skb); | 86 | greh = (struct gre_base_hdr *)skb_transport_header(skb); |
| 87 | pcsum = (__sum16 *)(greh + 1); | 87 | pcsum = (__sum16 *)(greh + 1); |
| 88 | 88 | ||
| 89 | if (gso_partial) { | 89 | if (gso_partial && skb_is_gso(skb)) { |
| 90 | unsigned int partial_adj; | 90 | unsigned int partial_adj; |
| 91 | 91 | ||
| 92 | /* Adjust checksum to account for the fact that | 92 | /* Adjust checksum to account for the fact that |
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index e7eb590c86ce..b20c8ac64081 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
| @@ -128,9 +128,9 @@ static struct inet_peer *lookup(const struct inetpeer_addr *daddr, | |||
| 128 | break; | 128 | break; |
| 129 | } | 129 | } |
| 130 | if (cmp == -1) | 130 | if (cmp == -1) |
| 131 | pp = &(*pp)->rb_left; | 131 | pp = &next->rb_left; |
| 132 | else | 132 | else |
| 133 | pp = &(*pp)->rb_right; | 133 | pp = &next->rb_right; |
| 134 | } | 134 | } |
| 135 | *parent_p = parent; | 135 | *parent_p = parent; |
| 136 | *pp_p = pp; | 136 | *pp_p = pp; |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 0162fb955b33..467e44d7587d 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
| @@ -259,7 +259,6 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
| 259 | struct ip_tunnel *tunnel; | 259 | struct ip_tunnel *tunnel; |
| 260 | struct erspanhdr *ershdr; | 260 | struct erspanhdr *ershdr; |
| 261 | const struct iphdr *iph; | 261 | const struct iphdr *iph; |
| 262 | __be32 session_id; | ||
| 263 | __be32 index; | 262 | __be32 index; |
| 264 | int len; | 263 | int len; |
| 265 | 264 | ||
| @@ -275,8 +274,7 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
| 275 | /* The original GRE header does not have key field, | 274 | /* The original GRE header does not have key field, |
| 276 | * Use ERSPAN 10-bit session ID as key. | 275 | * Use ERSPAN 10-bit session ID as key. |
| 277 | */ | 276 | */ |
| 278 | session_id = cpu_to_be32(ntohs(ershdr->session_id)); | 277 | tpi->key = cpu_to_be32(ntohs(ershdr->session_id) & ID_MASK); |
| 279 | tpi->key = session_id; | ||
| 280 | index = ershdr->md.index; | 278 | index = ershdr->md.index; |
| 281 | tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, | 279 | tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, |
| 282 | tpi->flags | TUNNEL_KEY, | 280 | tpi->flags | TUNNEL_KEY, |
| @@ -733,7 +731,7 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb, | |||
| 733 | if (skb_cow_head(skb, dev->needed_headroom)) | 731 | if (skb_cow_head(skb, dev->needed_headroom)) |
| 734 | goto free_skb; | 732 | goto free_skb; |
| 735 | 733 | ||
| 736 | if (skb->len > dev->mtu) { | 734 | if (skb->len - dev->hard_header_len > dev->mtu) { |
| 737 | pskb_trim(skb, dev->mtu); | 735 | pskb_trim(skb, dev->mtu); |
| 738 | truncate = true; | 736 | truncate = true; |
| 739 | } | 737 | } |
| @@ -1223,6 +1221,7 @@ static int gre_tap_init(struct net_device *dev) | |||
| 1223 | { | 1221 | { |
| 1224 | __gre_tunnel_init(dev); | 1222 | __gre_tunnel_init(dev); |
| 1225 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 1223 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
| 1224 | netif_keep_dst(dev); | ||
| 1226 | 1225 | ||
| 1227 | return ip_tunnel_init(dev); | 1226 | return ip_tunnel_init(dev); |
| 1228 | } | 1227 | } |
| @@ -1246,13 +1245,16 @@ static int erspan_tunnel_init(struct net_device *dev) | |||
| 1246 | 1245 | ||
| 1247 | tunnel->tun_hlen = 8; | 1246 | tunnel->tun_hlen = 8; |
| 1248 | tunnel->parms.iph.protocol = IPPROTO_GRE; | 1247 | tunnel->parms.iph.protocol = IPPROTO_GRE; |
| 1249 | t_hlen = tunnel->hlen + sizeof(struct iphdr) + sizeof(struct erspanhdr); | 1248 | tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen + |
| 1249 | sizeof(struct erspanhdr); | ||
| 1250 | t_hlen = tunnel->hlen + sizeof(struct iphdr); | ||
| 1250 | 1251 | ||
| 1251 | dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; | 1252 | dev->needed_headroom = LL_MAX_HEADER + t_hlen + 4; |
| 1252 | dev->mtu = ETH_DATA_LEN - t_hlen - 4; | 1253 | dev->mtu = ETH_DATA_LEN - t_hlen - 4; |
| 1253 | dev->features |= GRE_FEATURES; | 1254 | dev->features |= GRE_FEATURES; |
| 1254 | dev->hw_features |= GRE_FEATURES; | 1255 | dev->hw_features |= GRE_FEATURES; |
| 1255 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 1256 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
| 1257 | netif_keep_dst(dev); | ||
| 1256 | 1258 | ||
| 1257 | return ip_tunnel_init(dev); | 1259 | return ip_tunnel_init(dev); |
| 1258 | } | 1260 | } |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c index fa2dc8f692c6..57fc13c6ab2b 100644 --- a/net/ipv4/ip_input.c +++ b/net/ipv4/ip_input.c | |||
| @@ -311,9 +311,10 @@ drop: | |||
| 311 | static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | 311 | static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) |
| 312 | { | 312 | { |
| 313 | const struct iphdr *iph = ip_hdr(skb); | 313 | const struct iphdr *iph = ip_hdr(skb); |
| 314 | struct rtable *rt; | 314 | int (*edemux)(struct sk_buff *skb); |
| 315 | struct net_device *dev = skb->dev; | 315 | struct net_device *dev = skb->dev; |
| 316 | void (*edemux)(struct sk_buff *skb); | 316 | struct rtable *rt; |
| 317 | int err; | ||
| 317 | 318 | ||
| 318 | /* if ingress device is enslaved to an L3 master device pass the | 319 | /* if ingress device is enslaved to an L3 master device pass the |
| 319 | * skb to its handler for processing | 320 | * skb to its handler for processing |
| @@ -331,7 +332,9 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
| 331 | 332 | ||
| 332 | ipprot = rcu_dereference(inet_protos[protocol]); | 333 | ipprot = rcu_dereference(inet_protos[protocol]); |
| 333 | if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) { | 334 | if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) { |
| 334 | edemux(skb); | 335 | err = edemux(skb); |
| 336 | if (unlikely(err)) | ||
| 337 | goto drop_error; | ||
| 335 | /* must reload iph, skb->head might have changed */ | 338 | /* must reload iph, skb->head might have changed */ |
| 336 | iph = ip_hdr(skb); | 339 | iph = ip_hdr(skb); |
| 337 | } | 340 | } |
| @@ -342,13 +345,10 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
| 342 | * how the packet travels inside Linux networking. | 345 | * how the packet travels inside Linux networking. |
| 343 | */ | 346 | */ |
| 344 | if (!skb_valid_dst(skb)) { | 347 | if (!skb_valid_dst(skb)) { |
| 345 | int err = ip_route_input_noref(skb, iph->daddr, iph->saddr, | 348 | err = ip_route_input_noref(skb, iph->daddr, iph->saddr, |
| 346 | iph->tos, dev); | 349 | iph->tos, dev); |
| 347 | if (unlikely(err)) { | 350 | if (unlikely(err)) |
| 348 | if (err == -EXDEV) | 351 | goto drop_error; |
| 349 | __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER); | ||
| 350 | goto drop; | ||
| 351 | } | ||
| 352 | } | 352 | } |
| 353 | 353 | ||
| 354 | #ifdef CONFIG_IP_ROUTE_CLASSID | 354 | #ifdef CONFIG_IP_ROUTE_CLASSID |
| @@ -399,6 +399,11 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) | |||
| 399 | drop: | 399 | drop: |
| 400 | kfree_skb(skb); | 400 | kfree_skb(skb); |
| 401 | return NET_RX_DROP; | 401 | return NET_RX_DROP; |
| 402 | |||
| 403 | drop_error: | ||
| 404 | if (err == -EXDEV) | ||
| 405 | __NET_INC_STATS(net, LINUX_MIB_IPRPFILTER); | ||
| 406 | goto drop; | ||
| 402 | } | 407 | } |
| 403 | 408 | ||
| 404 | /* | 409 | /* |
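ip_rcv_finish() now expects early-demux handlers to return an int: a non-zero value sends the packet to the new drop_error label, where -EXDEV is counted as LINUX_MIB_IPRPFILTER before the skb is freed. Sketched from a handler's point of view (body omitted, name hypothetical):

    #include <linux/skbuff.h>

    static int example_early_demux(struct sk_buff *skb)
    {
            /* ... look up a socket or cached dst for skb ... */
            return 0;       /* or a negative errno so ip_rcv_finish() drops it */
    }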
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index 5ed63d250950..89453cf62158 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c | |||
| @@ -168,6 +168,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, | |||
| 168 | struct ip_tunnel_parm *parms = &tunnel->parms; | 168 | struct ip_tunnel_parm *parms = &tunnel->parms; |
| 169 | struct dst_entry *dst = skb_dst(skb); | 169 | struct dst_entry *dst = skb_dst(skb); |
| 170 | struct net_device *tdev; /* Device to other host */ | 170 | struct net_device *tdev; /* Device to other host */ |
| 171 | int pkt_len = skb->len; | ||
| 171 | int err; | 172 | int err; |
| 172 | int mtu; | 173 | int mtu; |
| 173 | 174 | ||
| @@ -229,7 +230,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev, | |||
| 229 | 230 | ||
| 230 | err = dst_output(tunnel->net, skb->sk, skb); | 231 | err = dst_output(tunnel->net, skb->sk, skb); |
| 231 | if (net_xmit_eval(err) == 0) | 232 | if (net_xmit_eval(err) == 0) |
| 232 | err = skb->len; | 233 | err = pkt_len; |
| 233 | iptunnel_xmit_stats(dev, err); | 234 | iptunnel_xmit_stats(dev, err); |
| 234 | return NETDEV_TX_OK; | 235 | return NETDEV_TX_OK; |
| 235 | 236 | ||
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index 811689e523c3..f75fc6b53115 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c | |||
| @@ -330,7 +330,8 @@ static unsigned int ipv4_synproxy_hook(void *priv, | |||
| 330 | if (synproxy == NULL) | 330 | if (synproxy == NULL) |
| 331 | return NF_ACCEPT; | 331 | return NF_ACCEPT; |
| 332 | 332 | ||
| 333 | if (nf_is_loopback_packet(skb)) | 333 | if (nf_is_loopback_packet(skb) || |
| 334 | ip_hdr(skb)->protocol != IPPROTO_TCP) | ||
| 334 | return NF_ACCEPT; | 335 | return NF_ACCEPT; |
| 335 | 336 | ||
| 336 | thoff = ip_hdrlen(skb); | 337 | thoff = ip_hdrlen(skb); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 94d4cd2d5ea4..3d9f1c2f81c5 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
| @@ -1520,43 +1520,56 @@ struct rtable *rt_dst_alloc(struct net_device *dev, | |||
| 1520 | EXPORT_SYMBOL(rt_dst_alloc); | 1520 | EXPORT_SYMBOL(rt_dst_alloc); |
| 1521 | 1521 | ||
| 1522 | /* called in rcu_read_lock() section */ | 1522 | /* called in rcu_read_lock() section */ |
| 1523 | static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | 1523 | int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, |
| 1524 | u8 tos, struct net_device *dev, int our) | 1524 | u8 tos, struct net_device *dev, |
| 1525 | struct in_device *in_dev, u32 *itag) | ||
| 1525 | { | 1526 | { |
| 1526 | struct rtable *rth; | ||
| 1527 | struct in_device *in_dev = __in_dev_get_rcu(dev); | ||
| 1528 | unsigned int flags = RTCF_MULTICAST; | ||
| 1529 | u32 itag = 0; | ||
| 1530 | int err; | 1527 | int err; |
| 1531 | 1528 | ||
| 1532 | /* Primary sanity checks. */ | 1529 | /* Primary sanity checks. */ |
| 1533 | |||
| 1534 | if (!in_dev) | 1530 | if (!in_dev) |
| 1535 | return -EINVAL; | 1531 | return -EINVAL; |
| 1536 | 1532 | ||
| 1537 | if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || | 1533 | if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) || |
| 1538 | skb->protocol != htons(ETH_P_IP)) | 1534 | skb->protocol != htons(ETH_P_IP)) |
| 1539 | goto e_inval; | 1535 | return -EINVAL; |
| 1540 | 1536 | ||
| 1541 | if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev)) | 1537 | if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev)) |
| 1542 | goto e_inval; | 1538 | return -EINVAL; |
| 1543 | 1539 | ||
| 1544 | if (ipv4_is_zeronet(saddr)) { | 1540 | if (ipv4_is_zeronet(saddr)) { |
| 1545 | if (!ipv4_is_local_multicast(daddr)) | 1541 | if (!ipv4_is_local_multicast(daddr)) |
| 1546 | goto e_inval; | 1542 | return -EINVAL; |
| 1547 | } else { | 1543 | } else { |
| 1548 | err = fib_validate_source(skb, saddr, 0, tos, 0, dev, | 1544 | err = fib_validate_source(skb, saddr, 0, tos, 0, dev, |
| 1549 | in_dev, &itag); | 1545 | in_dev, itag); |
| 1550 | if (err < 0) | 1546 | if (err < 0) |
| 1551 | goto e_err; | 1547 | return err; |
| 1552 | } | 1548 | } |
| 1549 | return 0; | ||
| 1550 | } | ||
| 1551 | |||
| 1552 | /* called in rcu_read_lock() section */ | ||
| 1553 | static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | ||
| 1554 | u8 tos, struct net_device *dev, int our) | ||
| 1555 | { | ||
| 1556 | struct in_device *in_dev = __in_dev_get_rcu(dev); | ||
| 1557 | unsigned int flags = RTCF_MULTICAST; | ||
| 1558 | struct rtable *rth; | ||
| 1559 | u32 itag = 0; | ||
| 1560 | int err; | ||
| 1561 | |||
| 1562 | err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag); | ||
| 1563 | if (err) | ||
| 1564 | return err; | ||
| 1565 | |||
| 1553 | if (our) | 1566 | if (our) |
| 1554 | flags |= RTCF_LOCAL; | 1567 | flags |= RTCF_LOCAL; |
| 1555 | 1568 | ||
| 1556 | rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST, | 1569 | rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST, |
| 1557 | IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false); | 1570 | IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false); |
| 1558 | if (!rth) | 1571 | if (!rth) |
| 1559 | goto e_nobufs; | 1572 | return -ENOBUFS; |
| 1560 | 1573 | ||
| 1561 | #ifdef CONFIG_IP_ROUTE_CLASSID | 1574 | #ifdef CONFIG_IP_ROUTE_CLASSID |
| 1562 | rth->dst.tclassid = itag; | 1575 | rth->dst.tclassid = itag; |
| @@ -1572,13 +1585,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
| 1572 | 1585 | ||
| 1573 | skb_dst_set(skb, &rth->dst); | 1586 | skb_dst_set(skb, &rth->dst); |
| 1574 | return 0; | 1587 | return 0; |
| 1575 | |||
| 1576 | e_nobufs: | ||
| 1577 | return -ENOBUFS; | ||
| 1578 | e_inval: | ||
| 1579 | return -EINVAL; | ||
| 1580 | e_err: | ||
| 1581 | return err; | ||
| 1582 | } | 1588 | } |
| 1583 | 1589 | ||
| 1584 | 1590 | ||
| @@ -2507,7 +2513,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or | |||
| 2507 | struct rtable *ort = (struct rtable *) dst_orig; | 2513 | struct rtable *ort = (struct rtable *) dst_orig; |
| 2508 | struct rtable *rt; | 2514 | struct rtable *rt; |
| 2509 | 2515 | ||
| 2510 | rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0); | 2516 | rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0); |
| 2511 | if (rt) { | 2517 | if (rt) { |
| 2512 | struct dst_entry *new = &rt->dst; | 2518 | struct dst_entry *new = &rt->dst; |
| 2513 | 2519 | ||
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index d9416b5162bc..85164d4d3e53 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -1503,23 +1503,23 @@ csum_err: | |||
| 1503 | } | 1503 | } |
| 1504 | EXPORT_SYMBOL(tcp_v4_do_rcv); | 1504 | EXPORT_SYMBOL(tcp_v4_do_rcv); |
| 1505 | 1505 | ||
| 1506 | void tcp_v4_early_demux(struct sk_buff *skb) | 1506 | int tcp_v4_early_demux(struct sk_buff *skb) |
| 1507 | { | 1507 | { |
| 1508 | const struct iphdr *iph; | 1508 | const struct iphdr *iph; |
| 1509 | const struct tcphdr *th; | 1509 | const struct tcphdr *th; |
| 1510 | struct sock *sk; | 1510 | struct sock *sk; |
| 1511 | 1511 | ||
| 1512 | if (skb->pkt_type != PACKET_HOST) | 1512 | if (skb->pkt_type != PACKET_HOST) |
| 1513 | return; | 1513 | return 0; |
| 1514 | 1514 | ||
| 1515 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) | 1515 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) |
| 1516 | return; | 1516 | return 0; |
| 1517 | 1517 | ||
| 1518 | iph = ip_hdr(skb); | 1518 | iph = ip_hdr(skb); |
| 1519 | th = tcp_hdr(skb); | 1519 | th = tcp_hdr(skb); |
| 1520 | 1520 | ||
| 1521 | if (th->doff < sizeof(struct tcphdr) / 4) | 1521 | if (th->doff < sizeof(struct tcphdr) / 4) |
| 1522 | return; | 1522 | return 0; |
| 1523 | 1523 | ||
| 1524 | sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo, | 1524 | sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo, |
| 1525 | iph->saddr, th->source, | 1525 | iph->saddr, th->source, |
| @@ -1538,6 +1538,7 @@ void tcp_v4_early_demux(struct sk_buff *skb) | |||
| 1538 | skb_dst_set_noref(skb, dst); | 1538 | skb_dst_set_noref(skb, dst); |
| 1539 | } | 1539 | } |
| 1540 | } | 1540 | } |
| 1541 | return 0; | ||
| 1541 | } | 1542 | } |
| 1542 | 1543 | ||
| 1543 | bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) | 1544 | bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index ef29df8648e4..e45177ceb0ee 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
| @@ -2221,9 +2221,10 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net, | |||
| 2221 | return NULL; | 2221 | return NULL; |
| 2222 | } | 2222 | } |
| 2223 | 2223 | ||
| 2224 | void udp_v4_early_demux(struct sk_buff *skb) | 2224 | int udp_v4_early_demux(struct sk_buff *skb) |
| 2225 | { | 2225 | { |
| 2226 | struct net *net = dev_net(skb->dev); | 2226 | struct net *net = dev_net(skb->dev); |
| 2227 | struct in_device *in_dev = NULL; | ||
| 2227 | const struct iphdr *iph; | 2228 | const struct iphdr *iph; |
| 2228 | const struct udphdr *uh; | 2229 | const struct udphdr *uh; |
| 2229 | struct sock *sk = NULL; | 2230 | struct sock *sk = NULL; |
| @@ -2234,25 +2235,21 @@ void udp_v4_early_demux(struct sk_buff *skb) | |||
| 2234 | 2235 | ||
| 2235 | /* validate the packet */ | 2236 | /* validate the packet */ |
| 2236 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) | 2237 | if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr))) |
| 2237 | return; | 2238 | return 0; |
| 2238 | 2239 | ||
| 2239 | iph = ip_hdr(skb); | 2240 | iph = ip_hdr(skb); |
| 2240 | uh = udp_hdr(skb); | 2241 | uh = udp_hdr(skb); |
| 2241 | 2242 | ||
| 2242 | if (skb->pkt_type == PACKET_BROADCAST || | 2243 | if (skb->pkt_type == PACKET_MULTICAST) { |
| 2243 | skb->pkt_type == PACKET_MULTICAST) { | 2244 | in_dev = __in_dev_get_rcu(skb->dev); |
| 2244 | struct in_device *in_dev = __in_dev_get_rcu(skb->dev); | ||
| 2245 | 2245 | ||
| 2246 | if (!in_dev) | 2246 | if (!in_dev) |
| 2247 | return; | 2247 | return 0; |
| 2248 | 2248 | ||
| 2249 | /* we are supposed to accept bcast packets */ | 2249 | ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, |
| 2250 | if (skb->pkt_type == PACKET_MULTICAST) { | 2250 | iph->protocol); |
| 2251 | ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, | 2251 | if (!ours) |
| 2252 | iph->protocol); | 2252 | return 0; |
| 2253 | if (!ours) | ||
| 2254 | return; | ||
| 2255 | } | ||
| 2256 | 2253 | ||
| 2257 | sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, | 2254 | sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, |
| 2258 | uh->source, iph->saddr, | 2255 | uh->source, iph->saddr, |
| @@ -2263,7 +2260,7 @@ void udp_v4_early_demux(struct sk_buff *skb) | |||
| 2263 | } | 2260 | } |
| 2264 | 2261 | ||
| 2265 | if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) | 2262 | if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt)) |
| 2266 | return; | 2263 | return 0; |
| 2267 | 2264 | ||
| 2268 | skb->sk = sk; | 2265 | skb->sk = sk; |
| 2269 | skb->destructor = sock_efree; | 2266 | skb->destructor = sock_efree; |
| @@ -2272,12 +2269,23 @@ void udp_v4_early_demux(struct sk_buff *skb) | |||
| 2272 | if (dst) | 2269 | if (dst) |
| 2273 | dst = dst_check(dst, 0); | 2270 | dst = dst_check(dst, 0); |
| 2274 | if (dst) { | 2271 | if (dst) { |
| 2272 | u32 itag = 0; | ||
| 2273 | |||
| 2275 | /* set noref for now. | 2274 | /* set noref for now. |
| 2276 | * any place which wants to hold dst has to call | 2275 | * any place which wants to hold dst has to call |
| 2277 | * dst_hold_safe() | 2276 | * dst_hold_safe() |
| 2278 | */ | 2277 | */ |
| 2279 | skb_dst_set_noref(skb, dst); | 2278 | skb_dst_set_noref(skb, dst); |
| 2279 | |||
| 2280 | /* for unconnected multicast sockets we need to validate | ||
| 2281 | * the source on each packet | ||
| 2282 | */ | ||
| 2283 | if (!inet_sk(sk)->inet_daddr && in_dev) | ||
| 2284 | return ip_mc_validate_source(skb, iph->daddr, | ||
| 2285 | iph->saddr, iph->tos, | ||
| 2286 | skb->dev, in_dev, &itag); | ||
| 2280 | } | 2287 | } |
| 2288 | return 0; | ||
| 2281 | } | 2289 | } |
| 2282 | 2290 | ||
| 2283 | int udp_rcv(struct sk_buff *skb) | 2291 | int udp_rcv(struct sk_buff *skb) |
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 97658bfc1b58..e360d55be555 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c | |||
| @@ -120,7 +120,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, | |||
| 120 | * will be using a length value equal to only one MSS sized | 120 | * will be using a length value equal to only one MSS sized |
| 121 | * segment instead of the entire frame. | 121 | * segment instead of the entire frame. |
| 122 | */ | 122 | */ |
| 123 | if (gso_partial) { | 123 | if (gso_partial && skb_is_gso(skb)) { |
| 124 | uh->len = htons(skb_shinfo(skb)->gso_size + | 124 | uh->len = htons(skb_shinfo(skb)->gso_size + |
| 125 | SKB_GSO_CB(skb)->data_offset + | 125 | SKB_GSO_CB(skb)->data_offset + |
| 126 | skb->head - (unsigned char *)uh); | 126 | skb->head - (unsigned char *)uh); |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 96861c702c06..4a96ebbf8eda 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -3820,8 +3820,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) | |||
| 3820 | goto out; | 3820 | goto out; |
| 3821 | 3821 | ||
| 3822 | if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || | 3822 | if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || |
| 3823 | dev_net(dev)->ipv6.devconf_all->accept_dad < 1 || | 3823 | (dev_net(dev)->ipv6.devconf_all->accept_dad < 1 && |
| 3824 | idev->cnf.accept_dad < 1 || | 3824 | idev->cnf.accept_dad < 1) || |
| 3825 | !(ifp->flags&IFA_F_TENTATIVE) || | 3825 | !(ifp->flags&IFA_F_TENTATIVE) || |
| 3826 | ifp->flags & IFA_F_NODAD) { | 3826 | ifp->flags & IFA_F_NODAD) { |
| 3827 | bump_id = ifp->flags & IFA_F_TENTATIVE; | 3827 | bump_id = ifp->flags & IFA_F_TENTATIVE; |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 20f66f4c9460..1602b491b281 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
| @@ -1311,6 +1311,7 @@ static void ip6gre_tap_setup(struct net_device *dev) | |||
| 1311 | dev->features |= NETIF_F_NETNS_LOCAL; | 1311 | dev->features |= NETIF_F_NETNS_LOCAL; |
| 1312 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; | 1312 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
| 1313 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; | 1313 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
| 1314 | netif_keep_dst(dev); | ||
| 1314 | } | 1315 | } |
| 1315 | 1316 | ||
| 1316 | static bool ip6gre_netlink_encap_parms(struct nlattr *data[], | 1317 | static bool ip6gre_netlink_encap_parms(struct nlattr *data[], |
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index cdb3728faca7..4a87f9428ca5 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c | |||
| @@ -105,7 +105,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
| 105 | 105 | ||
| 106 | for (skb = segs; skb; skb = skb->next) { | 106 | for (skb = segs; skb; skb = skb->next) { |
| 107 | ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); | 107 | ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); |
| 108 | if (gso_partial) | 108 | if (gso_partial && skb_is_gso(skb)) |
| 109 | payload_len = skb_shinfo(skb)->gso_size + | 109 | payload_len = skb_shinfo(skb)->gso_size + |
| 110 | SKB_GSO_CB(skb)->data_offset + | 110 | SKB_GSO_CB(skb)->data_offset + |
| 111 | skb->head - (unsigned char *)(ipv6h + 1); | 111 | skb->head - (unsigned char *)(ipv6h + 1); |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index f2f21c24915f..a1c24443cd9e 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
| @@ -1043,6 +1043,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, | |||
| 1043 | struct dst_entry *dst = NULL, *ndst = NULL; | 1043 | struct dst_entry *dst = NULL, *ndst = NULL; |
| 1044 | struct net_device *tdev; | 1044 | struct net_device *tdev; |
| 1045 | int mtu; | 1045 | int mtu; |
| 1046 | unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0; | ||
| 1046 | unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; | 1047 | unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; |
| 1047 | unsigned int max_headroom = psh_hlen; | 1048 | unsigned int max_headroom = psh_hlen; |
| 1048 | bool use_cache = false; | 1049 | bool use_cache = false; |
| @@ -1124,7 +1125,7 @@ route_lookup: | |||
| 1124 | t->parms.name); | 1125 | t->parms.name); |
| 1125 | goto tx_err_dst_release; | 1126 | goto tx_err_dst_release; |
| 1126 | } | 1127 | } |
| 1127 | mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen; | 1128 | mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen; |
| 1128 | if (encap_limit >= 0) { | 1129 | if (encap_limit >= 0) { |
| 1129 | max_headroom += 8; | 1130 | max_headroom += 8; |
| 1130 | mtu -= 8; | 1131 | mtu -= 8; |
| @@ -1133,7 +1134,7 @@ route_lookup: | |||
| 1133 | mtu = IPV6_MIN_MTU; | 1134 | mtu = IPV6_MIN_MTU; |
| 1134 | if (skb_dst(skb) && !t->parms.collect_md) | 1135 | if (skb_dst(skb) && !t->parms.collect_md) |
| 1135 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); | 1136 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu); |
| 1136 | if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) { | 1137 | if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) { |
| 1137 | *pmtu = mtu; | 1138 | *pmtu = mtu; |
| 1138 | err = -EMSGSIZE; | 1139 | err = -EMSGSIZE; |
| 1139 | goto tx_err_dst_release; | 1140 | goto tx_err_dst_release; |
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 79444a4bfd6d..bcdc2d557de1 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c | |||
| @@ -445,6 +445,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) | |||
| 445 | struct dst_entry *dst = skb_dst(skb); | 445 | struct dst_entry *dst = skb_dst(skb); |
| 446 | struct net_device *tdev; | 446 | struct net_device *tdev; |
| 447 | struct xfrm_state *x; | 447 | struct xfrm_state *x; |
| 448 | int pkt_len = skb->len; | ||
| 448 | int err = -1; | 449 | int err = -1; |
| 449 | int mtu; | 450 | int mtu; |
| 450 | 451 | ||
| @@ -502,7 +503,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl) | |||
| 502 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); | 503 | struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats); |
| 503 | 504 | ||
| 504 | u64_stats_update_begin(&tstats->syncp); | 505 | u64_stats_update_begin(&tstats->syncp); |
| 505 | tstats->tx_bytes += skb->len; | 506 | tstats->tx_bytes += pkt_len; |
| 506 | tstats->tx_packets++; | 507 | tstats->tx_packets++; |
| 507 | u64_stats_update_end(&tstats->syncp); | 508 | u64_stats_update_end(&tstats->syncp); |
| 508 | } else { | 509 | } else { |
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index a5cd43d75393..437af8c95277 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c | |||
| @@ -353,7 +353,7 @@ static unsigned int ipv6_synproxy_hook(void *priv, | |||
| 353 | nexthdr = ipv6_hdr(skb)->nexthdr; | 353 | nexthdr = ipv6_hdr(skb)->nexthdr; |
| 354 | thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, | 354 | thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, |
| 355 | &frag_off); | 355 | &frag_off); |
| 356 | if (thoff < 0) | 356 | if (thoff < 0 || nexthdr != IPPROTO_TCP) |
| 357 | return NF_ACCEPT; | 357 | return NF_ACCEPT; |
| 358 | 358 | ||
| 359 | th = skb_header_pointer(skb, thoff, sizeof(_th), &_th); | 359 | th = skb_header_pointer(skb, thoff, sizeof(_th), &_th); |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 26cc9f483b6d..a96d5b385d8f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -1325,7 +1325,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori | |||
| 1325 | struct dst_entry *new = NULL; | 1325 | struct dst_entry *new = NULL; |
| 1326 | 1326 | ||
| 1327 | rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1, | 1327 | rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1, |
| 1328 | DST_OBSOLETE_NONE, 0); | 1328 | DST_OBSOLETE_DEAD, 0); |
| 1329 | if (rt) { | 1329 | if (rt) { |
| 1330 | rt6_info_init(rt); | 1330 | rt6_info_init(rt); |
| 1331 | 1331 | ||
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index ee485df73ccd..02d61101b108 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
| @@ -1314,6 +1314,9 @@ again: | |||
| 1314 | 1314 | ||
| 1315 | hlist_del_init(&session->hlist); | 1315 | hlist_del_init(&session->hlist); |
| 1316 | 1316 | ||
| 1317 | if (test_and_set_bit(0, &session->dead)) | ||
| 1318 | goto again; | ||
| 1319 | |||
| 1317 | if (session->ref != NULL) | 1320 | if (session->ref != NULL) |
| 1318 | (*session->ref)(session); | 1321 | (*session->ref)(session); |
| 1319 | 1322 | ||
| @@ -1685,14 +1688,12 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); | |||
| 1685 | 1688 | ||
| 1686 | /* This function is used by the netlink TUNNEL_DELETE command. | 1689 | /* This function is used by the netlink TUNNEL_DELETE command. |
| 1687 | */ | 1690 | */ |
| 1688 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) | 1691 | void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) |
| 1689 | { | 1692 | { |
| 1690 | l2tp_tunnel_inc_refcount(tunnel); | 1693 | if (!test_and_set_bit(0, &tunnel->dead)) { |
| 1691 | if (false == queue_work(l2tp_wq, &tunnel->del_work)) { | 1694 | l2tp_tunnel_inc_refcount(tunnel); |
| 1692 | l2tp_tunnel_dec_refcount(tunnel); | 1695 | queue_work(l2tp_wq, &tunnel->del_work); |
| 1693 | return 1; | ||
| 1694 | } | 1696 | } |
| 1695 | return 0; | ||
| 1696 | } | 1697 | } |
| 1697 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); | 1698 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); |
| 1698 | 1699 | ||
| @@ -1750,6 +1751,9 @@ EXPORT_SYMBOL_GPL(__l2tp_session_unhash); | |||
| 1750 | */ | 1751 | */ |
| 1751 | int l2tp_session_delete(struct l2tp_session *session) | 1752 | int l2tp_session_delete(struct l2tp_session *session) |
| 1752 | { | 1753 | { |
| 1754 | if (test_and_set_bit(0, &session->dead)) | ||
| 1755 | return 0; | ||
| 1756 | |||
| 1753 | if (session->ref) | 1757 | if (session->ref) |
| 1754 | (*session->ref)(session); | 1758 | (*session->ref)(session); |
| 1755 | __l2tp_session_unhash(session); | 1759 | __l2tp_session_unhash(session); |
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index a305e0c5925a..67c79d9b5c6c 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
| @@ -76,6 +76,7 @@ struct l2tp_session_cfg { | |||
| 76 | struct l2tp_session { | 76 | struct l2tp_session { |
| 77 | int magic; /* should be | 77 | int magic; /* should be |
| 78 | * L2TP_SESSION_MAGIC */ | 78 | * L2TP_SESSION_MAGIC */ |
| 79 | long dead; | ||
| 79 | 80 | ||
| 80 | struct l2tp_tunnel *tunnel; /* back pointer to tunnel | 81 | struct l2tp_tunnel *tunnel; /* back pointer to tunnel |
| 81 | * context */ | 82 | * context */ |
| @@ -160,6 +161,9 @@ struct l2tp_tunnel_cfg { | |||
| 160 | 161 | ||
| 161 | struct l2tp_tunnel { | 162 | struct l2tp_tunnel { |
| 162 | int magic; /* Should be L2TP_TUNNEL_MAGIC */ | 163 | int magic; /* Should be L2TP_TUNNEL_MAGIC */ |
| 164 | |||
| 165 | unsigned long dead; | ||
| 166 | |||
| 163 | struct rcu_head rcu; | 167 | struct rcu_head rcu; |
| 164 | rwlock_t hlist_lock; /* protect session_hlist */ | 168 | rwlock_t hlist_lock; /* protect session_hlist */ |
| 165 | bool acpt_newsess; /* Indicates whether this | 169 | bool acpt_newsess; /* Indicates whether this |
| @@ -254,7 +258,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, | |||
| 254 | u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, | 258 | u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, |
| 255 | struct l2tp_tunnel **tunnelp); | 259 | struct l2tp_tunnel **tunnelp); |
| 256 | void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); | 260 | void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel); |
| 257 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); | 261 | void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel); |
| 258 | struct l2tp_session *l2tp_session_create(int priv_size, | 262 | struct l2tp_session *l2tp_session_create(int priv_size, |
| 259 | struct l2tp_tunnel *tunnel, | 263 | struct l2tp_tunnel *tunnel, |
| 260 | u32 session_id, u32 peer_session_id, | 264 | u32 session_id, u32 peer_session_id, |
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c index 87da9ef61860..014a7bc2a872 100644 --- a/net/l2tp/l2tp_eth.c +++ b/net/l2tp/l2tp_eth.c | |||
| @@ -44,7 +44,6 @@ struct l2tp_eth { | |||
| 44 | struct net_device *dev; | 44 | struct net_device *dev; |
| 45 | struct sock *tunnel_sock; | 45 | struct sock *tunnel_sock; |
| 46 | struct l2tp_session *session; | 46 | struct l2tp_session *session; |
| 47 | struct list_head list; | ||
| 48 | atomic_long_t tx_bytes; | 47 | atomic_long_t tx_bytes; |
| 49 | atomic_long_t tx_packets; | 48 | atomic_long_t tx_packets; |
| 50 | atomic_long_t tx_dropped; | 49 | atomic_long_t tx_dropped; |
| @@ -58,17 +57,6 @@ struct l2tp_eth_sess { | |||
| 58 | struct net_device *dev; | 57 | struct net_device *dev; |
| 59 | }; | 58 | }; |
| 60 | 59 | ||
| 61 | /* per-net private data for this module */ | ||
| 62 | static unsigned int l2tp_eth_net_id; | ||
| 63 | struct l2tp_eth_net { | ||
| 64 | struct list_head l2tp_eth_dev_list; | ||
| 65 | spinlock_t l2tp_eth_lock; | ||
| 66 | }; | ||
| 67 | |||
| 68 | static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net) | ||
| 69 | { | ||
| 70 | return net_generic(net, l2tp_eth_net_id); | ||
| 71 | } | ||
| 72 | 60 | ||
| 73 | static int l2tp_eth_dev_init(struct net_device *dev) | 61 | static int l2tp_eth_dev_init(struct net_device *dev) |
| 74 | { | 62 | { |
| @@ -84,12 +72,6 @@ static int l2tp_eth_dev_init(struct net_device *dev) | |||
| 84 | 72 | ||
| 85 | static void l2tp_eth_dev_uninit(struct net_device *dev) | 73 | static void l2tp_eth_dev_uninit(struct net_device *dev) |
| 86 | { | 74 | { |
| 87 | struct l2tp_eth *priv = netdev_priv(dev); | ||
| 88 | struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev)); | ||
| 89 | |||
| 90 | spin_lock(&pn->l2tp_eth_lock); | ||
| 91 | list_del_init(&priv->list); | ||
| 92 | spin_unlock(&pn->l2tp_eth_lock); | ||
| 93 | dev_put(dev); | 75 | dev_put(dev); |
| 94 | } | 76 | } |
| 95 | 77 | ||
| @@ -273,7 +255,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, | |||
| 273 | struct l2tp_eth *priv; | 255 | struct l2tp_eth *priv; |
| 274 | struct l2tp_eth_sess *spriv; | 256 | struct l2tp_eth_sess *spriv; |
| 275 | int rc; | 257 | int rc; |
| 276 | struct l2tp_eth_net *pn; | ||
| 277 | 258 | ||
| 278 | if (cfg->ifname) { | 259 | if (cfg->ifname) { |
| 279 | strlcpy(name, cfg->ifname, IFNAMSIZ); | 260 | strlcpy(name, cfg->ifname, IFNAMSIZ); |
| @@ -305,7 +286,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, | |||
| 305 | priv = netdev_priv(dev); | 286 | priv = netdev_priv(dev); |
| 306 | priv->dev = dev; | 287 | priv->dev = dev; |
| 307 | priv->session = session; | 288 | priv->session = session; |
| 308 | INIT_LIST_HEAD(&priv->list); | ||
| 309 | 289 | ||
| 310 | priv->tunnel_sock = tunnel->sock; | 290 | priv->tunnel_sock = tunnel->sock; |
| 311 | session->recv_skb = l2tp_eth_dev_recv; | 291 | session->recv_skb = l2tp_eth_dev_recv; |
| @@ -326,10 +306,6 @@ static int l2tp_eth_create(struct net *net, struct l2tp_tunnel *tunnel, | |||
| 326 | strlcpy(session->ifname, dev->name, IFNAMSIZ); | 306 | strlcpy(session->ifname, dev->name, IFNAMSIZ); |
| 327 | 307 | ||
| 328 | dev_hold(dev); | 308 | dev_hold(dev); |
| 329 | pn = l2tp_eth_pernet(dev_net(dev)); | ||
| 330 | spin_lock(&pn->l2tp_eth_lock); | ||
| 331 | list_add(&priv->list, &pn->l2tp_eth_dev_list); | ||
| 332 | spin_unlock(&pn->l2tp_eth_lock); | ||
| 333 | 309 | ||
| 334 | return 0; | 310 | return 0; |
| 335 | 311 | ||
| @@ -342,22 +318,6 @@ out: | |||
| 342 | return rc; | 318 | return rc; |
| 343 | } | 319 | } |
| 344 | 320 | ||
| 345 | static __net_init int l2tp_eth_init_net(struct net *net) | ||
| 346 | { | ||
| 347 | struct l2tp_eth_net *pn = net_generic(net, l2tp_eth_net_id); | ||
| 348 | |||
| 349 | INIT_LIST_HEAD(&pn->l2tp_eth_dev_list); | ||
| 350 | spin_lock_init(&pn->l2tp_eth_lock); | ||
| 351 | |||
| 352 | return 0; | ||
| 353 | } | ||
| 354 | |||
| 355 | static struct pernet_operations l2tp_eth_net_ops = { | ||
| 356 | .init = l2tp_eth_init_net, | ||
| 357 | .id = &l2tp_eth_net_id, | ||
| 358 | .size = sizeof(struct l2tp_eth_net), | ||
| 359 | }; | ||
| 360 | |||
| 361 | 321 | ||
| 362 | static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = { | 322 | static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = { |
| 363 | .session_create = l2tp_eth_create, | 323 | .session_create = l2tp_eth_create, |
| @@ -371,25 +331,18 @@ static int __init l2tp_eth_init(void) | |||
| 371 | 331 | ||
| 372 | err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops); | 332 | err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops); |
| 373 | if (err) | 333 | if (err) |
| 374 | goto out; | 334 | goto err; |
| 375 | |||
| 376 | err = register_pernet_device(&l2tp_eth_net_ops); | ||
| 377 | if (err) | ||
| 378 | goto out_unreg; | ||
| 379 | 335 | ||
| 380 | pr_info("L2TP ethernet pseudowire support (L2TPv3)\n"); | 336 | pr_info("L2TP ethernet pseudowire support (L2TPv3)\n"); |
| 381 | 337 | ||
| 382 | return 0; | 338 | return 0; |
| 383 | 339 | ||
| 384 | out_unreg: | 340 | err: |
| 385 | l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); | ||
| 386 | out: | ||
| 387 | return err; | 341 | return err; |
| 388 | } | 342 | } |
| 389 | 343 | ||
| 390 | static void __exit l2tp_eth_exit(void) | 344 | static void __exit l2tp_eth_exit(void) |
| 391 | { | 345 | { |
| 392 | unregister_pernet_device(&l2tp_eth_net_ops); | ||
| 393 | l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); | 346 | l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); |
| 394 | } | 347 | } |
| 395 | 348 | ||
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 50e3ee9a9d61..bc6e8bfc5be4 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
| @@ -437,11 +437,11 @@ static void pppol2tp_session_close(struct l2tp_session *session) | |||
| 437 | 437 | ||
| 438 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); | 438 | BUG_ON(session->magic != L2TP_SESSION_MAGIC); |
| 439 | 439 | ||
| 440 | if (sock) { | 440 | if (sock) |
| 441 | inet_shutdown(sock, SEND_SHUTDOWN); | 441 | inet_shutdown(sock, SEND_SHUTDOWN); |
| 442 | /* Don't let the session go away before our socket does */ | 442 | |
| 443 | l2tp_session_inc_refcount(session); | 443 | /* Don't let the session go away before our socket does */ |
| 444 | } | 444 | l2tp_session_inc_refcount(session); |
| 445 | } | 445 | } |
| 446 | 446 | ||
| 447 | /* Really kill the session socket. (Called from sock_put() if | 447 | /* Really kill the session socket. (Called from sock_put() if |
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index e495b5e484b1..cf84f7b37cd9 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
| @@ -1191,14 +1191,17 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb, | |||
| 1191 | from->family == to->family)) | 1191 | from->family == to->family)) |
| 1192 | return -IPSET_ERR_TYPE_MISMATCH; | 1192 | return -IPSET_ERR_TYPE_MISMATCH; |
| 1193 | 1193 | ||
| 1194 | if (from->ref_netlink || to->ref_netlink) | 1194 | write_lock_bh(&ip_set_ref_lock); |
| 1195 | |||
| 1196 | if (from->ref_netlink || to->ref_netlink) { | ||
| 1197 | write_unlock_bh(&ip_set_ref_lock); | ||
| 1195 | return -EBUSY; | 1198 | return -EBUSY; |
| 1199 | } | ||
| 1196 | 1200 | ||
| 1197 | strncpy(from_name, from->name, IPSET_MAXNAMELEN); | 1201 | strncpy(from_name, from->name, IPSET_MAXNAMELEN); |
| 1198 | strncpy(from->name, to->name, IPSET_MAXNAMELEN); | 1202 | strncpy(from->name, to->name, IPSET_MAXNAMELEN); |
| 1199 | strncpy(to->name, from_name, IPSET_MAXNAMELEN); | 1203 | strncpy(to->name, from_name, IPSET_MAXNAMELEN); |
| 1200 | 1204 | ||
| 1201 | write_lock_bh(&ip_set_ref_lock); | ||
| 1202 | swap(from->ref, to->ref); | 1205 | swap(from->ref, to->ref); |
| 1203 | ip_set(inst, from_id) = to; | 1206 | ip_set(inst, from_id) = to; |
| 1204 | ip_set(inst, to_id) = from; | 1207 | ip_set(inst, to_id) = from; |
| @@ -2072,25 +2075,28 @@ static struct pernet_operations ip_set_net_ops = { | |||
| 2072 | static int __init | 2075 | static int __init |
| 2073 | ip_set_init(void) | 2076 | ip_set_init(void) |
| 2074 | { | 2077 | { |
| 2075 | int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys); | 2078 | int ret = register_pernet_subsys(&ip_set_net_ops); |
| 2079 | |||
| 2080 | if (ret) { | ||
| 2081 | pr_err("ip_set: cannot register pernet_subsys.\n"); | ||
| 2082 | return ret; | ||
| 2083 | } | ||
| 2076 | 2084 | ||
| 2085 | ret = nfnetlink_subsys_register(&ip_set_netlink_subsys); | ||
| 2077 | if (ret != 0) { | 2086 | if (ret != 0) { |
| 2078 | pr_err("ip_set: cannot register with nfnetlink.\n"); | 2087 | pr_err("ip_set: cannot register with nfnetlink.\n"); |
| 2088 | unregister_pernet_subsys(&ip_set_net_ops); | ||
| 2079 | return ret; | 2089 | return ret; |
| 2080 | } | 2090 | } |
| 2091 | |||
| 2081 | ret = nf_register_sockopt(&so_set); | 2092 | ret = nf_register_sockopt(&so_set); |
| 2082 | if (ret != 0) { | 2093 | if (ret != 0) { |
| 2083 | pr_err("SO_SET registry failed: %d\n", ret); | 2094 | pr_err("SO_SET registry failed: %d\n", ret); |
| 2084 | nfnetlink_subsys_unregister(&ip_set_netlink_subsys); | 2095 | nfnetlink_subsys_unregister(&ip_set_netlink_subsys); |
| 2096 | unregister_pernet_subsys(&ip_set_net_ops); | ||
| 2085 | return ret; | 2097 | return ret; |
| 2086 | } | 2098 | } |
| 2087 | ret = register_pernet_subsys(&ip_set_net_ops); | 2099 | |
| 2088 | if (ret) { | ||
| 2089 | pr_err("ip_set: cannot register pernet_subsys.\n"); | ||
| 2090 | nf_unregister_sockopt(&so_set); | ||
| 2091 | nfnetlink_subsys_unregister(&ip_set_netlink_subsys); | ||
| 2092 | return ret; | ||
| 2093 | } | ||
| 2094 | pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL); | 2100 | pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL); |
| 2095 | return 0; | 2101 | return 0; |
| 2096 | } | 2102 | } |
| @@ -2098,9 +2104,10 @@ ip_set_init(void) | |||
| 2098 | static void __exit | 2104 | static void __exit |
| 2099 | ip_set_fini(void) | 2105 | ip_set_fini(void) |
| 2100 | { | 2106 | { |
| 2101 | unregister_pernet_subsys(&ip_set_net_ops); | ||
| 2102 | nf_unregister_sockopt(&so_set); | 2107 | nf_unregister_sockopt(&so_set); |
| 2103 | nfnetlink_subsys_unregister(&ip_set_netlink_subsys); | 2108 | nfnetlink_subsys_unregister(&ip_set_netlink_subsys); |
| 2109 | |||
| 2110 | unregister_pernet_subsys(&ip_set_net_ops); | ||
| 2104 | pr_debug("these are the famous last words\n"); | 2111 | pr_debug("these are the famous last words\n"); |
| 2105 | } | 2112 | } |
| 2106 | 2113 | ||
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index 20bfbd315f61..613eb212cb48 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c | |||
| @@ -123,13 +123,12 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 123 | return ret; | 123 | return ret; |
| 124 | 124 | ||
| 125 | ip &= ip_set_hostmask(h->netmask); | 125 | ip &= ip_set_hostmask(h->netmask); |
| 126 | e.ip = htonl(ip); | ||
| 127 | if (e.ip == 0) | ||
| 128 | return -IPSET_ERR_HASH_ELEM; | ||
| 126 | 129 | ||
| 127 | if (adt == IPSET_TEST) { | 130 | if (adt == IPSET_TEST) |
| 128 | e.ip = htonl(ip); | ||
| 129 | if (e.ip == 0) | ||
| 130 | return -IPSET_ERR_HASH_ELEM; | ||
| 131 | return adtfn(set, &e, &ext, &ext, flags); | 131 | return adtfn(set, &e, &ext, &ext, flags); |
| 132 | } | ||
| 133 | 132 | ||
| 134 | ip_to = ip; | 133 | ip_to = ip; |
| 135 | if (tb[IPSET_ATTR_IP_TO]) { | 134 | if (tb[IPSET_ATTR_IP_TO]) { |
| @@ -148,17 +147,20 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 148 | 147 | ||
| 149 | hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); | 148 | hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); |
| 150 | 149 | ||
| 151 | if (retried) | 150 | if (retried) { |
| 152 | ip = ntohl(h->next.ip); | 151 | ip = ntohl(h->next.ip); |
| 153 | for (; !before(ip_to, ip); ip += hosts) { | ||
| 154 | e.ip = htonl(ip); | 152 | e.ip = htonl(ip); |
| 155 | if (e.ip == 0) | 153 | } |
| 156 | return -IPSET_ERR_HASH_ELEM; | 154 | for (; ip <= ip_to;) { |
| 157 | ret = adtfn(set, &e, &ext, &ext, flags); | 155 | ret = adtfn(set, &e, &ext, &ext, flags); |
| 158 | |||
| 159 | if (ret && !ip_set_eexist(ret, flags)) | 156 | if (ret && !ip_set_eexist(ret, flags)) |
| 160 | return ret; | 157 | return ret; |
| 161 | 158 | ||
| 159 | ip += hosts; | ||
| 160 | e.ip = htonl(ip); | ||
| 161 | if (e.ip == 0) | ||
| 162 | return 0; | ||
| 163 | |||
| 162 | ret = 0; | 164 | ret = 0; |
| 163 | } | 165 | } |
| 164 | return ret; | 166 | return ret; |
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c index b64cf14e8352..f3ba8348cf9d 100644 --- a/net/netfilter/ipset/ip_set_hash_ipmark.c +++ b/net/netfilter/ipset/ip_set_hash_ipmark.c | |||
| @@ -149,7 +149,7 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 149 | 149 | ||
| 150 | if (retried) | 150 | if (retried) |
| 151 | ip = ntohl(h->next.ip); | 151 | ip = ntohl(h->next.ip); |
| 152 | for (; !before(ip_to, ip); ip++) { | 152 | for (; ip <= ip_to; ip++) { |
| 153 | e.ip = htonl(ip); | 153 | e.ip = htonl(ip); |
| 154 | ret = adtfn(set, &e, &ext, &ext, flags); | 154 | ret = adtfn(set, &e, &ext, &ext, flags); |
| 155 | 155 | ||
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index f438740e6c6a..ddb8039ec1d2 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c | |||
| @@ -178,7 +178,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 178 | 178 | ||
| 179 | if (retried) | 179 | if (retried) |
| 180 | ip = ntohl(h->next.ip); | 180 | ip = ntohl(h->next.ip); |
| 181 | for (; !before(ip_to, ip); ip++) { | 181 | for (; ip <= ip_to; ip++) { |
| 182 | p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) | 182 | p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) |
| 183 | : port; | 183 | : port; |
| 184 | for (; p <= port_to; p++) { | 184 | for (; p <= port_to; p++) { |
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index 6215fb898c50..a7f4d7a85420 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c | |||
| @@ -185,7 +185,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 185 | 185 | ||
| 186 | if (retried) | 186 | if (retried) |
| 187 | ip = ntohl(h->next.ip); | 187 | ip = ntohl(h->next.ip); |
| 188 | for (; !before(ip_to, ip); ip++) { | 188 | for (; ip <= ip_to; ip++) { |
| 189 | p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) | 189 | p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) |
| 190 | : port; | 190 | : port; |
| 191 | for (; p <= port_to; p++) { | 191 | for (; p <= port_to; p++) { |
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index 5ab1b99a53c2..a2f19b9906e9 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c | |||
| @@ -271,7 +271,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 271 | 271 | ||
| 272 | if (retried) | 272 | if (retried) |
| 273 | ip = ntohl(h->next.ip); | 273 | ip = ntohl(h->next.ip); |
| 274 | for (; !before(ip_to, ip); ip++) { | 274 | for (; ip <= ip_to; ip++) { |
| 275 | e.ip = htonl(ip); | 275 | e.ip = htonl(ip); |
| 276 | p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) | 276 | p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) |
| 277 | : port; | 277 | : port; |
| @@ -281,7 +281,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 281 | ip == ntohl(h->next.ip) && | 281 | ip == ntohl(h->next.ip) && |
| 282 | p == ntohs(h->next.port) | 282 | p == ntohs(h->next.port) |
| 283 | ? ntohl(h->next.ip2) : ip2_from; | 283 | ? ntohl(h->next.ip2) : ip2_from; |
| 284 | while (!after(ip2, ip2_to)) { | 284 | while (ip2 <= ip2_to) { |
| 285 | e.ip2 = htonl(ip2); | 285 | e.ip2 = htonl(ip2); |
| 286 | ip2_last = ip_set_range_to_cidr(ip2, ip2_to, | 286 | ip2_last = ip_set_range_to_cidr(ip2, ip2_to, |
| 287 | &cidr); | 287 | &cidr); |
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 5d9e895452e7..1c67a1761e45 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c | |||
| @@ -193,7 +193,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 193 | } | 193 | } |
| 194 | if (retried) | 194 | if (retried) |
| 195 | ip = ntohl(h->next.ip); | 195 | ip = ntohl(h->next.ip); |
| 196 | while (!after(ip, ip_to)) { | 196 | while (ip <= ip_to) { |
| 197 | e.ip = htonl(ip); | 197 | e.ip = htonl(ip); |
| 198 | last = ip_set_range_to_cidr(ip, ip_to, &e.cidr); | 198 | last = ip_set_range_to_cidr(ip, ip_to, &e.cidr); |
| 199 | ret = adtfn(set, &e, &ext, &ext, flags); | 199 | ret = adtfn(set, &e, &ext, &ext, flags); |
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 44cf11939c91..d417074f1c1a 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c | |||
| @@ -255,7 +255,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 255 | 255 | ||
| 256 | if (retried) | 256 | if (retried) |
| 257 | ip = ntohl(h->next.ip); | 257 | ip = ntohl(h->next.ip); |
| 258 | while (!after(ip, ip_to)) { | 258 | while (ip <= ip_to) { |
| 259 | e.ip = htonl(ip); | 259 | e.ip = htonl(ip); |
| 260 | last = ip_set_range_to_cidr(ip, ip_to, &e.cidr); | 260 | last = ip_set_range_to_cidr(ip, ip_to, &e.cidr); |
| 261 | ret = adtfn(set, &e, &ext, &ext, flags); | 261 | ret = adtfn(set, &e, &ext, &ext, flags); |
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c index db614e13b193..7f9ae2e9645b 100644 --- a/net/netfilter/ipset/ip_set_hash_netnet.c +++ b/net/netfilter/ipset/ip_set_hash_netnet.c | |||
| @@ -250,13 +250,13 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 250 | if (retried) | 250 | if (retried) |
| 251 | ip = ntohl(h->next.ip[0]); | 251 | ip = ntohl(h->next.ip[0]); |
| 252 | 252 | ||
| 253 | while (!after(ip, ip_to)) { | 253 | while (ip <= ip_to) { |
| 254 | e.ip[0] = htonl(ip); | 254 | e.ip[0] = htonl(ip); |
| 255 | last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); | 255 | last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); |
| 256 | ip2 = (retried && | 256 | ip2 = (retried && |
| 257 | ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1]) | 257 | ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1]) |
| 258 | : ip2_from; | 258 | : ip2_from; |
| 259 | while (!after(ip2, ip2_to)) { | 259 | while (ip2 <= ip2_to) { |
| 260 | e.ip[1] = htonl(ip2); | 260 | e.ip[1] = htonl(ip2); |
| 261 | last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]); | 261 | last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]); |
| 262 | ret = adtfn(set, &e, &ext, &ext, flags); | 262 | ret = adtfn(set, &e, &ext, &ext, flags); |
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index 54b64b6cd0cd..e6ef382febe4 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c | |||
| @@ -241,7 +241,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 241 | 241 | ||
| 242 | if (retried) | 242 | if (retried) |
| 243 | ip = ntohl(h->next.ip); | 243 | ip = ntohl(h->next.ip); |
| 244 | while (!after(ip, ip_to)) { | 244 | while (ip <= ip_to) { |
| 245 | e.ip = htonl(ip); | 245 | e.ip = htonl(ip); |
| 246 | last = ip_set_range_to_cidr(ip, ip_to, &cidr); | 246 | last = ip_set_range_to_cidr(ip, ip_to, &cidr); |
| 247 | e.cidr = cidr - 1; | 247 | e.cidr = cidr - 1; |
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c index aff846960ac4..8602f2595a1a 100644 --- a/net/netfilter/ipset/ip_set_hash_netportnet.c +++ b/net/netfilter/ipset/ip_set_hash_netportnet.c | |||
| @@ -291,7 +291,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 291 | if (retried) | 291 | if (retried) |
| 292 | ip = ntohl(h->next.ip[0]); | 292 | ip = ntohl(h->next.ip[0]); |
| 293 | 293 | ||
| 294 | while (!after(ip, ip_to)) { | 294 | while (ip <= ip_to) { |
| 295 | e.ip[0] = htonl(ip); | 295 | e.ip[0] = htonl(ip); |
| 296 | ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); | 296 | ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); |
| 297 | p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port) | 297 | p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port) |
| @@ -301,7 +301,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
| 301 | ip2 = (retried && ip == ntohl(h->next.ip[0]) && | 301 | ip2 = (retried && ip == ntohl(h->next.ip[0]) && |
| 302 | p == ntohs(h->next.port)) ? ntohl(h->next.ip[1]) | 302 | p == ntohs(h->next.port)) ? ntohl(h->next.ip[1]) |
| 303 | : ip2_from; | 303 | : ip2_from; |
| 304 | while (!after(ip2, ip2_to)) { | 304 | while (ip2 <= ip2_to) { |
| 305 | e.ip[1] = htonl(ip2); | 305 | e.ip[1] = htonl(ip2); |
| 306 | ip2_last = ip_set_range_to_cidr(ip2, ip2_to, | 306 | ip2_last = ip_set_range_to_cidr(ip2, ip2_to, |
| 307 | &e.cidr[1]); | 307 | &e.cidr[1]); |
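A minimal, standalone user-space illustration of why the ipset hunks above drop the before()/after() range checks in favour of plain host-order comparisons: those helpers implement wrapping 32-bit TCP sequence-number ordering, so once an add range spans more than 2^31 addresses the old loops decide the start is already "after" the end and exit immediately. This is a sketch, not kernel code; seq_after() below merely mirrors the kernel's after() so the effect can be shown outside the kernel.

    /* Sketch only: wrapping sequence-number ordering vs. plain unsigned
     * comparison for host-order IPv4 addresses. */
    #include <stdio.h>
    #include <stdint.h>

    static int seq_after(uint32_t seq1, uint32_t seq2)  /* mirrors after() */
    {
            return (int32_t)(seq2 - seq1) < 0;
    }

    int main(void)
    {
            uint32_t ip    = 0x01000000;   /* 1.0.0.0   */
            uint32_t ip_to = 0xdf000000;   /* 223.0.0.0 */

            /* The range spans more than 2^31 addresses, so the wrapping
             * helper claims ip is already past ip_to and the old
             * "while (!after(ip, ip_to))" loop body would never run. */
            printf("!seq_after(ip, ip_to) = %d\n", !seq_after(ip, ip_to)); /* 0 */
            printf("ip <= ip_to           = %d\n", ip <= ip_to);           /* 1 */
            return 0;
    }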
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 90d396814798..4527921b1c3a 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
| @@ -921,6 +921,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, | |||
| 921 | { | 921 | { |
| 922 | struct sk_buff *new_skb = NULL; | 922 | struct sk_buff *new_skb = NULL; |
| 923 | struct iphdr *old_iph = NULL; | 923 | struct iphdr *old_iph = NULL; |
| 924 | __u8 old_dsfield; | ||
| 924 | #ifdef CONFIG_IP_VS_IPV6 | 925 | #ifdef CONFIG_IP_VS_IPV6 |
| 925 | struct ipv6hdr *old_ipv6h = NULL; | 926 | struct ipv6hdr *old_ipv6h = NULL; |
| 926 | #endif | 927 | #endif |
| @@ -945,7 +946,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, | |||
| 945 | *payload_len = | 946 | *payload_len = |
| 946 | ntohs(old_ipv6h->payload_len) + | 947 | ntohs(old_ipv6h->payload_len) + |
| 947 | sizeof(*old_ipv6h); | 948 | sizeof(*old_ipv6h); |
| 948 | *dsfield = ipv6_get_dsfield(old_ipv6h); | 949 | old_dsfield = ipv6_get_dsfield(old_ipv6h); |
| 949 | *ttl = old_ipv6h->hop_limit; | 950 | *ttl = old_ipv6h->hop_limit; |
| 950 | if (df) | 951 | if (df) |
| 951 | *df = 0; | 952 | *df = 0; |
| @@ -960,12 +961,15 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, | |||
| 960 | 961 | ||
| 961 | /* fix old IP header checksum */ | 962 | /* fix old IP header checksum */ |
| 962 | ip_send_check(old_iph); | 963 | ip_send_check(old_iph); |
| 963 | *dsfield = ipv4_get_dsfield(old_iph); | 964 | old_dsfield = ipv4_get_dsfield(old_iph); |
| 964 | *ttl = old_iph->ttl; | 965 | *ttl = old_iph->ttl; |
| 965 | if (payload_len) | 966 | if (payload_len) |
| 966 | *payload_len = ntohs(old_iph->tot_len); | 967 | *payload_len = ntohs(old_iph->tot_len); |
| 967 | } | 968 | } |
| 968 | 969 | ||
| 970 | /* Implement full-functionality option for ECN encapsulation */ | ||
| 971 | *dsfield = INET_ECN_encapsulate(old_dsfield, old_dsfield); | ||
| 972 | |||
| 969 | return skb; | 973 | return skb; |
| 970 | error: | 974 | error: |
| 971 | kfree_skb(skb); | 975 | kfree_skb(skb); |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 929927171426..64e1ee091225 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -1048,7 +1048,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, | |||
| 1048 | if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name)) | 1048 | if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name)) |
| 1049 | goto nla_put_failure; | 1049 | goto nla_put_failure; |
| 1050 | 1050 | ||
| 1051 | if (nft_dump_stats(skb, nft_base_chain(chain)->stats)) | 1051 | if (basechain->stats && nft_dump_stats(skb, basechain->stats)) |
| 1052 | goto nla_put_failure; | 1052 | goto nla_put_failure; |
| 1053 | } | 1053 | } |
| 1054 | 1054 | ||
| @@ -1487,8 +1487,8 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, | |||
| 1487 | 1487 | ||
| 1488 | chain2 = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], | 1488 | chain2 = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], |
| 1489 | genmask); | 1489 | genmask); |
| 1490 | if (IS_ERR(chain2)) | 1490 | if (!IS_ERR(chain2)) |
| 1491 | return PTR_ERR(chain2); | 1491 | return -EEXIST; |
| 1492 | } | 1492 | } |
| 1493 | 1493 | ||
| 1494 | if (nla[NFTA_CHAIN_COUNTERS]) { | 1494 | if (nla[NFTA_CHAIN_COUNTERS]) { |
| @@ -2741,8 +2741,10 @@ cont: | |||
| 2741 | list_for_each_entry(i, &ctx->table->sets, list) { | 2741 | list_for_each_entry(i, &ctx->table->sets, list) { |
| 2742 | if (!nft_is_active_next(ctx->net, i)) | 2742 | if (!nft_is_active_next(ctx->net, i)) |
| 2743 | continue; | 2743 | continue; |
| 2744 | if (!strcmp(set->name, i->name)) | 2744 | if (!strcmp(set->name, i->name)) { |
| 2745 | kfree(set->name); | ||
| 2745 | return -ENFILE; | 2746 | return -ENFILE; |
| 2747 | } | ||
| 2746 | } | 2748 | } |
| 2747 | return 0; | 2749 | return 0; |
| 2748 | } | 2750 | } |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index c83a3b5e1c6c..d8571f414208 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
| @@ -892,7 +892,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, | |||
| 892 | if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0) | 892 | if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0) |
| 893 | return ERR_PTR(-EFAULT); | 893 | return ERR_PTR(-EFAULT); |
| 894 | 894 | ||
| 895 | strlcpy(info->name, compat_tmp.name, sizeof(info->name)); | 895 | memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1); |
| 896 | info->num_counters = compat_tmp.num_counters; | 896 | info->num_counters = compat_tmp.num_counters; |
| 897 | user += sizeof(compat_tmp); | 897 | user += sizeof(compat_tmp); |
| 898 | } else | 898 | } else |
| @@ -905,9 +905,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, | |||
| 905 | if (copy_from_user(info, user, sizeof(*info)) != 0) | 905 | if (copy_from_user(info, user, sizeof(*info)) != 0) |
| 906 | return ERR_PTR(-EFAULT); | 906 | return ERR_PTR(-EFAULT); |
| 907 | 907 | ||
| 908 | info->name[sizeof(info->name) - 1] = '\0'; | ||
| 909 | user += sizeof(*info); | 908 | user += sizeof(*info); |
| 910 | } | 909 | } |
| 910 | info->name[sizeof(info->name) - 1] = '\0'; | ||
| 911 | 911 | ||
| 912 | size = sizeof(struct xt_counters); | 912 | size = sizeof(struct xt_counters); |
| 913 | size *= info->num_counters; | 913 | size *= info->num_counters; |
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c index 38986a95216c..29123934887b 100644 --- a/net/netfilter/xt_bpf.c +++ b/net/netfilter/xt_bpf.c | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | */ | 8 | */ |
| 9 | 9 | ||
| 10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| 11 | #include <linux/syscalls.h> | ||
| 11 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
| 12 | #include <linux/filter.h> | 13 | #include <linux/filter.h> |
| 13 | #include <linux/bpf.h> | 14 | #include <linux/bpf.h> |
| @@ -49,6 +50,22 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret) | |||
| 49 | return 0; | 50 | return 0; |
| 50 | } | 51 | } |
| 51 | 52 | ||
| 53 | static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret) | ||
| 54 | { | ||
| 55 | mm_segment_t oldfs = get_fs(); | ||
| 56 | int retval, fd; | ||
| 57 | |||
| 58 | set_fs(KERNEL_DS); | ||
| 59 | fd = bpf_obj_get_user(path); | ||
| 60 | set_fs(oldfs); | ||
| 61 | if (fd < 0) | ||
| 62 | return fd; | ||
| 63 | |||
| 64 | retval = __bpf_mt_check_fd(fd, ret); | ||
| 65 | sys_close(fd); | ||
| 66 | return retval; | ||
| 67 | } | ||
| 68 | |||
| 52 | static int bpf_mt_check(const struct xt_mtchk_param *par) | 69 | static int bpf_mt_check(const struct xt_mtchk_param *par) |
| 53 | { | 70 | { |
| 54 | struct xt_bpf_info *info = par->matchinfo; | 71 | struct xt_bpf_info *info = par->matchinfo; |
| @@ -66,9 +83,10 @@ static int bpf_mt_check_v1(const struct xt_mtchk_param *par) | |||
| 66 | return __bpf_mt_check_bytecode(info->bpf_program, | 83 | return __bpf_mt_check_bytecode(info->bpf_program, |
| 67 | info->bpf_program_num_elem, | 84 | info->bpf_program_num_elem, |
| 68 | &info->filter); | 85 | &info->filter); |
| 69 | else if (info->mode == XT_BPF_MODE_FD_PINNED || | 86 | else if (info->mode == XT_BPF_MODE_FD_ELF) |
| 70 | info->mode == XT_BPF_MODE_FD_ELF) | ||
| 71 | return __bpf_mt_check_fd(info->fd, &info->filter); | 87 | return __bpf_mt_check_fd(info->fd, &info->filter); |
| 88 | else if (info->mode == XT_BPF_MODE_PATH_PINNED) | ||
| 89 | return __bpf_mt_check_path(info->path, &info->filter); | ||
| 72 | else | 90 | else |
| 73 | return -EINVAL; | 91 | return -EINVAL; |
| 74 | } | 92 | } |
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index e75ef39669c5..575d2153e3b8 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c | |||
| @@ -76,7 +76,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, | |||
| 76 | transparent = nf_sk_is_transparent(sk); | 76 | transparent = nf_sk_is_transparent(sk); |
| 77 | 77 | ||
| 78 | if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && | 78 | if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && |
| 79 | transparent) | 79 | transparent && sk_fullsock(sk)) |
| 80 | pskb->mark = sk->sk_mark; | 80 | pskb->mark = sk->sk_mark; |
| 81 | 81 | ||
| 82 | if (sk != skb->sk) | 82 | if (sk != skb->sk) |
| @@ -133,7 +133,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par) | |||
| 133 | transparent = nf_sk_is_transparent(sk); | 133 | transparent = nf_sk_is_transparent(sk); |
| 134 | 134 | ||
| 135 | if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && | 135 | if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && |
| 136 | transparent) | 136 | transparent && sk_fullsock(sk)) |
| 137 | pskb->mark = sk->sk_mark; | 137 | pskb->mark = sk->sk_mark; |
| 138 | 138 | ||
| 139 | if (sk != skb->sk) | 139 | if (sk != skb->sk) |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 327807731b44..f34750691c5c 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -2266,14 +2266,18 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
| 2266 | cb->min_dump_alloc = control->min_dump_alloc; | 2266 | cb->min_dump_alloc = control->min_dump_alloc; |
| 2267 | cb->skb = skb; | 2267 | cb->skb = skb; |
| 2268 | 2268 | ||
| 2269 | if (cb->start) { | ||
| 2270 | ret = cb->start(cb); | ||
| 2271 | if (ret) | ||
| 2272 | goto error_unlock; | ||
| 2273 | } | ||
| 2274 | |||
| 2269 | nlk->cb_running = true; | 2275 | nlk->cb_running = true; |
| 2270 | 2276 | ||
| 2271 | mutex_unlock(nlk->cb_mutex); | 2277 | mutex_unlock(nlk->cb_mutex); |
| 2272 | 2278 | ||
| 2273 | if (cb->start) | ||
| 2274 | cb->start(cb); | ||
| 2275 | |||
| 2276 | ret = netlink_dump(sk); | 2279 | ret = netlink_dump(sk); |
| 2280 | |||
| 2277 | sock_put(sk); | 2281 | sock_put(sk); |
| 2278 | 2282 | ||
| 2279 | if (ret) | 2283 | if (ret) |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index d288f52c53f7..bec01a3daf5b 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -2840,6 +2840,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 2840 | struct virtio_net_hdr vnet_hdr = { 0 }; | 2840 | struct virtio_net_hdr vnet_hdr = { 0 }; |
| 2841 | int offset = 0; | 2841 | int offset = 0; |
| 2842 | struct packet_sock *po = pkt_sk(sk); | 2842 | struct packet_sock *po = pkt_sk(sk); |
| 2843 | bool has_vnet_hdr = false; | ||
| 2843 | int hlen, tlen, linear; | 2844 | int hlen, tlen, linear; |
| 2844 | int extra_len = 0; | 2845 | int extra_len = 0; |
| 2845 | 2846 | ||
| @@ -2883,6 +2884,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 2883 | err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); | 2884 | err = packet_snd_vnet_parse(msg, &len, &vnet_hdr); |
| 2884 | if (err) | 2885 | if (err) |
| 2885 | goto out_unlock; | 2886 | goto out_unlock; |
| 2887 | has_vnet_hdr = true; | ||
| 2886 | } | 2888 | } |
| 2887 | 2889 | ||
| 2888 | if (unlikely(sock_flag(sk, SOCK_NOFCS))) { | 2890 | if (unlikely(sock_flag(sk, SOCK_NOFCS))) { |
| @@ -2941,7 +2943,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) | |||
| 2941 | skb->priority = sk->sk_priority; | 2943 | skb->priority = sk->sk_priority; |
| 2942 | skb->mark = sockc.mark; | 2944 | skb->mark = sockc.mark; |
| 2943 | 2945 | ||
| 2944 | if (po->has_vnet_hdr) { | 2946 | if (has_vnet_hdr) { |
| 2945 | err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); | 2947 | err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le()); |
| 2946 | if (err) | 2948 | if (err) |
| 2947 | goto out_free; | 2949 | goto out_free; |
| @@ -3069,13 +3071,15 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex, | |||
| 3069 | int ret = 0; | 3071 | int ret = 0; |
| 3070 | bool unlisted = false; | 3072 | bool unlisted = false; |
| 3071 | 3073 | ||
| 3072 | if (po->fanout) | ||
| 3073 | return -EINVAL; | ||
| 3074 | |||
| 3075 | lock_sock(sk); | 3074 | lock_sock(sk); |
| 3076 | spin_lock(&po->bind_lock); | 3075 | spin_lock(&po->bind_lock); |
| 3077 | rcu_read_lock(); | 3076 | rcu_read_lock(); |
| 3078 | 3077 | ||
| 3078 | if (po->fanout) { | ||
| 3079 | ret = -EINVAL; | ||
| 3080 | goto out_unlock; | ||
| 3081 | } | ||
| 3082 | |||
| 3079 | if (name) { | 3083 | if (name) { |
| 3080 | dev = dev_get_by_name_rcu(sock_net(sk), name); | 3084 | dev = dev_get_by_name_rcu(sock_net(sk), name); |
| 3081 | if (!dev) { | 3085 | if (!dev) { |
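The packet_do_bind() change above moves the fanout check under lock_sock()/bind_lock, so a socket already attached to a fanout group is rejected with EINVAL while the locks are held rather than before taking them. A rough userspace sketch of the bind path it guards, illustrative only; "lo" is an arbitrary choice of interface and CAP_NET_RAW is required to create the socket.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <net/if.h>
#include <netinet/in.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

int main(void)
{
	/* Needs CAP_NET_RAW; ETH_P_ALL captures every protocol. */
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	struct sockaddr_ll sll;
	memset(&sll, 0, sizeof(sll));
	sll.sll_family = AF_PACKET;
	sll.sll_protocol = htons(ETH_P_ALL);
	sll.sll_ifindex = if_nametoindex("lo");	/* arbitrary example interface */

	/* After the fix, a socket already in a fanout group gets EINVAL
	 * here, decided under lock_sock()/bind_lock. */
	if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
		perror("bind");
		close(fd);
		return 1;
	}
	puts("bound");
	close(fd);
	return 0;
}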
diff --git a/net/sctp/sctp_diag.c b/net/sctp/sctp_diag.c index 22ed01a76b19..a72a7d925d46 100644 --- a/net/sctp/sctp_diag.c +++ b/net/sctp/sctp_diag.c | |||
| @@ -463,6 +463,7 @@ static void sctp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb, | |||
| 463 | .r = r, | 463 | .r = r, |
| 464 | .net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN), | 464 | .net_admin = netlink_net_capable(cb->skb, CAP_NET_ADMIN), |
| 465 | }; | 465 | }; |
| 466 | int pos = cb->args[2]; | ||
| 466 | 467 | ||
| 467 | /* eps hashtable dumps | 468 | /* eps hashtable dumps |
| 468 | * args: | 469 | * args: |
| @@ -493,7 +494,8 @@ skip: | |||
| 493 | goto done; | 494 | goto done; |
| 494 | 495 | ||
| 495 | sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump, | 496 | sctp_for_each_transport(sctp_sock_filter, sctp_sock_dump, |
| 496 | net, (int *)&cb->args[2], &commp); | 497 | net, &pos, &commp); |
| 498 | cb->args[2] = pos; | ||
| 497 | 499 | ||
| 498 | done: | 500 | done: |
| 499 | cb->args[1] = cb->args[4]; | 501 | cb->args[1] = cb->args[4]; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 9b5de31aa429..c1841f234a71 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
| @@ -2203,7 +2203,7 @@ static void xs_udp_setup_socket(struct work_struct *work) | |||
| 2203 | struct sock_xprt *transport = | 2203 | struct sock_xprt *transport = |
| 2204 | container_of(work, struct sock_xprt, connect_worker.work); | 2204 | container_of(work, struct sock_xprt, connect_worker.work); |
| 2205 | struct rpc_xprt *xprt = &transport->xprt; | 2205 | struct rpc_xprt *xprt = &transport->xprt; |
| 2206 | struct socket *sock = transport->sock; | 2206 | struct socket *sock; |
| 2207 | int status = -EIO; | 2207 | int status = -EIO; |
| 2208 | 2208 | ||
| 2209 | sock = xs_create_sock(xprt, transport, | 2209 | sock = xs_create_sock(xprt, transport, |
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 7d99029df342..a140dd4a84af 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
| @@ -233,7 +233,7 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, | |||
| 233 | struct sk_buff_head xmitq; | 233 | struct sk_buff_head xmitq; |
| 234 | int rc = 0; | 234 | int rc = 0; |
| 235 | 235 | ||
| 236 | __skb_queue_head_init(&xmitq); | 236 | skb_queue_head_init(&xmitq); |
| 237 | tipc_bcast_lock(net); | 237 | tipc_bcast_lock(net); |
| 238 | if (tipc_link_bc_peers(l)) | 238 | if (tipc_link_bc_peers(l)) |
| 239 | rc = tipc_link_xmit(l, pkts, &xmitq); | 239 | rc = tipc_link_xmit(l, pkts, &xmitq); |
| @@ -263,7 +263,7 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts, | |||
| 263 | u32 dst, selector; | 263 | u32 dst, selector; |
| 264 | 264 | ||
| 265 | selector = msg_link_selector(buf_msg(skb_peek(pkts))); | 265 | selector = msg_link_selector(buf_msg(skb_peek(pkts))); |
| 266 | __skb_queue_head_init(&_pkts); | 266 | skb_queue_head_init(&_pkts); |
| 267 | 267 | ||
| 268 | list_for_each_entry_safe(n, tmp, &dests->list, list) { | 268 | list_for_each_entry_safe(n, tmp, &dests->list, list) { |
| 269 | dst = n->value; | 269 | dst = n->value; |
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 6ef379f004ac..17146c16ee2d 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
| @@ -551,7 +551,7 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err) | |||
| 551 | return false; | 551 | return false; |
| 552 | if (msg_errcode(msg)) | 552 | if (msg_errcode(msg)) |
| 553 | return false; | 553 | return false; |
| 554 | *err = -TIPC_ERR_NO_NAME; | 554 | *err = TIPC_ERR_NO_NAME; |
| 555 | if (skb_linearize(skb)) | 555 | if (skb_linearize(skb)) |
| 556 | return false; | 556 | return false; |
| 557 | msg = buf_msg(skb); | 557 | msg = buf_msg(skb); |
| @@ -568,6 +568,14 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err) | |||
| 568 | msg_set_destnode(msg, dnode); | 568 | msg_set_destnode(msg, dnode); |
| 569 | msg_set_destport(msg, dport); | 569 | msg_set_destport(msg, dport); |
| 570 | *err = TIPC_OK; | 570 | *err = TIPC_OK; |
| 571 | |||
| 572 | if (!skb_cloned(skb)) | ||
| 573 | return true; | ||
| 574 | |||
| 575 | /* Unclone buffer in case it was bundled */ | ||
| 576 | if (pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) | ||
| 577 | return false; | ||
| 578 | |||
| 571 | return true; | 579 | return true; |
| 572 | } | 580 | } |
| 573 | 581 | ||
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 690874293cfc..d396cb61a280 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
| @@ -549,6 +549,14 @@ nl80211_nan_srf_policy[NL80211_NAN_SRF_ATTR_MAX + 1] = { | |||
| 549 | [NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED }, | 549 | [NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED }, |
| 550 | }; | 550 | }; |
| 551 | 551 | ||
| 552 | /* policy for packet pattern attributes */ | ||
| 553 | static const struct nla_policy | ||
| 554 | nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = { | ||
| 555 | [NL80211_PKTPAT_MASK] = { .type = NLA_BINARY, }, | ||
| 556 | [NL80211_PKTPAT_PATTERN] = { .type = NLA_BINARY, }, | ||
| 557 | [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 }, | ||
| 558 | }; | ||
| 559 | |||
| 552 | static int nl80211_prepare_wdev_dump(struct sk_buff *skb, | 560 | static int nl80211_prepare_wdev_dump(struct sk_buff *skb, |
| 553 | struct netlink_callback *cb, | 561 | struct netlink_callback *cb, |
| 554 | struct cfg80211_registered_device **rdev, | 562 | struct cfg80211_registered_device **rdev, |
| @@ -10532,7 +10540,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) | |||
| 10532 | u8 *mask_pat; | 10540 | u8 *mask_pat; |
| 10533 | 10541 | ||
| 10534 | nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, | 10542 | nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, |
| 10535 | NULL, info->extack); | 10543 | nl80211_packet_pattern_policy, |
| 10544 | info->extack); | ||
| 10536 | err = -EINVAL; | 10545 | err = -EINVAL; |
| 10537 | if (!pat_tb[NL80211_PKTPAT_MASK] || | 10546 | if (!pat_tb[NL80211_PKTPAT_MASK] || |
| 10538 | !pat_tb[NL80211_PKTPAT_PATTERN]) | 10547 | !pat_tb[NL80211_PKTPAT_PATTERN]) |
| @@ -10781,7 +10790,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, | |||
| 10781 | rem) { | 10790 | rem) { |
| 10782 | u8 *mask_pat; | 10791 | u8 *mask_pat; |
| 10783 | 10792 | ||
| 10784 | nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, NULL, NULL); | 10793 | nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, |
| 10794 | nl80211_packet_pattern_policy, NULL); | ||
| 10785 | if (!pat_tb[NL80211_PKTPAT_MASK] || | 10795 | if (!pat_tb[NL80211_PKTPAT_MASK] || |
| 10786 | !pat_tb[NL80211_PKTPAT_PATTERN]) | 10796 | !pat_tb[NL80211_PKTPAT_PATTERN]) |
| 10787 | return -EINVAL; | 10797 | return -EINVAL; |
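The nl80211.c change above introduces nl80211_packet_pattern_policy so nla_parse_nested() validates the MASK/PATTERN/OFFSET attributes instead of accepting arbitrary contents. The following is only a loose userspace analogy of what such a policy buys: a walk over a netlink attribute blob that enforces a minimum payload length per type before trusting it. The attribute numbers and the min_len table are invented for this sketch and are not the nl80211 definitions.

#include <stdio.h>
#include <string.h>
#include <linux/netlink.h>

/* Per-type minimum payload lengths, standing in for an nla_policy.
 * The type numbers are made up for this sketch. */
enum { PAT_MASK = 1, PAT_PATTERN = 2, PAT_OFFSET = 3, PAT_MAX = 3 };
static const int min_len[PAT_MAX + 1] = {
	[PAT_MASK] = 1, [PAT_PATTERN] = 1, [PAT_OFFSET] = 4,
};

static int parse(const void *buf, int len, const struct nlattr *tb[PAT_MAX + 1])
{
	const struct nlattr *nla = buf;

	while (len >= (int)sizeof(*nla) &&
	       nla->nla_len >= sizeof(*nla) && nla->nla_len <= len) {
		int type = nla->nla_type & NLA_TYPE_MASK;
		int payload = nla->nla_len - NLA_HDRLEN;

		/* Reject attributes shorter than the policy allows, much as
		 * nla_parse_nested() does with a real nla_policy. */
		if (type <= PAT_MAX) {
			if (payload < min_len[type])
				return -1;
			tb[type] = nla;
		}
		len -= NLA_ALIGN(nla->nla_len);
		nla = (const struct nlattr *)((const char *)nla +
					      NLA_ALIGN(nla->nla_len));
	}
	return 0;
}

int main(void)
{
	struct {
		struct nlattr hdr;
		unsigned int off;
	} attr;
	const struct nlattr *tb[PAT_MAX + 1] = { 0 };

	attr.hdr.nla_type = PAT_OFFSET;
	attr.hdr.nla_len = NLA_HDRLEN + sizeof(attr.off);
	attr.off = 42;

	int ret = parse(&attr, attr.hdr.nla_len, tb);
	printf("parse: %d, offset attribute %s\n",
	       ret, tb[PAT_OFFSET] ? "present" : "missing");
	return 0;
}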
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index acf00104ef31..30e5746085b8 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c | |||
| @@ -91,6 +91,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, | |||
| 91 | } | 91 | } |
| 92 | 92 | ||
| 93 | if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) { | 93 | if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) { |
| 94 | xso->dev = NULL; | ||
| 94 | dev_put(dev); | 95 | dev_put(dev); |
| 95 | return 0; | 96 | return 0; |
| 96 | } | 97 | } |
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 2515cd2bc5db..8ac9d32fb79d 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c | |||
| @@ -429,7 +429,8 @@ resume: | |||
| 429 | nf_reset(skb); | 429 | nf_reset(skb); |
| 430 | 430 | ||
| 431 | if (decaps) { | 431 | if (decaps) { |
| 432 | skb->sp->olen = 0; | 432 | if (skb->sp) |
| 433 | skb->sp->olen = 0; | ||
| 433 | skb_dst_drop(skb); | 434 | skb_dst_drop(skb); |
| 434 | gro_cells_receive(&gro_cells, skb); | 435 | gro_cells_receive(&gro_cells, skb); |
| 435 | return 0; | 436 | return 0; |
| @@ -440,7 +441,8 @@ resume: | |||
| 440 | 441 | ||
| 441 | err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async); | 442 | err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async); |
| 442 | if (xfrm_gro) { | 443 | if (xfrm_gro) { |
| 443 | skb->sp->olen = 0; | 444 | if (skb->sp) |
| 445 | skb->sp->olen = 0; | ||
| 444 | skb_dst_drop(skb); | 446 | skb_dst_drop(skb); |
| 445 | gro_cells_receive(&gro_cells, skb); | 447 | gro_cells_receive(&gro_cells, skb); |
| 446 | return err; | 448 | return err; |
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 0dab1cd79ce4..12213477cd3a 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
| @@ -732,12 +732,12 @@ restart: | |||
| 732 | } | 732 | } |
| 733 | } | 733 | } |
| 734 | } | 734 | } |
| 735 | out: | ||
| 736 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); | ||
| 735 | if (cnt) { | 737 | if (cnt) { |
| 736 | err = 0; | 738 | err = 0; |
| 737 | xfrm_policy_cache_flush(); | 739 | xfrm_policy_cache_flush(); |
| 738 | } | 740 | } |
| 739 | out: | ||
| 740 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); | ||
| 741 | return err; | 741 | return err; |
| 742 | } | 742 | } |
| 743 | EXPORT_SYMBOL(xfrm_state_flush); | 743 | EXPORT_SYMBOL(xfrm_state_flush); |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 2bfbd9121e3b..b997f1395357 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
| @@ -657,6 +657,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
| 657 | 657 | ||
| 658 | if (err < 0) { | 658 | if (err < 0) { |
| 659 | x->km.state = XFRM_STATE_DEAD; | 659 | x->km.state = XFRM_STATE_DEAD; |
| 660 | xfrm_dev_state_delete(x); | ||
| 660 | __xfrm_state_put(x); | 661 | __xfrm_state_put(x); |
| 661 | goto out; | 662 | goto out; |
| 662 | } | 663 | } |
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index dd2c262aebbf..8b80bac055e4 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
| @@ -6390,7 +6390,7 @@ sub process { | |||
| 6390 | exit(0); | 6390 | exit(0); |
| 6391 | } | 6391 | } |
| 6392 | 6392 | ||
| 6393 | if (!$is_patch && $file !~ /cover-letter\.patch$/) { | 6393 | if (!$is_patch && $filename !~ /cover-letter\.patch$/) { |
| 6394 | ERROR("NOT_UNIFIED_DIFF", | 6394 | ERROR("NOT_UNIFIED_DIFF", |
| 6395 | "Does not appear to be a unified-diff format patch\n"); | 6395 | "Does not appear to be a unified-diff format patch\n"); |
| 6396 | } | 6396 | } |
diff --git a/scripts/faddr2line b/scripts/faddr2line index 29df825d375c..2f6ce802397d 100755 --- a/scripts/faddr2line +++ b/scripts/faddr2line | |||
| @@ -103,11 +103,12 @@ __faddr2line() { | |||
| 103 | 103 | ||
| 104 | # Go through each of the object's symbols which match the func name. | 104 | # Go through each of the object's symbols which match the func name. |
| 105 | # In rare cases there might be duplicates. | 105 | # In rare cases there might be duplicates. |
| 106 | file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}') | ||
| 106 | while read symbol; do | 107 | while read symbol; do |
| 107 | local fields=($symbol) | 108 | local fields=($symbol) |
| 108 | local sym_base=0x${fields[0]} | 109 | local sym_base=0x${fields[0]} |
| 109 | local sym_type=${fields[1]} | 110 | local sym_type=${fields[1]} |
| 110 | local sym_end=0x${fields[3]} | 111 | local sym_end=${fields[3]} |
| 111 | 112 | ||
| 112 | # calculate the size | 113 | # calculate the size |
| 113 | local sym_size=$(($sym_end - $sym_base)) | 114 | local sym_size=$(($sym_end - $sym_base)) |
| @@ -157,7 +158,7 @@ __faddr2line() { | |||
| 157 | addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;" | 158 | addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;" |
| 158 | DONE=1 | 159 | DONE=1 |
| 159 | 160 | ||
| 160 | done < <(nm -n $objfile | awk -v fn=$func '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, $1 }') | 161 | done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }') |
| 161 | } | 162 | } |
| 162 | 163 | ||
| 163 | [[ $# -lt 2 ]] && usage | 164 | [[ $# -lt 2 ]] && usage |
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 5d554419170b..9ee9bf7fd1a2 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c | |||
| @@ -158,7 +158,7 @@ static int read_symbol(FILE *in, struct sym_entry *s) | |||
| 158 | else if (str[0] == '$') | 158 | else if (str[0] == '$') |
| 159 | return -1; | 159 | return -1; |
| 160 | /* exclude debugging symbols */ | 160 | /* exclude debugging symbols */ |
| 161 | else if (stype == 'N') | 161 | else if (stype == 'N' || stype == 'n') |
| 162 | return -1; | 162 | return -1; |
| 163 | 163 | ||
| 164 | /* include the type field in the symbol name, so that it gets | 164 | /* include the type field in the symbol name, so that it gets |
diff --git a/scripts/spelling.txt b/scripts/spelling.txt index 400ef35169c5..aa0cc49ad1ad 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt | |||
| @@ -53,6 +53,7 @@ acumulator||accumulator | |||
| 53 | adapater||adapter | 53 | adapater||adapter |
| 54 | addional||additional | 54 | addional||additional |
| 55 | additionaly||additionally | 55 | additionaly||additionally |
| 56 | additonal||additional | ||
| 56 | addres||address | 57 | addres||address |
| 57 | adddress||address | 58 | adddress||address |
| 58 | addreses||addresses | 59 | addreses||addresses |
| @@ -67,6 +68,8 @@ adviced||advised | |||
| 67 | afecting||affecting | 68 | afecting||affecting |
| 68 | againt||against | 69 | againt||against |
| 69 | agaist||against | 70 | agaist||against |
| 71 | aggreataon||aggregation | ||
| 72 | aggreation||aggregation | ||
| 70 | albumns||albums | 73 | albumns||albums |
| 71 | alegorical||allegorical | 74 | alegorical||allegorical |
| 72 | algined||aligned | 75 | algined||aligned |
| @@ -80,6 +83,8 @@ aligment||alignment | |||
| 80 | alignement||alignment | 83 | alignement||alignment |
| 81 | allign||align | 84 | allign||align |
| 82 | alligned||aligned | 85 | alligned||aligned |
| 86 | alllocate||allocate | ||
| 87 | alloated||allocated | ||
| 83 | allocatote||allocate | 88 | allocatote||allocate |
| 84 | allocatrd||allocated | 89 | allocatrd||allocated |
| 85 | allocte||allocate | 90 | allocte||allocate |
| @@ -171,6 +176,7 @@ availale||available | |||
| 171 | availavility||availability | 176 | availavility||availability |
| 172 | availble||available | 177 | availble||available |
| 173 | availiable||available | 178 | availiable||available |
| 179 | availible||available | ||
| 174 | avalable||available | 180 | avalable||available |
| 175 | avaliable||available | 181 | avaliable||available |
| 176 | aysnc||async | 182 | aysnc||async |
| @@ -203,6 +209,7 @@ broadcat||broadcast | |||
| 203 | cacluated||calculated | 209 | cacluated||calculated |
| 204 | caculation||calculation | 210 | caculation||calculation |
| 205 | calender||calendar | 211 | calender||calendar |
| 212 | calescing||coalescing | ||
| 206 | calle||called | 213 | calle||called |
| 207 | callibration||calibration | 214 | callibration||calibration |
| 208 | calucate||calculate | 215 | calucate||calculate |
| @@ -210,6 +217,7 @@ calulate||calculate | |||
| 210 | cancelation||cancellation | 217 | cancelation||cancellation |
| 211 | cancle||cancel | 218 | cancle||cancel |
| 212 | capabilites||capabilities | 219 | capabilites||capabilities |
| 220 | capabilty||capability | ||
| 213 | capabitilies||capabilities | 221 | capabitilies||capabilities |
| 214 | capatibilities||capabilities | 222 | capatibilities||capabilities |
| 215 | capapbilities||capabilities | 223 | capapbilities||capabilities |
| @@ -302,6 +310,7 @@ containts||contains | |||
| 302 | contaisn||contains | 310 | contaisn||contains |
| 303 | contant||contact | 311 | contant||contact |
| 304 | contence||contents | 312 | contence||contents |
| 313 | continious||continuous | ||
| 305 | continous||continuous | 314 | continous||continuous |
| 306 | continously||continuously | 315 | continously||continuously |
| 307 | continueing||continuing | 316 | continueing||continuing |
| @@ -393,6 +402,7 @@ differrence||difference | |||
| 393 | diffrent||different | 402 | diffrent||different |
| 394 | diffrentiate||differentiate | 403 | diffrentiate||differentiate |
| 395 | difinition||definition | 404 | difinition||definition |
| 405 | dimesions||dimensions | ||
| 396 | diplay||display | 406 | diplay||display |
| 397 | direectly||directly | 407 | direectly||directly |
| 398 | disassocation||disassociation | 408 | disassocation||disassociation |
| @@ -449,6 +459,7 @@ equiped||equipped | |||
| 449 | equivelant||equivalent | 459 | equivelant||equivalent |
| 450 | equivilant||equivalent | 460 | equivilant||equivalent |
| 451 | eror||error | 461 | eror||error |
| 462 | errorr||error | ||
| 452 | estbalishment||establishment | 463 | estbalishment||establishment |
| 453 | etsablishment||establishment | 464 | etsablishment||establishment |
| 454 | etsbalishment||establishment | 465 | etsbalishment||establishment |
| @@ -481,6 +492,7 @@ failied||failed | |||
| 481 | faillure||failure | 492 | faillure||failure |
| 482 | failue||failure | 493 | failue||failure |
| 483 | failuer||failure | 494 | failuer||failure |
| 495 | failng||failing | ||
| 484 | faireness||fairness | 496 | faireness||fairness |
| 485 | falied||failed | 497 | falied||failed |
| 486 | faliure||failure | 498 | faliure||failure |
| @@ -493,6 +505,7 @@ fetaure||feature | |||
| 493 | fetaures||features | 505 | fetaures||features |
| 494 | fileystem||filesystem | 506 | fileystem||filesystem |
| 495 | fimware||firmware | 507 | fimware||firmware |
| 508 | firware||firmware | ||
| 496 | finanize||finalize | 509 | finanize||finalize |
| 497 | findn||find | 510 | findn||find |
| 498 | finilizes||finalizes | 511 | finilizes||finalizes |
| @@ -502,6 +515,7 @@ folloing||following | |||
| 502 | followign||following | 515 | followign||following |
| 503 | followings||following | 516 | followings||following |
| 504 | follwing||following | 517 | follwing||following |
| 518 | fonud||found | ||
| 505 | forseeable||foreseeable | 519 | forseeable||foreseeable |
| 506 | forse||force | 520 | forse||force |
| 507 | fortan||fortran | 521 | fortan||fortran |
| @@ -532,6 +546,7 @@ grabing||grabbing | |||
| 532 | grahical||graphical | 546 | grahical||graphical |
| 533 | grahpical||graphical | 547 | grahpical||graphical |
| 534 | grapic||graphic | 548 | grapic||graphic |
| 549 | grranted||granted | ||
| 535 | guage||gauge | 550 | guage||gauge |
| 536 | guarenteed||guaranteed | 551 | guarenteed||guaranteed |
| 537 | guarentee||guarantee | 552 | guarentee||guarantee |
| @@ -543,6 +558,7 @@ happend||happened | |||
| 543 | harware||hardware | 558 | harware||hardware |
| 544 | heirarchically||hierarchically | 559 | heirarchically||hierarchically |
| 545 | helpfull||helpful | 560 | helpfull||helpful |
| 561 | hybernate||hibernate | ||
| 546 | hierachy||hierarchy | 562 | hierachy||hierarchy |
| 547 | hierarchie||hierarchy | 563 | hierarchie||hierarchy |
| 548 | howver||however | 564 | howver||however |
| @@ -565,16 +581,19 @@ implemenation||implementation | |||
| 565 | implementaiton||implementation | 581 | implementaiton||implementation |
| 566 | implementated||implemented | 582 | implementated||implemented |
| 567 | implemention||implementation | 583 | implemention||implementation |
| 584 | implementd||implemented | ||
| 568 | implemetation||implementation | 585 | implemetation||implementation |
| 569 | implemntation||implementation | 586 | implemntation||implementation |
| 570 | implentation||implementation | 587 | implentation||implementation |
| 571 | implmentation||implementation | 588 | implmentation||implementation |
| 572 | implmenting||implementing | 589 | implmenting||implementing |
| 590 | incative||inactive | ||
| 573 | incomming||incoming | 591 | incomming||incoming |
| 574 | incompatabilities||incompatibilities | 592 | incompatabilities||incompatibilities |
| 575 | incompatable||incompatible | 593 | incompatable||incompatible |
| 576 | inconsistant||inconsistent | 594 | inconsistant||inconsistent |
| 577 | increas||increase | 595 | increas||increase |
| 596 | incremeted||incremented | ||
| 578 | incrment||increment | 597 | incrment||increment |
| 579 | indendation||indentation | 598 | indendation||indentation |
| 580 | indended||intended | 599 | indended||intended |
| @@ -619,6 +638,7 @@ interger||integer | |||
| 619 | intermittant||intermittent | 638 | intermittant||intermittent |
| 620 | internel||internal | 639 | internel||internal |
| 621 | interoprability||interoperability | 640 | interoprability||interoperability |
| 641 | interuupt||interrupt | ||
| 622 | interrface||interface | 642 | interrface||interface |
| 623 | interrrupt||interrupt | 643 | interrrupt||interrupt |
| 624 | interrup||interrupt | 644 | interrup||interrupt |
| @@ -638,8 +658,10 @@ intrrupt||interrupt | |||
| 638 | intterrupt||interrupt | 658 | intterrupt||interrupt |
| 639 | intuative||intuitive | 659 | intuative||intuitive |
| 640 | invaid||invalid | 660 | invaid||invalid |
| 661 | invald||invalid | ||
| 641 | invalde||invalid | 662 | invalde||invalid |
| 642 | invalide||invalid | 663 | invalide||invalid |
| 664 | invalidiate||invalidate | ||
| 643 | invalud||invalid | 665 | invalud||invalid |
| 644 | invididual||individual | 666 | invididual||individual |
| 645 | invokation||invocation | 667 | invokation||invocation |
| @@ -713,6 +735,7 @@ misformed||malformed | |||
| 713 | mispelled||misspelled | 735 | mispelled||misspelled |
| 714 | mispelt||misspelt | 736 | mispelt||misspelt |
| 715 | mising||missing | 737 | mising||missing |
| 738 | mismactch||mismatch | ||
| 716 | missmanaged||mismanaged | 739 | missmanaged||mismanaged |
| 717 | missmatch||mismatch | 740 | missmatch||mismatch |
| 718 | miximum||maximum | 741 | miximum||maximum |
| @@ -731,6 +754,7 @@ multidimensionnal||multidimensional | |||
| 731 | multple||multiple | 754 | multple||multiple |
| 732 | mumber||number | 755 | mumber||number |
| 733 | muticast||multicast | 756 | muticast||multicast |
| 757 | mutilcast||multicast | ||
| 734 | mutiple||multiple | 758 | mutiple||multiple |
| 735 | mutli||multi | 759 | mutli||multi |
| 736 | nams||names | 760 | nams||names |
| @@ -834,6 +858,7 @@ posible||possible | |||
| 834 | positon||position | 858 | positon||position |
| 835 | possibilites||possibilities | 859 | possibilites||possibilities |
| 836 | powerfull||powerful | 860 | powerfull||powerful |
| 861 | preample||preamble | ||
| 837 | preapre||prepare | 862 | preapre||prepare |
| 838 | preceeded||preceded | 863 | preceeded||preceded |
| 839 | preceeding||preceding | 864 | preceeding||preceding |
| @@ -1059,6 +1084,7 @@ sturcture||structure | |||
| 1059 | subdirectoires||subdirectories | 1084 | subdirectoires||subdirectories |
| 1060 | suble||subtle | 1085 | suble||subtle |
| 1061 | substract||subtract | 1086 | substract||subtract |
| 1087 | submition||submission | ||
| 1062 | succesfully||successfully | 1088 | succesfully||successfully |
| 1063 | succesful||successful | 1089 | succesful||successful |
| 1064 | successed||succeeded | 1090 | successed||succeeded |
| @@ -1078,6 +1104,7 @@ suppoted||supported | |||
| 1078 | suppported||supported | 1104 | suppported||supported |
| 1079 | suppport||support | 1105 | suppport||support |
| 1080 | supress||suppress | 1106 | supress||suppress |
| 1107 | surpressed||suppressed | ||
| 1081 | surpresses||suppresses | 1108 | surpresses||suppresses |
| 1082 | susbsystem||subsystem | 1109 | susbsystem||subsystem |
| 1083 | suspeneded||suspended | 1110 | suspeneded||suspended |
| @@ -1091,6 +1118,7 @@ swithced||switched | |||
| 1091 | swithcing||switching | 1118 | swithcing||switching |
| 1092 | swithed||switched | 1119 | swithed||switched |
| 1093 | swithing||switching | 1120 | swithing||switching |
| 1121 | swtich||switch | ||
| 1094 | symetric||symmetric | 1122 | symetric||symmetric |
| 1095 | synax||syntax | 1123 | synax||syntax |
| 1096 | synchonized||synchronized | 1124 | synchonized||synchronized |
| @@ -1111,7 +1139,9 @@ therfore||therefore | |||
| 1111 | thier||their | 1139 | thier||their |
| 1112 | threds||threads | 1140 | threds||threads |
| 1113 | threshhold||threshold | 1141 | threshhold||threshold |
| 1142 | thresold||threshold | ||
| 1114 | throught||through | 1143 | throught||through |
| 1144 | troughput||throughput | ||
| 1115 | thses||these | 1145 | thses||these |
| 1116 | tiggered||triggered | 1146 | tiggered||triggered |
| 1117 | tipically||typically | 1147 | tipically||typically |
| @@ -1120,6 +1150,7 @@ tmis||this | |||
| 1120 | torerable||tolerable | 1150 | torerable||tolerable |
| 1121 | tramsmitted||transmitted | 1151 | tramsmitted||transmitted |
| 1122 | tramsmit||transmit | 1152 | tramsmit||transmit |
| 1153 | tranasction||transaction | ||
| 1123 | tranfer||transfer | 1154 | tranfer||transfer |
| 1124 | transciever||transceiver | 1155 | transciever||transceiver |
| 1125 | transferd||transferred | 1156 | transferd||transferred |
| @@ -1133,6 +1164,7 @@ trasmission||transmission | |||
| 1133 | treshold||threshold | 1164 | treshold||threshold |
| 1134 | trigerring||triggering | 1165 | trigerring||triggering |
| 1135 | trun||turn | 1166 | trun||turn |
| 1167 | tunning||tuning | ||
| 1136 | ture||true | 1168 | ture||true |
| 1137 | tyep||type | 1169 | tyep||type |
| 1138 | udpate||update | 1170 | udpate||update |
| @@ -1199,6 +1231,7 @@ visiters||visitors | |||
| 1199 | vitual||virtual | 1231 | vitual||virtual |
| 1200 | wakeus||wakeups | 1232 | wakeus||wakeups |
| 1201 | wating||waiting | 1233 | wating||waiting |
| 1234 | wiat||wait | ||
| 1202 | wether||whether | 1235 | wether||whether |
| 1203 | whataver||whatever | 1236 | whataver||whatever |
| 1204 | whcih||which | 1237 | whcih||which |
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 319add31b4a4..286171a16ed2 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c | |||
| @@ -1473,7 +1473,7 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name) | |||
| 1473 | * @inode: the object | 1473 | * @inode: the object |
| 1474 | * @name: attribute name | 1474 | * @name: attribute name |
| 1475 | * @buffer: where to put the result | 1475 | * @buffer: where to put the result |
| 1476 | * @alloc: unused | 1476 | * @alloc: duplicate memory |
| 1477 | * | 1477 | * |
| 1478 | * Returns the size of the attribute or an error code | 1478 | * Returns the size of the attribute or an error code |
| 1479 | */ | 1479 | */ |
| @@ -1486,43 +1486,38 @@ static int smack_inode_getsecurity(struct inode *inode, | |||
| 1486 | struct super_block *sbp; | 1486 | struct super_block *sbp; |
| 1487 | struct inode *ip = (struct inode *)inode; | 1487 | struct inode *ip = (struct inode *)inode; |
| 1488 | struct smack_known *isp; | 1488 | struct smack_known *isp; |
| 1489 | int ilen; | ||
| 1490 | int rc = 0; | ||
| 1491 | 1489 | ||
| 1492 | if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) { | 1490 | if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) |
| 1493 | isp = smk_of_inode(inode); | 1491 | isp = smk_of_inode(inode); |
| 1494 | ilen = strlen(isp->smk_known); | 1492 | else { |
| 1495 | *buffer = isp->smk_known; | 1493 | /* |
| 1496 | return ilen; | 1494 | * The rest of the Smack xattrs are only on sockets. |
| 1497 | } | 1495 | */ |
| 1496 | sbp = ip->i_sb; | ||
| 1497 | if (sbp->s_magic != SOCKFS_MAGIC) | ||
| 1498 | return -EOPNOTSUPP; | ||
| 1498 | 1499 | ||
| 1499 | /* | 1500 | sock = SOCKET_I(ip); |
| 1500 | * The rest of the Smack xattrs are only on sockets. | 1501 | if (sock == NULL || sock->sk == NULL) |
| 1501 | */ | 1502 | return -EOPNOTSUPP; |
| 1502 | sbp = ip->i_sb; | ||
| 1503 | if (sbp->s_magic != SOCKFS_MAGIC) | ||
| 1504 | return -EOPNOTSUPP; | ||
| 1505 | 1503 | ||
| 1506 | sock = SOCKET_I(ip); | 1504 | ssp = sock->sk->sk_security; |
| 1507 | if (sock == NULL || sock->sk == NULL) | ||
| 1508 | return -EOPNOTSUPP; | ||
| 1509 | |||
| 1510 | ssp = sock->sk->sk_security; | ||
| 1511 | 1505 | ||
| 1512 | if (strcmp(name, XATTR_SMACK_IPIN) == 0) | 1506 | if (strcmp(name, XATTR_SMACK_IPIN) == 0) |
| 1513 | isp = ssp->smk_in; | 1507 | isp = ssp->smk_in; |
| 1514 | else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) | 1508 | else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) |
| 1515 | isp = ssp->smk_out; | 1509 | isp = ssp->smk_out; |
| 1516 | else | 1510 | else |
| 1517 | return -EOPNOTSUPP; | 1511 | return -EOPNOTSUPP; |
| 1512 | } | ||
| 1518 | 1513 | ||
| 1519 | ilen = strlen(isp->smk_known); | 1514 | if (alloc) { |
| 1520 | if (rc == 0) { | 1515 | *buffer = kstrdup(isp->smk_known, GFP_KERNEL); |
| 1521 | *buffer = isp->smk_known; | 1516 | if (*buffer == NULL) |
| 1522 | rc = ilen; | 1517 | return -ENOMEM; |
| 1523 | } | 1518 | } |
| 1524 | 1519 | ||
| 1525 | return rc; | 1520 | return strlen(isp->smk_known); |
| 1526 | } | 1521 | } |
| 1527 | 1522 | ||
| 1528 | 1523 | ||
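The smack_inode_getsecurity() rewrite above makes the hook return a kstrdup()'d copy of the label when the caller asks for an allocation, instead of handing out a pointer into the LSM's own data. From userspace nothing changes in how the label is read; as a small illustration (not part of the patch, path chosen arbitrarily), this is the getxattr() call that ends up in that hook on a Smack-enabled kernel.

#include <stdio.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/tmp";	/* arbitrary example path */
	char label[256];
	ssize_t n;

	/* On a Smack-enabled kernel this reaches smack_inode_getsecurity();
	 * with the change above the LSM hands back a duplicated label when
	 * asked to allocate, rather than a pointer to internal storage. */
	n = getxattr(path, "security.SMACK64", label, sizeof(label) - 1);
	if (n < 0) {
		perror("getxattr");
		return 1;
	}
	label[n] = '\0';
	printf("%s: %s\n", path, label);
	return 0;
}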
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c index fec1dfdb14ad..4490a699030b 100644 --- a/sound/core/compress_offload.c +++ b/sound/core/compress_offload.c | |||
| @@ -948,14 +948,13 @@ static const struct file_operations snd_compr_file_ops = { | |||
| 948 | static int snd_compress_dev_register(struct snd_device *device) | 948 | static int snd_compress_dev_register(struct snd_device *device) |
| 949 | { | 949 | { |
| 950 | int ret = -EINVAL; | 950 | int ret = -EINVAL; |
| 951 | char str[16]; | ||
| 952 | struct snd_compr *compr; | 951 | struct snd_compr *compr; |
| 953 | 952 | ||
| 954 | if (snd_BUG_ON(!device || !device->device_data)) | 953 | if (snd_BUG_ON(!device || !device->device_data)) |
| 955 | return -EBADFD; | 954 | return -EBADFD; |
| 956 | compr = device->device_data; | 955 | compr = device->device_data; |
| 957 | 956 | ||
| 958 | pr_debug("reg %s for device %s, direction %d\n", str, compr->name, | 957 | pr_debug("reg device %s, direction %d\n", compr->name, |
| 959 | compr->direction); | 958 | compr->direction); |
| 960 | /* register compressed device */ | 959 | /* register compressed device */ |
| 961 | ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS, | 960 | ret = snd_register_device(SNDRV_DEVICE_TYPE_COMPRESS, |
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c index 3a1cc7b97e46..b719d0bd833e 100644 --- a/sound/core/pcm_compat.c +++ b/sound/core/pcm_compat.c | |||
| @@ -547,6 +547,7 @@ struct snd_pcm_mmap_status_x32 { | |||
| 547 | u32 pad2; /* alignment */ | 547 | u32 pad2; /* alignment */ |
| 548 | struct timespec tstamp; | 548 | struct timespec tstamp; |
| 549 | s32 suspended_state; | 549 | s32 suspended_state; |
| 550 | s32 pad3; | ||
| 550 | struct timespec audio_tstamp; | 551 | struct timespec audio_tstamp; |
| 551 | } __packed; | 552 | } __packed; |
| 552 | 553 | ||
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index ea2d0ae85bd3..6c9cba2166d9 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c | |||
| @@ -1259,6 +1259,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg) | |||
| 1259 | struct snd_seq_port_info *info = arg; | 1259 | struct snd_seq_port_info *info = arg; |
| 1260 | struct snd_seq_client_port *port; | 1260 | struct snd_seq_client_port *port; |
| 1261 | struct snd_seq_port_callback *callback; | 1261 | struct snd_seq_port_callback *callback; |
| 1262 | int port_idx; | ||
| 1262 | 1263 | ||
| 1263 | /* it is not allowed to create the port for an another client */ | 1264 | /* it is not allowed to create the port for an another client */ |
| 1264 | if (info->addr.client != client->number) | 1265 | if (info->addr.client != client->number) |
| @@ -1269,7 +1270,9 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg) | |||
| 1269 | return -ENOMEM; | 1270 | return -ENOMEM; |
| 1270 | 1271 | ||
| 1271 | if (client->type == USER_CLIENT && info->kernel) { | 1272 | if (client->type == USER_CLIENT && info->kernel) { |
| 1272 | snd_seq_delete_port(client, port->addr.port); | 1273 | port_idx = port->addr.port; |
| 1274 | snd_seq_port_unlock(port); | ||
| 1275 | snd_seq_delete_port(client, port_idx); | ||
| 1273 | return -EINVAL; | 1276 | return -EINVAL; |
| 1274 | } | 1277 | } |
| 1275 | if (client->type == KERNEL_CLIENT) { | 1278 | if (client->type == KERNEL_CLIENT) { |
| @@ -1290,6 +1293,7 @@ static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg) | |||
| 1290 | 1293 | ||
| 1291 | snd_seq_set_port_info(port, info); | 1294 | snd_seq_set_port_info(port, info); |
| 1292 | snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port); | 1295 | snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port); |
| 1296 | snd_seq_port_unlock(port); | ||
| 1293 | 1297 | ||
| 1294 | return 0; | 1298 | return 0; |
| 1295 | } | 1299 | } |
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c index 0a7020c82bfc..d21ece9f8d73 100644 --- a/sound/core/seq/seq_ports.c +++ b/sound/core/seq/seq_ports.c | |||
| @@ -122,7 +122,9 @@ static void port_subs_info_init(struct snd_seq_port_subs_info *grp) | |||
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | 124 | ||
| 125 | /* create a port, port number is returned (-1 on failure) */ | 125 | /* create a port, port number is returned (-1 on failure); |
| 126 | * the caller needs to unref the port via snd_seq_port_unlock() appropriately | ||
| 127 | */ | ||
| 126 | struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, | 128 | struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, |
| 127 | int port) | 129 | int port) |
| 128 | { | 130 | { |
| @@ -151,6 +153,7 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, | |||
| 151 | snd_use_lock_init(&new_port->use_lock); | 153 | snd_use_lock_init(&new_port->use_lock); |
| 152 | port_subs_info_init(&new_port->c_src); | 154 | port_subs_info_init(&new_port->c_src); |
| 153 | port_subs_info_init(&new_port->c_dest); | 155 | port_subs_info_init(&new_port->c_dest); |
| 156 | snd_use_lock_use(&new_port->use_lock); | ||
| 154 | 157 | ||
| 155 | num = port >= 0 ? port : 0; | 158 | num = port >= 0 ? port : 0; |
| 156 | mutex_lock(&client->ports_mutex); | 159 | mutex_lock(&client->ports_mutex); |
| @@ -165,9 +168,9 @@ struct snd_seq_client_port *snd_seq_create_port(struct snd_seq_client *client, | |||
| 165 | list_add_tail(&new_port->list, &p->list); | 168 | list_add_tail(&new_port->list, &p->list); |
| 166 | client->num_ports++; | 169 | client->num_ports++; |
| 167 | new_port->addr.port = num; /* store the port number in the port */ | 170 | new_port->addr.port = num; /* store the port number in the port */ |
| 171 | sprintf(new_port->name, "port-%d", num); | ||
| 168 | write_unlock_irqrestore(&client->ports_lock, flags); | 172 | write_unlock_irqrestore(&client->ports_lock, flags); |
| 169 | mutex_unlock(&client->ports_mutex); | 173 | mutex_unlock(&client->ports_mutex); |
| 170 | sprintf(new_port->name, "port-%d", num); | ||
| 171 | 174 | ||
| 172 | return new_port; | 175 | return new_port; |
| 173 | } | 176 | } |
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c index 8d93a4021c78..f48a4cd24ffc 100644 --- a/sound/core/seq/seq_virmidi.c +++ b/sound/core/seq/seq_virmidi.c | |||
| @@ -77,13 +77,17 @@ static void snd_virmidi_init_event(struct snd_virmidi *vmidi, | |||
| 77 | * decode input event and put to read buffer of each opened file | 77 | * decode input event and put to read buffer of each opened file |
| 78 | */ | 78 | */ |
| 79 | static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev, | 79 | static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev, |
| 80 | struct snd_seq_event *ev) | 80 | struct snd_seq_event *ev, |
| 81 | bool atomic) | ||
| 81 | { | 82 | { |
| 82 | struct snd_virmidi *vmidi; | 83 | struct snd_virmidi *vmidi; |
| 83 | unsigned char msg[4]; | 84 | unsigned char msg[4]; |
| 84 | int len; | 85 | int len; |
| 85 | 86 | ||
| 86 | read_lock(&rdev->filelist_lock); | 87 | if (atomic) |
| 88 | read_lock(&rdev->filelist_lock); | ||
| 89 | else | ||
| 90 | down_read(&rdev->filelist_sem); | ||
| 87 | list_for_each_entry(vmidi, &rdev->filelist, list) { | 91 | list_for_each_entry(vmidi, &rdev->filelist, list) { |
| 88 | if (!vmidi->trigger) | 92 | if (!vmidi->trigger) |
| 89 | continue; | 93 | continue; |
| @@ -97,7 +101,10 @@ static int snd_virmidi_dev_receive_event(struct snd_virmidi_dev *rdev, | |||
| 97 | snd_rawmidi_receive(vmidi->substream, msg, len); | 101 | snd_rawmidi_receive(vmidi->substream, msg, len); |
| 98 | } | 102 | } |
| 99 | } | 103 | } |
| 100 | read_unlock(&rdev->filelist_lock); | 104 | if (atomic) |
| 105 | read_unlock(&rdev->filelist_lock); | ||
| 106 | else | ||
| 107 | up_read(&rdev->filelist_sem); | ||
| 101 | 108 | ||
| 102 | return 0; | 109 | return 0; |
| 103 | } | 110 | } |
| @@ -115,7 +122,7 @@ int snd_virmidi_receive(struct snd_rawmidi *rmidi, struct snd_seq_event *ev) | |||
| 115 | struct snd_virmidi_dev *rdev; | 122 | struct snd_virmidi_dev *rdev; |
| 116 | 123 | ||
| 117 | rdev = rmidi->private_data; | 124 | rdev = rmidi->private_data; |
| 118 | return snd_virmidi_dev_receive_event(rdev, ev); | 125 | return snd_virmidi_dev_receive_event(rdev, ev, true); |
| 119 | } | 126 | } |
| 120 | #endif /* 0 */ | 127 | #endif /* 0 */ |
| 121 | 128 | ||
| @@ -130,7 +137,7 @@ static int snd_virmidi_event_input(struct snd_seq_event *ev, int direct, | |||
| 130 | rdev = private_data; | 137 | rdev = private_data; |
| 131 | if (!(rdev->flags & SNDRV_VIRMIDI_USE)) | 138 | if (!(rdev->flags & SNDRV_VIRMIDI_USE)) |
| 132 | return 0; /* ignored */ | 139 | return 0; /* ignored */ |
| 133 | return snd_virmidi_dev_receive_event(rdev, ev); | 140 | return snd_virmidi_dev_receive_event(rdev, ev, atomic); |
| 134 | } | 141 | } |
| 135 | 142 | ||
| 136 | /* | 143 | /* |
| @@ -209,7 +216,6 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream) | |||
| 209 | struct snd_virmidi_dev *rdev = substream->rmidi->private_data; | 216 | struct snd_virmidi_dev *rdev = substream->rmidi->private_data; |
| 210 | struct snd_rawmidi_runtime *runtime = substream->runtime; | 217 | struct snd_rawmidi_runtime *runtime = substream->runtime; |
| 211 | struct snd_virmidi *vmidi; | 218 | struct snd_virmidi *vmidi; |
| 212 | unsigned long flags; | ||
| 213 | 219 | ||
| 214 | vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL); | 220 | vmidi = kzalloc(sizeof(*vmidi), GFP_KERNEL); |
| 215 | if (vmidi == NULL) | 221 | if (vmidi == NULL) |
| @@ -223,9 +229,11 @@ static int snd_virmidi_input_open(struct snd_rawmidi_substream *substream) | |||
| 223 | vmidi->client = rdev->client; | 229 | vmidi->client = rdev->client; |
| 224 | vmidi->port = rdev->port; | 230 | vmidi->port = rdev->port; |
| 225 | runtime->private_data = vmidi; | 231 | runtime->private_data = vmidi; |
| 226 | write_lock_irqsave(&rdev->filelist_lock, flags); | 232 | down_write(&rdev->filelist_sem); |
| 233 | write_lock_irq(&rdev->filelist_lock); | ||
| 227 | list_add_tail(&vmidi->list, &rdev->filelist); | 234 | list_add_tail(&vmidi->list, &rdev->filelist); |
| 228 | write_unlock_irqrestore(&rdev->filelist_lock, flags); | 235 | write_unlock_irq(&rdev->filelist_lock); |
| 236 | up_write(&rdev->filelist_sem); | ||
| 229 | vmidi->rdev = rdev; | 237 | vmidi->rdev = rdev; |
| 230 | return 0; | 238 | return 0; |
| 231 | } | 239 | } |
| @@ -264,9 +272,11 @@ static int snd_virmidi_input_close(struct snd_rawmidi_substream *substream) | |||
| 264 | struct snd_virmidi_dev *rdev = substream->rmidi->private_data; | 272 | struct snd_virmidi_dev *rdev = substream->rmidi->private_data; |
| 265 | struct snd_virmidi *vmidi = substream->runtime->private_data; | 273 | struct snd_virmidi *vmidi = substream->runtime->private_data; |
| 266 | 274 | ||
| 275 | down_write(&rdev->filelist_sem); | ||
| 267 | write_lock_irq(&rdev->filelist_lock); | 276 | write_lock_irq(&rdev->filelist_lock); |
| 268 | list_del(&vmidi->list); | 277 | list_del(&vmidi->list); |
| 269 | write_unlock_irq(&rdev->filelist_lock); | 278 | write_unlock_irq(&rdev->filelist_lock); |
| 279 | up_write(&rdev->filelist_sem); | ||
| 270 | snd_midi_event_free(vmidi->parser); | 280 | snd_midi_event_free(vmidi->parser); |
| 271 | substream->runtime->private_data = NULL; | 281 | substream->runtime->private_data = NULL; |
| 272 | kfree(vmidi); | 282 | kfree(vmidi); |
| @@ -520,6 +530,7 @@ int snd_virmidi_new(struct snd_card *card, int device, struct snd_rawmidi **rrmi | |||
| 520 | rdev->rmidi = rmidi; | 530 | rdev->rmidi = rmidi; |
| 521 | rdev->device = device; | 531 | rdev->device = device; |
| 522 | rdev->client = -1; | 532 | rdev->client = -1; |
| 533 | init_rwsem(&rdev->filelist_sem); | ||
| 523 | rwlock_init(&rdev->filelist_lock); | 534 | rwlock_init(&rdev->filelist_lock); |
| 524 | INIT_LIST_HEAD(&rdev->filelist); | 535 | INIT_LIST_HEAD(&rdev->filelist); |
| 525 | rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH; | 536 | rdev->seq_mode = SNDRV_VIRMIDI_SEQ_DISPATCH; |
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c index 7e3aa50b21f9..5badd08e1d69 100644 --- a/sound/pci/asihpi/hpioctl.c +++ b/sound/pci/asihpi/hpioctl.c | |||
| @@ -103,6 +103,7 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 103 | void __user *puhr; | 103 | void __user *puhr; |
| 104 | union hpi_message_buffer_v1 *hm; | 104 | union hpi_message_buffer_v1 *hm; |
| 105 | union hpi_response_buffer_v1 *hr; | 105 | union hpi_response_buffer_v1 *hr; |
| 106 | u16 msg_size; | ||
| 106 | u16 res_max_size; | 107 | u16 res_max_size; |
| 107 | u32 uncopied_bytes; | 108 | u32 uncopied_bytes; |
| 108 | int err = 0; | 109 | int err = 0; |
| @@ -127,22 +128,25 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 127 | } | 128 | } |
| 128 | 129 | ||
| 129 | /* Now read the message size and data from user space. */ | 130 | /* Now read the message size and data from user space. */ |
| 130 | if (get_user(hm->h.size, (u16 __user *)puhm)) { | 131 | if (get_user(msg_size, (u16 __user *)puhm)) { |
| 131 | err = -EFAULT; | 132 | err = -EFAULT; |
| 132 | goto out; | 133 | goto out; |
| 133 | } | 134 | } |
| 134 | if (hm->h.size > sizeof(*hm)) | 135 | if (msg_size > sizeof(*hm)) |
| 135 | hm->h.size = sizeof(*hm); | 136 | msg_size = sizeof(*hm); |
| 136 | 137 | ||
| 137 | /* printk(KERN_INFO "message size %d\n", hm->h.wSize); */ | 138 | /* printk(KERN_INFO "message size %d\n", hm->h.wSize); */ |
| 138 | 139 | ||
| 139 | uncopied_bytes = copy_from_user(hm, puhm, hm->h.size); | 140 | uncopied_bytes = copy_from_user(hm, puhm, msg_size); |
| 140 | if (uncopied_bytes) { | 141 | if (uncopied_bytes) { |
| 141 | HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes); | 142 | HPI_DEBUG_LOG(ERROR, "uncopied bytes %d\n", uncopied_bytes); |
| 142 | err = -EFAULT; | 143 | err = -EFAULT; |
| 143 | goto out; | 144 | goto out; |
| 144 | } | 145 | } |
| 145 | 146 | ||
| 147 | /* Override h.size in case it is changed between two userspace fetches */ | ||
| 148 | hm->h.size = msg_size; | ||
| 149 | |||
| 146 | if (get_user(res_max_size, (u16 __user *)puhr)) { | 150 | if (get_user(res_max_size, (u16 __user *)puhr)) { |
| 147 | err = -EFAULT; | 151 | err = -EFAULT; |
| 148 | goto out; | 152 | goto out; |
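The hpioctl.c change above is a double-fetch fix: the message size is read from userspace once into msg_size, clamped, and then written back over hm->h.size so later code cannot act on a value that changed between the two fetches. Below is a plain userspace analogy of the same pattern with memcpy() standing in for get_user()/copy_from_user(); the struct msg and copy_msg() names are invented for the sketch.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Invented stand-in for the ioctl message header. */
struct msg {
	uint16_t size;
	uint8_t data[64];
};

/* Copy an untrusted message, reading its size field exactly once. */
static int copy_msg(struct msg *dst, const void *user_buf, size_t user_len)
{
	uint16_t size;

	if (user_len < sizeof(size))
		return -1;
	memcpy(&size, user_buf, sizeof(size));	/* first (and only) size fetch */
	if (size > sizeof(*dst))
		size = sizeof(*dst);		/* clamp, like msg_size in the patch */
	if (size > user_len)
		return -1;

	memcpy(dst, user_buf, size);		/* body fetch */
	dst->size = size;			/* override, as the patch comment says */
	return 0;
}

int main(void)
{
	struct msg src = { .size = 200 /* lies about its own length */ }, dst;

	if (copy_msg(&dst, &src, sizeof(src)) == 0)
		printf("validated size: %u\n", dst.size);
	return 0;
}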
diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c index 7326695bca33..d68f99e076a8 100644 --- a/sound/pci/echoaudio/echoaudio.c +++ b/sound/pci/echoaudio/echoaudio.c | |||
| @@ -1272,11 +1272,11 @@ static int snd_echo_mixer_info(struct snd_kcontrol *kcontrol, | |||
| 1272 | 1272 | ||
| 1273 | chip = snd_kcontrol_chip(kcontrol); | 1273 | chip = snd_kcontrol_chip(kcontrol); |
| 1274 | uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; | 1274 | uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; |
| 1275 | uinfo->count = 1; | ||
| 1275 | uinfo->value.integer.min = ECHOGAIN_MINOUT; | 1276 | uinfo->value.integer.min = ECHOGAIN_MINOUT; |
| 1276 | uinfo->value.integer.max = ECHOGAIN_MAXOUT; | 1277 | uinfo->value.integer.max = ECHOGAIN_MAXOUT; |
| 1277 | uinfo->dimen.d[0] = num_busses_out(chip); | 1278 | uinfo->dimen.d[0] = num_busses_out(chip); |
| 1278 | uinfo->dimen.d[1] = num_busses_in(chip); | 1279 | uinfo->dimen.d[1] = num_busses_in(chip); |
| 1279 | uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1]; | ||
| 1280 | return 0; | 1280 | return 0; |
| 1281 | } | 1281 | } |
| 1282 | 1282 | ||
| @@ -1344,11 +1344,11 @@ static int snd_echo_vmixer_info(struct snd_kcontrol *kcontrol, | |||
| 1344 | 1344 | ||
| 1345 | chip = snd_kcontrol_chip(kcontrol); | 1345 | chip = snd_kcontrol_chip(kcontrol); |
| 1346 | uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; | 1346 | uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; |
| 1347 | uinfo->count = 1; | ||
| 1347 | uinfo->value.integer.min = ECHOGAIN_MINOUT; | 1348 | uinfo->value.integer.min = ECHOGAIN_MINOUT; |
| 1348 | uinfo->value.integer.max = ECHOGAIN_MAXOUT; | 1349 | uinfo->value.integer.max = ECHOGAIN_MAXOUT; |
| 1349 | uinfo->dimen.d[0] = num_busses_out(chip); | 1350 | uinfo->dimen.d[0] = num_busses_out(chip); |
| 1350 | uinfo->dimen.d[1] = num_pipes_out(chip); | 1351 | uinfo->dimen.d[1] = num_pipes_out(chip); |
| 1351 | uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1]; | ||
| 1352 | return 0; | 1352 | return 0; |
| 1353 | } | 1353 | } |
| 1354 | 1354 | ||
| @@ -1728,6 +1728,7 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol, | |||
| 1728 | struct snd_ctl_elem_info *uinfo) | 1728 | struct snd_ctl_elem_info *uinfo) |
| 1729 | { | 1729 | { |
| 1730 | uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; | 1730 | uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; |
| 1731 | uinfo->count = 96; | ||
| 1731 | uinfo->value.integer.min = ECHOGAIN_MINOUT; | 1732 | uinfo->value.integer.min = ECHOGAIN_MINOUT; |
| 1732 | uinfo->value.integer.max = 0; | 1733 | uinfo->value.integer.max = 0; |
| 1733 | #ifdef ECHOCARD_HAS_VMIXER | 1734 | #ifdef ECHOCARD_HAS_VMIXER |
| @@ -1737,7 +1738,6 @@ static int snd_echo_vumeters_info(struct snd_kcontrol *kcontrol, | |||
| 1737 | #endif | 1738 | #endif |
| 1738 | uinfo->dimen.d[1] = 16; /* 16 channels */ | 1739 | uinfo->dimen.d[1] = 16; /* 16 channels */ |
| 1739 | uinfo->dimen.d[2] = 2; /* 0=level, 1=peak */ | 1740 | uinfo->dimen.d[2] = 2; /* 0=level, 1=peak */ |
| 1740 | uinfo->count = uinfo->dimen.d[0] * uinfo->dimen.d[1] * uinfo->dimen.d[2]; | ||
| 1741 | return 0; | 1741 | return 0; |
| 1742 | } | 1742 | } |
| 1743 | 1743 | ||
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 2b64fabd5faa..c19c81d230bd 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
| @@ -906,6 +906,7 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid, | |||
| 906 | hda_nid_t pin_nid, u32 stream_tag, int format) | 906 | hda_nid_t pin_nid, u32 stream_tag, int format) |
| 907 | { | 907 | { |
| 908 | struct hdmi_spec *spec = codec->spec; | 908 | struct hdmi_spec *spec = codec->spec; |
| 909 | unsigned int param; | ||
| 909 | int err; | 910 | int err; |
| 910 | 911 | ||
| 911 | err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format)); | 912 | err = spec->ops.pin_hbr_setup(codec, pin_nid, is_hbr_format(format)); |
| @@ -915,6 +916,26 @@ static int hdmi_setup_stream(struct hda_codec *codec, hda_nid_t cvt_nid, | |||
| 915 | return err; | 916 | return err; |
| 916 | } | 917 | } |
| 917 | 918 | ||
| 919 | if (is_haswell_plus(codec)) { | ||
| 920 | |||
| 921 | /* | ||
| 922 | * on recent platforms IEC Coding Type is required for HBR | ||
| 923 | * support, read current Digital Converter settings and set | ||
| 924 | * ICT bitfield if needed. | ||
| 925 | */ | ||
| 926 | param = snd_hda_codec_read(codec, cvt_nid, 0, | ||
| 927 | AC_VERB_GET_DIGI_CONVERT_1, 0); | ||
| 928 | |||
| 929 | param = (param >> 16) & ~(AC_DIG3_ICT); | ||
| 930 | |||
| 931 | /* on recent platforms ICT mode is required for HBR support */ | ||
| 932 | if (is_hbr_format(format)) | ||
| 933 | param |= 0x1; | ||
| 934 | |||
| 935 | snd_hda_codec_write(codec, cvt_nid, 0, | ||
| 936 | AC_VERB_SET_DIGI_CONVERT_3, param); | ||
| 937 | } | ||
| 938 | |||
| 918 | snd_hda_codec_setup_stream(codec, cvt_nid, stream_tag, 0, format); | 939 | snd_hda_codec_setup_stream(codec, cvt_nid, stream_tag, 0, format); |
| 919 | return 0; | 940 | return 0; |
| 920 | } | 941 | } |
diff --git a/sound/usb/caiaq/device.c b/sound/usb/caiaq/device.c index 0fb6b1b79261..d8409d9ae55b 100644 --- a/sound/usb/caiaq/device.c +++ b/sound/usb/caiaq/device.c | |||
| @@ -469,10 +469,12 @@ static int init_card(struct snd_usb_caiaqdev *cdev) | |||
| 469 | 469 | ||
| 470 | err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0); | 470 | err = snd_usb_caiaq_send_command(cdev, EP1_CMD_GET_DEVICE_INFO, NULL, 0); |
| 471 | if (err) | 471 | if (err) |
| 472 | return err; | 472 | goto err_kill_urb; |
| 473 | 473 | ||
| 474 | if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) | 474 | if (!wait_event_timeout(cdev->ep1_wait_queue, cdev->spec_received, HZ)) { |
| 475 | return -ENODEV; | 475 | err = -ENODEV; |
| 476 | goto err_kill_urb; | ||
| 477 | } | ||
| 476 | 478 | ||
| 477 | usb_string(usb_dev, usb_dev->descriptor.iManufacturer, | 479 | usb_string(usb_dev, usb_dev->descriptor.iManufacturer, |
| 478 | cdev->vendor_name, CAIAQ_USB_STR_LEN); | 480 | cdev->vendor_name, CAIAQ_USB_STR_LEN); |
| @@ -507,6 +509,10 @@ static int init_card(struct snd_usb_caiaqdev *cdev) | |||
| 507 | 509 | ||
| 508 | setup_card(cdev); | 510 | setup_card(cdev); |
| 509 | return 0; | 511 | return 0; |
| 512 | |||
| 513 | err_kill_urb: | ||
| 514 | usb_kill_urb(&cdev->ep1_in_urb); | ||
| 515 | return err; | ||
| 510 | } | 516 | } |
| 511 | 517 | ||
| 512 | static int snd_probe(struct usb_interface *intf, | 518 | static int snd_probe(struct usb_interface *intf, |
diff --git a/sound/usb/card.c b/sound/usb/card.c index 3dc36d913550..23d1d23aefec 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
| @@ -221,6 +221,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) | |||
| 221 | struct usb_interface_descriptor *altsd; | 221 | struct usb_interface_descriptor *altsd; |
| 222 | void *control_header; | 222 | void *control_header; |
| 223 | int i, protocol; | 223 | int i, protocol; |
| 224 | int rest_bytes; | ||
| 224 | 225 | ||
| 225 | /* find audiocontrol interface */ | 226 | /* find audiocontrol interface */ |
| 226 | host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0]; | 227 | host_iface = &usb_ifnum_to_if(dev, ctrlif)->altsetting[0]; |
| @@ -235,6 +236,15 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) | |||
| 235 | return -EINVAL; | 236 | return -EINVAL; |
| 236 | } | 237 | } |
| 237 | 238 | ||
| 239 | rest_bytes = (void *)(host_iface->extra + host_iface->extralen) - | ||
| 240 | control_header; | ||
| 241 | |||
| 242 | /* just to be sure -- this shouldn't hit at all */ | ||
| 243 | if (rest_bytes <= 0) { | ||
| 244 | dev_err(&dev->dev, "invalid control header\n"); | ||
| 245 | return -EINVAL; | ||
| 246 | } | ||
| 247 | |||
| 238 | switch (protocol) { | 248 | switch (protocol) { |
| 239 | default: | 249 | default: |
| 240 | dev_warn(&dev->dev, | 250 | dev_warn(&dev->dev, |
| @@ -245,11 +255,21 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif) | |||
| 245 | case UAC_VERSION_1: { | 255 | case UAC_VERSION_1: { |
| 246 | struct uac1_ac_header_descriptor *h1 = control_header; | 256 | struct uac1_ac_header_descriptor *h1 = control_header; |
| 247 | 257 | ||
| 258 | if (rest_bytes < sizeof(*h1)) { | ||
| 259 | dev_err(&dev->dev, "too short v1 buffer descriptor\n"); | ||
| 260 | return -EINVAL; | ||
| 261 | } | ||
| 262 | |||
| 248 | if (!h1->bInCollection) { | 263 | if (!h1->bInCollection) { |
| 249 | dev_info(&dev->dev, "skipping empty audio interface (v1)\n"); | 264 | dev_info(&dev->dev, "skipping empty audio interface (v1)\n"); |
| 250 | return -EINVAL; | 265 | return -EINVAL; |
| 251 | } | 266 | } |
| 252 | 267 | ||
| 268 | if (rest_bytes < h1->bLength) { | ||
| 269 | dev_err(&dev->dev, "invalid buffer length (v1)\n"); | ||
| 270 | return -EINVAL; | ||
| 271 | } | ||
| 272 | |||
| 253 | if (h1->bLength < sizeof(*h1) + h1->bInCollection) { | 273 | if (h1->bLength < sizeof(*h1) + h1->bInCollection) { |
| 254 | dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n"); | 274 | dev_err(&dev->dev, "invalid UAC_HEADER (v1)\n"); |
| 255 | return -EINVAL; | 275 | return -EINVAL; |
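The card.c change above computes rest_bytes and refuses to parse a control header whose claimed length runs past the descriptor buffer. Here is a generic sketch of the same two-step check, first that the fixed header fits, then that the self-reported bLength does, over an invented descriptor layout (illustrative only, not the UAC structures).

#include <stdio.h>
#include <stdint.h>

/* Invented minimal descriptor: length byte followed by a type byte. */
struct desc {
	uint8_t bLength;
	uint8_t bType;
};

static int parse_descriptors(const uint8_t *buf, int rest_bytes)
{
	while (rest_bytes > 0) {
		const struct desc *d = (const struct desc *)buf;

		/* First make sure the fixed header itself fits ... */
		if (rest_bytes < (int)sizeof(*d))
			return -1;
		/* ... then make sure the claimed length does too. */
		if (d->bLength < sizeof(*d) || d->bLength > rest_bytes)
			return -1;

		printf("descriptor type %u, %u bytes\n", d->bType, d->bLength);
		buf += d->bLength;
		rest_bytes -= d->bLength;
	}
	return 0;
}

int main(void)
{
	const uint8_t good[] = { 4, 1, 0xaa, 0xbb, 2, 2 };
	const uint8_t bad[]  = { 9, 1, 0xaa };	/* bLength overruns the buffer */

	printf("good: %d\n", parse_descriptors(good, sizeof(good)));
	printf("bad:  %d\n", parse_descriptors(bad, sizeof(bad)));
	return 0;
}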
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index 0ff5a7d2e19f..c8f723c3a033 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c | |||
| @@ -779,9 +779,10 @@ int line6_probe(struct usb_interface *interface, | |||
| 779 | return 0; | 779 | return 0; |
| 780 | 780 | ||
| 781 | error: | 781 | error: |
| 782 | if (line6->disconnect) | 782 | /* we can call disconnect callback here because no close-sync is |
| 783 | line6->disconnect(line6); | 783 | * needed yet at this point |
| 784 | snd_card_free(card); | 784 | */ |
| 785 | line6_disconnect(interface); | ||
| 785 | return ret; | 786 | return ret; |
| 786 | } | 787 | } |
| 787 | EXPORT_SYMBOL_GPL(line6_probe); | 788 | EXPORT_SYMBOL_GPL(line6_probe); |
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c index 956f847a96e4..451007c27743 100644 --- a/sound/usb/line6/podhd.c +++ b/sound/usb/line6/podhd.c | |||
| @@ -301,7 +301,8 @@ static void podhd_disconnect(struct usb_line6 *line6) | |||
| 301 | 301 | ||
| 302 | intf = usb_ifnum_to_if(line6->usbdev, | 302 | intf = usb_ifnum_to_if(line6->usbdev, |
| 303 | pod->line6.properties->ctrl_if); | 303 | pod->line6.properties->ctrl_if); |
| 304 | usb_driver_release_interface(&podhd_driver, intf); | 304 | if (intf) |
| 305 | usb_driver_release_interface(&podhd_driver, intf); | ||
| 305 | } | 306 | } |
| 306 | } | 307 | } |
| 307 | 308 | ||
| @@ -317,6 +318,9 @@ static int podhd_init(struct usb_line6 *line6, | |||
| 317 | 318 | ||
| 318 | line6->disconnect = podhd_disconnect; | 319 | line6->disconnect = podhd_disconnect; |
| 319 | 320 | ||
| 321 | init_timer(&pod->startup_timer); | ||
| 322 | INIT_WORK(&pod->startup_work, podhd_startup_workqueue); | ||
| 323 | |||
| 320 | if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) { | 324 | if (pod->line6.properties->capabilities & LINE6_CAP_CONTROL) { |
| 321 | /* claim the data interface */ | 325 | /* claim the data interface */ |
| 322 | intf = usb_ifnum_to_if(line6->usbdev, | 326 | intf = usb_ifnum_to_if(line6->usbdev, |
| @@ -358,8 +362,6 @@ static int podhd_init(struct usb_line6 *line6, | |||
| 358 | } | 362 | } |
| 359 | 363 | ||
| 360 | /* init device and delay registering */ | 364 | /* init device and delay registering */ |
| 361 | init_timer(&pod->startup_timer); | ||
| 362 | INIT_WORK(&pod->startup_work, podhd_startup_workqueue); | ||
| 363 | podhd_startup(pod); | 365 | podhd_startup(pod); |
| 364 | return 0; | 366 | return 0; |
| 365 | } | 367 | } |
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 9732edf77f86..91bc8f18791e 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c | |||
| @@ -2234,6 +2234,9 @@ static int parse_audio_unit(struct mixer_build *state, int unitid) | |||
| 2234 | 2234 | ||
| 2235 | static void snd_usb_mixer_free(struct usb_mixer_interface *mixer) | 2235 | static void snd_usb_mixer_free(struct usb_mixer_interface *mixer) |
| 2236 | { | 2236 | { |
| 2237 | /* kill pending URBs */ | ||
| 2238 | snd_usb_mixer_disconnect(mixer); | ||
| 2239 | |||
| 2237 | kfree(mixer->id_elems); | 2240 | kfree(mixer->id_elems); |
| 2238 | if (mixer->urb) { | 2241 | if (mixer->urb) { |
| 2239 | kfree(mixer->urb->transfer_buffer); | 2242 | kfree(mixer->urb->transfer_buffer); |
| @@ -2584,8 +2587,13 @@ _error: | |||
| 2584 | 2587 | ||
| 2585 | void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer) | 2588 | void snd_usb_mixer_disconnect(struct usb_mixer_interface *mixer) |
| 2586 | { | 2589 | { |
| 2587 | usb_kill_urb(mixer->urb); | 2590 | if (mixer->disconnected) |
| 2588 | usb_kill_urb(mixer->rc_urb); | 2591 | return; |
| 2592 | if (mixer->urb) | ||
| 2593 | usb_kill_urb(mixer->urb); | ||
| 2594 | if (mixer->rc_urb) | ||
| 2595 | usb_kill_urb(mixer->rc_urb); | ||
| 2596 | mixer->disconnected = true; | ||
| 2589 | } | 2597 | } |
| 2590 | 2598 | ||
| 2591 | #ifdef CONFIG_PM | 2599 | #ifdef CONFIG_PM |
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index 2b4b067646ab..545d99b09706 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h | |||
| @@ -22,6 +22,8 @@ struct usb_mixer_interface { | |||
| 22 | struct urb *rc_urb; | 22 | struct urb *rc_urb; |
| 23 | struct usb_ctrlrequest *rc_setup_packet; | 23 | struct usb_ctrlrequest *rc_setup_packet; |
| 24 | u8 rc_buffer[6]; | 24 | u8 rc_buffer[6]; |
| 25 | |||
| 26 | bool disconnected; | ||
| 25 | }; | 27 | }; |
| 26 | 28 | ||
| 27 | #define MAX_CHANNELS 16 /* max logical channels */ | 29 | #define MAX_CHANNELS 16 /* max logical channels */ |
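snd_usb_mixer_free() now kills pending URBs by calling snd_usb_mixer_disconnect(), and the new mixer->disconnected flag makes that call idempotent: a second call from the regular disconnect path returns immediately, and the URB pointers are NULL-checked before usb_kill_urb(). The following stand-alone sketch only demonstrates the idempotent-teardown guard; the struct and names are hypothetical.

    #include <stdbool.h>
    #include <stdio.h>

    struct mixer_like {
        bool disconnected;
        int urbs_pending;
    };

    static void mixer_disconnect(struct mixer_like *m)
    {
        if (m->disconnected)          /* second call is a no-op */
            return;
        if (m->urbs_pending) {
            printf("killing %d pending requests\n", m->urbs_pending);
            m->urbs_pending = 0;
        }
        m->disconnected = true;
    }

    int main(void)
    {
        struct mixer_like m = { .disconnected = false, .urbs_pending = 2 };

        mixer_disconnect(&m);   /* e.g. called from the free path  */
        mixer_disconnect(&m);   /* e.g. called again on disconnect */
        return 0;
    }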
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 913552078285..9ddaae3784f5 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
| @@ -1137,6 +1137,9 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) | |||
| 1137 | case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */ | 1137 | case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */ |
| 1138 | case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */ | 1138 | case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */ |
| 1139 | case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ | 1139 | case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */ |
| 1140 | case USB_ID(0x047F, 0xC022): /* Plantronics C310 */ | ||
| 1141 | case USB_ID(0x047F, 0xC02F): /* Plantronics P610 */ | ||
| 1142 | case USB_ID(0x047F, 0xC036): /* Plantronics C520-M */ | ||
| 1140 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ | 1143 | case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */ |
| 1141 | case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ | 1144 | case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */ |
| 1142 | case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */ | 1145 | case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */ |
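The new Plantronics entries (C310, P610, C520-M) join the same sample-rate quirk list as the surrounding devices. Assuming USB_ID() packs the vendor ID into the high 16 bits and the product ID into the low 16 bits (as it does elsewhere in sound/usb), the tiny program below only illustrates that packing; it is not driver code.

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed to mirror the kernel's USB_ID() helper: vendor in the
     * high 16 bits, product in the low 16 bits. */
    #define USB_ID(vendor, product) (((uint32_t)(vendor) << 16) | (product))

    int main(void)
    {
        printf("Plantronics C310   -> 0x%08x\n", (unsigned)USB_ID(0x047F, 0xC022));
        printf("Plantronics P610   -> 0x%08x\n", (unsigned)USB_ID(0x047F, 0xC02F));
        printf("Plantronics C520-M -> 0x%08x\n", (unsigned)USB_ID(0x047F, 0xC036));
        return 0;
    }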
diff --git a/sound/usb/usx2y/usb_stream.c b/sound/usb/usx2y/usb_stream.c index 4dab49080700..e229abd21652 100644 --- a/sound/usb/usx2y/usb_stream.c +++ b/sound/usb/usx2y/usb_stream.c | |||
| @@ -191,7 +191,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk, | |||
| 191 | } | 191 | } |
| 192 | 192 | ||
| 193 | pg = get_order(read_size); | 193 | pg = get_order(read_size); |
| 194 | sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg); | 194 | sk->s = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO| |
| 195 | __GFP_NOWARN, pg); | ||
| 195 | if (!sk->s) { | 196 | if (!sk->s) { |
| 196 | snd_printk(KERN_WARNING "couldn't __get_free_pages()\n"); | 197 | snd_printk(KERN_WARNING "couldn't __get_free_pages()\n"); |
| 197 | goto out; | 198 | goto out; |
| @@ -211,7 +212,8 @@ struct usb_stream *usb_stream_new(struct usb_stream_kernel *sk, | |||
| 211 | pg = get_order(write_size); | 212 | pg = get_order(write_size); |
| 212 | 213 | ||
| 213 | sk->write_page = | 214 | sk->write_page = |
| 214 | (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO, pg); | 215 | (void *)__get_free_pages(GFP_KERNEL|__GFP_COMP|__GFP_ZERO| |
| 216 | __GFP_NOWARN, pg); | ||
| 215 | if (!sk->write_page) { | 217 | if (!sk->write_page) { |
| 216 | snd_printk(KERN_WARNING "couldn't __get_free_pages()\n"); | 218 | snd_printk(KERN_WARNING "couldn't __get_free_pages()\n"); |
| 217 | usb_stream_free(sk); | 219 | usb_stream_free(sk); |
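Both buffers here are higher-order allocations sized with get_order(), which can legitimately fail under memory fragmentation; since the driver already prints its own warning and unwinds, __GFP_NOWARN suppresses the kernel's page-allocation-failure backtrace. The user-space snippet below only reproduces the size-to-order calculation (assuming a 4 KiB page) to show how quickly the order grows with the requested size.

    #include <stdio.h>

    #define PAGE_SIZE 4096UL   /* assumption for the example */

    /* Smallest order such that (PAGE_SIZE << order) >= size,
     * mirroring what get_order() computes in the kernel. */
    static unsigned int order_for(unsigned long size)
    {
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < size)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long sizes[] = { 4096, 65536, 262144, 1048576 };

        for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
            printf("%8lu bytes -> order %u (%lu pages)\n",
                   sizes[i], order_for(sizes[i]),
                   1UL << order_for(sizes[i]));
        return 0;
    }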
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 43ab5c402f98..f90860d1f897 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h | |||
| @@ -312,7 +312,7 @@ union bpf_attr { | |||
| 312 | * jump into another BPF program | 312 | * jump into another BPF program |
| 313 | * @ctx: context pointer passed to next program | 313 | * @ctx: context pointer passed to next program |
| 314 | * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY | 314 | * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY |
| 315 | * @index: index inside array that selects specific program to run | 315 | * @index: 32-bit index inside array that selects specific program to run |
| 316 | * Return: 0 on success or negative error | 316 | * Return: 0 on success or negative error |
| 317 | * | 317 | * |
| 318 | * int bpf_clone_redirect(skb, ifindex, flags) | 318 | * int bpf_clone_redirect(skb, ifindex, flags) |
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 3d4c3b5e1868..0c977b6e0f8b 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c | |||
| @@ -586,7 +586,7 @@ static void print_sample_brstack(struct perf_sample *sample, | |||
| 586 | thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt); | 586 | thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt); |
| 587 | } | 587 | } |
| 588 | 588 | ||
| 589 | printf("0x%"PRIx64, from); | 589 | printf(" 0x%"PRIx64, from); |
| 590 | if (PRINT_FIELD(DSO)) { | 590 | if (PRINT_FIELD(DSO)) { |
| 591 | printf("("); | 591 | printf("("); |
| 592 | map__fprintf_dsoname(alf.map, stdout); | 592 | map__fprintf_dsoname(alf.map, stdout); |
| @@ -681,7 +681,7 @@ static void print_sample_brstackoff(struct perf_sample *sample, | |||
| 681 | if (alt.map && !alt.map->dso->adjust_symbols) | 681 | if (alt.map && !alt.map->dso->adjust_symbols) |
| 682 | to = map__map_ip(alt.map, to); | 682 | to = map__map_ip(alt.map, to); |
| 683 | 683 | ||
| 684 | printf("0x%"PRIx64, from); | 684 | printf(" 0x%"PRIx64, from); |
| 685 | if (PRINT_FIELD(DSO)) { | 685 | if (PRINT_FIELD(DSO)) { |
| 686 | printf("("); | 686 | printf("("); |
| 687 | map__fprintf_dsoname(alf.map, stdout); | 687 | map__fprintf_dsoname(alf.map, stdout); |
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index be09d77cade0..a971caf3759d 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c | |||
| @@ -685,6 +685,8 @@ static enum match_result match_chain(struct callchain_cursor_node *node, | |||
| 685 | { | 685 | { |
| 686 | struct symbol *sym = node->sym; | 686 | struct symbol *sym = node->sym; |
| 687 | u64 left, right; | 687 | u64 left, right; |
| 688 | struct dso *left_dso = NULL; | ||
| 689 | struct dso *right_dso = NULL; | ||
| 688 | 690 | ||
| 689 | if (callchain_param.key == CCKEY_SRCLINE) { | 691 | if (callchain_param.key == CCKEY_SRCLINE) { |
| 690 | enum match_result match = match_chain_srcline(node, cnode); | 692 | enum match_result match = match_chain_srcline(node, cnode); |
| @@ -696,12 +698,14 @@ static enum match_result match_chain(struct callchain_cursor_node *node, | |||
| 696 | if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) { | 698 | if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) { |
| 697 | left = cnode->ms.sym->start; | 699 | left = cnode->ms.sym->start; |
| 698 | right = sym->start; | 700 | right = sym->start; |
| 701 | left_dso = cnode->ms.map->dso; | ||
| 702 | right_dso = node->map->dso; | ||
| 699 | } else { | 703 | } else { |
| 700 | left = cnode->ip; | 704 | left = cnode->ip; |
| 701 | right = node->ip; | 705 | right = node->ip; |
| 702 | } | 706 | } |
| 703 | 707 | ||
| 704 | if (left == right) { | 708 | if (left == right && left_dso == right_dso) { |
| 705 | if (node->branch) { | 709 | if (node->branch) { |
| 706 | cnode->branch_count++; | 710 | cnode->branch_count++; |
| 707 | 711 | ||
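match_chain() previously compared only the symbols' start addresses; with the new left_dso/right_dso fields, two callchain entries merge only when both the address and the owning DSO match, so identically placed symbols from different objects stay separate. A minimal illustration of keying on the (address, DSO) pair rather than the address alone, with hypothetical types:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct entry {
        uint64_t    addr;   /* symbol start   */
        const char *dso;    /* owning object  */
    };

    static bool same_entry(const struct entry *a, const struct entry *b)
    {
        /* Address alone is ambiguous: compare the (addr, dso) pair. */
        return a->addr == b->addr && a->dso == b->dso;
    }

    int main(void)
    {
        static const char libfoo[] = "libfoo.so";
        static const char libbar[] = "libbar.so";
        struct entry a = { 0x1000, libfoo };
        struct entry b = { 0x1000, libbar };

        printf("match: %s\n", same_entry(&a, &b) ? "yes" : "no"); /* no */
        return 0;
    }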
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index f6257fb4f08c..39b15968eab1 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
| @@ -309,10 +309,11 @@ static char *get_config_name(struct list_head *head_terms) | |||
| 309 | static struct perf_evsel * | 309 | static struct perf_evsel * |
| 310 | __add_event(struct list_head *list, int *idx, | 310 | __add_event(struct list_head *list, int *idx, |
| 311 | struct perf_event_attr *attr, | 311 | struct perf_event_attr *attr, |
| 312 | char *name, struct cpu_map *cpus, | 312 | char *name, struct perf_pmu *pmu, |
| 313 | struct list_head *config_terms, bool auto_merge_stats) | 313 | struct list_head *config_terms, bool auto_merge_stats) |
| 314 | { | 314 | { |
| 315 | struct perf_evsel *evsel; | 315 | struct perf_evsel *evsel; |
| 316 | struct cpu_map *cpus = pmu ? pmu->cpus : NULL; | ||
| 316 | 317 | ||
| 317 | event_attr_init(attr); | 318 | event_attr_init(attr); |
| 318 | 319 | ||
| @@ -323,7 +324,7 @@ __add_event(struct list_head *list, int *idx, | |||
| 323 | (*idx)++; | 324 | (*idx)++; |
| 324 | evsel->cpus = cpu_map__get(cpus); | 325 | evsel->cpus = cpu_map__get(cpus); |
| 325 | evsel->own_cpus = cpu_map__get(cpus); | 326 | evsel->own_cpus = cpu_map__get(cpus); |
| 326 | evsel->system_wide = !!cpus; | 327 | evsel->system_wide = pmu ? pmu->is_uncore : false; |
| 327 | evsel->auto_merge_stats = auto_merge_stats; | 328 | evsel->auto_merge_stats = auto_merge_stats; |
| 328 | 329 | ||
| 329 | if (name) | 330 | if (name) |
| @@ -1233,7 +1234,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state, | |||
| 1233 | 1234 | ||
| 1234 | if (!head_config) { | 1235 | if (!head_config) { |
| 1235 | attr.type = pmu->type; | 1236 | attr.type = pmu->type; |
| 1236 | evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu->cpus, NULL, auto_merge_stats); | 1237 | evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats); |
| 1237 | return evsel ? 0 : -ENOMEM; | 1238 | return evsel ? 0 : -ENOMEM; |
| 1238 | } | 1239 | } |
| 1239 | 1240 | ||
| @@ -1254,7 +1255,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state, | |||
| 1254 | return -EINVAL; | 1255 | return -EINVAL; |
| 1255 | 1256 | ||
| 1256 | evsel = __add_event(list, &parse_state->idx, &attr, | 1257 | evsel = __add_event(list, &parse_state->idx, &attr, |
| 1257 | get_config_name(head_config), pmu->cpus, | 1258 | get_config_name(head_config), pmu, |
| 1258 | &config_terms, auto_merge_stats); | 1259 | &config_terms, auto_merge_stats); |
| 1259 | if (evsel) { | 1260 | if (evsel) { |
| 1260 | evsel->unit = info.unit; | 1261 | evsel->unit = info.unit; |
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index ac16a9db1fb5..1c4d7b4e4fb5 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c | |||
| @@ -470,17 +470,36 @@ static void pmu_read_sysfs(void) | |||
| 470 | closedir(dir); | 470 | closedir(dir); |
| 471 | } | 471 | } |
| 472 | 472 | ||
| 473 | static struct cpu_map *__pmu_cpumask(const char *path) | ||
| 474 | { | ||
| 475 | FILE *file; | ||
| 476 | struct cpu_map *cpus; | ||
| 477 | |||
| 478 | file = fopen(path, "r"); | ||
| 479 | if (!file) | ||
| 480 | return NULL; | ||
| 481 | |||
| 482 | cpus = cpu_map__read(file); | ||
| 483 | fclose(file); | ||
| 484 | return cpus; | ||
| 485 | } | ||
| 486 | |||
| 487 | /* | ||
| 488 | * Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64) | ||
| 489 | * may have a "cpus" file. | ||
| 490 | */ | ||
| 491 | #define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask" | ||
| 492 | #define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus" | ||
| 493 | |||
| 473 | static struct cpu_map *pmu_cpumask(const char *name) | 494 | static struct cpu_map *pmu_cpumask(const char *name) |
| 474 | { | 495 | { |
| 475 | struct stat st; | ||
| 476 | char path[PATH_MAX]; | 496 | char path[PATH_MAX]; |
| 477 | FILE *file; | ||
| 478 | struct cpu_map *cpus; | 497 | struct cpu_map *cpus; |
| 479 | const char *sysfs = sysfs__mountpoint(); | 498 | const char *sysfs = sysfs__mountpoint(); |
| 480 | const char *templates[] = { | 499 | const char *templates[] = { |
| 481 | "%s/bus/event_source/devices/%s/cpumask", | 500 | CPUS_TEMPLATE_UNCORE, |
| 482 | "%s/bus/event_source/devices/%s/cpus", | 501 | CPUS_TEMPLATE_CPU, |
| 483 | NULL | 502 | NULL |
| 484 | }; | 503 | }; |
| 485 | const char **template; | 504 | const char **template; |
| 486 | 505 | ||
| @@ -489,20 +508,25 @@ static struct cpu_map *pmu_cpumask(const char *name) | |||
| 489 | 508 | ||
| 490 | for (template = templates; *template; template++) { | 509 | for (template = templates; *template; template++) { |
| 491 | snprintf(path, PATH_MAX, *template, sysfs, name); | 510 | snprintf(path, PATH_MAX, *template, sysfs, name); |
| 492 | if (stat(path, &st) == 0) | 511 | cpus = __pmu_cpumask(path); |
| 493 | break; | 512 | if (cpus) |
| 513 | return cpus; | ||
| 494 | } | 514 | } |
| 495 | 515 | ||
| 496 | if (!*template) | 516 | return NULL; |
| 497 | return NULL; | 517 | } |
| 498 | 518 | ||
| 499 | file = fopen(path, "r"); | 519 | static bool pmu_is_uncore(const char *name) |
| 500 | if (!file) | 520 | { |
| 501 | return NULL; | 521 | char path[PATH_MAX]; |
| 522 | struct cpu_map *cpus; | ||
| 523 | const char *sysfs = sysfs__mountpoint(); | ||
| 502 | 524 | ||
| 503 | cpus = cpu_map__read(file); | 525 | snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name); |
| 504 | fclose(file); | 526 | cpus = __pmu_cpumask(path); |
| 505 | return cpus; | 527 | cpu_map__put(cpus); |
| 528 | |||
| 529 | return !!cpus; | ||
| 506 | } | 530 | } |
| 507 | 531 | ||
| 508 | /* | 532 | /* |
| @@ -617,6 +641,8 @@ static struct perf_pmu *pmu_lookup(const char *name) | |||
| 617 | 641 | ||
| 618 | pmu->cpus = pmu_cpumask(name); | 642 | pmu->cpus = pmu_cpumask(name); |
| 619 | 643 | ||
| 644 | pmu->is_uncore = pmu_is_uncore(name); | ||
| 645 | |||
| 620 | INIT_LIST_HEAD(&pmu->format); | 646 | INIT_LIST_HEAD(&pmu->format); |
| 621 | INIT_LIST_HEAD(&pmu->aliases); | 647 | INIT_LIST_HEAD(&pmu->aliases); |
| 622 | list_splice(&format, &pmu->format); | 648 | list_splice(&format, &pmu->format); |
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 389e9729331f..fe0de0502ce2 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h | |||
| @@ -22,6 +22,7 @@ struct perf_pmu { | |||
| 22 | char *name; | 22 | char *name; |
| 23 | __u32 type; | 23 | __u32 type; |
| 24 | bool selectable; | 24 | bool selectable; |
| 25 | bool is_uncore; | ||
| 25 | struct perf_event_attr *default_config; | 26 | struct perf_event_attr *default_config; |
| 26 | struct cpu_map *cpus; | 27 | struct cpu_map *cpus; |
| 27 | struct list_head format; /* HEAD struct perf_pmu_format -> list */ | 28 | struct list_head format; /* HEAD struct perf_pmu_format -> list */ |
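pmu_is_uncore() classifies a PMU as uncore when sysfs exposes a readable "cpumask" file for it (CPU PMUs such as the arm/arm64 ones use "cpus" instead, per the comment added above), and __add_event() now marks an event system-wide from that flag rather than from the mere presence of a cpu map. The stand-alone program below reads the same sysfs attribute for a PMU name given on the command line; the path template is taken from the patch (CPUS_TEMPLATE_UNCORE), while the default PMU name and the rest of the program are only an illustrative sketch.

    #include <stdio.h>

    /* Same template the patch introduces as CPUS_TEMPLATE_UNCORE,
     * with sysfs assumed to be mounted at /sys. */
    #define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask"

    int main(int argc, char **argv)
    {
        const char *pmu = argc > 1 ? argv[1] : "uncore_imc";  /* example name */
        char path[4096], mask[256];
        FILE *f;

        snprintf(path, sizeof(path), CPUS_TEMPLATE_UNCORE, "/sys", pmu);
        f = fopen(path, "r");
        if (!f) {
            printf("%s: no cpumask file -> not treated as uncore\n", pmu);
            return 0;
        }
        if (fgets(mask, sizeof(mask), f))
            printf("%s: uncore, cpumask %s", pmu, mask);
        fclose(f);
        return 0;
    }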
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile index 0f5e347b068d..152823b6cb21 100644 --- a/tools/testing/selftests/mqueue/Makefile +++ b/tools/testing/selftests/mqueue/Makefile | |||
| @@ -5,8 +5,8 @@ TEST_GEN_PROGS := mq_open_tests mq_perf_tests | |||
| 5 | include ../lib.mk | 5 | include ../lib.mk |
| 6 | 6 | ||
| 7 | override define RUN_TESTS | 7 | override define RUN_TESTS |
| 8 | $(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]" | 8 | @$(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]" |
| 9 | $(OUTPUT)//mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" | 9 | @$(OUTPUT)/mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" |
| 10 | endef | 10 | endef |
| 11 | 11 | ||
| 12 | override define EMIT_TESTS | 12 | override define EMIT_TESTS |
diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c index 00f286661dcd..dd4162fc0419 100644 --- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c +++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c | |||
| @@ -341,7 +341,7 @@ int main(int argc, char **argv) | |||
| 341 | return 0; | 341 | return 0; |
| 342 | case 'n': | 342 | case 'n': |
| 343 | t = atoi(optarg); | 343 | t = atoi(optarg); |
| 344 | if (t > ARRAY_SIZE(test_cases)) | 344 | if (t >= ARRAY_SIZE(test_cases)) |
| 345 | error(1, 0, "Invalid test case: %d", t); | 345 | error(1, 0, "Invalid test case: %d", t); |
| 346 | all_tests = false; | 346 | all_tests = false; |
| 347 | test_cases[t].enabled = true; | 347 | test_cases[t].enabled = true; |
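The rxtimestamp change fixes a classic off-by-one: valid indices into test_cases[] run from 0 to ARRAY_SIZE(test_cases) - 1, so the guard must reject t >= ARRAY_SIZE(...), not just t > ARRAY_SIZE(...). A compact demonstration with the usual ARRAY_SIZE macro (assumed to match the selftest's definition):

    #include <stdio.h>
    #include <stdlib.h>

    #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

    static const char *test_cases[] = { "tcp", "udp", "raw" };

    int main(int argc, char **argv)
    {
        int t = argc > 1 ? atoi(argv[1]) : 0;

        /* ">=" rejects t == 3 here; a plain ">" would let it through
         * and index one element past the end of the array. */
        if (t < 0 || (size_t)t >= ARRAY_SIZE(test_cases)) {
            fprintf(stderr, "Invalid test case: %d\n", t);
            return 1;
        }
        printf("running %s\n", test_cases[t]);
        return 0;
    }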
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index a2c53a3d223d..de2f9ec8a87f 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c | |||
| @@ -397,7 +397,7 @@ static void retry_copy_page(int ufd, struct uffdio_copy *uffdio_copy, | |||
| 397 | } | 397 | } |
| 398 | } | 398 | } |
| 399 | 399 | ||
| 400 | static int copy_page(int ufd, unsigned long offset) | 400 | static int __copy_page(int ufd, unsigned long offset, bool retry) |
| 401 | { | 401 | { |
| 402 | struct uffdio_copy uffdio_copy; | 402 | struct uffdio_copy uffdio_copy; |
| 403 | 403 | ||
| @@ -418,7 +418,7 @@ static int copy_page(int ufd, unsigned long offset) | |||
| 418 | fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n", | 418 | fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n", |
| 419 | uffdio_copy.copy), exit(1); | 419 | uffdio_copy.copy), exit(1); |
| 420 | } else { | 420 | } else { |
| 421 | if (test_uffdio_copy_eexist) { | 421 | if (test_uffdio_copy_eexist && retry) { |
| 422 | test_uffdio_copy_eexist = false; | 422 | test_uffdio_copy_eexist = false; |
| 423 | retry_copy_page(ufd, &uffdio_copy, offset); | 423 | retry_copy_page(ufd, &uffdio_copy, offset); |
| 424 | } | 424 | } |
| @@ -427,6 +427,16 @@ static int copy_page(int ufd, unsigned long offset) | |||
| 427 | return 0; | 427 | return 0; |
| 428 | } | 428 | } |
| 429 | 429 | ||
| 430 | static int copy_page_retry(int ufd, unsigned long offset) | ||
| 431 | { | ||
| 432 | return __copy_page(ufd, offset, true); | ||
| 433 | } | ||
| 434 | |||
| 435 | static int copy_page(int ufd, unsigned long offset) | ||
| 436 | { | ||
| 437 | return __copy_page(ufd, offset, false); | ||
| 438 | } | ||
| 439 | |||
| 430 | static void *uffd_poll_thread(void *arg) | 440 | static void *uffd_poll_thread(void *arg) |
| 431 | { | 441 | { |
| 432 | unsigned long cpu = (unsigned long) arg; | 442 | unsigned long cpu = (unsigned long) arg; |
| @@ -544,7 +554,7 @@ static void *background_thread(void *arg) | |||
| 544 | for (page_nr = cpu * nr_pages_per_cpu; | 554 | for (page_nr = cpu * nr_pages_per_cpu; |
| 545 | page_nr < (cpu+1) * nr_pages_per_cpu; | 555 | page_nr < (cpu+1) * nr_pages_per_cpu; |
| 546 | page_nr++) | 556 | page_nr++) |
| 547 | copy_page(uffd, page_nr * page_size); | 557 | copy_page_retry(uffd, page_nr * page_size); |
| 548 | 558 | ||
| 549 | return NULL; | 559 | return NULL; |
| 550 | } | 560 | } |
| @@ -779,7 +789,7 @@ static void retry_uffdio_zeropage(int ufd, | |||
| 779 | } | 789 | } |
| 780 | } | 790 | } |
| 781 | 791 | ||
| 782 | static int uffdio_zeropage(int ufd, unsigned long offset) | 792 | static int __uffdio_zeropage(int ufd, unsigned long offset, bool retry) |
| 783 | { | 793 | { |
| 784 | struct uffdio_zeropage uffdio_zeropage; | 794 | struct uffdio_zeropage uffdio_zeropage; |
| 785 | int ret; | 795 | int ret; |
| @@ -814,7 +824,7 @@ static int uffdio_zeropage(int ufd, unsigned long offset) | |||
| 814 | fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n", | 824 | fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n", |
| 815 | uffdio_zeropage.zeropage), exit(1); | 825 | uffdio_zeropage.zeropage), exit(1); |
| 816 | } else { | 826 | } else { |
| 817 | if (test_uffdio_zeropage_eexist) { | 827 | if (test_uffdio_zeropage_eexist && retry) { |
| 818 | test_uffdio_zeropage_eexist = false; | 828 | test_uffdio_zeropage_eexist = false; |
| 819 | retry_uffdio_zeropage(ufd, &uffdio_zeropage, | 829 | retry_uffdio_zeropage(ufd, &uffdio_zeropage, |
| 820 | offset); | 830 | offset); |
| @@ -830,6 +840,11 @@ static int uffdio_zeropage(int ufd, unsigned long offset) | |||
| 830 | return 0; | 840 | return 0; |
| 831 | } | 841 | } |
| 832 | 842 | ||
| 843 | static int uffdio_zeropage(int ufd, unsigned long offset) | ||
| 844 | { | ||
| 845 | return __uffdio_zeropage(ufd, offset, false); | ||
| 846 | } | ||
| 847 | |||
| 833 | /* exercise UFFDIO_ZEROPAGE */ | 848 | /* exercise UFFDIO_ZEROPAGE */ |
| 834 | static int userfaultfd_zeropage_test(void) | 849 | static int userfaultfd_zeropage_test(void) |
| 835 | { | 850 | { |
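copy_page() and uffdio_zeropage() become thin wrappers around __copy_page()/__uffdio_zeropage() with retry disabled, while the background thread uses copy_page_retry(), so the deliberate EEXIST retry is exercised on that one path only. The fragment below is merely a sketch of the wrapper-plus-flag pattern with invented names; it is not the test's actual userfaultfd handling.

    #include <stdbool.h>
    #include <stdio.h>

    /* Pretend operation: returns 0 on success, 1 if the page was already
     * populated by someone else (analogous to UFFDIO_COPY reporting EEXIST). */
    static int do_copy(unsigned long offset)
    {
        return offset % 2;   /* arbitrary: odd offsets "already exist" */
    }

    static int __copy(unsigned long offset, bool retry)
    {
        int ret = do_copy(offset);

        if (ret == 1 && retry) {
            printf("offset %lu already populated, retrying once\n", offset);
            ret = do_copy(offset);
        }
        return ret;
    }

    /* Only the background path opts into the retry behaviour. */
    static int copy_once(unsigned long offset)  { return __copy(offset, false); }
    static int copy_retry(unsigned long offset) { return __copy(offset, true); }

    int main(void)
    {
        copy_once(1);
        copy_retry(1);
        return 0;
    }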
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 97f187e2663f..0a74a20ca32b 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile | |||
| @@ -20,7 +20,7 @@ BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64) | |||
| 20 | BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32)) | 20 | BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32)) |
| 21 | BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64)) | 21 | BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64)) |
| 22 | 22 | ||
| 23 | CFLAGS := -O2 -g -std=gnu99 -pthread -Wall | 23 | CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie |
| 24 | 24 | ||
| 25 | UNAME_M := $(shell uname -m) | 25 | UNAME_M := $(shell uname -m) |
| 26 | CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) | 26 | CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) |
