 582 files changed, 8771 insertions(+), 4382 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
index 30c546900b60..07dbb358182c 100644
--- a/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
+++ b/Documentation/devicetree/bindings/arm/marvell/cp110-system-controller0.txt
@@ -45,7 +45,7 @@ The following clocks are available:
    - 1 15	SATA
    - 1 16	SATA USB
    - 1 17	Main
-   - 1 18	SD/MMC
+   - 1 18	SD/MMC/GOP
    - 1 21	Slow IO (SPI, NOR, BootROM, I2C, UART)
    - 1 22	USB3H0
    - 1 23	USB3H1
@@ -65,7 +65,7 @@ Required properties:
 	"cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
 	"cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
 	"cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
-	"cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io",
+	"cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
 	"cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
 
 Example:
@@ -78,6 +78,6 @@ Example:
 	gate-clock-output-names = "cpm-audio", "cpm-communit", "cpm-nand", "cpm-ppv2", "cpm-sdio",
 		"cpm-mg-domain", "cpm-mg-core", "cpm-xor1", "cpm-xor0", "cpm-gop-dp", "none",
 		"cpm-pcie_x10", "cpm-pcie_x11", "cpm-pcie_x4", "cpm-pcie-xor", "cpm-sata",
-		"cpm-sata-usb", "cpm-main", "cpm-sd-mmc", "none", "none", "cpm-slow-io",
+		"cpm-sata-usb", "cpm-main", "cpm-sd-mmc-gop", "none", "none", "cpm-slow-io",
 		"cpm-usb3h0", "cpm-usb3h1", "cpm-usb3dev", "cpm-eip150", "cpm-eip197";
 };
diff --git a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
index a78265993665..ca5204b3bc21 100644
--- a/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
+++ b/Documentation/devicetree/bindings/display/exynos/exynos_dsim.txt
@@ -4,7 +4,6 @@ Required properties:
   - compatible: value should be one of the following
 		"samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
 		"samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
-		"samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
 		"samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
 		"samsung,exynos5422-mipi-dsi" /* for Exynos5422/5800 SoCs */
 		"samsung,exynos5433-mipi-dsi" /* for Exynos5433 SoCs */
diff --git a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
index 18645e0228b0..5837402c3ade 100644
--- a/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/display/exynos/samsung-fimd.txt
@@ -11,7 +11,6 @@ Required properties:
 		"samsung,s5pv210-fimd"; /* for S5PV210 SoC */
 		"samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
 		"samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
-		"samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
 		"samsung,exynos5250-fimd"; /* for Exynos5250 SoCs */
 		"samsung,exynos5420-fimd"; /* for Exynos5420/5422/5800 SoCs */
 
diff --git a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
index ea9c1c9607f6..520d61dad6dd 100644
--- a/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
+++ b/Documentation/devicetree/bindings/mmc/rockchip-dw-mshc.txt
@@ -13,7 +13,7 @@ Required Properties:
 	- "rockchip,rk2928-dw-mshc": for Rockchip RK2928 and following,
 				     before RK3288
 	- "rockchip,rk3288-dw-mshc": for Rockchip RK3288
-	- "rockchip,rk1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK1108
+	- "rockchip,rv1108-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RV1108
 	- "rockchip,rk3036-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3036
 	- "rockchip,rk3368-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3368
 	- "rockchip,rk3399-dw-mshc", "rockchip,rk3288-dw-mshc": for Rockchip RK3399
diff --git a/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt b/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt
deleted file mode 100644
index e68ae5dec9c9..000000000000
--- a/Documentation/devicetree/bindings/phy/brcm,nsp-usb3-phy.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Broadcom USB3 phy binding for northstar plus SoC
-The USB3 phy is internal to the SoC and is accessed using mdio interface.
-
-Required mdio bus properties:
-- reg: Should be 0x0 for SoC internal USB3 phy
-- #address-cells: must be 1
-- #size-cells: must be 0
-
-Required USB3 PHY properties:
-- compatible: should be "brcm,nsp-usb3-phy"
-- reg: USB3 Phy address on SoC internal MDIO bus and it should be 0x10.
-- usb3-ctrl-syscon: handler of syscon node defining physical address
-  of usb3 control register.
-- #phy-cells: must be 0
-
-Required usb3 control properties:
-- compatible: should be "brcm,nsp-usb3-ctrl"
-- reg: offset and length of the control registers
-
-Example:
-
-	mdio@0 {
-		reg = <0x0>;
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		usb3_phy: usb-phy@10 {
-			compatible = "brcm,nsp-usb3-phy";
-			reg = <0x10>;
-			usb3-ctrl-syscon = <&usb3_ctrl>;
-			#phy-cells = <0>;
-			status = "disabled";
-		};
-	};
-
-	usb3_ctrl: syscon@104408 {
-		compatible = "brcm,nsp-usb3-ctrl", "syscon";
-		reg = <0x104408 0x3fc>;
-	};
diff --git a/Documentation/extcon/intel-int3496.txt b/Documentation/extcon/intel-int3496.txt
index af0b366c25b7..8155dbc7fad3 100644
--- a/Documentation/extcon/intel-int3496.txt
+++ b/Documentation/extcon/intel-int3496.txt
@@ -20,3 +20,8 @@ Index 1: The output gpio for enabling Vbus output from the device to the otg
 Index 2: The output gpio for muxing of the data pins between the USB host and
          the USB peripheral controller, write 1 to mux to the peripheral
          controller
+
+There is a mapping between indices and GPIO connection IDs as follows
+	id	index 0
+	vbus	index 1
+	mux	index 2
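(For illustration only, not part of the patch: a minimal sketch of how a driver could request these GPIOs by connection ID rather than by raw index. The function name and error handling are hypothetical, not taken from the extcon driver itself.)

	#include <linux/gpio/consumer.h>

	static int int3496_request_gpios(struct device *dev)
	{
		struct gpio_desc *id, *vbus, *mux;

		/* "id" maps to index 0: the USB ID input pin */
		id = devm_gpiod_get(dev, "id", GPIOD_IN);
		if (IS_ERR(id))
			return PTR_ERR(id);

		/* "vbus" maps to index 1: the Vbus enable output */
		vbus = devm_gpiod_get(dev, "vbus", GPIOD_OUT_LOW);
		if (IS_ERR(vbus))
			return PTR_ERR(vbus);

		/* "mux" maps to index 2: host/peripheral data-pin mux */
		mux = devm_gpiod_get(dev, "mux", GPIOD_OUT_LOW);
		if (IS_ERR(mux))
			return PTR_ERR(mux);

		return 0;
	}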
diff --git a/Documentation/gcc-plugins.txt b/Documentation/gcc-plugins.txt
index 891c69464434..433eaefb4aa1 100644
--- a/Documentation/gcc-plugins.txt
+++ b/Documentation/gcc-plugins.txt
@@ -18,8 +18,8 @@ because gcc versions 4.5 and 4.6 are compiled by a C compiler,
 gcc-4.7 can be compiled by a C or a C++ compiler,
 and versions 4.8+ can only be compiled by a C++ compiler.
 
-Currently the GCC plugin infrastructure supports only the x86, arm and arm64
-architectures.
+Currently the GCC plugin infrastructure supports only the x86, arm, arm64 and
+powerpc architectures.
 
 This infrastructure was ported from grsecurity [6] and PaX [7].
 
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 3c248f772ae6..fd106899afd1 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -3377,6 +3377,69 @@ struct kvm_ppc_resize_hpt {
 	__u32 pad;
 };
 
+4.104 KVM_X86_GET_MCE_CAP_SUPPORTED
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: system ioctl
+Parameters: u64 mce_cap (out)
+Returns: 0 on success, -1 on error
+
+Returns supported MCE capabilities. The u64 mce_cap parameter
+has the same format as the MSR_IA32_MCG_CAP register. Supported
+capabilities will have the corresponding bits set.
+
+4.105 KVM_X86_SETUP_MCE
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: u64 mcg_cap (in)
+Returns: 0 on success,
+         -EFAULT if u64 mcg_cap cannot be read,
+         -EINVAL if the requested number of banks is invalid,
+         -EINVAL if requested MCE capability is not supported.
+
+Initializes MCE support for use. The u64 mcg_cap parameter
+has the same format as the MSR_IA32_MCG_CAP register and
+specifies which capabilities should be enabled. The maximum
+supported number of error-reporting banks can be retrieved when
+checking for KVM_CAP_MCE. The supported capabilities can be
+retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED.
+
+4.106 KVM_X86_SET_MCE
+
+Capability: KVM_CAP_MCE
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_x86_mce (in)
+Returns: 0 on success,
+         -EFAULT if struct kvm_x86_mce cannot be read,
+         -EINVAL if the bank number is invalid,
+         -EINVAL if VAL bit is not set in status field.
+
+Inject a machine check error (MCE) into the guest. The input
+parameter is:
+
+struct kvm_x86_mce {
+	__u64 status;
+	__u64 addr;
+	__u64 misc;
+	__u64 mcg_status;
+	__u8 bank;
+	__u8 pad1[7];
+	__u64 pad2[3];
+};
+
+If the MCE being reported is an uncorrected error, KVM will
+inject it as an MCE exception into the guest. If the guest
+MCG_STATUS register reports that an MCE is in progress, KVM
+causes a KVM_EXIT_SHUTDOWN vmexit.
+
+Otherwise, if the MCE is a corrected error, KVM will just
+store it in the corresponding bank (provided this bank is
+not holding a previously reported uncorrected error).
+
 5. The kvm_run structure
 ------------------------
 
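(For illustration only, not part of the patch: a hedged user-space sketch of the flow the three ioctls above describe. The helper name, file descriptors, and error handling are assumptions; a real VMM would also check for KVM_CAP_MCE via KVM_CHECK_EXTENSION first.)

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static int setup_guest_mce(int kvm_fd, int vcpu_fd)
	{
		uint64_t mce_cap;

		/* System ioctl: supported capabilities, MSR_IA32_MCG_CAP format. */
		if (ioctl(kvm_fd, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0)
			return -1;

		/* vCPU ioctl: enable what the host reported as supported. */
		if (ioctl(vcpu_fd, KVM_X86_SETUP_MCE, &mce_cap) < 0)
			return -1;

		/* A later KVM_X86_SET_MCE with a struct kvm_x86_mce (VAL bit
		 * set in status) would inject the actual machine check. */
		return 0;
	}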
diff --git a/MAINTAINERS b/MAINTAINERS
index c776906f67a9..1b0a87ffffab 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3216,7 +3216,6 @@ F:	drivers/platform/chrome/
 
 CISCO VIC ETHERNET NIC DRIVER
 M:	Christian Benvenuti <benve@cisco.com>
-M:	Sujith Sankar <ssujith@cisco.com>
 M:	Govindarajulu Varadarajan <_govind@gmx.com>
 M:	Neel Patel <neepatel@cisco.com>
 S:	Supported
@@ -4776,6 +4775,12 @@ L:	linux-edac@vger.kernel.org
 S:	Maintained
 F:	drivers/edac/mpc85xx_edac.[ch]
 
+EDAC-PND2
+M:	Tony Luck <tony.luck@intel.com>
+L:	linux-edac@vger.kernel.org
+S:	Maintained
+F:	drivers/edac/pnd2_edac.[ch]
+
 EDAC-PASEMI
 M:	Egor Martovetsky <egor@pasemi.com>
 L:	linux-edac@vger.kernel.org
@@ -7774,13 +7779,6 @@ F:	include/net/mac80211.h
 F:	net/mac80211/
 F:	drivers/net/wireless/mac80211_hwsim.[ch]
 
-MACVLAN DRIVER
-M:	Patrick McHardy <kaber@trash.net>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/macvlan.c
-F:	include/linux/if_macvlan.h
-
 MAILBOX API
 M:	Jassi Brar <jassisinghbrar@gmail.com>
 L:	linux-kernel@vger.kernel.org
@@ -7853,6 +7851,8 @@ F:	drivers/net/ethernet/marvell/mvneta.*
 MARVELL MWIFIEX WIRELESS DRIVER
 M:	Amitkumar Karwar <akarwar@marvell.com>
 M:	Nishant Sarmukadam <nishants@marvell.com>
+M:	Ganapathi Bhat <gbhat@marvell.com>
+M:	Xinming Hu <huxm@marvell.com>
 L:	linux-wireless@vger.kernel.org
 S:	Maintained
 F:	drivers/net/wireless/marvell/mwifiex/
@@ -13383,14 +13383,6 @@ W:	https://linuxtv.org
 S:	Maintained
 F:	drivers/media/platform/vivid/*
 
-VLAN (802.1Q)
-M:	Patrick McHardy <kaber@trash.net>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/macvlan.c
-F:	include/linux/if_*vlan.h
-F:	net/8021q/
-
 VLYNQ BUS
 M:	Florian Fainelli <f.fainelli@gmail.com>
 L:	openwrt-devel@lists.openwrt.org (subscribers-only)
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi
index 02981eae96b9..1ec8e0d80191 100644
--- a/arch/arm/boot/dts/am335x-pcm-953.dtsi
+++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi
@@ -63,14 +63,14 @@
 			label = "home";
 			linux,code = <KEY_HOME>;
 			gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 		button@1 {
 			label = "menu";
 			linux,code = <KEY_MENU>;
 			gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>;
-			gpio-key,wakeup;
+			wakeup-source;
 		};
 
 	};
diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi
index 0d341c545b01..e5ac1d81d15c 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
@@ -315,6 +315,13 @@
 			/* ID & VBUS GPIOs provided in board dts */
 		};
 	};
+
+	tpic2810: tpic2810@60 {
+		compatible = "ti,tpic2810";
+		reg = <0x60>;
+		gpio-controller;
+		#gpio-cells = <2>;
+	};
 };
 
 &mcspi3 {
@@ -330,13 +337,6 @@
 		spi-max-frequency = <1000000>;
 		spi-cpol;
 	};
-
-	tpic2810: tpic2810@60 {
-		compatible = "ti,tpic2810";
-		reg = <0x60>;
-		gpio-controller;
-		#gpio-cells = <2>;
-	};
 };
 
 &uart3 {
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 4fbb089cf5ad..00de62dc0042 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -66,14 +66,14 @@
 		timer@20200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0x20200 0x100>;
-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&periph_clk>;
 		};
 
 		local-timer@20600 {
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0x20600 0x100>;
-			interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&periph_clk>;
 		};
 
diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts
index bfd923096a8c..ae31a5826e91 100644
--- a/arch/arm/boot/dts/bcm953012k.dts
+++ b/arch/arm/boot/dts/bcm953012k.dts
@@ -48,15 +48,14 @@
 	};
 
 	memory {
-		reg = <0x00000000 0x10000000>;
+		reg = <0x80000000 0x10000000>;
 	};
 };
 
 &uart0 {
-	clock-frequency = <62499840>;
+	status = "okay";
 };
 
 &uart1 {
-	clock-frequency = <62499840>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts
index 3f04a40eb90c..df05e7f568af 100644
--- a/arch/arm/boot/dts/bcm958522er.dts
+++ b/arch/arm/boot/dts/bcm958522er.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts
index 9fd542200d3d..4a3ab19c6281 100644
--- a/arch/arm/boot/dts/bcm958525er.dts
+++ b/arch/arm/boot/dts/bcm958525er.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts
index 41e7fd350fcd..81f78435d8c7 100644
--- a/arch/arm/boot/dts/bcm958525xmc.dts
+++ b/arch/arm/boot/dts/bcm958525xmc.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 31 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts
index 477c4860db52..c88b8fefcb2f 100644
--- a/arch/arm/boot/dts/bcm958622hr.dts
+++ b/arch/arm/boot/dts/bcm958622hr.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index c0a499d5ba44..d503fa0dde31 100644
--- a/arch/arm/boot/dts/bcm958623hr.dts
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index f7eb5854a224..cc0363b843c1 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/bcm988312hr.dts b/arch/arm/boot/dts/bcm988312hr.dts
index 16666324fda8..74e15a3cd9f8 100644
--- a/arch/arm/boot/dts/bcm988312hr.dts
+++ b/arch/arm/boot/dts/bcm988312hr.dts
@@ -55,6 +55,7 @@
 	gpio-restart {
 		compatible = "gpio-restart";
 		gpios = <&gpioa 15 GPIO_ACTIVE_LOW>;
+		open-source;
 		priority = <200>;
 	};
 };
diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
index 49f466fe0b1d..dcfc97591433 100644
--- a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
+++ b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
@@ -121,11 +121,6 @@
 	};
 };
 
-&cpu0 {
-	arm-supply = <&sw1a_reg>;
-	soc-supply = <&sw1c_reg>;
-};
-
 &fec1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_enet1>;
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 22332be72140..528b4e9c6d3d 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -266,7 +266,7 @@
 		};
 
 		usb1: ohci@00400000 {
-			compatible = "atmel,sama5d2-ohci", "usb-ohci";
+			compatible = "atmel,at91rm9200-ohci", "usb-ohci";
 			reg = <0x00400000 0x100000>;
 			interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>;
 			clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index 82d8c4771293..162e1eb5373d 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -14,6 +14,7 @@
 #include <dt-bindings/mfd/dbx500-prcmu.h>
 #include <dt-bindings/arm/ux500_pm_domains.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/clock/ste-ab8500.h>
 #include "skeleton.dtsi"
 
 / {
@@ -603,6 +604,11 @@
 			interrupt-controller;
 			#interrupt-cells = <2>;
 
+			ab8500_clock: clock-controller {
+				compatible = "stericsson,ab8500-clk";
+				#clock-cells = <1>;
+			};
+
 			ab8500_gpio: ab8500-gpio {
 				compatible = "stericsson,ab8500-gpio";
 				gpio-controller;
@@ -686,6 +692,8 @@
 
 			ab8500-pwm {
 				compatible = "stericsson,ab8500-pwm";
+				clocks = <&ab8500_clock AB8500_SYSCLK_INT>;
+				clock-names = "intclk";
 			};
 
 			ab8500-debugfs {
@@ -700,6 +708,9 @@
 				V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>;
 				V-DMIC-supply = <&ab8500_ldo_dmic_reg>;
 
+				clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>;
+				clock-names = "audioclk";
+
 				stericsson,earpeice-cmv = <950>; /* Units in mV. */
 			};
 
@@ -1095,6 +1106,14 @@
 			status = "disabled";
 		};
 
+		sound {
+			compatible = "stericsson,snd-soc-mop500";
+			stericsson,cpu-dai = <&msp1 &msp3>;
+			stericsson,audio-codec = <&codec>;
+			clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>;
+			clock-names = "sysclk", "ulpclk", "intclk";
+		};
+
 		msp0: msp@80123000 {
 			compatible = "stericsson,ux500-msp-i2s";
 			reg = <0x80123000 0x1000>;
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index f37f9e10713c..9e359e4f342e 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -186,15 +186,6 @@
 			status = "okay";
 		};
 
-		sound {
-			compatible = "stericsson,snd-soc-mop500";
-
-			stericsson,cpu-dai = <&msp1 &msp3>;
-			stericsson,audio-codec = <&codec>;
-			clocks = <&prcmu_clk PRCMU_SYSCLK>;
-			clock-names = "sysclk";
-		};
-
 		msp0: msp@80123000 {
 			pinctrl-names = "default";
 			pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index dd5514def604..ade1d0d4e5f4 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -159,15 +159,6 @@
 				  "", "", "", "", "", "", "", "";
 		};
 
-		sound {
-			compatible = "stericsson,snd-soc-mop500";
-
-			stericsson,cpu-dai = <&msp1 &msp3>;
-			stericsson,audio-codec = <&codec>;
-			clocks = <&prcmu_clk PRCMU_SYSCLK>;
-			clock-names = "sysclk";
-		};
-
 		msp0: msp@80123000 {
 			pinctrl-names = "default";
 			pinctrl-0 = <&msp0_default_mode>;
diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
index 72ec0d5ae052..bbf1c8cbaac6 100644
--- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
+++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts
@@ -167,7 +167,7 @@
 				reg = <8>;
 				label = "cpu";
 				ethernet = <&gmac>;
-				phy-mode = "rgmii";
+				phy-mode = "rgmii-txid";
 				fixed-link {
 					speed = <1000>;
 					full-duplex;
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
index a952cc0703cc..8a3ed21cb7bc 100644
--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
@@ -495,7 +495,7 @@
 			resets = <&ccu RST_BUS_GPU>;
 
 			assigned-clocks = <&ccu CLK_GPU>;
-			assigned-clock-rates = <408000000>;
+			assigned-clock-rates = <384000000>;
 		};
 
 		gic: interrupt-controller@01c81000 {
diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
index 7097c18ff487..d6bd15898db6 100644
--- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
+++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi
@@ -50,8 +50,6 @@
 
 	backlight: backlight {
 		compatible = "pwm-backlight";
-		pinctrl-names = "default";
-		pinctrl-0 = <&bl_en_pin>;
 		pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
 		brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>;
 		default-brightness-level = <8>;
@@ -93,11 +91,6 @@
 };
 
 &pio {
-	bl_en_pin: bl_en_pin@0 {
-		pins = "PH6";
-		function = "gpio_in";
-	};
-
 	mmc0_cd_pin: mmc0_cd_pin@0 {
 		pins = "PB4";
 		function = "gpio_in";
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index f2462a6bdba6..decd388d613d 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -188,6 +188,7 @@ CONFIG_WL12XX=m
 CONFIG_WL18XX=m
 CONFIG_WLCORE_SPI=m
 CONFIG_WLCORE_SDIO=m
+CONFIG_INPUT_MOUSEDEV=m
 CONFIG_INPUT_JOYDEV=m
 CONFIG_INPUT_EVDEV=m
 CONFIG_KEYBOARD_ATKBD=m
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 3d89b7905bd9..a277981f414d 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -289,6 +289,22 @@ static void at91_ddr_standby(void)
 		at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
 }
 
+static void sama5d3_ddr_standby(void)
+{
+	u32 lpr0;
+	u32 saved_lpr0;
+
+	saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+	lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
+	lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
+
+	cpu_do_idle();
+
+	at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
+}
+
 /* We manage both DDRAM/SDRAM controllers, we need more than one value to
  * remember.
  */
@@ -323,7 +339,7 @@ static const struct of_device_id const ramc_ids[] __initconst = {
 	{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
 	{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
 	{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
-	{ .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
+	{ .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
 	{ /*sentinel*/ }
 };
 
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 093458b62c8d..c89757abb0ae 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -241,6 +241,3 @@ obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o
 
 onenand-$(CONFIG_MTD_ONENAND_OMAP2)	:= gpmc-onenand.o
 obj-y					+= $(onenand-m) $(onenand-y)
-
-nand-$(CONFIG_MTD_NAND_OMAP2)		:= gpmc-nand.o
-obj-y					+= $(nand-m) $(nand-y)
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
deleted file mode 100644
index f6ac027f3c3b..000000000000
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * gpmc-nand.c
- *
- * Copyright (C) 2009 Texas Instruments
- * Vimal Singh <vimalsingh@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/omap-gpmc.h>
-#include <linux/mtd/nand.h>
-#include <linux/platform_data/mtd-nand-omap2.h>
-
-#include <asm/mach/flash.h>
-
-#include "soc.h"
-
-/* minimum size for IO mapping */
-#define NAND_IO_SIZE	4
-
-static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
-{
-	/* platforms which support all ECC schemes */
-	if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
-		 soc_is_omap54xx() || soc_is_dra7xx())
-		return 1;
-
-	if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
-		 ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
-		if (cpu_is_omap24xx())
-			return 0;
-		else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
-			return 0;
-		else
-			return 1;
-	}
-
-	/* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
-	 * which require H/W based ECC error detection */
-	if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
-	    ((ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
-		 (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
-		return 0;
-
-	/* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
-	if (ecc_opt == OMAP_ECC_HAM1_CODE_HW ||
-	    ecc_opt == OMAP_ECC_HAM1_CODE_SW)
-		return 1;
-	else
-		return 0;
-}
-
-/* This function will go away once the device-tree convertion is complete */
-static void gpmc_set_legacy(struct omap_nand_platform_data *gpmc_nand_data,
-			    struct gpmc_settings *s)
-{
-	/* Enable RD PIN Monitoring Reg */
-	if (gpmc_nand_data->dev_ready) {
-		s->wait_on_read = true;
-		s->wait_on_write = true;
-	}
-
-	if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16)
-		s->device_width = GPMC_DEVWIDTH_16BIT;
-	else
-		s->device_width = GPMC_DEVWIDTH_8BIT;
-}
-
-int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data,
-		   struct gpmc_timings *gpmc_t)
-{
-	int err = 0;
-	struct gpmc_settings s;
-	struct platform_device *pdev;
-	struct resource gpmc_nand_res[] = {
-		{ .flags = IORESOURCE_MEM, },
-		{ .flags = IORESOURCE_IRQ, },
-		{ .flags = IORESOURCE_IRQ, },
-	};
-
-	BUG_ON(gpmc_nand_data->cs >= GPMC_CS_NUM);
-
-	err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE,
-			      (unsigned long *)&gpmc_nand_res[0].start);
-	if (err < 0) {
-		pr_err("omap2-gpmc: Cannot request GPMC CS %d, error %d\n",
-		       gpmc_nand_data->cs, err);
-		return err;
-	}
-	gpmc_nand_res[0].end = gpmc_nand_res[0].start + NAND_IO_SIZE - 1;
-	gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE);
-	gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT);
-
-	memset(&s, 0, sizeof(struct gpmc_settings));
-	gpmc_set_legacy(gpmc_nand_data, &s);
-
-	s.device_nand = true;
-
-	if (gpmc_t) {
-		err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s);
-		if (err < 0) {
-			pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n",
-			       err);
-			return err;
-		}
-	}
-
-	err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s);
-	if (err < 0)
-		goto out_free_cs;
-
-	err = gpmc_configure(GPMC_CONFIG_WP, 0);
-	if (err < 0)
-		goto out_free_cs;
-
-	if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) {
-		pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n");
-		err = -EINVAL;
-		goto out_free_cs;
-	}
-
-
-	pdev = platform_device_alloc("omap2-nand", gpmc_nand_data->cs);
-	if (pdev) {
-		err = platform_device_add_resources(pdev, gpmc_nand_res,
-						    ARRAY_SIZE(gpmc_nand_res));
-		if (!err)
-			pdev->dev.platform_data = gpmc_nand_data;
-	} else {
-		err = -ENOMEM;
-	}
-	if (err)
-		goto out_free_pdev;
-
-	err = platform_device_add(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "Unable to register NAND device\n");
-		goto out_free_pdev;
-	}
-
-	return 0;
-
-out_free_pdev:
-	platform_device_put(pdev);
-out_free_cs:
-	gpmc_cs_free(gpmc_nand_data->cs);
-
-	return err;
-}
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 8633c703546a..2944af820558 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
 	return ret;
 }
 
-void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
+int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 {
 	int err;
 	struct device *dev = &gpmc_onenand_device.dev;
@@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
 	if (err < 0) {
 		dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
 			gpmc_onenand_data->cs, err);
-		return;
+		return err;
 	}
 
 	gpmc_onenand_resource.end = gpmc_onenand_resource.start +
 						ONENAND_IO_SIZE - 1;
 
-	if (platform_device_register(&gpmc_onenand_device) < 0) {
+	err = platform_device_register(&gpmc_onenand_device);
+	if (err) {
 		dev_err(dev, "Unable to register OneNAND device\n");
 		gpmc_cs_free(gpmc_onenand_data->cs);
-		return;
 	}
+
+	return err;
 }
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index fe36ce2734d4..4c6f14cf92a8 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -17,6 +17,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <asm/assembler.h>
 
 #include "omap44xx.h"
 
@@ -66,7 +67,7 @@ wait_2:	ldr	r2, =AUX_CORE_BOOT0_PA	@ read from AuxCoreBoot0
 	cmp	r0, r4
 	bne	wait_2
 	ldr	r12, =API_HYP_ENTRY
-	adr	r0, hyp_boot
+	badr	r0, hyp_boot
 	smc	#0
 hyp_boot:
 	b	omap_secondary_startup
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 56f917ec8621..1435fee39a89 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = {
 };
 
 /* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = {
+	{
+		.pa_start	= OMAP34XX_SR1_BASE,
+		.pa_end		= OMAP34XX_SR1_BASE + SZ_1K - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+	{ },
+};
 
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_sr1_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr1_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
@@ -2124,15 +2133,25 @@
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap36xx_sr1_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr1_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
 /* L4 CORE -> SR1 interface */
+static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = {
+	{
+		.pa_start	= OMAP34XX_SR2_BASE,
+		.pa_end		= OMAP34XX_SR2_BASE + SZ_1K - 1,
+		.flags		= ADDR_TYPE_RT,
+	},
+	{ },
+};
 
 static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = {
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap34xx_sr2_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr2_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
@@ -2140,6 +2159,7 @@
 	.master		= &omap3xxx_l4_core_hwmod,
 	.slave		= &omap36xx_sr2_hwmod,
 	.clk		= "sr_l4_ick",
+	.addr		= omap3_sr2_addr_space,
 	.user		= OCP_USER_MPU,
 };
 
@@ -3111,16 +3131,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
  * Return: 0 if device named @dev_name is not likely to be accessible,
  * or 1 if it is likely to be accessible.
  */
-static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
+static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
 						       const char *dev_name)
 {
+	struct device_node *node;
+	bool available;
+
 	if (!bus)
-		return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
+		return omap_type() == OMAP2_DEVICE_TYPE_GP;
 
-	if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
-		return 1;
+	node = of_get_child_by_name(bus, dev_name);
+	available = of_device_is_available(node);
+	of_node_put(node);
 
-	return 0;
+	return available;
 }
 
 int __init omap3xxx_hwmod_init(void)
@@ -3189,15 +3213,20 @@
 
 	if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
 		r = omap_hwmod_register_links(h_sham);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}
 
 	if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
 		r = omap_hwmod_register_links(h_aes);
-		if (r < 0)
+		if (r < 0) {
+			of_node_put(bus);
 			return r;
+		}
 	}
+	of_node_put(bus);
 
 	/*
 	 * Register hwmod links specific to certain ES levels of a
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 3c2cb5d5adfa..0bb0e9c6376c 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -411,3 +411,4 @@
 394	common	pkey_mprotect		sys_pkey_mprotect
 395	common	pkey_alloc		sys_pkey_alloc
 396	common	pkey_free		sys_pkey_free
+397	common	statx			sys_statx
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8c7c244247b6..3741859765cf 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1073,6 +1073,10 @@ config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
 
+config KEYS_COMPAT
+	def_bool y
+	depends on COMPAT && KEYS
+
 endmenu
 
 menu "Power management options"
diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi index 9f9e203c09c5..bcb03fc32665 100644 --- a/arch/arm64/boot/dts/broadcom/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi | |||
| @@ -114,6 +114,7 @@ | |||
| 114 | pcie0: pcie@20020000 { | 114 | pcie0: pcie@20020000 { |
| 115 | compatible = "brcm,iproc-pcie"; | 115 | compatible = "brcm,iproc-pcie"; |
| 116 | reg = <0 0x20020000 0 0x1000>; | 116 | reg = <0 0x20020000 0 0x1000>; |
| 117 | dma-coherent; | ||
| 117 | 118 | ||
| 118 | #interrupt-cells = <1>; | 119 | #interrupt-cells = <1>; |
| 119 | interrupt-map-mask = <0 0 0 0>; | 120 | interrupt-map-mask = <0 0 0 0>; |
| @@ -144,6 +145,7 @@ | |||
| 144 | pcie4: pcie@50020000 { | 145 | pcie4: pcie@50020000 { |
| 145 | compatible = "brcm,iproc-pcie"; | 146 | compatible = "brcm,iproc-pcie"; |
| 146 | reg = <0 0x50020000 0 0x1000>; | 147 | reg = <0 0x50020000 0 0x1000>; |
| 148 | dma-coherent; | ||
| 147 | 149 | ||
| 148 | #interrupt-cells = <1>; | 150 | #interrupt-cells = <1>; |
| 149 | interrupt-map-mask = <0 0 0 0>; | 151 | interrupt-map-mask = <0 0 0 0>; |
| @@ -174,6 +176,7 @@ | |||
| 174 | pcie8: pcie@60c00000 { | 176 | pcie8: pcie@60c00000 { |
| 175 | compatible = "brcm,iproc-pcie-paxc"; | 177 | compatible = "brcm,iproc-pcie-paxc"; |
| 176 | reg = <0 0x60c00000 0 0x1000>; | 178 | reg = <0 0x60c00000 0 0x1000>; |
| 179 | dma-coherent; | ||
| 177 | linux,pci-domain = <8>; | 180 | linux,pci-domain = <8>; |
| 178 | 181 | ||
| 179 | bus-range = <0x0 0x1>; | 182 | bus-range = <0x0 0x1>; |
| @@ -203,6 +206,7 @@ | |||
| 203 | <0x61030000 0x100>; | 206 | <0x61030000 0x100>; |
| 204 | reg-names = "amac_base", "idm_base", "nicpm_base"; | 207 | reg-names = "amac_base", "idm_base", "nicpm_base"; |
| 205 | interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>; | 208 | interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>; |
| 209 | dma-coherent; | ||
| 206 | phy-handle = <&gphy0>; | 210 | phy-handle = <&gphy0>; |
| 207 | phy-mode = "rgmii"; | 211 | phy-mode = "rgmii"; |
| 208 | status = "disabled"; | 212 | status = "disabled"; |
| @@ -213,6 +217,7 @@ | |||
| 213 | reg = <0x612c0000 0x445>; /* PDC FS0 regs */ | 217 | reg = <0x612c0000 0x445>; /* PDC FS0 regs */ |
| 214 | interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>; | 218 | interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>; |
| 215 | #mbox-cells = <1>; | 219 | #mbox-cells = <1>; |
| 220 | dma-coherent; | ||
| 216 | brcm,rx-status-len = <32>; | 221 | brcm,rx-status-len = <32>; |
| 217 | brcm,use-bcm-hdr; | 222 | brcm,use-bcm-hdr; |
| 218 | }; | 223 | }; |
| @@ -222,6 +227,7 @@ | |||
| 222 | reg = <0x612e0000 0x445>; /* PDC FS1 regs */ | 227 | reg = <0x612e0000 0x445>; /* PDC FS1 regs */ |
| 223 | interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>; | 228 | interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>; |
| 224 | #mbox-cells = <1>; | 229 | #mbox-cells = <1>; |
| 230 | dma-coherent; | ||
| 225 | brcm,rx-status-len = <32>; | 231 | brcm,rx-status-len = <32>; |
| 226 | brcm,use-bcm-hdr; | 232 | brcm,use-bcm-hdr; |
| 227 | }; | 233 | }; |
| @@ -231,6 +237,7 @@ | |||
| 231 | reg = <0x61300000 0x445>; /* PDC FS2 regs */ | 237 | reg = <0x61300000 0x445>; /* PDC FS2 regs */ |
| 232 | interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>; | 238 | interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>; |
| 233 | #mbox-cells = <1>; | 239 | #mbox-cells = <1>; |
| 240 | dma-coherent; | ||
| 234 | brcm,rx-status-len = <32>; | 241 | brcm,rx-status-len = <32>; |
| 235 | brcm,use-bcm-hdr; | 242 | brcm,use-bcm-hdr; |
| 236 | }; | 243 | }; |
| @@ -240,6 +247,7 @@ | |||
| 240 | reg = <0x61320000 0x445>; /* PDC FS3 regs */ | 247 | reg = <0x61320000 0x445>; /* PDC FS3 regs */ |
| 241 | interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>; | 248 | interrupts = <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>; |
| 242 | #mbox-cells = <1>; | 249 | #mbox-cells = <1>; |
| 250 | dma-coherent; | ||
| 243 | brcm,rx-status-len = <32>; | 251 | brcm,rx-status-len = <32>; |
| 244 | brcm,use-bcm-hdr; | 252 | brcm,use-bcm-hdr; |
| 245 | }; | 253 | }; |
| @@ -644,6 +652,7 @@ | |||
| 644 | sata: ahci@663f2000 { | 652 | sata: ahci@663f2000 { |
| 645 | compatible = "brcm,iproc-ahci", "generic-ahci"; | 653 | compatible = "brcm,iproc-ahci", "generic-ahci"; |
| 646 | reg = <0x663f2000 0x1000>; | 654 | reg = <0x663f2000 0x1000>; |
| 655 | dma-coherent; | ||
| 647 | reg-names = "ahci"; | 656 | reg-names = "ahci"; |
| 648 | interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>; | 657 | interrupts = <GIC_SPI 438 IRQ_TYPE_LEVEL_HIGH>; |
| 649 | #address-cells = <1>; | 658 | #address-cells = <1>; |
| @@ -667,6 +676,7 @@ | |||
| 667 | compatible = "brcm,sdhci-iproc-cygnus"; | 676 | compatible = "brcm,sdhci-iproc-cygnus"; |
| 668 | reg = <0x66420000 0x100>; | 677 | reg = <0x66420000 0x100>; |
| 669 | interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>; | 678 | interrupts = <GIC_SPI 421 IRQ_TYPE_LEVEL_HIGH>; |
| 679 | dma-coherent; | ||
| 670 | bus-width = <8>; | 680 | bus-width = <8>; |
| 671 | clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>; | 681 | clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>; |
| 672 | status = "disabled"; | 682 | status = "disabled"; |
| @@ -676,6 +686,7 @@ | |||
| 676 | compatible = "brcm,sdhci-iproc-cygnus"; | 686 | compatible = "brcm,sdhci-iproc-cygnus"; |
| 677 | reg = <0x66430000 0x100>; | 687 | reg = <0x66430000 0x100>; |
| 678 | interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>; | 688 | interrupts = <GIC_SPI 422 IRQ_TYPE_LEVEL_HIGH>; |
| 689 | dma-coherent; | ||
| 679 | bus-width = <8>; | 690 | bus-width = <8>; |
| 680 | clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>; | 691 | clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>; |
| 681 | status = "disabled"; | 692 | status = "disabled"; |
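
Note: the NS2 hunks above all add the "dma-coherent" property — to the AMAC ethernet, the four PDC mailbox instances, the AHCI controller, and both SDHCI nodes — marking those DMA masters as hardware cache-coherent so the kernel can use coherent DMA ops instead of explicit cache maintenance. As a rough sketch of the consumer side (the property is normally honoured by the core DMA setup; the probe function and names below are illustrative, only of_dma_is_coherent() is the real helper):

    #include <linux/of_address.h>
    #include <linux/platform_device.h>

    /*
     * Illustrative sketch: of_dma_is_coherent() reports whether the
     * device's node (or an ancestor) carries "dma-coherent", as added
     * in the hunks above. The probe function itself is hypothetical.
     */
    static int example_probe(struct platform_device *pdev)
    {
            if (of_dma_is_coherent(pdev->dev.of_node))
                    dev_info(&pdev->dev, "using cache-coherent DMA\n");
            return 0;
    }
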
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 05310ad8c5ab..f31c48d0cd68 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h | |||
| @@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void) | |||
| 251 | static inline bool system_uses_ttbr0_pan(void) | 251 | static inline bool system_uses_ttbr0_pan(void) |
| 252 | { | 252 | { |
| 253 | return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) && | 253 | return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) && |
| 254 | !cpus_have_cap(ARM64_HAS_PAN); | 254 | !cpus_have_const_cap(ARM64_HAS_PAN); |
| 255 | } | 255 | } |
| 256 | 256 | ||
| 257 | #endif /* __ASSEMBLY__ */ | 257 | #endif /* __ASSEMBLY__ */ |
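
Note: system_uses_ttbr0_pan() is called on hot uaccess paths, so the hunk above switches it from cpus_have_cap(), a bitmap test, to cpus_have_const_cap(), which after CPU feature finalization is backed by a static key and compiles down to a runtime-patched branch. A minimal sketch of that static-key pattern, with illustrative names (the real arm64 code keeps one key per capability):

    #include <linux/jump_label.h>

    /* Hypothetical capability key; enabled once at boot if detected,
     * e.g. via static_branch_enable(&example_cap_key). */
    static DEFINE_STATIC_KEY_FALSE(example_cap_key);

    static inline bool example_has_cap(void)
    {
            /* Patched NOP/branch; no memory load on the fast path. */
            return static_branch_unlikely(&example_cap_key);
    }
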
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index e78ac26324bd..bdbeb06dc11e 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h | |||
| @@ -44,7 +44,7 @@ | |||
| 44 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) | 44 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) |
| 45 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) | 45 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) |
| 46 | 46 | ||
| 47 | #define __NR_compat_syscalls 394 | 47 | #define __NR_compat_syscalls 398 |
| 48 | #endif | 48 | #endif |
| 49 | 49 | ||
| 50 | #define __ARCH_WANT_SYS_CLONE | 50 | #define __ARCH_WANT_SYS_CLONE |
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index b7e8ef16ff0d..c66b51aab195 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h | |||
| @@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range) | |||
| 809 | __SYSCALL(__NR_preadv2, compat_sys_preadv2) | 809 | __SYSCALL(__NR_preadv2, compat_sys_preadv2) |
| 810 | #define __NR_pwritev2 393 | 810 | #define __NR_pwritev2 393 |
| 811 | __SYSCALL(__NR_pwritev2, compat_sys_pwritev2) | 811 | __SYSCALL(__NR_pwritev2, compat_sys_pwritev2) |
| 812 | #define __NR_pkey_mprotect 394 | ||
| 813 | __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect) | ||
| 814 | #define __NR_pkey_alloc 395 | ||
| 815 | __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc) | ||
| 816 | #define __NR_pkey_free 396 | ||
| 817 | __SYSCALL(__NR_pkey_free, sys_pkey_free) | ||
| 818 | #define __NR_statx 397 | ||
| 819 | __SYSCALL(__NR_statx, sys_statx) | ||
| 812 | 820 | ||
| 813 | /* | 821 | /* |
| 814 | * Please add new compat syscalls above this comment and update | 822 | * Please add new compat syscalls above this comment and update |
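
Note: the four entries above take compat syscall numbers 394-397, which is exactly why the earlier unistd.h hunk bumps __NR_compat_syscalls from 394 to 398 — the count must stay one past the highest assigned number (397 + 1 = 398). A trivial compile-time sketch of that invariant (not part of the patch):

    /* Illustrative check of the invariant the two hunks keep in sync. */
    #define __NR_statx              397
    #define __NR_compat_syscalls    398

    _Static_assert(__NR_compat_syscalls == __NR_statx + 1,
                   "compat syscall count out of sync");
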
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index 75a0f8acef66..fd691087dc9a 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c | |||
| @@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu) | |||
| 30 | } | 30 | } |
| 31 | 31 | ||
| 32 | /** | 32 | /** |
| 33 | * cpu_suspend() - function to enter a low-power idle state | 33 | * arm_cpuidle_suspend() - function to enter a low-power idle state |
| 34 | * @arg: argument to pass to CPU suspend operations | 34 | * @arg: argument to pass to CPU suspend operations |
| 35 | * | 35 | * |
| 36 | * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU | 36 | * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU |
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 769f24ef628c..d7e90d97f5c4 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c | |||
| @@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset) | |||
| 131 | /* | 131 | /* |
| 132 | * The kernel Image should not extend across a 1GB/32MB/512MB alignment | 132 | * The kernel Image should not extend across a 1GB/32MB/512MB alignment |
| 133 | * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this | 133 | * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this |
| 134 | * happens, increase the KASLR offset by the size of the kernel image. | 134 | * happens, increase the KASLR offset by the size of the kernel image |
| 135 | * rounded up by SWAPPER_BLOCK_SIZE. | ||
| 135 | */ | 136 | */ |
| 136 | if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) != | 137 | if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) != |
| 137 | (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) | 138 | (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) { |
| 138 | offset = (offset + (u64)(_end - _text)) & mask; | 139 | u64 kimg_sz = _end - _text; |
| 140 | offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE)) | ||
| 141 | & mask; | ||
| 142 | } | ||
| 139 | 143 | ||
| 140 | if (IS_ENABLED(CONFIG_KASAN)) | 144 | if (IS_ENABLED(CONFIG_KASAN)) |
| 141 | /* | 145 | /* |
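
Note: the kaslr.c hunk above changes how far the offset is advanced when the randomized image would straddle a SWAPPER_TABLE_SHIFT boundary. Since the image is mapped in SWAPPER_BLOCK_SIZE blocks, its effective footprint is the raw size rounded up to that block size; on one reading of the fix, advancing by the unrounded `_end - _text` (and then masking) could move the image by too little and leave it still crossing the boundary. A small userspace sketch of the round_up() arithmetic, with hypothetical sizes:

    #include <stdio.h>

    /* Same arithmetic as the kernel's round_up() for power-of-two y. */
    #define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

    int main(void)
    {
            unsigned long block = 2UL << 20;              /* e.g. 2 MiB blocks */
            unsigned long kimg_sz = (13UL << 20) + 4096;  /* hypothetical size */

            printf("raw size    = %lu\n", kimg_sz);
            printf("mapped size = %lu\n", round_up(kimg_sz, block)); /* 14 MiB */
            return 0;
    }
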
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 2a07aae5b8a2..c5c45942fb6e 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c | |||
| @@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr) | |||
| 372 | return 0; | 372 | return 0; |
| 373 | } | 373 | } |
| 374 | 374 | ||
| 375 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | ||
| 376 | unsigned long val, void *data) | ||
| 377 | { | ||
| 378 | return NOTIFY_DONE; | ||
| 379 | } | ||
| 380 | |||
| 381 | static void __kprobes kprobe_handler(struct pt_regs *regs) | 375 | static void __kprobes kprobe_handler(struct pt_regs *regs) |
| 382 | { | 376 | { |
| 383 | struct kprobe *p, *cur_kprobe; | 377 | struct kprobe *p, *cur_kprobe; |
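
Note: the deleted kprobe_exceptions_notify() stub returned NOTIFY_DONE unconditionally; it can go because, as far as this series suggests, the generic kprobes core gained a __weak default with the same do-nothing behaviour, so an architecture only defines the function when it has real work to do. The weak-symbol pattern, sketched:

    #include <linux/kprobes.h>
    #include <linux/notifier.h>

    /* Sketch of the weak-default pattern that lets the arch stub above
     * be removed: the core provides this no-op; an architecture
     * overrides it only if needed. */
    int __weak kprobe_exceptions_notify(struct notifier_block *self,
                                        unsigned long val, void *data)
    {
            return NOTIFY_DONE;
    }
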
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 55d1e9205543..687a358a3733 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c | |||
| @@ -162,7 +162,7 @@ void __init kasan_init(void) | |||
| 162 | clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); | 162 | clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); |
| 163 | 163 | ||
| 164 | vmemmap_populate(kimg_shadow_start, kimg_shadow_end, | 164 | vmemmap_populate(kimg_shadow_start, kimg_shadow_end, |
| 165 | pfn_to_nid(virt_to_pfn(_text))); | 165 | pfn_to_nid(virt_to_pfn(lm_alias(_text)))); |
| 166 | 166 | ||
| 167 | /* | 167 | /* |
| 168 | * vmemmap_populate() has populated the shadow region that covers the | 168 | * vmemmap_populate() has populated the shadow region that covers the |
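
Note: virt_to_pfn() expects a linear-map address, but _text is a kernel-image address, and with KASLR the two are no longer simple aliases of each other; the hunk above therefore routes the symbol through lm_alias() before the pfn conversion. lm_alias() exists for exactly this translation and is, at the time of this series, defined as:

    /* include/linux/mm.h: kernel-image symbol -> linear-map alias */
    #ifndef lm_alias
    #define lm_alias(x)     __va(__pa_symbol(x))
    #endif
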
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 048bf076f7df..531cb9eb3319 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig | |||
| @@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y | |||
| 25 | # CONFIG_EFI_PARTITION is not set | 25 | # CONFIG_EFI_PARTITION is not set |
| 26 | CONFIG_SYSV68_PARTITION=y | 26 | CONFIG_SYSV68_PARTITION=y |
| 27 | CONFIG_IOSCHED_DEADLINE=m | 27 | CONFIG_IOSCHED_DEADLINE=m |
| 28 | CONFIG_MQ_IOSCHED_DEADLINE=m | ||
| 28 | CONFIG_KEXEC=y | 29 | CONFIG_KEXEC=y |
| 29 | CONFIG_BOOTINFO_PROC=y | 30 | CONFIG_BOOTINFO_PROC=y |
| 30 | CONFIG_M68020=y | 31 | CONFIG_M68020=y |
| @@ -60,6 +61,7 @@ CONFIG_NET_IPVTI=m | |||
| 60 | CONFIG_NET_FOU_IP_TUNNELS=y | 61 | CONFIG_NET_FOU_IP_TUNNELS=y |
| 61 | CONFIG_INET_AH=m | 62 | CONFIG_INET_AH=m |
| 62 | CONFIG_INET_ESP=m | 63 | CONFIG_INET_ESP=m |
| 64 | CONFIG_INET_ESP_OFFLOAD=m | ||
| 63 | CONFIG_INET_IPCOMP=m | 65 | CONFIG_INET_IPCOMP=m |
| 64 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | 66 | CONFIG_INET_XFRM_MODE_TRANSPORT=m |
| 65 | CONFIG_INET_XFRM_MODE_TUNNEL=m | 67 | CONFIG_INET_XFRM_MODE_TUNNEL=m |
| @@ -71,6 +73,7 @@ CONFIG_IPV6=m | |||
| 71 | CONFIG_IPV6_ROUTER_PREF=y | 73 | CONFIG_IPV6_ROUTER_PREF=y |
| 72 | CONFIG_INET6_AH=m | 74 | CONFIG_INET6_AH=m |
| 73 | CONFIG_INET6_ESP=m | 75 | CONFIG_INET6_ESP=m |
| 76 | CONFIG_INET6_ESP_OFFLOAD=m | ||
| 74 | CONFIG_INET6_IPCOMP=m | 77 | CONFIG_INET6_IPCOMP=m |
| 75 | CONFIG_IPV6_ILA=m | 78 | CONFIG_IPV6_ILA=m |
| 76 | CONFIG_IPV6_VTI=m | 79 | CONFIG_IPV6_VTI=m |
| @@ -101,6 +104,7 @@ CONFIG_NFT_NUMGEN=m | |||
| 101 | CONFIG_NFT_CT=m | 104 | CONFIG_NFT_CT=m |
| 102 | CONFIG_NFT_SET_RBTREE=m | 105 | CONFIG_NFT_SET_RBTREE=m |
| 103 | CONFIG_NFT_SET_HASH=m | 106 | CONFIG_NFT_SET_HASH=m |
| 107 | CONFIG_NFT_SET_BITMAP=m | ||
| 104 | CONFIG_NFT_COUNTER=m | 108 | CONFIG_NFT_COUNTER=m |
| 105 | CONFIG_NFT_LOG=m | 109 | CONFIG_NFT_LOG=m |
| 106 | CONFIG_NFT_LIMIT=m | 110 | CONFIG_NFT_LIMIT=m |
| @@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m | |||
| 298 | CONFIG_NET_L3_MASTER_DEV=y | 302 | CONFIG_NET_L3_MASTER_DEV=y |
| 299 | CONFIG_AF_KCM=m | 303 | CONFIG_AF_KCM=m |
| 300 | # CONFIG_WIRELESS is not set | 304 | # CONFIG_WIRELESS is not set |
| 305 | CONFIG_PSAMPLE=m | ||
| 306 | CONFIG_NET_IFE=m | ||
| 301 | CONFIG_NET_DEVLINK=m | 307 | CONFIG_NET_DEVLINK=m |
| 302 | # CONFIG_UEVENT_HELPER is not set | 308 | # CONFIG_UEVENT_HELPER is not set |
| 303 | CONFIG_DEVTMPFS=y | 309 | CONFIG_DEVTMPFS=y |
| @@ -371,6 +377,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m | |||
| 371 | CONFIG_MACVLAN=m | 377 | CONFIG_MACVLAN=m |
| 372 | CONFIG_MACVTAP=m | 378 | CONFIG_MACVTAP=m |
| 373 | CONFIG_IPVLAN=m | 379 | CONFIG_IPVLAN=m |
| 380 | CONFIG_IPVTAP=m | ||
| 374 | CONFIG_VXLAN=m | 381 | CONFIG_VXLAN=m |
| 375 | CONFIG_GENEVE=m | 382 | CONFIG_GENEVE=m |
| 376 | CONFIG_GTP=m | 383 | CONFIG_GTP=m |
| @@ -383,6 +390,7 @@ CONFIG_VETH=m | |||
| 383 | # CONFIG_NET_VENDOR_AMAZON is not set | 390 | # CONFIG_NET_VENDOR_AMAZON is not set |
| 384 | CONFIG_A2065=y | 391 | CONFIG_A2065=y |
| 385 | CONFIG_ARIADNE=y | 392 | CONFIG_ARIADNE=y |
| 393 | # CONFIG_NET_VENDOR_AQUANTIA is not set | ||
| 386 | # CONFIG_NET_VENDOR_ARC is not set | 394 | # CONFIG_NET_VENDOR_ARC is not set |
| 387 | # CONFIG_NET_CADENCE is not set | 395 | # CONFIG_NET_CADENCE is not set |
| 388 | # CONFIG_NET_VENDOR_BROADCOM is not set | 396 | # CONFIG_NET_VENDOR_BROADCOM is not set |
| @@ -404,7 +412,6 @@ CONFIG_ZORRO8390=y | |||
| 404 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 412 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 405 | # CONFIG_NET_VENDOR_SMSC is not set | 413 | # CONFIG_NET_VENDOR_SMSC is not set |
| 406 | # CONFIG_NET_VENDOR_STMICRO is not set | 414 | # CONFIG_NET_VENDOR_STMICRO is not set |
| 407 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
| 408 | # CONFIG_NET_VENDOR_VIA is not set | 415 | # CONFIG_NET_VENDOR_VIA is not set |
| 409 | # CONFIG_NET_VENDOR_WIZNET is not set | 416 | # CONFIG_NET_VENDOR_WIZNET is not set |
| 410 | CONFIG_PPP=m | 417 | CONFIG_PPP=m |
| @@ -564,6 +571,8 @@ CONFIG_NLS_MAC_TURKISH=m | |||
| 564 | CONFIG_DLM=m | 571 | CONFIG_DLM=m |
| 565 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set | 572 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set |
| 566 | CONFIG_MAGIC_SYSRQ=y | 573 | CONFIG_MAGIC_SYSRQ=y |
| 574 | CONFIG_WW_MUTEX_SELFTEST=m | ||
| 575 | CONFIG_ATOMIC64_SELFTEST=m | ||
| 567 | CONFIG_ASYNC_RAID6_TEST=m | 576 | CONFIG_ASYNC_RAID6_TEST=m |
| 568 | CONFIG_TEST_HEXDUMP=m | 577 | CONFIG_TEST_HEXDUMP=m |
| 569 | CONFIG_TEST_STRING_HELPERS=m | 578 | CONFIG_TEST_STRING_HELPERS=m |
| @@ -594,6 +603,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
| 594 | CONFIG_CRYPTO_LRW=m | 603 | CONFIG_CRYPTO_LRW=m |
| 595 | CONFIG_CRYPTO_PCBC=m | 604 | CONFIG_CRYPTO_PCBC=m |
| 596 | CONFIG_CRYPTO_KEYWRAP=m | 605 | CONFIG_CRYPTO_KEYWRAP=m |
| 606 | CONFIG_CRYPTO_CMAC=m | ||
| 597 | CONFIG_CRYPTO_XCBC=m | 607 | CONFIG_CRYPTO_XCBC=m |
| 598 | CONFIG_CRYPTO_VMAC=m | 608 | CONFIG_CRYPTO_VMAC=m |
| 599 | CONFIG_CRYPTO_MICHAEL_MIC=m | 609 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| @@ -605,6 +615,7 @@ CONFIG_CRYPTO_SHA512=m | |||
| 605 | CONFIG_CRYPTO_SHA3=m | 615 | CONFIG_CRYPTO_SHA3=m |
| 606 | CONFIG_CRYPTO_TGR192=m | 616 | CONFIG_CRYPTO_TGR192=m |
| 607 | CONFIG_CRYPTO_WP512=m | 617 | CONFIG_CRYPTO_WP512=m |
| 618 | CONFIG_CRYPTO_AES_TI=m | ||
| 608 | CONFIG_CRYPTO_ANUBIS=m | 619 | CONFIG_CRYPTO_ANUBIS=m |
| 609 | CONFIG_CRYPTO_BLOWFISH=m | 620 | CONFIG_CRYPTO_BLOWFISH=m |
| 610 | CONFIG_CRYPTO_CAMELLIA=m | 621 | CONFIG_CRYPTO_CAMELLIA=m |
| @@ -629,4 +640,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m | |||
| 629 | CONFIG_CRYPTO_USER_API_RNG=m | 640 | CONFIG_CRYPTO_USER_API_RNG=m |
| 630 | CONFIG_CRYPTO_USER_API_AEAD=m | 641 | CONFIG_CRYPTO_USER_API_AEAD=m |
| 631 | # CONFIG_CRYPTO_HW is not set | 642 | # CONFIG_CRYPTO_HW is not set |
| 643 | CONFIG_CRC32_SELFTEST=m | ||
| 632 | CONFIG_XZ_DEC_TEST=m | 644 | CONFIG_XZ_DEC_TEST=m |
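
Note: the same refresh repeats across every m68k defconfig that follows: each file enables the options that are new or newly exposed in this cycle as modules (CONFIG_MQ_IOSCHED_DEADLINE for the blk-mq deadline scheduler, the ESP offload symbols, CONFIG_NFT_SET_BITMAP, CONFIG_PSAMPLE, CONFIG_NET_IFE, CONFIG_IPVTAP, CONFIG_CRYPTO_CMAC, the fixed-time CONFIG_CRYPTO_AES_TI, CONFIG_CRC32_SELFTEST, and the ww-mutex/atomic64 selftests), records the new Aquantia ethernet vendor guard as disabled, and drops the stale "# CONFIG_NET_VENDOR_SYNOPSYS is not set" line, presumably because that Kconfig entry is no longer visible on m68k — the shape of a savedefconfig-style regeneration.
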
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index d4de24963f5f..ca91d39555da 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig | |||
| @@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y | |||
| 26 | # CONFIG_EFI_PARTITION is not set | 26 | # CONFIG_EFI_PARTITION is not set |
| 27 | CONFIG_SYSV68_PARTITION=y | 27 | CONFIG_SYSV68_PARTITION=y |
| 28 | CONFIG_IOSCHED_DEADLINE=m | 28 | CONFIG_IOSCHED_DEADLINE=m |
| 29 | CONFIG_MQ_IOSCHED_DEADLINE=m | ||
| 29 | CONFIG_KEXEC=y | 30 | CONFIG_KEXEC=y |
| 30 | CONFIG_BOOTINFO_PROC=y | 31 | CONFIG_BOOTINFO_PROC=y |
| 31 | CONFIG_M68020=y | 32 | CONFIG_M68020=y |
| @@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m | |||
| 58 | CONFIG_NET_FOU_IP_TUNNELS=y | 59 | CONFIG_NET_FOU_IP_TUNNELS=y |
| 59 | CONFIG_INET_AH=m | 60 | CONFIG_INET_AH=m |
| 60 | CONFIG_INET_ESP=m | 61 | CONFIG_INET_ESP=m |
| 62 | CONFIG_INET_ESP_OFFLOAD=m | ||
| 61 | CONFIG_INET_IPCOMP=m | 63 | CONFIG_INET_IPCOMP=m |
| 62 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | 64 | CONFIG_INET_XFRM_MODE_TRANSPORT=m |
| 63 | CONFIG_INET_XFRM_MODE_TUNNEL=m | 65 | CONFIG_INET_XFRM_MODE_TUNNEL=m |
| @@ -69,6 +71,7 @@ CONFIG_IPV6=m | |||
| 69 | CONFIG_IPV6_ROUTER_PREF=y | 71 | CONFIG_IPV6_ROUTER_PREF=y |
| 70 | CONFIG_INET6_AH=m | 72 | CONFIG_INET6_AH=m |
| 71 | CONFIG_INET6_ESP=m | 73 | CONFIG_INET6_ESP=m |
| 74 | CONFIG_INET6_ESP_OFFLOAD=m | ||
| 72 | CONFIG_INET6_IPCOMP=m | 75 | CONFIG_INET6_IPCOMP=m |
| 73 | CONFIG_IPV6_ILA=m | 76 | CONFIG_IPV6_ILA=m |
| 74 | CONFIG_IPV6_VTI=m | 77 | CONFIG_IPV6_VTI=m |
| @@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m | |||
| 99 | CONFIG_NFT_CT=m | 102 | CONFIG_NFT_CT=m |
| 100 | CONFIG_NFT_SET_RBTREE=m | 103 | CONFIG_NFT_SET_RBTREE=m |
| 101 | CONFIG_NFT_SET_HASH=m | 104 | CONFIG_NFT_SET_HASH=m |
| 105 | CONFIG_NFT_SET_BITMAP=m | ||
| 102 | CONFIG_NFT_COUNTER=m | 106 | CONFIG_NFT_COUNTER=m |
| 103 | CONFIG_NFT_LOG=m | 107 | CONFIG_NFT_LOG=m |
| 104 | CONFIG_NFT_LIMIT=m | 108 | CONFIG_NFT_LIMIT=m |
| @@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m | |||
| 296 | CONFIG_NET_L3_MASTER_DEV=y | 300 | CONFIG_NET_L3_MASTER_DEV=y |
| 297 | CONFIG_AF_KCM=m | 301 | CONFIG_AF_KCM=m |
| 298 | # CONFIG_WIRELESS is not set | 302 | # CONFIG_WIRELESS is not set |
| 303 | CONFIG_PSAMPLE=m | ||
| 304 | CONFIG_NET_IFE=m | ||
| 299 | CONFIG_NET_DEVLINK=m | 305 | CONFIG_NET_DEVLINK=m |
| 300 | # CONFIG_UEVENT_HELPER is not set | 306 | # CONFIG_UEVENT_HELPER is not set |
| 301 | CONFIG_DEVTMPFS=y | 307 | CONFIG_DEVTMPFS=y |
| @@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m | |||
| 353 | CONFIG_MACVLAN=m | 359 | CONFIG_MACVLAN=m |
| 354 | CONFIG_MACVTAP=m | 360 | CONFIG_MACVTAP=m |
| 355 | CONFIG_IPVLAN=m | 361 | CONFIG_IPVLAN=m |
| 362 | CONFIG_IPVTAP=m | ||
| 356 | CONFIG_VXLAN=m | 363 | CONFIG_VXLAN=m |
| 357 | CONFIG_GENEVE=m | 364 | CONFIG_GENEVE=m |
| 358 | CONFIG_GTP=m | 365 | CONFIG_GTP=m |
| @@ -362,6 +369,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y | |||
| 362 | CONFIG_VETH=m | 369 | CONFIG_VETH=m |
| 363 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 370 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
| 364 | # CONFIG_NET_VENDOR_AMAZON is not set | 371 | # CONFIG_NET_VENDOR_AMAZON is not set |
| 372 | # CONFIG_NET_VENDOR_AQUANTIA is not set | ||
| 365 | # CONFIG_NET_VENDOR_ARC is not set | 373 | # CONFIG_NET_VENDOR_ARC is not set |
| 366 | # CONFIG_NET_CADENCE is not set | 374 | # CONFIG_NET_CADENCE is not set |
| 367 | # CONFIG_NET_VENDOR_BROADCOM is not set | 375 | # CONFIG_NET_VENDOR_BROADCOM is not set |
| @@ -378,7 +386,6 @@ CONFIG_VETH=m | |||
| 378 | # CONFIG_NET_VENDOR_SEEQ is not set | 386 | # CONFIG_NET_VENDOR_SEEQ is not set |
| 379 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 387 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 380 | # CONFIG_NET_VENDOR_STMICRO is not set | 388 | # CONFIG_NET_VENDOR_STMICRO is not set |
| 381 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
| 382 | # CONFIG_NET_VENDOR_VIA is not set | 389 | # CONFIG_NET_VENDOR_VIA is not set |
| 383 | # CONFIG_NET_VENDOR_WIZNET is not set | 390 | # CONFIG_NET_VENDOR_WIZNET is not set |
| 384 | CONFIG_PPP=m | 391 | CONFIG_PPP=m |
| @@ -523,6 +530,8 @@ CONFIG_NLS_MAC_TURKISH=m | |||
| 523 | CONFIG_DLM=m | 530 | CONFIG_DLM=m |
| 524 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set | 531 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set |
| 525 | CONFIG_MAGIC_SYSRQ=y | 532 | CONFIG_MAGIC_SYSRQ=y |
| 533 | CONFIG_WW_MUTEX_SELFTEST=m | ||
| 534 | CONFIG_ATOMIC64_SELFTEST=m | ||
| 526 | CONFIG_ASYNC_RAID6_TEST=m | 535 | CONFIG_ASYNC_RAID6_TEST=m |
| 527 | CONFIG_TEST_HEXDUMP=m | 536 | CONFIG_TEST_HEXDUMP=m |
| 528 | CONFIG_TEST_STRING_HELPERS=m | 537 | CONFIG_TEST_STRING_HELPERS=m |
| @@ -553,6 +562,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
| 553 | CONFIG_CRYPTO_LRW=m | 562 | CONFIG_CRYPTO_LRW=m |
| 554 | CONFIG_CRYPTO_PCBC=m | 563 | CONFIG_CRYPTO_PCBC=m |
| 555 | CONFIG_CRYPTO_KEYWRAP=m | 564 | CONFIG_CRYPTO_KEYWRAP=m |
| 565 | CONFIG_CRYPTO_CMAC=m | ||
| 556 | CONFIG_CRYPTO_XCBC=m | 566 | CONFIG_CRYPTO_XCBC=m |
| 557 | CONFIG_CRYPTO_VMAC=m | 567 | CONFIG_CRYPTO_VMAC=m |
| 558 | CONFIG_CRYPTO_MICHAEL_MIC=m | 568 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| @@ -564,6 +574,7 @@ CONFIG_CRYPTO_SHA512=m | |||
| 564 | CONFIG_CRYPTO_SHA3=m | 574 | CONFIG_CRYPTO_SHA3=m |
| 565 | CONFIG_CRYPTO_TGR192=m | 575 | CONFIG_CRYPTO_TGR192=m |
| 566 | CONFIG_CRYPTO_WP512=m | 576 | CONFIG_CRYPTO_WP512=m |
| 577 | CONFIG_CRYPTO_AES_TI=m | ||
| 567 | CONFIG_CRYPTO_ANUBIS=m | 578 | CONFIG_CRYPTO_ANUBIS=m |
| 568 | CONFIG_CRYPTO_BLOWFISH=m | 579 | CONFIG_CRYPTO_BLOWFISH=m |
| 569 | CONFIG_CRYPTO_CAMELLIA=m | 580 | CONFIG_CRYPTO_CAMELLIA=m |
| @@ -588,4 +599,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m | |||
| 588 | CONFIG_CRYPTO_USER_API_RNG=m | 599 | CONFIG_CRYPTO_USER_API_RNG=m |
| 589 | CONFIG_CRYPTO_USER_API_AEAD=m | 600 | CONFIG_CRYPTO_USER_API_AEAD=m |
| 590 | # CONFIG_CRYPTO_HW is not set | 601 | # CONFIG_CRYPTO_HW is not set |
| 602 | CONFIG_CRC32_SELFTEST=m | ||
| 591 | CONFIG_XZ_DEC_TEST=m | 603 | CONFIG_XZ_DEC_TEST=m |
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index fc0fd3f871f3..23a3d8a691e2 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig | |||
| @@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y | |||
| 25 | # CONFIG_EFI_PARTITION is not set | 25 | # CONFIG_EFI_PARTITION is not set |
| 26 | CONFIG_SYSV68_PARTITION=y | 26 | CONFIG_SYSV68_PARTITION=y |
| 27 | CONFIG_IOSCHED_DEADLINE=m | 27 | CONFIG_IOSCHED_DEADLINE=m |
| 28 | CONFIG_MQ_IOSCHED_DEADLINE=m | ||
| 28 | CONFIG_KEXEC=y | 29 | CONFIG_KEXEC=y |
| 29 | CONFIG_BOOTINFO_PROC=y | 30 | CONFIG_BOOTINFO_PROC=y |
| 30 | CONFIG_M68020=y | 31 | CONFIG_M68020=y |
| @@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m | |||
| 58 | CONFIG_NET_FOU_IP_TUNNELS=y | 59 | CONFIG_NET_FOU_IP_TUNNELS=y |
| 59 | CONFIG_INET_AH=m | 60 | CONFIG_INET_AH=m |
| 60 | CONFIG_INET_ESP=m | 61 | CONFIG_INET_ESP=m |
| 62 | CONFIG_INET_ESP_OFFLOAD=m | ||
| 61 | CONFIG_INET_IPCOMP=m | 63 | CONFIG_INET_IPCOMP=m |
| 62 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | 64 | CONFIG_INET_XFRM_MODE_TRANSPORT=m |
| 63 | CONFIG_INET_XFRM_MODE_TUNNEL=m | 65 | CONFIG_INET_XFRM_MODE_TUNNEL=m |
| @@ -69,6 +71,7 @@ CONFIG_IPV6=m | |||
| 69 | CONFIG_IPV6_ROUTER_PREF=y | 71 | CONFIG_IPV6_ROUTER_PREF=y |
| 70 | CONFIG_INET6_AH=m | 72 | CONFIG_INET6_AH=m |
| 71 | CONFIG_INET6_ESP=m | 73 | CONFIG_INET6_ESP=m |
| 74 | CONFIG_INET6_ESP_OFFLOAD=m | ||
| 72 | CONFIG_INET6_IPCOMP=m | 75 | CONFIG_INET6_IPCOMP=m |
| 73 | CONFIG_IPV6_ILA=m | 76 | CONFIG_IPV6_ILA=m |
| 74 | CONFIG_IPV6_VTI=m | 77 | CONFIG_IPV6_VTI=m |
| @@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m | |||
| 99 | CONFIG_NFT_CT=m | 102 | CONFIG_NFT_CT=m |
| 100 | CONFIG_NFT_SET_RBTREE=m | 103 | CONFIG_NFT_SET_RBTREE=m |
| 101 | CONFIG_NFT_SET_HASH=m | 104 | CONFIG_NFT_SET_HASH=m |
| 105 | CONFIG_NFT_SET_BITMAP=m | ||
| 102 | CONFIG_NFT_COUNTER=m | 106 | CONFIG_NFT_COUNTER=m |
| 103 | CONFIG_NFT_LOG=m | 107 | CONFIG_NFT_LOG=m |
| 104 | CONFIG_NFT_LIMIT=m | 108 | CONFIG_NFT_LIMIT=m |
| @@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m | |||
| 296 | CONFIG_NET_L3_MASTER_DEV=y | 300 | CONFIG_NET_L3_MASTER_DEV=y |
| 297 | CONFIG_AF_KCM=m | 301 | CONFIG_AF_KCM=m |
| 298 | # CONFIG_WIRELESS is not set | 302 | # CONFIG_WIRELESS is not set |
| 303 | CONFIG_PSAMPLE=m | ||
| 304 | CONFIG_NET_IFE=m | ||
| 299 | CONFIG_NET_DEVLINK=m | 305 | CONFIG_NET_DEVLINK=m |
| 300 | # CONFIG_UEVENT_HELPER is not set | 306 | # CONFIG_UEVENT_HELPER is not set |
| 301 | CONFIG_DEVTMPFS=y | 307 | CONFIG_DEVTMPFS=y |
| @@ -362,6 +368,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m | |||
| 362 | CONFIG_MACVLAN=m | 368 | CONFIG_MACVLAN=m |
| 363 | CONFIG_MACVTAP=m | 369 | CONFIG_MACVTAP=m |
| 364 | CONFIG_IPVLAN=m | 370 | CONFIG_IPVLAN=m |
| 371 | CONFIG_IPVTAP=m | ||
| 365 | CONFIG_VXLAN=m | 372 | CONFIG_VXLAN=m |
| 366 | CONFIG_GENEVE=m | 373 | CONFIG_GENEVE=m |
| 367 | CONFIG_GTP=m | 374 | CONFIG_GTP=m |
| @@ -372,6 +379,7 @@ CONFIG_VETH=m | |||
| 372 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 379 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
| 373 | # CONFIG_NET_VENDOR_AMAZON is not set | 380 | # CONFIG_NET_VENDOR_AMAZON is not set |
| 374 | CONFIG_ATARILANCE=y | 381 | CONFIG_ATARILANCE=y |
| 382 | # CONFIG_NET_VENDOR_AQUANTIA is not set | ||
| 375 | # CONFIG_NET_VENDOR_ARC is not set | 383 | # CONFIG_NET_VENDOR_ARC is not set |
| 376 | # CONFIG_NET_CADENCE is not set | 384 | # CONFIG_NET_CADENCE is not set |
| 377 | # CONFIG_NET_VENDOR_BROADCOM is not set | 385 | # CONFIG_NET_VENDOR_BROADCOM is not set |
| @@ -389,7 +397,6 @@ CONFIG_NE2000=y | |||
| 389 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 397 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 390 | CONFIG_SMC91X=y | 398 | CONFIG_SMC91X=y |
| 391 | # CONFIG_NET_VENDOR_STMICRO is not set | 399 | # CONFIG_NET_VENDOR_STMICRO is not set |
| 392 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
| 393 | # CONFIG_NET_VENDOR_VIA is not set | 400 | # CONFIG_NET_VENDOR_VIA is not set |
| 394 | # CONFIG_NET_VENDOR_WIZNET is not set | 401 | # CONFIG_NET_VENDOR_WIZNET is not set |
| 395 | CONFIG_PPP=m | 402 | CONFIG_PPP=m |
| @@ -544,6 +551,8 @@ CONFIG_NLS_MAC_TURKISH=m | |||
| 544 | CONFIG_DLM=m | 551 | CONFIG_DLM=m |
| 545 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set | 552 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set |
| 546 | CONFIG_MAGIC_SYSRQ=y | 553 | CONFIG_MAGIC_SYSRQ=y |
| 554 | CONFIG_WW_MUTEX_SELFTEST=m | ||
| 555 | CONFIG_ATOMIC64_SELFTEST=m | ||
| 547 | CONFIG_ASYNC_RAID6_TEST=m | 556 | CONFIG_ASYNC_RAID6_TEST=m |
| 548 | CONFIG_TEST_HEXDUMP=m | 557 | CONFIG_TEST_HEXDUMP=m |
| 549 | CONFIG_TEST_STRING_HELPERS=m | 558 | CONFIG_TEST_STRING_HELPERS=m |
| @@ -574,6 +583,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
| 574 | CONFIG_CRYPTO_LRW=m | 583 | CONFIG_CRYPTO_LRW=m |
| 575 | CONFIG_CRYPTO_PCBC=m | 584 | CONFIG_CRYPTO_PCBC=m |
| 576 | CONFIG_CRYPTO_KEYWRAP=m | 585 | CONFIG_CRYPTO_KEYWRAP=m |
| 586 | CONFIG_CRYPTO_CMAC=m | ||
| 577 | CONFIG_CRYPTO_XCBC=m | 587 | CONFIG_CRYPTO_XCBC=m |
| 578 | CONFIG_CRYPTO_VMAC=m | 588 | CONFIG_CRYPTO_VMAC=m |
| 579 | CONFIG_CRYPTO_MICHAEL_MIC=m | 589 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| @@ -585,6 +595,7 @@ CONFIG_CRYPTO_SHA512=m | |||
| 585 | CONFIG_CRYPTO_SHA3=m | 595 | CONFIG_CRYPTO_SHA3=m |
| 586 | CONFIG_CRYPTO_TGR192=m | 596 | CONFIG_CRYPTO_TGR192=m |
| 587 | CONFIG_CRYPTO_WP512=m | 597 | CONFIG_CRYPTO_WP512=m |
| 598 | CONFIG_CRYPTO_AES_TI=m | ||
| 588 | CONFIG_CRYPTO_ANUBIS=m | 599 | CONFIG_CRYPTO_ANUBIS=m |
| 589 | CONFIG_CRYPTO_BLOWFISH=m | 600 | CONFIG_CRYPTO_BLOWFISH=m |
| 590 | CONFIG_CRYPTO_CAMELLIA=m | 601 | CONFIG_CRYPTO_CAMELLIA=m |
| @@ -609,4 +620,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m | |||
| 609 | CONFIG_CRYPTO_USER_API_RNG=m | 620 | CONFIG_CRYPTO_USER_API_RNG=m |
| 610 | CONFIG_CRYPTO_USER_API_AEAD=m | 621 | CONFIG_CRYPTO_USER_API_AEAD=m |
| 611 | # CONFIG_CRYPTO_HW is not set | 622 | # CONFIG_CRYPTO_HW is not set |
| 623 | CONFIG_CRC32_SELFTEST=m | ||
| 612 | CONFIG_XZ_DEC_TEST=m | 624 | CONFIG_XZ_DEC_TEST=m |
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 52e984a0aa69..95deb95140fe 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig | |||
| @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y | |||
| 25 | CONFIG_SUN_PARTITION=y | 25 | CONFIG_SUN_PARTITION=y |
| 26 | # CONFIG_EFI_PARTITION is not set | 26 | # CONFIG_EFI_PARTITION is not set |
| 27 | CONFIG_IOSCHED_DEADLINE=m | 27 | CONFIG_IOSCHED_DEADLINE=m |
| 28 | CONFIG_MQ_IOSCHED_DEADLINE=m | ||
| 28 | CONFIG_KEXEC=y | 29 | CONFIG_KEXEC=y |
| 29 | CONFIG_BOOTINFO_PROC=y | 30 | CONFIG_BOOTINFO_PROC=y |
| 30 | CONFIG_M68040=y | 31 | CONFIG_M68040=y |
| @@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m | |||
| 56 | CONFIG_NET_FOU_IP_TUNNELS=y | 57 | CONFIG_NET_FOU_IP_TUNNELS=y |
| 57 | CONFIG_INET_AH=m | 58 | CONFIG_INET_AH=m |
| 58 | CONFIG_INET_ESP=m | 59 | CONFIG_INET_ESP=m |
| 60 | CONFIG_INET_ESP_OFFLOAD=m | ||
| 59 | CONFIG_INET_IPCOMP=m | 61 | CONFIG_INET_IPCOMP=m |
| 60 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | 62 | CONFIG_INET_XFRM_MODE_TRANSPORT=m |
| 61 | CONFIG_INET_XFRM_MODE_TUNNEL=m | 63 | CONFIG_INET_XFRM_MODE_TUNNEL=m |
| @@ -67,6 +69,7 @@ CONFIG_IPV6=m | |||
| 67 | CONFIG_IPV6_ROUTER_PREF=y | 69 | CONFIG_IPV6_ROUTER_PREF=y |
| 68 | CONFIG_INET6_AH=m | 70 | CONFIG_INET6_AH=m |
| 69 | CONFIG_INET6_ESP=m | 71 | CONFIG_INET6_ESP=m |
| 72 | CONFIG_INET6_ESP_OFFLOAD=m | ||
| 70 | CONFIG_INET6_IPCOMP=m | 73 | CONFIG_INET6_IPCOMP=m |
| 71 | CONFIG_IPV6_ILA=m | 74 | CONFIG_IPV6_ILA=m |
| 72 | CONFIG_IPV6_VTI=m | 75 | CONFIG_IPV6_VTI=m |
| @@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m | |||
| 97 | CONFIG_NFT_CT=m | 100 | CONFIG_NFT_CT=m |
| 98 | CONFIG_NFT_SET_RBTREE=m | 101 | CONFIG_NFT_SET_RBTREE=m |
| 99 | CONFIG_NFT_SET_HASH=m | 102 | CONFIG_NFT_SET_HASH=m |
| 103 | CONFIG_NFT_SET_BITMAP=m | ||
| 100 | CONFIG_NFT_COUNTER=m | 104 | CONFIG_NFT_COUNTER=m |
| 101 | CONFIG_NFT_LOG=m | 105 | CONFIG_NFT_LOG=m |
| 102 | CONFIG_NFT_LIMIT=m | 106 | CONFIG_NFT_LIMIT=m |
| @@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m | |||
| 294 | CONFIG_NET_L3_MASTER_DEV=y | 298 | CONFIG_NET_L3_MASTER_DEV=y |
| 295 | CONFIG_AF_KCM=m | 299 | CONFIG_AF_KCM=m |
| 296 | # CONFIG_WIRELESS is not set | 300 | # CONFIG_WIRELESS is not set |
| 301 | CONFIG_PSAMPLE=m | ||
| 302 | CONFIG_NET_IFE=m | ||
| 297 | CONFIG_NET_DEVLINK=m | 303 | CONFIG_NET_DEVLINK=m |
| 298 | # CONFIG_UEVENT_HELPER is not set | 304 | # CONFIG_UEVENT_HELPER is not set |
| 299 | CONFIG_DEVTMPFS=y | 305 | CONFIG_DEVTMPFS=y |
| @@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m | |||
| 352 | CONFIG_MACVLAN=m | 358 | CONFIG_MACVLAN=m |
| 353 | CONFIG_MACVTAP=m | 359 | CONFIG_MACVTAP=m |
| 354 | CONFIG_IPVLAN=m | 360 | CONFIG_IPVLAN=m |
| 361 | CONFIG_IPVTAP=m | ||
| 355 | CONFIG_VXLAN=m | 362 | CONFIG_VXLAN=m |
| 356 | CONFIG_GENEVE=m | 363 | CONFIG_GENEVE=m |
| 357 | CONFIG_GTP=m | 364 | CONFIG_GTP=m |
| @@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y | |||
| 361 | CONFIG_VETH=m | 368 | CONFIG_VETH=m |
| 362 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 369 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
| 363 | # CONFIG_NET_VENDOR_AMAZON is not set | 370 | # CONFIG_NET_VENDOR_AMAZON is not set |
| 371 | # CONFIG_NET_VENDOR_AQUANTIA is not set | ||
| 364 | # CONFIG_NET_VENDOR_ARC is not set | 372 | # CONFIG_NET_VENDOR_ARC is not set |
| 365 | # CONFIG_NET_CADENCE is not set | 373 | # CONFIG_NET_CADENCE is not set |
| 366 | # CONFIG_NET_VENDOR_BROADCOM is not set | 374 | # CONFIG_NET_VENDOR_BROADCOM is not set |
| @@ -377,7 +385,6 @@ CONFIG_BVME6000_NET=y | |||
| 377 | # CONFIG_NET_VENDOR_SEEQ is not set | 385 | # CONFIG_NET_VENDOR_SEEQ is not set |
| 378 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 386 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 379 | # CONFIG_NET_VENDOR_STMICRO is not set | 387 | # CONFIG_NET_VENDOR_STMICRO is not set |
| 380 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
| 381 | # CONFIG_NET_VENDOR_VIA is not set | 388 | # CONFIG_NET_VENDOR_VIA is not set |
| 382 | # CONFIG_NET_VENDOR_WIZNET is not set | 389 | # CONFIG_NET_VENDOR_WIZNET is not set |
| 383 | CONFIG_PPP=m | 390 | CONFIG_PPP=m |
| @@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m | |||
| 515 | CONFIG_DLM=m | 522 | CONFIG_DLM=m |
| 516 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set | 523 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set |
| 517 | CONFIG_MAGIC_SYSRQ=y | 524 | CONFIG_MAGIC_SYSRQ=y |
| 525 | CONFIG_WW_MUTEX_SELFTEST=m | ||
| 526 | CONFIG_ATOMIC64_SELFTEST=m | ||
| 518 | CONFIG_ASYNC_RAID6_TEST=m | 527 | CONFIG_ASYNC_RAID6_TEST=m |
| 519 | CONFIG_TEST_HEXDUMP=m | 528 | CONFIG_TEST_HEXDUMP=m |
| 520 | CONFIG_TEST_STRING_HELPERS=m | 529 | CONFIG_TEST_STRING_HELPERS=m |
| @@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
| 545 | CONFIG_CRYPTO_LRW=m | 554 | CONFIG_CRYPTO_LRW=m |
| 546 | CONFIG_CRYPTO_PCBC=m | 555 | CONFIG_CRYPTO_PCBC=m |
| 547 | CONFIG_CRYPTO_KEYWRAP=m | 556 | CONFIG_CRYPTO_KEYWRAP=m |
| 557 | CONFIG_CRYPTO_CMAC=m | ||
| 548 | CONFIG_CRYPTO_XCBC=m | 558 | CONFIG_CRYPTO_XCBC=m |
| 549 | CONFIG_CRYPTO_VMAC=m | 559 | CONFIG_CRYPTO_VMAC=m |
| 550 | CONFIG_CRYPTO_MICHAEL_MIC=m | 560 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| @@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m | |||
| 556 | CONFIG_CRYPTO_SHA3=m | 566 | CONFIG_CRYPTO_SHA3=m |
| 557 | CONFIG_CRYPTO_TGR192=m | 567 | CONFIG_CRYPTO_TGR192=m |
| 558 | CONFIG_CRYPTO_WP512=m | 568 | CONFIG_CRYPTO_WP512=m |
| 569 | CONFIG_CRYPTO_AES_TI=m | ||
| 559 | CONFIG_CRYPTO_ANUBIS=m | 570 | CONFIG_CRYPTO_ANUBIS=m |
| 560 | CONFIG_CRYPTO_BLOWFISH=m | 571 | CONFIG_CRYPTO_BLOWFISH=m |
| 561 | CONFIG_CRYPTO_CAMELLIA=m | 572 | CONFIG_CRYPTO_CAMELLIA=m |
| @@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m | |||
| 580 | CONFIG_CRYPTO_USER_API_RNG=m | 591 | CONFIG_CRYPTO_USER_API_RNG=m |
| 581 | CONFIG_CRYPTO_USER_API_AEAD=m | 592 | CONFIG_CRYPTO_USER_API_AEAD=m |
| 582 | # CONFIG_CRYPTO_HW is not set | 593 | # CONFIG_CRYPTO_HW is not set |
| 594 | CONFIG_CRC32_SELFTEST=m | ||
| 583 | CONFIG_XZ_DEC_TEST=m | 595 | CONFIG_XZ_DEC_TEST=m |
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index aaeed4422cc9..afae6958db2d 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig | |||
| @@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y | |||
| 26 | # CONFIG_EFI_PARTITION is not set | 26 | # CONFIG_EFI_PARTITION is not set |
| 27 | CONFIG_SYSV68_PARTITION=y | 27 | CONFIG_SYSV68_PARTITION=y |
| 28 | CONFIG_IOSCHED_DEADLINE=m | 28 | CONFIG_IOSCHED_DEADLINE=m |
| 29 | CONFIG_MQ_IOSCHED_DEADLINE=m | ||
| 29 | CONFIG_KEXEC=y | 30 | CONFIG_KEXEC=y |
| 30 | CONFIG_BOOTINFO_PROC=y | 31 | CONFIG_BOOTINFO_PROC=y |
| 31 | CONFIG_M68020=y | 32 | CONFIG_M68020=y |
| @@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m | |||
| 58 | CONFIG_NET_FOU_IP_TUNNELS=y | 59 | CONFIG_NET_FOU_IP_TUNNELS=y |
| 59 | CONFIG_INET_AH=m | 60 | CONFIG_INET_AH=m |
| 60 | CONFIG_INET_ESP=m | 61 | CONFIG_INET_ESP=m |
| 62 | CONFIG_INET_ESP_OFFLOAD=m | ||
| 61 | CONFIG_INET_IPCOMP=m | 63 | CONFIG_INET_IPCOMP=m |
| 62 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | 64 | CONFIG_INET_XFRM_MODE_TRANSPORT=m |
| 63 | CONFIG_INET_XFRM_MODE_TUNNEL=m | 65 | CONFIG_INET_XFRM_MODE_TUNNEL=m |
| @@ -69,6 +71,7 @@ CONFIG_IPV6=m | |||
| 69 | CONFIG_IPV6_ROUTER_PREF=y | 71 | CONFIG_IPV6_ROUTER_PREF=y |
| 70 | CONFIG_INET6_AH=m | 72 | CONFIG_INET6_AH=m |
| 71 | CONFIG_INET6_ESP=m | 73 | CONFIG_INET6_ESP=m |
| 74 | CONFIG_INET6_ESP_OFFLOAD=m | ||
| 72 | CONFIG_INET6_IPCOMP=m | 75 | CONFIG_INET6_IPCOMP=m |
| 73 | CONFIG_IPV6_ILA=m | 76 | CONFIG_IPV6_ILA=m |
| 74 | CONFIG_IPV6_VTI=m | 77 | CONFIG_IPV6_VTI=m |
| @@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m | |||
| 99 | CONFIG_NFT_CT=m | 102 | CONFIG_NFT_CT=m |
| 100 | CONFIG_NFT_SET_RBTREE=m | 103 | CONFIG_NFT_SET_RBTREE=m |
| 101 | CONFIG_NFT_SET_HASH=m | 104 | CONFIG_NFT_SET_HASH=m |
| 105 | CONFIG_NFT_SET_BITMAP=m | ||
| 102 | CONFIG_NFT_COUNTER=m | 106 | CONFIG_NFT_COUNTER=m |
| 103 | CONFIG_NFT_LOG=m | 107 | CONFIG_NFT_LOG=m |
| 104 | CONFIG_NFT_LIMIT=m | 108 | CONFIG_NFT_LIMIT=m |
| @@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m | |||
| 296 | CONFIG_NET_L3_MASTER_DEV=y | 300 | CONFIG_NET_L3_MASTER_DEV=y |
| 297 | CONFIG_AF_KCM=m | 301 | CONFIG_AF_KCM=m |
| 298 | # CONFIG_WIRELESS is not set | 302 | # CONFIG_WIRELESS is not set |
| 303 | CONFIG_PSAMPLE=m | ||
| 304 | CONFIG_NET_IFE=m | ||
| 299 | CONFIG_NET_DEVLINK=m | 305 | CONFIG_NET_DEVLINK=m |
| 300 | # CONFIG_UEVENT_HELPER is not set | 306 | # CONFIG_UEVENT_HELPER is not set |
| 301 | CONFIG_DEVTMPFS=y | 307 | CONFIG_DEVTMPFS=y |
| @@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m | |||
| 353 | CONFIG_MACVLAN=m | 359 | CONFIG_MACVLAN=m |
| 354 | CONFIG_MACVTAP=m | 360 | CONFIG_MACVTAP=m |
| 355 | CONFIG_IPVLAN=m | 361 | CONFIG_IPVLAN=m |
| 362 | CONFIG_IPVTAP=m | ||
| 356 | CONFIG_VXLAN=m | 363 | CONFIG_VXLAN=m |
| 357 | CONFIG_GENEVE=m | 364 | CONFIG_GENEVE=m |
| 358 | CONFIG_GTP=m | 365 | CONFIG_GTP=m |
| @@ -363,6 +370,7 @@ CONFIG_VETH=m | |||
| 363 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 370 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
| 364 | # CONFIG_NET_VENDOR_AMAZON is not set | 371 | # CONFIG_NET_VENDOR_AMAZON is not set |
| 365 | CONFIG_HPLANCE=y | 372 | CONFIG_HPLANCE=y |
| 373 | # CONFIG_NET_VENDOR_AQUANTIA is not set | ||
| 366 | # CONFIG_NET_VENDOR_ARC is not set | 374 | # CONFIG_NET_VENDOR_ARC is not set |
| 367 | # CONFIG_NET_CADENCE is not set | 375 | # CONFIG_NET_CADENCE is not set |
| 368 | # CONFIG_NET_VENDOR_BROADCOM is not set | 376 | # CONFIG_NET_VENDOR_BROADCOM is not set |
| @@ -379,7 +387,6 @@ CONFIG_HPLANCE=y | |||
| 379 | # CONFIG_NET_VENDOR_SEEQ is not set | 387 | # CONFIG_NET_VENDOR_SEEQ is not set |
| 380 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 388 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 381 | # CONFIG_NET_VENDOR_STMICRO is not set | 389 | # CONFIG_NET_VENDOR_STMICRO is not set |
| 382 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
| 383 | # CONFIG_NET_VENDOR_VIA is not set | 390 | # CONFIG_NET_VENDOR_VIA is not set |
| 384 | # CONFIG_NET_VENDOR_WIZNET is not set | 391 | # CONFIG_NET_VENDOR_WIZNET is not set |
| 385 | CONFIG_PPP=m | 392 | CONFIG_PPP=m |
| @@ -525,6 +532,8 @@ CONFIG_NLS_MAC_TURKISH=m | |||
| 525 | CONFIG_DLM=m | 532 | CONFIG_DLM=m |
| 526 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set | 533 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set |
| 527 | CONFIG_MAGIC_SYSRQ=y | 534 | CONFIG_MAGIC_SYSRQ=y |
| 535 | CONFIG_WW_MUTEX_SELFTEST=m | ||
| 536 | CONFIG_ATOMIC64_SELFTEST=m | ||
| 528 | CONFIG_ASYNC_RAID6_TEST=m | 537 | CONFIG_ASYNC_RAID6_TEST=m |
| 529 | CONFIG_TEST_HEXDUMP=m | 538 | CONFIG_TEST_HEXDUMP=m |
| 530 | CONFIG_TEST_STRING_HELPERS=m | 539 | CONFIG_TEST_STRING_HELPERS=m |
| @@ -555,6 +564,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
| 555 | CONFIG_CRYPTO_LRW=m | 564 | CONFIG_CRYPTO_LRW=m |
| 556 | CONFIG_CRYPTO_PCBC=m | 565 | CONFIG_CRYPTO_PCBC=m |
| 557 | CONFIG_CRYPTO_KEYWRAP=m | 566 | CONFIG_CRYPTO_KEYWRAP=m |
| 567 | CONFIG_CRYPTO_CMAC=m | ||
| 558 | CONFIG_CRYPTO_XCBC=m | 568 | CONFIG_CRYPTO_XCBC=m |
| 559 | CONFIG_CRYPTO_VMAC=m | 569 | CONFIG_CRYPTO_VMAC=m |
| 560 | CONFIG_CRYPTO_MICHAEL_MIC=m | 570 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| @@ -566,6 +576,7 @@ CONFIG_CRYPTO_SHA512=m | |||
| 566 | CONFIG_CRYPTO_SHA3=m | 576 | CONFIG_CRYPTO_SHA3=m |
| 567 | CONFIG_CRYPTO_TGR192=m | 577 | CONFIG_CRYPTO_TGR192=m |
| 568 | CONFIG_CRYPTO_WP512=m | 578 | CONFIG_CRYPTO_WP512=m |
| 579 | CONFIG_CRYPTO_AES_TI=m | ||
| 569 | CONFIG_CRYPTO_ANUBIS=m | 580 | CONFIG_CRYPTO_ANUBIS=m |
| 570 | CONFIG_CRYPTO_BLOWFISH=m | 581 | CONFIG_CRYPTO_BLOWFISH=m |
| 571 | CONFIG_CRYPTO_CAMELLIA=m | 582 | CONFIG_CRYPTO_CAMELLIA=m |
| @@ -590,4 +601,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m | |||
| 590 | CONFIG_CRYPTO_USER_API_RNG=m | 601 | CONFIG_CRYPTO_USER_API_RNG=m |
| 591 | CONFIG_CRYPTO_USER_API_AEAD=m | 602 | CONFIG_CRYPTO_USER_API_AEAD=m |
| 592 | # CONFIG_CRYPTO_HW is not set | 603 | # CONFIG_CRYPTO_HW is not set |
| 604 | CONFIG_CRC32_SELFTEST=m | ||
| 593 | CONFIG_XZ_DEC_TEST=m | 605 | CONFIG_XZ_DEC_TEST=m |
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 3bbc9b2f0dac..b010734729a7 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig | |||
| @@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y | |||
| 25 | # CONFIG_EFI_PARTITION is not set | 25 | # CONFIG_EFI_PARTITION is not set |
| 26 | CONFIG_SYSV68_PARTITION=y | 26 | CONFIG_SYSV68_PARTITION=y |
| 27 | CONFIG_IOSCHED_DEADLINE=m | 27 | CONFIG_IOSCHED_DEADLINE=m |
| 28 | CONFIG_MQ_IOSCHED_DEADLINE=m | ||
| 28 | CONFIG_KEXEC=y | 29 | CONFIG_KEXEC=y |
| 29 | CONFIG_BOOTINFO_PROC=y | 30 | CONFIG_BOOTINFO_PROC=y |
| 30 | CONFIG_M68020=y | 31 | CONFIG_M68020=y |
| @@ -57,6 +58,7 @@ CONFIG_NET_IPVTI=m | |||
| 57 | CONFIG_NET_FOU_IP_TUNNELS=y | 58 | CONFIG_NET_FOU_IP_TUNNELS=y |
| 58 | CONFIG_INET_AH=m | 59 | CONFIG_INET_AH=m |
| 59 | CONFIG_INET_ESP=m | 60 | CONFIG_INET_ESP=m |
| 61 | CONFIG_INET_ESP_OFFLOAD=m | ||
| 60 | CONFIG_INET_IPCOMP=m | 62 | CONFIG_INET_IPCOMP=m |
| 61 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | 63 | CONFIG_INET_XFRM_MODE_TRANSPORT=m |
| 62 | CONFIG_INET_XFRM_MODE_TUNNEL=m | 64 | CONFIG_INET_XFRM_MODE_TUNNEL=m |
| @@ -68,6 +70,7 @@ CONFIG_IPV6=m | |||
| 68 | CONFIG_IPV6_ROUTER_PREF=y | 70 | CONFIG_IPV6_ROUTER_PREF=y |
| 69 | CONFIG_INET6_AH=m | 71 | CONFIG_INET6_AH=m |
| 70 | CONFIG_INET6_ESP=m | 72 | CONFIG_INET6_ESP=m |
| 73 | CONFIG_INET6_ESP_OFFLOAD=m | ||
| 71 | CONFIG_INET6_IPCOMP=m | 74 | CONFIG_INET6_IPCOMP=m |
| 72 | CONFIG_IPV6_ILA=m | 75 | CONFIG_IPV6_ILA=m |
| 73 | CONFIG_IPV6_VTI=m | 76 | CONFIG_IPV6_VTI=m |
| @@ -98,6 +101,7 @@ CONFIG_NFT_NUMGEN=m | |||
| 98 | CONFIG_NFT_CT=m | 101 | CONFIG_NFT_CT=m |
| 99 | CONFIG_NFT_SET_RBTREE=m | 102 | CONFIG_NFT_SET_RBTREE=m |
| 100 | CONFIG_NFT_SET_HASH=m | 103 | CONFIG_NFT_SET_HASH=m |
| 104 | CONFIG_NFT_SET_BITMAP=m | ||
| 101 | CONFIG_NFT_COUNTER=m | 105 | CONFIG_NFT_COUNTER=m |
| 102 | CONFIG_NFT_LOG=m | 106 | CONFIG_NFT_LOG=m |
| 103 | CONFIG_NFT_LIMIT=m | 107 | CONFIG_NFT_LIMIT=m |
| @@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m | |||
| 298 | CONFIG_NET_L3_MASTER_DEV=y | 302 | CONFIG_NET_L3_MASTER_DEV=y |
| 299 | CONFIG_AF_KCM=m | 303 | CONFIG_AF_KCM=m |
| 300 | # CONFIG_WIRELESS is not set | 304 | # CONFIG_WIRELESS is not set |
| 305 | CONFIG_PSAMPLE=m | ||
| 306 | CONFIG_NET_IFE=m | ||
| 301 | CONFIG_NET_DEVLINK=m | 307 | CONFIG_NET_DEVLINK=m |
| 302 | # CONFIG_UEVENT_HELPER is not set | 308 | # CONFIG_UEVENT_HELPER is not set |
| 303 | CONFIG_DEVTMPFS=y | 309 | CONFIG_DEVTMPFS=y |
| @@ -369,6 +375,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m | |||
| 369 | CONFIG_MACVLAN=m | 375 | CONFIG_MACVLAN=m |
| 370 | CONFIG_MACVTAP=m | 376 | CONFIG_MACVTAP=m |
| 371 | CONFIG_IPVLAN=m | 377 | CONFIG_IPVLAN=m |
| 378 | CONFIG_IPVTAP=m | ||
| 372 | CONFIG_VXLAN=m | 379 | CONFIG_VXLAN=m |
| 373 | CONFIG_GENEVE=m | 380 | CONFIG_GENEVE=m |
| 374 | CONFIG_GTP=m | 381 | CONFIG_GTP=m |
| @@ -379,6 +386,7 @@ CONFIG_VETH=m | |||
| 379 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 386 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
| 380 | # CONFIG_NET_VENDOR_AMAZON is not set | 387 | # CONFIG_NET_VENDOR_AMAZON is not set |
| 381 | CONFIG_MACMACE=y | 388 | CONFIG_MACMACE=y |
| 389 | # CONFIG_NET_VENDOR_AQUANTIA is not set | ||
| 382 | # CONFIG_NET_VENDOR_ARC is not set | 390 | # CONFIG_NET_VENDOR_ARC is not set |
| 383 | # CONFIG_NET_CADENCE is not set | 391 | # CONFIG_NET_CADENCE is not set |
| 384 | # CONFIG_NET_VENDOR_BROADCOM is not set | 392 | # CONFIG_NET_VENDOR_BROADCOM is not set |
| @@ -398,7 +406,6 @@ CONFIG_MAC8390=y | |||
| 398 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 406 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 399 | # CONFIG_NET_VENDOR_SMSC is not set | 407 | # CONFIG_NET_VENDOR_SMSC is not set |
| 400 | # CONFIG_NET_VENDOR_STMICRO is not set | 408 | # CONFIG_NET_VENDOR_STMICRO is not set |
| 401 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
| 402 | # CONFIG_NET_VENDOR_VIA is not set | 409 | # CONFIG_NET_VENDOR_VIA is not set |
| 403 | # CONFIG_NET_VENDOR_WIZNET is not set | 410 | # CONFIG_NET_VENDOR_WIZNET is not set |
| 404 | CONFIG_PPP=m | 411 | CONFIG_PPP=m |
| @@ -547,6 +554,8 @@ CONFIG_NLS_MAC_TURKISH=m | |||
| 547 | CONFIG_DLM=m | 554 | CONFIG_DLM=m |
| 548 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set | 555 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set |
| 549 | CONFIG_MAGIC_SYSRQ=y | 556 | CONFIG_MAGIC_SYSRQ=y |
| 557 | CONFIG_WW_MUTEX_SELFTEST=m | ||
| 558 | CONFIG_ATOMIC64_SELFTEST=m | ||
| 550 | CONFIG_ASYNC_RAID6_TEST=m | 559 | CONFIG_ASYNC_RAID6_TEST=m |
| 551 | CONFIG_TEST_HEXDUMP=m | 560 | CONFIG_TEST_HEXDUMP=m |
| 552 | CONFIG_TEST_STRING_HELPERS=m | 561 | CONFIG_TEST_STRING_HELPERS=m |
| @@ -577,6 +586,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
| 577 | CONFIG_CRYPTO_LRW=m | 586 | CONFIG_CRYPTO_LRW=m |
| 578 | CONFIG_CRYPTO_PCBC=m | 587 | CONFIG_CRYPTO_PCBC=m |
| 579 | CONFIG_CRYPTO_KEYWRAP=m | 588 | CONFIG_CRYPTO_KEYWRAP=m |
| 589 | CONFIG_CRYPTO_CMAC=m | ||
| 580 | CONFIG_CRYPTO_XCBC=m | 590 | CONFIG_CRYPTO_XCBC=m |
| 581 | CONFIG_CRYPTO_VMAC=m | 591 | CONFIG_CRYPTO_VMAC=m |
| 582 | CONFIG_CRYPTO_MICHAEL_MIC=m | 592 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| @@ -588,6 +598,7 @@ CONFIG_CRYPTO_SHA512=m | |||
| 588 | CONFIG_CRYPTO_SHA3=m | 598 | CONFIG_CRYPTO_SHA3=m |
| 589 | CONFIG_CRYPTO_TGR192=m | 599 | CONFIG_CRYPTO_TGR192=m |
| 590 | CONFIG_CRYPTO_WP512=m | 600 | CONFIG_CRYPTO_WP512=m |
| 601 | CONFIG_CRYPTO_AES_TI=m | ||
| 591 | CONFIG_CRYPTO_ANUBIS=m | 602 | CONFIG_CRYPTO_ANUBIS=m |
| 592 | CONFIG_CRYPTO_BLOWFISH=m | 603 | CONFIG_CRYPTO_BLOWFISH=m |
| 593 | CONFIG_CRYPTO_CAMELLIA=m | 604 | CONFIG_CRYPTO_CAMELLIA=m |
| @@ -612,4 +623,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m | |||
| 612 | CONFIG_CRYPTO_USER_API_RNG=m | 623 | CONFIG_CRYPTO_USER_API_RNG=m |
| 613 | CONFIG_CRYPTO_USER_API_AEAD=m | 624 | CONFIG_CRYPTO_USER_API_AEAD=m |
| 614 | # CONFIG_CRYPTO_HW is not set | 625 | # CONFIG_CRYPTO_HW is not set |
| 626 | CONFIG_CRC32_SELFTEST=m | ||
| 615 | CONFIG_XZ_DEC_TEST=m | 627 | CONFIG_XZ_DEC_TEST=m |
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 8f2c0decb2f8..0e414549b235 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig | |||
| @@ -21,6 +21,7 @@ CONFIG_SOLARIS_X86_PARTITION=y | |||
| 21 | CONFIG_UNIXWARE_DISKLABEL=y | 21 | CONFIG_UNIXWARE_DISKLABEL=y |
| 22 | # CONFIG_EFI_PARTITION is not set | 22 | # CONFIG_EFI_PARTITION is not set |
| 23 | CONFIG_IOSCHED_DEADLINE=m | 23 | CONFIG_IOSCHED_DEADLINE=m |
| 24 | CONFIG_MQ_IOSCHED_DEADLINE=m | ||
| 24 | CONFIG_KEXEC=y | 25 | CONFIG_KEXEC=y |
| 25 | CONFIG_BOOTINFO_PROC=y | 26 | CONFIG_BOOTINFO_PROC=y |
| 26 | CONFIG_M68020=y | 27 | CONFIG_M68020=y |
| @@ -67,6 +68,7 @@ CONFIG_NET_IPVTI=m | |||
| 67 | CONFIG_NET_FOU_IP_TUNNELS=y | 68 | CONFIG_NET_FOU_IP_TUNNELS=y |
| 68 | CONFIG_INET_AH=m | 69 | CONFIG_INET_AH=m |
| 69 | CONFIG_INET_ESP=m | 70 | CONFIG_INET_ESP=m |
| 71 | CONFIG_INET_ESP_OFFLOAD=m | ||
| 70 | CONFIG_INET_IPCOMP=m | 72 | CONFIG_INET_IPCOMP=m |
| 71 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | 73 | CONFIG_INET_XFRM_MODE_TRANSPORT=m |
| 72 | CONFIG_INET_XFRM_MODE_TUNNEL=m | 74 | CONFIG_INET_XFRM_MODE_TUNNEL=m |
| @@ -78,6 +80,7 @@ CONFIG_IPV6=m | |||
| 78 | CONFIG_IPV6_ROUTER_PREF=y | 80 | CONFIG_IPV6_ROUTER_PREF=y |
| 79 | CONFIG_INET6_AH=m | 81 | CONFIG_INET6_AH=m |
| 80 | CONFIG_INET6_ESP=m | 82 | CONFIG_INET6_ESP=m |
| 83 | CONFIG_INET6_ESP_OFFLOAD=m | ||
| 81 | CONFIG_INET6_IPCOMP=m | 84 | CONFIG_INET6_IPCOMP=m |
| 82 | CONFIG_IPV6_ILA=m | 85 | CONFIG_IPV6_ILA=m |
| 83 | CONFIG_IPV6_VTI=m | 86 | CONFIG_IPV6_VTI=m |
| @@ -108,6 +111,7 @@ CONFIG_NFT_NUMGEN=m | |||
| 108 | CONFIG_NFT_CT=m | 111 | CONFIG_NFT_CT=m |
| 109 | CONFIG_NFT_SET_RBTREE=m | 112 | CONFIG_NFT_SET_RBTREE=m |
| 110 | CONFIG_NFT_SET_HASH=m | 113 | CONFIG_NFT_SET_HASH=m |
| 114 | CONFIG_NFT_SET_BITMAP=m | ||
| 111 | CONFIG_NFT_COUNTER=m | 115 | CONFIG_NFT_COUNTER=m |
| 112 | CONFIG_NFT_LOG=m | 116 | CONFIG_NFT_LOG=m |
| 113 | CONFIG_NFT_LIMIT=m | 117 | CONFIG_NFT_LIMIT=m |
| @@ -308,6 +312,8 @@ CONFIG_MPLS_IPTUNNEL=m | |||
| 308 | CONFIG_NET_L3_MASTER_DEV=y | 312 | CONFIG_NET_L3_MASTER_DEV=y |
| 309 | CONFIG_AF_KCM=m | 313 | CONFIG_AF_KCM=m |
| 310 | # CONFIG_WIRELESS is not set | 314 | # CONFIG_WIRELESS is not set |
| 315 | CONFIG_PSAMPLE=m | ||
| 316 | CONFIG_NET_IFE=m | ||
| 311 | CONFIG_NET_DEVLINK=m | 317 | CONFIG_NET_DEVLINK=m |
| 312 | # CONFIG_UEVENT_HELPER is not set | 318 | # CONFIG_UEVENT_HELPER is not set |
| 313 | CONFIG_DEVTMPFS=y | 319 | CONFIG_DEVTMPFS=y |
| @@ -402,6 +408,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m | |||
| 402 | CONFIG_MACVLAN=m | 408 | CONFIG_MACVLAN=m |
| 403 | CONFIG_MACVTAP=m | 409 | CONFIG_MACVTAP=m |
| 404 | CONFIG_IPVLAN=m | 410 | CONFIG_IPVLAN=m |
| 411 | CONFIG_IPVTAP=m | ||
| 405 | CONFIG_VXLAN=m | 412 | CONFIG_VXLAN=m |
| 406 | CONFIG_GENEVE=m | 413 | CONFIG_GENEVE=m |
| 407 | CONFIG_GTP=m | 414 | CONFIG_GTP=m |
| @@ -419,6 +426,7 @@ CONFIG_HPLANCE=y | |||
| 419 | CONFIG_MVME147_NET=y | 426 | CONFIG_MVME147_NET=y |
| 420 | CONFIG_SUN3LANCE=y | 427 | CONFIG_SUN3LANCE=y |
| 421 | CONFIG_MACMACE=y | 428 | CONFIG_MACMACE=y |
| 429 | # CONFIG_NET_VENDOR_AQUANTIA is not set | ||
| 422 | # CONFIG_NET_VENDOR_ARC is not set | 430 | # CONFIG_NET_VENDOR_ARC is not set |
| 423 | # CONFIG_NET_CADENCE is not set | 431 | # CONFIG_NET_CADENCE is not set |
| 424 | # CONFIG_NET_VENDOR_BROADCOM is not set | 432 | # CONFIG_NET_VENDOR_BROADCOM is not set |
| @@ -444,7 +452,6 @@ CONFIG_ZORRO8390=y | |||
| 444 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 452 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 445 | CONFIG_SMC91X=y | 453 | CONFIG_SMC91X=y |
| 446 | # CONFIG_NET_VENDOR_STMICRO is not set | 454 | # CONFIG_NET_VENDOR_STMICRO is not set |
| 447 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
| 448 | # CONFIG_NET_VENDOR_VIA is not set | 455 | # CONFIG_NET_VENDOR_VIA is not set |
| 449 | # CONFIG_NET_VENDOR_WIZNET is not set | 456 | # CONFIG_NET_VENDOR_WIZNET is not set |
| 450 | CONFIG_PLIP=m | 457 | CONFIG_PLIP=m |
| @@ -627,6 +634,8 @@ CONFIG_NLS_MAC_TURKISH=m | |||
| 627 | CONFIG_DLM=m | 634 | CONFIG_DLM=m |
| 628 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set | 635 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set |
| 629 | CONFIG_MAGIC_SYSRQ=y | 636 | CONFIG_MAGIC_SYSRQ=y |
| 637 | CONFIG_WW_MUTEX_SELFTEST=m | ||
| 638 | CONFIG_ATOMIC64_SELFTEST=m | ||
| 630 | CONFIG_ASYNC_RAID6_TEST=m | 639 | CONFIG_ASYNC_RAID6_TEST=m |
| 631 | CONFIG_TEST_HEXDUMP=m | 640 | CONFIG_TEST_HEXDUMP=m |
| 632 | CONFIG_TEST_STRING_HELPERS=m | 641 | CONFIG_TEST_STRING_HELPERS=m |
| @@ -657,6 +666,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
| 657 | CONFIG_CRYPTO_LRW=m | 666 | CONFIG_CRYPTO_LRW=m |
| 658 | CONFIG_CRYPTO_PCBC=m | 667 | CONFIG_CRYPTO_PCBC=m |
| 659 | CONFIG_CRYPTO_KEYWRAP=m | 668 | CONFIG_CRYPTO_KEYWRAP=m |
| 669 | CONFIG_CRYPTO_CMAC=m | ||
| 660 | CONFIG_CRYPTO_XCBC=m | 670 | CONFIG_CRYPTO_XCBC=m |
| 661 | CONFIG_CRYPTO_VMAC=m | 671 | CONFIG_CRYPTO_VMAC=m |
| 662 | CONFIG_CRYPTO_MICHAEL_MIC=m | 672 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| @@ -668,6 +678,7 @@ CONFIG_CRYPTO_SHA512=m | |||
| 668 | CONFIG_CRYPTO_SHA3=m | 678 | CONFIG_CRYPTO_SHA3=m |
| 669 | CONFIG_CRYPTO_TGR192=m | 679 | CONFIG_CRYPTO_TGR192=m |
| 670 | CONFIG_CRYPTO_WP512=m | 680 | CONFIG_CRYPTO_WP512=m |
| 681 | CONFIG_CRYPTO_AES_TI=m | ||
| 671 | CONFIG_CRYPTO_ANUBIS=m | 682 | CONFIG_CRYPTO_ANUBIS=m |
| 672 | CONFIG_CRYPTO_BLOWFISH=m | 683 | CONFIG_CRYPTO_BLOWFISH=m |
| 673 | CONFIG_CRYPTO_CAMELLIA=m | 684 | CONFIG_CRYPTO_CAMELLIA=m |
| @@ -692,4 +703,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m | |||
| 692 | CONFIG_CRYPTO_USER_API_RNG=m | 703 | CONFIG_CRYPTO_USER_API_RNG=m |
| 693 | CONFIG_CRYPTO_USER_API_AEAD=m | 704 | CONFIG_CRYPTO_USER_API_AEAD=m |
| 694 | # CONFIG_CRYPTO_HW is not set | 705 | # CONFIG_CRYPTO_HW is not set |
| 706 | CONFIG_CRC32_SELFTEST=m | ||
| 695 | CONFIG_XZ_DEC_TEST=m | 707 | CONFIG_XZ_DEC_TEST=m |
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index c743dd22e96f..b2e687a0ec3d 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig | |||
| @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y | |||
| 25 | CONFIG_SUN_PARTITION=y | 25 | CONFIG_SUN_PARTITION=y |
| 26 | # CONFIG_EFI_PARTITION is not set | 26 | # CONFIG_EFI_PARTITION is not set |
| 27 | CONFIG_IOSCHED_DEADLINE=m | 27 | CONFIG_IOSCHED_DEADLINE=m |
| 28 | CONFIG_MQ_IOSCHED_DEADLINE=m | ||
| 28 | CONFIG_KEXEC=y | 29 | CONFIG_KEXEC=y |
| 29 | CONFIG_BOOTINFO_PROC=y | 30 | CONFIG_BOOTINFO_PROC=y |
| 30 | CONFIG_M68030=y | 31 | CONFIG_M68030=y |
| @@ -55,6 +56,7 @@ CONFIG_NET_IPVTI=m | |||
| 55 | CONFIG_NET_FOU_IP_TUNNELS=y | 56 | CONFIG_NET_FOU_IP_TUNNELS=y |
| 56 | CONFIG_INET_AH=m | 57 | CONFIG_INET_AH=m |
| 57 | CONFIG_INET_ESP=m | 58 | CONFIG_INET_ESP=m |
| 59 | CONFIG_INET_ESP_OFFLOAD=m | ||
| 58 | CONFIG_INET_IPCOMP=m | 60 | CONFIG_INET_IPCOMP=m |
| 59 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | 61 | CONFIG_INET_XFRM_MODE_TRANSPORT=m |
| 60 | CONFIG_INET_XFRM_MODE_TUNNEL=m | 62 | CONFIG_INET_XFRM_MODE_TUNNEL=m |
| @@ -66,6 +68,7 @@ CONFIG_IPV6=m | |||
| 66 | CONFIG_IPV6_ROUTER_PREF=y | 68 | CONFIG_IPV6_ROUTER_PREF=y |
| 67 | CONFIG_INET6_AH=m | 69 | CONFIG_INET6_AH=m |
| 68 | CONFIG_INET6_ESP=m | 70 | CONFIG_INET6_ESP=m |
| 71 | CONFIG_INET6_ESP_OFFLOAD=m | ||
| 69 | CONFIG_INET6_IPCOMP=m | 72 | CONFIG_INET6_IPCOMP=m |
| 70 | CONFIG_IPV6_ILA=m | 73 | CONFIG_IPV6_ILA=m |
| 71 | CONFIG_IPV6_VTI=m | 74 | CONFIG_IPV6_VTI=m |
| @@ -96,6 +99,7 @@ CONFIG_NFT_NUMGEN=m | |||
| 96 | CONFIG_NFT_CT=m | 99 | CONFIG_NFT_CT=m |
| 97 | CONFIG_NFT_SET_RBTREE=m | 100 | CONFIG_NFT_SET_RBTREE=m |
| 98 | CONFIG_NFT_SET_HASH=m | 101 | CONFIG_NFT_SET_HASH=m |
| 102 | CONFIG_NFT_SET_BITMAP=m | ||
| 99 | CONFIG_NFT_COUNTER=m | 103 | CONFIG_NFT_COUNTER=m |
| 100 | CONFIG_NFT_LOG=m | 104 | CONFIG_NFT_LOG=m |
| 101 | CONFIG_NFT_LIMIT=m | 105 | CONFIG_NFT_LIMIT=m |
| @@ -293,6 +297,8 @@ CONFIG_MPLS_IPTUNNEL=m | |||
| 293 | CONFIG_NET_L3_MASTER_DEV=y | 297 | CONFIG_NET_L3_MASTER_DEV=y |
| 294 | CONFIG_AF_KCM=m | 298 | CONFIG_AF_KCM=m |
| 295 | # CONFIG_WIRELESS is not set | 299 | # CONFIG_WIRELESS is not set |
| 300 | CONFIG_PSAMPLE=m | ||
| 301 | CONFIG_NET_IFE=m | ||
| 296 | CONFIG_NET_DEVLINK=m | 302 | CONFIG_NET_DEVLINK=m |
| 297 | # CONFIG_UEVENT_HELPER is not set | 303 | # CONFIG_UEVENT_HELPER is not set |
| 298 | CONFIG_DEVTMPFS=y | 304 | CONFIG_DEVTMPFS=y |
| @@ -351,6 +357,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m | |||
| 351 | CONFIG_MACVLAN=m | 357 | CONFIG_MACVLAN=m |
| 352 | CONFIG_MACVTAP=m | 358 | CONFIG_MACVTAP=m |
| 353 | CONFIG_IPVLAN=m | 359 | CONFIG_IPVLAN=m |
| 360 | CONFIG_IPVTAP=m | ||
| 354 | CONFIG_VXLAN=m | 361 | CONFIG_VXLAN=m |
| 355 | CONFIG_GENEVE=m | 362 | CONFIG_GENEVE=m |
| 356 | CONFIG_GTP=m | 363 | CONFIG_GTP=m |
| @@ -361,6 +368,7 @@ CONFIG_VETH=m | |||
| 361 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 368 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
| 362 | # CONFIG_NET_VENDOR_AMAZON is not set | 369 | # CONFIG_NET_VENDOR_AMAZON is not set |
| 363 | CONFIG_MVME147_NET=y | 370 | CONFIG_MVME147_NET=y |
| 371 | # CONFIG_NET_VENDOR_AQUANTIA is not set | ||
| 364 | # CONFIG_NET_VENDOR_ARC is not set | 372 | # CONFIG_NET_VENDOR_ARC is not set |
| 365 | # CONFIG_NET_CADENCE is not set | 373 | # CONFIG_NET_CADENCE is not set |
| 366 | # CONFIG_NET_VENDOR_BROADCOM is not set | 374 | # CONFIG_NET_VENDOR_BROADCOM is not set |
| @@ -377,7 +385,6 @@ CONFIG_MVME147_NET=y | |||
| 377 | # CONFIG_NET_VENDOR_SEEQ is not set | 385 | # CONFIG_NET_VENDOR_SEEQ is not set |
| 378 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 386 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 379 | # CONFIG_NET_VENDOR_STMICRO is not set | 387 | # CONFIG_NET_VENDOR_STMICRO is not set |
| 380 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
| 381 | # CONFIG_NET_VENDOR_VIA is not set | 388 | # CONFIG_NET_VENDOR_VIA is not set |
| 382 | # CONFIG_NET_VENDOR_WIZNET is not set | 389 | # CONFIG_NET_VENDOR_WIZNET is not set |
| 383 | CONFIG_PPP=m | 390 | CONFIG_PPP=m |
| @@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m | |||
| 515 | CONFIG_DLM=m | 522 | CONFIG_DLM=m |
| 516 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set | 523 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set |
| 517 | CONFIG_MAGIC_SYSRQ=y | 524 | CONFIG_MAGIC_SYSRQ=y |
| 525 | CONFIG_WW_MUTEX_SELFTEST=m | ||
| 526 | CONFIG_ATOMIC64_SELFTEST=m | ||
| 518 | CONFIG_ASYNC_RAID6_TEST=m | 527 | CONFIG_ASYNC_RAID6_TEST=m |
| 519 | CONFIG_TEST_HEXDUMP=m | 528 | CONFIG_TEST_HEXDUMP=m |
| 520 | CONFIG_TEST_STRING_HELPERS=m | 529 | CONFIG_TEST_STRING_HELPERS=m |
| @@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
| 545 | CONFIG_CRYPTO_LRW=m | 554 | CONFIG_CRYPTO_LRW=m |
| 546 | CONFIG_CRYPTO_PCBC=m | 555 | CONFIG_CRYPTO_PCBC=m |
| 547 | CONFIG_CRYPTO_KEYWRAP=m | 556 | CONFIG_CRYPTO_KEYWRAP=m |
| 557 | CONFIG_CRYPTO_CMAC=m | ||
| 548 | CONFIG_CRYPTO_XCBC=m | 558 | CONFIG_CRYPTO_XCBC=m |
| 549 | CONFIG_CRYPTO_VMAC=m | 559 | CONFIG_CRYPTO_VMAC=m |
| 550 | CONFIG_CRYPTO_MICHAEL_MIC=m | 560 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| @@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m | |||
| 556 | CONFIG_CRYPTO_SHA3=m | 566 | CONFIG_CRYPTO_SHA3=m |
| 557 | CONFIG_CRYPTO_TGR192=m | 567 | CONFIG_CRYPTO_TGR192=m |
| 558 | CONFIG_CRYPTO_WP512=m | 568 | CONFIG_CRYPTO_WP512=m |
| 569 | CONFIG_CRYPTO_AES_TI=m | ||
| 559 | CONFIG_CRYPTO_ANUBIS=m | 570 | CONFIG_CRYPTO_ANUBIS=m |
| 560 | CONFIG_CRYPTO_BLOWFISH=m | 571 | CONFIG_CRYPTO_BLOWFISH=m |
| 561 | CONFIG_CRYPTO_CAMELLIA=m | 572 | CONFIG_CRYPTO_CAMELLIA=m |
| @@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m | |||
| 580 | CONFIG_CRYPTO_USER_API_RNG=m | 591 | CONFIG_CRYPTO_USER_API_RNG=m |
| 581 | CONFIG_CRYPTO_USER_API_AEAD=m | 592 | CONFIG_CRYPTO_USER_API_AEAD=m |
| 582 | # CONFIG_CRYPTO_HW is not set | 593 | # CONFIG_CRYPTO_HW is not set |
| 594 | CONFIG_CRC32_SELFTEST=m | ||
| 583 | CONFIG_XZ_DEC_TEST=m | 595 | CONFIG_XZ_DEC_TEST=m |
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 2ccaca858f05..cbd8ee24d1bc 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig | |||
| @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y | |||
| 25 | CONFIG_SUN_PARTITION=y | 25 | CONFIG_SUN_PARTITION=y |
| 26 | # CONFIG_EFI_PARTITION is not set | 26 | # CONFIG_EFI_PARTITION is not set |
| 27 | CONFIG_IOSCHED_DEADLINE=m | 27 | CONFIG_IOSCHED_DEADLINE=m |
| 28 | CONFIG_MQ_IOSCHED_DEADLINE=m | ||
| 28 | CONFIG_KEXEC=y | 29 | CONFIG_KEXEC=y |
| 29 | CONFIG_BOOTINFO_PROC=y | 30 | CONFIG_BOOTINFO_PROC=y |
| 30 | CONFIG_M68040=y | 31 | CONFIG_M68040=y |
| @@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m | |||
| 56 | CONFIG_NET_FOU_IP_TUNNELS=y | 57 | CONFIG_NET_FOU_IP_TUNNELS=y |
| 57 | CONFIG_INET_AH=m | 58 | CONFIG_INET_AH=m |
| 58 | CONFIG_INET_ESP=m | 59 | CONFIG_INET_ESP=m |
| 60 | CONFIG_INET_ESP_OFFLOAD=m | ||
| 59 | CONFIG_INET_IPCOMP=m | 61 | CONFIG_INET_IPCOMP=m |
| 60 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | 62 | CONFIG_INET_XFRM_MODE_TRANSPORT=m |
| 61 | CONFIG_INET_XFRM_MODE_TUNNEL=m | 63 | CONFIG_INET_XFRM_MODE_TUNNEL=m |
| @@ -67,6 +69,7 @@ CONFIG_IPV6=m | |||
| 67 | CONFIG_IPV6_ROUTER_PREF=y | 69 | CONFIG_IPV6_ROUTER_PREF=y |
| 68 | CONFIG_INET6_AH=m | 70 | CONFIG_INET6_AH=m |
| 69 | CONFIG_INET6_ESP=m | 71 | CONFIG_INET6_ESP=m |
| 72 | CONFIG_INET6_ESP_OFFLOAD=m | ||
| 70 | CONFIG_INET6_IPCOMP=m | 73 | CONFIG_INET6_IPCOMP=m |
| 71 | CONFIG_IPV6_ILA=m | 74 | CONFIG_IPV6_ILA=m |
| 72 | CONFIG_IPV6_VTI=m | 75 | CONFIG_IPV6_VTI=m |
| @@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m | |||
| 97 | CONFIG_NFT_CT=m | 100 | CONFIG_NFT_CT=m |
| 98 | CONFIG_NFT_SET_RBTREE=m | 101 | CONFIG_NFT_SET_RBTREE=m |
| 99 | CONFIG_NFT_SET_HASH=m | 102 | CONFIG_NFT_SET_HASH=m |
| 103 | CONFIG_NFT_SET_BITMAP=m | ||
| 100 | CONFIG_NFT_COUNTER=m | 104 | CONFIG_NFT_COUNTER=m |
| 101 | CONFIG_NFT_LOG=m | 105 | CONFIG_NFT_LOG=m |
| 102 | CONFIG_NFT_LIMIT=m | 106 | CONFIG_NFT_LIMIT=m |
| @@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m | |||
| 294 | CONFIG_NET_L3_MASTER_DEV=y | 298 | CONFIG_NET_L3_MASTER_DEV=y |
| 295 | CONFIG_AF_KCM=m | 299 | CONFIG_AF_KCM=m |
| 296 | # CONFIG_WIRELESS is not set | 300 | # CONFIG_WIRELESS is not set |
| 301 | CONFIG_PSAMPLE=m | ||
| 302 | CONFIG_NET_IFE=m | ||
| 297 | CONFIG_NET_DEVLINK=m | 303 | CONFIG_NET_DEVLINK=m |
| 298 | # CONFIG_UEVENT_HELPER is not set | 304 | # CONFIG_UEVENT_HELPER is not set |
| 299 | CONFIG_DEVTMPFS=y | 305 | CONFIG_DEVTMPFS=y |
| @@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m | |||
| 352 | CONFIG_MACVLAN=m | 358 | CONFIG_MACVLAN=m |
| 353 | CONFIG_MACVTAP=m | 359 | CONFIG_MACVTAP=m |
| 354 | CONFIG_IPVLAN=m | 360 | CONFIG_IPVLAN=m |
| 361 | CONFIG_IPVTAP=m | ||
| 355 | CONFIG_VXLAN=m | 362 | CONFIG_VXLAN=m |
| 356 | CONFIG_GENEVE=m | 363 | CONFIG_GENEVE=m |
| 357 | CONFIG_GTP=m | 364 | CONFIG_GTP=m |
| @@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y | |||
| 361 | CONFIG_VETH=m | 368 | CONFIG_VETH=m |
| 362 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 369 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
| 363 | # CONFIG_NET_VENDOR_AMAZON is not set | 370 | # CONFIG_NET_VENDOR_AMAZON is not set |
| 371 | # CONFIG_NET_VENDOR_AQUANTIA is not set | ||
| 364 | # CONFIG_NET_VENDOR_ARC is not set | 372 | # CONFIG_NET_VENDOR_ARC is not set |
| 365 | # CONFIG_NET_CADENCE is not set | 373 | # CONFIG_NET_CADENCE is not set |
| 366 | # CONFIG_NET_VENDOR_BROADCOM is not set | 374 | # CONFIG_NET_VENDOR_BROADCOM is not set |
| @@ -377,7 +385,6 @@ CONFIG_MVME16x_NET=y | |||
| 377 | # CONFIG_NET_VENDOR_SEEQ is not set | 385 | # CONFIG_NET_VENDOR_SEEQ is not set |
| 378 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 386 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 379 | # CONFIG_NET_VENDOR_STMICRO is not set | 387 | # CONFIG_NET_VENDOR_STMICRO is not set |
| 380 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
| 381 | # CONFIG_NET_VENDOR_VIA is not set | 388 | # CONFIG_NET_VENDOR_VIA is not set |
| 382 | # CONFIG_NET_VENDOR_WIZNET is not set | 389 | # CONFIG_NET_VENDOR_WIZNET is not set |
| 383 | CONFIG_PPP=m | 390 | CONFIG_PPP=m |
| @@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m | |||
| 515 | CONFIG_DLM=m | 522 | CONFIG_DLM=m |
| 516 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set | 523 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set |
| 517 | CONFIG_MAGIC_SYSRQ=y | 524 | CONFIG_MAGIC_SYSRQ=y |
| 525 | CONFIG_WW_MUTEX_SELFTEST=m | ||
| 526 | CONFIG_ATOMIC64_SELFTEST=m | ||
| 518 | CONFIG_ASYNC_RAID6_TEST=m | 527 | CONFIG_ASYNC_RAID6_TEST=m |
| 519 | CONFIG_TEST_HEXDUMP=m | 528 | CONFIG_TEST_HEXDUMP=m |
| 520 | CONFIG_TEST_STRING_HELPERS=m | 529 | CONFIG_TEST_STRING_HELPERS=m |
| @@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
| 545 | CONFIG_CRYPTO_LRW=m | 554 | CONFIG_CRYPTO_LRW=m |
| 546 | CONFIG_CRYPTO_PCBC=m | 555 | CONFIG_CRYPTO_PCBC=m |
| 547 | CONFIG_CRYPTO_KEYWRAP=m | 556 | CONFIG_CRYPTO_KEYWRAP=m |
| 557 | CONFIG_CRYPTO_CMAC=m | ||
| 548 | CONFIG_CRYPTO_XCBC=m | 558 | CONFIG_CRYPTO_XCBC=m |
| 549 | CONFIG_CRYPTO_VMAC=m | 559 | CONFIG_CRYPTO_VMAC=m |
| 550 | CONFIG_CRYPTO_MICHAEL_MIC=m | 560 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| @@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m | |||
| 556 | CONFIG_CRYPTO_SHA3=m | 566 | CONFIG_CRYPTO_SHA3=m |
| 557 | CONFIG_CRYPTO_TGR192=m | 567 | CONFIG_CRYPTO_TGR192=m |
| 558 | CONFIG_CRYPTO_WP512=m | 568 | CONFIG_CRYPTO_WP512=m |
| 569 | CONFIG_CRYPTO_AES_TI=m | ||
| 559 | CONFIG_CRYPTO_ANUBIS=m | 570 | CONFIG_CRYPTO_ANUBIS=m |
| 560 | CONFIG_CRYPTO_BLOWFISH=m | 571 | CONFIG_CRYPTO_BLOWFISH=m |
| 561 | CONFIG_CRYPTO_CAMELLIA=m | 572 | CONFIG_CRYPTO_CAMELLIA=m |
| @@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m | |||
| 580 | CONFIG_CRYPTO_USER_API_RNG=m | 591 | CONFIG_CRYPTO_USER_API_RNG=m |
| 581 | CONFIG_CRYPTO_USER_API_AEAD=m | 592 | CONFIG_CRYPTO_USER_API_AEAD=m |
| 582 | # CONFIG_CRYPTO_HW is not set | 593 | # CONFIG_CRYPTO_HW is not set |
| 594 | CONFIG_CRC32_SELFTEST=m | ||
| 583 | CONFIG_XZ_DEC_TEST=m | 595 | CONFIG_XZ_DEC_TEST=m |
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 5599f3fd5fcd..1e82cc944339 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig | |||
| @@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y | |||
| 26 | # CONFIG_EFI_PARTITION is not set | 26 | # CONFIG_EFI_PARTITION is not set |
| 27 | CONFIG_SYSV68_PARTITION=y | 27 | CONFIG_SYSV68_PARTITION=y |
| 28 | CONFIG_IOSCHED_DEADLINE=m | 28 | CONFIG_IOSCHED_DEADLINE=m |
| 29 | CONFIG_MQ_IOSCHED_DEADLINE=m | ||
| 29 | CONFIG_KEXEC=y | 30 | CONFIG_KEXEC=y |
| 30 | CONFIG_BOOTINFO_PROC=y | 31 | CONFIG_BOOTINFO_PROC=y |
| 31 | CONFIG_M68040=y | 32 | CONFIG_M68040=y |
| @@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m | |||
| 56 | CONFIG_NET_FOU_IP_TUNNELS=y | 57 | CONFIG_NET_FOU_IP_TUNNELS=y |
| 57 | CONFIG_INET_AH=m | 58 | CONFIG_INET_AH=m |
| 58 | CONFIG_INET_ESP=m | 59 | CONFIG_INET_ESP=m |
| 60 | CONFIG_INET_ESP_OFFLOAD=m | ||
| 59 | CONFIG_INET_IPCOMP=m | 61 | CONFIG_INET_IPCOMP=m |
| 60 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | 62 | CONFIG_INET_XFRM_MODE_TRANSPORT=m |
| 61 | CONFIG_INET_XFRM_MODE_TUNNEL=m | 63 | CONFIG_INET_XFRM_MODE_TUNNEL=m |
| @@ -67,6 +69,7 @@ CONFIG_IPV6=m | |||
| 67 | CONFIG_IPV6_ROUTER_PREF=y | 69 | CONFIG_IPV6_ROUTER_PREF=y |
| 68 | CONFIG_INET6_AH=m | 70 | CONFIG_INET6_AH=m |
| 69 | CONFIG_INET6_ESP=m | 71 | CONFIG_INET6_ESP=m |
| 72 | CONFIG_INET6_ESP_OFFLOAD=m | ||
| 70 | CONFIG_INET6_IPCOMP=m | 73 | CONFIG_INET6_IPCOMP=m |
| 71 | CONFIG_IPV6_ILA=m | 74 | CONFIG_IPV6_ILA=m |
| 72 | CONFIG_IPV6_VTI=m | 75 | CONFIG_IPV6_VTI=m |
| @@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m | |||
| 97 | CONFIG_NFT_CT=m | 100 | CONFIG_NFT_CT=m |
| 98 | CONFIG_NFT_SET_RBTREE=m | 101 | CONFIG_NFT_SET_RBTREE=m |
| 99 | CONFIG_NFT_SET_HASH=m | 102 | CONFIG_NFT_SET_HASH=m |
| 103 | CONFIG_NFT_SET_BITMAP=m | ||
| 100 | CONFIG_NFT_COUNTER=m | 104 | CONFIG_NFT_COUNTER=m |
| 101 | CONFIG_NFT_LOG=m | 105 | CONFIG_NFT_LOG=m |
| 102 | CONFIG_NFT_LIMIT=m | 106 | CONFIG_NFT_LIMIT=m |
| @@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m | |||
| 294 | CONFIG_NET_L3_MASTER_DEV=y | 298 | CONFIG_NET_L3_MASTER_DEV=y |
| 295 | CONFIG_AF_KCM=m | 299 | CONFIG_AF_KCM=m |
| 296 | # CONFIG_WIRELESS is not set | 300 | # CONFIG_WIRELESS is not set |
| 301 | CONFIG_PSAMPLE=m | ||
| 302 | CONFIG_NET_IFE=m | ||
| 297 | CONFIG_NET_DEVLINK=m | 303 | CONFIG_NET_DEVLINK=m |
| 298 | # CONFIG_UEVENT_HELPER is not set | 304 | # CONFIG_UEVENT_HELPER is not set |
| 299 | CONFIG_DEVTMPFS=y | 305 | CONFIG_DEVTMPFS=y |
| @@ -358,6 +364,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m | |||
| 358 | CONFIG_MACVLAN=m | 364 | CONFIG_MACVLAN=m |
| 359 | CONFIG_MACVTAP=m | 365 | CONFIG_MACVTAP=m |
| 360 | CONFIG_IPVLAN=m | 366 | CONFIG_IPVLAN=m |
| 367 | CONFIG_IPVTAP=m | ||
| 361 | CONFIG_VXLAN=m | 368 | CONFIG_VXLAN=m |
| 362 | CONFIG_GENEVE=m | 369 | CONFIG_GENEVE=m |
| 363 | CONFIG_GTP=m | 370 | CONFIG_GTP=m |
| @@ -369,6 +376,7 @@ CONFIG_VETH=m | |||
| 369 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 376 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
| 370 | # CONFIG_NET_VENDOR_AMAZON is not set | 377 | # CONFIG_NET_VENDOR_AMAZON is not set |
| 371 | # CONFIG_NET_VENDOR_AMD is not set | 378 | # CONFIG_NET_VENDOR_AMD is not set |
| 379 | # CONFIG_NET_VENDOR_AQUANTIA is not set | ||
| 372 | # CONFIG_NET_VENDOR_ARC is not set | 380 | # CONFIG_NET_VENDOR_ARC is not set |
| 373 | # CONFIG_NET_CADENCE is not set | 381 | # CONFIG_NET_CADENCE is not set |
| 374 | # CONFIG_NET_VENDOR_BROADCOM is not set | 382 | # CONFIG_NET_VENDOR_BROADCOM is not set |
| @@ -388,7 +396,6 @@ CONFIG_NE2000=y | |||
| 388 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 396 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 389 | # CONFIG_NET_VENDOR_SMSC is not set | 397 | # CONFIG_NET_VENDOR_SMSC is not set |
| 390 | # CONFIG_NET_VENDOR_STMICRO is not set | 398 | # CONFIG_NET_VENDOR_STMICRO is not set |
| 391 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
| 392 | # CONFIG_NET_VENDOR_VIA is not set | 399 | # CONFIG_NET_VENDOR_VIA is not set |
| 393 | # CONFIG_NET_VENDOR_WIZNET is not set | 400 | # CONFIG_NET_VENDOR_WIZNET is not set |
| 394 | CONFIG_PLIP=m | 401 | CONFIG_PLIP=m |
| @@ -538,6 +545,8 @@ CONFIG_NLS_MAC_TURKISH=m | |||
| 538 | CONFIG_DLM=m | 545 | CONFIG_DLM=m |
| 539 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set | 546 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set |
| 540 | CONFIG_MAGIC_SYSRQ=y | 547 | CONFIG_MAGIC_SYSRQ=y |
| 548 | CONFIG_WW_MUTEX_SELFTEST=m | ||
| 549 | CONFIG_ATOMIC64_SELFTEST=m | ||
| 541 | CONFIG_ASYNC_RAID6_TEST=m | 550 | CONFIG_ASYNC_RAID6_TEST=m |
| 542 | CONFIG_TEST_HEXDUMP=m | 551 | CONFIG_TEST_HEXDUMP=m |
| 543 | CONFIG_TEST_STRING_HELPERS=m | 552 | CONFIG_TEST_STRING_HELPERS=m |
| @@ -568,6 +577,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
| 568 | CONFIG_CRYPTO_LRW=m | 577 | CONFIG_CRYPTO_LRW=m |
| 569 | CONFIG_CRYPTO_PCBC=m | 578 | CONFIG_CRYPTO_PCBC=m |
| 570 | CONFIG_CRYPTO_KEYWRAP=m | 579 | CONFIG_CRYPTO_KEYWRAP=m |
| 580 | CONFIG_CRYPTO_CMAC=m | ||
| 571 | CONFIG_CRYPTO_XCBC=m | 581 | CONFIG_CRYPTO_XCBC=m |
| 572 | CONFIG_CRYPTO_VMAC=m | 582 | CONFIG_CRYPTO_VMAC=m |
| 573 | CONFIG_CRYPTO_MICHAEL_MIC=m | 583 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| @@ -579,6 +589,7 @@ CONFIG_CRYPTO_SHA512=m | |||
| 579 | CONFIG_CRYPTO_SHA3=m | 589 | CONFIG_CRYPTO_SHA3=m |
| 580 | CONFIG_CRYPTO_TGR192=m | 590 | CONFIG_CRYPTO_TGR192=m |
| 581 | CONFIG_CRYPTO_WP512=m | 591 | CONFIG_CRYPTO_WP512=m |
| 592 | CONFIG_CRYPTO_AES_TI=m | ||
| 582 | CONFIG_CRYPTO_ANUBIS=m | 593 | CONFIG_CRYPTO_ANUBIS=m |
| 583 | CONFIG_CRYPTO_BLOWFISH=m | 594 | CONFIG_CRYPTO_BLOWFISH=m |
| 584 | CONFIG_CRYPTO_CAMELLIA=m | 595 | CONFIG_CRYPTO_CAMELLIA=m |
| @@ -603,4 +614,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m | |||
| 603 | CONFIG_CRYPTO_USER_API_RNG=m | 614 | CONFIG_CRYPTO_USER_API_RNG=m |
| 604 | CONFIG_CRYPTO_USER_API_AEAD=m | 615 | CONFIG_CRYPTO_USER_API_AEAD=m |
| 605 | # CONFIG_CRYPTO_HW is not set | 616 | # CONFIG_CRYPTO_HW is not set |
| 617 | CONFIG_CRC32_SELFTEST=m | ||
| 606 | CONFIG_XZ_DEC_TEST=m | 618 | CONFIG_XZ_DEC_TEST=m |
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 313bf0a562ad..f9e77f57a972 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig | |||
| @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y | |||
| 25 | # CONFIG_EFI_PARTITION is not set | 25 | # CONFIG_EFI_PARTITION is not set |
| 26 | CONFIG_SYSV68_PARTITION=y | 26 | CONFIG_SYSV68_PARTITION=y |
| 27 | CONFIG_IOSCHED_DEADLINE=m | 27 | CONFIG_IOSCHED_DEADLINE=m |
| 28 | CONFIG_MQ_IOSCHED_DEADLINE=m | ||
| 28 | CONFIG_KEXEC=y | 29 | CONFIG_KEXEC=y |
| 29 | CONFIG_BOOTINFO_PROC=y | 30 | CONFIG_BOOTINFO_PROC=y |
| 30 | CONFIG_SUN3=y | 31 | CONFIG_SUN3=y |
| @@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m | |||
| 53 | CONFIG_NET_FOU_IP_TUNNELS=y | 54 | CONFIG_NET_FOU_IP_TUNNELS=y |
| 54 | CONFIG_INET_AH=m | 55 | CONFIG_INET_AH=m |
| 55 | CONFIG_INET_ESP=m | 56 | CONFIG_INET_ESP=m |
| 57 | CONFIG_INET_ESP_OFFLOAD=m | ||
| 56 | CONFIG_INET_IPCOMP=m | 58 | CONFIG_INET_IPCOMP=m |
| 57 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | 59 | CONFIG_INET_XFRM_MODE_TRANSPORT=m |
| 58 | CONFIG_INET_XFRM_MODE_TUNNEL=m | 60 | CONFIG_INET_XFRM_MODE_TUNNEL=m |
| @@ -64,6 +66,7 @@ CONFIG_IPV6=m | |||
| 64 | CONFIG_IPV6_ROUTER_PREF=y | 66 | CONFIG_IPV6_ROUTER_PREF=y |
| 65 | CONFIG_INET6_AH=m | 67 | CONFIG_INET6_AH=m |
| 66 | CONFIG_INET6_ESP=m | 68 | CONFIG_INET6_ESP=m |
| 69 | CONFIG_INET6_ESP_OFFLOAD=m | ||
| 67 | CONFIG_INET6_IPCOMP=m | 70 | CONFIG_INET6_IPCOMP=m |
| 68 | CONFIG_IPV6_ILA=m | 71 | CONFIG_IPV6_ILA=m |
| 69 | CONFIG_IPV6_VTI=m | 72 | CONFIG_IPV6_VTI=m |
| @@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m | |||
| 94 | CONFIG_NFT_CT=m | 97 | CONFIG_NFT_CT=m |
| 95 | CONFIG_NFT_SET_RBTREE=m | 98 | CONFIG_NFT_SET_RBTREE=m |
| 96 | CONFIG_NFT_SET_HASH=m | 99 | CONFIG_NFT_SET_HASH=m |
| 100 | CONFIG_NFT_SET_BITMAP=m | ||
| 97 | CONFIG_NFT_COUNTER=m | 101 | CONFIG_NFT_COUNTER=m |
| 98 | CONFIG_NFT_LOG=m | 102 | CONFIG_NFT_LOG=m |
| 99 | CONFIG_NFT_LIMIT=m | 103 | CONFIG_NFT_LIMIT=m |
| @@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m | |||
| 291 | CONFIG_NET_L3_MASTER_DEV=y | 295 | CONFIG_NET_L3_MASTER_DEV=y |
| 292 | CONFIG_AF_KCM=m | 296 | CONFIG_AF_KCM=m |
| 293 | # CONFIG_WIRELESS is not set | 297 | # CONFIG_WIRELESS is not set |
| 298 | CONFIG_PSAMPLE=m | ||
| 299 | CONFIG_NET_IFE=m | ||
| 294 | CONFIG_NET_DEVLINK=m | 300 | CONFIG_NET_DEVLINK=m |
| 295 | # CONFIG_UEVENT_HELPER is not set | 301 | # CONFIG_UEVENT_HELPER is not set |
| 296 | CONFIG_DEVTMPFS=y | 302 | CONFIG_DEVTMPFS=y |
| @@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m | |||
| 349 | CONFIG_MACVLAN=m | 355 | CONFIG_MACVLAN=m |
| 350 | CONFIG_MACVTAP=m | 356 | CONFIG_MACVTAP=m |
| 351 | CONFIG_IPVLAN=m | 357 | CONFIG_IPVLAN=m |
| 358 | CONFIG_IPVTAP=m | ||
| 352 | CONFIG_VXLAN=m | 359 | CONFIG_VXLAN=m |
| 353 | CONFIG_GENEVE=m | 360 | CONFIG_GENEVE=m |
| 354 | CONFIG_GTP=m | 361 | CONFIG_GTP=m |
| @@ -359,6 +366,7 @@ CONFIG_VETH=m | |||
| 359 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 366 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
| 360 | # CONFIG_NET_VENDOR_AMAZON is not set | 367 | # CONFIG_NET_VENDOR_AMAZON is not set |
| 361 | CONFIG_SUN3LANCE=y | 368 | CONFIG_SUN3LANCE=y |
| 369 | # CONFIG_NET_VENDOR_AQUANTIA is not set | ||
| 362 | # CONFIG_NET_VENDOR_ARC is not set | 370 | # CONFIG_NET_VENDOR_ARC is not set |
| 363 | # CONFIG_NET_CADENCE is not set | 371 | # CONFIG_NET_CADENCE is not set |
| 364 | # CONFIG_NET_VENDOR_EZCHIP is not set | 372 | # CONFIG_NET_VENDOR_EZCHIP is not set |
| @@ -375,7 +383,6 @@ CONFIG_SUN3_82586=y | |||
| 375 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 383 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 376 | # CONFIG_NET_VENDOR_STMICRO is not set | 384 | # CONFIG_NET_VENDOR_STMICRO is not set |
| 377 | # CONFIG_NET_VENDOR_SUN is not set | 385 | # CONFIG_NET_VENDOR_SUN is not set |
| 378 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
| 379 | # CONFIG_NET_VENDOR_VIA is not set | 386 | # CONFIG_NET_VENDOR_VIA is not set |
| 380 | # CONFIG_NET_VENDOR_WIZNET is not set | 387 | # CONFIG_NET_VENDOR_WIZNET is not set |
| 381 | CONFIG_PPP=m | 388 | CONFIG_PPP=m |
| @@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m | |||
| 517 | CONFIG_DLM=m | 524 | CONFIG_DLM=m |
| 518 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set | 525 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set |
| 519 | CONFIG_MAGIC_SYSRQ=y | 526 | CONFIG_MAGIC_SYSRQ=y |
| 527 | CONFIG_WW_MUTEX_SELFTEST=m | ||
| 528 | CONFIG_ATOMIC64_SELFTEST=m | ||
| 520 | CONFIG_ASYNC_RAID6_TEST=m | 529 | CONFIG_ASYNC_RAID6_TEST=m |
| 521 | CONFIG_TEST_HEXDUMP=m | 530 | CONFIG_TEST_HEXDUMP=m |
| 522 | CONFIG_TEST_STRING_HELPERS=m | 531 | CONFIG_TEST_STRING_HELPERS=m |
| @@ -546,6 +555,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
| 546 | CONFIG_CRYPTO_LRW=m | 555 | CONFIG_CRYPTO_LRW=m |
| 547 | CONFIG_CRYPTO_PCBC=m | 556 | CONFIG_CRYPTO_PCBC=m |
| 548 | CONFIG_CRYPTO_KEYWRAP=m | 557 | CONFIG_CRYPTO_KEYWRAP=m |
| 558 | CONFIG_CRYPTO_CMAC=m | ||
| 549 | CONFIG_CRYPTO_XCBC=m | 559 | CONFIG_CRYPTO_XCBC=m |
| 550 | CONFIG_CRYPTO_VMAC=m | 560 | CONFIG_CRYPTO_VMAC=m |
| 551 | CONFIG_CRYPTO_MICHAEL_MIC=m | 561 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| @@ -557,6 +567,7 @@ CONFIG_CRYPTO_SHA512=m | |||
| 557 | CONFIG_CRYPTO_SHA3=m | 567 | CONFIG_CRYPTO_SHA3=m |
| 558 | CONFIG_CRYPTO_TGR192=m | 568 | CONFIG_CRYPTO_TGR192=m |
| 559 | CONFIG_CRYPTO_WP512=m | 569 | CONFIG_CRYPTO_WP512=m |
| 570 | CONFIG_CRYPTO_AES_TI=m | ||
| 560 | CONFIG_CRYPTO_ANUBIS=m | 571 | CONFIG_CRYPTO_ANUBIS=m |
| 561 | CONFIG_CRYPTO_BLOWFISH=m | 572 | CONFIG_CRYPTO_BLOWFISH=m |
| 562 | CONFIG_CRYPTO_CAMELLIA=m | 573 | CONFIG_CRYPTO_CAMELLIA=m |
| @@ -581,4 +592,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m | |||
| 581 | CONFIG_CRYPTO_USER_API_RNG=m | 592 | CONFIG_CRYPTO_USER_API_RNG=m |
| 582 | CONFIG_CRYPTO_USER_API_AEAD=m | 593 | CONFIG_CRYPTO_USER_API_AEAD=m |
| 583 | # CONFIG_CRYPTO_HW is not set | 594 | # CONFIG_CRYPTO_HW is not set |
| 595 | CONFIG_CRC32_SELFTEST=m | ||
| 584 | CONFIG_XZ_DEC_TEST=m | 596 | CONFIG_XZ_DEC_TEST=m |
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 38b61365f769..3c394fcfb368 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig | |||
| @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y | |||
| 25 | # CONFIG_EFI_PARTITION is not set | 25 | # CONFIG_EFI_PARTITION is not set |
| 26 | CONFIG_SYSV68_PARTITION=y | 26 | CONFIG_SYSV68_PARTITION=y |
| 27 | CONFIG_IOSCHED_DEADLINE=m | 27 | CONFIG_IOSCHED_DEADLINE=m |
| 28 | CONFIG_MQ_IOSCHED_DEADLINE=m | ||
| 28 | CONFIG_KEXEC=y | 29 | CONFIG_KEXEC=y |
| 29 | CONFIG_BOOTINFO_PROC=y | 30 | CONFIG_BOOTINFO_PROC=y |
| 30 | CONFIG_SUN3X=y | 31 | CONFIG_SUN3X=y |
| @@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m | |||
| 53 | CONFIG_NET_FOU_IP_TUNNELS=y | 54 | CONFIG_NET_FOU_IP_TUNNELS=y |
| 54 | CONFIG_INET_AH=m | 55 | CONFIG_INET_AH=m |
| 55 | CONFIG_INET_ESP=m | 56 | CONFIG_INET_ESP=m |
| 57 | CONFIG_INET_ESP_OFFLOAD=m | ||
| 56 | CONFIG_INET_IPCOMP=m | 58 | CONFIG_INET_IPCOMP=m |
| 57 | CONFIG_INET_XFRM_MODE_TRANSPORT=m | 59 | CONFIG_INET_XFRM_MODE_TRANSPORT=m |
| 58 | CONFIG_INET_XFRM_MODE_TUNNEL=m | 60 | CONFIG_INET_XFRM_MODE_TUNNEL=m |
| @@ -64,6 +66,7 @@ CONFIG_IPV6=m | |||
| 64 | CONFIG_IPV6_ROUTER_PREF=y | 66 | CONFIG_IPV6_ROUTER_PREF=y |
| 65 | CONFIG_INET6_AH=m | 67 | CONFIG_INET6_AH=m |
| 66 | CONFIG_INET6_ESP=m | 68 | CONFIG_INET6_ESP=m |
| 69 | CONFIG_INET6_ESP_OFFLOAD=m | ||
| 67 | CONFIG_INET6_IPCOMP=m | 70 | CONFIG_INET6_IPCOMP=m |
| 68 | CONFIG_IPV6_ILA=m | 71 | CONFIG_IPV6_ILA=m |
| 69 | CONFIG_IPV6_VTI=m | 72 | CONFIG_IPV6_VTI=m |
| @@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m | |||
| 94 | CONFIG_NFT_CT=m | 97 | CONFIG_NFT_CT=m |
| 95 | CONFIG_NFT_SET_RBTREE=m | 98 | CONFIG_NFT_SET_RBTREE=m |
| 96 | CONFIG_NFT_SET_HASH=m | 99 | CONFIG_NFT_SET_HASH=m |
| 100 | CONFIG_NFT_SET_BITMAP=m | ||
| 97 | CONFIG_NFT_COUNTER=m | 101 | CONFIG_NFT_COUNTER=m |
| 98 | CONFIG_NFT_LOG=m | 102 | CONFIG_NFT_LOG=m |
| 99 | CONFIG_NFT_LIMIT=m | 103 | CONFIG_NFT_LIMIT=m |
| @@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m | |||
| 291 | CONFIG_NET_L3_MASTER_DEV=y | 295 | CONFIG_NET_L3_MASTER_DEV=y |
| 292 | CONFIG_AF_KCM=m | 296 | CONFIG_AF_KCM=m |
| 293 | # CONFIG_WIRELESS is not set | 297 | # CONFIG_WIRELESS is not set |
| 298 | CONFIG_PSAMPLE=m | ||
| 299 | CONFIG_NET_IFE=m | ||
| 294 | CONFIG_NET_DEVLINK=m | 300 | CONFIG_NET_DEVLINK=m |
| 295 | # CONFIG_UEVENT_HELPER is not set | 301 | # CONFIG_UEVENT_HELPER is not set |
| 296 | CONFIG_DEVTMPFS=y | 302 | CONFIG_DEVTMPFS=y |
| @@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m | |||
| 349 | CONFIG_MACVLAN=m | 355 | CONFIG_MACVLAN=m |
| 350 | CONFIG_MACVTAP=m | 356 | CONFIG_MACVTAP=m |
| 351 | CONFIG_IPVLAN=m | 357 | CONFIG_IPVLAN=m |
| 358 | CONFIG_IPVTAP=m | ||
| 352 | CONFIG_VXLAN=m | 359 | CONFIG_VXLAN=m |
| 353 | CONFIG_GENEVE=m | 360 | CONFIG_GENEVE=m |
| 354 | CONFIG_GTP=m | 361 | CONFIG_GTP=m |
| @@ -359,6 +366,7 @@ CONFIG_VETH=m | |||
| 359 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 366 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
| 360 | # CONFIG_NET_VENDOR_AMAZON is not set | 367 | # CONFIG_NET_VENDOR_AMAZON is not set |
| 361 | CONFIG_SUN3LANCE=y | 368 | CONFIG_SUN3LANCE=y |
| 369 | # CONFIG_NET_VENDOR_AQUANTIA is not set | ||
| 362 | # CONFIG_NET_VENDOR_ARC is not set | 370 | # CONFIG_NET_VENDOR_ARC is not set |
| 363 | # CONFIG_NET_CADENCE is not set | 371 | # CONFIG_NET_CADENCE is not set |
| 364 | # CONFIG_NET_VENDOR_BROADCOM is not set | 372 | # CONFIG_NET_VENDOR_BROADCOM is not set |
| @@ -375,7 +383,6 @@ CONFIG_SUN3LANCE=y | |||
| 375 | # CONFIG_NET_VENDOR_SEEQ is not set | 383 | # CONFIG_NET_VENDOR_SEEQ is not set |
| 376 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 384 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 377 | # CONFIG_NET_VENDOR_STMICRO is not set | 385 | # CONFIG_NET_VENDOR_STMICRO is not set |
| 378 | # CONFIG_NET_VENDOR_SYNOPSYS is not set | ||
| 379 | # CONFIG_NET_VENDOR_VIA is not set | 386 | # CONFIG_NET_VENDOR_VIA is not set |
| 380 | # CONFIG_NET_VENDOR_WIZNET is not set | 387 | # CONFIG_NET_VENDOR_WIZNET is not set |
| 381 | CONFIG_PPP=m | 388 | CONFIG_PPP=m |
| @@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m | |||
| 517 | CONFIG_DLM=m | 524 | CONFIG_DLM=m |
| 518 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set | 525 | # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set |
| 519 | CONFIG_MAGIC_SYSRQ=y | 526 | CONFIG_MAGIC_SYSRQ=y |
| 527 | CONFIG_WW_MUTEX_SELFTEST=m | ||
| 528 | CONFIG_ATOMIC64_SELFTEST=m | ||
| 520 | CONFIG_ASYNC_RAID6_TEST=m | 529 | CONFIG_ASYNC_RAID6_TEST=m |
| 521 | CONFIG_TEST_HEXDUMP=m | 530 | CONFIG_TEST_HEXDUMP=m |
| 522 | CONFIG_TEST_STRING_HELPERS=m | 531 | CONFIG_TEST_STRING_HELPERS=m |
| @@ -547,6 +556,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m | |||
| 547 | CONFIG_CRYPTO_LRW=m | 556 | CONFIG_CRYPTO_LRW=m |
| 548 | CONFIG_CRYPTO_PCBC=m | 557 | CONFIG_CRYPTO_PCBC=m |
| 549 | CONFIG_CRYPTO_KEYWRAP=m | 558 | CONFIG_CRYPTO_KEYWRAP=m |
| 559 | CONFIG_CRYPTO_CMAC=m | ||
| 550 | CONFIG_CRYPTO_XCBC=m | 560 | CONFIG_CRYPTO_XCBC=m |
| 551 | CONFIG_CRYPTO_VMAC=m | 561 | CONFIG_CRYPTO_VMAC=m |
| 552 | CONFIG_CRYPTO_MICHAEL_MIC=m | 562 | CONFIG_CRYPTO_MICHAEL_MIC=m |
| @@ -558,6 +568,7 @@ CONFIG_CRYPTO_SHA512=m | |||
| 558 | CONFIG_CRYPTO_SHA3=m | 568 | CONFIG_CRYPTO_SHA3=m |
| 559 | CONFIG_CRYPTO_TGR192=m | 569 | CONFIG_CRYPTO_TGR192=m |
| 560 | CONFIG_CRYPTO_WP512=m | 570 | CONFIG_CRYPTO_WP512=m |
| 571 | CONFIG_CRYPTO_AES_TI=m | ||
| 561 | CONFIG_CRYPTO_ANUBIS=m | 572 | CONFIG_CRYPTO_ANUBIS=m |
| 562 | CONFIG_CRYPTO_BLOWFISH=m | 573 | CONFIG_CRYPTO_BLOWFISH=m |
| 563 | CONFIG_CRYPTO_CAMELLIA=m | 574 | CONFIG_CRYPTO_CAMELLIA=m |
| @@ -582,4 +593,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m | |||
| 582 | CONFIG_CRYPTO_USER_API_RNG=m | 593 | CONFIG_CRYPTO_USER_API_RNG=m |
| 583 | CONFIG_CRYPTO_USER_API_AEAD=m | 594 | CONFIG_CRYPTO_USER_API_AEAD=m |
| 584 | # CONFIG_CRYPTO_HW is not set | 595 | # CONFIG_CRYPTO_HW is not set |
| 596 | CONFIG_CRC32_SELFTEST=m | ||
| 585 | CONFIG_XZ_DEC_TEST=m | 597 | CONFIG_XZ_DEC_TEST=m |
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index b4a9b0d5928d..dda58cfe8c22 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h | |||
| @@ -148,7 +148,7 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr) | |||
| 148 | #define __change_bit(nr, vaddr) change_bit(nr, vaddr) | 148 | #define __change_bit(nr, vaddr) change_bit(nr, vaddr) |
| 149 | 149 | ||
| 150 | 150 | ||
| 151 | static inline int test_bit(int nr, const unsigned long *vaddr) | 151 | static inline int test_bit(int nr, const volatile unsigned long *vaddr) |
| 152 | { | 152 | { |
| 153 | return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; | 153 | return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; |
| 154 | } | 154 | } |
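
Note: the only functional change above is the volatile qualifier on test_bit()'s pointer argument. A reduced sketch of the callers this unblocks (names are illustrative, assuming a bitmap an interrupt handler updates concurrently):

	/* With the old prototype, passing a volatile-qualified bitmap
	 * drew a "discards 'volatile' qualifier" warning. */
	static volatile unsigned long irq_pending[1];

	static int poll_pending(int nr)
	{
		return test_bit(nr, irq_pending);	/* now warning-free */
	}
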
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index a857d82ec509..aab1edd0d4ba 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | #include <uapi/asm/unistd.h> | 4 | #include <uapi/asm/unistd.h> |
| 5 | 5 | ||
| 6 | 6 | ||
| 7 | #define NR_syscalls 379 | 7 | #define NR_syscalls 380 |
| 8 | 8 | ||
| 9 | #define __ARCH_WANT_OLD_READDIR | 9 | #define __ARCH_WANT_OLD_READDIR |
| 10 | #define __ARCH_WANT_OLD_STAT | 10 | #define __ARCH_WANT_OLD_STAT |
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 9fe674bf911f..25589f5b8669 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h | |||
| @@ -384,5 +384,6 @@ | |||
| 384 | #define __NR_copy_file_range 376 | 384 | #define __NR_copy_file_range 376 |
| 385 | #define __NR_preadv2 377 | 385 | #define __NR_preadv2 377 |
| 386 | #define __NR_pwritev2 378 | 386 | #define __NR_pwritev2 378 |
| 387 | #define __NR_statx 379 | ||
| 387 | 388 | ||
| 388 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ | 389 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ |
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index d6fd6d9ced24..8c9fcfafe0dd 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S | |||
| @@ -399,3 +399,4 @@ ENTRY(sys_call_table) | |||
| 399 | .long sys_copy_file_range | 399 | .long sys_copy_file_range |
| 400 | .long sys_preadv2 | 400 | .long sys_preadv2 |
| 401 | .long sys_pwritev2 | 401 | .long sys_pwritev2 |
| 402 | .long sys_statx | ||
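
Note: the three m68k hunks above wire up statx end to end — the syscall table gains sys_statx, the uapi header assigns __NR_statx = 379, and NR_syscalls grows to 380. A minimal user-space smoke test, assuming 4.11-era installed headers (no glibc wrapper existed yet, so the call goes by number):

	#include <stdio.h>
	#include <unistd.h>
	#include <fcntl.h>
	#include <sys/syscall.h>
	#include <linux/stat.h>

	int main(void)
	{
		struct statx stx;

		if (syscall(__NR_statx, AT_FDCWD, "/etc/passwd", 0,
			    STATX_BASIC_STATS, &stx) != 0)
			return 1;
		printf("size=%llu\n", (unsigned long long)stx.stx_size);
		return 0;
	}
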
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h index 5fcb9ac72693..f0a5d8b844d6 100644 --- a/arch/openrisc/include/asm/cmpxchg.h +++ b/arch/openrisc/include/asm/cmpxchg.h | |||
| @@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, | |||
| 77 | return val; | 77 | return val; |
| 78 | } | 78 | } |
| 79 | 79 | ||
| 80 | #define xchg(ptr, with) \ | 80 | #define xchg(ptr, with) \ |
| 81 | ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr)))) | 81 | ({ \ |
| 82 | (__typeof__(*(ptr))) __xchg((unsigned long)(with), \ | ||
| 83 | (ptr), \ | ||
| 84 | sizeof(*(ptr))); \ | ||
| 85 | }) | ||
| 82 | 86 | ||
| 83 | #endif /* __ASM_OPENRISC_CMPXCHG_H */ | 87 | #endif /* __ASM_OPENRISC_CMPXCHG_H */ |
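
Note: wrapping xchg() in a GCC statement expression keeps the expansion a single expression whose value may be discarded silently; with the bare cast form, callers that use xchg() purely for its side effect can trip a "value computed is not used" warning, which is likely the motivation here. Illustrative:

	unsigned long old, lock_word = 0;

	old = xchg(&lock_word, 1);	/* result consumed: fine either way */
	xchg(&lock_word, 0);		/* result dropped: the ({ ... }) form
					   stays warning-free */
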
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 140faa16685a..1311e6b13991 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h | |||
| @@ -211,7 +211,7 @@ do { \ | |||
| 211 | case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \ | 211 | case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \ |
| 212 | case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \ | 212 | case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \ |
| 213 | case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \ | 213 | case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \ |
| 214 | case 8: __get_user_asm2(x, ptr, retval); \ | 214 | case 8: __get_user_asm2(x, ptr, retval); break; \ |
| 215 | default: (x) = __get_user_bad(); \ | 215 | default: (x) = __get_user_bad(); \ |
| 216 | } \ | 216 | } \ |
| 217 | } while (0) | 217 | } while (0) |
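
Note: the uaccess change is a one-token bug fix — the 8-byte case was missing its break, so a successful load fell into the error arm. Reduced to its essence (helper names are made up):

	switch (size) {
	case 8:
		x = load_8_bytes(ptr);	/* succeeded, then fell through... */
	default:
		x = bad_value();	/* ...and clobbered the result */
	}

With the break in place, an 8-byte __get_user() keeps its result.
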
diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c index 5c4695d13542..ee3e604959e1 100644 --- a/arch/openrisc/kernel/or32_ksyms.c +++ b/arch/openrisc/kernel/or32_ksyms.c | |||
| @@ -30,6 +30,7 @@ | |||
| 30 | #include <asm/hardirq.h> | 30 | #include <asm/hardirq.h> |
| 31 | #include <asm/delay.h> | 31 | #include <asm/delay.h> |
| 32 | #include <asm/pgalloc.h> | 32 | #include <asm/pgalloc.h> |
| 33 | #include <asm/pgtable.h> | ||
| 33 | 34 | ||
| 34 | #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name) | 35 | #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name) |
| 35 | 36 | ||
| @@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3); | |||
| 42 | DECLARE_EXPORT(__ashrdi3); | 43 | DECLARE_EXPORT(__ashrdi3); |
| 43 | DECLARE_EXPORT(__ashldi3); | 44 | DECLARE_EXPORT(__ashldi3); |
| 44 | DECLARE_EXPORT(__lshrdi3); | 45 | DECLARE_EXPORT(__lshrdi3); |
| 46 | DECLARE_EXPORT(__ucmpdi2); | ||
| 45 | 47 | ||
| 48 | EXPORT_SYMBOL(empty_zero_page); | ||
| 46 | EXPORT_SYMBOL(__copy_tofrom_user); | 49 | EXPORT_SYMBOL(__copy_tofrom_user); |
| 50 | EXPORT_SYMBOL(__clear_user); | ||
| 47 | EXPORT_SYMBOL(memset); | 51 | EXPORT_SYMBOL(memset); |
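
Note: __ucmpdi2 is a libgcc helper; on 32-bit OpenRISC the compiler may lower some 64-bit comparisons to a call rather than inline code, so modules need the symbol at load time. A sketch of code that can generate such a call (illustrative only):

	int u64_less(unsigned long long a, unsigned long long b)
	{
		return a < b;	/* may compile to a __ucmpdi2 call on or32 */
	}

empty_zero_page and __clear_user get the same treatment for module code that references them.
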
diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c index 828a29110459..f8da545854f9 100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c | |||
| @@ -90,6 +90,7 @@ void arch_cpu_idle(void) | |||
| 90 | } | 90 | } |
| 91 | 91 | ||
| 92 | void (*pm_power_off) (void) = machine_power_off; | 92 | void (*pm_power_off) (void) = machine_power_off; |
| 93 | EXPORT_SYMBOL(pm_power_off); | ||
| 93 | 94 | ||
| 94 | /* | 95 | /* |
| 95 | * When a process does an "exec", machine state like FPU and debug | 96 | * When a process does an "exec", machine state like FPU and debug |
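
Note: exporting pm_power_off lets a power-off driver built as a module install the machine-wide callback. A hypothetical driver, sketched:

	static void board_power_off(void)
	{
		/* poke the board's power controller here */
	}

	static int __init board_poweroff_init(void)
	{
		pm_power_off = board_power_off;	/* needs the EXPORT_SYMBOL */
		return 0;
	}
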
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 19c9c3c5f267..c7e15cc5c668 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h | |||
| @@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page) | |||
| 43 | 43 | ||
| 44 | #define flush_kernel_dcache_range(start,size) \ | 44 | #define flush_kernel_dcache_range(start,size) \ |
| 45 | flush_kernel_dcache_range_asm((start), (start)+(size)); | 45 | flush_kernel_dcache_range_asm((start), (start)+(size)); |
| 46 | /* vmap range flushes and invalidates. Architecturally, we don't need | ||
| 47 | * the invalidate, because the CPU should refuse to speculate once an | ||
| 48 | * area has been flushed, so invalidate is left empty */ | ||
| 49 | static inline void flush_kernel_vmap_range(void *vaddr, int size) | ||
| 50 | { | ||
| 51 | unsigned long start = (unsigned long)vaddr; | ||
| 52 | |||
| 53 | flush_kernel_dcache_range_asm(start, start + size); | ||
| 54 | } | ||
| 55 | static inline void invalidate_kernel_vmap_range(void *vaddr, int size) | ||
| 56 | { | ||
| 57 | unsigned long start = (unsigned long)vaddr; | ||
| 58 | void *cursor = vaddr; | ||
| 59 | 46 | ||
| 60 | for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) { | 47 | void flush_kernel_vmap_range(void *vaddr, int size); |
| 61 | struct page *page = vmalloc_to_page(cursor); | 48 | void invalidate_kernel_vmap_range(void *vaddr, int size); |
| 62 | |||
| 63 | if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) | ||
| 64 | flush_kernel_dcache_page(page); | ||
| 65 | } | ||
| 66 | flush_kernel_dcache_range_asm(start, start + size); | ||
| 67 | } | ||
| 68 | 49 | ||
| 69 | #define flush_cache_vmap(start, end) flush_cache_all() | 50 | #define flush_cache_vmap(start, end) flush_cache_all() |
| 70 | #define flush_cache_vunmap(start, end) flush_cache_all() | 51 | #define flush_cache_vunmap(start, end) flush_cache_all() |
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index fb4382c28259..edfbf9d6a6dd 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h | |||
| @@ -32,7 +32,8 @@ | |||
| 32 | * that put_user is the same as __put_user, etc. | 32 | * that put_user is the same as __put_user, etc. |
| 33 | */ | 33 | */ |
| 34 | 34 | ||
| 35 | #define access_ok(type, uaddr, size) (1) | 35 | #define access_ok(type, uaddr, size) \ |
| 36 | ( (uaddr) == (uaddr) ) | ||
| 36 | 37 | ||
| 37 | #define put_user __put_user | 38 | #define put_user __put_user |
| 38 | #define get_user __get_user | 39 | #define get_user __get_user |
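
Note: the new access_ok() still always evaluates true (parisc checks user addresses at fault time), but (uaddr) == (uaddr) references the argument, so a pointer computed solely for the check no longer looks set-but-unused to the compiler. A reduced caller, illustrative:

	static int check_range(unsigned long base, unsigned long off, size_t len)
	{
		const void __user *uaddr = (const void __user *)(base + off);

		/* With access_ok() expanding to (1), uaddr was never used. */
		if (!access_ok(VERIFY_READ, uaddr, len))
			return -EFAULT;
		return 0;
	}
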
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 6b0741e7a7ed..667c99421003 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h | |||
| @@ -362,8 +362,9 @@ | |||
| 362 | #define __NR_copy_file_range (__NR_Linux + 346) | 362 | #define __NR_copy_file_range (__NR_Linux + 346) |
| 363 | #define __NR_preadv2 (__NR_Linux + 347) | 363 | #define __NR_preadv2 (__NR_Linux + 347) |
| 364 | #define __NR_pwritev2 (__NR_Linux + 348) | 364 | #define __NR_pwritev2 (__NR_Linux + 348) |
| 365 | #define __NR_statx (__NR_Linux + 349) | ||
| 365 | 366 | ||
| 366 | #define __NR_Linux_syscalls (__NR_pwritev2 + 1) | 367 | #define __NR_Linux_syscalls (__NR_statx + 1) |
| 367 | 368 | ||
| 368 | 369 | ||
| 369 | #define __IGNORE_select /* newselect */ | 370 | #define __IGNORE_select /* newselect */ |
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 0dc72d5de861..c32a09095216 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c | |||
| @@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long | |||
| 616 | __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); | 616 | __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); |
| 617 | } | 617 | } |
| 618 | } | 618 | } |
| 619 | |||
| 620 | void flush_kernel_vmap_range(void *vaddr, int size) | ||
| 621 | { | ||
| 622 | unsigned long start = (unsigned long)vaddr; | ||
| 623 | |||
| 624 | if ((unsigned long)size > parisc_cache_flush_threshold) | ||
| 625 | flush_data_cache(); | ||
| 626 | else | ||
| 627 | flush_kernel_dcache_range_asm(start, start + size); | ||
| 628 | } | ||
| 629 | EXPORT_SYMBOL(flush_kernel_vmap_range); | ||
| 630 | |||
| 631 | void invalidate_kernel_vmap_range(void *vaddr, int size) | ||
| 632 | { | ||
| 633 | unsigned long start = (unsigned long)vaddr; | ||
| 634 | |||
| 635 | if ((unsigned long)size > parisc_cache_flush_threshold) | ||
| 636 | flush_data_cache(); | ||
| 637 | else | ||
| 638 | flush_kernel_dcache_range_asm(start, start + size); | ||
| 639 | } | ||
| 640 | EXPORT_SYMBOL(invalidate_kernel_vmap_range); | ||
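
Note: moving the vmap helpers out of line (declared in cacheflush.h above, defined here) pairs with the size heuristic — past parisc_cache_flush_threshold it is cheaper to flush the whole data cache than to walk the range line by line. The caller contract, per Documentation/cachetlb.txt, looks roughly like this (illustrative):

	void *buf = vmalloc(len);

	flush_kernel_vmap_range(buf, len);	/* before the device reads buf */
	/* ... DMA in or out of buf ... */
	invalidate_kernel_vmap_range(buf, len);	/* before the CPU reads data
						   the device just wrote */
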
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index a0ecdb4abcc8..c66c943d9322 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c | |||
| @@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, | |||
| 620 | */ | 620 | */ |
| 621 | *loc = fsel(val, addend); | 621 | *loc = fsel(val, addend); |
| 622 | break; | 622 | break; |
| 623 | case R_PARISC_SECREL32: | ||
| 624 | /* 32-bit section relative address. */ | ||
| 625 | *loc = fsel(val, addend); | ||
| 626 | break; | ||
| 623 | case R_PARISC_DPREL21L: | 627 | case R_PARISC_DPREL21L: |
| 624 | /* left 21 bit of relative address */ | 628 | /* left 21 bit of relative address */ |
| 625 | val = lrsel(val - dp, addend); | 629 | val = lrsel(val - dp, addend); |
| @@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, | |||
| 807 | */ | 811 | */ |
| 808 | *loc = fsel(val, addend); | 812 | *loc = fsel(val, addend); |
| 809 | break; | 813 | break; |
| 814 | case R_PARISC_SECREL32: | ||
| 815 | /* 32-bit section relative address. */ | ||
| 816 | *loc = fsel(val, addend); | ||
| 817 | break; | ||
| 810 | case R_PARISC_FPTR64: | 818 | case R_PARISC_FPTR64: |
| 811 | /* 64-bit function address */ | 819 | /* 64-bit function address */ |
| 812 | if(in_local(me, (void *)(val + addend))) { | 820 | if(in_local(me, (void *)(val + addend))) { |
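
Note: both the 32-bit and 64-bit relocation paths gain the same arm. fsel() is the "F" (full) field selector, which here reduces to value plus addend, so a section-relative relocation is stored as-is into the 32-bit slot. A sketch of the arithmetic, under that assumption:

	/* Illustrative: the symbol's offset, stored whole. */
	uint32_t secrel = (uint32_t)(val + addend);
	*(uint32_t *)loc = secrel;
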
diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c index e282a5131d77..6017a5af2e6e 100644 --- a/arch/parisc/kernel/perf.c +++ b/arch/parisc/kernel/perf.c | |||
| @@ -39,7 +39,7 @@ | |||
| 39 | * the PDC INTRIGUE calls. This is done to eliminate bugs introduced | 39 | * the PDC INTRIGUE calls. This is done to eliminate bugs introduced |
| 40 | * in various PDC revisions. The code is much more maintainable | 40 | * in various PDC revisions. The code is much more maintainable |
| 41 | * and reliable this way vs having to debug on every version of PDC | 41 | * and reliable this way vs having to debug on every version of PDC |
| 42 | * on every box. | 42 | * on every box. |
| 43 | */ | 43 | */ |
| 44 | 44 | ||
| 45 | #include <linux/capability.h> | 45 | #include <linux/capability.h> |
| @@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr); | |||
| 195 | static int perf_release(struct inode *inode, struct file *file); | 195 | static int perf_release(struct inode *inode, struct file *file); |
| 196 | static int perf_open(struct inode *inode, struct file *file); | 196 | static int perf_open(struct inode *inode, struct file *file); |
| 197 | static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos); | 197 | static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos); |
| 198 | static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, | 198 | static ssize_t perf_write(struct file *file, const char __user *buf, |
| 199 | loff_t *ppos); | 199 | size_t count, loff_t *ppos); |
| 200 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg); | 200 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg); |
| 201 | static void perf_start_counters(void); | 201 | static void perf_start_counters(void); |
| 202 | static int perf_stop_counters(uint32_t *raddr); | 202 | static int perf_stop_counters(uint32_t *raddr); |
| @@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void); | |||
| 222 | /* | 222 | /* |
| 223 | * configure: | 223 | * configure: |
| 224 | * | 224 | * |
| 225 | * Configure the cpu with a given data image. First turn off the counters, | 225 | * Configure the cpu with a given data image. First turn off the counters, |
| 226 | * then download the image, then turn the counters back on. | 226 | * then download the image, then turn the counters back on. |
| 227 | */ | 227 | */ |
| 228 | static int perf_config(uint32_t *image_ptr) | 228 | static int perf_config(uint32_t *image_ptr) |
| @@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr) | |||
| 234 | error = perf_stop_counters(raddr); | 234 | error = perf_stop_counters(raddr); |
| 235 | if (error != 0) { | 235 | if (error != 0) { |
| 236 | printk("perf_config: perf_stop_counters = %ld\n", error); | 236 | printk("perf_config: perf_stop_counters = %ld\n", error); |
| 237 | return -EINVAL; | 237 | return -EINVAL; |
| 238 | } | 238 | } |
| 239 | 239 | ||
| 240 | printk("Preparing to write image\n"); | 240 | printk("Preparing to write image\n"); |
| @@ -242,7 +242,7 @@ printk("Preparing to write image\n"); | |||
| 242 | error = perf_write_image((uint64_t *)image_ptr); | 242 | error = perf_write_image((uint64_t *)image_ptr); |
| 243 | if (error != 0) { | 243 | if (error != 0) { |
| 244 | printk("perf_config: DOWNLOAD = %ld\n", error); | 244 | printk("perf_config: DOWNLOAD = %ld\n", error); |
| 245 | return -EINVAL; | 245 | return -EINVAL; |
| 246 | } | 246 | } |
| 247 | 247 | ||
| 248 | printk("Preparing to start counters\n"); | 248 | printk("Preparing to start counters\n"); |
| @@ -254,7 +254,7 @@ printk("Preparing to start counters\n"); | |||
| 254 | } | 254 | } |
| 255 | 255 | ||
| 256 | /* | 256 | /* |
| 257 | * Open the device and initialize all of its memory. The device is only | 257 | * Open the device and initialize all of its memory. The device is only |
| 258 | * opened once, but can be "queried" by multiple processes that know its | 258 | * opened once, but can be "queried" by multiple processes that know its |
| 259 | * file descriptor. | 259 | * file descriptor. |
| 260 | */ | 260 | */ |
| @@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t | |||
| 298 | * called on the processor that the download should happen | 298 | * called on the processor that the download should happen |
| 299 | * on. | 299 | * on. |
| 300 | */ | 300 | */ |
| 301 | static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, | 301 | static ssize_t perf_write(struct file *file, const char __user *buf, |
| 302 | loff_t *ppos) | 302 | size_t count, loff_t *ppos) |
| 303 | { | 303 | { |
| 304 | size_t image_size; | 304 | size_t image_size; |
| 305 | uint32_t image_type; | 305 | uint32_t image_type; |
| 306 | uint32_t interface_type; | 306 | uint32_t interface_type; |
| 307 | uint32_t test; | 307 | uint32_t test; |
| 308 | 308 | ||
| 309 | if (perf_processor_interface == ONYX_INTF) | 309 | if (perf_processor_interface == ONYX_INTF) |
| 310 | image_size = PCXU_IMAGE_SIZE; | 310 | image_size = PCXU_IMAGE_SIZE; |
| 311 | else if (perf_processor_interface == CUDA_INTF) | 311 | else if (perf_processor_interface == CUDA_INTF) |
| 312 | image_size = PCXW_IMAGE_SIZE; | 312 | image_size = PCXW_IMAGE_SIZE; |
| 313 | else | 313 | else |
| 314 | return -EFAULT; | 314 | return -EFAULT; |
| 315 | 315 | ||
| 316 | if (!capable(CAP_SYS_ADMIN)) | 316 | if (!capable(CAP_SYS_ADMIN)) |
| @@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun | |||
| 330 | 330 | ||
| 331 | /* First check the machine type is correct for | 331 | /* First check the machine type is correct for |
| 332 | the requested image */ | 332 | the requested image */ |
| 333 | if (((perf_processor_interface == CUDA_INTF) && | 333 | if (((perf_processor_interface == CUDA_INTF) && |
| 334 | (interface_type != CUDA_INTF)) || | 334 | (interface_type != CUDA_INTF)) || |
| 335 | ((perf_processor_interface == ONYX_INTF) && | 335 | ((perf_processor_interface == ONYX_INTF) && |
| 336 | (interface_type != ONYX_INTF))) | 336 | (interface_type != ONYX_INTF))) |
| 337 | return -EINVAL; | 337 | return -EINVAL; |
| 338 | 338 | ||
| 339 | /* Next check to make sure the requested image | 339 | /* Next check to make sure the requested image |
| 340 | is valid */ | 340 | is valid */ |
| 341 | if (((interface_type == CUDA_INTF) && | 341 | if (((interface_type == CUDA_INTF) && |
| 342 | (test >= MAX_CUDA_IMAGES)) || | 342 | (test >= MAX_CUDA_IMAGES)) || |
| 343 | ((interface_type == ONYX_INTF) && | 343 | ((interface_type == ONYX_INTF) && |
| 344 | (test >= MAX_ONYX_IMAGES))) | 344 | (test >= MAX_ONYX_IMAGES))) |
| 345 | return -EINVAL; | 345 | return -EINVAL; |
| 346 | 346 | ||
| 347 | /* Copy the image into the processor */ | 347 | /* Copy the image into the processor */ |
| 348 | if (interface_type == CUDA_INTF) | 348 | if (interface_type == CUDA_INTF) |
| 349 | return perf_config(cuda_images[test]); | 349 | return perf_config(cuda_images[test]); |
| 350 | else | 350 | else |
| 351 | return perf_config(onyx_images[test]); | 351 | return perf_config(onyx_images[test]); |
| @@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun | |||
| 359 | static void perf_patch_images(void) | 359 | static void perf_patch_images(void) |
| 360 | { | 360 | { |
| 361 | #if 0 /* FIXME!! */ | 361 | #if 0 /* FIXME!! */ |
| 362 | /* | 362 | /* |
| 363 | * NOTE: this routine is VERY specific to the current TLB image. | 363 | * NOTE: this routine is VERY specific to the current TLB image. |
| 364 | * If the image is changed, this routine might also need to be changed. | 364 | * If the image is changed, this routine might also need to be changed. |
| 365 | */ | 365 | */ |
| @@ -367,9 +367,9 @@ static void perf_patch_images(void) | |||
| 367 | extern void $i_dtlb_miss_2_0(); | 367 | extern void $i_dtlb_miss_2_0(); |
| 368 | extern void PA2_0_iva(); | 368 | extern void PA2_0_iva(); |
| 369 | 369 | ||
| 370 | /* | 370 | /* |
| 371 | * We can only use the lower 32-bits, the upper 32-bits should be 0 | 371 | * We can only use the lower 32-bits, the upper 32-bits should be 0 |
| 372 | * anyway given this is in the kernel | 372 | * anyway given this is in the kernel |
| 373 | */ | 373 | */ |
| 374 | uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0); | 374 | uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0); |
| 375 | uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0); | 375 | uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0); |
| @@ -377,21 +377,21 @@ static void perf_patch_images(void) | |||
| 377 | 377 | ||
| 378 | if (perf_processor_interface == ONYX_INTF) { | 378 | if (perf_processor_interface == ONYX_INTF) { |
| 379 | /* clear last 2 bytes */ | 379 | /* clear last 2 bytes */ |
| 380 | onyx_images[TLBMISS][15] &= 0xffffff00; | 380 | onyx_images[TLBMISS][15] &= 0xffffff00; |
| 381 | /* set 2 bytes */ | 381 | /* set 2 bytes */ |
| 382 | onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24)); | 382 | onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24)); |
| 383 | onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00; | 383 | onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00; |
| 384 | onyx_images[TLBMISS][17] = itlb_addr; | 384 | onyx_images[TLBMISS][17] = itlb_addr; |
| 385 | 385 | ||
| 386 | /* clear last 2 bytes */ | 386 | /* clear last 2 bytes */ |
| 387 | onyx_images[TLBHANDMISS][15] &= 0xffffff00; | 387 | onyx_images[TLBHANDMISS][15] &= 0xffffff00; |
| 388 | /* set 2 bytes */ | 388 | /* set 2 bytes */ |
| 389 | onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24)); | 389 | onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24)); |
| 390 | onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00; | 390 | onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00; |
| 391 | onyx_images[TLBHANDMISS][17] = itlb_addr; | 391 | onyx_images[TLBHANDMISS][17] = itlb_addr; |
| 392 | 392 | ||
| 393 | /* clear last 2 bytes */ | 393 | /* clear last 2 bytes */ |
| 394 | onyx_images[BIG_CPI][15] &= 0xffffff00; | 394 | onyx_images[BIG_CPI][15] &= 0xffffff00; |
| 395 | /* set 2 bytes */ | 395 | /* set 2 bytes */ |
| 396 | onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24)); | 396 | onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24)); |
| 397 | onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00; | 397 | onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00; |
| @@ -404,24 +404,24 @@ static void perf_patch_images(void) | |||
| 404 | 404 | ||
| 405 | } else if (perf_processor_interface == CUDA_INTF) { | 405 | } else if (perf_processor_interface == CUDA_INTF) { |
| 406 | /* Cuda interface */ | 406 | /* Cuda interface */ |
| 407 | cuda_images[TLBMISS][16] = | 407 | cuda_images[TLBMISS][16] = |
| 408 | (cuda_images[TLBMISS][16]&0xffff0000) | | 408 | (cuda_images[TLBMISS][16]&0xffff0000) | |
| 409 | ((dtlb_addr >> 8)&0x0000ffff); | 409 | ((dtlb_addr >> 8)&0x0000ffff); |
| 410 | cuda_images[TLBMISS][17] = | 410 | cuda_images[TLBMISS][17] = |
| 411 | ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); | 411 | ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); |
| 412 | cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000; | 412 | cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000; |
| 413 | 413 | ||
| 414 | cuda_images[TLBHANDMISS][16] = | 414 | cuda_images[TLBHANDMISS][16] = |
| 415 | (cuda_images[TLBHANDMISS][16]&0xffff0000) | | 415 | (cuda_images[TLBHANDMISS][16]&0xffff0000) | |
| 416 | ((dtlb_addr >> 8)&0x0000ffff); | 416 | ((dtlb_addr >> 8)&0x0000ffff); |
| 417 | cuda_images[TLBHANDMISS][17] = | 417 | cuda_images[TLBHANDMISS][17] = |
| 418 | ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); | 418 | ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); |
| 419 | cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000; | 419 | cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000; |
| 420 | 420 | ||
| 421 | cuda_images[BIG_CPI][16] = | 421 | cuda_images[BIG_CPI][16] = |
| 422 | (cuda_images[BIG_CPI][16]&0xffff0000) | | 422 | (cuda_images[BIG_CPI][16]&0xffff0000) | |
| 423 | ((dtlb_addr >> 8)&0x0000ffff); | 423 | ((dtlb_addr >> 8)&0x0000ffff); |
| 424 | cuda_images[BIG_CPI][17] = | 424 | cuda_images[BIG_CPI][17] = |
| 425 | ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); | 425 | ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); |
| 426 | cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000; | 426 | cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000; |
| 427 | } else { | 427 | } else { |
| @@ -433,7 +433,7 @@ static void perf_patch_images(void) | |||
| 433 | 433 | ||
| 434 | /* | 434 | /* |
| 435 | * ioctl routine | 435 | * ioctl routine |
| 436 | * All routines effect the processor that they are executed on. Thus you | 436 | * All routines effect the processor that they are executed on. Thus you |
| 437 | * must be running on the processor that you wish to change. | 437 | * must be running on the processor that you wish to change. |
| 438 | */ | 438 | */ |
| 439 | 439 | ||
| @@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
| 459 | } | 459 | } |
| 460 | 460 | ||
| 461 | /* copy out the Counters */ | 461 | /* copy out the Counters */ |
| 462 | if (copy_to_user((void __user *)arg, raddr, | 462 | if (copy_to_user((void __user *)arg, raddr, |
| 463 | sizeof (raddr)) != 0) { | 463 | sizeof (raddr)) != 0) { |
| 464 | error = -EFAULT; | 464 | error = -EFAULT; |
| 465 | break; | 465 | break; |
| @@ -487,7 +487,7 @@ static const struct file_operations perf_fops = { | |||
| 487 | .open = perf_open, | 487 | .open = perf_open, |
| 488 | .release = perf_release | 488 | .release = perf_release |
| 489 | }; | 489 | }; |
| 490 | 490 | ||
| 491 | static struct miscdevice perf_dev = { | 491 | static struct miscdevice perf_dev = { |
| 492 | MISC_DYNAMIC_MINOR, | 492 | MISC_DYNAMIC_MINOR, |
| 493 | PA_PERF_DEV, | 493 | PA_PERF_DEV, |
| @@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr) | |||
| 595 | /* OR sticky2 (bit 1496) to counter2 bit 32 */ | 595 | /* OR sticky2 (bit 1496) to counter2 bit 32 */ |
| 596 | tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000; | 596 | tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000; |
| 597 | raddr[2] = (uint32_t)tmp64; | 597 | raddr[2] = (uint32_t)tmp64; |
| 598 | 598 | ||
| 599 | /* Counter3 is bits 1497 to 1528 */ | 599 | /* Counter3 is bits 1497 to 1528 */ |
| 600 | tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff; | 600 | tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff; |
| 601 | /* OR sticky3 (bit 1529) to counter3 bit 32 */ | 601 | /* OR sticky3 (bit 1529) to counter3 bit 32 */ |
| @@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr) | |||
| 617 | userbuf[22] = 0; | 617 | userbuf[22] = 0; |
| 618 | userbuf[23] = 0; | 618 | userbuf[23] = 0; |
| 619 | 619 | ||
| 620 | /* | 620 | /* |
| 621 | * Write back the zeroed bytes + the image given | 621 | * Write back the zeroed bytes + the image given |
| 622 | * the read was destructive. | 622 | * the read was destructive. |
| 623 | */ | 623 | */ |
| @@ -625,13 +625,13 @@ static int perf_stop_counters(uint32_t *raddr) | |||
| 625 | } else { | 625 | } else { |
| 626 | 626 | ||
| 627 | /* | 627 | /* |
| 628 | * Read RDR-15 which contains the counters and sticky bits | 628 | * Read RDR-15 which contains the counters and sticky bits |
| 629 | */ | 629 | */ |
| 630 | if (!perf_rdr_read_ubuf(15, userbuf)) { | 630 | if (!perf_rdr_read_ubuf(15, userbuf)) { |
| 631 | return -13; | 631 | return -13; |
| 632 | } | 632 | } |
| 633 | 633 | ||
| 634 | /* | 634 | /* |
| 635 | * Clear out the counters | 635 | * Clear out the counters |
| 636 | */ | 636 | */ |
| 637 | perf_rdr_clear(15); | 637 | perf_rdr_clear(15); |
| @@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr) | |||
| 644 | raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL); | 644 | raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL); |
| 645 | raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL); | 645 | raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL); |
| 646 | } | 646 | } |
| 647 | 647 | ||
| 648 | return 0; | 648 | return 0; |
| 649 | } | 649 | } |
| 650 | 650 | ||
| @@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer) | |||
| 682 | i = tentry->num_words; | 682 | i = tentry->num_words; |
| 683 | while (i--) { | 683 | while (i--) { |
| 684 | buffer[i] = 0; | 684 | buffer[i] = 0; |
| 685 | } | 685 | } |
| 686 | 686 | ||
| 687 | /* Check for bits an even number of 64 */ | 687 | /* Check for bits an even number of 64 */ |
| 688 | if ((xbits = width & 0x03f) != 0) { | 688 | if ((xbits = width & 0x03f) != 0) { |
| @@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr) | |||
| 808 | } | 808 | } |
| 809 | 809 | ||
| 810 | runway = ioremap_nocache(cpu_device->hpa.start, 4096); | 810 | runway = ioremap_nocache(cpu_device->hpa.start, 4096); |
| 811 | if (!runway) { | ||
| 812 | pr_err("perf_write_image: ioremap failed!\n"); | ||
| 813 | return -ENOMEM; | ||
| 814 | } | ||
| 811 | 815 | ||
| 812 | /* Merge intrigue bits into Runway STATUS 0 */ | 816 | /* Merge intrigue bits into Runway STATUS 0 */ |
| 813 | tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful; | 817 | tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful; |
| 814 | __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), | 818 | __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), |
| 815 | runway + RUNWAY_STATUS); | 819 | runway + RUNWAY_STATUS); |
| 816 | 820 | ||
| 817 | /* Write RUNWAY DEBUG registers */ | 821 | /* Write RUNWAY DEBUG registers */ |
| 818 | for (i = 0; i < 8; i++) { | 822 | for (i = 0; i < 8; i++) { |
| 819 | __raw_writeq(*memaddr++, runway + RUNWAY_DEBUG); | 823 | __raw_writeq(*memaddr++, runway + RUNWAY_DEBUG); |
| 820 | } | 824 | } |
| 821 | 825 | ||
| 822 | return 0; | 826 | return 0; |
| 823 | } | 827 | } |
| 824 | 828 | ||
| 825 | /* | 829 | /* |
| @@ -843,7 +847,7 @@ printk("perf_rdr_write\n"); | |||
| 843 | perf_rdr_shift_out_U(rdr_num, buffer[i]); | 847 | perf_rdr_shift_out_U(rdr_num, buffer[i]); |
| 844 | } else { | 848 | } else { |
| 845 | perf_rdr_shift_out_W(rdr_num, buffer[i]); | 849 | perf_rdr_shift_out_W(rdr_num, buffer[i]); |
| 846 | } | 850 | } |
| 847 | } | 851 | } |
| 848 | printk("perf_rdr_write done\n"); | 852 | printk("perf_rdr_write done\n"); |
| 849 | } | 853 | } |
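
Note: beyond whitespace cleanup, the one functional change in perf.c is the NULL check after ioremap_nocache(). The pattern, reduced (REG_OFF is a placeholder):

	void __iomem *base = ioremap_nocache(hpa, 4096);

	if (!base)
		return -ENOMEM;		/* never dereference a failed mapping */
	__raw_writeq(0, base + REG_OFF);
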
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 06f7ca7fe70b..b76f503eee4a 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c | |||
| @@ -142,6 +142,8 @@ void machine_power_off(void) | |||
| 142 | 142 | ||
| 143 | printk(KERN_EMERG "System shut down completed.\n" | 143 | printk(KERN_EMERG "System shut down completed.\n" |
| 144 | "Please power this system off now."); | 144 | "Please power this system off now."); |
| 145 | |||
| 146 | for (;;); | ||
| 145 | } | 147 | } |
| 146 | 148 | ||
| 147 | void (*pm_power_off)(void) = machine_power_off; | 149 | void (*pm_power_off)(void) = machine_power_off; |
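
Note: if the platform cannot actually cut power, machine_power_off() previously returned into a torn-down kernel; the added loop halts instead. Reduced:

	printk(KERN_EMERG "Please power this system off now.");
	for (;;)
		;	/* spin forever; some ports idle the CPU here instead */
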
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 3cfef1de8061..44aeaa9c039f 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
| @@ -444,6 +444,7 @@ | |||
| 444 | ENTRY_SAME(copy_file_range) | 444 | ENTRY_SAME(copy_file_range) |
| 445 | ENTRY_COMP(preadv2) | 445 | ENTRY_COMP(preadv2) |
| 446 | ENTRY_COMP(pwritev2) | 446 | ENTRY_COMP(pwritev2) |
| 447 | ENTRY_SAME(statx) | ||
| 447 | 448 | ||
| 448 | 449 | ||
| 449 | .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) | 450 | .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) |
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 4b369d83fe9c..1c9470881c4a 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h | |||
| @@ -387,3 +387,4 @@ SYSCALL(copy_file_range) | |||
| 387 | COMPAT_SYS_SPU(preadv2) | 387 | COMPAT_SYS_SPU(preadv2) |
| 388 | COMPAT_SYS_SPU(pwritev2) | 388 | COMPAT_SYS_SPU(pwritev2) |
| 389 | SYSCALL(kexec_file_load) | 389 | SYSCALL(kexec_file_load) |
| 390 | SYSCALL(statx) | ||
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index eb1acee91a20..9ba11dbcaca9 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | #include <uapi/asm/unistd.h> | 12 | #include <uapi/asm/unistd.h> |
| 13 | 13 | ||
| 14 | 14 | ||
| 15 | #define NR_syscalls 383 | 15 | #define NR_syscalls 384 |
| 16 | 16 | ||
| 17 | #define __NR__exit __NR_exit | 17 | #define __NR__exit __NR_exit |
| 18 | 18 | ||
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 2f26335a3c42..b85f14228857 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h | |||
| @@ -393,5 +393,6 @@ | |||
| 393 | #define __NR_preadv2 380 | 393 | #define __NR_preadv2 380 |
| 394 | #define __NR_pwritev2 381 | 394 | #define __NR_pwritev2 381 |
| 395 | #define __NR_kexec_file_load 382 | 395 | #define __NR_kexec_file_load 382 |
| 396 | #define __NR_statx 383 | ||
| 396 | 397 | ||
| 397 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ | 398 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ |
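
Note: powerpc gets the same statx wiring as m68k and parisc above — a plain SYSCALL(statx) table entry, __NR_statx = 383, and NR_syscalls bumped to 384. A hypothetical build-time cross-check (placed in any compiled function) that would catch the count and the table drifting apart:

	#include <linux/bug.h>

	BUILD_BUG_ON(__NR_statx + 1 != NR_syscalls);	/* 383 + 1 == 384 */
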
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 995728736677..6fd08219248d 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S | |||
| @@ -449,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | |||
| 449 | _GLOBAL(pnv_wakeup_tb_loss) | 449 | _GLOBAL(pnv_wakeup_tb_loss) |
| 450 | ld r1,PACAR1(r13) | 450 | ld r1,PACAR1(r13) |
| 451 | /* | 451 | /* |
| 452 | * Before entering any idle state, the NVGPRs are saved in the stack | 452 | * Before entering any idle state, the NVGPRs are saved in the stack. |
| 453 | * and they are restored before switching to the process context. Hence | 453 | * If there was a state loss, or PACA_NAPSTATELOST was set, then the |
| 454 | * until they are restored, they are free to be used. | 454 | * NVGPRs are restored. If we are here, it is likely that state is lost, |
| 455 | * but not guaranteed -- neither ISA207 nor ISA300 tests to reach | ||
| 456 | * here are the same as the test to restore NVGPRS: | ||
| 457 | * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300, | ||
| 458 | * and SRR1 test for restoring NVGPRs. | ||
| 459 | * | ||
| 460 | * We are about to clobber NVGPRs now, so set NAPSTATELOST to | ||
| 461 | * guarantee they will always be restored. This might be tightened | ||
| 462 | * with careful reading of the specs (particularly for ISA300), but this | ||
| 463 | * is already a slow wakeup path and it's simpler to be safe. | ||
| 464 | */ | ||
| 465 | li r0,1 | ||
| 466 | stb r0,PACA_NAPSTATELOST(r13) | ||
| 467 | |||
| 468 | /* | ||
| 455 | * | 469 | * |
| 456 | * Save SRR1 and LR in NVGPRs as they might be clobbered in | 470 | * Save SRR1 and LR in NVGPRs as they might be clobbered in |
| 457 | * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required | 471 | * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required |
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 9be992083d2a..c22f207aa656 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c | |||
| @@ -397,8 +397,7 @@ static void early_check_vec5(void) | |||
| 397 | void __init mmu_early_init_devtree(void) | 397 | void __init mmu_early_init_devtree(void) |
| 398 | { | 398 | { |
| 399 | /* Disable radix mode based on kernel command line. */ | 399 | /* Disable radix mode based on kernel command line. */ |
| 400 | /* We don't yet have the machinery to do radix as a guest. */ | 400 | if (disable_radix) |
| 401 | if (disable_radix || !(mfmsr() & MSR_HV)) | ||
| 402 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; | 401 | cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; |
| 403 | 402 | ||
| 404 | /* | 403 | /* |
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 251060cf1713..8b1fe895daa3 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c | |||
| @@ -751,7 +751,9 @@ void __init hpte_init_pseries(void) | |||
| 751 | mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; | 751 | mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; |
| 752 | mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; | 752 | mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; |
| 753 | mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; | 753 | mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; |
| 754 | mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; | 754 | |
| 755 | if (firmware_has_feature(FW_FEATURE_HPT_RESIZE)) | ||
| 756 | mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; | ||
| 755 | } | 757 | } |
| 756 | 758 | ||
| 757 | void radix_init_pseries(void) | 759 | void radix_init_pseries(void) |
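Leaving mmu_hash_ops.resize_hpt NULL when FW_FEATURE_HPT_RESIZE is absent turns the op itself into the capability flag. A sketch of the call-site pattern this enables; the wrapper below is illustrative, not taken from this series:

    /* A caller can now probe support with a NULL check instead of
     * repeating the firmware-feature query. */
    static int resize_hpt_if_supported(unsigned long shift)
    {
            if (!mmu_hash_ops.resize_hpt)
                    return -ENODEV;         /* hypervisor cannot resize the HPT */

            return mmu_hash_ops.resize_hpt(shift);
    }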
diff --git a/arch/x86/events/amd/iommu.c b/arch/x86/events/amd/iommu.c index b28200dea715..3641e24fdac5 100644 --- a/arch/x86/events/amd/iommu.c +++ b/arch/x86/events/amd/iommu.c | |||
| @@ -11,6 +11,8 @@ | |||
| 11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #define pr_fmt(fmt) "perf/amd_iommu: " fmt | ||
| 15 | |||
| 14 | #include <linux/perf_event.h> | 16 | #include <linux/perf_event.h> |
| 15 | #include <linux/init.h> | 17 | #include <linux/init.h> |
| 16 | #include <linux/cpumask.h> | 18 | #include <linux/cpumask.h> |
| @@ -21,44 +23,42 @@ | |||
| 21 | 23 | ||
| 22 | #define COUNTER_SHIFT 16 | 24 | #define COUNTER_SHIFT 16 |
| 23 | 25 | ||
| 24 | #define _GET_BANK(ev) ((u8)(ev->hw.extra_reg.reg >> 8)) | 26 | /* iommu pmu conf masks */ |
| 25 | #define _GET_CNTR(ev) ((u8)(ev->hw.extra_reg.reg)) | 27 | #define GET_CSOURCE(x) ((x)->conf & 0xFFULL) |
| 28 | #define GET_DEVID(x) (((x)->conf >> 8) & 0xFFFFULL) | ||
| 29 | #define GET_DOMID(x) (((x)->conf >> 24) & 0xFFFFULL) | ||
| 30 | #define GET_PASID(x) (((x)->conf >> 40) & 0xFFFFFULL) | ||
| 26 | 31 | ||
| 27 | /* iommu pmu config masks */ | 32 | /* iommu pmu conf1 masks */ |
| 28 | #define _GET_CSOURCE(ev) ((ev->hw.config & 0xFFULL)) | 33 | #define GET_DEVID_MASK(x) ((x)->conf1 & 0xFFFFULL) |
| 29 | #define _GET_DEVID(ev) ((ev->hw.config >> 8) & 0xFFFFULL) | 34 | #define GET_DOMID_MASK(x) (((x)->conf1 >> 16) & 0xFFFFULL) |
| 30 | #define _GET_PASID(ev) ((ev->hw.config >> 24) & 0xFFFFULL) | 35 | #define GET_PASID_MASK(x) (((x)->conf1 >> 32) & 0xFFFFFULL) |
| 31 | #define _GET_DOMID(ev) ((ev->hw.config >> 40) & 0xFFFFULL) | ||
| 32 | #define _GET_DEVID_MASK(ev) ((ev->hw.extra_reg.config) & 0xFFFFULL) | ||
| 33 | #define _GET_PASID_MASK(ev) ((ev->hw.extra_reg.config >> 16) & 0xFFFFULL) | ||
| 34 | #define _GET_DOMID_MASK(ev) ((ev->hw.extra_reg.config >> 32) & 0xFFFFULL) | ||
| 35 | 36 | ||
| 36 | static struct perf_amd_iommu __perf_iommu; | 37 | #define IOMMU_NAME_SIZE 16 |
| 37 | 38 | ||
| 38 | struct perf_amd_iommu { | 39 | struct perf_amd_iommu { |
| 40 | struct list_head list; | ||
| 39 | struct pmu pmu; | 41 | struct pmu pmu; |
| 42 | struct amd_iommu *iommu; | ||
| 43 | char name[IOMMU_NAME_SIZE]; | ||
| 40 | u8 max_banks; | 44 | u8 max_banks; |
| 41 | u8 max_counters; | 45 | u8 max_counters; |
| 42 | u64 cntr_assign_mask; | 46 | u64 cntr_assign_mask; |
| 43 | raw_spinlock_t lock; | 47 | raw_spinlock_t lock; |
| 44 | const struct attribute_group *attr_groups[4]; | ||
| 45 | }; | 48 | }; |
| 46 | 49 | ||
| 47 | #define format_group attr_groups[0] | 50 | static LIST_HEAD(perf_amd_iommu_list); |
| 48 | #define cpumask_group attr_groups[1] | ||
| 49 | #define events_group attr_groups[2] | ||
| 50 | #define null_group attr_groups[3] | ||
| 51 | 51 | ||
| 52 | /*--------------------------------------------- | 52 | /*--------------------------------------------- |
| 53 | * sysfs format attributes | 53 | * sysfs format attributes |
| 54 | *---------------------------------------------*/ | 54 | *---------------------------------------------*/ |
| 55 | PMU_FORMAT_ATTR(csource, "config:0-7"); | 55 | PMU_FORMAT_ATTR(csource, "config:0-7"); |
| 56 | PMU_FORMAT_ATTR(devid, "config:8-23"); | 56 | PMU_FORMAT_ATTR(devid, "config:8-23"); |
| 57 | PMU_FORMAT_ATTR(pasid, "config:24-39"); | 57 | PMU_FORMAT_ATTR(domid, "config:24-39"); |
| 58 | PMU_FORMAT_ATTR(domid, "config:40-55"); | 58 | PMU_FORMAT_ATTR(pasid, "config:40-59"); |
| 59 | PMU_FORMAT_ATTR(devid_mask, "config1:0-15"); | 59 | PMU_FORMAT_ATTR(devid_mask, "config1:0-15"); |
| 60 | PMU_FORMAT_ATTR(pasid_mask, "config1:16-31"); | 60 | PMU_FORMAT_ATTR(domid_mask, "config1:16-31"); |
| 61 | PMU_FORMAT_ATTR(domid_mask, "config1:32-47"); | 61 | PMU_FORMAT_ATTR(pasid_mask, "config1:32-51"); |
| 62 | 62 | ||
| 63 | static struct attribute *iommu_format_attrs[] = { | 63 | static struct attribute *iommu_format_attrs[] = { |
| 64 | &format_attr_csource.attr, | 64 | &format_attr_csource.attr, |
| @@ -79,6 +79,10 @@ static struct attribute_group amd_iommu_format_group = { | |||
| 79 | /*--------------------------------------------- | 79 | /*--------------------------------------------- |
| 80 | * sysfs events attributes | 80 | * sysfs events attributes |
| 81 | *---------------------------------------------*/ | 81 | *---------------------------------------------*/ |
| 82 | static struct attribute_group amd_iommu_events_group = { | ||
| 83 | .name = "events", | ||
| 84 | }; | ||
| 85 | |||
| 82 | struct amd_iommu_event_desc { | 86 | struct amd_iommu_event_desc { |
| 83 | struct kobj_attribute attr; | 87 | struct kobj_attribute attr; |
| 84 | const char *event; | 88 | const char *event; |
| @@ -150,30 +154,34 @@ static struct attribute_group amd_iommu_cpumask_group = { | |||
| 150 | 154 | ||
| 151 | /*---------------------------------------------*/ | 155 | /*---------------------------------------------*/ |
| 152 | 156 | ||
| 153 | static int get_next_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu) | 157 | static int get_next_avail_iommu_bnk_cntr(struct perf_event *event) |
| 154 | { | 158 | { |
| 159 | struct perf_amd_iommu *piommu = container_of(event->pmu, struct perf_amd_iommu, pmu); | ||
| 160 | int max_cntrs = piommu->max_counters; | ||
| 161 | int max_banks = piommu->max_banks; | ||
| 162 | u32 shift, bank, cntr; | ||
| 155 | unsigned long flags; | 163 | unsigned long flags; |
| 156 | int shift, bank, cntr, retval; | 164 | int retval; |
| 157 | int max_banks = perf_iommu->max_banks; | ||
| 158 | int max_cntrs = perf_iommu->max_counters; | ||
| 159 | 165 | ||
| 160 | raw_spin_lock_irqsave(&perf_iommu->lock, flags); | 166 | raw_spin_lock_irqsave(&piommu->lock, flags); |
| 161 | 167 | ||
| 162 | for (bank = 0, shift = 0; bank < max_banks; bank++) { | 168 | for (bank = 0, shift = 0; bank < max_banks; bank++) { |
| 163 | for (cntr = 0; cntr < max_cntrs; cntr++) { | 169 | for (cntr = 0; cntr < max_cntrs; cntr++) { |
| 164 | shift = bank + (bank*3) + cntr; | 170 | shift = bank + (bank*3) + cntr; |
| 165 | if (perf_iommu->cntr_assign_mask & (1ULL<<shift)) { | 171 | if (piommu->cntr_assign_mask & BIT_ULL(shift)) { |
| 166 | continue; | 172 | continue; |
| 167 | } else { | 173 | } else { |
| 168 | perf_iommu->cntr_assign_mask |= (1ULL<<shift); | 174 | piommu->cntr_assign_mask |= BIT_ULL(shift); |
| 169 | retval = ((u16)((u16)bank<<8) | (u8)(cntr)); | 175 | event->hw.iommu_bank = bank; |
| 176 | event->hw.iommu_cntr = cntr; | ||
| 177 | retval = 0; | ||
| 170 | goto out; | 178 | goto out; |
| 171 | } | 179 | } |
| 172 | } | 180 | } |
| 173 | } | 181 | } |
| 174 | retval = -ENOSPC; | 182 | retval = -ENOSPC; |
| 175 | out: | 183 | out: |
| 176 | raw_spin_unlock_irqrestore(&perf_iommu->lock, flags); | 184 | raw_spin_unlock_irqrestore(&piommu->lock, flags); |
| 177 | return retval; | 185 | return retval; |
| 178 | } | 186 | } |
| 179 | 187 | ||
| @@ -202,8 +210,6 @@ static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu, | |||
| 202 | static int perf_iommu_event_init(struct perf_event *event) | 210 | static int perf_iommu_event_init(struct perf_event *event) |
| 203 | { | 211 | { |
| 204 | struct hw_perf_event *hwc = &event->hw; | 212 | struct hw_perf_event *hwc = &event->hw; |
| 205 | struct perf_amd_iommu *perf_iommu; | ||
| 206 | u64 config, config1; | ||
| 207 | 213 | ||
| 208 | /* test the event attr type check for PMU enumeration */ | 214 | /* test the event attr type check for PMU enumeration */ |
| 209 | if (event->attr.type != event->pmu->type) | 215 | if (event->attr.type != event->pmu->type) |
| @@ -225,80 +231,62 @@ static int perf_iommu_event_init(struct perf_event *event) | |||
| 225 | if (event->cpu < 0) | 231 | if (event->cpu < 0) |
| 226 | return -EINVAL; | 232 | return -EINVAL; |
| 227 | 233 | ||
| 228 | perf_iommu = &__perf_iommu; | ||
| 229 | |||
| 230 | if (event->pmu != &perf_iommu->pmu) | ||
| 231 | return -ENOENT; | ||
| 232 | |||
| 233 | if (perf_iommu) { | ||
| 234 | config = event->attr.config; | ||
| 235 | config1 = event->attr.config1; | ||
| 236 | } else { | ||
| 237 | return -EINVAL; | ||
| 238 | } | ||
| 239 | |||
| 240 | /* integrate with iommu base devid (0000), assume one iommu */ | ||
| 241 | perf_iommu->max_banks = | ||
| 242 | amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID); | ||
| 243 | perf_iommu->max_counters = | ||
| 244 | amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID); | ||
| 245 | if ((perf_iommu->max_banks == 0) || (perf_iommu->max_counters == 0)) | ||
| 246 | return -EINVAL; | ||
| 247 | |||
| 248 | /* update the hw_perf_event struct with the iommu config data */ | 234 | /* update the hw_perf_event struct with the iommu config data */ |
| 249 | hwc->config = config; | 235 | hwc->conf = event->attr.config; |
| 250 | hwc->extra_reg.config = config1; | 236 | hwc->conf1 = event->attr.config1; |
| 251 | 237 | ||
| 252 | return 0; | 238 | return 0; |
| 253 | } | 239 | } |
| 254 | 240 | ||
| 241 | static inline struct amd_iommu *perf_event_2_iommu(struct perf_event *ev) | ||
| 242 | { | ||
| 243 | return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu; | ||
| 244 | } | ||
| 245 | |||
| 255 | static void perf_iommu_enable_event(struct perf_event *ev) | 246 | static void perf_iommu_enable_event(struct perf_event *ev) |
| 256 | { | 247 | { |
| 257 | u8 csource = _GET_CSOURCE(ev); | 248 | struct amd_iommu *iommu = perf_event_2_iommu(ev); |
| 258 | u16 devid = _GET_DEVID(ev); | 249 | struct hw_perf_event *hwc = &ev->hw; |
| 250 | u8 bank = hwc->iommu_bank; | ||
| 251 | u8 cntr = hwc->iommu_cntr; | ||
| 259 | u64 reg = 0ULL; | 252 | u64 reg = 0ULL; |
| 260 | 253 | ||
| 261 | reg = csource; | 254 | reg = GET_CSOURCE(hwc); |
| 262 | amd_iommu_pc_get_set_reg_val(devid, | 255 | amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, ®); |
| 263 | _GET_BANK(ev), _GET_CNTR(ev) , | ||
| 264 | IOMMU_PC_COUNTER_SRC_REG, ®, true); | ||
| 265 | 256 | ||
| 266 | reg = 0ULL | devid | (_GET_DEVID_MASK(ev) << 32); | 257 | reg = GET_DEVID_MASK(hwc); |
| 258 | reg = GET_DEVID(hwc) | (reg << 32); | ||
| 267 | if (reg) | 259 | if (reg) |
| 268 | reg |= (1UL << 31); | 260 | reg |= BIT(31); |
| 269 | amd_iommu_pc_get_set_reg_val(devid, | 261 | amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, ®); |
| 270 | _GET_BANK(ev), _GET_CNTR(ev) , | ||
| 271 | IOMMU_PC_DEVID_MATCH_REG, ®, true); | ||
| 272 | 262 | ||
| 273 | reg = 0ULL | _GET_PASID(ev) | (_GET_PASID_MASK(ev) << 32); | 263 | reg = GET_PASID_MASK(hwc); |
| 264 | reg = GET_PASID(hwc) | (reg << 32); | ||
| 274 | if (reg) | 265 | if (reg) |
| 275 | reg |= (1UL << 31); | 266 | reg |= BIT(31); |
| 276 | amd_iommu_pc_get_set_reg_val(devid, | 267 | amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, ®); |
| 277 | _GET_BANK(ev), _GET_CNTR(ev) , | ||
| 278 | IOMMU_PC_PASID_MATCH_REG, ®, true); | ||
| 279 | 268 | ||
| 280 | reg = 0ULL | _GET_DOMID(ev) | (_GET_DOMID_MASK(ev) << 32); | 269 | reg = GET_DOMID_MASK(hwc); |
| 270 | reg = GET_DOMID(hwc) | (reg << 32); | ||
| 281 | if (reg) | 271 | if (reg) |
| 282 | reg |= (1UL << 31); | 272 | reg |= BIT(31); |
| 283 | amd_iommu_pc_get_set_reg_val(devid, | 273 | amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, ®); |
| 284 | _GET_BANK(ev), _GET_CNTR(ev) , | ||
| 285 | IOMMU_PC_DOMID_MATCH_REG, ®, true); | ||
| 286 | } | 274 | } |
| 287 | 275 | ||
| 288 | static void perf_iommu_disable_event(struct perf_event *event) | 276 | static void perf_iommu_disable_event(struct perf_event *event) |
| 289 | { | 277 | { |
| 278 | struct amd_iommu *iommu = perf_event_2_iommu(event); | ||
| 279 | struct hw_perf_event *hwc = &event->hw; | ||
| 290 | u64 reg = 0ULL; | 280 | u64 reg = 0ULL; |
| 291 | 281 | ||
| 292 | amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), | 282 | amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr, |
| 293 | _GET_BANK(event), _GET_CNTR(event), | 283 | IOMMU_PC_COUNTER_SRC_REG, ®); |
| 294 | IOMMU_PC_COUNTER_SRC_REG, ®, true); | ||
| 295 | } | 284 | } |
| 296 | 285 | ||
| 297 | static void perf_iommu_start(struct perf_event *event, int flags) | 286 | static void perf_iommu_start(struct perf_event *event, int flags) |
| 298 | { | 287 | { |
| 299 | struct hw_perf_event *hwc = &event->hw; | 288 | struct hw_perf_event *hwc = &event->hw; |
| 300 | 289 | ||
| 301 | pr_debug("perf: amd_iommu:perf_iommu_start\n"); | ||
| 302 | if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) | 290 | if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) |
| 303 | return; | 291 | return; |
| 304 | 292 | ||
| @@ -306,10 +294,11 @@ static void perf_iommu_start(struct perf_event *event, int flags) | |||
| 306 | hwc->state = 0; | 294 | hwc->state = 0; |
| 307 | 295 | ||
| 308 | if (flags & PERF_EF_RELOAD) { | 296 | if (flags & PERF_EF_RELOAD) { |
| 309 | u64 prev_raw_count = local64_read(&hwc->prev_count); | 297 | u64 prev_raw_count = local64_read(&hwc->prev_count); |
| 310 | amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), | 298 | struct amd_iommu *iommu = perf_event_2_iommu(event); |
| 311 | _GET_BANK(event), _GET_CNTR(event), | 299 | |
| 312 | IOMMU_PC_COUNTER_REG, &prev_raw_count, true); | 300 | amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr, |
| 301 | IOMMU_PC_COUNTER_REG, &prev_raw_count); | ||
| 313 | } | 302 | } |
| 314 | 303 | ||
| 315 | perf_iommu_enable_event(event); | 304 | perf_iommu_enable_event(event); |
| @@ -319,37 +308,30 @@ static void perf_iommu_start(struct perf_event *event, int flags) | |||
| 319 | 308 | ||
| 320 | static void perf_iommu_read(struct perf_event *event) | 309 | static void perf_iommu_read(struct perf_event *event) |
| 321 | { | 310 | { |
| 322 | u64 count = 0ULL; | 311 | u64 count, prev, delta; |
| 323 | u64 prev_raw_count = 0ULL; | ||
| 324 | u64 delta = 0ULL; | ||
| 325 | struct hw_perf_event *hwc = &event->hw; | 312 | struct hw_perf_event *hwc = &event->hw; |
| 326 | pr_debug("perf: amd_iommu:perf_iommu_read\n"); | 313 | struct amd_iommu *iommu = perf_event_2_iommu(event); |
| 327 | 314 | ||
| 328 | amd_iommu_pc_get_set_reg_val(_GET_DEVID(event), | 315 | if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr, |
| 329 | _GET_BANK(event), _GET_CNTR(event), | 316 | IOMMU_PC_COUNTER_REG, &count)) |
| 330 | IOMMU_PC_COUNTER_REG, &count, false); | 317 | return; |
| 331 | 318 | ||
| 332 | /* IOMMU pc counter register is only 48 bits */ | 319 | /* IOMMU pc counter register is only 48 bits */ |
| 333 | count &= 0xFFFFFFFFFFFFULL; | 320 | count &= GENMASK_ULL(47, 0); |
| 334 | 321 | ||
| 335 | prev_raw_count = local64_read(&hwc->prev_count); | 322 | prev = local64_read(&hwc->prev_count); |
| 336 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | 323 | if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev) |
| 337 | count) != prev_raw_count) | ||
| 338 | return; | 324 | return; |
| 339 | 325 | ||
| 340 | /* Handling 48-bit counter overflowing */ | 326 | /* Handle 48-bit counter overflow */ |
| 341 | delta = (count << COUNTER_SHIFT) - (prev_raw_count << COUNTER_SHIFT); | 327 | delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT); |
| 342 | delta >>= COUNTER_SHIFT; | 328 | delta >>= COUNTER_SHIFT; |
| 343 | local64_add(delta, &event->count); | 329 | local64_add(delta, &event->count); |
| 344 | |||
| 345 | } | 330 | } |
| 346 | 331 | ||
| 347 | static void perf_iommu_stop(struct perf_event *event, int flags) | 332 | static void perf_iommu_stop(struct perf_event *event, int flags) |
| 348 | { | 333 | { |
| 349 | struct hw_perf_event *hwc = &event->hw; | 334 | struct hw_perf_event *hwc = &event->hw; |
| 350 | u64 config; | ||
| 351 | |||
| 352 | pr_debug("perf: amd_iommu:perf_iommu_stop\n"); | ||
| 353 | 335 | ||
| 354 | if (hwc->state & PERF_HES_UPTODATE) | 336 | if (hwc->state & PERF_HES_UPTODATE) |
| 355 | return; | 337 | return; |
| @@ -361,7 +343,6 @@ static void perf_iommu_stop(struct perf_event *event, int flags) | |||
| 361 | if (hwc->state & PERF_HES_UPTODATE) | 343 | if (hwc->state & PERF_HES_UPTODATE) |
| 362 | return; | 344 | return; |
| 363 | 345 | ||
| 364 | config = hwc->config; | ||
| 365 | perf_iommu_read(event); | 346 | perf_iommu_read(event); |
| 366 | hwc->state |= PERF_HES_UPTODATE; | 347 | hwc->state |= PERF_HES_UPTODATE; |
| 367 | } | 348 | } |
| @@ -369,17 +350,12 @@ static void perf_iommu_stop(struct perf_event *event, int flags) | |||
| 369 | static int perf_iommu_add(struct perf_event *event, int flags) | 350 | static int perf_iommu_add(struct perf_event *event, int flags) |
| 370 | { | 351 | { |
| 371 | int retval; | 352 | int retval; |
| 372 | struct perf_amd_iommu *perf_iommu = | ||
| 373 | container_of(event->pmu, struct perf_amd_iommu, pmu); | ||
| 374 | 353 | ||
| 375 | pr_debug("perf: amd_iommu:perf_iommu_add\n"); | ||
| 376 | event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; | 354 | event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; |
| 377 | 355 | ||
| 378 | /* request an iommu bank/counter */ | 356 | /* request an iommu bank/counter */ |
| 379 | retval = get_next_avail_iommu_bnk_cntr(perf_iommu); | 357 | retval = get_next_avail_iommu_bnk_cntr(event); |
| 380 | if (retval != -ENOSPC) | 358 | if (retval) |
| 381 | event->hw.extra_reg.reg = (u16)retval; | ||
| 382 | else | ||
| 383 | return retval; | 359 | return retval; |
| 384 | 360 | ||
| 385 | if (flags & PERF_EF_START) | 361 | if (flags & PERF_EF_START) |
| @@ -390,115 +366,124 @@ static int perf_iommu_add(struct perf_event *event, int flags) | |||
| 390 | 366 | ||
| 391 | static void perf_iommu_del(struct perf_event *event, int flags) | 367 | static void perf_iommu_del(struct perf_event *event, int flags) |
| 392 | { | 368 | { |
| 369 | struct hw_perf_event *hwc = &event->hw; | ||
| 393 | struct perf_amd_iommu *perf_iommu = | 370 | struct perf_amd_iommu *perf_iommu = |
| 394 | container_of(event->pmu, struct perf_amd_iommu, pmu); | 371 | container_of(event->pmu, struct perf_amd_iommu, pmu); |
| 395 | 372 | ||
| 396 | pr_debug("perf: amd_iommu:perf_iommu_del\n"); | ||
| 397 | perf_iommu_stop(event, PERF_EF_UPDATE); | 373 | perf_iommu_stop(event, PERF_EF_UPDATE); |
| 398 | 374 | ||
| 399 | /* clear the assigned iommu bank/counter */ | 375 | /* clear the assigned iommu bank/counter */ |
| 400 | clear_avail_iommu_bnk_cntr(perf_iommu, | 376 | clear_avail_iommu_bnk_cntr(perf_iommu, |
| 401 | _GET_BANK(event), | 377 | hwc->iommu_bank, hwc->iommu_cntr); |
| 402 | _GET_CNTR(event)); | ||
| 403 | 378 | ||
| 404 | perf_event_update_userpage(event); | 379 | perf_event_update_userpage(event); |
| 405 | } | 380 | } |
| 406 | 381 | ||
| 407 | static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu) | 382 | static __init int _init_events_attrs(void) |
| 408 | { | 383 | { |
| 409 | struct attribute **attrs; | ||
| 410 | struct attribute_group *attr_group; | ||
| 411 | int i = 0, j; | 384 | int i = 0, j; |
| 385 | struct attribute **attrs; | ||
| 412 | 386 | ||
| 413 | while (amd_iommu_v2_event_descs[i].attr.attr.name) | 387 | while (amd_iommu_v2_event_descs[i].attr.attr.name) |
| 414 | i++; | 388 | i++; |
| 415 | 389 | ||
| 416 | attr_group = kzalloc(sizeof(struct attribute *) | 390 | attrs = kzalloc(sizeof(struct attribute **) * (i + 1), GFP_KERNEL); |
| 417 | * (i + 1) + sizeof(*attr_group), GFP_KERNEL); | 391 | if (!attrs) |
| 418 | if (!attr_group) | ||
| 419 | return -ENOMEM; | 392 | return -ENOMEM; |
| 420 | 393 | ||
| 421 | attrs = (struct attribute **)(attr_group + 1); | ||
| 422 | for (j = 0; j < i; j++) | 394 | for (j = 0; j < i; j++) |
| 423 | attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr; | 395 | attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr; |
| 424 | 396 | ||
| 425 | attr_group->name = "events"; | 397 | amd_iommu_events_group.attrs = attrs; |
| 426 | attr_group->attrs = attrs; | ||
| 427 | perf_iommu->events_group = attr_group; | ||
| 428 | |||
| 429 | return 0; | 398 | return 0; |
| 430 | } | 399 | } |
| 431 | 400 | ||
| 432 | static __init void amd_iommu_pc_exit(void) | 401 | const struct attribute_group *amd_iommu_attr_groups[] = { |
| 433 | { | 402 | &amd_iommu_format_group, |
| 434 | if (__perf_iommu.events_group != NULL) { | 403 | &amd_iommu_cpumask_group, |
| 435 | kfree(__perf_iommu.events_group); | 404 | &amd_iommu_events_group, |
| 436 | __perf_iommu.events_group = NULL; | 405 | NULL, |
| 437 | } | 406 | }; |
| 438 | } | 407 | |
| 408 | static struct pmu iommu_pmu = { | ||
| 409 | .event_init = perf_iommu_event_init, | ||
| 410 | .add = perf_iommu_add, | ||
| 411 | .del = perf_iommu_del, | ||
| 412 | .start = perf_iommu_start, | ||
| 413 | .stop = perf_iommu_stop, | ||
| 414 | .read = perf_iommu_read, | ||
| 415 | .task_ctx_nr = perf_invalid_context, | ||
| 416 | .attr_groups = amd_iommu_attr_groups, | ||
| 417 | }; | ||
| 439 | 418 | ||
| 440 | static __init int _init_perf_amd_iommu( | 419 | static __init int init_one_iommu(unsigned int idx) |
| 441 | struct perf_amd_iommu *perf_iommu, char *name) | ||
| 442 | { | 420 | { |
| 421 | struct perf_amd_iommu *perf_iommu; | ||
| 443 | int ret; | 422 | int ret; |
| 444 | 423 | ||
| 424 | perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL); | ||
| 425 | if (!perf_iommu) | ||
| 426 | return -ENOMEM; | ||
| 427 | |||
| 445 | raw_spin_lock_init(&perf_iommu->lock); | 428 | raw_spin_lock_init(&perf_iommu->lock); |
| 446 | 429 | ||
| 447 | /* Init format attributes */ | 430 | perf_iommu->pmu = iommu_pmu; |
| 448 | perf_iommu->format_group = &amd_iommu_format_group; | 431 | perf_iommu->iommu = get_amd_iommu(idx); |
| 432 | perf_iommu->max_banks = amd_iommu_pc_get_max_banks(idx); | ||
| 433 | perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx); | ||
| 449 | 434 | ||
| 450 | /* Init cpumask attributes to only core 0 */ | 435 | if (!perf_iommu->iommu || |
| 451 | cpumask_set_cpu(0, &iommu_cpumask); | 436 | !perf_iommu->max_banks || |
| 452 | perf_iommu->cpumask_group = &amd_iommu_cpumask_group; | 437 | !perf_iommu->max_counters) { |
| 453 | 438 | kfree(perf_iommu); | |
| 454 | /* Init events attributes */ | 439 | return -EINVAL; |
| 455 | if (_init_events_attrs(perf_iommu) != 0) | 440 | } |
| 456 | pr_err("perf: amd_iommu: Only support raw events.\n"); | ||
| 457 | 441 | ||
| 458 | /* Init null attributes */ | 442 | snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx); |
| 459 | perf_iommu->null_group = NULL; | ||
| 460 | perf_iommu->pmu.attr_groups = perf_iommu->attr_groups; | ||
| 461 | 443 | ||
| 462 | ret = perf_pmu_register(&perf_iommu->pmu, name, -1); | 444 | ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1); |
| 463 | if (ret) { | 445 | if (!ret) { |
| 464 | pr_err("perf: amd_iommu: Failed to initialized.\n"); | 446 | pr_info("Detected AMD IOMMU #%d (%d banks, %d counters/bank).\n", |
| 465 | amd_iommu_pc_exit(); | 447 | idx, perf_iommu->max_banks, perf_iommu->max_counters); |
| 448 | list_add_tail(&perf_iommu->list, &perf_amd_iommu_list); | ||
| 466 | } else { | 449 | } else { |
| 467 | pr_info("perf: amd_iommu: Detected. (%d banks, %d counters/bank)\n", | 450 | pr_warn("Error initializing IOMMU %d.\n", idx); |
| 468 | amd_iommu_pc_get_max_banks(IOMMU_BASE_DEVID), | 451 | kfree(perf_iommu); |
| 469 | amd_iommu_pc_get_max_counters(IOMMU_BASE_DEVID)); | ||
| 470 | } | 452 | } |
| 471 | |||
| 472 | return ret; | 453 | return ret; |
| 473 | } | 454 | } |
| 474 | 455 | ||
| 475 | static struct perf_amd_iommu __perf_iommu = { | ||
| 476 | .pmu = { | ||
| 477 | .task_ctx_nr = perf_invalid_context, | ||
| 478 | .event_init = perf_iommu_event_init, | ||
| 479 | .add = perf_iommu_add, | ||
| 480 | .del = perf_iommu_del, | ||
| 481 | .start = perf_iommu_start, | ||
| 482 | .stop = perf_iommu_stop, | ||
| 483 | .read = perf_iommu_read, | ||
| 484 | }, | ||
| 485 | .max_banks = 0x00, | ||
| 486 | .max_counters = 0x00, | ||
| 487 | .cntr_assign_mask = 0ULL, | ||
| 488 | .format_group = NULL, | ||
| 489 | .cpumask_group = NULL, | ||
| 490 | .events_group = NULL, | ||
| 491 | .null_group = NULL, | ||
| 492 | }; | ||
| 493 | |||
| 494 | static __init int amd_iommu_pc_init(void) | 456 | static __init int amd_iommu_pc_init(void) |
| 495 | { | 457 | { |
| 458 | unsigned int i, cnt = 0; | ||
| 459 | int ret; | ||
| 460 | |||
| 496 | /* Make sure the IOMMU PC resource is available */ | 461 | /* Make sure the IOMMU PC resource is available */ |
| 497 | if (!amd_iommu_pc_supported()) | 462 | if (!amd_iommu_pc_supported()) |
| 498 | return -ENODEV; | 463 | return -ENODEV; |
| 499 | 464 | ||
| 500 | _init_perf_amd_iommu(&__perf_iommu, "amd_iommu"); | 465 | ret = _init_events_attrs(); |
| 466 | if (ret) | ||
| 467 | return ret; | ||
| 468 | |||
| 469 | /* | ||
| 470 | * An IOMMU PMU is specific to an IOMMU, and can function independently. | ||
| 471 | * So we go through all IOMMUs and ignore any that fail init, | ||
| 472 | * unless all IOMMUs are failing. | ||
| 473 | */ | ||
| 474 | for (i = 0; i < amd_iommu_get_num_iommus(); i++) { | ||
| 475 | ret = init_one_iommu(i); | ||
| 476 | if (!ret) | ||
| 477 | cnt++; | ||
| 478 | } | ||
| 479 | |||
| 480 | if (!cnt) { | ||
| 481 | kfree(amd_iommu_events_group.attrs); | ||
| 482 | return -ENODEV; | ||
| 483 | } | ||
| 501 | 484 | ||
| 485 | /* Init cpumask attributes to only core 0 */ | ||
| 486 | cpumask_set_cpu(0, &iommu_cpumask); | ||
| 502 | return 0; | 487 | return 0; |
| 503 | } | 488 | } |
| 504 | 489 | ||
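The rework replaces the extra_reg encoding with a straight conf/conf1 pair, swapping the domid and pasid fields and widening pasid to 20 bits, as the PMU_FORMAT_ATTR lines above define. A standalone sketch of the packing that the GET_CSOURCE/GET_DEVID/GET_DOMID/GET_PASID masks decode; the helper name is illustrative:

    #include <stdint.h>

    /* csource in bits 0-7, devid in 8-23, domid in 24-39, pasid in 40-59,
     * matching the "config:" ranges exported through sysfs. */
    static uint64_t pack_iommu_conf(uint8_t csource, uint16_t devid,
                                    uint16_t domid, uint32_t pasid)
    {
            return (uint64_t)csource |
                   ((uint64_t)devid << 8) |
                   ((uint64_t)domid << 24) |
                   (((uint64_t)pasid & 0xFFFFF) << 40);  /* 20-bit pasid */
    }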
diff --git a/arch/x86/events/amd/iommu.h b/arch/x86/events/amd/iommu.h index 845d173278e3..62e0702c4374 100644 --- a/arch/x86/events/amd/iommu.h +++ b/arch/x86/events/amd/iommu.h | |||
| @@ -24,17 +24,23 @@ | |||
| 24 | #define PC_MAX_SPEC_BNKS 64 | 24 | #define PC_MAX_SPEC_BNKS 64 |
| 25 | #define PC_MAX_SPEC_CNTRS 16 | 25 | #define PC_MAX_SPEC_CNTRS 16 |
| 26 | 26 | ||
| 27 | /* iommu pc reg masks*/ | 27 | struct amd_iommu; |
| 28 | #define IOMMU_BASE_DEVID 0x0000 | ||
| 29 | 28 | ||
| 30 | /* amd_iommu_init.c external support functions */ | 29 | /* amd_iommu_init.c external support functions */ |
| 30 | extern int amd_iommu_get_num_iommus(void); | ||
| 31 | |||
| 31 | extern bool amd_iommu_pc_supported(void); | 32 | extern bool amd_iommu_pc_supported(void); |
| 32 | 33 | ||
| 33 | extern u8 amd_iommu_pc_get_max_banks(u16 devid); | 34 | extern u8 amd_iommu_pc_get_max_banks(unsigned int idx); |
| 35 | |||
| 36 | extern u8 amd_iommu_pc_get_max_counters(unsigned int idx); | ||
| 37 | |||
| 38 | extern int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, | ||
| 39 | u8 fxn, u64 *value); | ||
| 34 | 40 | ||
| 35 | extern u8 amd_iommu_pc_get_max_counters(u16 devid); | 41 | extern int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, |
| 42 | u8 fxn, u64 *value); | ||
| 36 | 43 | ||
| 37 | extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, | 44 | extern struct amd_iommu *get_amd_iommu(int idx); |
| 38 | u8 fxn, u64 *value, bool is_write); | ||
| 39 | 45 | ||
| 40 | #endif /*_PERF_EVENT_AMD_IOMMU_H_*/ | 46 | #endif /*_PERF_EVENT_AMD_IOMMU_H_*/ |
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 354e9ff2978c..ae8324d65e61 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <asm/insn.h> | 28 | #include <asm/insn.h> |
| 29 | #include <asm/io.h> | 29 | #include <asm/io.h> |
| 30 | #include <asm/intel_pt.h> | 30 | #include <asm/intel_pt.h> |
| 31 | #include <asm/intel-family.h> | ||
| 31 | 32 | ||
| 32 | #include "../perf_event.h" | 33 | #include "../perf_event.h" |
| 33 | #include "pt.h" | 34 | #include "pt.h" |
| @@ -98,6 +99,7 @@ static struct attribute_group pt_cap_group = { | |||
| 98 | .name = "caps", | 99 | .name = "caps", |
| 99 | }; | 100 | }; |
| 100 | 101 | ||
| 102 | PMU_FORMAT_ATTR(pt, "config:0" ); | ||
| 101 | PMU_FORMAT_ATTR(cyc, "config:1" ); | 103 | PMU_FORMAT_ATTR(cyc, "config:1" ); |
| 102 | PMU_FORMAT_ATTR(pwr_evt, "config:4" ); | 104 | PMU_FORMAT_ATTR(pwr_evt, "config:4" ); |
| 103 | PMU_FORMAT_ATTR(fup_on_ptw, "config:5" ); | 105 | PMU_FORMAT_ATTR(fup_on_ptw, "config:5" ); |
| @@ -105,11 +107,13 @@ PMU_FORMAT_ATTR(mtc, "config:9" ); | |||
| 105 | PMU_FORMAT_ATTR(tsc, "config:10" ); | 107 | PMU_FORMAT_ATTR(tsc, "config:10" ); |
| 106 | PMU_FORMAT_ATTR(noretcomp, "config:11" ); | 108 | PMU_FORMAT_ATTR(noretcomp, "config:11" ); |
| 107 | PMU_FORMAT_ATTR(ptw, "config:12" ); | 109 | PMU_FORMAT_ATTR(ptw, "config:12" ); |
| 110 | PMU_FORMAT_ATTR(branch, "config:13" ); | ||
| 108 | PMU_FORMAT_ATTR(mtc_period, "config:14-17" ); | 111 | PMU_FORMAT_ATTR(mtc_period, "config:14-17" ); |
| 109 | PMU_FORMAT_ATTR(cyc_thresh, "config:19-22" ); | 112 | PMU_FORMAT_ATTR(cyc_thresh, "config:19-22" ); |
| 110 | PMU_FORMAT_ATTR(psb_period, "config:24-27" ); | 113 | PMU_FORMAT_ATTR(psb_period, "config:24-27" ); |
| 111 | 114 | ||
| 112 | static struct attribute *pt_formats_attr[] = { | 115 | static struct attribute *pt_formats_attr[] = { |
| 116 | &format_attr_pt.attr, | ||
| 113 | &format_attr_cyc.attr, | 117 | &format_attr_cyc.attr, |
| 114 | &format_attr_pwr_evt.attr, | 118 | &format_attr_pwr_evt.attr, |
| 115 | &format_attr_fup_on_ptw.attr, | 119 | &format_attr_fup_on_ptw.attr, |
| @@ -117,6 +121,7 @@ static struct attribute *pt_formats_attr[] = { | |||
| 117 | &format_attr_tsc.attr, | 121 | &format_attr_tsc.attr, |
| 118 | &format_attr_noretcomp.attr, | 122 | &format_attr_noretcomp.attr, |
| 119 | &format_attr_ptw.attr, | 123 | &format_attr_ptw.attr, |
| 124 | &format_attr_branch.attr, | ||
| 120 | &format_attr_mtc_period.attr, | 125 | &format_attr_mtc_period.attr, |
| 121 | &format_attr_cyc_thresh.attr, | 126 | &format_attr_cyc_thresh.attr, |
| 122 | &format_attr_psb_period.attr, | 127 | &format_attr_psb_period.attr, |
| @@ -197,6 +202,19 @@ static int __init pt_pmu_hw_init(void) | |||
| 197 | pt_pmu.tsc_art_den = eax; | 202 | pt_pmu.tsc_art_den = eax; |
| 198 | } | 203 | } |
| 199 | 204 | ||
| 205 | /* model-specific quirks */ | ||
| 206 | switch (boot_cpu_data.x86_model) { | ||
| 207 | case INTEL_FAM6_BROADWELL_CORE: | ||
| 208 | case INTEL_FAM6_BROADWELL_XEON_D: | ||
| 209 | case INTEL_FAM6_BROADWELL_GT3E: | ||
| 210 | case INTEL_FAM6_BROADWELL_X: | ||
| 211 | /* not setting BRANCH_EN will #GP, erratum BDM106 */ | ||
| 212 | pt_pmu.branch_en_always_on = true; | ||
| 213 | break; | ||
| 214 | default: | ||
| 215 | break; | ||
| 216 | } | ||
| 217 | |||
| 200 | if (boot_cpu_has(X86_FEATURE_VMX)) { | 218 | if (boot_cpu_has(X86_FEATURE_VMX)) { |
| 201 | /* | 219 | /* |
| 202 | * Intel SDM, 36.5 "Tracing post-VMXON" says that | 220 | * Intel SDM, 36.5 "Tracing post-VMXON" says that |
| @@ -263,8 +281,20 @@ fail: | |||
| 263 | #define RTIT_CTL_PTW (RTIT_CTL_PTW_EN | \ | 281 | #define RTIT_CTL_PTW (RTIT_CTL_PTW_EN | \ |
| 264 | RTIT_CTL_FUP_ON_PTW) | 282 | RTIT_CTL_FUP_ON_PTW) |
| 265 | 283 | ||
| 266 | #define PT_CONFIG_MASK (RTIT_CTL_TSC_EN | \ | 284 | /* |
| 285 | * Bit 0 (TraceEn) in the attr.config is meaningless as the | ||
| 286 | * corresponding bit in the RTIT_CTL can only be controlled | ||
| 287 | * by the driver; therefore, repurpose it to mean: pass | ||
| 288 | * through the bit that was previously assumed to be always | ||
| 289 | * on for PT, thereby allowing the user to *not* set it if | ||
| 290 | * they so wish. See also pt_event_valid() and pt_config(). | ||
| 291 | */ | ||
| 292 | #define RTIT_CTL_PASSTHROUGH RTIT_CTL_TRACEEN | ||
| 293 | |||
| 294 | #define PT_CONFIG_MASK (RTIT_CTL_TRACEEN | \ | ||
| 295 | RTIT_CTL_TSC_EN | \ | ||
| 267 | RTIT_CTL_DISRETC | \ | 296 | RTIT_CTL_DISRETC | \ |
| 297 | RTIT_CTL_BRANCH_EN | \ | ||
| 268 | RTIT_CTL_CYC_PSB | \ | 298 | RTIT_CTL_CYC_PSB | \ |
| 269 | RTIT_CTL_MTC | \ | 299 | RTIT_CTL_MTC | \ |
| 270 | RTIT_CTL_PWR_EVT_EN | \ | 300 | RTIT_CTL_PWR_EVT_EN | \ |
| @@ -332,6 +362,33 @@ static bool pt_event_valid(struct perf_event *event) | |||
| 332 | return false; | 362 | return false; |
| 333 | } | 363 | } |
| 334 | 364 | ||
| 365 | /* | ||
| 366 | * Setting bit 0 (TraceEn in RTIT_CTL MSR) in the attr.config | ||
| 367 | * clears the assumption that BranchEn must always be enabled, | ||
| 368 | * as was the case with the first implementation of PT. | ||
| 369 | * If this bit is not set, the legacy behavior is preserved | ||
| 370 | * for compatibility with older userspace. | ||
| 371 | * | ||
| 372 | * Re-using bit 0 for this purpose is fine because it is never | ||
| 373 | * directly set by the user; previous attempts at setting it in | ||
| 374 | * the attr.config resulted in -EINVAL. | ||
| 375 | */ | ||
| 376 | if (config & RTIT_CTL_PASSTHROUGH) { | ||
| 377 | /* | ||
| 378 | * Reject configs that leave BRANCH_EN clear where the | ||
| 379 | * hardware requires it to be set. | ||
| 380 | */ | ||
| 381 | if (pt_pmu.branch_en_always_on && | ||
| 382 | !(config & RTIT_CTL_BRANCH_EN)) | ||
| 383 | return false; | ||
| 384 | } else { | ||
| 385 | /* | ||
| 386 | * Disallow BRANCH_EN without the PASSTHROUGH. | ||
| 387 | */ | ||
| 388 | if (config & RTIT_CTL_BRANCH_EN) | ||
| 389 | return false; | ||
| 390 | } | ||
| 391 | |||
| 335 | return true; | 392 | return true; |
| 336 | } | 393 | } |
| 337 | 394 | ||
| @@ -420,7 +477,20 @@ static void pt_config(struct perf_event *event) | |||
| 420 | } | 477 | } |
| 421 | 478 | ||
| 422 | reg = pt_config_filters(event); | 479 | reg = pt_config_filters(event); |
| 423 | reg |= RTIT_CTL_TOPA | RTIT_CTL_BRANCH_EN | RTIT_CTL_TRACEEN; | 480 | reg |= RTIT_CTL_TOPA | RTIT_CTL_TRACEEN; |
| 481 | |||
| 482 | /* | ||
| 483 | * Previously, we had BRANCH_EN on by default, but now that PT has | ||
| 484 | * grown features outside of branch tracing, it is useful to allow | ||
| 485 | * the user to disable it. Setting bit 0 in the event's attr.config | ||
| 486 | * allows BRANCH_EN to pass through instead of being always on. See | ||
| 487 | * also the comment in pt_event_valid(). | ||
| 488 | */ | ||
| 489 | if (event->attr.config & BIT(0)) { | ||
| 490 | reg |= event->attr.config & RTIT_CTL_BRANCH_EN; | ||
| 491 | } else { | ||
| 492 | reg |= RTIT_CTL_BRANCH_EN; | ||
| 493 | } | ||
| 424 | 494 | ||
| 425 | if (!event->attr.exclude_kernel) | 495 | if (!event->attr.exclude_kernel) |
| 426 | reg |= RTIT_CTL_OS; | 496 | reg |= RTIT_CTL_OS; |
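The upshot for userspace: setting the new pt bit (config:0) opts an event into passthrough mode, after which branch (config:13) is honored verbatim; leaving pt clear keeps the legacy always-on BRANCH_EN behavior. A hedged sketch of building such an attr; the helper is illustrative and the PMU type must be read from sysfs at runtime:

    #include <linux/perf_event.h>
    #include <string.h>

    /* Request PT without branch tracing: pt=1, branch=0. On Broadwell
     * parts pt_event_valid() rejects this (erratum BDM106). */
    static void pt_attr_no_branches(struct perf_event_attr *attr, __u32 pt_type)
    {
            memset(attr, 0, sizeof(*attr));
            attr->size = sizeof(*attr);
            attr->type = pt_type;     /* /sys/bus/event_source/devices/intel_pt/type */
            attr->config = 1ULL << 0; /* pt: pass BRANCH_EN through, left clear here */
    }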
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h index b528e8f373e4..0eb41d07b79a 100644 --- a/arch/x86/events/intel/pt.h +++ b/arch/x86/events/intel/pt.h | |||
| @@ -110,6 +110,7 @@ struct pt_pmu { | |||
| 110 | struct pmu pmu; | 110 | struct pmu pmu; |
| 111 | u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES]; | 111 | u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES]; |
| 112 | bool vmx; | 112 | bool vmx; |
| 113 | bool branch_en_always_on; | ||
| 113 | unsigned long max_nonturbo_ratio; | 114 | unsigned long max_nonturbo_ratio; |
| 114 | unsigned int tsc_art_num; | 115 | unsigned int tsc_art_num; |
| 115 | unsigned int tsc_art_den; | 116 | unsigned int tsc_art_den; |
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index d74747b031ec..c4eda791f877 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h | |||
| @@ -46,6 +46,7 @@ struct kvm_page_track_notifier_node { | |||
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | void kvm_page_track_init(struct kvm *kvm); | 48 | void kvm_page_track_init(struct kvm *kvm); |
| 49 | void kvm_page_track_cleanup(struct kvm *kvm); | ||
| 49 | 50 | ||
| 50 | void kvm_page_track_free_memslot(struct kvm_memory_slot *free, | 51 | void kvm_page_track_free_memslot(struct kvm_memory_slot *free, |
| 51 | struct kvm_memory_slot *dont); | 52 | struct kvm_memory_slot *dont); |
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 72277b1028a5..50d35e3185f5 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h | |||
| @@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd) | |||
| 121 | *(tmp + 1) = 0; | 121 | *(tmp + 1) = 0; |
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | #if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \ | ||
| 125 | defined(CONFIG_PARAVIRT)) | ||
| 126 | static inline void native_pud_clear(pud_t *pudp) | 124 | static inline void native_pud_clear(pud_t *pudp) |
| 127 | { | 125 | { |
| 128 | } | 126 | } |
| 129 | #endif | ||
| 130 | 127 | ||
| 131 | static inline void pud_clear(pud_t *pudp) | 128 | static inline void pud_clear(pud_t *pudp) |
| 132 | { | 129 | { |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 1cfb36b8c024..585ee0d42d18 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
| @@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); | |||
| 62 | # define set_pud(pudp, pud) native_set_pud(pudp, pud) | 62 | # define set_pud(pudp, pud) native_set_pud(pudp, pud) |
| 63 | #endif | 63 | #endif |
| 64 | 64 | ||
| 65 | #ifndef __PAGETABLE_PMD_FOLDED | 65 | #ifndef __PAGETABLE_PUD_FOLDED |
| 66 | #define pud_clear(pud) native_pud_clear(pud) | 66 | #define pud_clear(pud) native_pud_clear(pud) |
| 67 | #endif | 67 | #endif |
| 68 | 68 | ||
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index ae32838cac5f..b2879cc23db4 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
| @@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled) | |||
| 179 | return -EINVAL; | 179 | return -EINVAL; |
| 180 | } | 180 | } |
| 181 | 181 | ||
| 182 | if (!enabled) { | ||
| 183 | ++disabled_cpus; | ||
| 184 | return -EINVAL; | ||
| 185 | } | ||
| 186 | |||
| 182 | if (boot_cpu_physical_apicid != -1U) | 187 | if (boot_cpu_physical_apicid != -1U) |
| 183 | ver = boot_cpu_apic_version; | 188 | ver = boot_cpu_apic_version; |
| 184 | 189 | ||
| 185 | cpu = __generic_processor_info(id, ver, enabled); | 190 | cpu = generic_processor_info(id, ver); |
| 186 | if (cpu >= 0) | 191 | if (cpu >= 0) |
| 187 | early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid; | 192 | early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid; |
| 188 | 193 | ||
| @@ -710,7 +715,7 @@ static void __init acpi_set_irq_model_ioapic(void) | |||
| 710 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | 715 | #ifdef CONFIG_ACPI_HOTPLUG_CPU |
| 711 | #include <acpi/processor.h> | 716 | #include <acpi/processor.h> |
| 712 | 717 | ||
| 713 | int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) | 718 | static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) |
| 714 | { | 719 | { |
| 715 | #ifdef CONFIG_ACPI_NUMA | 720 | #ifdef CONFIG_ACPI_NUMA |
| 716 | int nid; | 721 | int nid; |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index aee7deddabd0..8ccb7ef512e0 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
| @@ -2063,7 +2063,7 @@ static int allocate_logical_cpuid(int apicid) | |||
| 2063 | return nr_logical_cpuids++; | 2063 | return nr_logical_cpuids++; |
| 2064 | } | 2064 | } |
| 2065 | 2065 | ||
| 2066 | int __generic_processor_info(int apicid, int version, bool enabled) | 2066 | int generic_processor_info(int apicid, int version) |
| 2067 | { | 2067 | { |
| 2068 | int cpu, max = nr_cpu_ids; | 2068 | int cpu, max = nr_cpu_ids; |
| 2069 | bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid, | 2069 | bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid, |
| @@ -2121,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled) | |||
| 2121 | if (num_processors >= nr_cpu_ids) { | 2121 | if (num_processors >= nr_cpu_ids) { |
| 2122 | int thiscpu = max + disabled_cpus; | 2122 | int thiscpu = max + disabled_cpus; |
| 2123 | 2123 | ||
| 2124 | if (enabled) { | 2124 | pr_warning("APIC: NR_CPUS/possible_cpus limit of %i " |
| 2125 | pr_warning("APIC: NR_CPUS/possible_cpus limit of %i " | 2125 | "reached. Processor %d/0x%x ignored.\n", |
| 2126 | "reached. Processor %d/0x%x ignored.\n", | 2126 | max, thiscpu, apicid); |
| 2127 | max, thiscpu, apicid); | ||
| 2128 | } | ||
| 2129 | 2127 | ||
| 2130 | disabled_cpus++; | 2128 | disabled_cpus++; |
| 2131 | return -EINVAL; | 2129 | return -EINVAL; |
| @@ -2177,23 +2175,13 @@ int __generic_processor_info(int apicid, int version, bool enabled) | |||
| 2177 | apic->x86_32_early_logical_apicid(cpu); | 2175 | apic->x86_32_early_logical_apicid(cpu); |
| 2178 | #endif | 2176 | #endif |
| 2179 | set_cpu_possible(cpu, true); | 2177 | set_cpu_possible(cpu, true); |
| 2180 | 2178 | physid_set(apicid, phys_cpu_present_map); | |
| 2181 | if (enabled) { | 2179 | set_cpu_present(cpu, true); |
| 2182 | num_processors++; | 2180 | num_processors++; |
| 2183 | physid_set(apicid, phys_cpu_present_map); | ||
| 2184 | set_cpu_present(cpu, true); | ||
| 2185 | } else { | ||
| 2186 | disabled_cpus++; | ||
| 2187 | } | ||
| 2188 | 2181 | ||
| 2189 | return cpu; | 2182 | return cpu; |
| 2190 | } | 2183 | } |
| 2191 | 2184 | ||
| 2192 | int generic_processor_info(int apicid, int version) | ||
| 2193 | { | ||
| 2194 | return __generic_processor_info(apicid, version, true); | ||
| 2195 | } | ||
| 2196 | |||
| 2197 | int hard_smp_processor_id(void) | 2185 | int hard_smp_processor_id(void) |
| 2198 | { | 2186 | { |
| 2199 | return read_apic_id(); | 2187 | return read_apic_id(); |
diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index c05509d38b1f..9ac2a5cdd9c2 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | |||
| @@ -727,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn) | |||
| 727 | if (atomic_dec_and_test(&rdtgrp->waitcount) && | 727 | if (atomic_dec_and_test(&rdtgrp->waitcount) && |
| 728 | (rdtgrp->flags & RDT_DELETED)) { | 728 | (rdtgrp->flags & RDT_DELETED)) { |
| 729 | kernfs_unbreak_active_protection(kn); | 729 | kernfs_unbreak_active_protection(kn); |
| 730 | kernfs_put(kn); | 730 | kernfs_put(rdtgrp->kn); |
| 731 | kfree(rdtgrp); | 731 | kfree(rdtgrp); |
| 732 | } else { | 732 | } else { |
| 733 | kernfs_unbreak_active_protection(kn); | 733 | kernfs_unbreak_active_protection(kn); |
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 54a2372f5dbb..b5785c197e53 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE | 4 | * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE |
| 5 | */ | 5 | */ |
| 6 | 6 | ||
| 7 | #define DISABLE_BRANCH_PROFILING | ||
| 7 | #include <linux/init.h> | 8 | #include <linux/init.h> |
| 8 | #include <linux/linkage.h> | 9 | #include <linux/linkage.h> |
| 9 | #include <linux/types.h> | 10 | #include <linux/types.h> |
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index f088ea4c66e7..a723ae9440ab 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c | |||
| @@ -166,11 +166,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action) | |||
| 166 | spin_lock_irqsave(&desc->lock, flags); | 166 | spin_lock_irqsave(&desc->lock, flags); |
| 167 | 167 | ||
| 168 | /* | 168 | /* |
| 169 | * most handlers of type NMI_UNKNOWN never return because | 169 | * Indicate if there are multiple registrations on the |
| 170 | * they just assume the NMI is theirs. Just a sanity check | 170 | * internal NMI handler call chains (SERR and IO_CHECK). |
| 171 | * to manage expectations | ||
| 172 | */ | 171 | */ |
| 173 | WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head)); | ||
| 174 | WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head)); | 172 | WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head)); |
| 175 | WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head)); | 173 | WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head)); |
| 176 | 174 | ||
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 4f7a9833d8e5..c73a7f9e881a 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
| @@ -1333,6 +1333,8 @@ static int __init init_tsc_clocksource(void) | |||
| 1333 | * the refined calibration and directly register it as a clocksource. | 1333 | * the refined calibration and directly register it as a clocksource. |
| 1334 | */ | 1334 | */ |
| 1335 | if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) { | 1335 | if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) { |
| 1336 | if (boot_cpu_has(X86_FEATURE_ART)) | ||
| 1337 | art_related_clocksource = &clocksource_tsc; | ||
| 1336 | clocksource_register_khz(&clocksource_tsc, tsc_khz); | 1338 | clocksource_register_khz(&clocksource_tsc, tsc_khz); |
| 1337 | return 0; | 1339 | return 0; |
| 1338 | } | 1340 | } |
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index 478d15dbaee4..08339262b666 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c | |||
| @@ -82,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs) | |||
| 82 | return sizeof(*regs); | 82 | return sizeof(*regs); |
| 83 | } | 83 | } |
| 84 | 84 | ||
| 85 | #ifdef CONFIG_X86_32 | ||
| 86 | #define GCC_REALIGN_WORDS 3 | ||
| 87 | #else | ||
| 88 | #define GCC_REALIGN_WORDS 1 | ||
| 89 | #endif | ||
| 90 | |||
| 85 | static bool is_last_task_frame(struct unwind_state *state) | 91 | static bool is_last_task_frame(struct unwind_state *state) |
| 86 | { | 92 | { |
| 87 | unsigned long bp = (unsigned long)state->bp; | 93 | unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2; |
| 88 | unsigned long regs = (unsigned long)task_pt_regs(state->task); | 94 | unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS; |
| 89 | 95 | ||
| 90 | /* | 96 | /* |
| 91 | * We have to check for the last task frame at two different locations | 97 | * We have to check for the last task frame at two different locations |
| 92 | * because gcc can occasionally decide to realign the stack pointer and | 98 | * because gcc can occasionally decide to realign the stack pointer and |
| 93 | * change the offset of the stack frame by a word in the prologue of a | 99 | * change the offset of the stack frame in the prologue of a function |
| 94 | * function called by head/entry code. | 100 | * called by head/entry code. Examples: |
| 101 | * | ||
| 102 | * <start_secondary>: | ||
| 103 | * push %edi | ||
| 104 | * lea 0x8(%esp),%edi | ||
| 105 | * and $0xfffffff8,%esp | ||
| 106 | * pushl -0x4(%edi) | ||
| 107 | * push %ebp | ||
| 108 | * mov %esp,%ebp | ||
| 109 | * | ||
| 110 | * <x86_64_start_kernel>: | ||
| 111 | * lea 0x8(%rsp),%r10 | ||
| 112 | * and $0xfffffffffffffff0,%rsp | ||
| 113 | * pushq -0x8(%r10) | ||
| 114 | * push %rbp | ||
| 115 | * mov %rsp,%rbp | ||
| 116 | * | ||
| 117 | * Note that after aligning the stack, it pushes a duplicate copy of | ||
| 118 | * the return address before pushing the frame pointer. | ||
| 95 | */ | 119 | */ |
| 96 | return bp == regs - FRAME_HEADER_SIZE || | 120 | return (state->bp == last_bp || |
| 97 | bp == regs - FRAME_HEADER_SIZE - sizeof(long); | 121 | (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1))); |
| 98 | } | 122 | } |
| 99 | 123 | ||
| 100 | /* | 124 | /* |
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 73ea24d4f119..047b17a26269 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c | |||
| @@ -657,6 +657,9 @@ void kvm_pic_destroy(struct kvm *kvm) | |||
| 657 | { | 657 | { |
| 658 | struct kvm_pic *vpic = kvm->arch.vpic; | 658 | struct kvm_pic *vpic = kvm->arch.vpic; |
| 659 | 659 | ||
| 660 | if (!vpic) | ||
| 661 | return; | ||
| 662 | |||
| 660 | kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master); | 663 | kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master); |
| 661 | kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave); | 664 | kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave); |
| 662 | kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr); | 665 | kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr); |
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 6e219e5c07d2..289270a6aecb 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c | |||
| @@ -635,6 +635,9 @@ void kvm_ioapic_destroy(struct kvm *kvm) | |||
| 635 | { | 635 | { |
| 636 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; | 636 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; |
| 637 | 637 | ||
| 638 | if (!ioapic) | ||
| 639 | return; | ||
| 640 | |||
| 638 | cancel_delayed_work_sync(&ioapic->eoi_inject); | 641 | cancel_delayed_work_sync(&ioapic->eoi_inject); |
| 639 | kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); | 642 | kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); |
| 640 | kvm->arch.vioapic = NULL; | 643 | kvm->arch.vioapic = NULL; |
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c index 37942e419c32..60168cdd0546 100644 --- a/arch/x86/kvm/page_track.c +++ b/arch/x86/kvm/page_track.c | |||
| @@ -160,6 +160,14 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, | |||
| 160 | return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]); | 160 | return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]); |
| 161 | } | 161 | } |
| 162 | 162 | ||
| 163 | void kvm_page_track_cleanup(struct kvm *kvm) | ||
| 164 | { | ||
| 165 | struct kvm_page_track_notifier_head *head; | ||
| 166 | |||
| 167 | head = &kvm->arch.track_notifier_head; | ||
| 168 | cleanup_srcu_struct(&head->track_srcu); | ||
| 169 | } | ||
| 170 | |||
| 163 | void kvm_page_track_init(struct kvm *kvm) | 171 | void kvm_page_track_init(struct kvm *kvm) |
| 164 | { | 172 | { |
| 165 | struct kvm_page_track_notifier_head *head; | 173 | struct kvm_page_track_notifier_head *head; |
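kvm_page_track_cleanup() releases the SRCU state that kvm_page_track_init() sets up via init_srcu_struct(); without it, each VM teardown leaks the SRCU per-CPU array. The intended pairing, shown schematically (the arch call sites are an assumption, not visible in this hunk):

    /* illustrative: mirrors kvm_page_track_init() in VM creation */
    static void example_arch_destroy_vm(struct kvm *kvm)
    {
            kvm_page_track_cleanup(kvm);   /* cleanup_srcu_struct() inside */
    }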
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d1efe2c62b3f..5fba70646c32 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -1379,6 +1379,9 @@ static void avic_vm_destroy(struct kvm *kvm) | |||
| 1379 | unsigned long flags; | 1379 | unsigned long flags; |
| 1380 | struct kvm_arch *vm_data = &kvm->arch; | 1380 | struct kvm_arch *vm_data = &kvm->arch; |
| 1381 | 1381 | ||
| 1382 | if (!avic) | ||
| 1383 | return; | ||
| 1384 | |||
| 1382 | avic_free_vm_id(vm_data->avic_vm_id); | 1385 | avic_free_vm_id(vm_data->avic_vm_id); |
| 1383 | 1386 | ||
| 1384 | if (vm_data->avic_logical_id_table_page) | 1387 | if (vm_data->avic_logical_id_table_page) |
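The i8259, ioapic and AVIC guards above share one shape: a destroy path must degrade to a no-op when the device was never created, e.g. after a partially failed VM setup or with the feature disabled. A self-contained sketch with toy types:

    #include <stdlib.h>

    struct vdev { int id; };
    struct vm   { struct vdev *pic; };

    static void vm_destroy_pic(struct vm *vm)
    {
            if (!vm->pic)            /* never created: nothing to tear down */
                    return;

            free(vm->pic);
            vm->pic = NULL;
    }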
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 98e82ee1e699..2ee00dbbbd51 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -1239,6 +1239,11 @@ static inline bool cpu_has_vmx_invvpid_global(void) | |||
| 1239 | return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; | 1239 | return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; |
| 1240 | } | 1240 | } |
| 1241 | 1241 | ||
| 1242 | static inline bool cpu_has_vmx_invvpid(void) | ||
| 1243 | { | ||
| 1244 | return vmx_capability.vpid & VMX_VPID_INVVPID_BIT; | ||
| 1245 | } | ||
| 1246 | |||
| 1242 | static inline bool cpu_has_vmx_ept(void) | 1247 | static inline bool cpu_has_vmx_ept(void) |
| 1243 | { | 1248 | { |
| 1244 | return vmcs_config.cpu_based_2nd_exec_ctrl & | 1249 | return vmcs_config.cpu_based_2nd_exec_ctrl & |
| @@ -2753,7 +2758,6 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) | |||
| 2753 | SECONDARY_EXEC_RDTSCP | | 2758 | SECONDARY_EXEC_RDTSCP | |
| 2754 | SECONDARY_EXEC_DESC | | 2759 | SECONDARY_EXEC_DESC | |
| 2755 | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | | 2760 | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | |
| 2756 | SECONDARY_EXEC_ENABLE_VPID | | ||
| 2757 | SECONDARY_EXEC_APIC_REGISTER_VIRT | | 2761 | SECONDARY_EXEC_APIC_REGISTER_VIRT | |
| 2758 | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | | 2762 | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | |
| 2759 | SECONDARY_EXEC_WBINVD_EXITING | | 2763 | SECONDARY_EXEC_WBINVD_EXITING | |
| @@ -2781,10 +2785,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) | |||
| 2781 | * though it is treated as global context. The alternative is | 2785 | * though it is treated as global context. The alternative is |
| 2782 | * not failing the single-context invvpid, and it is worse. | 2786 | * not failing the single-context invvpid, and it is worse. |
| 2783 | */ | 2787 | */ |
| 2784 | if (enable_vpid) | 2788 | if (enable_vpid) { |
| 2789 | vmx->nested.nested_vmx_secondary_ctls_high |= | ||
| 2790 | SECONDARY_EXEC_ENABLE_VPID; | ||
| 2785 | vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT | | 2791 | vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT | |
| 2786 | VMX_VPID_EXTENT_SUPPORTED_MASK; | 2792 | VMX_VPID_EXTENT_SUPPORTED_MASK; |
| 2787 | else | 2793 | } else |
| 2788 | vmx->nested.nested_vmx_vpid_caps = 0; | 2794 | vmx->nested.nested_vmx_vpid_caps = 0; |
| 2789 | 2795 | ||
| 2790 | if (enable_unrestricted_guest) | 2796 | if (enable_unrestricted_guest) |
| @@ -4024,6 +4030,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu) | |||
| 4024 | __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); | 4030 | __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); |
| 4025 | } | 4031 | } |
| 4026 | 4032 | ||
| 4033 | static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu) | ||
| 4034 | { | ||
| 4035 | if (enable_ept) | ||
| 4036 | vmx_flush_tlb(vcpu); | ||
| 4037 | } | ||
| 4038 | |||
| 4027 | static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) | 4039 | static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) |
| 4028 | { | 4040 | { |
| 4029 | ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; | 4041 | ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; |
| @@ -6517,8 +6529,10 @@ static __init int hardware_setup(void) | |||
| 6517 | if (boot_cpu_has(X86_FEATURE_NX)) | 6529 | if (boot_cpu_has(X86_FEATURE_NX)) |
| 6518 | kvm_enable_efer_bits(EFER_NX); | 6530 | kvm_enable_efer_bits(EFER_NX); |
| 6519 | 6531 | ||
| 6520 | if (!cpu_has_vmx_vpid()) | 6532 | if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || |
| 6533 | !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) | ||
| 6521 | enable_vpid = 0; | 6534 | enable_vpid = 0; |
| 6535 | |||
| 6522 | if (!cpu_has_vmx_shadow_vmcs()) | 6536 | if (!cpu_has_vmx_shadow_vmcs()) |
| 6523 | enable_shadow_vmcs = 0; | 6537 | enable_shadow_vmcs = 0; |
| 6524 | if (enable_shadow_vmcs) | 6538 | if (enable_shadow_vmcs) |
| @@ -8501,7 +8515,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) | |||
| 8501 | && kvm_vmx_exit_handlers[exit_reason]) | 8515 | && kvm_vmx_exit_handlers[exit_reason]) |
| 8502 | return kvm_vmx_exit_handlers[exit_reason](vcpu); | 8516 | return kvm_vmx_exit_handlers[exit_reason](vcpu); |
| 8503 | else { | 8517 | else { |
| 8504 | WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason); | 8518 | vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", |
| 8519 | exit_reason); | ||
| 8505 | kvm_queue_exception(vcpu, UD_VECTOR); | 8520 | kvm_queue_exception(vcpu, UD_VECTOR); |
| 8506 | return 1; | 8521 | return 1; |
| 8507 | } | 8522 | } |
| @@ -8547,6 +8562,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) | |||
| 8547 | } else { | 8562 | } else { |
| 8548 | sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; | 8563 | sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; |
| 8549 | sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; | 8564 | sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; |
| 8565 | vmx_flush_tlb_ept_only(vcpu); | ||
| 8550 | } | 8566 | } |
| 8551 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); | 8567 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); |
| 8552 | 8568 | ||
| @@ -8572,8 +8588,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) | |||
| 8572 | */ | 8588 | */ |
| 8573 | if (!is_guest_mode(vcpu) || | 8589 | if (!is_guest_mode(vcpu) || |
| 8574 | !nested_cpu_has2(get_vmcs12(&vmx->vcpu), | 8590 | !nested_cpu_has2(get_vmcs12(&vmx->vcpu), |
| 8575 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) | 8591 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { |
| 8576 | vmcs_write64(APIC_ACCESS_ADDR, hpa); | 8592 | vmcs_write64(APIC_ACCESS_ADDR, hpa); |
| 8593 | vmx_flush_tlb_ept_only(vcpu); | ||
| 8594 | } | ||
| 8577 | } | 8595 | } |
| 8578 | 8596 | ||
| 8579 | static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) | 8597 | static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) |
| @@ -9974,7 +9992,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, | |||
| 9974 | { | 9992 | { |
| 9975 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 9993 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 9976 | u32 exec_control; | 9994 | u32 exec_control; |
| 9977 | bool nested_ept_enabled = false; | ||
| 9978 | 9995 | ||
| 9979 | vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); | 9996 | vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); |
| 9980 | vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); | 9997 | vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); |
| @@ -10121,8 +10138,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, | |||
| 10121 | vmcs12->guest_intr_status); | 10138 | vmcs12->guest_intr_status); |
| 10122 | } | 10139 | } |
| 10123 | 10140 | ||
| 10124 | nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0; | ||
| 10125 | |||
| 10126 | /* | 10141 | /* |
| 10127 | * Write an illegal value to APIC_ACCESS_ADDR. Later, | 10142 | * Write an illegal value to APIC_ACCESS_ADDR. Later, |
| 10128 | * nested_get_vmcs12_pages will either fix it up or | 10143 | * nested_get_vmcs12_pages will either fix it up or |
| @@ -10255,6 +10270,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, | |||
| 10255 | if (nested_cpu_has_ept(vmcs12)) { | 10270 | if (nested_cpu_has_ept(vmcs12)) { |
| 10256 | kvm_mmu_unload(vcpu); | 10271 | kvm_mmu_unload(vcpu); |
| 10257 | nested_ept_init_mmu_context(vcpu); | 10272 | nested_ept_init_mmu_context(vcpu); |
| 10273 | } else if (nested_cpu_has2(vmcs12, | ||
| 10274 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { | ||
| 10275 | vmx_flush_tlb_ept_only(vcpu); | ||
| 10258 | } | 10276 | } |
| 10259 | 10277 | ||
| 10260 | /* | 10278 | /* |
| @@ -10282,12 +10300,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, | |||
| 10282 | vmx_set_efer(vcpu, vcpu->arch.efer); | 10300 | vmx_set_efer(vcpu, vcpu->arch.efer); |
| 10283 | 10301 | ||
| 10284 | /* Guest page tables are shadowed by either EPT or shadow page tables. */ | 10302 | /* Guest page tables are shadowed by either EPT or shadow page tables. */ |
| 10285 | if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled, | 10303 | if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), |
| 10286 | entry_failure_code)) | 10304 | entry_failure_code)) |
| 10287 | return 1; | 10305 | return 1; |
| 10288 | 10306 | ||
| 10289 | kvm_mmu_reset_context(vcpu); | ||
| 10290 | |||
| 10291 | if (!enable_ept) | 10307 | if (!enable_ept) |
| 10292 | vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; | 10308 | vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; |
| 10293 | 10309 | ||
| @@ -11056,6 +11072,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, | |||
| 11056 | vmx->nested.change_vmcs01_virtual_x2apic_mode = false; | 11072 | vmx->nested.change_vmcs01_virtual_x2apic_mode = false; |
| 11057 | vmx_set_virtual_x2apic_mode(vcpu, | 11073 | vmx_set_virtual_x2apic_mode(vcpu, |
| 11058 | vcpu->arch.apic_base & X2APIC_ENABLE); | 11074 | vcpu->arch.apic_base & X2APIC_ENABLE); |
| 11075 | } else if (!nested_cpu_has_ept(vmcs12) && | ||
| 11076 | nested_cpu_has2(vmcs12, | ||
| 11077 | SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { | ||
| 11078 | vmx_flush_tlb_ept_only(vcpu); | ||
| 11059 | } | 11079 | } |
| 11060 | 11080 | ||
| 11061 | /* This is needed for same reason as it was needed in prepare_vmcs02 */ | 11081 | /* This is needed for same reason as it was needed in prepare_vmcs02 */ |
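
The vmx.c hunks above are one logical fix in three parts: SECONDARY_EXEC_ENABLE_VPID is advertised to a nested hypervisor only when VPID is actually enabled, hardware_setup() now refuses to enable VPID unless INVVPID with at least one usable scope is present, and the new vmx_flush_tlb_ept_only() helper flushes guest-physical mappings whenever the APIC-access page moves under EPT (with shadow paging there is nothing EPT-cached to flush, hence the enable_ept check). A standalone sketch of the tightened capability gate; the cpu_has_* predicates here are stand-ins for the kernel's VMCS capability checks, not its real helpers:

    /* Standalone sketch of the tightened VPID gate from hardware_setup(). */
    #include <stdbool.h>
    #include <stdio.h>

    static bool cpu_has_vpid(void)           { return true;  }
    static bool cpu_has_invvpid(void)        { return true;  }
    static bool cpu_has_invvpid_single(void) { return false; }
    static bool cpu_has_invvpid_global(void) { return true;  }

    int main(void)
    {
            bool enable_vpid = true;

            /* VPID is only useful if we can also invalidate its TLB
             * entries: require INVVPID plus at least one usable scope. */
            if (!cpu_has_vpid() || !cpu_has_invvpid() ||
                !(cpu_has_invvpid_single() || cpu_has_invvpid_global()))
                    enable_vpid = false;

            printf("enable_vpid = %d\n", enable_vpid);
            return 0;
    }
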
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1faf620a6fdc..ccbd45ecd41a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -8153,11 +8153,12 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
| 8153 | if (kvm_x86_ops->vm_destroy) | 8153 | if (kvm_x86_ops->vm_destroy) |
| 8154 | kvm_x86_ops->vm_destroy(kvm); | 8154 | kvm_x86_ops->vm_destroy(kvm); |
| 8155 | kvm_iommu_unmap_guest(kvm); | 8155 | kvm_iommu_unmap_guest(kvm); |
| 8156 | kfree(kvm->arch.vpic); | 8156 | kvm_pic_destroy(kvm); |
| 8157 | kfree(kvm->arch.vioapic); | 8157 | kvm_ioapic_destroy(kvm); |
| 8158 | kvm_free_vcpus(kvm); | 8158 | kvm_free_vcpus(kvm); |
| 8159 | kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); | 8159 | kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); |
| 8160 | kvm_mmu_uninit_vm(kvm); | 8160 | kvm_mmu_uninit_vm(kvm); |
| 8161 | kvm_page_track_cleanup(kvm); | ||
| 8161 | } | 8162 | } |
| 8162 | 8163 | ||
| 8163 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, | 8164 | void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, |
| @@ -8566,11 +8567,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, | |||
| 8566 | { | 8567 | { |
| 8567 | struct x86_exception fault; | 8568 | struct x86_exception fault; |
| 8568 | 8569 | ||
| 8569 | trace_kvm_async_pf_ready(work->arch.token, work->gva); | ||
| 8570 | if (work->wakeup_all) | 8570 | if (work->wakeup_all) |
| 8571 | work->arch.token = ~0; /* broadcast wakeup */ | 8571 | work->arch.token = ~0; /* broadcast wakeup */ |
| 8572 | else | 8572 | else |
| 8573 | kvm_del_async_pf_gfn(vcpu, work->arch.gfn); | 8573 | kvm_del_async_pf_gfn(vcpu, work->arch.gfn); |
| 8574 | trace_kvm_async_pf_ready(work->arch.token, work->gva); | ||
| 8574 | 8575 | ||
| 8575 | if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && | 8576 | if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && |
| 8576 | !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { | 8577 | !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { |
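
Two independent x86.c fixes sit here. kvm_arch_destroy_vm() now goes through kvm_pic_destroy() and kvm_ioapic_destroy(), so the irqchip devices are unregistered from the I/O bus rather than just kfree()d, and the added kvm_page_track_cleanup() call releases the page-tracking state set up when the VM was created. In the async page fault path, the tracepoint moves below the wakeup_all check so that a broadcast wakeup is logged with the ~0 token it actually delivers. A minimal model of that ordering, with illustrative names:

    /* Sketch: trace only after the token is finalized, so the logged
     * token matches what the guest will see. Names are illustrative. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct work { uint32_t token; bool wakeup_all; };

    static void trace_ready(uint32_t token)
    {
            printf("ready: token=%#x\n", token);
    }

    static void page_ready(struct work *w)
    {
            if (w->wakeup_all)
                    w->token = ~0u;    /* broadcast wakeup */
            trace_ready(w->token);     /* was traced before the fixup */
    }

    int main(void)
    {
            struct work w = { .token = 0x1234, .wakeup_all = true };

            page_ready(&w);            /* logs ~0, not the stale 0x1234 */
            return 0;
    }
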
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 8d63d7a104c3..4c90cfdc128b 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | #define DISABLE_BRANCH_PROFILING | ||
| 1 | #define pr_fmt(fmt) "kasan: " fmt | 2 | #define pr_fmt(fmt) "kasan: " fmt |
| 2 | #include <linux/bootmem.h> | 3 | #include <linux/bootmem.h> |
| 3 | #include <linux/kasan.h> | 4 | #include <linux/kasan.h> |
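
The kasan_init_64.c change is ordering-sensitive: DISABLE_BRANCH_PROFILING has to be defined before the first #include so that likely()/unlikely() used in this file expand to plain __builtin_expect() rather than the instrumented branch-profiling variants, which are not safe to run this early in KASAN setup. A toy model of the header-guard mechanism this relies on (the "header" section below imitates, but is not, the kernel's compiler.h):

    /* Toy model: the guard only works if it is defined before the
     * header-like block below is processed. */
    #define DISABLE_BRANCH_PROFILING

    #include <stdio.h>

    /* --- stand-in for linux/compiler.h --- */
    #ifdef DISABLE_BRANCH_PROFILING
    #define likely(x)   __builtin_expect(!!(x), 1)
    #else
    #define likely(x)   (trace_branch(__FILE__, __LINE__, !!(x)))
    #endif
    /* ------------------------------------- */

    int main(void)
    {
            if (likely(1))
                    puts("plain __builtin_expect, no profiling hook");
            return 0;
    }
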
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index 5126dfd52b18..cd44ae727df7 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c | |||
| @@ -590,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm, | |||
| 590 | * we might run off the end of the bounds table if we are on | 590 | * we might run off the end of the bounds table if we are on |
| 591 | * a 64-bit kernel and try to get 8 bytes. | 591 | * a 64-bit kernel and try to get 8 bytes. |
| 592 | */ | 592 | */ |
| 593 | int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret, | 593 | static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret, |
| 594 | long __user *bd_entry_ptr) | 594 | long __user *bd_entry_ptr) |
| 595 | { | 595 | { |
| 596 | u32 bd_entry_32; | 596 | u32 bd_entry_32; |
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile index a7dbec4dce27..3dbde04febdc 100644 --- a/arch/x86/platform/intel-mid/device_libs/Makefile +++ b/arch/x86/platform/intel-mid/device_libs/Makefile | |||
| @@ -26,5 +26,6 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o | |||
| 26 | obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o | 26 | obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o |
| 27 | # MISC Devices | 27 | # MISC Devices |
| 28 | obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o | 28 | obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o |
| 29 | obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o | ||
| 29 | obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o | 30 | obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o |
| 30 | obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o | 31 | obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o |
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c new file mode 100644 index 000000000000..a6c3705a28ad --- /dev/null +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c | |||
| @@ -0,0 +1,82 @@ | |||
| 1 | /* | ||
| 2 | * Intel Merrifield power button support | ||
| 3 | * | ||
| 4 | * (C) Copyright 2017 Intel Corporation | ||
| 5 | * | ||
| 6 | * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | ||
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or | ||
| 9 | * modify it under the terms of the GNU General Public License | ||
| 10 | * as published by the Free Software Foundation; version 2 | ||
| 11 | * of the License. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/init.h> | ||
| 15 | #include <linux/ioport.h> | ||
| 16 | #include <linux/platform_device.h> | ||
| 17 | #include <linux/sfi.h> | ||
| 18 | |||
| 19 | #include <asm/intel-mid.h> | ||
| 20 | #include <asm/intel_scu_ipc.h> | ||
| 21 | |||
| 22 | static struct resource mrfld_power_btn_resources[] = { | ||
| 23 | { | ||
| 24 | .flags = IORESOURCE_IRQ, | ||
| 25 | }, | ||
| 26 | }; | ||
| 27 | |||
| 28 | static struct platform_device mrfld_power_btn_dev = { | ||
| 29 | .name = "msic_power_btn", | ||
| 30 | .id = PLATFORM_DEVID_NONE, | ||
| 31 | .num_resources = ARRAY_SIZE(mrfld_power_btn_resources), | ||
| 32 | .resource = mrfld_power_btn_resources, | ||
| 33 | }; | ||
| 34 | |||
| 35 | static int mrfld_power_btn_scu_status_change(struct notifier_block *nb, | ||
| 36 | unsigned long code, void *data) | ||
| 37 | { | ||
| 38 | if (code == SCU_DOWN) { | ||
| 39 | platform_device_unregister(&mrfld_power_btn_dev); | ||
| 40 | return 0; | ||
| 41 | } | ||
| 42 | |||
| 43 | return platform_device_register(&mrfld_power_btn_dev); | ||
| 44 | } | ||
| 45 | |||
| 46 | static struct notifier_block mrfld_power_btn_scu_notifier = { | ||
| 47 | .notifier_call = mrfld_power_btn_scu_status_change, | ||
| 48 | }; | ||
| 49 | |||
| 50 | static int __init register_mrfld_power_btn(void) | ||
| 51 | { | ||
| 52 | if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) | ||
| 53 | return -ENODEV; | ||
| 54 | |||
| 55 | /* | ||
| 56 | * We need to be sure that the SCU IPC is ready before | ||
| 57 | * PMIC power button device can be registered: | ||
| 58 | */ | ||
| 59 | intel_scu_notifier_add(&mrfld_power_btn_scu_notifier); | ||
| 60 | |||
| 61 | return 0; | ||
| 62 | } | ||
| 63 | arch_initcall(register_mrfld_power_btn); | ||
| 64 | |||
| 65 | static void __init *mrfld_power_btn_platform_data(void *info) | ||
| 66 | { | ||
| 67 | struct resource *res = mrfld_power_btn_resources; | ||
| 68 | struct sfi_device_table_entry *pentry = info; | ||
| 69 | |||
| 70 | res->start = res->end = pentry->irq; | ||
| 71 | return NULL; | ||
| 72 | } | ||
| 73 | |||
| 74 | static const struct devs_id mrfld_power_btn_dev_id __initconst = { | ||
| 75 | .name = "bcove_power_btn", | ||
| 76 | .type = SFI_DEV_TYPE_IPC, | ||
| 77 | .delay = 1, | ||
| 78 | .msic = 1, | ||
| 79 | .get_platform_data = &mrfld_power_btn_platform_data, | ||
| 80 | }; | ||
| 81 | |||
| 82 | sfi_device(mrfld_power_btn_dev_id); | ||
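
The new file follows a common intel-mid pattern: the platform device cannot usefully exist until the SCU (the firmware-side system controller) has come up, so registration is driven from an SCU notifier instead of being done unconditionally at initcall time, and the device is torn down again if the SCU goes away. A standalone model of that lifecycle, with invented names:

    /* Standalone model of the SCU notifier dance above: the platform
     * device only exists while the SCU firmware is up. */
    #include <stdio.h>

    enum scu_event { SCU_UP, SCU_DOWN };

    static int registered;

    static int power_btn_scu_notify(enum scu_event code)
    {
            if (code == SCU_DOWN) {
                    registered = 0;    /* platform_device_unregister() */
                    return 0;
            }
            registered = 1;            /* platform_device_register() */
            return 0;
    }

    int main(void)
    {
            power_btn_scu_notify(SCU_UP);
            printf("after SCU_UP:   registered=%d\n", registered);
            power_btn_scu_notify(SCU_DOWN);
            printf("after SCU_DOWN: registered=%d\n", registered);
            return 0;
    }
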
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c index 86edd1e941eb..9e304e2ea4f5 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | #include <asm/intel_scu_ipc.h> | 19 | #include <asm/intel_scu_ipc.h> |
| 20 | #include <asm/io_apic.h> | 20 | #include <asm/io_apic.h> |
| 21 | 21 | ||
| 22 | #define TANGIER_EXT_TIMER0_MSI 15 | 22 | #define TANGIER_EXT_TIMER0_MSI 12 |
| 23 | 23 | ||
| 24 | static struct platform_device wdt_dev = { | 24 | static struct platform_device wdt_dev = { |
| 25 | .name = "intel_mid_wdt", | 25 | .name = "intel_mid_wdt", |
diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c index e793fe509971..e42978d4deaf 100644 --- a/arch/x86/platform/intel-mid/mfld.c +++ b/arch/x86/platform/intel-mid/mfld.c | |||
| @@ -17,16 +17,6 @@ | |||
| 17 | 17 | ||
| 18 | #include "intel_mid_weak_decls.h" | 18 | #include "intel_mid_weak_decls.h" |
| 19 | 19 | ||
| 20 | static void penwell_arch_setup(void); | ||
| 21 | /* penwell arch ops */ | ||
| 22 | static struct intel_mid_ops penwell_ops = { | ||
| 23 | .arch_setup = penwell_arch_setup, | ||
| 24 | }; | ||
| 25 | |||
| 26 | static void mfld_power_off(void) | ||
| 27 | { | ||
| 28 | } | ||
| 29 | |||
| 30 | static unsigned long __init mfld_calibrate_tsc(void) | 20 | static unsigned long __init mfld_calibrate_tsc(void) |
| 31 | { | 21 | { |
| 32 | unsigned long fast_calibrate; | 22 | unsigned long fast_calibrate; |
| @@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void) | |||
| 63 | static void __init penwell_arch_setup(void) | 53 | static void __init penwell_arch_setup(void) |
| 64 | { | 54 | { |
| 65 | x86_platform.calibrate_tsc = mfld_calibrate_tsc; | 55 | x86_platform.calibrate_tsc = mfld_calibrate_tsc; |
| 66 | pm_power_off = mfld_power_off; | ||
| 67 | } | 56 | } |
| 68 | 57 | ||
| 58 | static struct intel_mid_ops penwell_ops = { | ||
| 59 | .arch_setup = penwell_arch_setup, | ||
| 60 | }; | ||
| 61 | |||
| 69 | void *get_penwell_ops(void) | 62 | void *get_penwell_ops(void) |
| 70 | { | 63 | { |
| 71 | return &penwell_ops; | 64 | return &penwell_ops; |
diff --git a/block/blk-mq.c b/block/blk-mq.c index a4546f060e80..08a49c69738b 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
| @@ -697,17 +697,8 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, | |||
| 697 | { | 697 | { |
| 698 | struct blk_mq_timeout_data *data = priv; | 698 | struct blk_mq_timeout_data *data = priv; |
| 699 | 699 | ||
| 700 | if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { | 700 | if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) |
| 701 | /* | ||
| 702 | * If a request wasn't started before the queue was | ||
| 703 | * marked dying, kill it here or it'll go unnoticed. | ||
| 704 | */ | ||
| 705 | if (unlikely(blk_queue_dying(rq->q))) { | ||
| 706 | rq->errors = -EIO; | ||
| 707 | blk_mq_end_request(rq, rq->errors); | ||
| 708 | } | ||
| 709 | return; | 701 | return; |
| 710 | } | ||
| 711 | 702 | ||
| 712 | if (time_after_eq(jiffies, rq->deadline)) { | 703 | if (time_after_eq(jiffies, rq->deadline)) { |
| 713 | if (!blk_mark_rq_complete(rq)) | 704 | if (!blk_mark_rq_complete(rq)) |
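
The removed blk-mq block had the timeout walker force-complete never-started requests with -EIO when the queue was dying. That could race with the path that actually owns such a request and complete it twice; after this change, requests without REQ_ATOM_STARTED are simply skipped and left to their owner. Sketch of the simplified expiry check (fields simplified, not the kernel structs):

    /* Sketch: unstarted requests are skipped outright instead of
     * being force-failed on a dying queue. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct req { bool started; time_t deadline; };

    static void check_expired(struct req *rq, time_t now)
    {
            if (!rq->started)
                    return;             /* owner will clean it up */

            if (now >= rq->deadline)
                    puts("timing out started request");
    }

    int main(void)
    {
            struct req rq = { .started = false, .deadline = 0 };

            check_expired(&rq, time(NULL)); /* no forced -EIO completion */
            puts("unstarted request left for its owner");
            return 0;
    }
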
diff --git a/block/blk-stat.c b/block/blk-stat.c index 9b43efb8933f..186fcb981e9b 100644 --- a/block/blk-stat.c +++ b/block/blk-stat.c | |||
| @@ -30,11 +30,11 @@ static void blk_stat_flush_batch(struct blk_rq_stat *stat) | |||
| 30 | 30 | ||
| 31 | static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src) | 31 | static void blk_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src) |
| 32 | { | 32 | { |
| 33 | blk_stat_flush_batch(src); | ||
| 34 | |||
| 33 | if (!src->nr_samples) | 35 | if (!src->nr_samples) |
| 34 | return; | 36 | return; |
| 35 | 37 | ||
| 36 | blk_stat_flush_batch(src); | ||
| 37 | |||
| 38 | dst->min = min(dst->min, src->min); | 38 | dst->min = min(dst->min, src->min); |
| 39 | dst->max = max(dst->max, src->max); | 39 | dst->max = max(dst->max, src->max); |
| 40 | 40 | ||
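
The blk-stat fix is a pure ordering bug: blk_stat_sum() tested src->nr_samples before draining the batch buffer, so a window whose samples were all still sitting in the batch looked empty and its statistics were thrown away. Flushing first lets the emptiness check see the real count. A standalone model with simplified fields:

    #include <stdio.h>

    struct stat { int nr_samples; int nr_batch; };

    static void flush_batch(struct stat *s)
    {
            s->nr_samples += s->nr_batch;
            s->nr_batch = 0;
    }

    /* Fixed ordering: drain the batch before testing for emptiness. */
    static void stat_sum(struct stat *dst, struct stat *src)
    {
            flush_batch(src);
            if (!src->nr_samples)
                    return;
            dst->nr_samples += src->nr_samples;
    }

    int main(void)
    {
            struct stat dst = { 0, 0 }, src = { 0, 5 };

            stat_sum(&dst, &src);
            printf("dst.nr_samples = %d\n", dst.nr_samples); /* 5, not 0 */
            return 0;
    }
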
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 4467a8089ab8..0143135b3abe 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c | |||
| @@ -182,11 +182,6 @@ int __weak arch_register_cpu(int cpu) | |||
| 182 | 182 | ||
| 183 | void __weak arch_unregister_cpu(int cpu) {} | 183 | void __weak arch_unregister_cpu(int cpu) {} |
| 184 | 184 | ||
| 185 | int __weak acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) | ||
| 186 | { | ||
| 187 | return -ENODEV; | ||
| 188 | } | ||
| 189 | |||
| 190 | static int acpi_processor_hotadd_init(struct acpi_processor *pr) | 185 | static int acpi_processor_hotadd_init(struct acpi_processor *pr) |
| 191 | { | 186 | { |
| 192 | unsigned long long sta; | 187 | unsigned long long sta; |
| @@ -285,6 +280,13 @@ static int acpi_processor_get_info(struct acpi_device *device) | |||
| 285 | pr->acpi_id = value; | 280 | pr->acpi_id = value; |
| 286 | } | 281 | } |
| 287 | 282 | ||
| 283 | if (acpi_duplicate_processor_id(pr->acpi_id)) { | ||
| 284 | dev_err(&device->dev, | ||
| 285 | "Failed to get unique processor _UID (0x%x)\n", | ||
| 286 | pr->acpi_id); | ||
| 287 | return -ENODEV; | ||
| 288 | } | ||
| 289 | |||
| 288 | pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration, | 290 | pr->phys_id = acpi_get_phys_id(pr->handle, device_declaration, |
| 289 | pr->acpi_id); | 291 | pr->acpi_id); |
| 290 | if (invalid_phys_cpuid(pr->phys_id)) | 292 | if (invalid_phys_cpuid(pr->phys_id)) |
| @@ -585,7 +587,7 @@ static struct acpi_scan_handler processor_container_handler = { | |||
| 585 | static int nr_unique_ids __initdata; | 587 | static int nr_unique_ids __initdata; |
| 586 | 588 | ||
| 587 | /* The number of the duplicate processor IDs */ | 589 | /* The number of the duplicate processor IDs */ |
| 588 | static int nr_duplicate_ids __initdata; | 590 | static int nr_duplicate_ids; |
| 589 | 591 | ||
| 590 | /* Used to store the unique processor IDs */ | 592 | /* Used to store the unique processor IDs */ |
| 591 | static int unique_processor_ids[] __initdata = { | 593 | static int unique_processor_ids[] __initdata = { |
| @@ -593,7 +595,7 @@ static int unique_processor_ids[] __initdata = { | |||
| 593 | }; | 595 | }; |
| 594 | 596 | ||
| 595 | /* Used to store the duplicate processor IDs */ | 597 | /* Used to store the duplicate processor IDs */ |
| 596 | static int duplicate_processor_ids[] __initdata = { | 598 | static int duplicate_processor_ids[] = { |
| 597 | [0 ... NR_CPUS - 1] = -1, | 599 | [0 ... NR_CPUS - 1] = -1, |
| 598 | }; | 600 | }; |
| 599 | 601 | ||
| @@ -638,28 +640,53 @@ static acpi_status __init acpi_processor_ids_walk(acpi_handle handle, | |||
| 638 | void **rv) | 640 | void **rv) |
| 639 | { | 641 | { |
| 640 | acpi_status status; | 642 | acpi_status status; |
| 643 | acpi_object_type acpi_type; | ||
| 644 | unsigned long long uid; | ||
| 641 | union acpi_object object = { 0 }; | 645 | union acpi_object object = { 0 }; |
| 642 | struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; | 646 | struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; |
| 643 | 647 | ||
| 644 | status = acpi_evaluate_object(handle, NULL, NULL, &buffer); | 648 | status = acpi_get_type(handle, &acpi_type); |
| 645 | if (ACPI_FAILURE(status)) | 649 | if (ACPI_FAILURE(status)) |
| 646 | acpi_handle_info(handle, "Not get the processor object\n"); | 650 | return false; |
| 647 | else | 651 | |
| 648 | processor_validated_ids_update(object.processor.proc_id); | 652 | switch (acpi_type) { |
| 653 | case ACPI_TYPE_PROCESSOR: | ||
| 654 | status = acpi_evaluate_object(handle, NULL, NULL, &buffer); | ||
| 655 | if (ACPI_FAILURE(status)) | ||
| 656 | goto err; | ||
| 657 | uid = object.processor.proc_id; | ||
| 658 | break; | ||
| 659 | |||
| 660 | case ACPI_TYPE_DEVICE: | ||
| 661 | status = acpi_evaluate_integer(handle, "_UID", NULL, &uid); | ||
| 662 | if (ACPI_FAILURE(status)) | ||
| 663 | goto err; | ||
| 664 | break; | ||
| 665 | default: | ||
| 666 | goto err; | ||
| 667 | } | ||
| 668 | |||
| 669 | processor_validated_ids_update(uid); | ||
| 670 | return true; | ||
| 671 | |||
| 672 | err: | ||
| 673 | acpi_handle_info(handle, "Invalid processor object\n"); | ||
| 674 | return false; | ||
| 649 | 675 | ||
| 650 | return AE_OK; | ||
| 651 | } | 676 | } |
| 652 | 677 | ||
| 653 | static void __init acpi_processor_check_duplicates(void) | 678 | void __init acpi_processor_check_duplicates(void) |
| 654 | { | 679 | { |
| 655 | /* Search all processor nodes in ACPI namespace */ | 680 | /* check the correctness for all processors in ACPI namespace */ |
| 656 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, | 681 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, |
| 657 | ACPI_UINT32_MAX, | 682 | ACPI_UINT32_MAX, |
| 658 | acpi_processor_ids_walk, | 683 | acpi_processor_ids_walk, |
| 659 | NULL, NULL, NULL); | 684 | NULL, NULL, NULL); |
| 685 | acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, acpi_processor_ids_walk, | ||
| 686 | NULL, NULL); | ||
| 660 | } | 687 | } |
| 661 | 688 | ||
| 662 | bool __init acpi_processor_validate_proc_id(int proc_id) | 689 | bool acpi_duplicate_processor_id(int proc_id) |
| 663 | { | 690 | { |
| 664 | int i; | 691 | int i; |
| 665 | 692 | ||
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 80cb5eb75b63..34fbe027e73a 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
| @@ -1249,7 +1249,6 @@ static int __init acpi_init(void) | |||
| 1249 | acpi_wakeup_device_init(); | 1249 | acpi_wakeup_device_init(); |
| 1250 | acpi_debugger_init(); | 1250 | acpi_debugger_init(); |
| 1251 | acpi_setup_sb_notify_handler(); | 1251 | acpi_setup_sb_notify_handler(); |
| 1252 | acpi_set_processor_mapping(); | ||
| 1253 | return 0; | 1252 | return 0; |
| 1254 | } | 1253 | } |
| 1255 | 1254 | ||
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 611a5585a902..b933061b6b60 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
| @@ -32,12 +32,12 @@ static struct acpi_table_madt *get_madt_table(void) | |||
| 32 | } | 32 | } |
| 33 | 33 | ||
| 34 | static int map_lapic_id(struct acpi_subtable_header *entry, | 34 | static int map_lapic_id(struct acpi_subtable_header *entry, |
| 35 | u32 acpi_id, phys_cpuid_t *apic_id, bool ignore_disabled) | 35 | u32 acpi_id, phys_cpuid_t *apic_id) |
| 36 | { | 36 | { |
| 37 | struct acpi_madt_local_apic *lapic = | 37 | struct acpi_madt_local_apic *lapic = |
| 38 | container_of(entry, struct acpi_madt_local_apic, header); | 38 | container_of(entry, struct acpi_madt_local_apic, header); |
| 39 | 39 | ||
| 40 | if (ignore_disabled && !(lapic->lapic_flags & ACPI_MADT_ENABLED)) | 40 | if (!(lapic->lapic_flags & ACPI_MADT_ENABLED)) |
| 41 | return -ENODEV; | 41 | return -ENODEV; |
| 42 | 42 | ||
| 43 | if (lapic->processor_id != acpi_id) | 43 | if (lapic->processor_id != acpi_id) |
| @@ -48,13 +48,12 @@ static int map_lapic_id(struct acpi_subtable_header *entry, | |||
| 48 | } | 48 | } |
| 49 | 49 | ||
| 50 | static int map_x2apic_id(struct acpi_subtable_header *entry, | 50 | static int map_x2apic_id(struct acpi_subtable_header *entry, |
| 51 | int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id, | 51 | int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id) |
| 52 | bool ignore_disabled) | ||
| 53 | { | 52 | { |
| 54 | struct acpi_madt_local_x2apic *apic = | 53 | struct acpi_madt_local_x2apic *apic = |
| 55 | container_of(entry, struct acpi_madt_local_x2apic, header); | 54 | container_of(entry, struct acpi_madt_local_x2apic, header); |
| 56 | 55 | ||
| 57 | if (ignore_disabled && !(apic->lapic_flags & ACPI_MADT_ENABLED)) | 56 | if (!(apic->lapic_flags & ACPI_MADT_ENABLED)) |
| 58 | return -ENODEV; | 57 | return -ENODEV; |
| 59 | 58 | ||
| 60 | if (device_declaration && (apic->uid == acpi_id)) { | 59 | if (device_declaration && (apic->uid == acpi_id)) { |
| @@ -66,13 +65,12 @@ static int map_x2apic_id(struct acpi_subtable_header *entry, | |||
| 66 | } | 65 | } |
| 67 | 66 | ||
| 68 | static int map_lsapic_id(struct acpi_subtable_header *entry, | 67 | static int map_lsapic_id(struct acpi_subtable_header *entry, |
| 69 | int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id, | 68 | int device_declaration, u32 acpi_id, phys_cpuid_t *apic_id) |
| 70 | bool ignore_disabled) | ||
| 71 | { | 69 | { |
| 72 | struct acpi_madt_local_sapic *lsapic = | 70 | struct acpi_madt_local_sapic *lsapic = |
| 73 | container_of(entry, struct acpi_madt_local_sapic, header); | 71 | container_of(entry, struct acpi_madt_local_sapic, header); |
| 74 | 72 | ||
| 75 | if (ignore_disabled && !(lsapic->lapic_flags & ACPI_MADT_ENABLED)) | 73 | if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED)) |
| 76 | return -ENODEV; | 74 | return -ENODEV; |
| 77 | 75 | ||
| 78 | if (device_declaration) { | 76 | if (device_declaration) { |
| @@ -89,13 +87,12 @@ static int map_lsapic_id(struct acpi_subtable_header *entry, | |||
| 89 | * Retrieve the ARM CPU physical identifier (MPIDR) | 87 | * Retrieve the ARM CPU physical identifier (MPIDR) |
| 90 | */ | 88 | */ |
| 91 | static int map_gicc_mpidr(struct acpi_subtable_header *entry, | 89 | static int map_gicc_mpidr(struct acpi_subtable_header *entry, |
| 92 | int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr, | 90 | int device_declaration, u32 acpi_id, phys_cpuid_t *mpidr) |
| 93 | bool ignore_disabled) | ||
| 94 | { | 91 | { |
| 95 | struct acpi_madt_generic_interrupt *gicc = | 92 | struct acpi_madt_generic_interrupt *gicc = |
| 96 | container_of(entry, struct acpi_madt_generic_interrupt, header); | 93 | container_of(entry, struct acpi_madt_generic_interrupt, header); |
| 97 | 94 | ||
| 98 | if (ignore_disabled && !(gicc->flags & ACPI_MADT_ENABLED)) | 95 | if (!(gicc->flags & ACPI_MADT_ENABLED)) |
| 99 | return -ENODEV; | 96 | return -ENODEV; |
| 100 | 97 | ||
| 101 | /* device_declaration means Device object in DSDT, in the | 98 | /* device_declaration means Device object in DSDT, in the |
| @@ -112,7 +109,7 @@ static int map_gicc_mpidr(struct acpi_subtable_header *entry, | |||
| 112 | } | 109 | } |
| 113 | 110 | ||
| 114 | static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt, | 111 | static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt, |
| 115 | int type, u32 acpi_id, bool ignore_disabled) | 112 | int type, u32 acpi_id) |
| 116 | { | 113 | { |
| 117 | unsigned long madt_end, entry; | 114 | unsigned long madt_end, entry; |
| 118 | phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */ | 115 | phys_cpuid_t phys_id = PHYS_CPUID_INVALID; /* CPU hardware ID */ |
| @@ -130,20 +127,16 @@ static phys_cpuid_t map_madt_entry(struct acpi_table_madt *madt, | |||
| 130 | struct acpi_subtable_header *header = | 127 | struct acpi_subtable_header *header = |
| 131 | (struct acpi_subtable_header *)entry; | 128 | (struct acpi_subtable_header *)entry; |
| 132 | if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { | 129 | if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { |
| 133 | if (!map_lapic_id(header, acpi_id, &phys_id, | 130 | if (!map_lapic_id(header, acpi_id, &phys_id)) |
| 134 | ignore_disabled)) | ||
| 135 | break; | 131 | break; |
| 136 | } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) { | 132 | } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) { |
| 137 | if (!map_x2apic_id(header, type, acpi_id, &phys_id, | 133 | if (!map_x2apic_id(header, type, acpi_id, &phys_id)) |
| 138 | ignore_disabled)) | ||
| 139 | break; | 134 | break; |
| 140 | } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { | 135 | } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { |
| 141 | if (!map_lsapic_id(header, type, acpi_id, &phys_id, | 136 | if (!map_lsapic_id(header, type, acpi_id, &phys_id)) |
| 142 | ignore_disabled)) | ||
| 143 | break; | 137 | break; |
| 144 | } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) { | 138 | } else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) { |
| 145 | if (!map_gicc_mpidr(header, type, acpi_id, &phys_id, | 139 | if (!map_gicc_mpidr(header, type, acpi_id, &phys_id)) |
| 146 | ignore_disabled)) | ||
| 147 | break; | 140 | break; |
| 148 | } | 141 | } |
| 149 | entry += header->length; | 142 | entry += header->length; |
| @@ -161,15 +154,14 @@ phys_cpuid_t __init acpi_map_madt_entry(u32 acpi_id) | |||
| 161 | if (!madt) | 154 | if (!madt) |
| 162 | return PHYS_CPUID_INVALID; | 155 | return PHYS_CPUID_INVALID; |
| 163 | 156 | ||
| 164 | rv = map_madt_entry(madt, 1, acpi_id, true); | 157 | rv = map_madt_entry(madt, 1, acpi_id); |
| 165 | 158 | ||
| 166 | acpi_put_table((struct acpi_table_header *)madt); | 159 | acpi_put_table((struct acpi_table_header *)madt); |
| 167 | 160 | ||
| 168 | return rv; | 161 | return rv; |
| 169 | } | 162 | } |
| 170 | 163 | ||
| 171 | static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id, | 164 | static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id) |
| 172 | bool ignore_disabled) | ||
| 173 | { | 165 | { |
| 174 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 166 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
| 175 | union acpi_object *obj; | 167 | union acpi_object *obj; |
| @@ -190,38 +182,30 @@ static phys_cpuid_t map_mat_entry(acpi_handle handle, int type, u32 acpi_id, | |||
| 190 | 182 | ||
| 191 | header = (struct acpi_subtable_header *)obj->buffer.pointer; | 183 | header = (struct acpi_subtable_header *)obj->buffer.pointer; |
| 192 | if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) | 184 | if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) |
| 193 | map_lapic_id(header, acpi_id, &phys_id, ignore_disabled); | 185 | map_lapic_id(header, acpi_id, &phys_id); |
| 194 | else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) | 186 | else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) |
| 195 | map_lsapic_id(header, type, acpi_id, &phys_id, ignore_disabled); | 187 | map_lsapic_id(header, type, acpi_id, &phys_id); |
| 196 | else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) | 188 | else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) |
| 197 | map_x2apic_id(header, type, acpi_id, &phys_id, ignore_disabled); | 189 | map_x2apic_id(header, type, acpi_id, &phys_id); |
| 198 | else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) | 190 | else if (header->type == ACPI_MADT_TYPE_GENERIC_INTERRUPT) |
| 199 | map_gicc_mpidr(header, type, acpi_id, &phys_id, | 191 | map_gicc_mpidr(header, type, acpi_id, &phys_id); |
| 200 | ignore_disabled); | ||
| 201 | 192 | ||
| 202 | exit: | 193 | exit: |
| 203 | kfree(buffer.pointer); | 194 | kfree(buffer.pointer); |
| 204 | return phys_id; | 195 | return phys_id; |
| 205 | } | 196 | } |
| 206 | 197 | ||
| 207 | static phys_cpuid_t __acpi_get_phys_id(acpi_handle handle, int type, | 198 | phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) |
| 208 | u32 acpi_id, bool ignore_disabled) | ||
| 209 | { | 199 | { |
| 210 | phys_cpuid_t phys_id; | 200 | phys_cpuid_t phys_id; |
| 211 | 201 | ||
| 212 | phys_id = map_mat_entry(handle, type, acpi_id, ignore_disabled); | 202 | phys_id = map_mat_entry(handle, type, acpi_id); |
| 213 | if (invalid_phys_cpuid(phys_id)) | 203 | if (invalid_phys_cpuid(phys_id)) |
| 214 | phys_id = map_madt_entry(get_madt_table(), type, acpi_id, | 204 | phys_id = map_madt_entry(get_madt_table(), type, acpi_id); |
| 215 | ignore_disabled); | ||
| 216 | 205 | ||
| 217 | return phys_id; | 206 | return phys_id; |
| 218 | } | 207 | } |
| 219 | 208 | ||
| 220 | phys_cpuid_t acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) | ||
| 221 | { | ||
| 222 | return __acpi_get_phys_id(handle, type, acpi_id, true); | ||
| 223 | } | ||
| 224 | |||
| 225 | int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id) | 209 | int acpi_map_cpuid(phys_cpuid_t phys_id, u32 acpi_id) |
| 226 | { | 210 | { |
| 227 | #ifdef CONFIG_SMP | 211 | #ifdef CONFIG_SMP |
| @@ -278,79 +262,6 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) | |||
| 278 | } | 262 | } |
| 279 | EXPORT_SYMBOL_GPL(acpi_get_cpuid); | 263 | EXPORT_SYMBOL_GPL(acpi_get_cpuid); |
| 280 | 264 | ||
| 281 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | ||
| 282 | static bool __init | ||
| 283 | map_processor(acpi_handle handle, phys_cpuid_t *phys_id, int *cpuid) | ||
| 284 | { | ||
| 285 | int type, id; | ||
| 286 | u32 acpi_id; | ||
| 287 | acpi_status status; | ||
| 288 | acpi_object_type acpi_type; | ||
| 289 | unsigned long long tmp; | ||
| 290 | union acpi_object object = { 0 }; | ||
| 291 | struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; | ||
| 292 | |||
| 293 | status = acpi_get_type(handle, &acpi_type); | ||
| 294 | if (ACPI_FAILURE(status)) | ||
| 295 | return false; | ||
| 296 | |||
| 297 | switch (acpi_type) { | ||
| 298 | case ACPI_TYPE_PROCESSOR: | ||
| 299 | status = acpi_evaluate_object(handle, NULL, NULL, &buffer); | ||
| 300 | if (ACPI_FAILURE(status)) | ||
| 301 | return false; | ||
| 302 | acpi_id = object.processor.proc_id; | ||
| 303 | |||
| 304 | /* validate the acpi_id */ | ||
| 305 | if(acpi_processor_validate_proc_id(acpi_id)) | ||
| 306 | return false; | ||
| 307 | break; | ||
| 308 | case ACPI_TYPE_DEVICE: | ||
| 309 | status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp); | ||
| 310 | if (ACPI_FAILURE(status)) | ||
| 311 | return false; | ||
| 312 | acpi_id = tmp; | ||
| 313 | break; | ||
| 314 | default: | ||
| 315 | return false; | ||
| 316 | } | ||
| 317 | |||
| 318 | type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0; | ||
| 319 | |||
| 320 | *phys_id = __acpi_get_phys_id(handle, type, acpi_id, false); | ||
| 321 | id = acpi_map_cpuid(*phys_id, acpi_id); | ||
| 322 | |||
| 323 | if (id < 0) | ||
| 324 | return false; | ||
| 325 | *cpuid = id; | ||
| 326 | return true; | ||
| 327 | } | ||
| 328 | |||
| 329 | static acpi_status __init | ||
| 330 | set_processor_node_mapping(acpi_handle handle, u32 lvl, void *context, | ||
| 331 | void **rv) | ||
| 332 | { | ||
| 333 | phys_cpuid_t phys_id; | ||
| 334 | int cpu_id; | ||
| 335 | |||
| 336 | if (!map_processor(handle, &phys_id, &cpu_id)) | ||
| 337 | return AE_ERROR; | ||
| 338 | |||
| 339 | acpi_map_cpu2node(handle, cpu_id, phys_id); | ||
| 340 | return AE_OK; | ||
| 341 | } | ||
| 342 | |||
| 343 | void __init acpi_set_processor_mapping(void) | ||
| 344 | { | ||
| 345 | /* Set persistent cpu <-> node mapping for all processors. */ | ||
| 346 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, | ||
| 347 | ACPI_UINT32_MAX, set_processor_node_mapping, | ||
| 348 | NULL, NULL, NULL); | ||
| 349 | } | ||
| 350 | #else | ||
| 351 | void __init acpi_set_processor_mapping(void) {} | ||
| 352 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ | ||
| 353 | |||
| 354 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC | 265 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC |
| 355 | static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base, | 266 | static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base, |
| 356 | u64 *phys_addr, int *ioapic_id) | 267 | u64 *phys_addr, int *ioapic_id) |
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index 01c94669a2b0..3afa8c1fa127 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c | |||
| @@ -30,7 +30,7 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h) | |||
| 30 | return true; | 30 | return true; |
| 31 | 31 | ||
| 32 | if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) && | 32 | if (!memcmp(h->oem_table_id, "QDF2400 ", ACPI_OEM_TABLE_ID_SIZE) && |
| 33 | h->oem_revision == 0) | 33 | h->oem_revision == 1) |
| 34 | return true; | 34 | return true; |
| 35 | 35 | ||
| 36 | return false; | 36 | return false; |
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c index bf43b5d2aafc..83f1439e57fd 100644 --- a/drivers/auxdisplay/img-ascii-lcd.c +++ b/drivers/auxdisplay/img-ascii-lcd.c | |||
| @@ -218,6 +218,7 @@ static const struct of_device_id img_ascii_lcd_matches[] = { | |||
| 218 | { .compatible = "img,boston-lcd", .data = &boston_config }, | 218 | { .compatible = "img,boston-lcd", .data = &boston_config }, |
| 219 | { .compatible = "mti,malta-lcd", .data = &malta_config }, | 219 | { .compatible = "mti,malta-lcd", .data = &malta_config }, |
| 220 | { .compatible = "mti,sead3-lcd", .data = &sead3_config }, | 220 | { .compatible = "mti,sead3-lcd", .data = &sead3_config }, |
| 221 | { /* sentinel */ } | ||
| 221 | }; | 222 | }; |
| 222 | 223 | ||
| 223 | /** | 224 | /** |
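
The img-ascii-lcd fix adds the sentinel that every of_device_id table needs: the match loop has no explicit length and stops only at the empty terminator, so without one, matching walks past the end of the array into whatever follows it in memory. A standalone model of sentinel-terminated matching:

    #include <stdio.h>
    #include <string.h>

    struct of_device_id { const char *compatible; };

    static const struct of_device_id matches[] = {
            { .compatible = "img,boston-lcd" },
            { .compatible = "mti,malta-lcd"  },
            { /* sentinel */ }
    };

    static const struct of_device_id *of_match(const char *compat)
    {
            /* No length anywhere: the NULL compatible ends the walk. */
            for (const struct of_device_id *id = matches;
                 id->compatible; id++)
                    if (!strcmp(id->compatible, compat))
                            return id;
            return NULL;
    }

    int main(void)
    {
            printf("match: %s\n", of_match("mti,malta-lcd")->compatible);
            return 0;
    }
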
diff --git a/drivers/base/core.c b/drivers/base/core.c index 684bda4d14a1..6bb60fb6a30b 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
| @@ -639,11 +639,6 @@ int lock_device_hotplug_sysfs(void) | |||
| 639 | return restart_syscall(); | 639 | return restart_syscall(); |
| 640 | } | 640 | } |
| 641 | 641 | ||
| 642 | void assert_held_device_hotplug(void) | ||
| 643 | { | ||
| 644 | lockdep_assert_held(&device_hotplug_lock); | ||
| 645 | } | ||
| 646 | |||
| 647 | #ifdef CONFIG_BLOCK | 642 | #ifdef CONFIG_BLOCK |
| 648 | static inline int device_is_not_partition(struct device *dev) | 643 | static inline int device_is_not_partition(struct device *dev) |
| 649 | { | 644 | { |
diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index c2c14a12713b..08e054507d0b 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig | |||
| @@ -344,7 +344,8 @@ config BT_WILINK | |||
| 344 | 344 | ||
| 345 | config BT_QCOMSMD | 345 | config BT_QCOMSMD |
| 346 | tristate "Qualcomm SMD based HCI support" | 346 | tristate "Qualcomm SMD based HCI support" |
| 347 | depends on (QCOM_SMD && QCOM_WCNSS_CTRL) || COMPILE_TEST | 347 | depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) |
| 348 | depends on QCOM_WCNSS_CTRL || (COMPILE_TEST && QCOM_WCNSS_CTRL=n) | ||
| 348 | select BT_QCA | 349 | select BT_QCA |
| 349 | help | 350 | help |
| 350 | Qualcomm SMD based HCI driver. | 351 | Qualcomm SMD based HCI driver. |
diff --git a/drivers/char/hw_random/amd-rng.c b/drivers/char/hw_random/amd-rng.c index 4a99ac756f08..9959c762da2f 100644 --- a/drivers/char/hw_random/amd-rng.c +++ b/drivers/char/hw_random/amd-rng.c | |||
| @@ -55,6 +55,7 @@ MODULE_DEVICE_TABLE(pci, pci_tbl); | |||
| 55 | struct amd768_priv { | 55 | struct amd768_priv { |
| 56 | void __iomem *iobase; | 56 | void __iomem *iobase; |
| 57 | struct pci_dev *pcidev; | 57 | struct pci_dev *pcidev; |
| 58 | u32 pmbase; | ||
| 58 | }; | 59 | }; |
| 59 | 60 | ||
| 60 | static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) | 61 | static int amd_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait) |
| @@ -148,33 +149,58 @@ found: | |||
| 148 | if (pmbase == 0) | 149 | if (pmbase == 0) |
| 149 | return -EIO; | 150 | return -EIO; |
| 150 | 151 | ||
| 151 | priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); | 152 | priv = kzalloc(sizeof(*priv), GFP_KERNEL); |
| 152 | if (!priv) | 153 | if (!priv) |
| 153 | return -ENOMEM; | 154 | return -ENOMEM; |
| 154 | 155 | ||
| 155 | if (!devm_request_region(&pdev->dev, pmbase + PMBASE_OFFSET, | 156 | if (!request_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE, DRV_NAME)) { |
| 156 | PMBASE_SIZE, DRV_NAME)) { | ||
| 157 | dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", | 157 | dev_err(&pdev->dev, DRV_NAME " region 0x%x already in use!\n", |
| 158 | pmbase + 0xF0); | 158 | pmbase + 0xF0); |
| 159 | return -EBUSY; | 159 | err = -EBUSY; |
| 160 | goto out; | ||
| 160 | } | 161 | } |
| 161 | 162 | ||
| 162 | priv->iobase = devm_ioport_map(&pdev->dev, pmbase + PMBASE_OFFSET, | 163 | priv->iobase = ioport_map(pmbase + PMBASE_OFFSET, PMBASE_SIZE); |
| 163 | PMBASE_SIZE); | ||
| 164 | if (!priv->iobase) { | 164 | if (!priv->iobase) { |
| 165 | pr_err(DRV_NAME "Cannot map ioport\n"); | 165 | pr_err(DRV_NAME "Cannot map ioport\n"); |
| 166 | return -ENOMEM; | 166 | err = -EINVAL; |
| 167 | goto err_iomap; | ||
| 167 | } | 168 | } |
| 168 | 169 | ||
| 169 | amd_rng.priv = (unsigned long)priv; | 170 | amd_rng.priv = (unsigned long)priv; |
| 171 | priv->pmbase = pmbase; | ||
| 170 | priv->pcidev = pdev; | 172 | priv->pcidev = pdev; |
| 171 | 173 | ||
| 172 | pr_info(DRV_NAME " detected\n"); | 174 | pr_info(DRV_NAME " detected\n"); |
| 173 | return devm_hwrng_register(&pdev->dev, &amd_rng); | 175 | err = hwrng_register(&amd_rng); |
| 176 | if (err) { | ||
| 177 | pr_err(DRV_NAME " registering failed (%d)\n", err); | ||
| 178 | goto err_hwrng; | ||
| 179 | } | ||
| 180 | return 0; | ||
| 181 | |||
| 182 | err_hwrng: | ||
| 183 | ioport_unmap(priv->iobase); | ||
| 184 | err_iomap: | ||
| 185 | release_region(pmbase + PMBASE_OFFSET, PMBASE_SIZE); | ||
| 186 | out: | ||
| 187 | kfree(priv); | ||
| 188 | return err; | ||
| 174 | } | 189 | } |
| 175 | 190 | ||
| 176 | static void __exit mod_exit(void) | 191 | static void __exit mod_exit(void) |
| 177 | { | 192 | { |
| 193 | struct amd768_priv *priv; | ||
| 194 | |||
| 195 | priv = (struct amd768_priv *)amd_rng.priv; | ||
| 196 | |||
| 197 | hwrng_unregister(&amd_rng); | ||
| 198 | |||
| 199 | ioport_unmap(priv->iobase); | ||
| 200 | |||
| 201 | release_region(priv->pmbase + PMBASE_OFFSET, PMBASE_SIZE); | ||
| 202 | |||
| 203 | kfree(priv); | ||
| 178 | } | 204 | } |
| 179 | 205 | ||
| 180 | module_init(mod_init); | 206 | module_init(mod_init); |
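
The amd-rng conversion away from devm_* looks like a step backwards but is deliberate: the devm resources were attached to a PCI device this module never binds a driver to, so they would only be released if that device were unbound, not on module unload. Plain request_region()/ioport_map()/hwrng_register() with explicit unwinding gives mod_exit() something concrete to tear down. A sketch of the goto-unwind shape the probe path returns to, with stand-in acquire/release helpers:

    #include <stdbool.h>
    #include <stdlib.h>

    static bool acquire_a(void) { return true;  }  /* e.g. request_region() */
    static bool acquire_b(void) { return false; }  /* e.g. ioport_map(), failing */
    static void release_a(void) { }

    static int probe(void)
    {
            int err;

            if (!acquire_a())
                    return -1;
            if (!acquire_b()) {
                    err = -2;
                    goto err_a;         /* undo in reverse order */
            }
            return 0;

    err_a:
            release_a();
            return err;
    }

    int main(void) { return probe() ? EXIT_FAILURE : EXIT_SUCCESS; }
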
diff --git a/drivers/char/hw_random/geode-rng.c b/drivers/char/hw_random/geode-rng.c index e7a245942029..e1d421a36a13 100644 --- a/drivers/char/hw_random/geode-rng.c +++ b/drivers/char/hw_random/geode-rng.c | |||
| @@ -31,6 +31,9 @@ | |||
| 31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
| 32 | #include <linux/pci.h> | 32 | #include <linux/pci.h> |
| 33 | 33 | ||
| 34 | |||
| 35 | #define PFX KBUILD_MODNAME ": " | ||
| 36 | |||
| 34 | #define GEODE_RNG_DATA_REG 0x50 | 37 | #define GEODE_RNG_DATA_REG 0x50 |
| 35 | #define GEODE_RNG_STATUS_REG 0x54 | 38 | #define GEODE_RNG_STATUS_REG 0x54 |
| 36 | 39 | ||
| @@ -82,6 +85,7 @@ static struct hwrng geode_rng = { | |||
| 82 | 85 | ||
| 83 | static int __init mod_init(void) | 86 | static int __init mod_init(void) |
| 84 | { | 87 | { |
| 88 | int err = -ENODEV; | ||
| 85 | struct pci_dev *pdev = NULL; | 89 | struct pci_dev *pdev = NULL; |
| 86 | const struct pci_device_id *ent; | 90 | const struct pci_device_id *ent; |
| 87 | void __iomem *mem; | 91 | void __iomem *mem; |
| @@ -89,27 +93,43 @@ static int __init mod_init(void) | |||
| 89 | 93 | ||
| 90 | for_each_pci_dev(pdev) { | 94 | for_each_pci_dev(pdev) { |
| 91 | ent = pci_match_id(pci_tbl, pdev); | 95 | ent = pci_match_id(pci_tbl, pdev); |
| 92 | if (ent) { | 96 | if (ent) |
| 93 | rng_base = pci_resource_start(pdev, 0); | 97 | goto found; |
| 94 | if (rng_base == 0) | ||
| 95 | return -ENODEV; | ||
| 96 | |||
| 97 | mem = devm_ioremap(&pdev->dev, rng_base, 0x58); | ||
| 98 | if (!mem) | ||
| 99 | return -ENOMEM; | ||
| 100 | geode_rng.priv = (unsigned long)mem; | ||
| 101 | |||
| 102 | pr_info("AMD Geode RNG detected\n"); | ||
| 103 | return devm_hwrng_register(&pdev->dev, &geode_rng); | ||
| 104 | } | ||
| 105 | } | 98 | } |
| 106 | |||
| 107 | /* Device not found. */ | 99 | /* Device not found. */ |
| 108 | return -ENODEV; | 100 | goto out; |
| 101 | |||
| 102 | found: | ||
| 103 | rng_base = pci_resource_start(pdev, 0); | ||
| 104 | if (rng_base == 0) | ||
| 105 | goto out; | ||
| 106 | err = -ENOMEM; | ||
| 107 | mem = ioremap(rng_base, 0x58); | ||
| 108 | if (!mem) | ||
| 109 | goto out; | ||
| 110 | geode_rng.priv = (unsigned long)mem; | ||
| 111 | |||
| 112 | pr_info("AMD Geode RNG detected\n"); | ||
| 113 | err = hwrng_register(&geode_rng); | ||
| 114 | if (err) { | ||
| 115 | pr_err(PFX "RNG registering failed (%d)\n", | ||
| 116 | err); | ||
| 117 | goto err_unmap; | ||
| 118 | } | ||
| 119 | out: | ||
| 120 | return err; | ||
| 121 | |||
| 122 | err_unmap: | ||
| 123 | iounmap(mem); | ||
| 124 | goto out; | ||
| 109 | } | 125 | } |
| 110 | 126 | ||
| 111 | static void __exit mod_exit(void) | 127 | static void __exit mod_exit(void) |
| 112 | { | 128 | { |
| 129 | void __iomem *mem = (void __iomem *)geode_rng.priv; | ||
| 130 | |||
| 131 | hwrng_unregister(&geode_rng); | ||
| 132 | iounmap(mem); | ||
| 113 | } | 133 | } |
| 114 | 134 | ||
| 115 | module_init(mod_init); | 135 | module_init(mod_init); |
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index 2a558c706581..3e73bcdf9e65 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c | |||
| @@ -84,11 +84,14 @@ struct pp_struct { | |||
| 84 | struct ieee1284_info state; | 84 | struct ieee1284_info state; |
| 85 | struct ieee1284_info saved_state; | 85 | struct ieee1284_info saved_state; |
| 86 | long default_inactivity; | 86 | long default_inactivity; |
| 87 | int index; | ||
| 87 | }; | 88 | }; |
| 88 | 89 | ||
| 89 | /* should we use PARDEVICE_MAX here? */ | 90 | /* should we use PARDEVICE_MAX here? */ |
| 90 | static struct device *devices[PARPORT_MAX]; | 91 | static struct device *devices[PARPORT_MAX]; |
| 91 | 92 | ||
| 93 | static DEFINE_IDA(ida_index); | ||
| 94 | |||
| 92 | /* pp_struct.flags bitfields */ | 95 | /* pp_struct.flags bitfields */ |
| 93 | #define PP_CLAIMED (1<<0) | 96 | #define PP_CLAIMED (1<<0) |
| 94 | #define PP_EXCL (1<<1) | 97 | #define PP_EXCL (1<<1) |
| @@ -290,7 +293,7 @@ static int register_device(int minor, struct pp_struct *pp) | |||
| 290 | struct pardevice *pdev = NULL; | 293 | struct pardevice *pdev = NULL; |
| 291 | char *name; | 294 | char *name; |
| 292 | struct pardev_cb ppdev_cb; | 295 | struct pardev_cb ppdev_cb; |
| 293 | int rc = 0; | 296 | int rc = 0, index; |
| 294 | 297 | ||
| 295 | name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); | 298 | name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor); |
| 296 | if (name == NULL) | 299 | if (name == NULL) |
| @@ -303,20 +306,23 @@ static int register_device(int minor, struct pp_struct *pp) | |||
| 303 | goto err; | 306 | goto err; |
| 304 | } | 307 | } |
| 305 | 308 | ||
| 309 | index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); | ||
| 306 | memset(&ppdev_cb, 0, sizeof(ppdev_cb)); | 310 | memset(&ppdev_cb, 0, sizeof(ppdev_cb)); |
| 307 | ppdev_cb.irq_func = pp_irq; | 311 | ppdev_cb.irq_func = pp_irq; |
| 308 | ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; | 312 | ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; |
| 309 | ppdev_cb.private = pp; | 313 | ppdev_cb.private = pp; |
| 310 | pdev = parport_register_dev_model(port, name, &ppdev_cb, minor); | 314 | pdev = parport_register_dev_model(port, name, &ppdev_cb, index); |
| 311 | parport_put_port(port); | 315 | parport_put_port(port); |
| 312 | 316 | ||
| 313 | if (!pdev) { | 317 | if (!pdev) { |
| 314 | pr_warn("%s: failed to register device!\n", name); | 318 | pr_warn("%s: failed to register device!\n", name); |
| 315 | rc = -ENXIO; | 319 | rc = -ENXIO; |
| 320 | ida_simple_remove(&ida_index, index); | ||
| 316 | goto err; | 321 | goto err; |
| 317 | } | 322 | } |
| 318 | 323 | ||
| 319 | pp->pdev = pdev; | 324 | pp->pdev = pdev; |
| 325 | pp->index = index; | ||
| 320 | dev_dbg(&pdev->dev, "registered pardevice\n"); | 326 | dev_dbg(&pdev->dev, "registered pardevice\n"); |
| 321 | err: | 327 | err: |
| 322 | kfree(name); | 328 | kfree(name); |
| @@ -755,6 +761,7 @@ static int pp_release(struct inode *inode, struct file *file) | |||
| 755 | 761 | ||
| 756 | if (pp->pdev) { | 762 | if (pp->pdev) { |
| 757 | parport_unregister_device(pp->pdev); | 763 | parport_unregister_device(pp->pdev); |
| 764 | ida_simple_remove(&ida_index, pp->index); | ||
| 758 | pp->pdev = NULL; | 765 | pp->pdev = NULL; |
| 759 | pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); | 766 | pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); |
| 760 | } | 767 | } |
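
In ppdev, the minor number doubled as the pardevice id, and that id feeds the registered device's name, so registering against the same minor more than once could produce colliding names. An IDA now hands out a unique index per registration, released again in pp_release() and on the registration error path. A standalone model of a lowest-free-index allocator in the spirit of ida_simple_get()/ida_simple_remove():

    #include <stdio.h>

    #define MAX_IDS 16
    static unsigned char used[MAX_IDS];

    static int ida_get(void)
    {
            for (int i = 0; i < MAX_IDS; i++)
                    if (!used[i]) {
                            used[i] = 1;
                            return i;
                    }
            return -1;
    }

    static void ida_remove(int id) { used[id] = 0; }

    int main(void)
    {
            int a = ida_get(), b = ida_get(); /* 0 and 1: never collide */

            printf("a=%d b=%d\n", a, b);
            ida_remove(a);
            printf("reused=%d\n", ida_get()); /* lowest free: 0 again */
            return 0;
    }
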
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 0fb39fe217d1..67201f67a14a 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
| @@ -2502,7 +2502,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, | |||
| 2502 | 2502 | ||
| 2503 | clk->core = hw->core; | 2503 | clk->core = hw->core; |
| 2504 | clk->dev_id = dev_id; | 2504 | clk->dev_id = dev_id; |
| 2505 | clk->con_id = con_id; | 2505 | clk->con_id = kstrdup_const(con_id, GFP_KERNEL); |
| 2506 | clk->max_rate = ULONG_MAX; | 2506 | clk->max_rate = ULONG_MAX; |
| 2507 | 2507 | ||
| 2508 | clk_prepare_lock(); | 2508 | clk_prepare_lock(); |
| @@ -2518,6 +2518,7 @@ void __clk_free_clk(struct clk *clk) | |||
| 2518 | hlist_del(&clk->clks_node); | 2518 | hlist_del(&clk->clks_node); |
| 2519 | clk_prepare_unlock(); | 2519 | clk_prepare_unlock(); |
| 2520 | 2520 | ||
| 2521 | kfree_const(clk->con_id); | ||
| 2521 | kfree(clk); | 2522 | kfree(clk); |
| 2522 | } | 2523 | } |
| 2523 | 2524 | ||
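
The clk.c change makes struct clk own its con_id instead of borrowing the caller's pointer, which may not outlive the clk. kstrdup_const() and kfree_const() behave like strdup()/free() except that strings already in kernel .rodata are reused rather than copied. A userspace model of the ownership pattern using plain strdup():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct clk { char *con_id; };

    static struct clk *create_clk(const char *con_id)
    {
            struct clk *clk = malloc(sizeof(*clk));

            if (clk)
                    clk->con_id = con_id ? strdup(con_id) : NULL;
            return clk;
    }

    static void free_clk(struct clk *clk)
    {
            free(clk->con_id);
            free(clk);
    }

    int main(void)
    {
            char buf[] = "uart_baud";       /* caller-owned, short-lived */
            struct clk *clk = create_clk(buf);

            if (!clk)
                    return 1;
            memset(buf, 0, sizeof(buf));    /* caller storage goes away */
            printf("still valid: %s\n", clk->con_id);
            free_clk(clk);
            return 0;
    }
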
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c index 924f560dcf80..00d4150e33c3 100644 --- a/drivers/clk/rockchip/clk-rk3036.c +++ b/drivers/clk/rockchip/clk-rk3036.c | |||
| @@ -127,7 +127,7 @@ PNAME(mux_ddrphy_p) = { "dpll_ddr", "gpll_ddr" }; | |||
| 127 | PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" }; | 127 | PNAME(mux_pll_src_3plls_p) = { "apll", "dpll", "gpll" }; |
| 128 | PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" }; | 128 | PNAME(mux_timer_p) = { "xin24m", "pclk_peri_src" }; |
| 129 | 129 | ||
| 130 | PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll" "usb480m" }; | 130 | PNAME(mux_pll_src_apll_dpll_gpll_usb480m_p) = { "apll", "dpll", "gpll", "usb480m" }; |
| 131 | 131 | ||
| 132 | PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" }; | 132 | PNAME(mux_mmc_src_p) = { "apll", "dpll", "gpll", "xin24m" }; |
| 133 | PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" }; | 133 | PNAME(mux_i2s_pre_p) = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" }; |
| @@ -450,6 +450,13 @@ static void __init rk3036_clk_init(struct device_node *np) | |||
| 450 | return; | 450 | return; |
| 451 | } | 451 | } |
| 452 | 452 | ||
| 453 | /* | ||
| 454 | * Make uart_pll_clk a child of the gpll, as all other sources are | ||
| 455 | * not that usable / stable. | ||
| 456 | */ | ||
| 457 | writel_relaxed(HIWORD_UPDATE(0x2, 0x3, 10), | ||
| 458 | reg_base + RK2928_CLKSEL_CON(13)); | ||
| 459 | |||
| 453 | ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); | 460 | ctx = rockchip_clk_init(np, reg_base, CLK_NR_CLKS); |
| 454 | if (IS_ERR(ctx)) { | 461 | if (IS_ERR(ctx)) { |
| 455 | pr_err("%s: rockchip clk init failed\n", __func__); | 462 | pr_err("%s: rockchip clk init failed\n", __func__); |
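
Two rk3036 fixes: the init path now parents uart_pll_clk under the gpll via a HIWORD_UPDATE write, the other sources being described as not usable or stable, and the PNAME table gains a missing comma. The comma matters more than it looks: adjacent string literals concatenate in C, so the buggy table held three parents ending in "gpllusb480m" instead of four, silently shifting every mux selection past that index. Demonstration:

    /* Missing-comma bug: adjacent string literals concatenate,
     * shrinking the initializer by one element. */
    #include <stdio.h>

    static const char *buggy[] = { "apll", "dpll", "gpll" "usb480m" };
    static const char *fixed[] = { "apll", "dpll", "gpll", "usb480m" };

    #define LEN(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            printf("buggy: %zu entries, last=\"%s\"\n", LEN(buggy),
                   buggy[LEN(buggy) - 1]);  /* 3, "gpllusb480m" */
            printf("fixed: %zu entries, last=\"%s\"\n", LEN(fixed),
                   fixed[LEN(fixed) - 1]);  /* 4, "usb480m" */
            return 0;
    }
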
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig index 695bbf9ef428..72109d2cf41b 100644 --- a/drivers/clk/sunxi-ng/Kconfig +++ b/drivers/clk/sunxi-ng/Kconfig | |||
| @@ -80,6 +80,7 @@ config SUN6I_A31_CCU | |||
| 80 | select SUNXI_CCU_DIV | 80 | select SUNXI_CCU_DIV |
| 81 | select SUNXI_CCU_NK | 81 | select SUNXI_CCU_NK |
| 82 | select SUNXI_CCU_NKM | 82 | select SUNXI_CCU_NKM |
| 83 | select SUNXI_CCU_NKMP | ||
| 83 | select SUNXI_CCU_NM | 84 | select SUNXI_CCU_NM |
| 84 | select SUNXI_CCU_MP | 85 | select SUNXI_CCU_MP |
| 85 | select SUNXI_CCU_PHASE | 86 | select SUNXI_CCU_PHASE |
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c index e3c084cc6da5..f54114c607df 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c | |||
| @@ -566,7 +566,7 @@ static SUNXI_CCU_M_WITH_GATE(gpu_clk, "gpu", "pll-gpu", | |||
| 566 | 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT); | 566 | 0x1a0, 0, 3, BIT(31), CLK_SET_RATE_PARENT); |
| 567 | 567 | ||
| 568 | /* Fixed Factor clocks */ | 568 | /* Fixed Factor clocks */ |
| 569 | static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 1, 2, 0); | 569 | static CLK_FIXED_FACTOR(osc12M_clk, "osc12M", "osc24M", 2, 1, 0); |
| 570 | 570 | ||
| 571 | /* We hardcode the divider to 4 for now */ | 571 | /* We hardcode the divider to 4 for now */ |
| 572 | static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", | 572 | static CLK_FIXED_FACTOR(pll_audio_clk, "pll-audio", |
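
The sun50i-a64 one-liner swaps the factor arguments: CLK_FIXED_FACTOR() takes the divider before the multiplier, so (1, 2) described 24 MHz * 2 = 48 MHz where osc12M plainly wants 24 MHz / 2. A sketch of the rate math the arguments feed:

    #include <stdio.h>

    struct fixed_factor { unsigned int div, mult; };

    static unsigned long recalc(const struct fixed_factor *f,
                                unsigned long parent_rate)
    {
            return parent_rate / f->div * f->mult;
    }

    int main(void)
    {
            struct fixed_factor buggy = { .div = 1, .mult = 2 };
            struct fixed_factor fixed = { .div = 2, .mult = 1 };

            printf("buggy osc12M: %lu\n", recalc(&buggy, 24000000)); /* 48 MHz */
            printf("fixed osc12M: %lu\n", recalc(&fixed, 24000000)); /* 12 MHz */
            return 0;
    }
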
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 4c9a920ff4ab..89e68d29bf45 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c | |||
| @@ -608,7 +608,7 @@ static SUNXI_CCU_M_WITH_MUX_GATE(hdmi_clk, "hdmi", lcd_ch1_parents, | |||
| 608 | 0x150, 0, 4, 24, 2, BIT(31), | 608 | 0x150, 0, 4, 24, 2, BIT(31), |
| 609 | CLK_SET_RATE_PARENT); | 609 | CLK_SET_RATE_PARENT); |
| 610 | 610 | ||
| 611 | static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(31), 0); | 611 | static SUNXI_CCU_GATE(hdmi_ddc_clk, "hdmi-ddc", "osc24M", 0x150, BIT(30), 0); |
| 612 | 612 | ||
| 613 | static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0); | 613 | static SUNXI_CCU_GATE(ps_clk, "ps", "lcd1-ch1", 0x140, BIT(31), 0); |
| 614 | 614 | ||
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c index 22c2ca7a2a22..b583f186a804 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.c +++ b/drivers/clk/sunxi-ng/ccu_mp.c | |||
| @@ -85,6 +85,10 @@ static unsigned long ccu_mp_recalc_rate(struct clk_hw *hw, | |||
| 85 | unsigned int m, p; | 85 | unsigned int m, p; |
| 86 | u32 reg; | 86 | u32 reg; |
| 87 | 87 | ||
| 88 | /* Adjust parent_rate according to pre-dividers */ | ||
| 89 | ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux, | ||
| 90 | -1, &parent_rate); | ||
| 91 | |||
| 88 | reg = readl(cmp->common.base + cmp->common.reg); | 92 | reg = readl(cmp->common.base + cmp->common.reg); |
| 89 | 93 | ||
| 90 | m = reg >> cmp->m.shift; | 94 | m = reg >> cmp->m.shift; |
| @@ -117,6 +121,10 @@ static int ccu_mp_set_rate(struct clk_hw *hw, unsigned long rate, | |||
| 117 | unsigned int m, p; | 121 | unsigned int m, p; |
| 118 | u32 reg; | 122 | u32 reg; |
| 119 | 123 | ||
| 124 | /* Adjust parent_rate according to pre-dividers */ | ||
| 125 | ccu_mux_helper_adjust_parent_for_prediv(&cmp->common, &cmp->mux, | ||
| 126 | -1, &parent_rate); | ||
| 127 | |||
| 120 | max_m = cmp->m.max ?: 1 << cmp->m.width; | 128 | max_m = cmp->m.max ?: 1 << cmp->m.width; |
| 121 | max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1); | 129 | max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1); |
| 122 | 130 | ||
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c index a2b40a000157..488055ed944f 100644 --- a/drivers/clk/sunxi-ng/ccu_nkmp.c +++ b/drivers/clk/sunxi-ng/ccu_nkmp.c | |||
| @@ -107,7 +107,7 @@ static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw, | |||
| 107 | p = reg >> nkmp->p.shift; | 107 | p = reg >> nkmp->p.shift; |
| 108 | p &= (1 << nkmp->p.width) - 1; | 108 | p &= (1 << nkmp->p.width) - 1; |
| 109 | 109 | ||
| 110 | return parent_rate * n * k >> p / m; | 110 | return (parent_rate * n * k >> p) / m; |
| 111 | } | 111 | } |
| 112 | 112 | ||
| 113 | static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate, | 113 | static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate, |
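
The ccu_nkmp change fixes C operator precedence: division binds tighter than shift, so parent_rate * n * k >> p / m evaluated as (parent_rate * n * k) >> (p / m), dividing the shift amount instead of the rate. The parenthesized form divides by m after the shift, as the NKMP formula rate = parent * N * K / (M << P) requires. Demonstration:

    /* '/' binds tighter than '>>', so the old expression shifted by
     * p / m instead of dividing the shifted rate by m. */
    #include <stdio.h>

    int main(void)
    {
            unsigned long parent = 24000000, n = 4, k = 2, m = 2, p = 1;

            unsigned long buggy = parent * n * k >> p / m;  /* >> (1/2) = >> 0 */
            unsigned long fixed = (parent * n * k >> p) / m;

            printf("buggy: %lu\n", buggy);  /* 192000000 */
            printf("fixed: %lu\n", fixed);  /* 48000000  */
            return 0;
    }
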
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 745844ee973e..d4ca9962a759 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c | |||
| @@ -10,7 +10,6 @@ | |||
| 10 | #include <linux/io.h> | 10 | #include <linux/io.h> |
| 11 | #include <linux/platform_device.h> | 11 | #include <linux/platform_device.h> |
| 12 | #include <linux/atmel_tc.h> | 12 | #include <linux/atmel_tc.h> |
| 13 | #include <linux/sched_clock.h> | ||
| 14 | 13 | ||
| 15 | 14 | ||
| 16 | /* | 15 | /* |
| @@ -57,14 +56,9 @@ static u64 tc_get_cycles(struct clocksource *cs) | |||
| 57 | return (upper << 16) | lower; | 56 | return (upper << 16) | lower; |
| 58 | } | 57 | } |
| 59 | 58 | ||
| 60 | static u32 tc_get_cv32(void) | ||
| 61 | { | ||
| 62 | return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV)); | ||
| 63 | } | ||
| 64 | |||
| 65 | static u64 tc_get_cycles32(struct clocksource *cs) | 59 | static u64 tc_get_cycles32(struct clocksource *cs) |
| 66 | { | 60 | { |
| 67 | return tc_get_cv32(); | 61 | return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV)); |
| 68 | } | 62 | } |
| 69 | 63 | ||
| 70 | static struct clocksource clksrc = { | 64 | static struct clocksource clksrc = { |
| @@ -75,11 +69,6 @@ static struct clocksource clksrc = { | |||
| 75 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 69 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
| 76 | }; | 70 | }; |
| 77 | 71 | ||
| 78 | static u64 notrace tc_read_sched_clock(void) | ||
| 79 | { | ||
| 80 | return tc_get_cv32(); | ||
| 81 | } | ||
| 82 | |||
| 83 | #ifdef CONFIG_GENERIC_CLOCKEVENTS | 72 | #ifdef CONFIG_GENERIC_CLOCKEVENTS |
| 84 | 73 | ||
| 85 | struct tc_clkevt_device { | 74 | struct tc_clkevt_device { |
| @@ -350,9 +339,6 @@ static int __init tcb_clksrc_init(void) | |||
| 350 | clksrc.read = tc_get_cycles32; | 339 | clksrc.read = tc_get_cycles32; |
| 351 | /* setup only channel 0 */ | 340 | /* setup only channel 0 */ |
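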
| 352 | tcb_setup_single_chan(tc, best_divisor_idx); | 341 | tcb_setup_single_chan(tc, best_divisor_idx); |
| 353 | |||
| 354 | /* register sched_clock on chips with single 32 bit counter */ | ||
| 355 | sched_clock_register(tc_read_sched_clock, 32, divided_rate); | ||
| 356 | } else { | 342 | } else { |
| 357 | /* tclib will give us three clocks no matter what the | 343 | /* tclib will give us three clocks no matter what the |
| 358 | * underlying platform supports. | 344 | * underlying platform supports. |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 38b9fdf854a4..5dbdd261aa73 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy, | |||
| 680 | char *buf) | 680 | char *buf) |
| 681 | { | 681 | { |
| 682 | unsigned int cur_freq = __cpufreq_get(policy); | 682 | unsigned int cur_freq = __cpufreq_get(policy); |
| 683 | if (!cur_freq) | 683 | |
| 684 | return sprintf(buf, "<unknown>"); | 684 | if (cur_freq) |
| 685 | return sprintf(buf, "%u\n", cur_freq); | 685 | return sprintf(buf, "%u\n", cur_freq); |
| 686 | |||
| 687 | return sprintf(buf, "<unknown>\n"); | ||
| 686 | } | 688 | } |
| 687 | 689 | ||
| 688 | /** | 690 | /** |
| @@ -1182,6 +1184,9 @@ static int cpufreq_online(unsigned int cpu) | |||
| 1182 | for_each_cpu(j, policy->related_cpus) | 1184 | for_each_cpu(j, policy->related_cpus) |
| 1183 | per_cpu(cpufreq_cpu_data, j) = policy; | 1185 | per_cpu(cpufreq_cpu_data, j) = policy; |
| 1184 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); | 1186 | write_unlock_irqrestore(&cpufreq_driver_lock, flags); |
| 1187 | } else { | ||
| 1188 | policy->min = policy->user_policy.min; | ||
| 1189 | policy->max = policy->user_policy.max; | ||
| 1185 | } | 1190 | } |
| 1186 | 1191 | ||
| 1187 | if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { | 1192 | if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { |
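The second cpufreq hunk matters on CPU hotplug: cpufreq_online() reuses the cached policy of a CPU that went offline, so policy->min/max may still carry clamps applied while it was away. Restoring the user-space bounds before the policy is reinitialized avoids re-onlining a CPU with stale limits. Simplified shape of the path (a sketch, not the full function):

	if (new_policy) {
		/* fresh policy: limits come from cpuinfo and the governor */
	} else {
		/* re-online: drop possibly stale clamps, keep user bounds */
		policy->min = policy->user_policy.min;
		policy->max = policy->user_policy.max;
	}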
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 3d37219a0dd7..283491f742d3 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
| @@ -84,6 +84,11 @@ static inline u64 div_ext_fp(u64 x, u64 y) | |||
| 84 | return div64_u64(x << EXT_FRAC_BITS, y); | 84 | return div64_u64(x << EXT_FRAC_BITS, y); |
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | static inline int32_t percent_ext_fp(int percent) | ||
| 88 | { | ||
| 89 | return div_ext_fp(percent, 100); | ||
| 90 | } | ||
| 91 | |||
| 87 | /** | 92 | /** |
| 88 | * struct sample - Store performance sample | 93 | * struct sample - Store performance sample |
| 89 | * @core_avg_perf: Ratio of APERF/MPERF which is the actual average | 94 | * @core_avg_perf: Ratio of APERF/MPERF which is the actual average |
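percent_ext_fp() is a thin convenience over the div_ext_fp() shown just above it, converting a percentage into the driver's extended fixed-point fraction. Since div_ext_fp(x, y) is (x << EXT_FRAC_BITS) / y, a few identities follow (EXT_FRAC_BITS is driver-defined; its exact width does not matter here):

	/*
	 * percent_ext_fp(100) == 1 << EXT_FRAC_BITS		-- fixed-point 1.0
	 * percent_ext_fp(50)  == 1 << (EXT_FRAC_BITS - 1)	-- fixed-point 0.5
	 * percent_ext_fp(0)   == 0
	 *
	 * so the later hunks can min()/max() these values directly against
	 * policy fractions such as max_policy_perf.
	 */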
| @@ -359,9 +364,7 @@ static bool driver_registered __read_mostly; | |||
| 359 | static bool acpi_ppc; | 364 | static bool acpi_ppc; |
| 360 | #endif | 365 | #endif |
| 361 | 366 | ||
| 362 | static struct perf_limits performance_limits; | 367 | static struct perf_limits global; |
| 363 | static struct perf_limits powersave_limits; | ||
| 364 | static struct perf_limits *limits; | ||
| 365 | 368 | ||
| 366 | static void intel_pstate_init_limits(struct perf_limits *limits) | 369 | static void intel_pstate_init_limits(struct perf_limits *limits) |
| 367 | { | 370 | { |
| @@ -372,14 +375,6 @@ static void intel_pstate_init_limits(struct perf_limits *limits) | |||
| 372 | limits->max_sysfs_pct = 100; | 375 | limits->max_sysfs_pct = 100; |
| 373 | } | 376 | } |
| 374 | 377 | ||
| 375 | static void intel_pstate_set_performance_limits(struct perf_limits *limits) | ||
| 376 | { | ||
| 377 | intel_pstate_init_limits(limits); | ||
| 378 | limits->min_perf_pct = 100; | ||
| 379 | limits->min_perf = int_ext_tofp(1); | ||
| 380 | limits->min_sysfs_pct = 100; | ||
| 381 | } | ||
| 382 | |||
| 383 | static DEFINE_MUTEX(intel_pstate_driver_lock); | 378 | static DEFINE_MUTEX(intel_pstate_driver_lock); |
| 384 | static DEFINE_MUTEX(intel_pstate_limits_lock); | 379 | static DEFINE_MUTEX(intel_pstate_limits_lock); |
| 385 | 380 | ||
| @@ -502,7 +497,7 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) | |||
| 502 | * correct max turbo frequency based on the turbo state. | 497 | * correct max turbo frequency based on the turbo state. |
| 503 | * Also need to convert to MHz as _PSS freq is in MHz. | 498 | * Also need to convert to MHz as _PSS freq is in MHz. |
| 504 | */ | 499 | */ |
| 505 | if (!limits->turbo_disabled) | 500 | if (!global.turbo_disabled) |
| 506 | cpu->acpi_perf_data.states[0].core_frequency = | 501 | cpu->acpi_perf_data.states[0].core_frequency = |
| 507 | policy->cpuinfo.max_freq / 1000; | 502 | policy->cpuinfo.max_freq / 1000; |
| 508 | cpu->valid_pss_table = true; | 503 | cpu->valid_pss_table = true; |
| @@ -621,7 +616,7 @@ static inline void update_turbo_state(void) | |||
| 621 | 616 | ||
| 622 | cpu = all_cpu_data[0]; | 617 | cpu = all_cpu_data[0]; |
| 623 | rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); | 618 | rdmsrl(MSR_IA32_MISC_ENABLE, misc_en); |
| 624 | limits->turbo_disabled = | 619 | global.turbo_disabled = |
| 625 | (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || | 620 | (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE || |
| 626 | cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); | 621 | cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); |
| 627 | } | 622 | } |
| @@ -845,12 +840,11 @@ static struct freq_attr *hwp_cpufreq_attrs[] = { | |||
| 845 | 840 | ||
| 846 | static void intel_pstate_hwp_set(struct cpufreq_policy *policy) | 841 | static void intel_pstate_hwp_set(struct cpufreq_policy *policy) |
| 847 | { | 842 | { |
| 848 | int min, hw_min, max, hw_max, cpu, range, adj_range; | 843 | int min, hw_min, max, hw_max, cpu; |
| 849 | struct perf_limits *perf_limits = limits; | 844 | struct perf_limits *perf_limits = &global; |
| 850 | u64 value, cap; | 845 | u64 value, cap; |
| 851 | 846 | ||
| 852 | for_each_cpu(cpu, policy->cpus) { | 847 | for_each_cpu(cpu, policy->cpus) { |
| 853 | int max_perf_pct, min_perf_pct; | ||
| 854 | struct cpudata *cpu_data = all_cpu_data[cpu]; | 848 | struct cpudata *cpu_data = all_cpu_data[cpu]; |
| 855 | s16 epp; | 849 | s16 epp; |
| 856 | 850 | ||
| @@ -859,24 +853,22 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy) | |||
| 859 | 853 | ||
| 860 | rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); | 854 | rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap); |
| 861 | hw_min = HWP_LOWEST_PERF(cap); | 855 | hw_min = HWP_LOWEST_PERF(cap); |
| 862 | if (limits->no_turbo) | 856 | if (global.no_turbo) |
| 863 | hw_max = HWP_GUARANTEED_PERF(cap); | 857 | hw_max = HWP_GUARANTEED_PERF(cap); |
| 864 | else | 858 | else |
| 865 | hw_max = HWP_HIGHEST_PERF(cap); | 859 | hw_max = HWP_HIGHEST_PERF(cap); |
| 866 | range = hw_max - hw_min; | ||
| 867 | 860 | ||
| 868 | max_perf_pct = perf_limits->max_perf_pct; | 861 | max = fp_ext_toint(hw_max * perf_limits->max_perf); |
| 869 | min_perf_pct = perf_limits->min_perf_pct; | 862 | if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) |
| 863 | min = max; | ||
| 864 | else | ||
| 865 | min = fp_ext_toint(hw_max * perf_limits->min_perf); | ||
| 870 | 866 | ||
| 871 | rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); | 867 | rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value); |
| 872 | adj_range = min_perf_pct * range / 100; | 868 | |
| 873 | min = hw_min + adj_range; | ||
| 874 | value &= ~HWP_MIN_PERF(~0L); | 869 | value &= ~HWP_MIN_PERF(~0L); |
| 875 | value |= HWP_MIN_PERF(min); | 870 | value |= HWP_MIN_PERF(min); |
| 876 | 871 | ||
| 877 | adj_range = max_perf_pct * range / 100; | ||
| 878 | max = hw_min + adj_range; | ||
| 879 | |||
| 880 | value &= ~HWP_MAX_PERF(~0L); | 872 | value &= ~HWP_MAX_PERF(~0L); |
| 881 | value |= HWP_MAX_PERF(max); | 873 | value |= HWP_MAX_PERF(max); |
| 882 | 874 | ||
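The rewrite of intel_pstate_hwp_set() changes how limits map onto the HWP capability scale: the old code interpolated percentages across [hw_min, hw_max], while the new code scales hw_max by the fixed-point min_perf/max_perf fractions, matching the convention used for policy limits elsewhere in this patch, and pins min to max under the performance policy. Worked numbers (capability values assumed for illustration):

	/*
	 * hw_min = 8, hw_max = 40, max limit = 50%:
	 *
	 * old:	max = hw_min + 50 * (hw_max - hw_min) / 100 = 8 + 16 = 24
	 * new:	max = fp_ext_toint(hw_max * max_perf) = 40 * 0.5 = 20
	 *
	 * 20 is half of the highest performance level, which is what the
	 * sysfs percentage claims; the old result depended on hw_min too.
	 */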
| @@ -969,26 +961,18 @@ static int intel_pstate_resume(struct cpufreq_policy *policy) | |||
| 969 | } | 961 | } |
| 970 | 962 | ||
| 971 | static void intel_pstate_update_policies(void) | 963 | static void intel_pstate_update_policies(void) |
| 972 | __releases(&intel_pstate_limits_lock) | ||
| 973 | __acquires(&intel_pstate_limits_lock) | ||
| 974 | { | 964 | { |
| 975 | struct perf_limits *saved_limits = limits; | ||
| 976 | int cpu; | 965 | int cpu; |
| 977 | 966 | ||
| 978 | mutex_unlock(&intel_pstate_limits_lock); | ||
| 979 | |||
| 980 | for_each_possible_cpu(cpu) | 967 | for_each_possible_cpu(cpu) |
| 981 | cpufreq_update_policy(cpu); | 968 | cpufreq_update_policy(cpu); |
| 982 | |||
| 983 | mutex_lock(&intel_pstate_limits_lock); | ||
| 984 | |||
| 985 | limits = saved_limits; | ||
| 986 | } | 969 | } |
| 987 | 970 | ||
| 988 | /************************** debugfs begin ************************/ | 971 | /************************** debugfs begin ************************/ |
| 989 | static int pid_param_set(void *data, u64 val) | 972 | static int pid_param_set(void *data, u64 val) |
| 990 | { | 973 | { |
| 991 | *(u32 *)data = val; | 974 | *(u32 *)data = val; |
| 975 | pid_params.sample_rate_ns = pid_params.sample_rate_ms * NSEC_PER_MSEC; | ||
| 992 | intel_pstate_reset_all_pid(); | 976 | intel_pstate_reset_all_pid(); |
| 993 | return 0; | 977 | return 0; |
| 994 | } | 978 | } |
| @@ -1060,7 +1044,7 @@ static void intel_pstate_debug_hide_params(void) | |||
| 1060 | static ssize_t show_##file_name \ | 1044 | static ssize_t show_##file_name \ |
| 1061 | (struct kobject *kobj, struct attribute *attr, char *buf) \ | 1045 | (struct kobject *kobj, struct attribute *attr, char *buf) \ |
| 1062 | { \ | 1046 | { \ |
| 1063 | return sprintf(buf, "%u\n", limits->object); \ | 1047 | return sprintf(buf, "%u\n", global.object); \ |
| 1064 | } | 1048 | } |
| 1065 | 1049 | ||
| 1066 | static ssize_t intel_pstate_show_status(char *buf); | 1050 | static ssize_t intel_pstate_show_status(char *buf); |
| @@ -1151,10 +1135,10 @@ static ssize_t show_no_turbo(struct kobject *kobj, | |||
| 1151 | } | 1135 | } |
| 1152 | 1136 | ||
| 1153 | update_turbo_state(); | 1137 | update_turbo_state(); |
| 1154 | if (limits->turbo_disabled) | 1138 | if (global.turbo_disabled) |
| 1155 | ret = sprintf(buf, "%u\n", limits->turbo_disabled); | 1139 | ret = sprintf(buf, "%u\n", global.turbo_disabled); |
| 1156 | else | 1140 | else |
| 1157 | ret = sprintf(buf, "%u\n", limits->no_turbo); | 1141 | ret = sprintf(buf, "%u\n", global.no_turbo); |
| 1158 | 1142 | ||
| 1159 | mutex_unlock(&intel_pstate_driver_lock); | 1143 | mutex_unlock(&intel_pstate_driver_lock); |
| 1160 | 1144 | ||
| @@ -1181,19 +1165,19 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, | |||
| 1181 | mutex_lock(&intel_pstate_limits_lock); | 1165 | mutex_lock(&intel_pstate_limits_lock); |
| 1182 | 1166 | ||
| 1183 | update_turbo_state(); | 1167 | update_turbo_state(); |
| 1184 | if (limits->turbo_disabled) { | 1168 | if (global.turbo_disabled) { |
| 1185 | pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); | 1169 | pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); |
| 1186 | mutex_unlock(&intel_pstate_limits_lock); | 1170 | mutex_unlock(&intel_pstate_limits_lock); |
| 1187 | mutex_unlock(&intel_pstate_driver_lock); | 1171 | mutex_unlock(&intel_pstate_driver_lock); |
| 1188 | return -EPERM; | 1172 | return -EPERM; |
| 1189 | } | 1173 | } |
| 1190 | 1174 | ||
| 1191 | limits->no_turbo = clamp_t(int, input, 0, 1); | 1175 | global.no_turbo = clamp_t(int, input, 0, 1); |
| 1192 | |||
| 1193 | intel_pstate_update_policies(); | ||
| 1194 | 1176 | ||
| 1195 | mutex_unlock(&intel_pstate_limits_lock); | 1177 | mutex_unlock(&intel_pstate_limits_lock); |
| 1196 | 1178 | ||
| 1179 | intel_pstate_update_policies(); | ||
| 1180 | |||
| 1197 | mutex_unlock(&intel_pstate_driver_lock); | 1181 | mutex_unlock(&intel_pstate_driver_lock); |
| 1198 | 1182 | ||
| 1199 | return count; | 1183 | return count; |
| @@ -1218,19 +1202,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, | |||
| 1218 | 1202 | ||
| 1219 | mutex_lock(&intel_pstate_limits_lock); | 1203 | mutex_lock(&intel_pstate_limits_lock); |
| 1220 | 1204 | ||
| 1221 | limits->max_sysfs_pct = clamp_t(int, input, 0 , 100); | 1205 | global.max_sysfs_pct = clamp_t(int, input, 0 , 100); |
| 1222 | limits->max_perf_pct = min(limits->max_policy_pct, | 1206 | global.max_perf_pct = min(global.max_policy_pct, global.max_sysfs_pct); |
| 1223 | limits->max_sysfs_pct); | 1207 | global.max_perf_pct = max(global.min_policy_pct, global.max_perf_pct); |
| 1224 | limits->max_perf_pct = max(limits->min_policy_pct, | 1208 | global.max_perf_pct = max(global.min_perf_pct, global.max_perf_pct); |
| 1225 | limits->max_perf_pct); | 1209 | global.max_perf = percent_ext_fp(global.max_perf_pct); |
| 1226 | limits->max_perf_pct = max(limits->min_perf_pct, | ||
| 1227 | limits->max_perf_pct); | ||
| 1228 | limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); | ||
| 1229 | |||
| 1230 | intel_pstate_update_policies(); | ||
| 1231 | 1210 | ||
| 1232 | mutex_unlock(&intel_pstate_limits_lock); | 1211 | mutex_unlock(&intel_pstate_limits_lock); |
| 1233 | 1212 | ||
| 1213 | intel_pstate_update_policies(); | ||
| 1214 | |||
| 1234 | mutex_unlock(&intel_pstate_driver_lock); | 1215 | mutex_unlock(&intel_pstate_driver_lock); |
| 1235 | 1216 | ||
| 1236 | return count; | 1217 | return count; |
| @@ -1255,19 +1236,16 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, | |||
| 1255 | 1236 | ||
| 1256 | mutex_lock(&intel_pstate_limits_lock); | 1237 | mutex_lock(&intel_pstate_limits_lock); |
| 1257 | 1238 | ||
| 1258 | limits->min_sysfs_pct = clamp_t(int, input, 0 , 100); | 1239 | global.min_sysfs_pct = clamp_t(int, input, 0 , 100); |
| 1259 | limits->min_perf_pct = max(limits->min_policy_pct, | 1240 | global.min_perf_pct = max(global.min_policy_pct, global.min_sysfs_pct); |
| 1260 | limits->min_sysfs_pct); | 1241 | global.min_perf_pct = min(global.max_policy_pct, global.min_perf_pct); |
| 1261 | limits->min_perf_pct = min(limits->max_policy_pct, | 1242 | global.min_perf_pct = min(global.max_perf_pct, global.min_perf_pct); |
| 1262 | limits->min_perf_pct); | 1243 | global.min_perf = percent_ext_fp(global.min_perf_pct); |
| 1263 | limits->min_perf_pct = min(limits->max_perf_pct, | ||
| 1264 | limits->min_perf_pct); | ||
| 1265 | limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); | ||
| 1266 | |||
| 1267 | intel_pstate_update_policies(); | ||
| 1268 | 1244 | ||
| 1269 | mutex_unlock(&intel_pstate_limits_lock); | 1245 | mutex_unlock(&intel_pstate_limits_lock); |
| 1270 | 1246 | ||
| 1247 | intel_pstate_update_policies(); | ||
| 1248 | |||
| 1271 | mutex_unlock(&intel_pstate_driver_lock); | 1249 | mutex_unlock(&intel_pstate_driver_lock); |
| 1272 | 1250 | ||
| 1273 | return count; | 1251 | return count; |
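All three sysfs store paths above move intel_pstate_update_policies() from inside the limits-locked region to after the unlock. The apparent reason, consistent with the __releases/__acquires annotations and the unlock/relock deleted from intel_pstate_update_policies() itself, is that cpufreq_update_policy() takes per-policy locks and must not nest under intel_pstate_limits_lock. Shape of the change (a sketch):

	/* old: nesting handled by dropping the lock inside the callee */
	mutex_lock(&intel_pstate_limits_lock);
	/* ... update globals ... */
	intel_pstate_update_policies();		/* unlocked/relocked inside */
	mutex_unlock(&intel_pstate_limits_lock);

	/* new: no nesting at all */
	mutex_lock(&intel_pstate_limits_lock);
	/* ... update globals ... */
	mutex_unlock(&intel_pstate_limits_lock);
	intel_pstate_update_policies();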
| @@ -1387,7 +1365,7 @@ static u64 atom_get_val(struct cpudata *cpudata, int pstate) | |||
| 1387 | u32 vid; | 1365 | u32 vid; |
| 1388 | 1366 | ||
| 1389 | val = (u64)pstate << 8; | 1367 | val = (u64)pstate << 8; |
| 1390 | if (limits->no_turbo && !limits->turbo_disabled) | 1368 | if (global.no_turbo && !global.turbo_disabled) |
| 1391 | val |= (u64)1 << 32; | 1369 | val |= (u64)1 << 32; |
| 1392 | 1370 | ||
| 1393 | vid_fp = cpudata->vid.min + mul_fp( | 1371 | vid_fp = cpudata->vid.min + mul_fp( |
| @@ -1557,7 +1535,7 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate) | |||
| 1557 | u64 val; | 1535 | u64 val; |
| 1558 | 1536 | ||
| 1559 | val = (u64)pstate << 8; | 1537 | val = (u64)pstate << 8; |
| 1560 | if (limits->no_turbo && !limits->turbo_disabled) | 1538 | if (global.no_turbo && !global.turbo_disabled) |
| 1561 | val |= (u64)1 << 32; | 1539 | val |= (u64)1 << 32; |
| 1562 | 1540 | ||
| 1563 | return val; | 1541 | return val; |
| @@ -1683,9 +1661,9 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) | |||
| 1683 | int max_perf = cpu->pstate.turbo_pstate; | 1661 | int max_perf = cpu->pstate.turbo_pstate; |
| 1684 | int max_perf_adj; | 1662 | int max_perf_adj; |
| 1685 | int min_perf; | 1663 | int min_perf; |
| 1686 | struct perf_limits *perf_limits = limits; | 1664 | struct perf_limits *perf_limits = &global; |
| 1687 | 1665 | ||
| 1688 | if (limits->no_turbo || limits->turbo_disabled) | 1666 | if (global.no_turbo || global.turbo_disabled) |
| 1689 | max_perf = cpu->pstate.max_pstate; | 1667 | max_perf = cpu->pstate.max_pstate; |
| 1690 | 1668 | ||
| 1691 | if (per_cpu_limits) | 1669 | if (per_cpu_limits) |
| @@ -1820,7 +1798,7 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) | |||
| 1820 | 1798 | ||
| 1821 | sample->busy_scaled = busy_frac * 100; | 1799 | sample->busy_scaled = busy_frac * 100; |
| 1822 | 1800 | ||
| 1823 | target = limits->no_turbo || limits->turbo_disabled ? | 1801 | target = global.no_turbo || global.turbo_disabled ? |
| 1824 | cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; | 1802 | cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; |
| 1825 | target += target >> 2; | 1803 | target += target >> 2; |
| 1826 | target = mul_fp(target, busy_frac); | 1804 | target = mul_fp(target, busy_frac); |
| @@ -2080,36 +2058,34 @@ static void intel_pstate_clear_update_util_hook(unsigned int cpu) | |||
| 2080 | static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, | 2058 | static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, |
| 2081 | struct perf_limits *limits) | 2059 | struct perf_limits *limits) |
| 2082 | { | 2060 | { |
| 2061 | int32_t max_policy_perf, min_policy_perf; | ||
| 2083 | 2062 | ||
| 2084 | limits->max_policy_pct = DIV_ROUND_UP(policy->max * 100, | 2063 | max_policy_perf = div_ext_fp(policy->max, policy->cpuinfo.max_freq); |
| 2085 | policy->cpuinfo.max_freq); | 2064 | max_policy_perf = clamp_t(int32_t, max_policy_perf, 0, int_ext_tofp(1)); |
| 2086 | limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0, 100); | ||
| 2087 | if (policy->max == policy->min) { | 2065 | if (policy->max == policy->min) { |
| 2088 | limits->min_policy_pct = limits->max_policy_pct; | 2066 | min_policy_perf = max_policy_perf; |
| 2089 | } else { | 2067 | } else { |
| 2090 | limits->min_policy_pct = DIV_ROUND_UP(policy->min * 100, | 2068 | min_policy_perf = div_ext_fp(policy->min, |
| 2091 | policy->cpuinfo.max_freq); | 2069 | policy->cpuinfo.max_freq); |
| 2092 | limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, | 2070 | min_policy_perf = clamp_t(int32_t, min_policy_perf, |
| 2093 | 0, 100); | 2071 | 0, max_policy_perf); |
| 2094 | } | 2072 | } |
| 2095 | 2073 | ||
| 2096 | /* Normalize user input to [min_policy_pct, max_policy_pct] */ | 2074 | /* Normalize user input to [min_perf, max_perf] */ |
| 2097 | limits->min_perf_pct = max(limits->min_policy_pct, | 2075 | limits->min_perf = max(min_policy_perf, |
| 2098 | limits->min_sysfs_pct); | 2076 | percent_ext_fp(limits->min_sysfs_pct)); |
| 2099 | limits->min_perf_pct = min(limits->max_policy_pct, | 2077 | limits->min_perf = min(limits->min_perf, max_policy_perf); |
| 2100 | limits->min_perf_pct); | 2078 | limits->max_perf = min(max_policy_perf, |
| 2101 | limits->max_perf_pct = min(limits->max_policy_pct, | 2079 | percent_ext_fp(limits->max_sysfs_pct)); |
| 2102 | limits->max_sysfs_pct); | 2080 | limits->max_perf = max(min_policy_perf, limits->max_perf); |
| 2103 | limits->max_perf_pct = max(limits->min_policy_pct, | 2081 | |
| 2104 | limits->max_perf_pct); | 2082 | /* Make sure min_perf <= max_perf */ |
| 2105 | 2083 | limits->min_perf = min(limits->min_perf, limits->max_perf); | |
| 2106 | /* Make sure min_perf_pct <= max_perf_pct */ | 2084 | |
| 2107 | limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct); | ||
| 2108 | |||
| 2109 | limits->min_perf = div_ext_fp(limits->min_perf_pct, 100); | ||
| 2110 | limits->max_perf = div_ext_fp(limits->max_perf_pct, 100); | ||
| 2111 | limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS); | 2085 | limits->max_perf = round_up(limits->max_perf, EXT_FRAC_BITS); |
| 2112 | limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS); | 2086 | limits->min_perf = round_up(limits->min_perf, EXT_FRAC_BITS); |
| 2087 | limits->max_perf_pct = fp_ext_toint(limits->max_perf * 100); | ||
| 2088 | limits->min_perf_pct = fp_ext_toint(limits->min_perf * 100); | ||
| 2113 | 2089 | ||
| 2114 | pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu, | 2090 | pr_debug("cpu:%d max_perf_pct:%d min_perf_pct:%d\n", policy->cpu, |
| 2115 | limits->max_perf_pct, limits->min_perf_pct); | 2091 | limits->max_perf_pct, limits->min_perf_pct); |
| @@ -2118,7 +2094,7 @@ static void intel_pstate_update_perf_limits(struct cpufreq_policy *policy, | |||
| 2118 | static int intel_pstate_set_policy(struct cpufreq_policy *policy) | 2094 | static int intel_pstate_set_policy(struct cpufreq_policy *policy) |
| 2119 | { | 2095 | { |
| 2120 | struct cpudata *cpu; | 2096 | struct cpudata *cpu; |
| 2121 | struct perf_limits *perf_limits = NULL; | 2097 | struct perf_limits *perf_limits = &global; |
| 2122 | 2098 | ||
| 2123 | if (!policy->cpuinfo.max_freq) | 2099 | if (!policy->cpuinfo.max_freq) |
| 2124 | return -ENODEV; | 2100 | return -ENODEV; |
| @@ -2141,21 +2117,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 2141 | 2117 | ||
| 2142 | mutex_lock(&intel_pstate_limits_lock); | 2118 | mutex_lock(&intel_pstate_limits_lock); |
| 2143 | 2119 | ||
| 2144 | if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { | ||
| 2145 | pr_debug("set performance\n"); | ||
| 2146 | if (!perf_limits) { | ||
| 2147 | limits = &performance_limits; | ||
| 2148 | perf_limits = limits; | ||
| 2149 | } | ||
| 2150 | } else { | ||
| 2151 | pr_debug("set powersave\n"); | ||
| 2152 | if (!perf_limits) { | ||
| 2153 | limits = &powersave_limits; | ||
| 2154 | perf_limits = limits; | ||
| 2155 | } | ||
| 2156 | |||
| 2157 | } | ||
| 2158 | |||
| 2159 | intel_pstate_update_perf_limits(policy, perf_limits); | 2120 | intel_pstate_update_perf_limits(policy, perf_limits); |
| 2160 | 2121 | ||
| 2161 | if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { | 2122 | if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) { |
| @@ -2179,16 +2140,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) | |||
| 2179 | static int intel_pstate_verify_policy(struct cpufreq_policy *policy) | 2140 | static int intel_pstate_verify_policy(struct cpufreq_policy *policy) |
| 2180 | { | 2141 | { |
| 2181 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | 2142 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
| 2182 | struct perf_limits *perf_limits; | ||
| 2183 | |||
| 2184 | if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) | ||
| 2185 | perf_limits = &performance_limits; | ||
| 2186 | else | ||
| 2187 | perf_limits = &powersave_limits; | ||
| 2188 | 2143 | ||
| 2189 | update_turbo_state(); | 2144 | update_turbo_state(); |
| 2190 | policy->cpuinfo.max_freq = perf_limits->turbo_disabled || | 2145 | policy->cpuinfo.max_freq = global.turbo_disabled || global.no_turbo ? |
| 2191 | perf_limits->no_turbo ? | ||
| 2192 | cpu->pstate.max_freq : | 2146 | cpu->pstate.max_freq : |
| 2193 | cpu->pstate.turbo_freq; | 2147 | cpu->pstate.turbo_freq; |
| 2194 | 2148 | ||
| @@ -2203,9 +2157,9 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy) | |||
| 2203 | unsigned int max_freq, min_freq; | 2157 | unsigned int max_freq, min_freq; |
| 2204 | 2158 | ||
| 2205 | max_freq = policy->cpuinfo.max_freq * | 2159 | max_freq = policy->cpuinfo.max_freq * |
| 2206 | perf_limits->max_sysfs_pct / 100; | 2160 | global.max_sysfs_pct / 100; |
| 2207 | min_freq = policy->cpuinfo.max_freq * | 2161 | min_freq = policy->cpuinfo.max_freq * |
| 2208 | perf_limits->min_sysfs_pct / 100; | 2162 | global.min_sysfs_pct / 100; |
| 2209 | cpufreq_verify_within_limits(policy, min_freq, max_freq); | 2163 | cpufreq_verify_within_limits(policy, min_freq, max_freq); |
| 2210 | } | 2164 | } |
| 2211 | 2165 | ||
| @@ -2257,7 +2211,7 @@ static int __intel_pstate_cpu_init(struct cpufreq_policy *policy) | |||
| 2257 | /* cpuinfo and default policy values */ | 2211 | /* cpuinfo and default policy values */ |
| 2258 | policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; | 2212 | policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; |
| 2259 | update_turbo_state(); | 2213 | update_turbo_state(); |
| 2260 | policy->cpuinfo.max_freq = limits->turbo_disabled ? | 2214 | policy->cpuinfo.max_freq = global.turbo_disabled ? |
| 2261 | cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; | 2215 | cpu->pstate.max_pstate : cpu->pstate.turbo_pstate; |
| 2262 | policy->cpuinfo.max_freq *= cpu->pstate.scaling; | 2216 | policy->cpuinfo.max_freq *= cpu->pstate.scaling; |
| 2263 | 2217 | ||
| @@ -2277,7 +2231,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy) | |||
| 2277 | return ret; | 2231 | return ret; |
| 2278 | 2232 | ||
| 2279 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 2233 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
| 2280 | if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100) | 2234 | if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE)) |
| 2281 | policy->policy = CPUFREQ_POLICY_PERFORMANCE; | 2235 | policy->policy = CPUFREQ_POLICY_PERFORMANCE; |
| 2282 | else | 2236 | else |
| 2283 | policy->policy = CPUFREQ_POLICY_POWERSAVE; | 2237 | policy->policy = CPUFREQ_POLICY_POWERSAVE; |
| @@ -2303,7 +2257,7 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) | |||
| 2303 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | 2257 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
| 2304 | 2258 | ||
| 2305 | update_turbo_state(); | 2259 | update_turbo_state(); |
| 2306 | policy->cpuinfo.max_freq = limits->turbo_disabled ? | 2260 | policy->cpuinfo.max_freq = global.no_turbo || global.turbo_disabled ? |
| 2307 | cpu->pstate.max_freq : cpu->pstate.turbo_freq; | 2261 | cpu->pstate.max_freq : cpu->pstate.turbo_freq; |
| 2308 | 2262 | ||
| 2309 | cpufreq_verify_within_cpu_limits(policy); | 2263 | cpufreq_verify_within_cpu_limits(policy); |
| @@ -2311,26 +2265,6 @@ static int intel_cpufreq_verify_policy(struct cpufreq_policy *policy) | |||
| 2311 | return 0; | 2265 | return 0; |
| 2312 | } | 2266 | } |
| 2313 | 2267 | ||
| 2314 | static unsigned int intel_cpufreq_turbo_update(struct cpudata *cpu, | ||
| 2315 | struct cpufreq_policy *policy, | ||
| 2316 | unsigned int target_freq) | ||
| 2317 | { | ||
| 2318 | unsigned int max_freq; | ||
| 2319 | |||
| 2320 | update_turbo_state(); | ||
| 2321 | |||
| 2322 | max_freq = limits->no_turbo || limits->turbo_disabled ? | ||
| 2323 | cpu->pstate.max_freq : cpu->pstate.turbo_freq; | ||
| 2324 | policy->cpuinfo.max_freq = max_freq; | ||
| 2325 | if (policy->max > max_freq) | ||
| 2326 | policy->max = max_freq; | ||
| 2327 | |||
| 2328 | if (target_freq > max_freq) | ||
| 2329 | target_freq = max_freq; | ||
| 2330 | |||
| 2331 | return target_freq; | ||
| 2332 | } | ||
| 2333 | |||
| 2334 | static int intel_cpufreq_target(struct cpufreq_policy *policy, | 2268 | static int intel_cpufreq_target(struct cpufreq_policy *policy, |
| 2335 | unsigned int target_freq, | 2269 | unsigned int target_freq, |
| 2336 | unsigned int relation) | 2270 | unsigned int relation) |
| @@ -2339,8 +2273,10 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy, | |||
| 2339 | struct cpufreq_freqs freqs; | 2273 | struct cpufreq_freqs freqs; |
| 2340 | int target_pstate; | 2274 | int target_pstate; |
| 2341 | 2275 | ||
| 2276 | update_turbo_state(); | ||
| 2277 | |||
| 2342 | freqs.old = policy->cur; | 2278 | freqs.old = policy->cur; |
| 2343 | freqs.new = intel_cpufreq_turbo_update(cpu, policy, target_freq); | 2279 | freqs.new = target_freq; |
| 2344 | 2280 | ||
| 2345 | cpufreq_freq_transition_begin(policy, &freqs); | 2281 | cpufreq_freq_transition_begin(policy, &freqs); |
| 2346 | switch (relation) { | 2282 | switch (relation) { |
| @@ -2372,7 +2308,8 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy, | |||
| 2372 | struct cpudata *cpu = all_cpu_data[policy->cpu]; | 2308 | struct cpudata *cpu = all_cpu_data[policy->cpu]; |
| 2373 | int target_pstate; | 2309 | int target_pstate; |
| 2374 | 2310 | ||
| 2375 | target_freq = intel_cpufreq_turbo_update(cpu, policy, target_freq); | 2311 | update_turbo_state(); |
| 2312 | |||
| 2376 | target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); | 2313 | target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling); |
| 2377 | target_pstate = intel_pstate_prepare_request(cpu, target_pstate); | 2314 | target_pstate = intel_pstate_prepare_request(cpu, target_pstate); |
| 2378 | intel_pstate_update_pstate(cpu, target_pstate); | 2315 | intel_pstate_update_pstate(cpu, target_pstate); |
| @@ -2427,13 +2364,7 @@ static int intel_pstate_register_driver(void) | |||
| 2427 | { | 2364 | { |
| 2428 | int ret; | 2365 | int ret; |
| 2429 | 2366 | ||
| 2430 | intel_pstate_init_limits(&powersave_limits); | 2367 | intel_pstate_init_limits(&global); |
| 2431 | intel_pstate_set_performance_limits(&performance_limits); | ||
| 2432 | if (IS_ENABLED(CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE) && | ||
| 2433 | intel_pstate_driver == &intel_pstate) | ||
| 2434 | limits = &performance_limits; | ||
| 2435 | else | ||
| 2436 | limits = &powersave_limits; | ||
| 2437 | 2368 | ||
| 2438 | ret = cpufreq_register_driver(intel_pstate_driver); | 2369 | ret = cpufreq_register_driver(intel_pstate_driver); |
| 2439 | if (ret) { | 2370 | if (ret) { |
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c index c5adc8c9ac43..ae948b1da93a 100644 --- a/drivers/cpuidle/sysfs.c +++ b/drivers/cpuidle/sysfs.c | |||
| @@ -615,6 +615,18 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev) | |||
| 615 | struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); | 615 | struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu); |
| 616 | int error; | 616 | int error; |
| 617 | 617 | ||
| 618 | /* | ||
| 619 | * Return if cpu_device is not set up for this CPU. | ||
| 620 | * | ||
| 621 | * This could happen if the arch did not set up cpu_device | ||
| 622 | * since this CPU is not in cpu_present mask and the | ||
| 623 | * driver did not send a correct CPU mask during registration. | ||
| 624 | * Without this check we would end up passing bogus | ||
| 625 | * value for &cpu_dev->kobj in kobject_init_and_add() | ||
| 626 | */ | ||
| 627 | if (!cpu_dev) | ||
| 628 | return -ENODEV; | ||
| 629 | |||
| 618 | kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); | 630 | kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); |
| 619 | if (!kdev) | 631 | if (!kdev) |
| 620 | return -ENOMEM; | 632 | return -ENOMEM; |
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c index 511ab042b5e7..92d1c6959f08 100644 --- a/drivers/crypto/ccp/ccp-dev.c +++ b/drivers/crypto/ccp/ccp-dev.c | |||
| @@ -283,11 +283,14 @@ EXPORT_SYMBOL_GPL(ccp_version); | |||
| 283 | */ | 283 | */ |
| 284 | int ccp_enqueue_cmd(struct ccp_cmd *cmd) | 284 | int ccp_enqueue_cmd(struct ccp_cmd *cmd) |
| 285 | { | 285 | { |
| 286 | struct ccp_device *ccp = ccp_get_device(); | 286 | struct ccp_device *ccp; |
| 287 | unsigned long flags; | 287 | unsigned long flags; |
| 288 | unsigned int i; | 288 | unsigned int i; |
| 289 | int ret; | 289 | int ret; |
| 290 | 290 | ||
| 291 | /* Some commands might need to be sent to a specific device */ | ||
| 292 | ccp = cmd->ccp ? cmd->ccp : ccp_get_device(); | ||
| 293 | |||
| 291 | if (!ccp) | 294 | if (!ccp) |
| 292 | return -ENODEV; | 295 | return -ENODEV; |
| 293 | 296 | ||
diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c index e5d9278f4019..8d0eeb46d4a2 100644 --- a/drivers/crypto/ccp/ccp-dmaengine.c +++ b/drivers/crypto/ccp/ccp-dmaengine.c | |||
| @@ -390,6 +390,7 @@ static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan, | |||
| 390 | goto err; | 390 | goto err; |
| 391 | 391 | ||
| 392 | ccp_cmd = &cmd->ccp_cmd; | 392 | ccp_cmd = &cmd->ccp_cmd; |
| 393 | ccp_cmd->ccp = chan->ccp; | ||
| 393 | ccp_pt = &ccp_cmd->u.passthru_nomap; | 394 | ccp_pt = &ccp_cmd->u.passthru_nomap; |
| 394 | ccp_cmd->flags = CCP_CMD_MAY_BACKLOG; | 395 | ccp_cmd->flags = CCP_CMD_MAY_BACKLOG; |
| 395 | ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP; | 396 | ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP; |
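The two ccp hunks are halves of one routing contract for systems with more than one CCP: the dmaengine side stamps each descriptor with the device that owns its channel, and the enqueue path honors the stamp instead of always round-robining through ccp_get_device(). Condensed (names from the hunks above):

	/* producer (ccp-dmaengine): pin the command to the channel's device */
	ccp_cmd->ccp = chan->ccp;

	/* consumer (ccp_enqueue_cmd): honor the pin, else pick any device */
	ccp = cmd->ccp ? cmd->ccp : ccp_get_device();
	if (!ccp)
		return -ENODEV;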
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index 8d9829ff2a78..80c6db279ae1 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c | |||
| @@ -427,6 +427,7 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) | |||
| 427 | int rc = VM_FAULT_SIGBUS; | 427 | int rc = VM_FAULT_SIGBUS; |
| 428 | phys_addr_t phys; | 428 | phys_addr_t phys; |
| 429 | pfn_t pfn; | 429 | pfn_t pfn; |
| 430 | unsigned int fault_size = PAGE_SIZE; | ||
| 430 | 431 | ||
| 431 | if (check_vma(dax_dev, vmf->vma, __func__)) | 432 | if (check_vma(dax_dev, vmf->vma, __func__)) |
| 432 | return VM_FAULT_SIGBUS; | 433 | return VM_FAULT_SIGBUS; |
| @@ -437,9 +438,12 @@ static int __dax_dev_pte_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) | |||
| 437 | return VM_FAULT_SIGBUS; | 438 | return VM_FAULT_SIGBUS; |
| 438 | } | 439 | } |
| 439 | 440 | ||
| 441 | if (fault_size != dax_region->align) | ||
| 442 | return VM_FAULT_SIGBUS; | ||
| 443 | |||
| 440 | phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE); | 444 | phys = pgoff_to_phys(dax_dev, vmf->pgoff, PAGE_SIZE); |
| 441 | if (phys == -1) { | 445 | if (phys == -1) { |
| 442 | dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, | 446 | dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, |
| 443 | vmf->pgoff); | 447 | vmf->pgoff); |
| 444 | return VM_FAULT_SIGBUS; | 448 | return VM_FAULT_SIGBUS; |
| 445 | } | 449 | } |
| @@ -464,6 +468,7 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) | |||
| 464 | phys_addr_t phys; | 468 | phys_addr_t phys; |
| 465 | pgoff_t pgoff; | 469 | pgoff_t pgoff; |
| 466 | pfn_t pfn; | 470 | pfn_t pfn; |
| 471 | unsigned int fault_size = PMD_SIZE; | ||
| 467 | 472 | ||
| 468 | if (check_vma(dax_dev, vmf->vma, __func__)) | 473 | if (check_vma(dax_dev, vmf->vma, __func__)) |
| 469 | return VM_FAULT_SIGBUS; | 474 | return VM_FAULT_SIGBUS; |
| @@ -480,10 +485,20 @@ static int __dax_dev_pmd_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) | |||
| 480 | return VM_FAULT_SIGBUS; | 485 | return VM_FAULT_SIGBUS; |
| 481 | } | 486 | } |
| 482 | 487 | ||
| 488 | if (fault_size < dax_region->align) | ||
| 489 | return VM_FAULT_SIGBUS; | ||
| 490 | else if (fault_size > dax_region->align) | ||
| 491 | return VM_FAULT_FALLBACK; | ||
| 492 | |||
| 493 | /* if we are outside of the VMA */ | ||
| 494 | if (pmd_addr < vmf->vma->vm_start || | ||
| 495 | (pmd_addr + PMD_SIZE) > vmf->vma->vm_end) | ||
| 496 | return VM_FAULT_SIGBUS; | ||
| 497 | |||
| 483 | pgoff = linear_page_index(vmf->vma, pmd_addr); | 498 | pgoff = linear_page_index(vmf->vma, pmd_addr); |
| 484 | phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE); | 499 | phys = pgoff_to_phys(dax_dev, pgoff, PMD_SIZE); |
| 485 | if (phys == -1) { | 500 | if (phys == -1) { |
| 486 | dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, | 501 | dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, |
| 487 | pgoff); | 502 | pgoff); |
| 488 | return VM_FAULT_SIGBUS; | 503 | return VM_FAULT_SIGBUS; |
| 489 | } | 504 | } |
| @@ -503,6 +518,8 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) | |||
| 503 | phys_addr_t phys; | 518 | phys_addr_t phys; |
| 504 | pgoff_t pgoff; | 519 | pgoff_t pgoff; |
| 505 | pfn_t pfn; | 520 | pfn_t pfn; |
| 521 | unsigned int fault_size = PUD_SIZE; | ||
| 522 | |||
| 506 | 523 | ||
| 507 | if (check_vma(dax_dev, vmf->vma, __func__)) | 524 | if (check_vma(dax_dev, vmf->vma, __func__)) |
| 508 | return VM_FAULT_SIGBUS; | 525 | return VM_FAULT_SIGBUS; |
| @@ -519,10 +536,20 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf) | |||
| 519 | return VM_FAULT_SIGBUS; | 536 | return VM_FAULT_SIGBUS; |
| 520 | } | 537 | } |
| 521 | 538 | ||
| 539 | if (fault_size < dax_region->align) | ||
| 540 | return VM_FAULT_SIGBUS; | ||
| 541 | else if (fault_size > dax_region->align) | ||
| 542 | return VM_FAULT_FALLBACK; | ||
| 543 | |||
| 544 | /* if we are outside of the VMA */ | ||
| 545 | if (pud_addr < vmf->vma->vm_start || | ||
| 546 | (pud_addr + PUD_SIZE) > vmf->vma->vm_end) | ||
| 547 | return VM_FAULT_SIGBUS; | ||
| 548 | |||
| 522 | pgoff = linear_page_index(vmf->vma, pud_addr); | 549 | pgoff = linear_page_index(vmf->vma, pud_addr); |
| 523 | phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE); | 550 | phys = pgoff_to_phys(dax_dev, pgoff, PUD_SIZE); |
| 524 | if (phys == -1) { | 551 | if (phys == -1) { |
| 525 | dev_dbg(dev, "%s: phys_to_pgoff(%#lx) failed\n", __func__, | 552 | dev_dbg(dev, "%s: pgoff_to_phys(%#lx) failed\n", __func__, |
| 526 | pgoff); | 553 | pgoff); |
| 527 | return VM_FAULT_SIGBUS; | 554 | return VM_FAULT_SIGBUS; |
| 528 | } | 555 | } |
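The dax fault handlers now validate the fault granularity against the region alignment before translating anything. At PTE level a mismatch is fatal, since there is no smaller mapping to fall back to; at PMD/PUD level a too-small fault is fatal but a too-large one returns fallback so the core retries with a smaller mapping; and the huge paths refuse mappings that would spill past the VMA. The policy in one place:

	/*
	 * fault_size is PAGE_SIZE, PMD_SIZE or PUD_SIZE per handler:
	 *
	 *	fault_size <  dax_region->align	-> VM_FAULT_SIGBUS
	 *	fault_size == dax_region->align	-> proceed
	 *	fault_size >  dax_region->align	-> VM_FAULT_FALLBACK (huge only)
	 *
	 * and for PMD/PUD faults the whole mapping must fit in the VMA:
	 *
	 *	if (addr < vma->vm_start || addr + fault_size > vma->vm_end)
	 *		return VM_FAULT_SIGBUS;
	 */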
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 82d85cce81f8..4773f2867234 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig | |||
| @@ -43,6 +43,7 @@ config EDAC_LEGACY_SYSFS | |||
| 43 | 43 | ||
| 44 | config EDAC_DEBUG | 44 | config EDAC_DEBUG |
| 45 | bool "Debugging" | 45 | bool "Debugging" |
| 46 | select DEBUG_FS | ||
| 46 | help | 47 | help |
| 47 | This turns on debugging information for the entire EDAC subsystem. | 48 | This turns on debugging information for the entire EDAC subsystem. |
| 48 | You do so by inserting edac_module with "edac_debug_level=x." Valid | 49 | You do so by inserting edac_module with "edac_debug_level=x." Valid |
| @@ -259,6 +260,15 @@ config EDAC_SKX | |||
| 259 | Support for error detection and correction the Intel | 260 | Support for error detection and correction the Intel |
| 260 | Skylake server Integrated Memory Controllers. | 261 | Skylake server Integrated Memory Controllers. |
| 261 | 262 | ||
| 263 | config EDAC_PND2 | ||
| 264 | tristate "Intel Pondicherry2" | ||
| 265 | depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL | ||
| 266 | help | ||
| 267 | Support for error detection and correction on the Intel | ||
| 268 | Pondicherry2 Integrated Memory Controller. This SoC IP is | ||
| 269 | first used on the Apollo Lake platform and Denverton | ||
| 270 | micro-server but may appear on others in the future. | ||
| 271 | |||
| 262 | config EDAC_MPC85XX | 272 | config EDAC_MPC85XX |
| 263 | tristate "Freescale MPC83xx / MPC85xx" | 273 | tristate "Freescale MPC83xx / MPC85xx" |
| 264 | depends on EDAC_MM_EDAC && FSL_SOC | 274 | depends on EDAC_MM_EDAC && FSL_SOC |
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile index 88e472e8b9a9..587107e90996 100644 --- a/drivers/edac/Makefile +++ b/drivers/edac/Makefile | |||
| @@ -32,6 +32,7 @@ obj-$(CONFIG_EDAC_I7300) += i7300_edac.o | |||
| 32 | obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o | 32 | obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o |
| 33 | obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o | 33 | obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o |
| 34 | obj-$(CONFIG_EDAC_SKX) += skx_edac.o | 34 | obj-$(CONFIG_EDAC_SKX) += skx_edac.o |
| 35 | obj-$(CONFIG_EDAC_PND2) += pnd2_edac.o | ||
| 35 | obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o | 36 | obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o |
| 36 | obj-$(CONFIG_EDAC_E752X) += e752x_edac.o | 37 | obj-$(CONFIG_EDAC_E752X) += e752x_edac.o |
| 37 | obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o | 38 | obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o |
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index 1670d27bcac8..f683919981b0 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c | |||
| @@ -1293,7 +1293,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci) | |||
| 1293 | dimm->mtype = MEM_FB_DDR2; | 1293 | dimm->mtype = MEM_FB_DDR2; |
| 1294 | 1294 | ||
| 1295 | /* ask what device type on this row */ | 1295 | /* ask what device type on this row */ |
| 1296 | if (MTR_DRAM_WIDTH(mtr)) | 1296 | if (MTR_DRAM_WIDTH(mtr) == 8) |
| 1297 | dimm->dtype = DEV_X8; | 1297 | dimm->dtype = DEV_X8; |
| 1298 | else | 1298 | else |
| 1299 | dimm->dtype = DEV_X4; | 1299 | dimm->dtype = DEV_X4; |
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c index abf6ef22e220..37a9ba71da44 100644 --- a/drivers/edac/i5400_edac.c +++ b/drivers/edac/i5400_edac.c | |||
| @@ -1207,13 +1207,14 @@ static int i5400_init_dimms(struct mem_ctl_info *mci) | |||
| 1207 | 1207 | ||
| 1208 | dimm->nr_pages = size_mb << 8; | 1208 | dimm->nr_pages = size_mb << 8; |
| 1209 | dimm->grain = 8; | 1209 | dimm->grain = 8; |
| 1210 | dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4; | 1210 | dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ? |
| 1211 | DEV_X8 : DEV_X4; | ||
| 1211 | dimm->mtype = MEM_FB_DDR2; | 1212 | dimm->mtype = MEM_FB_DDR2; |
| 1212 | /* | 1213 | /* |
| 1213 | * The ECC mechanism is SDDC (aka SECC), which | 1214 | * The ECC mechanism is SDDC (aka SECC), which |
| 1214 | * is similar to Chipkill. | 1215 | * is similar to Chipkill. |
| 1215 | */ | 1216 | */ |
| 1216 | dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ? | 1217 | dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ? |
| 1217 | EDAC_S8ECD8ED : EDAC_S4ECD4ED; | 1218 | EDAC_S8ECD8ED : EDAC_S4ECD4ED; |
| 1218 | ndimms++; | 1219 | ndimms++; |
| 1219 | } | 1220 | } |
diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c new file mode 100644 index 000000000000..928e0dba41fc --- /dev/null +++ b/drivers/edac/pnd2_edac.c | |||
| @@ -0,0 +1,1546 @@ | |||
| 1 | /* | ||
| 2 | * Driver for Pondicherry2 memory controller. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016, Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | * | ||
| 15 | * [Derived from sb_edac.c] | ||
| 16 | * | ||
| 17 | * Translation of system physical addresses to DIMM addresses | ||
| 18 | * is a two-stage process: | ||
| 19 | * | ||
| 20 | * First the Pondicherry 2 memory controller handles slice and channel interleaving | ||
| 21 | * in "sys2pmi()". This is (almost) completley common between platforms. | ||
| 22 | * | ||
| 23 | * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM, | ||
| 24 | * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters. | ||
| 25 | */ | ||
| 26 | |||
| 27 | #include <linux/module.h> | ||
| 28 | #include <linux/init.h> | ||
| 29 | #include <linux/pci.h> | ||
| 30 | #include <linux/pci_ids.h> | ||
| 31 | #include <linux/slab.h> | ||
| 32 | #include <linux/delay.h> | ||
| 33 | #include <linux/edac.h> | ||
| 34 | #include <linux/mmzone.h> | ||
| 35 | #include <linux/smp.h> | ||
| 36 | #include <linux/bitmap.h> | ||
| 37 | #include <linux/math64.h> | ||
| 38 | #include <linux/mod_devicetable.h> | ||
| 39 | #include <asm/cpu_device_id.h> | ||
| 40 | #include <asm/intel-family.h> | ||
| 41 | #include <asm/processor.h> | ||
| 42 | #include <asm/mce.h> | ||
| 43 | |||
| 44 | #include "edac_mc.h" | ||
| 45 | #include "edac_module.h" | ||
| 46 | #include "pnd2_edac.h" | ||
| 47 | |||
| 48 | #define APL_NUM_CHANNELS 4 | ||
| 49 | #define DNV_NUM_CHANNELS 2 | ||
| 50 | #define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */ | ||
| 51 | |||
| 52 | enum type { | ||
| 53 | APL, | ||
| 54 | DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */ | ||
| 55 | }; | ||
| 56 | |||
| 57 | struct dram_addr { | ||
| 58 | int chan; | ||
| 59 | int dimm; | ||
| 60 | int rank; | ||
| 61 | int bank; | ||
| 62 | int row; | ||
| 63 | int col; | ||
| 64 | }; | ||
| 65 | |||
| 66 | struct pnd2_pvt { | ||
| 67 | int dimm_geom[APL_NUM_CHANNELS]; | ||
| 68 | u64 tolm, tohm; | ||
| 69 | }; | ||
| 70 | |||
| 71 | /* | ||
| 72 | * System address space is divided into multiple regions with | ||
| 73 | * different interleave rules in each. The as0/as1 regions | ||
| 74 | * have no interleaving at all. The as2 region is interleaved | ||
| 75 | * between two channels. The mot region is magic and may overlap | ||
| 76 | * other regions, with its interleave rules taking precedence. | ||
| 77 | * Addresses not in any of these regions are interleaved across | ||
| 78 | * all four channels. | ||
| 79 | */ | ||
| 80 | static struct region { | ||
| 81 | u64 base; | ||
| 82 | u64 limit; | ||
| 83 | u8 enabled; | ||
| 84 | } mot, as0, as1, as2; | ||
| 85 | |||
| 86 | static struct dunit_ops { | ||
| 87 | char *name; | ||
| 88 | enum type type; | ||
| 89 | int pmiaddr_shift; | ||
| 90 | int pmiidx_shift; | ||
| 91 | int channels; | ||
| 92 | int dimms_per_channel; | ||
| 93 | int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name); | ||
| 94 | int (*get_registers)(void); | ||
| 95 | int (*check_ecc)(void); | ||
| 96 | void (*mk_region)(char *name, struct region *rp, void *asym); | ||
| 97 | void (*get_dimm_config)(struct mem_ctl_info *mci); | ||
| 98 | int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx, | ||
| 99 | struct dram_addr *daddr, char *msg); | ||
| 100 | } *ops; | ||
| 101 | |||
| 102 | static struct mem_ctl_info *pnd2_mci; | ||
| 103 | |||
| 104 | #define PND2_MSG_SIZE 256 | ||
| 105 | |||
| 106 | /* Debug macros */ | ||
| 107 | #define pnd2_printk(level, fmt, arg...) \ | ||
| 108 | edac_printk(level, "pnd2", fmt, ##arg) | ||
| 109 | |||
| 110 | #define pnd2_mc_printk(mci, level, fmt, arg...) \ | ||
| 111 | edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg) | ||
| 112 | |||
| 113 | #define MOT_CHAN_INTLV_BIT_1SLC_2CH 12 | ||
| 114 | #define MOT_CHAN_INTLV_BIT_2SLC_2CH 13 | ||
| 115 | #define SELECTOR_DISABLED (-1) | ||
| 116 | #define _4GB (1ul << 32) | ||
| 117 | |||
| 118 | #define PMI_ADDRESS_WIDTH 31 | ||
| 119 | #define PND_MAX_PHYS_BIT 39 | ||
| 120 | |||
| 121 | #define APL_ASYMSHIFT 28 | ||
| 122 | #define DNV_ASYMSHIFT 31 | ||
| 123 | #define CH_HASH_MASK_LSB 6 | ||
| 124 | #define SLICE_HASH_MASK_LSB 6 | ||
| 125 | #define MOT_SLC_INTLV_BIT 12 | ||
| 126 | #define LOG2_PMI_ADDR_GRANULARITY 5 | ||
| 127 | #define MOT_SHIFT 24 | ||
| 128 | |||
| 129 | #define GET_BITFIELD(v, lo, hi) (((v) & GENMASK_ULL(hi, lo)) >> (lo)) | ||
| 130 | #define U64_LSHIFT(val, s) ((u64)(val) << (s)) | ||
| 131 | |||
| 132 | #ifdef CONFIG_X86_INTEL_SBI_APL | ||
| 133 | #include "linux/platform_data/sbi_apl.h" | ||
| 134 | int sbi_send(int port, int off, int op, u32 *data) | ||
| 135 | { | ||
| 136 | struct sbi_apl_message sbi_arg; | ||
| 137 | int ret, read = 0; | ||
| 138 | |||
| 139 | memset(&sbi_arg, 0, sizeof(sbi_arg)); | ||
| 140 | |||
| 141 | if (op == 0 || op == 4 || op == 6) | ||
| 142 | read = 1; | ||
| 143 | else | ||
| 144 | sbi_arg.data = *data; | ||
| 145 | |||
| 146 | sbi_arg.opcode = op; | ||
| 147 | sbi_arg.port_address = port; | ||
| 148 | sbi_arg.register_offset = off; | ||
| 149 | ret = sbi_apl_commit(&sbi_arg); | ||
| 150 | if (ret || sbi_arg.status) | ||
| 151 | edac_dbg(2, "sbi_send status=%d ret=%d data=%x\n", | ||
| 152 | sbi_arg.status, ret, sbi_arg.data); | ||
| 153 | |||
| 154 | if (ret == 0) | ||
| 155 | ret = sbi_arg.status; | ||
| 156 | |||
| 157 | if (ret == 0 && read) | ||
| 158 | *data = sbi_arg.data; | ||
| 159 | |||
| 160 | return ret; | ||
| 161 | } | ||
| 162 | #else | ||
| 163 | int sbi_send(int port, int off, int op, u32 *data) | ||
| 164 | { | ||
| 165 | return -EUNATCH; | ||
| 166 | } | ||
| 167 | #endif | ||
| 168 | |||
| 169 | static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name) | ||
| 170 | { | ||
| 171 | int ret = 0; | ||
| 172 | |||
| 173 | edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op); | ||
| 174 | switch (sz) { | ||
| 175 | case 8: | ||
| 176 | ret = sbi_send(port, off + 4, op, (u32 *)(data + 4)); | ||
| 177 | case 4: | ||
| 178 | ret = sbi_send(port, off, op, (u32 *)data); | ||
| 179 | pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name, | ||
| 180 | sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret); | ||
| 181 | break; | ||
| 182 | } | ||
| 183 | |||
| 184 | return ret; | ||
| 185 | } | ||
| 186 | |||
| 187 | static u64 get_mem_ctrl_hub_base_addr(void) | ||
| 188 | { | ||
| 189 | struct b_cr_mchbar_lo_pci lo; | ||
| 190 | struct b_cr_mchbar_hi_pci hi; | ||
| 191 | struct pci_dev *pdev; | ||
| 192 | |||
| 193 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL); | ||
| 194 | if (pdev) { | ||
| 195 | pci_read_config_dword(pdev, 0x48, (u32 *)&lo); | ||
| 196 | pci_read_config_dword(pdev, 0x4c, (u32 *)&hi); | ||
| 197 | pci_dev_put(pdev); | ||
| 198 | } else { | ||
| 199 | return 0; | ||
| 200 | } | ||
| 201 | |||
| 202 | if (!lo.enable) { | ||
| 203 | edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n"); | ||
| 204 | return 0; | ||
| 205 | } | ||
| 206 | |||
| 207 | return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15); | ||
| 208 | } | ||
| 209 | |||
| 210 | static u64 get_sideband_reg_base_addr(void) | ||
| 211 | { | ||
| 212 | struct pci_dev *pdev; | ||
| 213 | u32 hi, lo; | ||
| 214 | |||
| 215 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL); | ||
| 216 | if (pdev) { | ||
| 217 | pci_read_config_dword(pdev, 0x10, &lo); | ||
| 218 | pci_read_config_dword(pdev, 0x14, &hi); | ||
| 219 | pci_dev_put(pdev); | ||
| 220 | return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0)); | ||
| 221 | } else { | ||
| 222 | return 0xfd000000; | ||
| 223 | } | ||
| 224 | } | ||
| 225 | |||
| 226 | static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name) | ||
| 227 | { | ||
| 228 | struct pci_dev *pdev; | ||
| 229 | char *base; | ||
| 230 | u64 addr; | ||
| 231 | |||
| 232 | if (op == 4) { | ||
| 233 | pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL); | ||
| 234 | if (!pdev) | ||
| 235 | return -ENODEV; | ||
| 236 | |||
| 237 | pci_read_config_dword(pdev, off, data); | ||
| 238 | pci_dev_put(pdev); | ||
| 239 | } else { | ||
| 240 | /* MMIO via memory controller hub base address */ | ||
| 241 | if (op == 0 && port == 0x4c) { | ||
| 242 | addr = get_mem_ctrl_hub_base_addr(); | ||
| 243 | if (!addr) | ||
| 244 | return -ENODEV; | ||
| 245 | } else { | ||
| 246 | /* MMIO via sideband register base address */ | ||
| 247 | addr = get_sideband_reg_base_addr(); | ||
| 248 | if (!addr) | ||
| 249 | return -ENODEV; | ||
| 250 | addr += (port << 16); | ||
| 251 | } | ||
| 252 | |||
| 253 | base = ioremap((resource_size_t)addr, 0x10000); | ||
| 254 | if (!base) | ||
| 255 | return -ENODEV; | ||
| 256 | |||
| 257 | if (sz == 8) | ||
| 258 | *(u32 *)(data + 4) = *(u32 *)(base + off + 4); | ||
| 259 | *(u32 *)data = *(u32 *)(base + off); | ||
| 260 | |||
| 261 | iounmap(base); | ||
| 262 | } | ||
| 263 | |||
| 264 | edac_dbg(2, "Read %s=%.8x_%.8x\n", name, | ||
| 265 | (sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data); | ||
| 266 | |||
| 267 | return 0; | ||
| 268 | } | ||
| 269 | |||
| 270 | #define RD_REGP(regp, regname, port) \ | ||
| 271 | ops->rd_reg(port, \ | ||
| 272 | regname##_offset, \ | ||
| 273 | regname##_r_opcode, \ | ||
| 274 | regp, sizeof(struct regname), \ | ||
| 275 | #regname) | ||
| 276 | |||
| 277 | #define RD_REG(regp, regname) \ | ||
| 278 | ops->rd_reg(regname ## _port, \ | ||
| 279 | regname##_offset, \ | ||
| 280 | regname##_r_opcode, \ | ||
| 281 | regp, sizeof(struct regname), \ | ||
| 282 | #regname) | ||
| 283 | |||
| 284 | static u64 top_lm, top_hm; | ||
| 285 | static bool two_slices; | ||
| 286 | static bool two_channels; /* Both PMI channels in one slice enabled */ | ||
| 287 | |||
| 288 | static u8 sym_chan_mask; | ||
| 289 | static u8 asym_chan_mask; | ||
| 290 | static u8 chan_mask; | ||
| 291 | |||
| 292 | static int slice_selector = -1; | ||
| 293 | static int chan_selector = -1; | ||
| 294 | static u64 slice_hash_mask; | ||
| 295 | static u64 chan_hash_mask; | ||
| 296 | |||
| 297 | static void mk_region(char *name, struct region *rp, u64 base, u64 limit) | ||
| 298 | { | ||
| 299 | rp->enabled = 1; | ||
| 300 | rp->base = base; | ||
| 301 | rp->limit = limit; | ||
| 302 | edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit); | ||
| 303 | } | ||
| 304 | |||
| 305 | static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask) | ||
| 306 | { | ||
| 307 | if (mask == 0) { | ||
| 308 | pr_info(FW_BUG "MOT mask cannot be zero\n"); | ||
| 309 | return; | ||
| 310 | } | ||
| 311 | if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) { | ||
| 312 | pr_info(FW_BUG "MOT mask not power of two\n"); | ||
| 313 | return; | ||
| 314 | } | ||
| 315 | if (base & ~mask) { | ||
| 316 | pr_info(FW_BUG "MOT region base/mask alignment error\n"); | ||
| 317 | return; | ||
| 318 | } | ||
| 319 | rp->base = base; | ||
| 320 | rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0); | ||
| 321 | rp->enabled = 1; | ||
| 322 | edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit); | ||
| 323 | } | ||
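mk_region_mask() only accepts a MOT mask that is a single run of ones reaching the top supported physical bit, with the base aligned inside it; anything else is flagged as firmware breakage rather than decoded wrongly. A worked example with PND_MAX_PHYS_BIT = 39:

	/*
	 * mask = bits 39..28 (0xfff0000000):
	 *	GENMASK_ULL(39, __ffs(mask)) == GENMASK_ULL(39, 28) == mask -> ok
	 * base = bit 33 (0x200000000): base & ~mask == 0 -> ok
	 * limit = (base | ~mask) & GENMASK_ULL(39, 0) = base | 0x0fffffff
	 *
	 * A mask with holes (e.g. 0xf0f0000000) fails the GENMASK test; a
	 * base with bits below the mask (e.g. 0x123) fails the alignment test.
	 */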
| 324 | |||
| 325 | static bool in_region(struct region *rp, u64 addr) | ||
| 326 | { | ||
| 327 | if (!rp->enabled) | ||
| 328 | return false; | ||
| 329 | |||
| 330 | return rp->base <= addr && addr <= rp->limit; | ||
| 331 | } | ||
| 332 | |||
| 333 | static int gen_sym_mask(struct b_cr_slice_channel_hash *p) | ||
| 334 | { | ||
| 335 | int mask = 0; | ||
| 336 | |||
| 337 | if (!p->slice_0_mem_disabled) | ||
| 338 | mask |= p->sym_slice0_channel_enabled; | ||
| 339 | |||
| 340 | if (!p->slice_1_disabled) | ||
| 341 | mask |= p->sym_slice1_channel_enabled << 2; | ||
| 342 | |||
| 343 | if (p->ch_1_disabled || p->enable_pmi_dual_data_mode) | ||
| 344 | mask &= 0x5; | ||
| 345 | |||
| 346 | return mask; | ||
| 347 | } | ||
| 348 | |||
| 349 | static int gen_asym_mask(struct b_cr_slice_channel_hash *p, | ||
| 350 | struct b_cr_asym_mem_region0_mchbar *as0, | ||
| 351 | struct b_cr_asym_mem_region1_mchbar *as1, | ||
| 352 | struct b_cr_asym_2way_mem_region_mchbar *as2way) | ||
| 353 | { | ||
| 354 | const int intlv[] = { 0x5, 0xA, 0x3, 0xC }; | ||
| 355 | int mask = 0; | ||
| 356 | |||
| 357 | if (as2way->asym_2way_interleave_enable) | ||
| 358 | mask = intlv[as2way->asym_2way_intlv_mode]; | ||
| 359 | if (as0->slice0_asym_enable) | ||
| 360 | mask |= (1 << as0->slice0_asym_channel_select); | ||
| 361 | if (as1->slice1_asym_enable) | ||
| 362 | mask |= (4 << as1->slice1_asym_channel_select); | ||
| 363 | if (p->slice_0_mem_disabled) | ||
| 364 | mask &= 0xc; | ||
| 365 | if (p->slice_1_disabled) | ||
| 366 | mask &= 0x3; | ||
| 367 | if (p->ch_1_disabled || p->enable_pmi_dual_data_mode) | ||
| 368 | mask &= 0x5; | ||
| 369 | |||
| 370 | return mask; | ||
| 371 | } | ||
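gen_sym_mask() and gen_asym_mask() both build a four-bit channel mask with one bit per PMI channel, so the magic constants are just slice/channel selectors on that layout:

	/*
	 * bit 0: slice 0, channel 0	bit 2: slice 1, channel 0
	 * bit 1: slice 0, channel 1	bit 3: slice 1, channel 1
	 *
	 * & 0x5 keeps channel 0 of each slice (CH1 disabled or dual-data
	 * mode), & 0x3 keeps slice 0 only, & 0xc keeps slice 1 only; the
	 * intlv[] table picks channel pairs for the 2-way interleave modes.
	 */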
| 372 | |||
| 373 | static struct b_cr_tolud_pci tolud; | ||
| 374 | static struct b_cr_touud_lo_pci touud_lo; | ||
| 375 | static struct b_cr_touud_hi_pci touud_hi; | ||
| 376 | static struct b_cr_asym_mem_region0_mchbar asym0; | ||
| 377 | static struct b_cr_asym_mem_region1_mchbar asym1; | ||
| 378 | static struct b_cr_asym_2way_mem_region_mchbar asym_2way; | ||
| 379 | static struct b_cr_mot_out_base_mchbar mot_base; | ||
| 380 | static struct b_cr_mot_out_mask_mchbar mot_mask; | ||
| 381 | static struct b_cr_slice_channel_hash chash; | ||
| 382 | |||
| 383 | /* Apollo Lake dunit */ | ||
| 384 | /* | ||
| 385 | * Validated on a board with just two DIMMs in the [0] and [2] positions | ||
| 386 | * in this array. The other port numbers match the documentation, but | ||
| 387 | * caution is advised. | ||
| 388 | */ | ||
| 389 | static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 }; | ||
| 390 | static struct d_cr_drp0 drp0[APL_NUM_CHANNELS]; | ||
| 391 | |||
| 392 | /* Denverton dunit */ | ||
| 393 | static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 }; | ||
| 394 | static struct d_cr_dsch dsch; | ||
| 395 | static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS]; | ||
| 396 | static struct d_cr_drp drp[DNV_NUM_CHANNELS]; | ||
| 397 | static struct d_cr_dmap dmap[DNV_NUM_CHANNELS]; | ||
| 398 | static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS]; | ||
| 399 | static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS]; | ||
| 400 | static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS]; | ||
| 401 | static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS]; | ||
| 402 | static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS]; | ||
| 403 | |||
| 404 | static void apl_mk_region(char *name, struct region *rp, void *asym) | ||
| 405 | { | ||
| 406 | struct b_cr_asym_mem_region0_mchbar *a = asym; | ||
| 407 | |||
| 408 | mk_region(name, rp, | ||
| 409 | U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT), | ||
| 410 | U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) + | ||
| 411 | GENMASK_ULL(APL_ASYMSHIFT - 1, 0)); | ||
| 412 | } | ||
| 413 | |||
| 414 | static void dnv_mk_region(char *name, struct region *rp, void *asym) | ||
| 415 | { | ||
| 416 | struct b_cr_asym_mem_region_denverton *a = asym; | ||
| 417 | |||
| 418 | mk_region(name, rp, | ||
| 419 | U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT), | ||
| 420 | U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) + | ||
| 421 | GENMASK_ULL(DNV_ASYMSHIFT - 1, 0)); | ||
| 422 | } | ||
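| | /* | ||
| | * Editor's note: both mk_region helpers above convert a base/limit | ||
| | * register pair into an inclusive byte range: limit = | ||
| | * (field << SHIFT) + (2^SHIFT - 1), so the range covers the whole | ||
| | * last granule. With a hypothetical 28-bit shift, a limit field of | ||
| | * 0x3 would give an inclusive limit of 0x3fffffff. | ||
| | */ | ||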
| 423 | |||
| 424 | static int apl_get_registers(void) | ||
| 425 | { | ||
| 426 | int i; | ||
| 427 | |||
| 428 | if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar)) | ||
| 429 | return -ENODEV; | ||
| 430 | |||
| 431 | for (i = 0; i < APL_NUM_CHANNELS; i++) | ||
| 432 | if (RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i])) | ||
| 433 | return -ENODEV; | ||
| 434 | |||
| 435 | return 0; | ||
| 436 | } | ||
| 437 | |||
| 438 | static int dnv_get_registers(void) | ||
| 439 | { | ||
| 440 | int i; | ||
| 441 | |||
| 442 | if (RD_REG(&dsch, d_cr_dsch)) | ||
| 443 | return -ENODEV; | ||
| 444 | |||
| 445 | for (i = 0; i < DNV_NUM_CHANNELS; i++) | ||
| 446 | if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) || | ||
| 447 | RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) || | ||
| 448 | RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) || | ||
| 449 | RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) || | ||
| 450 | RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) || | ||
| 451 | RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) || | ||
| 452 | RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) || | ||
| 453 | RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i])) | ||
| 454 | return -ENODEV; | ||
| 455 | |||
| 456 | return 0; | ||
| 457 | } | ||
| 458 | |||
| 459 | /* | ||
| 460 | * Read all the h/w config registers once here (they don't | ||
| 461 | * change at run time). Figure out which address ranges have | ||
| 462 | * which interleave characteristics. | ||
| 463 | */ | ||
| 464 | static int get_registers(void) | ||
| 465 | { | ||
| 466 | const int intlv[] = { 10, 11, 12, 12 }; | ||
| 467 | |||
| 468 | if (RD_REG(&tolud, b_cr_tolud_pci) || | ||
| 469 | RD_REG(&touud_lo, b_cr_touud_lo_pci) || | ||
| 470 | RD_REG(&touud_hi, b_cr_touud_hi_pci) || | ||
| 471 | RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) || | ||
| 472 | RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) || | ||
| 473 | RD_REG(&mot_base, b_cr_mot_out_base_mchbar) || | ||
| 474 | RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) || | ||
| 475 | RD_REG(&chash, b_cr_slice_channel_hash)) | ||
| 476 | return -ENODEV; | ||
| 477 | |||
| 478 | if (ops->get_registers()) | ||
| 479 | return -ENODEV; | ||
| 480 | |||
| 481 | if (ops->type == DNV) { | ||
| 482 | /* PMI channel idx (always 0) for asymmetric region */ | ||
| 483 | asym0.slice0_asym_channel_select = 0; | ||
| 484 | asym1.slice1_asym_channel_select = 0; | ||
| 485 | /* PMI channel bitmap (always 1) for symmetric region */ | ||
| 486 | chash.sym_slice0_channel_enabled = 0x1; | ||
| 487 | chash.sym_slice1_channel_enabled = 0x1; | ||
| 488 | } | ||
| 489 | |||
| 490 | if (asym0.slice0_asym_enable) | ||
| 491 | ops->mk_region("as0", &as0, &asym0); | ||
| 492 | |||
| 493 | if (asym1.slice1_asym_enable) | ||
| 494 | ops->mk_region("as1", &as1, &asym1); | ||
| 495 | |||
| 496 | if (asym_2way.asym_2way_interleave_enable) { | ||
| 497 | mk_region("as2way", &as2, | ||
| 498 | U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT), | ||
| 499 | U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) + | ||
| 500 | GENMASK_ULL(APL_ASYMSHIFT - 1, 0)); | ||
| 501 | } | ||
| 502 | |||
| 503 | if (mot_base.imr_en) { | ||
| 504 | mk_region_mask("mot", &mot, | ||
| 505 | U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT), | ||
| 506 | U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT)); | ||
| 507 | } | ||
| 508 | |||
| 509 | top_lm = U64_LSHIFT(tolud.tolud, 20); | ||
| 510 | top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20); | ||
| 511 | |||
| 512 | two_slices = !chash.slice_1_disabled && | ||
| 513 | !chash.slice_0_mem_disabled && | ||
| 514 | (chash.sym_slice0_channel_enabled != 0) && | ||
| 515 | (chash.sym_slice1_channel_enabled != 0); | ||
| 516 | two_channels = !chash.ch_1_disabled && | ||
| 517 | !chash.enable_pmi_dual_data_mode && | ||
| 518 | ((chash.sym_slice0_channel_enabled == 3) || | ||
| 519 | (chash.sym_slice1_channel_enabled == 3)); | ||
| 520 | |||
| 521 | sym_chan_mask = gen_sym_mask(&chash); | ||
| 522 | asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way); | ||
| 523 | chan_mask = sym_chan_mask | asym_chan_mask; | ||
| 524 | |||
| 525 | if (two_slices && !two_channels) { | ||
| 526 | if (chash.hvm_mode) | ||
| 527 | slice_selector = 29; | ||
| 528 | else | ||
| 529 | slice_selector = intlv[chash.interleave_mode]; | ||
| 530 | } else if (!two_slices && two_channels) { | ||
| 531 | if (chash.hvm_mode) | ||
| 532 | chan_selector = 29; | ||
| 533 | else | ||
| 534 | chan_selector = intlv[chash.interleave_mode]; | ||
| 535 | } else if (two_slices && two_channels) { | ||
| 536 | if (chash.hvm_mode) { | ||
| 537 | slice_selector = 29; | ||
| 538 | chan_selector = 30; | ||
| 539 | } else { | ||
| 540 | slice_selector = intlv[chash.interleave_mode]; | ||
| 541 | chan_selector = intlv[chash.interleave_mode] + 1; | ||
| 542 | } | ||
| 543 | } | ||
| 544 | |||
| 545 | if (two_slices) { | ||
| 546 | if (!chash.hvm_mode) | ||
| 547 | slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB; | ||
| 548 | if (!two_channels) | ||
| 549 | slice_hash_mask |= BIT_ULL(slice_selector); | ||
| 550 | } | ||
| 551 | |||
| 552 | if (two_channels) { | ||
| 553 | if (!chash.hvm_mode) | ||
| 554 | chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB; | ||
| 555 | if (!two_slices) | ||
| 556 | chan_hash_mask |= BIT_ULL(chan_selector); | ||
| 557 | } | ||
| 558 | |||
| 559 | return 0; | ||
| 560 | } | ||
| 561 | |||
| 562 | /* Get a contiguous memory address (remove the MMIO gap) */ | ||
| 563 | static u64 remove_mmio_gap(u64 sys) | ||
| 564 | { | ||
| 565 | return (sys < _4GB) ? sys : sys - (_4GB - top_lm); | ||
| 566 | } | ||
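| | /* | ||
| | * Editor's note: e.g. with top_lm = 2 GiB the MMIO hole spans | ||
| | * [2 GiB, 4 GiB), so a system address of 5 GiB maps to contiguous | ||
| | * DRAM offset 5 GiB - (4 GiB - 2 GiB) = 3 GiB. | ||
| | */ | ||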
| 567 | |||
| 568 | /* Squeeze out one address bit, shift upper part down to fill gap */ | ||
| 569 | static void remove_addr_bit(u64 *addr, int bitidx) | ||
| 570 | { | ||
| 571 | u64 mask; | ||
| 572 | |||
| 573 | if (bitidx == -1) | ||
| 574 | return; | ||
| 575 | |||
| 576 | mask = (1ull << bitidx) - 1; | ||
| 577 | *addr = ((*addr >> 1) & ~mask) | (*addr & mask); | ||
| 578 | } | ||
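| | /* | ||
| | * Editor's note: e.g. remove_addr_bit(&a, 2) with a = 0b101101 keeps | ||
| | * the low two bits, shifts everything above bit 2 down one place, | ||
| | * and yields 0b10101. | ||
| | */ | ||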
| 579 | |||
| 580 | /* XOR all the bits from addr specified in mask */ | ||
| 581 | static int hash_by_mask(u64 addr, u64 mask) | ||
| 582 | { | ||
| 583 | u64 result = addr & mask; | ||
| 584 | |||
| 585 | result = (result >> 32) ^ result; | ||
| 586 | result = (result >> 16) ^ result; | ||
| 587 | result = (result >> 8) ^ result; | ||
| 588 | result = (result >> 4) ^ result; | ||
| 589 | result = (result >> 2) ^ result; | ||
| 590 | result = (result >> 1) ^ result; | ||
| 591 | |||
| 592 | return (int)result & 1; | ||
| 593 | } | ||
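| | /* | ||
| | * Editor's note: this is a parity fold - it returns | ||
| | * popcount(addr & mask) mod 2. E.g. addr = 0xc, mask = 0xa selects a | ||
| | * single set bit (0x8), so the result is 1. | ||
| | */ | ||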
| 594 | |||
| 595 | /* | ||
| 596 | * First stage decode. Take the system address and figure out which | ||
| 597 | * second stage will deal with it based on interleave modes. | ||
| 598 | */ | ||
| 599 | static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg) | ||
| 600 | { | ||
| 601 | u64 contig_addr, contig_base, contig_offset, contig_base_adj; | ||
| 602 | int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH : | ||
| 603 | MOT_CHAN_INTLV_BIT_1SLC_2CH; | ||
| 604 | int slice_intlv_bit_rm = SELECTOR_DISABLED; | ||
| 605 | int chan_intlv_bit_rm = SELECTOR_DISABLED; | ||
| 606 | /* Determine if address is in the MOT region. */ | ||
| 607 | bool mot_hit = in_region(&mot, addr); | ||
| 608 | /* Calculate the number of symmetric channels enabled. */ | ||
| 609 | int sym_channels = hweight8(sym_chan_mask); | ||
| 610 | |||
| 611 | /* | ||
| 612 | * The amount we need to shift the asym base can be determined by the | ||
| 613 | * number of enabled symmetric channels. | ||
| 614 | * NOTE: This can only work because symmetric memory is not supposed | ||
| 615 | * to do a 3-way interleave. | ||
| 616 | */ | ||
| 617 | int sym_chan_shift = sym_channels >> 1; | ||
| 618 | |||
| 619 | /* Give up if address is out of range, or in MMIO gap */ | ||
| 620 | if (addr >= (1ull << PND_MAX_PHYS_BIT) || | ||
| 621 | (addr >= top_lm && addr < _4GB) || addr >= top_hm) { | ||
| 622 | snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr); | ||
| 623 | return -EINVAL; | ||
| 624 | } | ||
| 625 | |||
| 626 | /* Get a contiguous memory address (remove the MMIO gap) */ | ||
| 627 | contig_addr = remove_mmio_gap(addr); | ||
| 628 | |||
| 629 | if (in_region(&as0, addr)) { | ||
| 630 | *pmiidx = asym0.slice0_asym_channel_select; | ||
| 631 | |||
| 632 | contig_base = remove_mmio_gap(as0.base); | ||
| 633 | contig_offset = contig_addr - contig_base; | ||
| 634 | contig_base_adj = (contig_base >> sym_chan_shift) * | ||
| 635 | ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1); | ||
| 636 | contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull); | ||
| 637 | } else if (in_region(&as1, addr)) { | ||
| 638 | *pmiidx = 2u + asym1.slice1_asym_channel_select; | ||
| 639 | |||
| 640 | contig_base = remove_mmio_gap(as1.base); | ||
| 641 | contig_offset = contig_addr - contig_base; | ||
| 642 | contig_base_adj = (contig_base >> sym_chan_shift) * | ||
| 643 | ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1); | ||
| 644 | contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull); | ||
| 645 | } else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) { | ||
| 646 | bool channel1; | ||
| 647 | |||
| 648 | mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH; | ||
| 649 | *pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1; | ||
| 650 | channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) : | ||
| 651 | hash_by_mask(contig_addr, chan_hash_mask); | ||
| 652 | *pmiidx |= (u32)channel1; | ||
| 653 | |||
| 654 | contig_base = remove_mmio_gap(as2.base); | ||
| 655 | chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector; | ||
| 656 | contig_offset = contig_addr - contig_base; | ||
| 657 | remove_addr_bit(&contig_offset, chan_intlv_bit_rm); | ||
| 658 | contig_addr = (contig_base >> sym_chan_shift) + contig_offset; | ||
| 659 | } else { | ||
| 660 | /* Otherwise we're in normal, boring symmetric mode. */ | ||
| 661 | *pmiidx = 0u; | ||
| 662 | |||
| 663 | if (two_slices) { | ||
| 664 | bool slice1; | ||
| 665 | |||
| 666 | if (mot_hit) { | ||
| 667 | slice_intlv_bit_rm = MOT_SLC_INTLV_BIT; | ||
| 668 | slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1; | ||
| 669 | } else { | ||
| 670 | slice_intlv_bit_rm = slice_selector; | ||
| 671 | slice1 = hash_by_mask(addr, slice_hash_mask); | ||
| 672 | } | ||
| 673 | |||
| 674 | *pmiidx = (u32)slice1 << 1; | ||
| 675 | } | ||
| 676 | |||
| 677 | if (two_channels) { | ||
| 678 | bool channel1; | ||
| 679 | |||
| 680 | mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH : | ||
| 681 | MOT_CHAN_INTLV_BIT_1SLC_2CH; | ||
| 682 | |||
| 683 | if (mot_hit) { | ||
| 684 | chan_intlv_bit_rm = mot_intlv_bit; | ||
| 685 | channel1 = (addr >> mot_intlv_bit) & 1; | ||
| 686 | } else { | ||
| 687 | chan_intlv_bit_rm = chan_selector; | ||
| 688 | channel1 = hash_by_mask(contig_addr, chan_hash_mask); | ||
| 689 | } | ||
| 690 | |||
| 691 | *pmiidx |= (u32)channel1; | ||
| 692 | } | ||
| 693 | } | ||
| 694 | |||
| 695 | /* Remove the chan_selector bit first */ | ||
| 696 | remove_addr_bit(&contig_addr, chan_intlv_bit_rm); | ||
| 697 | /* Remove the slice bit (we remove it second because it must be lower) */ | ||
| 698 | remove_addr_bit(&contig_addr, slice_intlv_bit_rm); | ||
| 699 | *pmiaddr = contig_addr; | ||
| 700 | |||
| 701 | return 0; | ||
| 702 | } | ||
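| | /* | ||
| | * Editor's note on the symmetric path above: the slice bit (when two | ||
| | * slices are enabled) becomes bit 1 of pmiidx and the channel bit | ||
| | * becomes bit 0, each taken either from a fixed address bit (MOT hit) | ||
| | * or from an XOR hash over the firmware-programmed mask. Both | ||
| | * interleave bits are then squeezed out so that the resulting PMI | ||
| | * address is contiguous per channel. | ||
| | */ | ||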
| 703 | |||
| 704 | /* Translate PMI address to memory (rank, row, bank, column) */ | ||
| 705 | #define C(n) (0x10 | (n)) /* column */ | ||
| 706 | #define B(n) (0x20 | (n)) /* bank */ | ||
| 707 | #define R(n) (0x40 | (n)) /* row */ | ||
| 708 | #define RS (0x80) /* rank */ | ||
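| | /* | ||
| | * Editor's note: each bits[] entry below packs a type tag in the high | ||
| | * bits and a bit index in the low nibble; apl_pmi2mem() splits them | ||
| | * with "& ~0xf" and "& 0xf". A zero entry terminates the mapping. | ||
| | */ | ||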
| 709 | |||
| 710 | /* addrdec values */ | ||
| 711 | #define AMAP_1KB 0 | ||
| 712 | #define AMAP_2KB 1 | ||
| 713 | #define AMAP_4KB 2 | ||
| 714 | #define AMAP_RSVD 3 | ||
| 715 | |||
| 716 | /* dden values */ | ||
| 717 | #define DEN_4Gb 0 | ||
| 718 | #define DEN_8Gb 2 | ||
| 719 | |||
| 720 | /* dwid values */ | ||
| 721 | #define X8 0 | ||
| 722 | #define X16 1 | ||
| 723 | |||
| 724 | static struct dimm_geometry { | ||
| 725 | u8 addrdec; | ||
| 726 | u8 dden; | ||
| 727 | u8 dwid; | ||
| 728 | u8 rowbits, colbits; | ||
| 729 | u16 bits[PMI_ADDRESS_WIDTH]; | ||
| 730 | } dimms[] = { | ||
| 731 | { | ||
| 732 | .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16, | ||
| 733 | .rowbits = 15, .colbits = 10, | ||
| 734 | .bits = { | ||
| 735 | C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), | ||
| 736 | R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), | ||
| 737 | R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14), | ||
| 738 | 0, 0, 0, 0 | ||
| 739 | } | ||
| 740 | }, | ||
| 741 | { | ||
| 742 | .addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8, | ||
| 743 | .rowbits = 16, .colbits = 10, | ||
| 744 | .bits = { | ||
| 745 | C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), | ||
| 746 | R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), | ||
| 747 | R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14), | ||
| 748 | R(15), 0, 0, 0 | ||
| 749 | } | ||
| 750 | }, | ||
| 751 | { | ||
| 752 | .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16, | ||
| 753 | .rowbits = 16, .colbits = 10, | ||
| 754 | .bits = { | ||
| 755 | C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), | ||
| 756 | R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), | ||
| 757 | R(10), C(7), C(8), C(9), R(11), RS, R(12), R(13), R(14), | ||
| 758 | R(15), 0, 0, 0 | ||
| 759 | } | ||
| 760 | }, | ||
| 761 | { | ||
| 762 | .addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8, | ||
| 763 | .rowbits = 16, .colbits = 11, | ||
| 764 | .bits = { | ||
| 765 | C(2), C(3), C(4), C(5), C(6), B(0), B(1), B(2), R(0), | ||
| 766 | R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), R(9), | ||
| 767 | R(10), C(7), C(8), C(9), R(11), RS, C(11), R(12), R(13), | ||
| 768 | R(14), R(15), 0, 0 | ||
| 769 | } | ||
| 770 | }, | ||
| 771 | { | ||
| 772 | .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16, | ||
| 773 | .rowbits = 15, .colbits = 10, | ||
| 774 | .bits = { | ||
| 775 | C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), | ||
| 776 | R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), | ||
| 777 | R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14), | ||
| 778 | 0, 0, 0, 0 | ||
| 779 | } | ||
| 780 | }, | ||
| 781 | { | ||
| 782 | .addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8, | ||
| 783 | .rowbits = 16, .colbits = 10, | ||
| 784 | .bits = { | ||
| 785 | C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), | ||
| 786 | R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), | ||
| 787 | R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14), | ||
| 788 | R(15), 0, 0, 0 | ||
| 789 | } | ||
| 790 | }, | ||
| 791 | { | ||
| 792 | .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16, | ||
| 793 | .rowbits = 16, .colbits = 10, | ||
| 794 | .bits = { | ||
| 795 | C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), | ||
| 796 | R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), | ||
| 797 | R(9), R(10), C(8), C(9), R(11), RS, R(12), R(13), R(14), | ||
| 798 | R(15), 0, 0, 0 | ||
| 799 | } | ||
| 800 | }, | ||
| 801 | { | ||
| 802 | .addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8, | ||
| 803 | .rowbits = 16, .colbits = 11, | ||
| 804 | .bits = { | ||
| 805 | C(2), C(3), C(4), C(5), C(6), C(7), B(0), B(1), B(2), | ||
| 806 | R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), R(8), | ||
| 807 | R(9), R(10), C(8), C(9), R(11), RS, C(11), R(12), R(13), | ||
| 808 | R(14), R(15), 0, 0 | ||
| 809 | } | ||
| 810 | }, | ||
| 811 | { | ||
| 812 | .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16, | ||
| 813 | .rowbits = 15, .colbits = 10, | ||
| 814 | .bits = { | ||
| 815 | C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), | ||
| 816 | B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), | ||
| 817 | R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14), | ||
| 818 | 0, 0, 0, 0 | ||
| 819 | } | ||
| 820 | }, | ||
| 821 | { | ||
| 822 | .addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8, | ||
| 823 | .rowbits = 16, .colbits = 10, | ||
| 824 | .bits = { | ||
| 825 | C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), | ||
| 826 | B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), | ||
| 827 | R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14), | ||
| 828 | R(15), 0, 0, 0 | ||
| 829 | } | ||
| 830 | }, | ||
| 831 | { | ||
| 832 | .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16, | ||
| 833 | .rowbits = 16, .colbits = 10, | ||
| 834 | .bits = { | ||
| 835 | C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), | ||
| 836 | B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), | ||
| 837 | R(8), R(9), R(10), C(9), R(11), RS, R(12), R(13), R(14), | ||
| 838 | R(15), 0, 0, 0 | ||
| 839 | } | ||
| 840 | }, | ||
| 841 | { | ||
| 842 | .addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8, | ||
| 843 | .rowbits = 16, .colbits = 11, | ||
| 844 | .bits = { | ||
| 845 | C(2), C(3), C(4), C(5), C(6), C(7), C(8), B(0), B(1), | ||
| 846 | B(2), R(0), R(1), R(2), R(3), R(4), R(5), R(6), R(7), | ||
| 847 | R(8), R(9), R(10), C(9), R(11), RS, C(11), R(12), R(13), | ||
| 848 | R(14), R(15), 0, 0 | ||
| 849 | } | ||
| 850 | } | ||
| 851 | }; | ||
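| | /* | ||
| | * Editor's note: apl_get_dimm_config() selects an entry here by | ||
| | * matching (addrdec, dden, dwid) against the channel's drp0 register, | ||
| | * and apl_pmi2mem() then maps PMI address bit i to the DRAM coordinate | ||
| | * in bits[i]; in the first entry, for example, the lowest PMI bit | ||
| | * becomes column bit 2. | ||
| | */ | ||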
| 852 | |||
| 853 | static int bank_hash(u64 pmiaddr, int idx, int shft) | ||
| 854 | { | ||
| 855 | int bhash = 0; | ||
| 856 | |||
| 857 | switch (idx) { | ||
| 858 | case 0: | ||
| 859 | bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1; | ||
| 860 | break; | ||
| 861 | case 1: | ||
| 862 | bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1; | ||
| 863 | bhash ^= ((pmiaddr >> 22) & 1) << 1; | ||
| 864 | break; | ||
| 865 | case 2: | ||
| 866 | bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2; | ||
| 867 | break; | ||
| 868 | } | ||
| 869 | |||
| 870 | return bhash; | ||
| 871 | } | ||
| 872 | |||
| 873 | static int rank_hash(u64 pmiaddr) | ||
| 874 | { | ||
| 875 | return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1; | ||
| 876 | } | ||
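| | /* | ||
| | * Editor's note: these hashes are applied only when drp0 enables them | ||
| | * (bahen for bank bits, rsien for rank select); e.g. rank_hash() XORs | ||
| | * pmiaddr bits 16 and 10 into the rank-select bit. | ||
| | */ | ||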
| 877 | |||
| 878 | /* Second stage decode. Compute rank, bank, row & column. */ | ||
| 879 | static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx, | ||
| 880 | struct dram_addr *daddr, char *msg) | ||
| 881 | { | ||
| 882 | struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx]; | ||
| 883 | struct pnd2_pvt *pvt = mci->pvt_info; | ||
| 884 | int g = pvt->dimm_geom[pmiidx]; | ||
| 885 | struct dimm_geometry *d = &dimms[g]; | ||
| 886 | int column = 0, bank = 0, row = 0, rank = 0; | ||
| 887 | int i, idx, type, skiprs = 0; | ||
| 888 | |||
| 889 | for (i = 0; i < PMI_ADDRESS_WIDTH; i++) { | ||
| 890 | int bit = (pmiaddr >> i) & 1; | ||
| 891 | |||
| 892 | if (i + skiprs >= PMI_ADDRESS_WIDTH) { | ||
| 893 | snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n"); | ||
| 894 | return -EINVAL; | ||
| 895 | } | ||
| 896 | |||
| 897 | type = d->bits[i + skiprs] & ~0xf; | ||
| 898 | idx = d->bits[i + skiprs] & 0xf; | ||
| 899 | |||
| 900 | /* | ||
| 901 | * On single rank DIMMs ignore the rank select bit | ||
| 902 | * and shift remainder of "bits[]" down one place. | ||
| 903 | */ | ||
| 904 | if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) { | ||
| 905 | skiprs = 1; | ||
| 906 | type = d->bits[i + skiprs] & ~0xf; | ||
| 907 | idx = d->bits[i + skiprs] & 0xf; | ||
| 908 | } | ||
| 909 | |||
| 910 | switch (type) { | ||
| 911 | case C(0): | ||
| 912 | column |= (bit << idx); | ||
| 913 | break; | ||
| 914 | case B(0): | ||
| 915 | bank |= (bit << idx); | ||
| 916 | if (cr_drp0->bahen) | ||
| 917 | bank ^= bank_hash(pmiaddr, idx, d->addrdec); | ||
| 918 | break; | ||
| 919 | case R(0): | ||
| 920 | row |= (bit << idx); | ||
| 921 | break; | ||
| 922 | case RS: | ||
| 923 | rank = bit; | ||
| 924 | if (cr_drp0->rsien) | ||
| 925 | rank ^= rank_hash(pmiaddr); | ||
| 926 | break; | ||
| 927 | default: | ||
| 928 | if (bit) { | ||
| 929 | snprintf(msg, PND2_MSG_SIZE, "Bad translation\n"); | ||
| 930 | return -EINVAL; | ||
| 931 | } | ||
| 932 | goto done; | ||
| 933 | } | ||
| 934 | } | ||
| 935 | |||
| 936 | done: | ||
| 937 | daddr->col = column; | ||
| 938 | daddr->bank = bank; | ||
| 939 | daddr->row = row; | ||
| 940 | daddr->rank = rank; | ||
| 941 | daddr->dimm = 0; | ||
| 942 | |||
| 943 | return 0; | ||
| 944 | } | ||
| 945 | |||
| 946 | /* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */ | ||
| 947 | #define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out)) | ||
| 948 | |||
| 949 | static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx, | ||
| 950 | struct dram_addr *daddr, char *msg) | ||
| 951 | { | ||
| 952 | /* Rank 0 or 1 */ | ||
| 953 | daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0); | ||
| 954 | /* Rank 2 or 3 */ | ||
| 955 | daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1); | ||
| 956 | |||
| 957 | /* | ||
| 958 | * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we | ||
| 959 | * flip them if DIMM1 is larger than DIMM0. | ||
| 960 | */ | ||
| 961 | daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip; | ||
| 962 | |||
| 963 | daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0); | ||
| 964 | daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1); | ||
| 965 | daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2); | ||
| 966 | if (dsch.ddr4en) | ||
| 967 | daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3); | ||
| 968 | if (dmap1[pmiidx].bxor) { | ||
| 969 | if (dsch.ddr4en) { | ||
| 970 | daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0); | ||
| 971 | daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1); | ||
| 972 | if (dsch.chan_width == 0) | ||
| 973 | /* 64/72 bit dram channel width */ | ||
| 974 | daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2); | ||
| 975 | else | ||
| 976 | /* 32/40 bit dram channel width */ | ||
| 977 | daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2); | ||
| 978 | daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3); | ||
| 979 | } else { | ||
| 980 | daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0); | ||
| 981 | daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1); | ||
| 982 | if (dsch.chan_width == 0) | ||
| 983 | daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2); | ||
| 984 | else | ||
| 985 | daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2); | ||
| 986 | } | ||
| 987 | } | ||
| 988 | |||
| 989 | daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0); | ||
| 990 | daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1); | ||
| 991 | daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2); | ||
| 992 | daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3); | ||
| 993 | daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4); | ||
| 994 | daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5); | ||
| 995 | daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6); | ||
| 996 | daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7); | ||
| 997 | daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8); | ||
| 998 | daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9); | ||
| 999 | daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10); | ||
| 1000 | daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11); | ||
| 1001 | daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12); | ||
| 1002 | daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13); | ||
| 1003 | if (dmap4[pmiidx].row14 != 31) | ||
| 1004 | daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14); | ||
| 1005 | if (dmap4[pmiidx].row15 != 31) | ||
| 1006 | daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15); | ||
| 1007 | if (dmap4[pmiidx].row16 != 31) | ||
| 1008 | daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16); | ||
| 1009 | if (dmap4[pmiidx].row17 != 31) | ||
| 1010 | daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17); | ||
| 1011 | |||
| 1012 | daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3); | ||
| 1013 | daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4); | ||
| 1014 | daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5); | ||
| 1015 | daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6); | ||
| 1016 | daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7); | ||
| 1017 | daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8); | ||
| 1018 | daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9); | ||
| 1019 | if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f) | ||
| 1020 | daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11); | ||
| 1021 | |||
| 1022 | return 0; | ||
| 1023 | } | ||
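| | /* | ||
| | * Editor's note: the "+ 6" (and "+ 13" for the rank-select and ca11 | ||
| | * fields) offsets above suggest the dmap* register fields encode | ||
| | * source bit positions relative to PMI address bit 6 (bit 13 for the | ||
| | * rank/ca11 fields); a field value of 31 (0x3f for ca11) appears to | ||
| | * mean "not used". | ||
| | */ | ||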
| 1024 | |||
| 1025 | static int check_channel(int ch) | ||
| 1026 | { | ||
| 1027 | if (drp0[ch].dramtype != 0) { | ||
| 1028 | pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch); | ||
| 1029 | return 1; | ||
| 1030 | } else if (drp0[ch].eccen == 0) { | ||
| 1031 | pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch); | ||
| 1032 | return 1; | ||
| 1033 | } | ||
| 1034 | return 0; | ||
| 1035 | } | ||
| 1036 | |||
| 1037 | static int apl_check_ecc_active(void) | ||
| 1038 | { | ||
| 1039 | int i, ret = 0; | ||
| 1040 | |||
| 1041 | /* Check dramtype and ECC mode for each present DIMM */ | ||
| 1042 | for (i = 0; i < APL_NUM_CHANNELS; i++) | ||
| 1043 | if (chan_mask & BIT(i)) | ||
| 1044 | ret += check_channel(i); | ||
| 1045 | return ret ? -EINVAL : 0; | ||
| 1046 | } | ||
| 1047 | |||
| 1048 | #define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3) | ||
| 1049 | |||
| 1050 | static int check_unit(int ch) | ||
| 1051 | { | ||
| 1052 | struct d_cr_drp *d = &drp[ch]; | ||
| 1053 | |||
| 1054 | if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) { | ||
| 1055 | pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch); | ||
| 1056 | return 1; | ||
| 1057 | } | ||
| 1058 | return 0; | ||
| 1059 | } | ||
| 1060 | |||
| 1061 | static int dnv_check_ecc_active(void) | ||
| 1062 | { | ||
| 1063 | int i, ret = 0; | ||
| 1064 | |||
| 1065 | for (i = 0; i < DNV_NUM_CHANNELS; i++) | ||
| 1066 | ret += check_unit(i); | ||
| 1067 | return ret ? -EINVAL : 0; | ||
| 1068 | } | ||
| 1069 | |||
| 1070 | static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr, | ||
| 1071 | struct dram_addr *daddr, char *msg) | ||
| 1072 | { | ||
| 1073 | u64 pmiaddr; | ||
| 1074 | u32 pmiidx; | ||
| 1075 | int ret; | ||
| 1076 | |||
| 1077 | ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg); | ||
| 1078 | if (ret) | ||
| 1079 | return ret; | ||
| 1080 | |||
| 1081 | pmiaddr >>= ops->pmiaddr_shift; | ||
| 1082 | /* pmi channel idx to dimm channel idx */ | ||
| 1083 | pmiidx >>= ops->pmiidx_shift; | ||
| 1084 | daddr->chan = pmiidx; | ||
| 1085 | |||
| 1086 | ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg); | ||
| 1087 | if (ret) | ||
| 1088 | return ret; | ||
| 1089 | |||
| 1090 | edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n", | ||
| 1091 | addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col); | ||
| 1092 | |||
| 1093 | return 0; | ||
| 1094 | } | ||
| 1095 | |||
| 1096 | static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m, | ||
| 1097 | struct dram_addr *daddr) | ||
| 1098 | { | ||
| 1099 | enum hw_event_mc_err_type tp_event; | ||
| 1100 | char *optype, msg[PND2_MSG_SIZE]; | ||
| 1101 | bool ripv = m->mcgstatus & MCG_STATUS_RIPV; | ||
| 1102 | bool overflow = m->status & MCI_STATUS_OVER; | ||
| 1103 | bool uc_err = m->status & MCI_STATUS_UC; | ||
| 1104 | bool recov = m->status & MCI_STATUS_S; | ||
| 1105 | u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52); | ||
| 1106 | u32 mscod = GET_BITFIELD(m->status, 16, 31); | ||
| 1107 | u32 errcode = GET_BITFIELD(m->status, 0, 15); | ||
| 1108 | u32 optypenum = GET_BITFIELD(m->status, 4, 6); | ||
| 1109 | int rc; | ||
| 1110 | |||
| 1111 | tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) : | ||
| 1112 | HW_EVENT_ERR_CORRECTED; | ||
| 1113 | |||
| 1114 | /* | ||
| 1115 | * According to Table 15-9 of the Intel Architecture spec vol 3A, | ||
| 1116 | * memory errors should fit in this mask: | ||
| 1117 | * 000f 0000 1mmm cccc (binary) | ||
| 1118 | * where: | ||
| 1119 | * f = Correction Report Filtering Bit. If 1, subsequent errors | ||
| 1120 | * won't be shown | ||
| 1121 | * mmm = error type | ||
| 1122 | * cccc = channel | ||
| 1123 | * If the mask doesn't match, report an error to the parsing logic | ||
| 1124 | */ | ||
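| | /* | ||
| | * Editor's note: e.g. errcode 0x009f passes the memory-error check | ||
| | * below (0x009f & 0xef80 == 0x0080): mmm = 001 is a memory read error | ||
| | * and cccc = 0xf leaves the channel unreported. The debugfs test hook | ||
| | * later in this file fabricates exactly this status value. | ||
| | */ | ||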
| 1125 | if ((errcode & 0xef80) != 0x80) { | ||
| 1126 | optype = "Can't parse: it is not a memory error"; | ||
| 1127 | } else { | ||
| 1128 | switch (optypenum) { | ||
| 1129 | case 0: | ||
| 1130 | optype = "generic undef request error"; | ||
| 1131 | break; | ||
| 1132 | case 1: | ||
| 1133 | optype = "memory read error"; | ||
| 1134 | break; | ||
| 1135 | case 2: | ||
| 1136 | optype = "memory write error"; | ||
| 1137 | break; | ||
| 1138 | case 3: | ||
| 1139 | optype = "addr/cmd error"; | ||
| 1140 | break; | ||
| 1141 | case 4: | ||
| 1142 | optype = "memory scrubbing error"; | ||
| 1143 | break; | ||
| 1144 | default: | ||
| 1145 | optype = "reserved"; | ||
| 1146 | break; | ||
| 1147 | } | ||
| 1148 | } | ||
| 1149 | |||
| 1150 | /* Only decode errors with a valid address (ADDRV) */ | ||
| 1151 | if (!(m->status & MCI_STATUS_ADDRV)) | ||
| 1152 | return; | ||
| 1153 | |||
| 1154 | rc = get_memory_error_data(mci, m->addr, daddr, msg); | ||
| 1155 | if (rc) | ||
| 1156 | goto address_error; | ||
| 1157 | |||
| 1158 | snprintf(msg, sizeof(msg), | ||
| 1159 | "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d", | ||
| 1160 | overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod, | ||
| 1161 | errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col); | ||
| 1162 | |||
| 1163 | edac_dbg(0, "%s\n", msg); | ||
| 1164 | |||
| 1165 | /* Call the helper to output message */ | ||
| 1166 | edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT, | ||
| 1167 | m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg); | ||
| 1168 | |||
| 1169 | return; | ||
| 1170 | |||
| 1171 | address_error: | ||
| 1172 | edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, ""); | ||
| 1173 | } | ||
| 1174 | |||
| 1175 | static void apl_get_dimm_config(struct mem_ctl_info *mci) | ||
| 1176 | { | ||
| 1177 | struct pnd2_pvt *pvt = mci->pvt_info; | ||
| 1178 | struct dimm_info *dimm; | ||
| 1179 | struct d_cr_drp0 *d; | ||
| 1180 | u64 capacity; | ||
| 1181 | int i, g; | ||
| 1182 | |||
| 1183 | for (i = 0; i < APL_NUM_CHANNELS; i++) { | ||
| 1184 | if (!(chan_mask & BIT(i))) | ||
| 1185 | continue; | ||
| 1186 | |||
| 1187 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, 0, 0); | ||
| 1188 | if (!dimm) { | ||
| 1189 | edac_dbg(0, "No allocated DIMM for channel %d\n", i); | ||
| 1190 | continue; | ||
| 1191 | } | ||
| 1192 | |||
| 1193 | d = &drp0[i]; | ||
| 1194 | for (g = 0; g < ARRAY_SIZE(dimms); g++) | ||
| 1195 | if (dimms[g].addrdec == d->addrdec && | ||
| 1196 | dimms[g].dden == d->dden && | ||
| 1197 | dimms[g].dwid == d->dwid) | ||
| 1198 | break; | ||
| 1199 | |||
| 1200 | if (g == ARRAY_SIZE(dimms)) { | ||
| 1201 | edac_dbg(0, "Channel %d: unrecognized DIMM\n", i); | ||
| 1202 | continue; | ||
| 1203 | } | ||
| 1204 | |||
| 1205 | pvt->dimm_geom[i] = g; | ||
| 1206 | capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) * | ||
| 1207 | (1ul << dimms[g].colbits); | ||
| 1208 | edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3)); | ||
| 1209 | dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3)); | ||
| 1210 | dimm->grain = 32; | ||
| 1211 | dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16; | ||
| 1212 | dimm->mtype = MEM_DDR3; | ||
| 1213 | dimm->edac_mode = EDAC_SECDED; | ||
| 1214 | snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2); | ||
| 1215 | } | ||
| 1216 | } | ||
| 1217 | |||
| 1218 | static const int dnv_dtypes[] = { | ||
| 1219 | DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN | ||
| 1220 | }; | ||
| 1221 | |||
| 1222 | static void dnv_get_dimm_config(struct mem_ctl_info *mci) | ||
| 1223 | { | ||
| 1224 | int i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype; | ||
| 1225 | struct dimm_info *dimm; | ||
| 1226 | struct d_cr_drp *d; | ||
| 1227 | u64 capacity; | ||
| 1228 | |||
| 1229 | if (dsch.ddr4en) { | ||
| 1230 | memtype = MEM_DDR4; | ||
| 1231 | banks = 16; | ||
| 1232 | colbits = 10; | ||
| 1233 | } else { | ||
| 1234 | memtype = MEM_DDR3; | ||
| 1235 | banks = 8; | ||
| 1236 | } | ||
| 1237 | |||
| 1238 | for (i = 0; i < DNV_NUM_CHANNELS; i++) { | ||
| 1239 | if (dmap4[i].row14 == 31) | ||
| 1240 | rowbits = 14; | ||
| 1241 | else if (dmap4[i].row15 == 31) | ||
| 1242 | rowbits = 15; | ||
| 1243 | else if (dmap4[i].row16 == 31) | ||
| 1244 | rowbits = 16; | ||
| 1245 | else if (dmap4[i].row17 == 31) | ||
| 1246 | rowbits = 17; | ||
| 1247 | else | ||
| 1248 | rowbits = 18; | ||
| 1249 | |||
| 1250 | if (memtype == MEM_DDR3) { | ||
| 1251 | if (dmap1[i].ca11 != 0x3f) | ||
| 1252 | colbits = 12; | ||
| 1253 | else | ||
| 1254 | colbits = 10; | ||
| 1255 | } | ||
| 1256 | |||
| 1257 | d = &drp[i]; | ||
| 1258 | /* DIMM0 is present if rank0 and/or rank1 is enabled */ | ||
| 1259 | ranks_of_dimm[0] = d->rken0 + d->rken1; | ||
| 1260 | /* DIMM1 is present if rank2 and/or rank3 is enabled */ | ||
| 1261 | ranks_of_dimm[1] = d->rken2 + d->rken3; | ||
| 1262 | |||
| 1263 | for (j = 0; j < DNV_MAX_DIMMS; j++) { | ||
| 1264 | if (!ranks_of_dimm[j]) | ||
| 1265 | continue; | ||
| 1266 | |||
| 1267 | dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, i, j, 0); | ||
| 1268 | if (!dimm) { | ||
| 1269 | edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j); | ||
| 1270 | continue; | ||
| 1271 | } | ||
| 1272 | |||
| 1273 | capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits); | ||
| 1274 | edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3)); | ||
| 1275 | dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3)); | ||
| 1276 | dimm->grain = 32; | ||
| 1277 | dimm->dtype = dnv_dtypes[j ? d->dimmdwid1 : d->dimmdwid0]; /* width of DIMM j */ | ||
| 1278 | dimm->mtype = memtype; | ||
| 1279 | dimm->edac_mode = EDAC_SECDED; | ||
| 1280 | snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j); | ||
| 1281 | } | ||
| 1282 | } | ||
| 1283 | } | ||
| 1284 | |||
| 1285 | static int pnd2_register_mci(struct mem_ctl_info **ppmci) | ||
| 1286 | { | ||
| 1287 | struct edac_mc_layer layers[2]; | ||
| 1288 | struct mem_ctl_info *mci; | ||
| 1289 | struct pnd2_pvt *pvt; | ||
| 1290 | int rc; | ||
| 1291 | |||
| 1292 | rc = ops->check_ecc(); | ||
| 1293 | if (rc < 0) | ||
| 1294 | return rc; | ||
| 1295 | |||
| 1296 | /* Allocate a new MC control structure */ | ||
| 1297 | layers[0].type = EDAC_MC_LAYER_CHANNEL; | ||
| 1298 | layers[0].size = ops->channels; | ||
| 1299 | layers[0].is_virt_csrow = false; | ||
| 1300 | layers[1].type = EDAC_MC_LAYER_SLOT; | ||
| 1301 | layers[1].size = ops->dimms_per_channel; | ||
| 1302 | layers[1].is_virt_csrow = true; | ||
| 1303 | mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt)); | ||
| 1304 | if (!mci) | ||
| 1305 | return -ENOMEM; | ||
| 1306 | |||
| 1307 | pvt = mci->pvt_info; | ||
| 1308 | memset(pvt, 0, sizeof(*pvt)); | ||
| 1309 | |||
| 1310 | mci->mod_name = "pnd2_edac.c"; | ||
| 1311 | mci->dev_name = ops->name; | ||
| 1312 | mci->ctl_name = "Pondicherry2"; | ||
| 1313 | |||
| 1314 | /* Get dimm basic config and the memory layout */ | ||
| 1315 | ops->get_dimm_config(mci); | ||
| 1316 | |||
| 1317 | if (edac_mc_add_mc(mci)) { | ||
| 1318 | edac_dbg(0, "MC: failed edac_mc_add_mc()\n"); | ||
| 1319 | edac_mc_free(mci); | ||
| 1320 | return -EINVAL; | ||
| 1321 | } | ||
| 1322 | |||
| 1323 | *ppmci = mci; | ||
| 1324 | |||
| 1325 | return 0; | ||
| 1326 | } | ||
| 1327 | |||
| 1328 | static void pnd2_unregister_mci(struct mem_ctl_info *mci) | ||
| 1329 | { | ||
| 1330 | if (unlikely(!mci || !mci->pvt_info)) { | ||
| 1331 | pnd2_printk(KERN_ERR, "Couldn't find mci handler\n"); | ||
| 1332 | return; | ||
| 1333 | } | ||
| 1334 | |||
| 1335 | /* Remove MC sysfs nodes */ | ||
| 1336 | edac_mc_del_mc(NULL); | ||
| 1337 | edac_dbg(1, "%s: free mci struct\n", mci->ctl_name); | ||
| 1338 | edac_mc_free(mci); | ||
| 1339 | } | ||
| 1340 | |||
| 1341 | /* | ||
| 1342 | * Callback function registered with core kernel mce code. | ||
| 1343 | * Called once for each logged error. | ||
| 1344 | */ | ||
| 1345 | static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data) | ||
| 1346 | { | ||
| 1347 | struct mce *mce = (struct mce *)data; | ||
| 1348 | struct mem_ctl_info *mci; | ||
| 1349 | struct dram_addr daddr; | ||
| 1350 | char *type; | ||
| 1351 | |||
| 1352 | if (get_edac_report_status() == EDAC_REPORTING_DISABLED) | ||
| 1353 | return NOTIFY_DONE; | ||
| 1354 | |||
| 1355 | mci = pnd2_mci; | ||
| 1356 | if (!mci) | ||
| 1357 | return NOTIFY_DONE; | ||
| 1358 | |||
| 1359 | /* | ||
| 1360 | * Just let mcelog handle it if the error is | ||
| 1361 | * outside the memory controller. A memory error | ||
| 1362 | * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0. | ||
| 1363 | * Bit 12 has a special meaning. | ||
| 1364 | */ | ||
| 1365 | if ((mce->status & 0xefff) >> 7 != 1) | ||
| 1366 | return NOTIFY_DONE; | ||
| 1367 | |||
| 1368 | if (mce->mcgstatus & MCG_STATUS_MCIP) | ||
| 1369 | type = "Exception"; | ||
| 1370 | else | ||
| 1371 | type = "Event"; | ||
| 1372 | |||
| 1373 | pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n"); | ||
| 1374 | pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n", | ||
| 1375 | mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status); | ||
| 1376 | pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc); | ||
| 1377 | pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr); | ||
| 1378 | pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc); | ||
| 1379 | pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n", | ||
| 1380 | mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid); | ||
| 1381 | |||
| 1382 | pnd2_mce_output_error(mci, mce, &daddr); | ||
| 1383 | |||
| 1384 | /* Advise mcelog that the error was handled */ | ||
| 1385 | return NOTIFY_STOP; | ||
| 1386 | } | ||
| 1387 | |||
| 1388 | static struct notifier_block pnd2_mce_dec = { | ||
| 1389 | .notifier_call = pnd2_mce_check_error, | ||
| 1390 | }; | ||
| 1391 | |||
| 1392 | #ifdef CONFIG_EDAC_DEBUG | ||
| 1393 | /* | ||
| 1394 | * Write an address to this file to exercise the address decode | ||
| 1395 | * logic in this driver. | ||
| 1396 | */ | ||
| 1397 | static u64 pnd2_fake_addr; | ||
| 1398 | #define PND2_BLOB_SIZE 1024 | ||
| 1399 | static char pnd2_result[PND2_BLOB_SIZE]; | ||
| 1400 | static struct dentry *pnd2_test; | ||
| 1401 | static struct debugfs_blob_wrapper pnd2_blob = { | ||
| 1402 | .data = pnd2_result, | ||
| 1403 | .size = 0 | ||
| 1404 | }; | ||
| 1405 | |||
| 1406 | static int debugfs_u64_set(void *data, u64 val) | ||
| 1407 | { | ||
| 1408 | struct dram_addr daddr; | ||
| 1409 | struct mce m; | ||
| 1410 | |||
| 1411 | *(u64 *)data = val; | ||
| 1412 | m.mcgstatus = 0; | ||
| 1413 | /* ADDRV + MemRd + Unknown channel */ | ||
| 1414 | m.status = MCI_STATUS_ADDRV | 0x9f; | ||
| 1415 | m.addr = val; | ||
| 1416 | pnd2_mce_output_error(pnd2_mci, &m, &daddr); | ||
| 1417 | snprintf(pnd2_blob.data, PND2_BLOB_SIZE, | ||
| 1418 | "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n", | ||
| 1419 | m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col); | ||
| 1420 | pnd2_blob.size = strlen(pnd2_blob.data); | ||
| 1421 | |||
| 1422 | return 0; | ||
| 1423 | } | ||
| 1424 | DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); | ||
| 1425 | |||
| 1426 | static void setup_pnd2_debug(void) | ||
| 1427 | { | ||
| 1428 | pnd2_test = edac_debugfs_create_dir("pnd2_test"); | ||
| 1429 | edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test, | ||
| 1430 | &pnd2_fake_addr, &fops_u64_wo); | ||
| 1431 | debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob); | ||
| 1432 | } | ||
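| | /* | ||
| | * Editor's note, a usage sketch (the debugfs mount point and "edac" | ||
| | * parent directory are assumptions, not part of this patch): | ||
| | * # echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr | ||
| | * # cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results | ||
| | */ | ||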
| 1433 | |||
| 1434 | static void teardown_pnd2_debug(void) | ||
| 1435 | { | ||
| 1436 | debugfs_remove_recursive(pnd2_test); | ||
| 1437 | } | ||
| 1438 | #else | ||
| 1439 | static void setup_pnd2_debug(void) {} | ||
| 1440 | static void teardown_pnd2_debug(void) {} | ||
| 1441 | #endif /* CONFIG_EDAC_DEBUG */ | ||
| 1442 | |||
| 1443 | |||
| 1444 | static int pnd2_probe(void) | ||
| 1445 | { | ||
| 1446 | int rc; | ||
| 1447 | |||
| 1448 | edac_dbg(2, "\n"); | ||
| 1449 | rc = get_registers(); | ||
| 1450 | if (rc) | ||
| 1451 | return rc; | ||
| 1452 | |||
| 1453 | return pnd2_register_mci(&pnd2_mci); | ||
| 1454 | } | ||
| 1455 | |||
| 1456 | static void pnd2_remove(void) | ||
| 1457 | { | ||
| 1458 | edac_dbg(0, "\n"); | ||
| 1459 | pnd2_unregister_mci(pnd2_mci); | ||
| 1460 | } | ||
| 1461 | |||
| 1462 | static struct dunit_ops apl_ops = { | ||
| 1463 | .name = "pnd2/apl", | ||
| 1464 | .type = APL, | ||
| 1465 | .pmiaddr_shift = LOG2_PMI_ADDR_GRANULARITY, | ||
| 1466 | .pmiidx_shift = 0, | ||
| 1467 | .channels = APL_NUM_CHANNELS, | ||
| 1468 | .dimms_per_channel = 1, | ||
| 1469 | .rd_reg = apl_rd_reg, | ||
| 1470 | .get_registers = apl_get_registers, | ||
| 1471 | .check_ecc = apl_check_ecc_active, | ||
| 1472 | .mk_region = apl_mk_region, | ||
| 1473 | .get_dimm_config = apl_get_dimm_config, | ||
| 1474 | .pmi2mem = apl_pmi2mem, | ||
| 1475 | }; | ||
| 1476 | |||
| 1477 | static struct dunit_ops dnv_ops = { | ||
| 1478 | .name = "pnd2/dnv", | ||
| 1479 | .type = DNV, | ||
| 1480 | .pmiaddr_shift = 0, | ||
| 1481 | .pmiidx_shift = 1, | ||
| 1482 | .channels = DNV_NUM_CHANNELS, | ||
| 1483 | .dimms_per_channel = 2, | ||
| 1484 | .rd_reg = dnv_rd_reg, | ||
| 1485 | .get_registers = dnv_get_registers, | ||
| 1486 | .check_ecc = dnv_check_ecc_active, | ||
| 1487 | .mk_region = dnv_mk_region, | ||
| 1488 | .get_dimm_config = dnv_get_dimm_config, | ||
| 1489 | .pmi2mem = dnv_pmi2mem, | ||
| 1490 | }; | ||
| 1491 | |||
| 1492 | static const struct x86_cpu_id pnd2_cpuids[] = { | ||
| 1493 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_GOLDMONT, 0, (kernel_ulong_t)&apl_ops }, | ||
| 1494 | { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_DENVERTON, 0, (kernel_ulong_t)&dnv_ops }, | ||
| 1495 | { } | ||
| 1496 | }; | ||
| 1497 | MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids); | ||
| 1498 | |||
| 1499 | static int __init pnd2_init(void) | ||
| 1500 | { | ||
| 1501 | const struct x86_cpu_id *id; | ||
| 1502 | int rc; | ||
| 1503 | |||
| 1504 | edac_dbg(2, "\n"); | ||
| 1505 | |||
| 1506 | id = x86_match_cpu(pnd2_cpuids); | ||
| 1507 | if (!id) | ||
| 1508 | return -ENODEV; | ||
| 1509 | |||
| 1510 | ops = (struct dunit_ops *)id->driver_data; | ||
| 1511 | |||
| 1512 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
| 1513 | opstate_init(); | ||
| 1514 | |||
| 1515 | rc = pnd2_probe(); | ||
| 1516 | if (rc < 0) { | ||
| 1517 | pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc); | ||
| 1518 | return rc; | ||
| 1519 | } | ||
| 1520 | |||
| 1521 | if (!pnd2_mci) | ||
| 1522 | return -ENODEV; | ||
| 1523 | |||
| 1524 | mce_register_decode_chain(&pnd2_mce_dec); | ||
| 1525 | setup_pnd2_debug(); | ||
| 1526 | |||
| 1527 | return 0; | ||
| 1528 | } | ||
| 1529 | |||
| 1530 | static void __exit pnd2_exit(void) | ||
| 1531 | { | ||
| 1532 | edac_dbg(2, "\n"); | ||
| 1533 | teardown_pnd2_debug(); | ||
| 1534 | mce_unregister_decode_chain(&pnd2_mce_dec); | ||
| 1535 | pnd2_remove(); | ||
| 1536 | } | ||
| 1537 | |||
| 1538 | module_init(pnd2_init); | ||
| 1539 | module_exit(pnd2_exit); | ||
| 1540 | |||
| 1541 | module_param(edac_op_state, int, 0444); | ||
| 1542 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
| 1543 | |||
| 1544 | MODULE_LICENSE("GPL v2"); | ||
| 1545 | MODULE_AUTHOR("Tony Luck"); | ||
| 1546 | MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller"); | ||
diff --git a/drivers/edac/pnd2_edac.h b/drivers/edac/pnd2_edac.h new file mode 100644 index 000000000000..61b6e79492bb --- /dev/null +++ b/drivers/edac/pnd2_edac.h | |||
| @@ -0,0 +1,301 @@ | |||
| 1 | /* | ||
| 2 | * Register bitfield descriptions for Pondicherry2 memory controller. | ||
| 3 | * | ||
| 4 | * Copyright (c) 2016, Intel Corporation. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms and conditions of the GNU General Public License, | ||
| 8 | * version 2, as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 13 | * more details. | ||
| 14 | */ | ||
| 15 | |||
| 16 | #ifndef _PND2_REGS_H | ||
| 17 | #define _PND2_REGS_H | ||
| 18 | |||
| 19 | struct b_cr_touud_lo_pci { | ||
| 20 | u32 lock : 1; | ||
| 21 | u32 reserved_1 : 19; | ||
| 22 | u32 touud : 12; | ||
| 23 | }; | ||
| 24 | |||
| 25 | #define b_cr_touud_lo_pci_port 0x4c | ||
| 26 | #define b_cr_touud_lo_pci_offset 0xa8 | ||
| 27 | #define b_cr_touud_lo_pci_r_opcode 0x04 | ||
| 28 | |||
| 29 | struct b_cr_touud_hi_pci { | ||
| 30 | u32 touud : 7; | ||
| 31 | u32 reserved_0 : 25; | ||
| 32 | }; | ||
| 33 | |||
| 34 | #define b_cr_touud_hi_pci_port 0x4c | ||
| 35 | #define b_cr_touud_hi_pci_offset 0xac | ||
| 36 | #define b_cr_touud_hi_pci_r_opcode 0x04 | ||
| 37 | |||
| 38 | struct b_cr_tolud_pci { | ||
| 39 | u32 lock : 1; | ||
| 40 | u32 reserved_0 : 19; | ||
| 41 | u32 tolud : 12; | ||
| 42 | }; | ||
| 43 | |||
| 44 | #define b_cr_tolud_pci_port 0x4c | ||
| 45 | #define b_cr_tolud_pci_offset 0xbc | ||
| 46 | #define b_cr_tolud_pci_r_opcode 0x04 | ||
| 47 | |||
| 48 | struct b_cr_mchbar_lo_pci { | ||
| 49 | u32 enable : 1; | ||
| 50 | u32 pad_3_1 : 3; | ||
| 51 | u32 pad_14_4: 11; | ||
| 52 | u32 base: 17; | ||
| 53 | }; | ||
| 54 | |||
| 55 | struct b_cr_mchbar_hi_pci { | ||
| 56 | u32 base : 7; | ||
| 57 | u32 pad_31_7 : 25; | ||
| 58 | }; | ||
| 59 | |||
| 60 | /* Symmetric region */ | ||
| 61 | struct b_cr_slice_channel_hash { | ||
| 62 | u64 slice_1_disabled : 1; | ||
| 63 | u64 hvm_mode : 1; | ||
| 64 | u64 interleave_mode : 2; | ||
| 65 | u64 slice_0_mem_disabled : 1; | ||
| 66 | u64 reserved_0 : 1; | ||
| 67 | u64 slice_hash_mask : 14; | ||
| 68 | u64 reserved_1 : 11; | ||
| 69 | u64 enable_pmi_dual_data_mode : 1; | ||
| 70 | u64 ch_1_disabled : 1; | ||
| 71 | u64 reserved_2 : 1; | ||
| 72 | u64 sym_slice0_channel_enabled : 2; | ||
| 73 | u64 sym_slice1_channel_enabled : 2; | ||
| 74 | u64 ch_hash_mask : 14; | ||
| 75 | u64 reserved_3 : 11; | ||
| 76 | u64 lock : 1; | ||
| 77 | }; | ||
| 78 | |||
| 79 | #define b_cr_slice_channel_hash_port 0x4c | ||
| 80 | #define b_cr_slice_channel_hash_offset 0x4c58 | ||
| 81 | #define b_cr_slice_channel_hash_r_opcode 0x06 | ||
| 82 | |||
| 83 | struct b_cr_mot_out_base_mchbar { | ||
| 84 | u32 reserved_0 : 14; | ||
| 85 | u32 mot_out_base : 15; | ||
| 86 | u32 reserved_1 : 1; | ||
| 87 | u32 tr_en : 1; | ||
| 88 | u32 imr_en : 1; | ||
| 89 | }; | ||
| 90 | |||
| 91 | #define b_cr_mot_out_base_mchbar_port 0x4c | ||
| 92 | #define b_cr_mot_out_base_mchbar_offset 0x6af0 | ||
| 93 | #define b_cr_mot_out_base_mchbar_r_opcode 0x00 | ||
| 94 | |||
| 95 | struct b_cr_mot_out_mask_mchbar { | ||
| 96 | u32 reserved_0 : 14; | ||
| 97 | u32 mot_out_mask : 15; | ||
| 98 | u32 reserved_1 : 1; | ||
| 99 | u32 ia_iwb_en : 1; | ||
| 100 | u32 gt_iwb_en : 1; | ||
| 101 | }; | ||
| 102 | |||
| 103 | #define b_cr_mot_out_mask_mchbar_port 0x4c | ||
| 104 | #define b_cr_mot_out_mask_mchbar_offset 0x6af4 | ||
| 105 | #define b_cr_mot_out_mask_mchbar_r_opcode 0x00 | ||
| 106 | |||
| 107 | struct b_cr_asym_mem_region0_mchbar { | ||
| 108 | u32 pad : 4; | ||
| 109 | u32 slice0_asym_base : 11; | ||
| 110 | u32 pad_18_15 : 4; | ||
| 111 | u32 slice0_asym_limit : 11; | ||
| 112 | u32 slice0_asym_channel_select : 1; | ||
| 113 | u32 slice0_asym_enable : 1; | ||
| 114 | }; | ||
| 115 | |||
| 116 | #define b_cr_asym_mem_region0_mchbar_port 0x4c | ||
| 117 | #define b_cr_asym_mem_region0_mchbar_offset 0x6e40 | ||
| 118 | #define b_cr_asym_mem_region0_mchbar_r_opcode 0x00 | ||
| 119 | |||
| 120 | struct b_cr_asym_mem_region1_mchbar { | ||
| 121 | u32 pad : 4; | ||
| 122 | u32 slice1_asym_base : 11; | ||
| 123 | u32 pad_18_15 : 4; | ||
| 124 | u32 slice1_asym_limit : 11; | ||
| 125 | u32 slice1_asym_channel_select : 1; | ||
| 126 | u32 slice1_asym_enable : 1; | ||
| 127 | }; | ||
| 128 | |||
| 129 | #define b_cr_asym_mem_region1_mchbar_port 0x4c | ||
| 130 | #define b_cr_asym_mem_region1_mchbar_offset 0x6e44 | ||
| 131 | #define b_cr_asym_mem_region1_mchbar_r_opcode 0x00 | ||
| 132 | |||
| 133 | /* Some bit fields moved in above two structs on Denverton */ | ||
| 134 | struct b_cr_asym_mem_region_denverton { | ||
| 135 | u32 pad : 4; | ||
| 136 | u32 slice_asym_base : 8; | ||
| 137 | u32 pad_19_12 : 8; | ||
| 138 | u32 slice_asym_limit : 8; | ||
| 139 | u32 pad_30_28 : 3; | ||
| 140 | u32 slice_asym_enable : 1; | ||
| 141 | }; | ||
| 142 | |||
| 143 | struct b_cr_asym_2way_mem_region_mchbar { | ||
| 144 | u32 pad : 2; | ||
| 145 | u32 asym_2way_intlv_mode : 2; | ||
| 146 | u32 asym_2way_base : 11; | ||
| 147 | u32 pad_16_15 : 2; | ||
| 148 | u32 asym_2way_limit : 11; | ||
| 149 | u32 pad_30_28 : 3; | ||
| 150 | u32 asym_2way_interleave_enable : 1; | ||
| 151 | }; | ||
| 152 | |||
| 153 | #define b_cr_asym_2way_mem_region_mchbar_port 0x4c | ||
| 154 | #define b_cr_asym_2way_mem_region_mchbar_offset 0x6e50 | ||
| 155 | #define b_cr_asym_2way_mem_region_mchbar_r_opcode 0x00 | ||
| 156 | |||
| 157 | /* Apollo Lake d-unit */ | ||
| 158 | |||
| 159 | struct d_cr_drp0 { | ||
| 160 | u32 rken0 : 1; | ||
| 161 | u32 rken1 : 1; | ||
| 162 | u32 ddmen : 1; | ||
| 163 | u32 rsvd3 : 1; | ||
| 164 | u32 dwid : 2; | ||
| 165 | u32 dden : 3; | ||
| 166 | u32 rsvd13_9 : 5; | ||
| 167 | u32 rsien : 1; | ||
| 168 | u32 bahen : 1; | ||
| 169 | u32 rsvd18_16 : 3; | ||
| 170 | u32 caswizzle : 2; | ||
| 171 | u32 eccen : 1; | ||
| 172 | u32 dramtype : 3; | ||
| 173 | u32 blmode : 3; | ||
| 174 | u32 addrdec : 2; | ||
| 175 | u32 dramdevice_pr : 2; | ||
| 176 | }; | ||
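| | /* | ||
| | * Editor's note: the addrdec, dden and dwid fields above are the | ||
| | * values matched against the AMAP_*, DEN_* and X8/X16 constants in | ||
| | * the dimm_geometry table in pnd2_edac.c. | ||
| | */ | ||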
| 177 | |||
| 178 | #define d_cr_drp0_offset 0x1400 | ||
| 179 | #define d_cr_drp0_r_opcode 0x00 | ||
| 180 | |||
| 181 | /* Denverton d-unit */ | ||
| 182 | |||
| 183 | struct d_cr_dsch { | ||
| 184 | u32 ch0en : 1; | ||
| 185 | u32 ch1en : 1; | ||
| 186 | u32 ddr4en : 1; | ||
| 187 | u32 coldwake : 1; | ||
| 188 | u32 newbypdis : 1; | ||
| 189 | u32 chan_width : 1; | ||
| 190 | u32 rsvd6_6 : 1; | ||
| 191 | u32 ooodis : 1; | ||
| 192 | u32 rsvd18_8 : 11; | ||
| 193 | u32 ic : 1; | ||
| 194 | u32 rsvd31_20 : 12; | ||
| 195 | }; | ||
| 196 | |||
| 197 | #define d_cr_dsch_port 0x16 | ||
| 198 | #define d_cr_dsch_offset 0x0 | ||
| 199 | #define d_cr_dsch_r_opcode 0x0 | ||
| 200 | |||
| 201 | struct d_cr_ecc_ctrl { | ||
| 202 | u32 eccen : 1; | ||
| 203 | u32 rsvd31_1 : 31; | ||
| 204 | }; | ||
| 205 | |||
| 206 | #define d_cr_ecc_ctrl_offset 0x180 | ||
| 207 | #define d_cr_ecc_ctrl_r_opcode 0x0 | ||
| 208 | |||
| 209 | struct d_cr_drp { | ||
| 210 | u32 rken0 : 1; | ||
| 211 | u32 rken1 : 1; | ||
| 212 | u32 rken2 : 1; | ||
| 213 | u32 rken3 : 1; | ||
| 214 | u32 dimmdwid0 : 2; | ||
| 215 | u32 dimmdden0 : 2; | ||
| 216 | u32 dimmdwid1 : 2; | ||
| 217 | u32 dimmdden1 : 2; | ||
| 218 | u32 rsvd15_12 : 4; | ||
| 219 | u32 dimmflip : 1; | ||
| 220 | u32 rsvd31_17 : 15; | ||
| 221 | }; | ||
| 222 | |||
| 223 | #define d_cr_drp_offset 0x158 | ||
| 224 | #define d_cr_drp_r_opcode 0x0 | ||
| 225 | |||
| 226 | struct d_cr_dmap { | ||
| 227 | u32 ba0 : 5; | ||
| 228 | u32 ba1 : 5; | ||
| 229 | u32 bg0 : 5; /* if ddr3, ba2 = bg0 */ | ||
| 230 | u32 bg1 : 5; /* if ddr3, ba3 = bg1 */ | ||
| 231 | u32 rs0 : 5; | ||
| 232 | u32 rs1 : 5; | ||
| 233 | u32 rsvd : 2; | ||
| 234 | }; | ||
| 235 | |||
| 236 | #define d_cr_dmap_offset 0x174 | ||
| 237 | #define d_cr_dmap_r_opcode 0x0 | ||
| 238 | |||
| 239 | struct d_cr_dmap1 { | ||
| 240 | u32 ca11 : 6; | ||
| 241 | u32 bxor : 1; | ||
| 242 | u32 rsvd : 25; | ||
| 243 | }; | ||
| 244 | |||
| 245 | #define d_cr_dmap1_offset 0xb4 | ||
| 246 | #define d_cr_dmap1_r_opcode 0x0 | ||
| 247 | |||
| 248 | struct d_cr_dmap2 { | ||
| 249 | u32 row0 : 5; | ||
| 250 | u32 row1 : 5; | ||
| 251 | u32 row2 : 5; | ||
| 252 | u32 row3 : 5; | ||
| 253 | u32 row4 : 5; | ||
| 254 | u32 row5 : 5; | ||
| 255 | u32 rsvd : 2; | ||
| 256 | }; | ||
| 257 | |||
| 258 | #define d_cr_dmap2_offset 0x148 | ||
| 259 | #define d_cr_dmap2_r_opcode 0x0 | ||
| 260 | |||
| 261 | struct d_cr_dmap3 { | ||
| 262 | u32 row6 : 5; | ||
| 263 | u32 row7 : 5; | ||
| 264 | u32 row8 : 5; | ||
| 265 | u32 row9 : 5; | ||
| 266 | u32 row10 : 5; | ||
| 267 | u32 row11 : 5; | ||
| 268 | u32 rsvd : 2; | ||
| 269 | }; | ||
| 270 | |||
| 271 | #define d_cr_dmap3_offset 0x14c | ||
| 272 | #define d_cr_dmap3_r_opcode 0x0 | ||
| 273 | |||
| 274 | struct d_cr_dmap4 { | ||
| 275 | u32 row12 : 5; | ||
| 276 | u32 row13 : 5; | ||
| 277 | u32 row14 : 5; | ||
| 278 | u32 row15 : 5; | ||
| 279 | u32 row16 : 5; | ||
| 280 | u32 row17 : 5; | ||
| 281 | u32 rsvd : 2; | ||
| 282 | }; | ||
| 283 | |||
| 284 | #define d_cr_dmap4_offset 0x150 | ||
| 285 | #define d_cr_dmap4_r_opcode 0x0 | ||
| 286 | |||
| 287 | struct d_cr_dmap5 { | ||
| 288 | u32 ca3 : 4; | ||
| 289 | u32 ca4 : 4; | ||
| 290 | u32 ca5 : 4; | ||
| 291 | u32 ca6 : 4; | ||
| 292 | u32 ca7 : 4; | ||
| 293 | u32 ca8 : 4; | ||
| 294 | u32 ca9 : 4; | ||
| 295 | u32 rsvd : 4; | ||
| 296 | }; | ||
| 297 | |||
| 298 | #define d_cr_dmap5_offset 0x154 | ||
| 299 | #define d_cr_dmap5_r_opcode 0x0 | ||
| 300 | |||
| 301 | #endif /* _PND2_REGS_H */ | ||
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c index 6c270d9d304a..669246056812 100644 --- a/drivers/edac/xgene_edac.c +++ b/drivers/edac/xgene_edac.c | |||
| @@ -1596,7 +1596,7 @@ static void xgene_edac_pa_report(struct edac_device_ctl_info *edac_dev) | |||
| 1596 | reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS); | 1596 | reg = readl(ctx->dev_csr + IOBPATRANSERRINTSTS); |
| 1597 | if (!reg) | 1597 | if (!reg) |
| 1598 | goto chk_iob_axi0; | 1598 | goto chk_iob_axi0; |
| 1599 | dev_err(edac_dev->dev, "IOB procesing agent (PA) transaction error\n"); | 1599 | dev_err(edac_dev->dev, "IOB processing agent (PA) transaction error\n"); |
| 1600 | if (reg & IOBPA_RDATA_CORRUPT_MASK) | 1600 | if (reg & IOBPA_RDATA_CORRUPT_MASK) |
| 1601 | dev_err(edac_dev->dev, "IOB PA read data RAM error\n"); | 1601 | dev_err(edac_dev->dev, "IOB PA read data RAM error\n"); |
| 1602 | if (reg & IOBPA_M_RDATA_CORRUPT_MASK) | 1602 | if (reg & IOBPA_M_RDATA_CORRUPT_MASK) |
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig index 96bbae579c0b..fc09c76248b4 100644 --- a/drivers/extcon/Kconfig +++ b/drivers/extcon/Kconfig | |||
| @@ -44,7 +44,7 @@ config EXTCON_GPIO | |||
| 44 | 44 | ||
| 45 | config EXTCON_INTEL_INT3496 | 45 | config EXTCON_INTEL_INT3496 |
| 46 | tristate "Intel INT3496 ACPI device extcon driver" | 46 | tristate "Intel INT3496 ACPI device extcon driver" |
| 47 | depends on GPIOLIB && ACPI | 47 | depends on GPIOLIB && ACPI && (X86 || COMPILE_TEST) |
| 48 | help | 48 | help |
| 49 | Say Y here to enable extcon support for USB OTG ports controlled by | 49 | Say Y here to enable extcon support for USB OTG ports controlled by |
| 50 | an Intel INT3496 ACPI device. | 50 | an Intel INT3496 ACPI device. |
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c index a3131b036de6..9d17984bbbd4 100644 --- a/drivers/extcon/extcon-intel-int3496.c +++ b/drivers/extcon/extcon-intel-int3496.c | |||
| @@ -45,6 +45,17 @@ static const unsigned int int3496_cable[] = { | |||
| 45 | EXTCON_NONE, | 45 | EXTCON_NONE, |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | static const struct acpi_gpio_params id_gpios = { INT3496_GPIO_USB_ID, 0, false }; | ||
| 49 | static const struct acpi_gpio_params vbus_gpios = { INT3496_GPIO_VBUS_EN, 0, false }; | ||
| 50 | static const struct acpi_gpio_params mux_gpios = { INT3496_GPIO_USB_MUX, 0, false }; | ||
| 51 | |||
| 52 | static const struct acpi_gpio_mapping acpi_int3496_default_gpios[] = { | ||
| 53 | { "id-gpios", &id_gpios, 1 }, | ||
| 54 | { "vbus-gpios", &vbus_gpios, 1 }, | ||
| 55 | { "mux-gpios", &mux_gpios, 1 }, | ||
| 56 | { }, | ||
| 57 | }; | ||
| 58 | |||
| 48 | static void int3496_do_usb_id(struct work_struct *work) | 59 | static void int3496_do_usb_id(struct work_struct *work) |
| 49 | { | 60 | { |
| 50 | struct int3496_data *data = | 61 | struct int3496_data *data = |
| @@ -83,6 +94,13 @@ static int int3496_probe(struct platform_device *pdev) | |||
| 83 | struct int3496_data *data; | 94 | struct int3496_data *data; |
| 84 | int ret; | 95 | int ret; |
| 85 | 96 | ||
| 97 | ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(dev), | ||
| 98 | acpi_int3496_default_gpios); | ||
| 99 | if (ret) { | ||
| 100 | dev_err(dev, "can't add GPIO ACPI mapping\n"); | ||
| 101 | return ret; | ||
| 102 | } | ||
| 103 | |||
| 86 | data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); | 104 | data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); |
| 87 | if (!data) | 105 | if (!data) |
| 88 | return -ENOMEM; | 106 | return -ENOMEM; |
| @@ -90,30 +108,27 @@ static int int3496_probe(struct platform_device *pdev) | |||
| 90 | data->dev = dev; | 108 | data->dev = dev; |
| 91 | INIT_DELAYED_WORK(&data->work, int3496_do_usb_id); | 109 | INIT_DELAYED_WORK(&data->work, int3496_do_usb_id); |
| 92 | 110 | ||
| 93 | data->gpio_usb_id = devm_gpiod_get_index(dev, "id", | 111 | data->gpio_usb_id = devm_gpiod_get(dev, "id", GPIOD_IN); |
| 94 | INT3496_GPIO_USB_ID, | ||
| 95 | GPIOD_IN); | ||
| 96 | if (IS_ERR(data->gpio_usb_id)) { | 112 | if (IS_ERR(data->gpio_usb_id)) { |
| 97 | ret = PTR_ERR(data->gpio_usb_id); | 113 | ret = PTR_ERR(data->gpio_usb_id); |
| 98 | dev_err(dev, "can't request USB ID GPIO: %d\n", ret); | 114 | dev_err(dev, "can't request USB ID GPIO: %d\n", ret); |
| 99 | return ret; | 115 | return ret; |
| 116 | } else if (gpiod_get_direction(data->gpio_usb_id) != GPIOF_DIR_IN) { | ||
| 117 | dev_warn(dev, FW_BUG "USB ID GPIO not in input mode, fixing\n"); | ||
| 118 | gpiod_direction_input(data->gpio_usb_id); | ||
| 100 | } | 119 | } |
| 101 | 120 | ||
| 102 | data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id); | 121 | data->usb_id_irq = gpiod_to_irq(data->gpio_usb_id); |
| 103 | if (data->usb_id_irq <= 0) { | 122 | if (data->usb_id_irq < 0) { |
| 104 | dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq); | 123 | dev_err(dev, "can't get USB ID IRQ: %d\n", data->usb_id_irq); |
| 105 | return -EINVAL; | 124 | return data->usb_id_irq; |
| 106 | } | 125 | } |
| 107 | 126 | ||
| 108 | data->gpio_vbus_en = devm_gpiod_get_index(dev, "vbus en", | 127 | data->gpio_vbus_en = devm_gpiod_get(dev, "vbus", GPIOD_ASIS); |
| 109 | INT3496_GPIO_VBUS_EN, | ||
| 110 | GPIOD_ASIS); | ||
| 111 | if (IS_ERR(data->gpio_vbus_en)) | 128 | if (IS_ERR(data->gpio_vbus_en)) |
| 112 | dev_info(dev, "can't request VBUS EN GPIO\n"); | 129 | dev_info(dev, "can't request VBUS EN GPIO\n"); |
| 113 | 130 | ||
| 114 | data->gpio_usb_mux = devm_gpiod_get_index(dev, "usb mux", | 131 | data->gpio_usb_mux = devm_gpiod_get(dev, "mux", GPIOD_ASIS); |
| 115 | INT3496_GPIO_USB_MUX, | ||
| 116 | GPIOD_ASIS); | ||
| 117 | if (IS_ERR(data->gpio_usb_mux)) | 132 | if (IS_ERR(data->gpio_usb_mux)) |
| 118 | dev_info(dev, "can't request USB MUX GPIO\n"); | 133 | dev_info(dev, "can't request USB MUX GPIO\n"); |
| 119 | 134 | ||
| @@ -154,6 +169,8 @@ static int int3496_remove(struct platform_device *pdev) | |||
| 154 | devm_free_irq(&pdev->dev, data->usb_id_irq, data); | 169 | devm_free_irq(&pdev->dev, data->usb_id_irq, data); |
| 155 | cancel_delayed_work_sync(&data->work); | 170 | cancel_delayed_work_sync(&data->work); |
| 156 | 171 | ||
| 172 | acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev)); | ||
| 173 | |||
| 157 | return 0; | 174 | return 0; |
| 158 | } | 175 | } |
| 159 | 176 | ||
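
The int3496 change above moves from index-based devm_gpiod_get_index() calls to named lookups backed by an ACPI GPIO mapping table registered against the device's ACPI companion. A condensed sketch of that pattern, assuming a hypothetical driver with a single "id" line (the names and the _CRS index 0 are illustrative, not the full int3496 driver):

#include <linux/acpi.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>

static const struct acpi_gpio_params id_gpio = { 0, 0, false };

static const struct acpi_gpio_mapping demo_gpios[] = {
	{ "id-gpios", &id_gpio, 1 },
	{ }
};

static int demo_probe(struct platform_device *pdev)
{
	struct gpio_desc *id;
	int ret;

	/* Attach the name -> _CRS entry mapping to the ACPI companion. */
	ret = acpi_dev_add_driver_gpios(ACPI_COMPANION(&pdev->dev),
					demo_gpios);
	if (ret)
		return ret;

	/* The GPIO can now be requested by connection ID, not index. */
	id = devm_gpiod_get(&pdev->dev, "id", GPIOD_IN);
	if (IS_ERR(id)) {
		acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
		return PTR_ERR(id);
	}
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	/* Undo the mapping on the way out, as the patch does. */
	acpi_dev_remove_driver_gpios(ACPI_COMPANION(&pdev->dev));
	return 0;
}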
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c index 9e1a138fed53..16a8951b2bed 100644 --- a/drivers/gpio/gpio-altera-a10sr.c +++ b/drivers/gpio/gpio-altera-a10sr.c | |||
| @@ -96,7 +96,7 @@ static int altr_a10sr_gpio_probe(struct platform_device *pdev) | |||
| 96 | gpio->regmap = a10sr->regmap; | 96 | gpio->regmap = a10sr->regmap; |
| 97 | 97 | ||
| 98 | gpio->gp = altr_a10sr_gc; | 98 | gpio->gp = altr_a10sr_gc; |
| 99 | 99 | gpio->gp.parent = pdev->dev.parent; | |
| 100 | gpio->gp.of_node = pdev->dev.of_node; | 100 | gpio->gp.of_node = pdev->dev.of_node; |
| 101 | 101 | ||
| 102 | ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio); | 102 | ret = devm_gpiochip_add_data(&pdev->dev, &gpio->gp, gpio); |
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c index 5bddbd507ca9..3fe6a21e05a5 100644 --- a/drivers/gpio/gpio-altera.c +++ b/drivers/gpio/gpio-altera.c | |||
| @@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d, | |||
| 90 | 90 | ||
| 91 | altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d)); | 91 | altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d)); |
| 92 | 92 | ||
| 93 | if (type == IRQ_TYPE_NONE) | 93 | if (type == IRQ_TYPE_NONE) { |
| 94 | irq_set_handler_locked(d, handle_bad_irq); | ||
| 94 | return 0; | 95 | return 0; |
| 95 | if (type == IRQ_TYPE_LEVEL_HIGH && | 96 | } |
| 96 | altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH) | 97 | if (type == altera_gc->interrupt_trigger) { |
| 97 | return 0; | 98 | if (type == IRQ_TYPE_LEVEL_HIGH) |
| 98 | if (type == IRQ_TYPE_EDGE_RISING && | 99 | irq_set_handler_locked(d, handle_level_irq); |
| 99 | altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING) | 100 | else |
| 100 | return 0; | 101 | irq_set_handler_locked(d, handle_simple_irq); |
| 101 | if (type == IRQ_TYPE_EDGE_FALLING && | ||
| 102 | altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING) | ||
| 103 | return 0; | ||
| 104 | if (type == IRQ_TYPE_EDGE_BOTH && | ||
| 105 | altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH) | ||
| 106 | return 0; | 102 | return 0; |
| 107 | 103 | } | |
| 104 | irq_set_handler_locked(d, handle_bad_irq); | ||
| 108 | return -EINVAL; | 105 | return -EINVAL; |
| 109 | } | 106 | } |
| 110 | 107 | ||
| @@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc) | |||
| 230 | chained_irq_exit(chip, desc); | 227 | chained_irq_exit(chip, desc); |
| 231 | } | 228 | } |
| 232 | 229 | ||
| 233 | |||
| 234 | static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc) | 230 | static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc) |
| 235 | { | 231 | { |
| 236 | struct altera_gpio_chip *altera_gc; | 232 | struct altera_gpio_chip *altera_gc; |
| @@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev) | |||
| 310 | altera_gc->interrupt_trigger = reg; | 306 | altera_gc->interrupt_trigger = reg; |
| 311 | 307 | ||
| 312 | ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0, | 308 | ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0, |
| 313 | handle_simple_irq, IRQ_TYPE_NONE); | 309 | handle_bad_irq, IRQ_TYPE_NONE); |
| 314 | 310 | ||
| 315 | if (ret) { | 311 | if (ret) { |
| 316 | dev_err(&pdev->dev, "could not add irqchip\n"); | 312 | dev_err(&pdev->dev, "could not add irqchip\n"); |
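
The gpio-altera rework collapses four per-type checks into a single comparison against the one trigger type the hardware supports, and installs handle_bad_irq for anything else (including the initial IRQ_TYPE_NONE registration). A small user-space model of the resulting decision; the enum and handler names are ours:

#include <errno.h>
#include <stdio.h>

enum trig { TRIG_NONE, TRIG_LEVEL_HIGH, TRIG_EDGE_RISING, TRIG_EDGE_BOTH };
enum flow { FLOW_BAD, FLOW_LEVEL, FLOW_SIMPLE };

static int set_type(enum trig requested, enum trig supported, enum flow *h)
{
	if (requested == TRIG_NONE) {
		*h = FLOW_BAD;		/* nothing to do, but not an error */
		return 0;
	}
	if (requested == supported) {
		/* level IRQs get the level flow, edges the simple flow */
		*h = (requested == TRIG_LEVEL_HIGH) ? FLOW_LEVEL : FLOW_SIMPLE;
		return 0;
	}
	*h = FLOW_BAD;			/* unsupported trigger */
	return -EINVAL;
}

int main(void)
{
	enum flow h;

	printf("match: %d\n", set_type(TRIG_EDGE_RISING, TRIG_EDGE_RISING, &h));
	printf("mismatch: %d\n", set_type(TRIG_LEVEL_HIGH, TRIG_EDGE_BOTH, &h));
	return 0;
}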
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c index bdb692345428..2a57d024481d 100644 --- a/drivers/gpio/gpio-mcp23s08.c +++ b/drivers/gpio/gpio-mcp23s08.c | |||
| @@ -270,8 +270,10 @@ mcp23s08_direction_output(struct gpio_chip *chip, unsigned offset, int value) | |||
| 270 | static irqreturn_t mcp23s08_irq(int irq, void *data) | 270 | static irqreturn_t mcp23s08_irq(int irq, void *data) |
| 271 | { | 271 | { |
| 272 | struct mcp23s08 *mcp = data; | 272 | struct mcp23s08 *mcp = data; |
| 273 | int intcap, intf, i; | 273 | int intcap, intf, i, gpio, gpio_orig, intcap_mask; |
| 274 | unsigned int child_irq; | 274 | unsigned int child_irq; |
| 275 | bool intf_set, intcap_changed, gpio_bit_changed, | ||
| 276 | defval_changed, gpio_set; | ||
| 275 | 277 | ||
| 276 | mutex_lock(&mcp->lock); | 278 | mutex_lock(&mcp->lock); |
| 277 | if (mcp_read(mcp, MCP_INTF, &intf) < 0) { | 279 | if (mcp_read(mcp, MCP_INTF, &intf) < 0) { |
| @@ -287,14 +289,67 @@ static irqreturn_t mcp23s08_irq(int irq, void *data) | |||
| 287 | } | 289 | } |
| 288 | 290 | ||
| 289 | mcp->cache[MCP_INTCAP] = intcap; | 291 | mcp->cache[MCP_INTCAP] = intcap; |
| 292 | |||
| 293 | /* This clears the interrupt (configurable on S18) */ | ||
| 294 | if (mcp_read(mcp, MCP_GPIO, &gpio) < 0) { | ||
| 295 | mutex_unlock(&mcp->lock); | ||
| 296 | return IRQ_HANDLED; | ||
| 297 | } | ||
| 298 | gpio_orig = mcp->cache[MCP_GPIO]; | ||
| 299 | mcp->cache[MCP_GPIO] = gpio; | ||
| 290 | mutex_unlock(&mcp->lock); | 300 | mutex_unlock(&mcp->lock); |
| 291 | 301 | ||
| 302 | if (mcp->cache[MCP_INTF] == 0) { | ||
| 303 | /* There is no interrupt pending */ | ||
| 304 | return IRQ_HANDLED; | ||
| 305 | } | ||
| 306 | |||
| 307 | dev_dbg(mcp->chip.parent, | ||
| 308 | "intcap 0x%04X intf 0x%04X gpio_orig 0x%04X gpio 0x%04X\n", | ||
| 309 | intcap, intf, gpio_orig, gpio); | ||
| 292 | 310 | ||
| 293 | for (i = 0; i < mcp->chip.ngpio; i++) { | 311 | for (i = 0; i < mcp->chip.ngpio; i++) { |
| 294 | if ((BIT(i) & mcp->cache[MCP_INTF]) && | 312 | /* We must check all of the inputs on the chip, |
| 295 | ((BIT(i) & intcap & mcp->irq_rise) || | 313 | * otherwise we may not notice a change on >=2 pins. |
| 296 | (mcp->irq_fall & ~intcap & BIT(i)) || | 314 | * |
| 297 | (BIT(i) & mcp->cache[MCP_INTCON]))) { | 315 | * On at least the mcp23s17, INTCAP is only updated |
| 316 | * one byte at a time (INTCAPA and INTCAPB are | ||
| 317 | * not written to at the same time - only on a per-bank | ||
| 318 | * basis). | ||
| 319 | * | ||
| 320 | * INTF only contains the single bit that caused the | ||
| 321 | * interrupt per-bank. On the mcp23s17, there is | ||
| 322 | * INTFA and INTFB. If two pins are changed on the A | ||
| 323 | * side at the same time, INTF will only have one bit | ||
| 324 | * set. If one pin on the A side and one pin on the B | ||
| 325 | * side are changed at the same time, INTF will have | ||
| 326 | * two bits set. Thus, INTF can't be the only check | ||
| 327 | * to see if the input has changed. | ||
| 328 | */ | ||
| 329 | |||
| 330 | intf_set = BIT(i) & mcp->cache[MCP_INTF]; | ||
| 331 | if (i < 8 && intf_set) | ||
| 332 | intcap_mask = 0x00FF; | ||
| 333 | else if (i >= 8 && intf_set) | ||
| 334 | intcap_mask = 0xFF00; | ||
| 335 | else | ||
| 336 | intcap_mask = 0x00; | ||
| 337 | |||
| 338 | intcap_changed = (intcap_mask & | ||
| 339 | (BIT(i) & mcp->cache[MCP_INTCAP])) != | ||
| 340 | (intcap_mask & (BIT(i) & gpio_orig)); | ||
| 341 | gpio_set = BIT(i) & mcp->cache[MCP_GPIO]; | ||
| 342 | gpio_bit_changed = (BIT(i) & gpio_orig) != | ||
| 343 | (BIT(i) & mcp->cache[MCP_GPIO]); | ||
| 344 | defval_changed = (BIT(i) & mcp->cache[MCP_INTCON]) && | ||
| 345 | ((BIT(i) & mcp->cache[MCP_GPIO]) != | ||
| 346 | (BIT(i) & mcp->cache[MCP_DEFVAL])); | ||
| 347 | |||
| 348 | if (((gpio_bit_changed || intcap_changed) && | ||
| 349 | (BIT(i) & mcp->irq_rise) && gpio_set) || | ||
| 350 | ((gpio_bit_changed || intcap_changed) && | ||
| 351 | (BIT(i) & mcp->irq_fall) && !gpio_set) || | ||
| 352 | defval_changed) { | ||
| 298 | child_irq = irq_find_mapping(mcp->chip.irqdomain, i); | 353 | child_irq = irq_find_mapping(mcp->chip.irqdomain, i); |
| 299 | handle_nested_irq(child_irq); | 354 | handle_nested_irq(child_irq); |
| 300 | } | 355 | } |
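
The reworked mcp23s08 handler decides per pin whether to raise a child interrupt, combining INTF (at most one bit per bank), the bank-limited INTCAP snapshot, the GPIO delta, and the DEFVAL comparison. A self-contained user-space model of that per-pin predicate, with hypothetical 16-bit register snapshots:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

static bool pin_fires(int i, uint16_t intf, uint16_t intcap,
		      uint16_t gpio_orig, uint16_t gpio,
		      uint16_t intcon, uint16_t defval,
		      uint16_t irq_rise, uint16_t irq_fall)
{
	/* INTCAP is only valid for the bank whose INTF bit is set. */
	bool intf_set = intf & BIT(i);
	uint16_t intcap_mask = !intf_set ? 0 : (i < 8 ? 0x00ff : 0xff00);

	bool intcap_changed = (intcap_mask & BIT(i) & intcap) !=
			      (intcap_mask & BIT(i) & gpio_orig);
	bool gpio_set = gpio & BIT(i);
	bool gpio_bit_changed = (gpio_orig & BIT(i)) != (gpio & BIT(i));
	bool defval_changed = (intcon & BIT(i)) &&
			      ((gpio & BIT(i)) != (defval & BIT(i)));

	return ((gpio_bit_changed || intcap_changed) &&
		(irq_rise & BIT(i)) && gpio_set) ||
	       ((gpio_bit_changed || intcap_changed) &&
		(irq_fall & BIT(i)) && !gpio_set) ||
	       defval_changed;
}

int main(void)
{
	/* Pin 3 rises (0 -> 1) with rising-edge reporting enabled: fires. */
	printf("%d\n", pin_fires(3, BIT(3), BIT(3), 0x0000, BIT(3),
				 0, 0, BIT(3), 0));
	return 0;
}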
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 06dac72cb69c..d99338689213 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c | |||
| @@ -197,7 +197,7 @@ static ssize_t gpio_mockup_event_write(struct file *file, | |||
| 197 | struct seq_file *sfile; | 197 | struct seq_file *sfile; |
| 198 | struct gpio_desc *desc; | 198 | struct gpio_desc *desc; |
| 199 | struct gpio_chip *gc; | 199 | struct gpio_chip *gc; |
| 200 | int status, val; | 200 | int val; |
| 201 | char buf; | 201 | char buf; |
| 202 | 202 | ||
| 203 | sfile = file->private_data; | 203 | sfile = file->private_data; |
| @@ -206,9 +206,8 @@ static ssize_t gpio_mockup_event_write(struct file *file, | |||
| 206 | chip = priv->chip; | 206 | chip = priv->chip; |
| 207 | gc = &chip->gc; | 207 | gc = &chip->gc; |
| 208 | 208 | ||
| 209 | status = copy_from_user(&buf, usr_buf, 1); | 209 | if (copy_from_user(&buf, usr_buf, 1)) |
| 210 | if (status) | 210 | return -EFAULT; |
| 211 | return status; | ||
| 212 | 211 | ||
| 213 | if (buf == '0') | 212 | if (buf == '0') |
| 214 | val = 0; | 213 | val = 0; |
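
The gpio-mockup fix reflects a general rule: copy_from_user() returns the number of bytes left uncopied, not a -errno value, so its result must never be propagated as a return code. A minimal sketch of the convention in a hypothetical write handler:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_write(struct file *file, const char __user *ubuf,
			  size_t count, loff_t *ppos)
{
	char c;

	/* copy_from_user() returns bytes NOT copied -- never an -errno.
	 * Any shortfall must be translated into -EFAULT by the caller. */
	if (copy_from_user(&c, ubuf, 1))
		return -EFAULT;

	return count;
}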
diff --git a/drivers/gpio/gpio-xgene.c b/drivers/gpio/gpio-xgene.c index 40a8881c2ce8..f1c6ec17b90a 100644 --- a/drivers/gpio/gpio-xgene.c +++ b/drivers/gpio/gpio-xgene.c | |||
| @@ -42,9 +42,7 @@ struct xgene_gpio { | |||
| 42 | struct gpio_chip chip; | 42 | struct gpio_chip chip; |
| 43 | void __iomem *base; | 43 | void __iomem *base; |
| 44 | spinlock_t lock; | 44 | spinlock_t lock; |
| 45 | #ifdef CONFIG_PM | ||
| 46 | u32 set_dr_val[XGENE_MAX_GPIO_BANKS]; | 45 | u32 set_dr_val[XGENE_MAX_GPIO_BANKS]; |
| 47 | #endif | ||
| 48 | }; | 46 | }; |
| 49 | 47 | ||
| 50 | static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset) | 48 | static int xgene_gpio_get(struct gpio_chip *gc, unsigned int offset) |
| @@ -138,8 +136,7 @@ static int xgene_gpio_dir_out(struct gpio_chip *gc, | |||
| 138 | return 0; | 136 | return 0; |
| 139 | } | 137 | } |
| 140 | 138 | ||
| 141 | #ifdef CONFIG_PM | 139 | static __maybe_unused int xgene_gpio_suspend(struct device *dev) |
| 142 | static int xgene_gpio_suspend(struct device *dev) | ||
| 143 | { | 140 | { |
| 144 | struct xgene_gpio *gpio = dev_get_drvdata(dev); | 141 | struct xgene_gpio *gpio = dev_get_drvdata(dev); |
| 145 | unsigned long bank_offset; | 142 | unsigned long bank_offset; |
| @@ -152,7 +149,7 @@ static int xgene_gpio_suspend(struct device *dev) | |||
| 152 | return 0; | 149 | return 0; |
| 153 | } | 150 | } |
| 154 | 151 | ||
| 155 | static int xgene_gpio_resume(struct device *dev) | 152 | static __maybe_unused int xgene_gpio_resume(struct device *dev) |
| 156 | { | 153 | { |
| 157 | struct xgene_gpio *gpio = dev_get_drvdata(dev); | 154 | struct xgene_gpio *gpio = dev_get_drvdata(dev); |
| 158 | unsigned long bank_offset; | 155 | unsigned long bank_offset; |
| @@ -166,10 +163,6 @@ static int xgene_gpio_resume(struct device *dev) | |||
| 166 | } | 163 | } |
| 167 | 164 | ||
| 168 | static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume); | 165 | static SIMPLE_DEV_PM_OPS(xgene_gpio_pm, xgene_gpio_suspend, xgene_gpio_resume); |
| 169 | #define XGENE_GPIO_PM_OPS (&xgene_gpio_pm) | ||
| 170 | #else | ||
| 171 | #define XGENE_GPIO_PM_OPS NULL | ||
| 172 | #endif | ||
| 173 | 166 | ||
| 174 | static int xgene_gpio_probe(struct platform_device *pdev) | 167 | static int xgene_gpio_probe(struct platform_device *pdev) |
| 175 | { | 168 | { |
| @@ -241,7 +234,7 @@ static struct platform_driver xgene_gpio_driver = { | |||
| 241 | .name = "xgene-gpio", | 234 | .name = "xgene-gpio", |
| 242 | .of_match_table = xgene_gpio_of_match, | 235 | .of_match_table = xgene_gpio_of_match, |
| 243 | .acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match), | 236 | .acpi_match_table = ACPI_PTR(xgene_gpio_acpi_match), |
| 244 | .pm = XGENE_GPIO_PM_OPS, | 237 | .pm = &xgene_gpio_pm, |
| 245 | }, | 238 | }, |
| 246 | .probe = xgene_gpio_probe, | 239 | .probe = xgene_gpio_probe, |
| 247 | }; | 240 | }; |
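
The xgene-gpio cleanup is the standard replacement for #ifdef CONFIG_PM guards: mark the callbacks __maybe_unused and reference the SIMPLE_DEV_PM_OPS table unconditionally, since the macro degrades to an empty ops table when PM is disabled. A sketch with hypothetical driver names:

#include <linux/pm.h>
#include <linux/platform_device.h>

static __maybe_unused int demo_suspend(struct device *dev)
{
	return 0;	/* save controller state here */
}

static __maybe_unused int demo_resume(struct device *dev)
{
	return 0;	/* restore controller state here */
}

static SIMPLE_DEV_PM_OPS(demo_pm, demo_suspend, demo_resume);

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		.pm = &demo_pm,	/* valid whether or not PM is built in */
	},
};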
diff --git a/drivers/gpu/drm/amd/acp/Makefile b/drivers/gpu/drm/amd/acp/Makefile index 8363cb57915b..8a08e81ee90d 100644 --- a/drivers/gpu/drm/amd/acp/Makefile +++ b/drivers/gpu/drm/amd/acp/Makefile | |||
| @@ -3,6 +3,4 @@ | |||
| 3 | # of AMDSOC/AMDGPU drm driver. | 3 | # of AMDSOC/AMDGPU drm driver. |
| 4 | # It provides the HW control for ACP related functionalities. | 4 | # It provides the HW control for ACP related functionalities. |
| 5 | 5 | ||
| 6 | subdir-ccflags-y += -I$(AMDACPPATH)/ -I$(AMDACPPATH)/include | ||
| 7 | |||
| 8 | AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o | 6 | AMD_ACP_FILES := $(AMDACPPATH)/acp_hw.o |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index d2d0f60ff36d..99424cb8020b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | |||
| @@ -240,6 +240,8 @@ free_partial_kdata: | |||
| 240 | for (; i >= 0; i--) | 240 | for (; i >= 0; i--) |
| 241 | drm_free_large(p->chunks[i].kdata); | 241 | drm_free_large(p->chunks[i].kdata); |
| 242 | kfree(p->chunks); | 242 | kfree(p->chunks); |
| 243 | p->chunks = NULL; | ||
| 244 | p->nchunks = 0; | ||
| 243 | put_ctx: | 245 | put_ctx: |
| 244 | amdgpu_ctx_put(p->ctx); | 246 | amdgpu_ctx_put(p->ctx); |
| 245 | free_chunk: | 247 | free_chunk: |
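
Clearing p->chunks and p->nchunks after kfree() matters because a later teardown path in the amdgpu parser walks the same fields; leaving them stale would mean a use of freed memory and a double free. A user-space illustration of the idiom:

#include <stdlib.h>

struct parser {
	void **chunks;
	unsigned int nchunks;
};

static void free_chunks(struct parser *p)
{
	for (unsigned int i = 0; i < p->nchunks; i++)
		free(p->chunks[i]);
	free(p->chunks);
	p->chunks = NULL;	/* make a second cleanup pass a no-op */
	p->nchunks = 0;
}

int main(void)
{
	struct parser p = { .chunks = NULL, .nchunks = 0 };

	free_chunks(&p);
	free_chunks(&p);	/* safe: no double free, loop body skipped */
	return 0;
}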
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4120b351a8e5..de0cf3315484 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
| @@ -475,7 +475,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) | |||
| 475 | int r; | 475 | int r; |
| 476 | 476 | ||
| 477 | if (adev->wb.wb_obj == NULL) { | 477 | if (adev->wb.wb_obj == NULL) { |
| 478 | r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4, | 478 | r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t), |
| 479 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, | 479 | PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, |
| 480 | &adev->wb.wb_obj, &adev->wb.gpu_addr, | 480 | &adev->wb.wb_obj, &adev->wb.gpu_addr, |
| 481 | (void **)&adev->wb.wb); | 481 | (void **)&adev->wb.wb); |
| @@ -488,7 +488,7 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) | |||
| 488 | memset(&adev->wb.used, 0, sizeof(adev->wb.used)); | 488 | memset(&adev->wb.used, 0, sizeof(adev->wb.used)); |
| 489 | 489 | ||
| 490 | /* clear wb memory */ | 490 | /* clear wb memory */ |
| 491 | memset((char *)adev->wb.wb, 0, AMDGPU_GPU_PAGE_SIZE); | 491 | memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t)); |
| 492 | } | 492 | } |
| 493 | 493 | ||
| 494 | return 0; | 494 | return 0; |
| @@ -2590,7 +2590,7 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf, | |||
| 2590 | use_bank = 0; | 2590 | use_bank = 0; |
| 2591 | } | 2591 | } |
| 2592 | 2592 | ||
| 2593 | *pos &= 0x3FFFF; | 2593 | *pos &= (1UL << 22) - 1; |
| 2594 | 2594 | ||
| 2595 | if (use_bank) { | 2595 | if (use_bank) { |
| 2596 | if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || | 2596 | if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || |
| @@ -2666,7 +2666,7 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf, | |||
| 2666 | use_bank = 0; | 2666 | use_bank = 0; |
| 2667 | } | 2667 | } |
| 2668 | 2668 | ||
| 2669 | *pos &= 0x3FFFF; | 2669 | *pos &= (1UL << 22) - 1; |
| 2670 | 2670 | ||
| 2671 | if (use_bank) { | 2671 | if (use_bank) { |
| 2672 | if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || | 2672 | if ((sh_bank != 0xFFFFFFFF && sh_bank >= adev->gfx.config.max_sh_per_se) || |
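
The debugfs change above widens the register-offset window kept in *pos from 18 to 22 significant bits. A quick arithmetic check of the two masks:

#include <stdio.h>

int main(void)
{
	unsigned long old_mask = 0x3ffff;		/* (1UL << 18) - 1 */
	unsigned long new_mask = (1UL << 22) - 1;	/* 0x3fffff */

	printf("old window: %lu KiB\n", (old_mask + 1) >> 10);	/* 256 */
	printf("new window: %lu KiB\n", (new_mask + 1) >> 10);	/* 4096 */
	return 0;
}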
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index f7adbace428a..b76cd699eb0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | |||
| @@ -421,6 +421,7 @@ static const struct pci_device_id pciidlist[] = { | |||
| 421 | {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 421 | {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
| 422 | {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 422 | {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
| 423 | {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 423 | {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
| 424 | {0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | ||
| 424 | {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, | 425 | {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, |
| 425 | 426 | ||
| 426 | {0, 0, 0} | 427 | {0, 0, 0} |
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c index f55e45b52fbc..c5dec210d529 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c | |||
| @@ -3464,6 +3464,16 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev, | |||
| 3464 | (adev->pdev->device == 0x6667)) { | 3464 | (adev->pdev->device == 0x6667)) { |
| 3465 | max_sclk = 75000; | 3465 | max_sclk = 75000; |
| 3466 | } | 3466 | } |
| 3467 | } else if (adev->asic_type == CHIP_OLAND) { | ||
| 3468 | if ((adev->pdev->revision == 0xC7) || | ||
| 3469 | (adev->pdev->revision == 0x80) || | ||
| 3470 | (adev->pdev->revision == 0x81) || | ||
| 3471 | (adev->pdev->revision == 0x83) || | ||
| 3472 | (adev->pdev->revision == 0x87) || | ||
| 3473 | (adev->pdev->device == 0x6604) || | ||
| 3474 | (adev->pdev->device == 0x6605)) { | ||
| 3475 | max_sclk = 75000; | ||
| 3476 | } | ||
| 3467 | } | 3477 | } |
| 3468 | 3478 | ||
| 3469 | if (rps->vce_active) { | 3479 | if (rps->vce_active) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 50bdb24ef8d6..4a785d6acfb9 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c | |||
| @@ -1051,7 +1051,7 @@ static int vi_common_early_init(void *handle) | |||
| 1051 | /* rev0 hardware requires workarounds to support PG */ | 1051 | /* rev0 hardware requires workarounds to support PG */ |
| 1052 | adev->pg_flags = 0; | 1052 | adev->pg_flags = 0; |
| 1053 | if (adev->rev_id != 0x00) { | 1053 | if (adev->rev_id != 0x00) { |
| 1054 | adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | | 1054 | adev->pg_flags |= |
| 1055 | AMD_PG_SUPPORT_GFX_SMG | | 1055 | AMD_PG_SUPPORT_GFX_SMG | |
| 1056 | AMD_PG_SUPPORT_GFX_PIPELINE | | 1056 | AMD_PG_SUPPORT_GFX_PIPELINE | |
| 1057 | AMD_PG_SUPPORT_CP | | 1057 | AMD_PG_SUPPORT_CP | |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c index 8cf71f3c6d0e..261b828ad590 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_clockpowergating.c | |||
| @@ -178,7 +178,7 @@ int smu7_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) | |||
| 178 | if (bgate) { | 178 | if (bgate) { |
| 179 | cgs_set_powergating_state(hwmgr->device, | 179 | cgs_set_powergating_state(hwmgr->device, |
| 180 | AMD_IP_BLOCK_TYPE_VCE, | 180 | AMD_IP_BLOCK_TYPE_VCE, |
| 181 | AMD_PG_STATE_UNGATE); | 181 | AMD_PG_STATE_GATE); |
| 182 | cgs_set_clockgating_state(hwmgr->device, | 182 | cgs_set_clockgating_state(hwmgr->device, |
| 183 | AMD_IP_BLOCK_TYPE_VCE, | 183 | AMD_IP_BLOCK_TYPE_VCE, |
| 184 | AMD_CG_STATE_GATE); | 184 | AMD_CG_STATE_GATE); |
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index 08e6a71f5d05..294b53697334 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c | |||
| @@ -63,8 +63,7 @@ static void malidp_crtc_enable(struct drm_crtc *crtc) | |||
| 63 | 63 | ||
| 64 | clk_prepare_enable(hwdev->pxlclk); | 64 | clk_prepare_enable(hwdev->pxlclk); |
| 65 | 65 | ||
| 66 | /* mclk needs to be set to the same or higher rate than pxlclk */ | 66 | /* We rely on firmware to set mclk to a sensible level. */ |
| 67 | clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000); | ||
| 68 | clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); | 67 | clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); |
| 69 | 68 | ||
| 70 | hwdev->modeset(hwdev, &vm); | 69 | hwdev->modeset(hwdev, &vm); |
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index 488aedf5b58d..9f5513006eee 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c | |||
| @@ -83,7 +83,7 @@ static const struct malidp_layer malidp550_layers[] = { | |||
| 83 | { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, | 83 | { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, |
| 84 | { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE }, | 84 | { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE, MALIDP_DE_LG_STRIDE }, |
| 85 | { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, | 85 | { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE, MALIDP_DE_LV_STRIDE0 }, |
| 86 | { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, 0 }, | 86 | { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE, MALIDP550_DE_LS_R1_STRIDE }, |
| 87 | }; | 87 | }; |
| 88 | 88 | ||
| 89 | #define MALIDP_DE_DEFAULT_PREFETCH_START 5 | 89 | #define MALIDP_DE_DEFAULT_PREFETCH_START 5 |
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 414aada10fe5..d5aec082294c 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c | |||
| @@ -37,6 +37,8 @@ | |||
| 37 | #define LAYER_V_VAL(x) (((x) & 0x1fff) << 16) | 37 | #define LAYER_V_VAL(x) (((x) & 0x1fff) << 16) |
| 38 | #define MALIDP_LAYER_COMP_SIZE 0x010 | 38 | #define MALIDP_LAYER_COMP_SIZE 0x010 |
| 39 | #define MALIDP_LAYER_OFFSET 0x014 | 39 | #define MALIDP_LAYER_OFFSET 0x014 |
| 40 | #define MALIDP550_LS_ENABLE 0x01c | ||
| 41 | #define MALIDP550_LS_R1_IN_SIZE 0x020 | ||
| 40 | 42 | ||
| 41 | /* | 43 | /* |
| 42 | * This 4-entry look-up-table is used to determine the full 8-bit alpha value | 44 | * This 4-entry look-up-table is used to determine the full 8-bit alpha value |
| @@ -242,6 +244,11 @@ static void malidp_de_plane_update(struct drm_plane *plane, | |||
| 242 | LAYER_V_VAL(plane->state->crtc_y), | 244 | LAYER_V_VAL(plane->state->crtc_y), |
| 243 | mp->layer->base + MALIDP_LAYER_OFFSET); | 245 | mp->layer->base + MALIDP_LAYER_OFFSET); |
| 244 | 246 | ||
| 247 | if (mp->layer->id == DE_SMART) | ||
| 248 | malidp_hw_write(mp->hwdev, | ||
| 249 | LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h), | ||
| 250 | mp->layer->base + MALIDP550_LS_R1_IN_SIZE); | ||
| 251 | |||
| 245 | /* first clear the rotation bits */ | 252 | /* first clear the rotation bits */ |
| 246 | val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL); | 253 | val = malidp_hw_read(mp->hwdev, mp->layer->base + MALIDP_LAYER_CONTROL); |
| 247 | val &= ~LAYER_ROT_MASK; | 254 | val &= ~LAYER_ROT_MASK; |
| @@ -330,9 +337,16 @@ int malidp_de_planes_init(struct drm_device *drm) | |||
| 330 | plane->hwdev = malidp->dev; | 337 | plane->hwdev = malidp->dev; |
| 331 | plane->layer = &map->layers[i]; | 338 | plane->layer = &map->layers[i]; |
| 332 | 339 | ||
| 333 | /* Skip the features which the SMART layer doesn't have */ | 340 | if (id == DE_SMART) { |
| 334 | if (id == DE_SMART) | 341 | /* |
| 342 | * Enable the first rectangle in the SMART layer to be | ||
| 343 | * able to use it as a drm plane. | ||
| 344 | */ | ||
| 345 | malidp_hw_write(malidp->dev, 1, | ||
| 346 | plane->layer->base + MALIDP550_LS_ENABLE); | ||
| 347 | /* Skip the features which the SMART layer doesn't have. */ | ||
| 335 | continue; | 348 | continue; |
| 349 | } | ||
| 336 | 350 | ||
| 337 | drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags); | 351 | drm_plane_create_rotation_property(&plane->base, DRM_ROTATE_0, flags); |
| 338 | malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT, | 352 | malidp_hw_write(malidp->dev, MALIDP_ALPHA_LUT, |
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h index aff6d4a84e99..b816067a65c5 100644 --- a/drivers/gpu/drm/arm/malidp_regs.h +++ b/drivers/gpu/drm/arm/malidp_regs.h | |||
| @@ -84,6 +84,7 @@ | |||
| 84 | /* Stride register offsets relative to Lx_BASE */ | 84 | /* Stride register offsets relative to Lx_BASE */ |
| 85 | #define MALIDP_DE_LG_STRIDE 0x18 | 85 | #define MALIDP_DE_LG_STRIDE 0x18 |
| 86 | #define MALIDP_DE_LV_STRIDE0 0x18 | 86 | #define MALIDP_DE_LV_STRIDE0 0x18 |
| 87 | #define MALIDP550_DE_LS_R1_STRIDE 0x28 | ||
| 87 | 88 | ||
| 88 | /* macros to set values into registers */ | 89 | /* macros to set values into registers */ |
| 89 | #define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0) | 90 | #define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0) |
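
The new MALIDP550_LS_R1_IN_SIZE write in malidp_planes.c packs the source width and height into one word using the same H/V field macros as the layer offset register. A user-space check of that packing; LAYER_V_VAL is quoted from the driver, while LAYER_H_VAL is assumed to be the symmetric low-half macro:

#include <stdio.h>

#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0)	/* assumed symmetric */
#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)	/* as in malidp_planes.c */

int main(void)
{
	unsigned int val = LAYER_H_VAL(1920) | LAYER_V_VAL(1080);

	printf("LS_R1_IN_SIZE = 0x%08x\n", val);	/* 0x04380780 */
	return 0;
}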
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index f6d4d9700734..324a688b3f30 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -1260,9 +1260,9 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, | |||
| 1260 | * to KMS, hence fail if different settings are requested. | 1260 | * to KMS, hence fail if different settings are requested. |
| 1261 | */ | 1261 | */ |
| 1262 | if (var->bits_per_pixel != fb->format->cpp[0] * 8 || | 1262 | if (var->bits_per_pixel != fb->format->cpp[0] * 8 || |
| 1263 | var->xres != fb->width || var->yres != fb->height || | 1263 | var->xres > fb->width || var->yres > fb->height || |
| 1264 | var->xres_virtual != fb->width || var->yres_virtual != fb->height) { | 1264 | var->xres_virtual > fb->width || var->yres_virtual > fb->height) { |
| 1265 | DRM_DEBUG("fb userspace requested width/height/bpp different than current fb " | 1265 | DRM_DEBUG("fb requested width/height/bpp can't fit in current fb " |
| 1266 | "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", | 1266 | "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", |
| 1267 | var->xres, var->yres, var->bits_per_pixel, | 1267 | var->xres, var->yres, var->bits_per_pixel, |
| 1268 | var->xres_virtual, var->yres_virtual, | 1268 | var->xres_virtual, var->yres_virtual, |
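
The drm_fb_helper change relaxes an equality test into a containment test: a requested fbdev mode is now rejected only when it cannot fit inside the existing framebuffer. A condensed user-space restatement of the new predicate (virtual sizes omitted for brevity):

#include <stdbool.h>
#include <stdio.h>

static bool var_fits(unsigned int xres, unsigned int yres, unsigned int bpp,
		     unsigned int fb_w, unsigned int fb_h, unsigned int fb_bpp)
{
	/* bpp must still match exactly; geometry only has to fit */
	return bpp == fb_bpp && xres <= fb_w && yres <= fb_h;
}

int main(void)
{
	printf("%d\n", var_fits(1024, 768, 32, 1920, 1080, 32));  /* 1 */
	printf("%d\n", var_fits(2560, 1440, 32, 1920, 1080, 32)); /* 0 */
	return 0;
}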
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 0fd6f7a18364..c0e8d3302292 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c | |||
| @@ -68,6 +68,8 @@ struct decon_context { | |||
| 68 | unsigned long flags; | 68 | unsigned long flags; |
| 69 | unsigned long out_type; | 69 | unsigned long out_type; |
| 70 | int first_win; | 70 | int first_win; |
| 71 | spinlock_t vblank_lock; | ||
| 72 | u32 frame_id; | ||
| 71 | }; | 73 | }; |
| 72 | 74 | ||
| 73 | static const uint32_t decon_formats[] = { | 75 | static const uint32_t decon_formats[] = { |
| @@ -103,7 +105,7 @@ static int decon_enable_vblank(struct exynos_drm_crtc *crtc) | |||
| 103 | if (ctx->out_type & IFTYPE_I80) | 105 | if (ctx->out_type & IFTYPE_I80) |
| 104 | val |= VIDINTCON0_FRAMEDONE; | 106 | val |= VIDINTCON0_FRAMEDONE; |
| 105 | else | 107 | else |
| 106 | val |= VIDINTCON0_INTFRMEN; | 108 | val |= VIDINTCON0_INTFRMEN | VIDINTCON0_FRAMESEL_FP; |
| 107 | 109 | ||
| 108 | writel(val, ctx->addr + DECON_VIDINTCON0); | 110 | writel(val, ctx->addr + DECON_VIDINTCON0); |
| 109 | } | 111 | } |
| @@ -122,14 +124,56 @@ static void decon_disable_vblank(struct exynos_drm_crtc *crtc) | |||
| 122 | writel(0, ctx->addr + DECON_VIDINTCON0); | 124 | writel(0, ctx->addr + DECON_VIDINTCON0); |
| 123 | } | 125 | } |
| 124 | 126 | ||
| 127 | /* return number of starts/ends of frame transmissions since reset */ | ||
| 128 | static u32 decon_get_frame_count(struct decon_context *ctx, bool end) | ||
| 129 | { | ||
| 130 | u32 frm, pfrm, status, cnt = 2; | ||
| 131 | |||
| 132 | /* To get a consistent result, repeat the read until the frame id is | ||
| 133 | * stable. Usually the loop executes once; a second pass is needed only | ||
| 134 | * in the rare case that it runs right at a frame change. | ||
| 135 | */ | ||
| 136 | frm = readl(ctx->addr + DECON_CRFMID); | ||
| 137 | do { | ||
| 138 | status = readl(ctx->addr + DECON_VIDCON1); | ||
| 139 | pfrm = frm; | ||
| 140 | frm = readl(ctx->addr + DECON_CRFMID); | ||
| 141 | } while (frm != pfrm && --cnt); | ||
| 142 | |||
| 143 | /* CRFMID is incremented on BPORCH in case of I80 and on VSYNC in case | ||
| 144 | * of RGB, which should be taken into account. | ||
| 145 | */ | ||
| 146 | if (!frm) | ||
| 147 | return 0; | ||
| 148 | |||
| 149 | switch (status & (VIDCON1_VSTATUS_MASK | VIDCON1_I80_ACTIVE)) { | ||
| 150 | case VIDCON1_VSTATUS_VS: | ||
| 151 | if (!(ctx->out_type & IFTYPE_I80)) | ||
| 152 | --frm; | ||
| 153 | break; | ||
| 154 | case VIDCON1_VSTATUS_BP: | ||
| 155 | --frm; | ||
| 156 | break; | ||
| 157 | case VIDCON1_I80_ACTIVE: | ||
| 158 | case VIDCON1_VSTATUS_AC: | ||
| 159 | if (end) | ||
| 160 | --frm; | ||
| 161 | break; | ||
| 162 | default: | ||
| 163 | break; | ||
| 164 | } | ||
| 165 | |||
| 166 | return frm; | ||
| 167 | } | ||
| 168 | |||
| 125 | static void decon_setup_trigger(struct decon_context *ctx) | 169 | static void decon_setup_trigger(struct decon_context *ctx) |
| 126 | { | 170 | { |
| 127 | if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))) | 171 | if (!(ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))) |
| 128 | return; | 172 | return; |
| 129 | 173 | ||
| 130 | if (!(ctx->out_type & I80_HW_TRG)) { | 174 | if (!(ctx->out_type & I80_HW_TRG)) { |
| 131 | writel(TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN | 175 | writel(TRIGCON_TRIGEN_PER_F | TRIGCON_TRIGEN_F | |
| 132 | | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN, | 176 | TRIGCON_TE_AUTO_MASK | TRIGCON_SWTRIGEN, |
| 133 | ctx->addr + DECON_TRIGCON); | 177 | ctx->addr + DECON_TRIGCON); |
| 134 | return; | 178 | return; |
| 135 | } | 179 | } |
| @@ -365,11 +409,14 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc, | |||
| 365 | static void decon_atomic_flush(struct exynos_drm_crtc *crtc) | 409 | static void decon_atomic_flush(struct exynos_drm_crtc *crtc) |
| 366 | { | 410 | { |
| 367 | struct decon_context *ctx = crtc->ctx; | 411 | struct decon_context *ctx = crtc->ctx; |
| 412 | unsigned long flags; | ||
| 368 | int i; | 413 | int i; |
| 369 | 414 | ||
| 370 | if (test_bit(BIT_SUSPENDED, &ctx->flags)) | 415 | if (test_bit(BIT_SUSPENDED, &ctx->flags)) |
| 371 | return; | 416 | return; |
| 372 | 417 | ||
| 418 | spin_lock_irqsave(&ctx->vblank_lock, flags); | ||
| 419 | |||
| 373 | for (i = ctx->first_win; i < WINDOWS_NR; i++) | 420 | for (i = ctx->first_win; i < WINDOWS_NR; i++) |
| 374 | decon_shadow_protect_win(ctx, i, false); | 421 | decon_shadow_protect_win(ctx, i, false); |
| 375 | 422 | ||
| @@ -378,11 +425,18 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) | |||
| 378 | 425 | ||
| 379 | if (ctx->out_type & IFTYPE_I80) | 426 | if (ctx->out_type & IFTYPE_I80) |
| 380 | set_bit(BIT_WIN_UPDATED, &ctx->flags); | 427 | set_bit(BIT_WIN_UPDATED, &ctx->flags); |
| 428 | |||
| 429 | ctx->frame_id = decon_get_frame_count(ctx, true); | ||
| 430 | |||
| 431 | exynos_crtc_handle_event(crtc); | ||
| 432 | |||
| 433 | spin_unlock_irqrestore(&ctx->vblank_lock, flags); | ||
| 381 | } | 434 | } |
| 382 | 435 | ||
| 383 | static void decon_swreset(struct decon_context *ctx) | 436 | static void decon_swreset(struct decon_context *ctx) |
| 384 | { | 437 | { |
| 385 | unsigned int tries; | 438 | unsigned int tries; |
| 439 | unsigned long flags; | ||
| 386 | 440 | ||
| 387 | writel(0, ctx->addr + DECON_VIDCON0); | 441 | writel(0, ctx->addr + DECON_VIDCON0); |
| 388 | for (tries = 2000; tries; --tries) { | 442 | for (tries = 2000; tries; --tries) { |
| @@ -400,6 +454,10 @@ static void decon_swreset(struct decon_context *ctx) | |||
| 400 | 454 | ||
| 401 | WARN(tries == 0, "failed to software reset DECON\n"); | 455 | WARN(tries == 0, "failed to software reset DECON\n"); |
| 402 | 456 | ||
| 457 | spin_lock_irqsave(&ctx->vblank_lock, flags); | ||
| 458 | ctx->frame_id = 0; | ||
| 459 | spin_unlock_irqrestore(&ctx->vblank_lock, flags); | ||
| 460 | |||
| 403 | if (!(ctx->out_type & IFTYPE_HDMI)) | 461 | if (!(ctx->out_type & IFTYPE_HDMI)) |
| 404 | return; | 462 | return; |
| 405 | 463 | ||
| @@ -578,6 +636,24 @@ static const struct component_ops decon_component_ops = { | |||
| 578 | .unbind = decon_unbind, | 636 | .unbind = decon_unbind, |
| 579 | }; | 637 | }; |
| 580 | 638 | ||
| 639 | static void decon_handle_vblank(struct decon_context *ctx) | ||
| 640 | { | ||
| 641 | u32 frm; | ||
| 642 | |||
| 643 | spin_lock(&ctx->vblank_lock); | ||
| 644 | |||
| 645 | frm = decon_get_frame_count(ctx, true); | ||
| 646 | |||
| 647 | if (frm != ctx->frame_id) { | ||
| 648 | /* handle only if incremented, take care of wrap-around */ | ||
| 649 | if ((s32)(frm - ctx->frame_id) > 0) | ||
| 650 | drm_crtc_handle_vblank(&ctx->crtc->base); | ||
| 651 | ctx->frame_id = frm; | ||
| 652 | } | ||
| 653 | |||
| 654 | spin_unlock(&ctx->vblank_lock); | ||
| 655 | } | ||
| 656 | |||
| 581 | static irqreturn_t decon_irq_handler(int irq, void *dev_id) | 657 | static irqreturn_t decon_irq_handler(int irq, void *dev_id) |
| 582 | { | 658 | { |
| 583 | struct decon_context *ctx = dev_id; | 659 | struct decon_context *ctx = dev_id; |
| @@ -598,7 +674,7 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) | |||
| 598 | (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F)) | 674 | (VIDOUT_INTERLACE_EN_F | VIDOUT_INTERLACE_FIELD_F)) |
| 599 | return IRQ_HANDLED; | 675 | return IRQ_HANDLED; |
| 600 | } | 676 | } |
| 601 | drm_crtc_handle_vblank(&ctx->crtc->base); | 677 | decon_handle_vblank(ctx); |
| 602 | } | 678 | } |
| 603 | 679 | ||
| 604 | out: | 680 | out: |
| @@ -671,6 +747,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev) | |||
| 671 | __set_bit(BIT_SUSPENDED, &ctx->flags); | 747 | __set_bit(BIT_SUSPENDED, &ctx->flags); |
| 672 | ctx->dev = dev; | 748 | ctx->dev = dev; |
| 673 | ctx->out_type = (unsigned long)of_device_get_match_data(dev); | 749 | ctx->out_type = (unsigned long)of_device_get_match_data(dev); |
| 750 | spin_lock_init(&ctx->vblank_lock); | ||
| 674 | 751 | ||
| 675 | if (ctx->out_type & IFTYPE_HDMI) { | 752 | if (ctx->out_type & IFTYPE_HDMI) { |
| 676 | ctx->first_win = 1; | 753 | ctx->first_win = 1; |
| @@ -678,7 +755,7 @@ static int exynos5433_decon_probe(struct platform_device *pdev) | |||
| 678 | ctx->out_type |= IFTYPE_I80; | 755 | ctx->out_type |= IFTYPE_I80; |
| 679 | } | 756 | } |
| 680 | 757 | ||
| 681 | if (ctx->out_type | I80_HW_TRG) { | 758 | if (ctx->out_type & I80_HW_TRG) { |
| 682 | ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, | 759 | ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, |
| 683 | "samsung,disp-sysreg"); | 760 | "samsung,disp-sysreg"); |
| 684 | if (IS_ERR(ctx->sysreg)) { | 761 | if (IS_ERR(ctx->sysreg)) { |
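
decon_get_frame_count() above relies on a repeat-until-stable read: the frame counter is sampled again until two consecutive reads agree, so a value caught mid-update is discarded, and at most one retry is allowed. A user-space model of that loop with a simulated racy register (read_reg() stands in for readl()):

#include <stdio.h>

static unsigned int sample[] = { 41, 42, 42 };	/* first read is racy */
static int idx;

static unsigned int read_reg(void)
{
	return sample[idx++];
}

int main(void)
{
	unsigned int frm, pfrm, cnt = 2;

	frm = read_reg();
	do {
		pfrm = frm;
		frm = read_reg();
	} while (frm != pfrm && --cnt);

	printf("stable frame id: %u\n", frm);	/* 42 */
	return 0;
}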
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index f9ab19e205e2..48811806fa27 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c | |||
| @@ -526,6 +526,7 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc) | |||
| 526 | 526 | ||
| 527 | for (i = 0; i < WINDOWS_NR; i++) | 527 | for (i = 0; i < WINDOWS_NR; i++) |
| 528 | decon_shadow_protect_win(ctx, i, false); | 528 | decon_shadow_protect_win(ctx, i, false); |
| 529 | exynos_crtc_handle_event(crtc); | ||
| 529 | } | 530 | } |
| 530 | 531 | ||
| 531 | static void decon_init(struct decon_context *ctx) | 532 | static void decon_init(struct decon_context *ctx) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c index 5367b6664fe3..c65f4509932c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c | |||
| @@ -85,16 +85,28 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 85 | struct drm_crtc_state *old_crtc_state) | 85 | struct drm_crtc_state *old_crtc_state) |
| 86 | { | 86 | { |
| 87 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | 87 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); |
| 88 | struct drm_pending_vblank_event *event; | ||
| 89 | unsigned long flags; | ||
| 90 | 88 | ||
| 91 | if (exynos_crtc->ops->atomic_flush) | 89 | if (exynos_crtc->ops->atomic_flush) |
| 92 | exynos_crtc->ops->atomic_flush(exynos_crtc); | 90 | exynos_crtc->ops->atomic_flush(exynos_crtc); |
| 91 | } | ||
| 92 | |||
| 93 | static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { | ||
| 94 | .enable = exynos_drm_crtc_enable, | ||
| 95 | .disable = exynos_drm_crtc_disable, | ||
| 96 | .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, | ||
| 97 | .atomic_check = exynos_crtc_atomic_check, | ||
| 98 | .atomic_begin = exynos_crtc_atomic_begin, | ||
| 99 | .atomic_flush = exynos_crtc_atomic_flush, | ||
| 100 | }; | ||
| 101 | |||
| 102 | void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc) | ||
| 103 | { | ||
| 104 | struct drm_crtc *crtc = &exynos_crtc->base; | ||
| 105 | struct drm_pending_vblank_event *event = crtc->state->event; | ||
| 106 | unsigned long flags; | ||
| 93 | 107 | ||
| 94 | event = crtc->state->event; | ||
| 95 | if (event) { | 108 | if (event) { |
| 96 | crtc->state->event = NULL; | 109 | crtc->state->event = NULL; |
| 97 | |||
| 98 | spin_lock_irqsave(&crtc->dev->event_lock, flags); | 110 | spin_lock_irqsave(&crtc->dev->event_lock, flags); |
| 99 | if (drm_crtc_vblank_get(crtc) == 0) | 111 | if (drm_crtc_vblank_get(crtc) == 0) |
| 100 | drm_crtc_arm_vblank_event(crtc, event); | 112 | drm_crtc_arm_vblank_event(crtc, event); |
| @@ -105,15 +117,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc, | |||
| 105 | 117 | ||
| 106 | } | 118 | } |
| 107 | 119 | ||
| 108 | static const struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = { | ||
| 109 | .enable = exynos_drm_crtc_enable, | ||
| 110 | .disable = exynos_drm_crtc_disable, | ||
| 111 | .mode_set_nofb = exynos_drm_crtc_mode_set_nofb, | ||
| 112 | .atomic_check = exynos_crtc_atomic_check, | ||
| 113 | .atomic_begin = exynos_crtc_atomic_begin, | ||
| 114 | .atomic_flush = exynos_crtc_atomic_flush, | ||
| 115 | }; | ||
| 116 | |||
| 117 | static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) | 120 | static void exynos_drm_crtc_destroy(struct drm_crtc *crtc) |
| 118 | { | 121 | { |
| 119 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); | 122 | struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc); |
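
The new exynos_crtc_handle_event() factors page-flip event completion out of atomic_flush so each CRTC backend can call it at the right point in its own flush. A sketch of the pattern it encapsulates; the immediate-completion fallback shown here is the common DRM idiom and is assumed, since the hunk above truncates that branch:

#include <drm/drmP.h>

static void demo_handle_event(struct drm_crtc *crtc)
{
	struct drm_pending_vblank_event *event = crtc->state->event;
	unsigned long flags;

	if (!event)
		return;

	crtc->state->event = NULL;	/* consume the event exactly once */

	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	if (drm_crtc_vblank_get(crtc) == 0)
		drm_crtc_arm_vblank_event(crtc, event);	 /* fires at vblank */
	else
		drm_crtc_send_vblank_event(crtc, event); /* complete now */
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}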
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h index 6a581a8af465..abd5d6ceac0c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h +++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h | |||
| @@ -40,4 +40,6 @@ int exynos_drm_crtc_get_pipe_from_type(struct drm_device *drm_dev, | |||
| 40 | */ | 40 | */ |
| 41 | void exynos_drm_crtc_te_handler(struct drm_crtc *crtc); | 41 | void exynos_drm_crtc_te_handler(struct drm_crtc *crtc); |
| 42 | 42 | ||
| 43 | void exynos_crtc_handle_event(struct exynos_drm_crtc *exynos_crtc); | ||
| 44 | |||
| 43 | #endif | 45 | #endif |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 812e2ec0761d..d7ef26370e67 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c | |||
| @@ -86,7 +86,7 @@ | |||
| 86 | #define DSIM_SYNC_INFORM (1 << 27) | 86 | #define DSIM_SYNC_INFORM (1 << 27) |
| 87 | #define DSIM_EOT_DISABLE (1 << 28) | 87 | #define DSIM_EOT_DISABLE (1 << 28) |
| 88 | #define DSIM_MFLUSH_VS (1 << 29) | 88 | #define DSIM_MFLUSH_VS (1 << 29) |
| 89 | /* This flag is valid only for exynos3250/3472/4415/5260/5430 */ | 89 | /* This flag is valid only for exynos3250/3472/5260/5430 */ |
| 90 | #define DSIM_CLKLANE_STOP (1 << 30) | 90 | #define DSIM_CLKLANE_STOP (1 << 30) |
| 91 | 91 | ||
| 92 | /* DSIM_ESCMODE */ | 92 | /* DSIM_ESCMODE */ |
| @@ -473,17 +473,6 @@ static const struct exynos_dsi_driver_data exynos4_dsi_driver_data = { | |||
| 473 | .reg_values = reg_values, | 473 | .reg_values = reg_values, |
| 474 | }; | 474 | }; |
| 475 | 475 | ||
| 476 | static const struct exynos_dsi_driver_data exynos4415_dsi_driver_data = { | ||
| 477 | .reg_ofs = exynos_reg_ofs, | ||
| 478 | .plltmr_reg = 0x58, | ||
| 479 | .has_clklane_stop = 1, | ||
| 480 | .num_clks = 2, | ||
| 481 | .max_freq = 1000, | ||
| 482 | .wait_for_reset = 1, | ||
| 483 | .num_bits_resol = 11, | ||
| 484 | .reg_values = reg_values, | ||
| 485 | }; | ||
| 486 | |||
| 487 | static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = { | 476 | static const struct exynos_dsi_driver_data exynos5_dsi_driver_data = { |
| 488 | .reg_ofs = exynos_reg_ofs, | 477 | .reg_ofs = exynos_reg_ofs, |
| 489 | .plltmr_reg = 0x58, | 478 | .plltmr_reg = 0x58, |
| @@ -521,8 +510,6 @@ static const struct of_device_id exynos_dsi_of_match[] = { | |||
| 521 | .data = &exynos3_dsi_driver_data }, | 510 | .data = &exynos3_dsi_driver_data }, |
| 522 | { .compatible = "samsung,exynos4210-mipi-dsi", | 511 | { .compatible = "samsung,exynos4210-mipi-dsi", |
| 523 | .data = &exynos4_dsi_driver_data }, | 512 | .data = &exynos4_dsi_driver_data }, |
| 524 | { .compatible = "samsung,exynos4415-mipi-dsi", | ||
| 525 | .data = &exynos4415_dsi_driver_data }, | ||
| 526 | { .compatible = "samsung,exynos5410-mipi-dsi", | 513 | { .compatible = "samsung,exynos5410-mipi-dsi", |
| 527 | .data = &exynos5_dsi_driver_data }, | 514 | .data = &exynos5_dsi_driver_data }, |
| 528 | { .compatible = "samsung,exynos5422-mipi-dsi", | 515 | { .compatible = "samsung,exynos5422-mipi-dsi", |
| @@ -979,7 +966,7 @@ static void exynos_dsi_send_to_fifo(struct exynos_dsi *dsi, | |||
| 979 | bool first = !xfer->tx_done; | 966 | bool first = !xfer->tx_done; |
| 980 | u32 reg; | 967 | u32 reg; |
| 981 | 968 | ||
| 982 | dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n", | 969 | dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n", |
| 983 | xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done); | 970 | xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done); |
| 984 | 971 | ||
| 985 | if (length > DSI_TX_FIFO_SIZE) | 972 | if (length > DSI_TX_FIFO_SIZE) |
| @@ -1177,7 +1164,7 @@ static bool exynos_dsi_transfer_finish(struct exynos_dsi *dsi) | |||
| 1177 | spin_unlock_irqrestore(&dsi->transfer_lock, flags); | 1164 | spin_unlock_irqrestore(&dsi->transfer_lock, flags); |
| 1178 | 1165 | ||
| 1179 | dev_dbg(dsi->dev, | 1166 | dev_dbg(dsi->dev, |
| 1180 | "> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n", | 1167 | "> xfer %pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n", |
| 1181 | xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len, | 1168 | xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len, |
| 1182 | xfer->rx_done); | 1169 | xfer->rx_done); |
| 1183 | 1170 | ||
| @@ -1348,9 +1335,12 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi) | |||
| 1348 | int te_gpio_irq; | 1335 | int te_gpio_irq; |
| 1349 | 1336 | ||
| 1350 | dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0); | 1337 | dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0); |
| 1338 | if (dsi->te_gpio == -ENOENT) | ||
| 1339 | return 0; | ||
| 1340 | |||
| 1351 | if (!gpio_is_valid(dsi->te_gpio)) { | 1341 | if (!gpio_is_valid(dsi->te_gpio)) { |
| 1352 | dev_err(dsi->dev, "no te-gpios specified\n"); | ||
| 1353 | ret = dsi->te_gpio; | 1342 | ret = dsi->te_gpio; |
| 1343 | dev_err(dsi->dev, "cannot get te-gpios, %d\n", ret); | ||
| 1354 | goto out; | 1344 | goto out; |
| 1355 | } | 1345 | } |
| 1356 | 1346 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c index 95871577015d..5b18b5c5fdf2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c | |||
| @@ -1695,7 +1695,7 @@ static int fimc_probe(struct platform_device *pdev) | |||
| 1695 | goto err_put_clk; | 1695 | goto err_put_clk; |
| 1696 | } | 1696 | } |
| 1697 | 1697 | ||
| 1698 | DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv); | 1698 | DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv); |
| 1699 | 1699 | ||
| 1700 | spin_lock_init(&ctx->lock); | 1700 | spin_lock_init(&ctx->lock); |
| 1701 | platform_set_drvdata(pdev, ctx); | 1701 | platform_set_drvdata(pdev, ctx); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index a9fa444c6053..3f04d72c448d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c | |||
| @@ -71,10 +71,10 @@ | |||
| 71 | #define TRIGCON 0x1A4 | 71 | #define TRIGCON 0x1A4 |
| 72 | #define TRGMODE_ENABLE (1 << 0) | 72 | #define TRGMODE_ENABLE (1 << 0) |
| 73 | #define SWTRGCMD_ENABLE (1 << 1) | 73 | #define SWTRGCMD_ENABLE (1 << 1) |
| 74 | /* Exynos3250, 3472, 4415, 5260, 5410, 5420 and 5422 only supported. */ | 74 | /* Exynos3250, 3472, 5260, 5410, 5420 and 5422 only supported. */
| 75 | #define HWTRGEN_ENABLE (1 << 3) | 75 | #define HWTRGEN_ENABLE (1 << 3) |
| 76 | #define HWTRGMASK_ENABLE (1 << 4) | 76 | #define HWTRGMASK_ENABLE (1 << 4) |
| 77 | /* Exynos3250, 3472, 4415, 5260, 5420 and 5422 only supported. */ | 77 | /* Exynos3250, 3472, 5260, 5420 and 5422 only supported. */ |
| 78 | #define HWTRIGEN_PER_ENABLE (1 << 31) | 78 | #define HWTRIGEN_PER_ENABLE (1 << 31) |
| 79 | 79 | ||
| 80 | /* display mode change control register except exynos4 */ | 80 | /* display mode change control register except exynos4 */ |
| @@ -138,18 +138,6 @@ static struct fimd_driver_data exynos4_fimd_driver_data = { | |||
| 138 | .has_vtsel = 1, | 138 | .has_vtsel = 1, |
| 139 | }; | 139 | }; |
| 140 | 140 | ||
| 141 | static struct fimd_driver_data exynos4415_fimd_driver_data = { | ||
| 142 | .timing_base = 0x20000, | ||
| 143 | .lcdblk_offset = 0x210, | ||
| 144 | .lcdblk_vt_shift = 10, | ||
| 145 | .lcdblk_bypass_shift = 1, | ||
| 146 | .trg_type = I80_HW_TRG, | ||
| 147 | .has_shadowcon = 1, | ||
| 148 | .has_vidoutcon = 1, | ||
| 149 | .has_vtsel = 1, | ||
| 150 | .has_trigger_per_te = 1, | ||
| 151 | }; | ||
| 152 | |||
| 153 | static struct fimd_driver_data exynos5_fimd_driver_data = { | 141 | static struct fimd_driver_data exynos5_fimd_driver_data = { |
| 154 | .timing_base = 0x20000, | 142 | .timing_base = 0x20000, |
| 155 | .lcdblk_offset = 0x214, | 143 | .lcdblk_offset = 0x214, |
| @@ -210,8 +198,6 @@ static const struct of_device_id fimd_driver_dt_match[] = { | |||
| 210 | .data = &exynos3_fimd_driver_data }, | 198 | .data = &exynos3_fimd_driver_data }, |
| 211 | { .compatible = "samsung,exynos4210-fimd", | 199 | { .compatible = "samsung,exynos4210-fimd", |
| 212 | .data = &exynos4_fimd_driver_data }, | 200 | .data = &exynos4_fimd_driver_data }, |
| 213 | { .compatible = "samsung,exynos4415-fimd", | ||
| 214 | .data = &exynos4415_fimd_driver_data }, | ||
| 215 | { .compatible = "samsung,exynos5250-fimd", | 201 | { .compatible = "samsung,exynos5250-fimd", |
| 216 | .data = &exynos5_fimd_driver_data }, | 202 | .data = &exynos5_fimd_driver_data }, |
| 217 | { .compatible = "samsung,exynos5420-fimd", | 203 | { .compatible = "samsung,exynos5420-fimd", |
| @@ -257,7 +243,7 @@ static int fimd_enable_vblank(struct exynos_drm_crtc *crtc) | |||
| 257 | val |= VIDINTCON0_INT_FRAME; | 243 | val |= VIDINTCON0_INT_FRAME; |
| 258 | 244 | ||
| 259 | val &= ~VIDINTCON0_FRAMESEL0_MASK; | 245 | val &= ~VIDINTCON0_FRAMESEL0_MASK; |
| 260 | val |= VIDINTCON0_FRAMESEL0_VSYNC; | 246 | val |= VIDINTCON0_FRAMESEL0_FRONTPORCH; |
| 261 | val &= ~VIDINTCON0_FRAMESEL1_MASK; | 247 | val &= ~VIDINTCON0_FRAMESEL1_MASK; |
| 262 | val |= VIDINTCON0_FRAMESEL1_NONE; | 248 | val |= VIDINTCON0_FRAMESEL1_NONE; |
| 263 | } | 249 | } |
| @@ -723,6 +709,8 @@ static void fimd_atomic_flush(struct exynos_drm_crtc *crtc) | |||
| 723 | 709 | ||
| 724 | for (i = 0; i < WINDOWS_NR; i++) | 710 | for (i = 0; i < WINDOWS_NR; i++) |
| 725 | fimd_shadow_protect_win(ctx, i, false); | 711 | fimd_shadow_protect_win(ctx, i, false); |
| 712 | |||
| 713 | exynos_crtc_handle_event(crtc); | ||
| 726 | } | 714 | } |
| 727 | 715 | ||
| 728 | static void fimd_update_plane(struct exynos_drm_crtc *crtc, | 716 | static void fimd_update_plane(struct exynos_drm_crtc *crtc, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 4c28f7ffcc4d..55a1579d11b3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c | |||
| @@ -218,7 +218,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev, | |||
| 218 | return ERR_PTR(ret); | 218 | return ERR_PTR(ret); |
| 219 | } | 219 | } |
| 220 | 220 | ||
| 221 | DRM_DEBUG_KMS("created file object = %p\n", obj->filp); | 221 | DRM_DEBUG_KMS("created file object = %pK\n", obj->filp); |
| 222 | 222 | ||
| 223 | return exynos_gem; | 223 | return exynos_gem; |
| 224 | } | 224 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c index bef57987759d..0506b2b17ac1 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c | |||
| @@ -1723,7 +1723,7 @@ static int gsc_probe(struct platform_device *pdev) | |||
| 1723 | return ret; | 1723 | return ret; |
| 1724 | } | 1724 | } |
| 1725 | 1725 | ||
| 1726 | DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv); | 1726 | DRM_DEBUG_KMS("id[%d]ippdrv[%pK]\n", ctx->id, ippdrv); |
| 1727 | 1727 | ||
| 1728 | mutex_init(&ctx->lock); | 1728 | mutex_init(&ctx->lock); |
| 1729 | platform_set_drvdata(pdev, ctx); | 1729 | platform_set_drvdata(pdev, ctx); |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index 9c84ee76f18a..3edda18cc2d2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c | |||
| @@ -208,7 +208,7 @@ static struct exynos_drm_ippdrv *ipp_find_drv_by_handle(u32 prop_id) | |||
| 208 | * e.g PAUSE state, queue buf, command control. | 208 | * e.g PAUSE state, queue buf, command control. |
| 209 | */ | 209 | */ |
| 210 | list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { | 210 | list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) { |
| 211 | DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv); | 211 | DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", count++, ippdrv); |
| 212 | 212 | ||
| 213 | mutex_lock(&ippdrv->cmd_lock); | 213 | mutex_lock(&ippdrv->cmd_lock); |
| 214 | list_for_each_entry(c_node, &ippdrv->cmd_list, list) { | 214 | list_for_each_entry(c_node, &ippdrv->cmd_list, list) { |
| @@ -388,7 +388,7 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data, | |||
| 388 | } | 388 | } |
| 389 | property->prop_id = ret; | 389 | property->prop_id = ret; |
| 390 | 390 | ||
| 391 | DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n", | 391 | DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%pK]\n", |
| 392 | property->prop_id, property->cmd, ippdrv); | 392 | property->prop_id, property->cmd, ippdrv); |
| 393 | 393 | ||
| 394 | /* stored property information and ippdrv in private data */ | 394 | /* stored property information and ippdrv in private data */ |
| @@ -518,7 +518,7 @@ static int ipp_put_mem_node(struct drm_device *drm_dev, | |||
| 518 | { | 518 | { |
| 519 | int i; | 519 | int i; |
| 520 | 520 | ||
| 521 | DRM_DEBUG_KMS("node[%p]\n", m_node); | 521 | DRM_DEBUG_KMS("node[%pK]\n", m_node); |
| 522 | 522 | ||
| 523 | if (!m_node) { | 523 | if (!m_node) { |
| 524 | DRM_ERROR("invalid dequeue node.\n"); | 524 | DRM_ERROR("invalid dequeue node.\n"); |
| @@ -562,7 +562,7 @@ static struct drm_exynos_ipp_mem_node | |||
| 562 | m_node->buf_id = qbuf->buf_id; | 562 | m_node->buf_id = qbuf->buf_id; |
| 563 | INIT_LIST_HEAD(&m_node->list); | 563 | INIT_LIST_HEAD(&m_node->list); |
| 564 | 564 | ||
| 565 | DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id); | 565 | DRM_DEBUG_KMS("m_node[%pK]ops_id[%d]\n", m_node, qbuf->ops_id); |
| 566 | DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); | 566 | DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id); |
| 567 | 567 | ||
| 568 | for_each_ipp_planar(i) { | 568 | for_each_ipp_planar(i) { |
| @@ -659,7 +659,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node, | |||
| 659 | 659 | ||
| 660 | mutex_lock(&c_node->event_lock); | 660 | mutex_lock(&c_node->event_lock); |
| 661 | list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { | 661 | list_for_each_entry_safe(e, te, &c_node->event_list, base.link) { |
| 662 | DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e); | 662 | DRM_DEBUG_KMS("count[%d]e[%pK]\n", count++, e); |
| 663 | 663 | ||
| 664 | /* | 664 | /* |
| 665 | * qbuf == NULL condition means all event deletion. | 665 | * qbuf == NULL condition means all event deletion. |
| @@ -750,7 +750,7 @@ static struct drm_exynos_ipp_mem_node | |||
| 750 | 750 | ||
| 751 | /* find memory node from memory list */ | 751 | /* find memory node from memory list */ |
| 752 | list_for_each_entry(m_node, head, list) { | 752 | list_for_each_entry(m_node, head, list) { |
| 753 | DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node); | 753 | DRM_DEBUG_KMS("count[%d]m_node[%pK]\n", count++, m_node); |
| 754 | 754 | ||
| 755 | /* compare buffer id */ | 755 | /* compare buffer id */ |
| 756 | if (m_node->buf_id == qbuf->buf_id) | 756 | if (m_node->buf_id == qbuf->buf_id) |
| @@ -767,7 +767,7 @@ static int ipp_set_mem_node(struct exynos_drm_ippdrv *ippdrv, | |||
| 767 | struct exynos_drm_ipp_ops *ops = NULL; | 767 | struct exynos_drm_ipp_ops *ops = NULL; |
| 768 | int ret = 0; | 768 | int ret = 0; |
| 769 | 769 | ||
| 770 | DRM_DEBUG_KMS("node[%p]\n", m_node); | 770 | DRM_DEBUG_KMS("node[%pK]\n", m_node); |
| 771 | 771 | ||
| 772 | if (!m_node) { | 772 | if (!m_node) { |
| 773 | DRM_ERROR("invalid queue node.\n"); | 773 | DRM_ERROR("invalid queue node.\n"); |
| @@ -1232,7 +1232,7 @@ static int ipp_start_property(struct exynos_drm_ippdrv *ippdrv, | |||
| 1232 | m_node = list_first_entry(head, | 1232 | m_node = list_first_entry(head, |
| 1233 | struct drm_exynos_ipp_mem_node, list); | 1233 | struct drm_exynos_ipp_mem_node, list); |
| 1234 | 1234 | ||
| 1235 | DRM_DEBUG_KMS("m_node[%p]\n", m_node); | 1235 | DRM_DEBUG_KMS("m_node[%pK]\n", m_node); |
| 1236 | 1236 | ||
| 1237 | ret = ipp_set_mem_node(ippdrv, c_node, m_node); | 1237 | ret = ipp_set_mem_node(ippdrv, c_node, m_node); |
| 1238 | if (ret) { | 1238 | if (ret) { |
| @@ -1601,7 +1601,7 @@ static int ipp_subdrv_probe(struct drm_device *drm_dev, struct device *dev) | |||
| 1601 | } | 1601 | } |
| 1602 | ippdrv->prop_list.ipp_id = ret; | 1602 | ippdrv->prop_list.ipp_id = ret; |
| 1603 | 1603 | ||
| 1604 | DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n", | 1604 | DRM_DEBUG_KMS("count[%d]ippdrv[%pK]ipp_id[%d]\n", |
| 1605 | count++, ippdrv, ret); | 1605 | count++, ippdrv, ret); |
| 1606 | 1606 | ||
| 1607 | /* store parent device for node */ | 1607 | /* store parent device for node */ |
| @@ -1659,7 +1659,7 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev, | |||
| 1659 | 1659 | ||
| 1660 | file_priv->ipp_dev = dev; | 1660 | file_priv->ipp_dev = dev; |
| 1661 | 1661 | ||
| 1662 | DRM_DEBUG_KMS("done priv[%p]\n", dev); | 1662 | DRM_DEBUG_KMS("done priv[%pK]\n", dev); |
| 1663 | 1663 | ||
| 1664 | return 0; | 1664 | return 0; |
| 1665 | } | 1665 | } |
| @@ -1676,7 +1676,7 @@ static void ipp_subdrv_close(struct drm_device *drm_dev, struct device *dev, | |||
| 1676 | mutex_lock(&ippdrv->cmd_lock); | 1676 | mutex_lock(&ippdrv->cmd_lock); |
| 1677 | list_for_each_entry_safe(c_node, tc_node, | 1677 | list_for_each_entry_safe(c_node, tc_node, |
| 1678 | &ippdrv->cmd_list, list) { | 1678 | &ippdrv->cmd_list, list) { |
| 1679 | DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", | 1679 | DRM_DEBUG_KMS("count[%d]ippdrv[%pK]\n", |
| 1680 | count++, ippdrv); | 1680 | count++, ippdrv); |
| 1681 | 1681 | ||
| 1682 | if (c_node->filp == file) { | 1682 | if (c_node->filp == file) { |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c index 6591e406084c..79282a820ecc 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c | |||
| @@ -748,7 +748,7 @@ static int rotator_probe(struct platform_device *pdev) | |||
| 748 | goto err_ippdrv_register; | 748 | goto err_ippdrv_register; |
| 749 | } | 749 | } |
| 750 | 750 | ||
| 751 | DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv); | 751 | DRM_DEBUG_KMS("ippdrv[%pK]\n", ippdrv); |
| 752 | 752 | ||
| 753 | platform_set_drvdata(pdev, rot); | 753 | platform_set_drvdata(pdev, rot); |
| 754 | 754 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c index 57fe514d5c5b..5d9a62a87eec 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c | |||
| @@ -170,6 +170,7 @@ static const struct exynos_drm_crtc_ops vidi_crtc_ops = { | |||
| 170 | .enable_vblank = vidi_enable_vblank, | 170 | .enable_vblank = vidi_enable_vblank, |
| 171 | .disable_vblank = vidi_disable_vblank, | 171 | .disable_vblank = vidi_disable_vblank, |
| 172 | .update_plane = vidi_update_plane, | 172 | .update_plane = vidi_update_plane, |
| 173 | .atomic_flush = exynos_crtc_handle_event, | ||
| 173 | }; | 174 | }; |
| 174 | 175 | ||
| 175 | static void vidi_fake_vblank_timer(unsigned long arg) | 176 | static void vidi_fake_vblank_timer(unsigned long arg) |
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 72143ac10525..25edb635a197 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c | |||
| @@ -1012,6 +1012,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc) | |||
| 1012 | return; | 1012 | return; |
| 1013 | 1013 | ||
| 1014 | mixer_vsync_set_update(mixer_ctx, true); | 1014 | mixer_vsync_set_update(mixer_ctx, true); |
| 1015 | exynos_crtc_handle_event(crtc); | ||
| 1015 | } | 1016 | } |
| 1016 | 1017 | ||
| 1017 | static void mixer_enable(struct exynos_drm_crtc *crtc) | 1018 | static void mixer_enable(struct exynos_drm_crtc *crtc) |
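Taken together, the FIMD, VIDI and mixer hunks make each Exynos CRTC backend responsible for sending the pending event at the end of its own flush: FIMD and the mixer call the helper after re-enabling shadow-register updates, while VIDI, which has no shadow registers, uses it directly as its .atomic_flush callback. The expected shape of a backend flush is then (skeleton, illustrative only):

    static void example_atomic_flush(struct exynos_drm_crtc *crtc)
    {
            /* ... commit/unprotect hardware state first ... */
            exynos_crtc_handle_event(crtc);         /* must come last */
    }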
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 3b6caaca9751..325618d969fe 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c | |||
| @@ -242,7 +242,7 @@ static int alloc_resource(struct intel_vgpu *vgpu, | |||
| 242 | const char *item; | 242 | const char *item; |
| 243 | 243 | ||
| 244 | if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) { | 244 | if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) { |
| 245 | gvt_err("Invalid vGPU creation params\n"); | 245 | gvt_vgpu_err("Invalid vGPU creation params\n"); |
| 246 | return -EINVAL; | 246 | return -EINVAL; |
| 247 | } | 247 | } |
| 248 | 248 | ||
| @@ -285,9 +285,9 @@ static int alloc_resource(struct intel_vgpu *vgpu, | |||
| 285 | return 0; | 285 | return 0; |
| 286 | 286 | ||
| 287 | no_enough_resource: | 287 | no_enough_resource: |
| 288 | gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item); | 288 | gvt_vgpu_err("fail to allocate resource %s\n", item); |
| 289 | gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n", | 289 | gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n", |
| 290 | vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail), | 290 | BYTES_TO_MB(request), BYTES_TO_MB(avail), |
| 291 | BYTES_TO_MB(max), BYTES_TO_MB(taken)); | 291 | BYTES_TO_MB(max), BYTES_TO_MB(taken)); |
| 292 | return -ENOSPC; | 292 | return -ENOSPC; |
| 293 | } | 293 | } |
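The i915/gvt hunks from here on are largely a mechanical logging conversion: open-coded "vgpu%d:" prefixes are dropped in favour of a new gvt_vgpu_err() macro (defined in the debug.h hunk further down) that adds the vGPU id itself. The recurring pattern:

    /* before: the id is threaded through every call site */
    gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);

    /* after: the macro supplies the "vgpu %d:" prefix */
    gvt_vgpu_err("fail to allocate resource %s\n", item);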
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 7ae6e2b241c8..2b92cc8a7d1a 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
| @@ -817,6 +817,25 @@ static bool is_shadowed_mmio(unsigned int offset) | |||
| 817 | return ret; | 817 | return ret; |
| 818 | } | 818 | } |
| 819 | 819 | ||
| 820 | static inline bool is_force_nonpriv_mmio(unsigned int offset) | ||
| 821 | { | ||
| 822 | return (offset >= 0x24d0 && offset < 0x2500); | ||
| 823 | } | ||
| 824 | |||
| 825 | static int force_nonpriv_reg_handler(struct parser_exec_state *s, | ||
| 826 | unsigned int offset, unsigned int index) | ||
| 827 | { | ||
| 828 | struct intel_gvt *gvt = s->vgpu->gvt; | ||
| 829 | unsigned int data = cmd_val(s, index + 1); | ||
| 830 | |||
| 831 | if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) { | ||
| 832 | gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n", | ||
| 833 | offset, data); | ||
| 834 | return -EINVAL; | ||
| 835 | } | ||
| 836 | return 0; | ||
| 837 | } | ||
| 838 | |||
| 820 | static int cmd_reg_handler(struct parser_exec_state *s, | 839 | static int cmd_reg_handler(struct parser_exec_state *s, |
| 821 | unsigned int offset, unsigned int index, char *cmd) | 840 | unsigned int offset, unsigned int index, char *cmd) |
| 822 | { | 841 | { |
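Beyond the logging churn, this file gains a security check, wired into cmd_reg_handler() in the next hunk: an LRI write into the FORCE_TO_NONPRIV range (0x24d0 up to, but excluding, 0x2500) is rejected unless the value passes intel_gvt_in_force_nonpriv_whitelist(). The payload layout the handler assumes is roughly this (sketch; the exact semantics are an assumption based on how FORCE_TO_NONPRIV registers work):

    /*
     * MI_LOAD_REGISTER_IMM payload as seen by force_nonpriv_reg_handler():
     *   cmd_val(s, index)     -> register offset (0x24d0..0x24fc here)
     *   cmd_val(s, index + 1) -> value to load; for FORCE_TO_NONPRIV
     *                            registers this is itself an MMIO offset
     *                            that unprivileged batches could then
     *                            write, hence the whitelist.
     */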
| @@ -824,23 +843,26 @@ static int cmd_reg_handler(struct parser_exec_state *s, | |||
| 824 | struct intel_gvt *gvt = vgpu->gvt; | 843 | struct intel_gvt *gvt = vgpu->gvt; |
| 825 | 844 | ||
| 826 | if (offset + 4 > gvt->device_info.mmio_size) { | 845 | if (offset + 4 > gvt->device_info.mmio_size) { |
| 827 | gvt_err("%s access to (%x) outside of MMIO range\n", | 846 | gvt_vgpu_err("%s access to (%x) outside of MMIO range\n", |
| 828 | cmd, offset); | 847 | cmd, offset); |
| 829 | return -EINVAL; | 848 | return -EINVAL; |
| 830 | } | 849 | } |
| 831 | 850 | ||
| 832 | if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) { | 851 | if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) { |
| 833 | gvt_err("vgpu%d: %s access to non-render register (%x)\n", | 852 | gvt_vgpu_err("%s access to non-render register (%x)\n", |
| 834 | s->vgpu->id, cmd, offset); | 853 | cmd, offset); |
| 835 | return 0; | 854 | return 0; |
| 836 | } | 855 | } |
| 837 | 856 | ||
| 838 | if (is_shadowed_mmio(offset)) { | 857 | if (is_shadowed_mmio(offset)) { |
| 839 | gvt_err("vgpu%d: found access of shadowed MMIO %x\n", | 858 | gvt_vgpu_err("found access of shadowed MMIO %x\n", offset); |
| 840 | s->vgpu->id, offset); | ||
| 841 | return 0; | 859 | return 0; |
| 842 | } | 860 | } |
| 843 | 861 | ||
| 862 | if (is_force_nonpriv_mmio(offset) && | ||
| 863 | force_nonpriv_reg_handler(s, offset, index)) | ||
| 864 | return -EINVAL; | ||
| 865 | |||
| 844 | if (offset == i915_mmio_reg_offset(DERRMR) || | 866 | if (offset == i915_mmio_reg_offset(DERRMR) || |
| 845 | offset == i915_mmio_reg_offset(FORCEWAKE_MT)) { | 867 | offset == i915_mmio_reg_offset(FORCEWAKE_MT)) { |
| 846 | /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */ | 868 | /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */ |
| @@ -1008,7 +1030,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s) | |||
| 1008 | ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl"); | 1030 | ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl"); |
| 1009 | else if (post_sync == 1) { | 1031 | else if (post_sync == 1) { |
| 1010 | /* check ggtt*/ | 1032 | /* check ggtt*/ |
| 1011 | if ((cmd_val(s, 2) & (1 << 2))) { | 1033 | if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) { |
| 1012 | gma = cmd_val(s, 2) & GENMASK(31, 3); | 1034 | gma = cmd_val(s, 2) & GENMASK(31, 3); |
| 1013 | if (gmadr_bytes == 8) | 1035 | if (gmadr_bytes == 8) |
| 1014 | gma |= (cmd_gma_hi(s, 3)) << 32; | 1036 | gma |= (cmd_gma_hi(s, 3)) << 32; |
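This hunk also fixes a real bug in the PIPE_CONTROL post-sync audit: the Global-GTT selector was being tested in dword 2 with a magic (1 << 2), but the fix reads it from dword 1 using the named PIPE_CONTROL_GLOBAL_GTT_IVB mask, while the post-sync address still comes from dword 2. The corrected view of the command (sketch; consult the i915 definitions for exact bit positions):

    /*
     *   dword 1: flags, including PIPE_CONTROL_GLOBAL_GTT_IVB
     *   dword 2: post-sync address, GENMASK(31, 3)
     *   dword 3: upper 32 address bits when gmadr_bytes == 8
     */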
| @@ -1129,6 +1151,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, | |||
| 1129 | struct mi_display_flip_command_info *info) | 1151 | struct mi_display_flip_command_info *info) |
| 1130 | { | 1152 | { |
| 1131 | struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; | 1153 | struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; |
| 1154 | struct intel_vgpu *vgpu = s->vgpu; | ||
| 1132 | u32 dword0 = cmd_val(s, 0); | 1155 | u32 dword0 = cmd_val(s, 0); |
| 1133 | u32 dword1 = cmd_val(s, 1); | 1156 | u32 dword1 = cmd_val(s, 1); |
| 1134 | u32 dword2 = cmd_val(s, 2); | 1157 | u32 dword2 = cmd_val(s, 2); |
| @@ -1167,7 +1190,7 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s, | |||
| 1167 | break; | 1190 | break; |
| 1168 | 1191 | ||
| 1169 | default: | 1192 | default: |
| 1170 | gvt_err("unknown plane code %d\n", plane); | 1193 | gvt_vgpu_err("unknown plane code %d\n", plane); |
| 1171 | return -EINVAL; | 1194 | return -EINVAL; |
| 1172 | } | 1195 | } |
| 1173 | 1196 | ||
| @@ -1274,25 +1297,26 @@ static int update_plane_mmio_from_mi_display_flip( | |||
| 1274 | static int cmd_handler_mi_display_flip(struct parser_exec_state *s) | 1297 | static int cmd_handler_mi_display_flip(struct parser_exec_state *s) |
| 1275 | { | 1298 | { |
| 1276 | struct mi_display_flip_command_info info; | 1299 | struct mi_display_flip_command_info info; |
| 1300 | struct intel_vgpu *vgpu = s->vgpu; | ||
| 1277 | int ret; | 1301 | int ret; |
| 1278 | int i; | 1302 | int i; |
| 1279 | int len = cmd_length(s); | 1303 | int len = cmd_length(s); |
| 1280 | 1304 | ||
| 1281 | ret = decode_mi_display_flip(s, &info); | 1305 | ret = decode_mi_display_flip(s, &info); |
| 1282 | if (ret) { | 1306 | if (ret) { |
| 1283 | gvt_err("fail to decode MI display flip command\n"); | 1307 | gvt_vgpu_err("fail to decode MI display flip command\n"); |
| 1284 | return ret; | 1308 | return ret; |
| 1285 | } | 1309 | } |
| 1286 | 1310 | ||
| 1287 | ret = check_mi_display_flip(s, &info); | 1311 | ret = check_mi_display_flip(s, &info); |
| 1288 | if (ret) { | 1312 | if (ret) { |
| 1289 | gvt_err("invalid MI display flip command\n"); | 1313 | gvt_vgpu_err("invalid MI display flip command\n"); |
| 1290 | return ret; | 1314 | return ret; |
| 1291 | } | 1315 | } |
| 1292 | 1316 | ||
| 1293 | ret = update_plane_mmio_from_mi_display_flip(s, &info); | 1317 | ret = update_plane_mmio_from_mi_display_flip(s, &info); |
| 1294 | if (ret) { | 1318 | if (ret) { |
| 1295 | gvt_err("fail to update plane mmio\n"); | 1319 | gvt_vgpu_err("fail to update plane mmio\n"); |
| 1296 | return ret; | 1320 | return ret; |
| 1297 | } | 1321 | } |
| 1298 | 1322 | ||
| @@ -1350,7 +1374,8 @@ static inline int cmd_address_audit(struct parser_exec_state *s, | |||
| 1350 | int ret; | 1374 | int ret; |
| 1351 | 1375 | ||
| 1352 | if (op_size > max_surface_size) { | 1376 | if (op_size > max_surface_size) { |
| 1353 | gvt_err("command address audit fail name %s\n", s->info->name); | 1377 | gvt_vgpu_err("command address audit fail name %s\n", |
| 1378 | s->info->name); | ||
| 1354 | return -EINVAL; | 1379 | return -EINVAL; |
| 1355 | } | 1380 | } |
| 1356 | 1381 | ||
| @@ -1367,7 +1392,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s, | |||
| 1367 | } | 1392 | } |
| 1368 | return 0; | 1393 | return 0; |
| 1369 | err: | 1394 | err: |
| 1370 | gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n", | 1395 | gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n", |
| 1371 | s->info->name, guest_gma, op_size); | 1396 | s->info->name, guest_gma, op_size); |
| 1372 | 1397 | ||
| 1373 | pr_err("cmd dump: "); | 1398 | pr_err("cmd dump: "); |
| @@ -1412,8 +1437,10 @@ static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s) | |||
| 1412 | 1437 | ||
| 1413 | static inline int unexpected_cmd(struct parser_exec_state *s) | 1438 | static inline int unexpected_cmd(struct parser_exec_state *s) |
| 1414 | { | 1439 | { |
| 1415 | gvt_err("vgpu%d: Unexpected %s in command buffer!\n", | 1440 | struct intel_vgpu *vgpu = s->vgpu; |
| 1416 | s->vgpu->id, s->info->name); | 1441 | |
| 1442 | gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name); | ||
| 1443 | |||
| 1417 | return -EINVAL; | 1444 | return -EINVAL; |
| 1418 | } | 1445 | } |
| 1419 | 1446 | ||
| @@ -1516,7 +1543,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm, | |||
| 1516 | while (gma != end_gma) { | 1543 | while (gma != end_gma) { |
| 1517 | gpa = intel_vgpu_gma_to_gpa(mm, gma); | 1544 | gpa = intel_vgpu_gma_to_gpa(mm, gma); |
| 1518 | if (gpa == INTEL_GVT_INVALID_ADDR) { | 1545 | if (gpa == INTEL_GVT_INVALID_ADDR) { |
| 1519 | gvt_err("invalid gma address: %lx\n", gma); | 1546 | gvt_vgpu_err("invalid gma address: %lx\n", gma); |
| 1520 | return -EFAULT; | 1547 | return -EFAULT; |
| 1521 | } | 1548 | } |
| 1522 | 1549 | ||
| @@ -1557,6 +1584,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s) | |||
| 1557 | uint32_t bb_size = 0; | 1584 | uint32_t bb_size = 0; |
| 1558 | uint32_t cmd_len = 0; | 1585 | uint32_t cmd_len = 0; |
| 1559 | bool met_bb_end = false; | 1586 | bool met_bb_end = false; |
| 1587 | struct intel_vgpu *vgpu = s->vgpu; | ||
| 1560 | u32 cmd; | 1588 | u32 cmd; |
| 1561 | 1589 | ||
| 1562 | /* get the start gm address of the batch buffer */ | 1590 | /* get the start gm address of the batch buffer */ |
| @@ -1565,7 +1593,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s) | |||
| 1565 | 1593 | ||
| 1566 | info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); | 1594 | info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); |
| 1567 | if (info == NULL) { | 1595 | if (info == NULL) { |
| 1568 | gvt_err("unknown cmd 0x%x, opcode=0x%x\n", | 1596 | gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", |
| 1569 | cmd, get_opcode(cmd, s->ring_id)); | 1597 | cmd, get_opcode(cmd, s->ring_id)); |
| 1570 | return -EINVAL; | 1598 | return -EINVAL; |
| 1571 | } | 1599 | } |
| @@ -1574,7 +1602,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s) | |||
| 1574 | gma, gma + 4, &cmd); | 1602 | gma, gma + 4, &cmd); |
| 1575 | info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); | 1603 | info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); |
| 1576 | if (info == NULL) { | 1604 | if (info == NULL) { |
| 1577 | gvt_err("unknown cmd 0x%x, opcode=0x%x\n", | 1605 | gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", |
| 1578 | cmd, get_opcode(cmd, s->ring_id)); | 1606 | cmd, get_opcode(cmd, s->ring_id)); |
| 1579 | return -EINVAL; | 1607 | return -EINVAL; |
| 1580 | } | 1608 | } |
| @@ -1599,6 +1627,7 @@ static uint32_t find_bb_size(struct parser_exec_state *s) | |||
| 1599 | static int perform_bb_shadow(struct parser_exec_state *s) | 1627 | static int perform_bb_shadow(struct parser_exec_state *s) |
| 1600 | { | 1628 | { |
| 1601 | struct intel_shadow_bb_entry *entry_obj; | 1629 | struct intel_shadow_bb_entry *entry_obj; |
| 1630 | struct intel_vgpu *vgpu = s->vgpu; | ||
| 1602 | unsigned long gma = 0; | 1631 | unsigned long gma = 0; |
| 1603 | uint32_t bb_size; | 1632 | uint32_t bb_size; |
| 1604 | void *dst = NULL; | 1633 | void *dst = NULL; |
| @@ -1633,7 +1662,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) | |||
| 1633 | 1662 | ||
| 1634 | ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false); | 1663 | ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false); |
| 1635 | if (ret) { | 1664 | if (ret) { |
| 1636 | gvt_err("failed to set shadow batch to CPU\n"); | 1665 | gvt_vgpu_err("failed to set shadow batch to CPU\n"); |
| 1637 | goto unmap_src; | 1666 | goto unmap_src; |
| 1638 | } | 1667 | } |
| 1639 | 1668 | ||
| @@ -1645,7 +1674,7 @@ static int perform_bb_shadow(struct parser_exec_state *s) | |||
| 1645 | gma, gma + bb_size, | 1674 | gma, gma + bb_size, |
| 1646 | dst); | 1675 | dst); |
| 1647 | if (ret) { | 1676 | if (ret) { |
| 1648 | gvt_err("fail to copy guest ring buffer\n"); | 1677 | gvt_vgpu_err("fail to copy guest ring buffer\n"); |
| 1649 | goto unmap_src; | 1678 | goto unmap_src; |
| 1650 | } | 1679 | } |
| 1651 | 1680 | ||
| @@ -1676,15 +1705,16 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) | |||
| 1676 | { | 1705 | { |
| 1677 | bool second_level; | 1706 | bool second_level; |
| 1678 | int ret = 0; | 1707 | int ret = 0; |
| 1708 | struct intel_vgpu *vgpu = s->vgpu; | ||
| 1679 | 1709 | ||
| 1680 | if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { | 1710 | if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { |
| 1681 | gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); | 1711 | gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); |
| 1682 | return -EINVAL; | 1712 | return -EINVAL; |
| 1683 | } | 1713 | } |
| 1684 | 1714 | ||
| 1685 | second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; | 1715 | second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; |
| 1686 | if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { | 1716 | if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { |
| 1687 | gvt_err("Jumping to 2nd level BB from RB is not allowed\n"); | 1717 | gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n"); |
| 1688 | return -EINVAL; | 1718 | return -EINVAL; |
| 1689 | } | 1719 | } |
| 1690 | 1720 | ||
| @@ -1702,7 +1732,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s) | |||
| 1702 | if (batch_buffer_needs_scan(s)) { | 1732 | if (batch_buffer_needs_scan(s)) { |
| 1703 | ret = perform_bb_shadow(s); | 1733 | ret = perform_bb_shadow(s); |
| 1704 | if (ret < 0) | 1734 | if (ret < 0) |
| 1705 | gvt_err("invalid shadow batch buffer\n"); | 1735 | gvt_vgpu_err("invalid shadow batch buffer\n"); |
| 1706 | } else { | 1736 | } else { |
| 1707 | /* emulate a batch buffer end to do return right */ | 1737 | /* emulate a batch buffer end to do return right */ |
| 1708 | ret = cmd_handler_mi_batch_buffer_end(s); | 1738 | ret = cmd_handler_mi_batch_buffer_end(s); |
| @@ -2429,6 +2459,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) | |||
| 2429 | int ret = 0; | 2459 | int ret = 0; |
| 2430 | cycles_t t0, t1, t2; | 2460 | cycles_t t0, t1, t2; |
| 2431 | struct parser_exec_state s_before_advance_custom; | 2461 | struct parser_exec_state s_before_advance_custom; |
| 2462 | struct intel_vgpu *vgpu = s->vgpu; | ||
| 2432 | 2463 | ||
| 2433 | t0 = get_cycles(); | 2464 | t0 = get_cycles(); |
| 2434 | 2465 | ||
| @@ -2436,7 +2467,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) | |||
| 2436 | 2467 | ||
| 2437 | info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); | 2468 | info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); |
| 2438 | if (info == NULL) { | 2469 | if (info == NULL) { |
| 2439 | gvt_err("unknown cmd 0x%x, opcode=0x%x\n", | 2470 | gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", |
| 2440 | cmd, get_opcode(cmd, s->ring_id)); | 2471 | cmd, get_opcode(cmd, s->ring_id)); |
| 2441 | return -EINVAL; | 2472 | return -EINVAL; |
| 2442 | } | 2473 | } |
| @@ -2452,7 +2483,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) | |||
| 2452 | if (info->handler) { | 2483 | if (info->handler) { |
| 2453 | ret = info->handler(s); | 2484 | ret = info->handler(s); |
| 2454 | if (ret < 0) { | 2485 | if (ret < 0) { |
| 2455 | gvt_err("%s handler error\n", info->name); | 2486 | gvt_vgpu_err("%s handler error\n", info->name); |
| 2456 | return ret; | 2487 | return ret; |
| 2457 | } | 2488 | } |
| 2458 | } | 2489 | } |
| @@ -2463,7 +2494,7 @@ static int cmd_parser_exec(struct parser_exec_state *s) | |||
| 2463 | if (!(info->flag & F_IP_ADVANCE_CUSTOM)) { | 2494 | if (!(info->flag & F_IP_ADVANCE_CUSTOM)) { |
| 2464 | ret = cmd_advance_default(s); | 2495 | ret = cmd_advance_default(s); |
| 2465 | if (ret) { | 2496 | if (ret) { |
| 2466 | gvt_err("%s IP advance error\n", info->name); | 2497 | gvt_vgpu_err("%s IP advance error\n", info->name); |
| 2467 | return ret; | 2498 | return ret; |
| 2468 | } | 2499 | } |
| 2469 | } | 2500 | } |
| @@ -2486,6 +2517,7 @@ static int command_scan(struct parser_exec_state *s, | |||
| 2486 | 2517 | ||
| 2487 | unsigned long gma_head, gma_tail, gma_bottom; | 2518 | unsigned long gma_head, gma_tail, gma_bottom; |
| 2488 | int ret = 0; | 2519 | int ret = 0; |
| 2520 | struct intel_vgpu *vgpu = s->vgpu; | ||
| 2489 | 2521 | ||
| 2490 | gma_head = rb_start + rb_head; | 2522 | gma_head = rb_start + rb_head; |
| 2491 | gma_tail = rb_start + rb_tail; | 2523 | gma_tail = rb_start + rb_tail; |
| @@ -2497,7 +2529,7 @@ static int command_scan(struct parser_exec_state *s, | |||
| 2497 | if (s->buf_type == RING_BUFFER_INSTRUCTION) { | 2529 | if (s->buf_type == RING_BUFFER_INSTRUCTION) { |
| 2498 | if (!(s->ip_gma >= rb_start) || | 2530 | if (!(s->ip_gma >= rb_start) || |
| 2499 | !(s->ip_gma < gma_bottom)) { | 2531 | !(s->ip_gma < gma_bottom)) { |
| 2500 | gvt_err("ip_gma %lx out of ring scope." | 2532 | gvt_vgpu_err("ip_gma %lx out of ring scope." |
| 2501 | "(base:0x%lx, bottom: 0x%lx)\n", | 2533 | "(base:0x%lx, bottom: 0x%lx)\n", |
| 2502 | s->ip_gma, rb_start, | 2534 | s->ip_gma, rb_start, |
| 2503 | gma_bottom); | 2535 | gma_bottom); |
| @@ -2505,7 +2537,7 @@ static int command_scan(struct parser_exec_state *s, | |||
| 2505 | return -EINVAL; | 2537 | return -EINVAL; |
| 2506 | } | 2538 | } |
| 2507 | if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { | 2539 | if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { |
| 2508 | gvt_err("ip_gma %lx out of range." | 2540 | gvt_vgpu_err("ip_gma %lx out of range." |
| 2509 | "base 0x%lx head 0x%lx tail 0x%lx\n", | 2541 | "base 0x%lx head 0x%lx tail 0x%lx\n", |
| 2510 | s->ip_gma, rb_start, | 2542 | s->ip_gma, rb_start, |
| 2511 | rb_head, rb_tail); | 2543 | rb_head, rb_tail); |
| @@ -2515,7 +2547,7 @@ static int command_scan(struct parser_exec_state *s, | |||
| 2515 | } | 2547 | } |
| 2516 | ret = cmd_parser_exec(s); | 2548 | ret = cmd_parser_exec(s); |
| 2517 | if (ret) { | 2549 | if (ret) { |
| 2518 | gvt_err("cmd parser error\n"); | 2550 | gvt_vgpu_err("cmd parser error\n"); |
| 2519 | parser_exec_state_dump(s); | 2551 | parser_exec_state_dump(s); |
| 2520 | break; | 2552 | break; |
| 2521 | } | 2553 | } |
| @@ -2639,7 +2671,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) | |||
| 2639 | gma_head, gma_top, | 2671 | gma_head, gma_top, |
| 2640 | workload->shadow_ring_buffer_va); | 2672 | workload->shadow_ring_buffer_va); |
| 2641 | if (ret) { | 2673 | if (ret) { |
| 2642 | gvt_err("fail to copy guest ring buffer\n"); | 2674 | gvt_vgpu_err("fail to copy guest ring buffer\n"); |
| 2643 | return ret; | 2675 | return ret; |
| 2644 | } | 2676 | } |
| 2645 | copy_len = gma_top - gma_head; | 2677 | copy_len = gma_top - gma_head; |
| @@ -2651,7 +2683,7 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) | |||
| 2651 | gma_head, gma_tail, | 2683 | gma_head, gma_tail, |
| 2652 | workload->shadow_ring_buffer_va + copy_len); | 2684 | workload->shadow_ring_buffer_va + copy_len); |
| 2653 | if (ret) { | 2685 | if (ret) { |
| 2654 | gvt_err("fail to copy guest ring buffer\n"); | 2686 | gvt_vgpu_err("fail to copy guest ring buffer\n"); |
| 2655 | return ret; | 2687 | return ret; |
| 2656 | } | 2688 | } |
| 2657 | ring->tail += workload->rb_len; | 2689 | ring->tail += workload->rb_len; |
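Both copy failures above sit in the ring-buffer shadowing path, which copies the guest ring into the shadow buffer in at most two chunks so that a ring whose contents wrap past the top is handled correctly. A simplified view (illustrative; assumes the variables are set up as in the surrounding function):

    if (gma_head > gma_tail) {              /* guest ring contents wrap */
            ret = copy_gma_to_hva(vgpu, mm, gma_head, gma_top,
                                  workload->shadow_ring_buffer_va);
            if (ret)
                    return ret;
            copy_len = gma_top - gma_head;
            gma_head = rb_start;            /* second chunk starts at base */
    }
    ret = copy_gma_to_hva(vgpu, mm, gma_head, gma_tail,
                          workload->shadow_ring_buffer_va + copy_len);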
| @@ -2662,16 +2694,17 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload) | |||
| 2662 | int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) | 2694 | int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) |
| 2663 | { | 2695 | { |
| 2664 | int ret; | 2696 | int ret; |
| 2697 | struct intel_vgpu *vgpu = workload->vgpu; | ||
| 2665 | 2698 | ||
| 2666 | ret = shadow_workload_ring_buffer(workload); | 2699 | ret = shadow_workload_ring_buffer(workload); |
| 2667 | if (ret) { | 2700 | if (ret) { |
| 2668 | gvt_err("fail to shadow workload ring_buffer\n"); | 2701 | gvt_vgpu_err("fail to shadow workload ring_buffer\n"); |
| 2669 | return ret; | 2702 | return ret; |
| 2670 | } | 2703 | } |
| 2671 | 2704 | ||
| 2672 | ret = scan_workload(workload); | 2705 | ret = scan_workload(workload); |
| 2673 | if (ret) { | 2706 | if (ret) { |
| 2674 | gvt_err("scan workload error\n"); | 2707 | gvt_vgpu_err("scan workload error\n"); |
| 2675 | return ret; | 2708 | return ret; |
| 2676 | } | 2709 | } |
| 2677 | return 0; | 2710 | return 0; |
| @@ -2681,6 +2714,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
| 2681 | { | 2714 | { |
| 2682 | int ctx_size = wa_ctx->indirect_ctx.size; | 2715 | int ctx_size = wa_ctx->indirect_ctx.size; |
| 2683 | unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; | 2716 | unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; |
| 2717 | struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; | ||
| 2684 | struct drm_i915_gem_object *obj; | 2718 | struct drm_i915_gem_object *obj; |
| 2685 | int ret = 0; | 2719 | int ret = 0; |
| 2686 | void *map; | 2720 | void *map; |
| @@ -2694,14 +2728,14 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
| 2694 | /* get the va of the shadow batch buffer */ | 2728 | /* get the va of the shadow batch buffer */ |
| 2695 | map = i915_gem_object_pin_map(obj, I915_MAP_WB); | 2729 | map = i915_gem_object_pin_map(obj, I915_MAP_WB); |
| 2696 | if (IS_ERR(map)) { | 2730 | if (IS_ERR(map)) { |
| 2697 | gvt_err("failed to vmap shadow indirect ctx\n"); | 2731 | gvt_vgpu_err("failed to vmap shadow indirect ctx\n"); |
| 2698 | ret = PTR_ERR(map); | 2732 | ret = PTR_ERR(map); |
| 2699 | goto put_obj; | 2733 | goto put_obj; |
| 2700 | } | 2734 | } |
| 2701 | 2735 | ||
| 2702 | ret = i915_gem_object_set_to_cpu_domain(obj, false); | 2736 | ret = i915_gem_object_set_to_cpu_domain(obj, false); |
| 2703 | if (ret) { | 2737 | if (ret) { |
| 2704 | gvt_err("failed to set shadow indirect ctx to CPU\n"); | 2738 | gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n"); |
| 2705 | goto unmap_src; | 2739 | goto unmap_src; |
| 2706 | } | 2740 | } |
| 2707 | 2741 | ||
| @@ -2710,7 +2744,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
| 2710 | guest_gma, guest_gma + ctx_size, | 2744 | guest_gma, guest_gma + ctx_size, |
| 2711 | map); | 2745 | map); |
| 2712 | if (ret) { | 2746 | if (ret) { |
| 2713 | gvt_err("fail to copy guest indirect ctx\n"); | 2747 | gvt_vgpu_err("fail to copy guest indirect ctx\n"); |
| 2714 | goto unmap_src; | 2748 | goto unmap_src; |
| 2715 | } | 2749 | } |
| 2716 | 2750 | ||
| @@ -2744,13 +2778,14 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
| 2744 | int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | 2778 | int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) |
| 2745 | { | 2779 | { |
| 2746 | int ret; | 2780 | int ret; |
| 2781 | struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; | ||
| 2747 | 2782 | ||
| 2748 | if (wa_ctx->indirect_ctx.size == 0) | 2783 | if (wa_ctx->indirect_ctx.size == 0) |
| 2749 | return 0; | 2784 | return 0; |
| 2750 | 2785 | ||
| 2751 | ret = shadow_indirect_ctx(wa_ctx); | 2786 | ret = shadow_indirect_ctx(wa_ctx); |
| 2752 | if (ret) { | 2787 | if (ret) { |
| 2753 | gvt_err("fail to shadow indirect ctx\n"); | 2788 | gvt_vgpu_err("fail to shadow indirect ctx\n"); |
| 2754 | return ret; | 2789 | return ret; |
| 2755 | } | 2790 | } |
| 2756 | 2791 | ||
| @@ -2758,7 +2793,7 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
| 2758 | 2793 | ||
| 2759 | ret = scan_wa_ctx(wa_ctx); | 2794 | ret = scan_wa_ctx(wa_ctx); |
| 2760 | if (ret) { | 2795 | if (ret) { |
| 2761 | gvt_err("scan wa ctx error\n"); | 2796 | gvt_vgpu_err("scan wa ctx error\n"); |
| 2762 | return ret; | 2797 | return ret; |
| 2763 | } | 2798 | } |
| 2764 | 2799 | ||
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h index 68cba7bd980a..b0cff4dc2684 100644 --- a/drivers/gpu/drm/i915/gvt/debug.h +++ b/drivers/gpu/drm/i915/gvt/debug.h | |||
| @@ -27,6 +27,14 @@ | |||
| 27 | #define gvt_err(fmt, args...) \ | 27 | #define gvt_err(fmt, args...) \ |
| 28 | DRM_ERROR("gvt: "fmt, ##args) | 28 | DRM_ERROR("gvt: "fmt, ##args) |
| 29 | 29 | ||
| 30 | #define gvt_vgpu_err(fmt, args...) \ | ||
| 31 | do { \ | ||
| 32 | if (IS_ERR_OR_NULL(vgpu)) \ | ||
| 33 | DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \ | ||
| 34 | else \ | ||
| 35 | DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\ | ||
| 36 | } while (0) | ||
| 37 | |||
| 30 | #define gvt_dbg_core(fmt, args...) \ | 38 | #define gvt_dbg_core(fmt, args...) \ |
| 31 | DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args) | 39 | DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args) |
| 32 | 40 | ||
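Two details of gvt_vgpu_err() are worth calling out. It is deliberately non-hygienic: it expects a variable literally named vgpu in the caller's scope, which is why so many functions in this patch add a struct intel_vgpu *vgpu local that exists only for logging. And it logs at DRM_DEBUG_DRIVER level rather than DRM_ERROR, presumably so that a misbehaving guest cannot flood the host log at error severity. Typical use:

    static int example_handler(struct parser_exec_state *s)
    {
            struct intel_vgpu *vgpu = s->vgpu;      /* required by the macro */

            gvt_vgpu_err("unknown cmd 0x%x\n", cmd_val(s, 0));
            return -EINVAL;
    }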
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c index bda85dff7b2a..f1648fe5e5ea 100644 --- a/drivers/gpu/drm/i915/gvt/edid.c +++ b/drivers/gpu/drm/i915/gvt/edid.c | |||
| @@ -52,16 +52,16 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) | |||
| 52 | unsigned char chr = 0; | 52 | unsigned char chr = 0; |
| 53 | 53 | ||
| 54 | if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) { | 54 | if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) { |
| 55 | gvt_err("Driver tries to read EDID without proper sequence!\n"); | 55 | gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n"); |
| 56 | return 0; | 56 | return 0; |
| 57 | } | 57 | } |
| 58 | if (edid->current_edid_read >= EDID_SIZE) { | 58 | if (edid->current_edid_read >= EDID_SIZE) { |
| 59 | gvt_err("edid_get_byte() exceeds the size of EDID!\n"); | 59 | gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n"); |
| 60 | return 0; | 60 | return 0; |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | if (!edid->edid_available) { | 63 | if (!edid->edid_available) { |
| 64 | gvt_err("Reading EDID but EDID is not available!\n"); | 64 | gvt_vgpu_err("Reading EDID but EDID is not available!\n"); |
| 65 | return 0; | 65 | return 0; |
| 66 | } | 66 | } |
| 67 | 67 | ||
| @@ -72,7 +72,7 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu) | |||
| 72 | chr = edid_data->edid_block[edid->current_edid_read]; | 72 | chr = edid_data->edid_block[edid->current_edid_read]; |
| 73 | edid->current_edid_read++; | 73 | edid->current_edid_read++; |
| 74 | } else { | 74 | } else { |
| 75 | gvt_err("No EDID available during the reading?\n"); | 75 | gvt_vgpu_err("No EDID available during the reading?\n"); |
| 76 | } | 76 | } |
| 77 | return chr; | 77 | return chr; |
| 78 | } | 78 | } |
| @@ -223,7 +223,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 223 | vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE; | 223 | vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE; |
| 224 | break; | 224 | break; |
| 225 | default: | 225 | default: |
| 226 | gvt_err("Unknown/reserved GMBUS cycle detected!\n"); | 226 | gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n"); |
| 227 | break; | 227 | break; |
| 228 | } | 228 | } |
| 229 | /* | 229 | /* |
| @@ -292,8 +292,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 292 | */ | 292 | */ |
| 293 | } else { | 293 | } else { |
| 294 | memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); | 294 | memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes); |
| 295 | gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n", | 295 | gvt_vgpu_err("warning: gmbus3 read with nothing returned\n"); |
| 296 | vgpu->id); | ||
| 297 | } | 296 | } |
| 298 | return 0; | 297 | return 0; |
| 299 | } | 298 | } |
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 46eb9fd3c03f..f1f426a97aa9 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c | |||
| @@ -172,6 +172,7 @@ static int emulate_execlist_ctx_schedule_out( | |||
| 172 | struct intel_vgpu_execlist *execlist, | 172 | struct intel_vgpu_execlist *execlist, |
| 173 | struct execlist_ctx_descriptor_format *ctx) | 173 | struct execlist_ctx_descriptor_format *ctx) |
| 174 | { | 174 | { |
| 175 | struct intel_vgpu *vgpu = execlist->vgpu; | ||
| 175 | struct intel_vgpu_execlist_slot *running = execlist->running_slot; | 176 | struct intel_vgpu_execlist_slot *running = execlist->running_slot; |
| 176 | struct intel_vgpu_execlist_slot *pending = execlist->pending_slot; | 177 | struct intel_vgpu_execlist_slot *pending = execlist->pending_slot; |
| 177 | struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0]; | 178 | struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0]; |
| @@ -183,7 +184,7 @@ static int emulate_execlist_ctx_schedule_out( | |||
| 183 | gvt_dbg_el("schedule out context id %x\n", ctx->context_id); | 184 | gvt_dbg_el("schedule out context id %x\n", ctx->context_id); |
| 184 | 185 | ||
| 185 | if (WARN_ON(!same_context(ctx, execlist->running_context))) { | 186 | if (WARN_ON(!same_context(ctx, execlist->running_context))) { |
| 186 | gvt_err("schedule out context is not running context," | 187 | gvt_vgpu_err("schedule out context is not running context," |
| 187 | "ctx id %x running ctx id %x\n", | 188 | "ctx id %x running ctx id %x\n", |
| 188 | ctx->context_id, | 189 | ctx->context_id, |
| 189 | execlist->running_context->context_id); | 190 | execlist->running_context->context_id); |
| @@ -254,7 +255,7 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot( | |||
| 254 | status.udw = vgpu_vreg(vgpu, status_reg + 4); | 255 | status.udw = vgpu_vreg(vgpu, status_reg + 4); |
| 255 | 256 | ||
| 256 | if (status.execlist_queue_full) { | 257 | if (status.execlist_queue_full) { |
| 257 | gvt_err("virtual execlist slots are full\n"); | 258 | gvt_vgpu_err("virtual execlist slots are full\n"); |
| 258 | return NULL; | 259 | return NULL; |
| 259 | } | 260 | } |
| 260 | 261 | ||
| @@ -270,11 +271,12 @@ static int emulate_execlist_schedule_in(struct intel_vgpu_execlist *execlist, | |||
| 270 | 271 | ||
| 271 | struct execlist_ctx_descriptor_format *ctx0, *ctx1; | 272 | struct execlist_ctx_descriptor_format *ctx0, *ctx1; |
| 272 | struct execlist_context_status_format status; | 273 | struct execlist_context_status_format status; |
| 274 | struct intel_vgpu *vgpu = execlist->vgpu; | ||
| 273 | 275 | ||
| 274 | gvt_dbg_el("emulate schedule-in\n"); | 276 | gvt_dbg_el("emulate schedule-in\n"); |
| 275 | 277 | ||
| 276 | if (!slot) { | 278 | if (!slot) { |
| 277 | gvt_err("no available execlist slot\n"); | 279 | gvt_vgpu_err("no available execlist slot\n"); |
| 278 | return -EINVAL; | 280 | return -EINVAL; |
| 279 | } | 281 | } |
| 280 | 282 | ||
| @@ -375,7 +377,6 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) | |||
| 375 | 377 | ||
| 376 | vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); | 378 | vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); |
| 377 | if (IS_ERR(vma)) { | 379 | if (IS_ERR(vma)) { |
| 378 | gvt_err("Cannot pin\n"); | ||
| 379 | return; | 380 | return; |
| 380 | } | 381 | } |
| 381 | 382 | ||
| @@ -428,7 +429,6 @@ static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
| 428 | vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, | 429 | vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, |
| 429 | 0, CACHELINE_BYTES, 0); | 430 | 0, CACHELINE_BYTES, 0); |
| 430 | if (IS_ERR(vma)) { | 431 | if (IS_ERR(vma)) { |
| 431 | gvt_err("Cannot pin indirect ctx obj\n"); | ||
| 432 | return; | 432 | return; |
| 433 | } | 433 | } |
| 434 | 434 | ||
| @@ -561,6 +561,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload) | |||
| 561 | { | 561 | { |
| 562 | struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; | 562 | struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc; |
| 563 | struct intel_vgpu_mm *mm; | 563 | struct intel_vgpu_mm *mm; |
| 564 | struct intel_vgpu *vgpu = workload->vgpu; | ||
| 564 | int page_table_level; | 565 | int page_table_level; |
| 565 | u32 pdp[8]; | 566 | u32 pdp[8]; |
| 566 | 567 | ||
| @@ -569,7 +570,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload) | |||
| 569 | } else if (desc->addressing_mode == 3) { /* legacy 64 bit */ | 570 | } else if (desc->addressing_mode == 3) { /* legacy 64 bit */ |
| 570 | page_table_level = 4; | 571 | page_table_level = 4; |
| 571 | } else { | 572 | } else { |
| 572 | gvt_err("Advanced Context mode(SVM) is not supported!\n"); | 573 | gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n"); |
| 573 | return -EINVAL; | 574 | return -EINVAL; |
| 574 | } | 575 | } |
| 575 | 576 | ||
| @@ -583,7 +584,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload) | |||
| 583 | mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT, | 584 | mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT, |
| 584 | pdp, page_table_level, 0); | 585 | pdp, page_table_level, 0); |
| 585 | if (IS_ERR(mm)) { | 586 | if (IS_ERR(mm)) { |
| 586 | gvt_err("fail to create mm object.\n"); | 587 | gvt_vgpu_err("fail to create mm object.\n"); |
| 587 | return PTR_ERR(mm); | 588 | return PTR_ERR(mm); |
| 588 | } | 589 | } |
| 589 | } | 590 | } |
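For context on the addressing-mode check above: the execlist context descriptor selects the guest's page-table layout, and prepare_mm() maps that onto the shadow PPGTT depth. Roughly (the legacy 32-bit branch is outside this hunk, so its details are an assumption):

    /*
     *   legacy 32-bit mode  -> 3-level PPGTT (page_table_level = 3)
     *   legacy 64-bit mode  -> 4-level PPGTT (page_table_level = 4)
     *   advanced (SVM) mode -> rejected with -EINVAL, not supported
     */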
| @@ -609,7 +610,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, | |||
| 609 | ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, | 610 | ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, |
| 610 | (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT)); | 611 | (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT)); |
| 611 | if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) { | 612 | if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) { |
| 612 | gvt_err("invalid guest context LRCA: %x\n", desc->lrca); | 613 | gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca); |
| 613 | return -EINVAL; | 614 | return -EINVAL; |
| 614 | } | 615 | } |
| 615 | 616 | ||
| @@ -724,8 +725,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) | |||
| 724 | continue; | 725 | continue; |
| 725 | 726 | ||
| 726 | if (!desc[i]->privilege_access) { | 727 | if (!desc[i]->privilege_access) { |
| 727 | gvt_err("vgpu%d: unexpected GGTT elsp submission\n", | 728 | gvt_vgpu_err("unexpected GGTT elsp submission\n"); |
| 728 | vgpu->id); | ||
| 729 | return -EINVAL; | 729 | return -EINVAL; |
| 730 | } | 730 | } |
| 731 | 731 | ||
| @@ -735,15 +735,13 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) | |||
| 735 | } | 735 | } |
| 736 | 736 | ||
| 737 | if (!valid_desc_bitmap) { | 737 | if (!valid_desc_bitmap) { |
| 738 | gvt_err("vgpu%d: no valid desc in a elsp submission\n", | 738 | gvt_vgpu_err("no valid desc in a elsp submission\n"); |
| 739 | vgpu->id); | ||
| 740 | return -EINVAL; | 739 | return -EINVAL; |
| 741 | } | 740 | } |
| 742 | 741 | ||
| 743 | if (!test_bit(0, (void *)&valid_desc_bitmap) && | 742 | if (!test_bit(0, (void *)&valid_desc_bitmap) && |
| 744 | test_bit(1, (void *)&valid_desc_bitmap)) { | 743 | test_bit(1, (void *)&valid_desc_bitmap)) { |
| 745 | gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n", | 744 | gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n"); |
| 746 | vgpu->id); | ||
| 747 | return -EINVAL; | 745 | return -EINVAL; |
| 748 | } | 746 | } |
| 749 | 747 | ||
| @@ -752,8 +750,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id) | |||
| 752 | ret = submit_context(vgpu, ring_id, &valid_desc[i], | 750 | ret = submit_context(vgpu, ring_id, &valid_desc[i], |
| 753 | emulate_schedule_in); | 751 | emulate_schedule_in); |
| 754 | if (ret) { | 752 | if (ret) { |
| 755 | gvt_err("vgpu%d: fail to schedule workload\n", | 753 | gvt_vgpu_err("fail to schedule workload\n"); |
| 756 | vgpu->id); | ||
| 757 | return ret; | 754 | return ret; |
| 758 | } | 755 | } |
| 759 | emulate_schedule_in = false; | 756 | emulate_schedule_in = false; |
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 6a5ff23ded90..da7312715824 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c | |||
| @@ -49,8 +49,8 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size) | |||
| 49 | { | 49 | { |
| 50 | if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size | 50 | if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size |
| 51 | && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) { | 51 | && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) { |
| 52 | gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n", | 52 | gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n", |
| 53 | vgpu->id, addr, size); | 53 | addr, size); |
| 54 | return false; | 54 | return false; |
| 55 | } | 55 | } |
| 56 | return true; | 56 | return true; |
| @@ -430,7 +430,7 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p, | |||
| 430 | 430 | ||
| 431 | mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); | 431 | mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); |
| 432 | if (mfn == INTEL_GVT_INVALID_ADDR) { | 432 | if (mfn == INTEL_GVT_INVALID_ADDR) { |
| 433 | gvt_err("fail to translate gfn: 0x%lx\n", gfn); | 433 | gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn); |
| 434 | return -ENXIO; | 434 | return -ENXIO; |
| 435 | } | 435 | } |
| 436 | 436 | ||
| @@ -611,7 +611,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu, | |||
| 611 | 611 | ||
| 612 | daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); | 612 | daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); |
| 613 | if (dma_mapping_error(kdev, daddr)) { | 613 | if (dma_mapping_error(kdev, daddr)) { |
| 614 | gvt_err("fail to map dma addr\n"); | 614 | gvt_vgpu_err("fail to map dma addr\n"); |
| 615 | return -EINVAL; | 615 | return -EINVAL; |
| 616 | } | 616 | } |
| 617 | 617 | ||
| @@ -735,7 +735,7 @@ retry: | |||
| 735 | if (reclaim_one_mm(vgpu->gvt)) | 735 | if (reclaim_one_mm(vgpu->gvt)) |
| 736 | goto retry; | 736 | goto retry; |
| 737 | 737 | ||
| 738 | gvt_err("fail to allocate ppgtt shadow page\n"); | 738 | gvt_vgpu_err("fail to allocate ppgtt shadow page\n"); |
| 739 | return ERR_PTR(-ENOMEM); | 739 | return ERR_PTR(-ENOMEM); |
| 740 | } | 740 | } |
| 741 | 741 | ||
| @@ -750,14 +750,14 @@ retry: | |||
| 750 | */ | 750 | */ |
| 751 | ret = init_shadow_page(vgpu, &spt->shadow_page, type); | 751 | ret = init_shadow_page(vgpu, &spt->shadow_page, type); |
| 752 | if (ret) { | 752 | if (ret) { |
| 753 | gvt_err("fail to initialize shadow page for spt\n"); | 753 | gvt_vgpu_err("fail to initialize shadow page for spt\n"); |
| 754 | goto err; | 754 | goto err; |
| 755 | } | 755 | } |
| 756 | 756 | ||
| 757 | ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page, | 757 | ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page, |
| 758 | gfn, ppgtt_write_protection_handler, NULL); | 758 | gfn, ppgtt_write_protection_handler, NULL); |
| 759 | if (ret) { | 759 | if (ret) { |
| 760 | gvt_err("fail to initialize guest page for spt\n"); | 760 | gvt_vgpu_err("fail to initialize guest page for spt\n"); |
| 761 | goto err; | 761 | goto err; |
| 762 | } | 762 | } |
| 763 | 763 | ||
| @@ -776,8 +776,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page( | |||
| 776 | if (p) | 776 | if (p) |
| 777 | return shadow_page_to_ppgtt_spt(p); | 777 | return shadow_page_to_ppgtt_spt(p); |
| 778 | 778 | ||
| 779 | gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n", | 779 | gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn); |
| 780 | vgpu->id, mfn); | ||
| 781 | return NULL; | 780 | return NULL; |
| 782 | } | 781 | } |
| 783 | 782 | ||
| @@ -827,8 +826,8 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu, | |||
| 827 | } | 826 | } |
| 828 | s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); | 827 | s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); |
| 829 | if (!s) { | 828 | if (!s) { |
| 830 | gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n", | 829 | gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n", |
| 831 | vgpu->id, ops->get_pfn(e)); | 830 | ops->get_pfn(e)); |
| 832 | return -ENXIO; | 831 | return -ENXIO; |
| 833 | } | 832 | } |
| 834 | return ppgtt_invalidate_shadow_page(s); | 833 | return ppgtt_invalidate_shadow_page(s); |
| @@ -836,6 +835,7 @@ static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu, | |||
| 836 | 835 | ||
| 837 | static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) | 836 | static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) |
| 838 | { | 837 | { |
| 838 | struct intel_vgpu *vgpu = spt->vgpu; | ||
| 839 | struct intel_gvt_gtt_entry e; | 839 | struct intel_gvt_gtt_entry e; |
| 840 | unsigned long index; | 840 | unsigned long index; |
| 841 | int ret; | 841 | int ret; |
| @@ -854,7 +854,7 @@ static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) | |||
| 854 | 854 | ||
| 855 | for_each_present_shadow_entry(spt, &e, index) { | 855 | for_each_present_shadow_entry(spt, &e, index) { |
| 856 | if (!gtt_type_is_pt(get_next_pt_type(e.type))) { | 856 | if (!gtt_type_is_pt(get_next_pt_type(e.type))) { |
| 857 | gvt_err("GVT doesn't support pse bit for now\n"); | 857 | gvt_vgpu_err("GVT doesn't support pse bit for now\n"); |
| 858 | return -EINVAL; | 858 | return -EINVAL; |
| 859 | } | 859 | } |
| 860 | ret = ppgtt_invalidate_shadow_page_by_shadow_entry( | 860 | ret = ppgtt_invalidate_shadow_page_by_shadow_entry( |
| @@ -868,8 +868,8 @@ release: | |||
| 868 | ppgtt_free_shadow_page(spt); | 868 | ppgtt_free_shadow_page(spt); |
| 869 | return 0; | 869 | return 0; |
| 870 | fail: | 870 | fail: |
| 871 | gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n", | 871 | gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n", |
| 872 | spt->vgpu->id, spt, e.val64, e.type); | 872 | spt, e.val64, e.type); |
| 873 | return ret; | 873 | return ret; |
| 874 | } | 874 | } |
| 875 | 875 | ||
| @@ -914,8 +914,8 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry( | |||
| 914 | } | 914 | } |
| 915 | return s; | 915 | return s; |
| 916 | fail: | 916 | fail: |
| 917 | gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", | 917 | gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", |
| 918 | vgpu->id, s, we->val64, we->type); | 918 | s, we->val64, we->type); |
| 919 | return ERR_PTR(ret); | 919 | return ERR_PTR(ret); |
| 920 | } | 920 | } |
| 921 | 921 | ||
| @@ -953,7 +953,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) | |||
| 953 | 953 | ||
| 954 | for_each_present_guest_entry(spt, &ge, i) { | 954 | for_each_present_guest_entry(spt, &ge, i) { |
| 955 | if (!gtt_type_is_pt(get_next_pt_type(ge.type))) { | 955 | if (!gtt_type_is_pt(get_next_pt_type(ge.type))) { |
| 956 | gvt_err("GVT doesn't support pse bit now\n"); | 956 | gvt_vgpu_err("GVT doesn't support pse bit now\n"); |
| 957 | ret = -EINVAL; | 957 | ret = -EINVAL; |
| 958 | goto fail; | 958 | goto fail; |
| 959 | } | 959 | } |
| @@ -969,8 +969,8 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) | |||
| 969 | } | 969 | } |
| 970 | return 0; | 970 | return 0; |
| 971 | fail: | 971 | fail: |
| 972 | gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", | 972 | gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", |
| 973 | vgpu->id, spt, ge.val64, ge.type); | 973 | spt, ge.val64, ge.type); |
| 974 | return ret; | 974 | return ret; |
| 975 | } | 975 | } |
| 976 | 976 | ||
| @@ -999,7 +999,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt, | |||
| 999 | struct intel_vgpu_ppgtt_spt *s = | 999 | struct intel_vgpu_ppgtt_spt *s = |
| 1000 | ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e)); | 1000 | ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e)); |
| 1001 | if (!s) { | 1001 | if (!s) { |
| 1002 | gvt_err("fail to find guest page\n"); | 1002 | gvt_vgpu_err("fail to find guest page\n"); |
| 1003 | ret = -ENXIO; | 1003 | ret = -ENXIO; |
| 1004 | goto fail; | 1004 | goto fail; |
| 1005 | } | 1005 | } |
| @@ -1011,8 +1011,8 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt, | |||
| 1011 | ppgtt_set_shadow_entry(spt, &e, index); | 1011 | ppgtt_set_shadow_entry(spt, &e, index); |
| 1012 | return 0; | 1012 | return 0; |
| 1013 | fail: | 1013 | fail: |
| 1014 | gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", | 1014 | gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", |
| 1015 | vgpu->id, spt, e.val64, e.type); | 1015 | spt, e.val64, e.type); |
| 1016 | return ret; | 1016 | return ret; |
| 1017 | } | 1017 | } |
| 1018 | 1018 | ||
| @@ -1046,8 +1046,8 @@ static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt, | |||
| 1046 | } | 1046 | } |
| 1047 | return 0; | 1047 | return 0; |
| 1048 | fail: | 1048 | fail: |
| 1049 | gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id, | 1049 | gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n", |
| 1050 | spt, we->val64, we->type); | 1050 | spt, we->val64, we->type); |
| 1051 | return ret; | 1051 | return ret; |
| 1052 | } | 1052 | } |
| 1053 | 1053 | ||
| @@ -1250,8 +1250,8 @@ static int ppgtt_handle_guest_write_page_table( | |||
| 1250 | } | 1250 | } |
| 1251 | return 0; | 1251 | return 0; |
| 1252 | fail: | 1252 | fail: |
| 1253 | gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n", | 1253 | gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n", |
| 1254 | vgpu->id, spt, we->val64, we->type); | 1254 | spt, we->val64, we->type); |
| 1255 | return ret; | 1255 | return ret; |
| 1256 | } | 1256 | } |
| 1257 | 1257 | ||
| @@ -1493,7 +1493,7 @@ static int shadow_mm(struct intel_vgpu_mm *mm) | |||
| 1493 | 1493 | ||
| 1494 | spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); | 1494 | spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); |
| 1495 | if (IS_ERR(spt)) { | 1495 | if (IS_ERR(spt)) { |
| 1496 | gvt_err("fail to populate guest root pointer\n"); | 1496 | gvt_vgpu_err("fail to populate guest root pointer\n"); |
| 1497 | ret = PTR_ERR(spt); | 1497 | ret = PTR_ERR(spt); |
| 1498 | goto fail; | 1498 | goto fail; |
| 1499 | } | 1499 | } |
| @@ -1566,7 +1566,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, | |||
| 1566 | 1566 | ||
| 1567 | ret = gtt->mm_alloc_page_table(mm); | 1567 | ret = gtt->mm_alloc_page_table(mm); |
| 1568 | if (ret) { | 1568 | if (ret) { |
| 1569 | gvt_err("fail to allocate page table for mm\n"); | 1569 | gvt_vgpu_err("fail to allocate page table for mm\n"); |
| 1570 | goto fail; | 1570 | goto fail; |
| 1571 | } | 1571 | } |
| 1572 | 1572 | ||
| @@ -1584,7 +1584,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu, | |||
| 1584 | } | 1584 | } |
| 1585 | return mm; | 1585 | return mm; |
| 1586 | fail: | 1586 | fail: |
| 1587 | gvt_err("fail to create mm\n"); | 1587 | gvt_vgpu_err("fail to create mm\n"); |
| 1588 | if (mm) | 1588 | if (mm) |
| 1589 | intel_gvt_mm_unreference(mm); | 1589 | intel_gvt_mm_unreference(mm); |
| 1590 | return ERR_PTR(ret); | 1590 | return ERR_PTR(ret); |
| @@ -1760,7 +1760,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma) | |||
| 1760 | mm->page_table_level, gma, gpa); | 1760 | mm->page_table_level, gma, gpa); |
| 1761 | return gpa; | 1761 | return gpa; |
| 1762 | err: | 1762 | err: |
| 1763 | gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma); | 1763 | gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma); |
| 1764 | return INTEL_GVT_INVALID_ADDR; | 1764 | return INTEL_GVT_INVALID_ADDR; |
| 1765 | } | 1765 | } |
| 1766 | 1766 | ||
| @@ -1836,8 +1836,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, | |||
| 1836 | if (ops->test_present(&e)) { | 1836 | if (ops->test_present(&e)) { |
| 1837 | ret = gtt_entry_p2m(vgpu, &e, &m); | 1837 | ret = gtt_entry_p2m(vgpu, &e, &m); |
| 1838 | if (ret) { | 1838 | if (ret) { |
| 1839 | gvt_err("vgpu%d: fail to translate guest gtt entry\n", | 1839 | gvt_vgpu_err("fail to translate guest gtt entry\n"); |
| 1840 | vgpu->id); | ||
| 1841 | return ret; | 1840 | return ret; |
| 1842 | } | 1841 | } |
| 1843 | } else { | 1842 | } else { |
| @@ -1893,14 +1892,14 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, | |||
| 1893 | 1892 | ||
| 1894 | scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); | 1893 | scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); |
| 1895 | if (!scratch_pt) { | 1894 | if (!scratch_pt) { |
| 1896 | gvt_err("fail to allocate scratch page\n"); | 1895 | gvt_vgpu_err("fail to allocate scratch page\n"); |
| 1897 | return -ENOMEM; | 1896 | return -ENOMEM; |
| 1898 | } | 1897 | } |
| 1899 | 1898 | ||
| 1900 | daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, | 1899 | daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, |
| 1901 | 4096, PCI_DMA_BIDIRECTIONAL); | 1900 | 4096, PCI_DMA_BIDIRECTIONAL); |
| 1902 | if (dma_mapping_error(dev, daddr)) { | 1901 | if (dma_mapping_error(dev, daddr)) { |
| 1903 | gvt_err("fail to dmamap scratch_pt\n"); | 1902 | gvt_vgpu_err("fail to dmamap scratch_pt\n"); |
| 1904 | __free_page(virt_to_page(scratch_pt)); | 1903 | __free_page(virt_to_page(scratch_pt)); |
| 1905 | return -ENOMEM; | 1904 | return -ENOMEM; |
| 1906 | } | 1905 | } |
| @@ -2003,7 +2002,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) | |||
| 2003 | ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, | 2002 | ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, |
| 2004 | NULL, 1, 0); | 2003 | NULL, 1, 0); |
| 2005 | if (IS_ERR(ggtt_mm)) { | 2004 | if (IS_ERR(ggtt_mm)) { |
| 2006 | gvt_err("fail to create mm for ggtt.\n"); | 2005 | gvt_vgpu_err("fail to create mm for ggtt.\n"); |
| 2007 | return PTR_ERR(ggtt_mm); | 2006 | return PTR_ERR(ggtt_mm); |
| 2008 | } | 2007 | } |
| 2009 | 2008 | ||
| @@ -2076,7 +2075,6 @@ static int setup_spt_oos(struct intel_gvt *gvt) | |||
| 2076 | for (i = 0; i < preallocated_oos_pages; i++) { | 2075 | for (i = 0; i < preallocated_oos_pages; i++) { |
| 2077 | oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL); | 2076 | oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL); |
| 2078 | if (!oos_page) { | 2077 | if (!oos_page) { |
| 2079 | gvt_err("fail to pre-allocate oos page\n"); | ||
| 2080 | ret = -ENOMEM; | 2078 | ret = -ENOMEM; |
| 2081 | goto fail; | 2079 | goto fail; |
| 2082 | } | 2080 | } |
| @@ -2166,7 +2164,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu, | |||
| 2166 | mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, | 2164 | mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, |
| 2167 | pdp, page_table_level, 0); | 2165 | pdp, page_table_level, 0); |
| 2168 | if (IS_ERR(mm)) { | 2166 | if (IS_ERR(mm)) { |
| 2169 | gvt_err("fail to create mm\n"); | 2167 | gvt_vgpu_err("fail to create mm\n"); |
| 2170 | return PTR_ERR(mm); | 2168 | return PTR_ERR(mm); |
| 2171 | } | 2169 | } |
| 2172 | } | 2170 | } |
| @@ -2196,7 +2194,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, | |||
| 2196 | 2194 | ||
| 2197 | mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp); | 2195 | mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp); |
| 2198 | if (!mm) { | 2196 | if (!mm) { |
| 2199 | gvt_err("fail to find ppgtt instance.\n"); | 2197 | gvt_vgpu_err("fail to find ppgtt instance.\n"); |
| 2200 | return -EINVAL; | 2198 | return -EINVAL; |
| 2201 | } | 2199 | } |
| 2202 | intel_gvt_mm_unreference(mm); | 2200 | intel_gvt_mm_unreference(mm); |
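Every call site converted above stops passing vgpu->id by hand, which only works if the new helper picks the ID up from the caller's scope. A minimal sketch of such a macro, assuming the real definition sits next to gvt_err() in gvt/debug.h and may differ in detail:

    #include <linux/err.h>      /* IS_ERR_OR_NULL() */
    #include <linux/printk.h>   /* pr_err() */

    /* sketch: expands a local "vgpu" pointer at every call site */
    #define gvt_vgpu_err(fmt, args...) \
    do { \
            if (IS_ERR_OR_NULL(vgpu)) \
                    pr_err("gvt: " fmt, ##args); \
            else \
                    pr_err("gvt: vgpu %d: " fmt, vgpu->id, ##args); \
    } while (0)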
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 23791920ced1..6dfc48b63b71 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h | |||
| @@ -162,7 +162,6 @@ struct intel_vgpu { | |||
| 162 | atomic_t running_workload_num; | 162 | atomic_t running_workload_num; |
| 163 | DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); | 163 | DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES); |
| 164 | struct i915_gem_context *shadow_ctx; | 164 | struct i915_gem_context *shadow_ctx; |
| 165 | struct notifier_block shadow_ctx_notifier_block; | ||
| 166 | 165 | ||
| 167 | #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) | 166 | #if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT) |
| 168 | struct { | 167 | struct { |
| @@ -233,6 +232,7 @@ struct intel_gvt { | |||
| 233 | struct intel_gvt_gtt gtt; | 232 | struct intel_gvt_gtt gtt; |
| 234 | struct intel_gvt_opregion opregion; | 233 | struct intel_gvt_opregion opregion; |
| 235 | struct intel_gvt_workload_scheduler scheduler; | 234 | struct intel_gvt_workload_scheduler scheduler; |
| 235 | struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES]; | ||
| 236 | DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); | 236 | DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS); |
| 237 | struct intel_vgpu_type *types; | 237 | struct intel_vgpu_type *types; |
| 238 | unsigned int num_types; | 238 | unsigned int num_types; |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 8e43395c748a..eaff45d417e8 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
| @@ -181,11 +181,9 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu, | |||
| 181 | GVT_FAILSAFE_UNSUPPORTED_GUEST); | 181 | GVT_FAILSAFE_UNSUPPORTED_GUEST); |
| 182 | 182 | ||
| 183 | if (!vgpu->mmio.disable_warn_untrack) { | 183 | if (!vgpu->mmio.disable_warn_untrack) { |
| 184 | gvt_err("vgpu%d: found oob fence register access\n", | 184 | gvt_vgpu_err("found oob fence register access\n"); |
| 185 | vgpu->id); | 185 | gvt_vgpu_err("total fence %d, access fence %d\n", |
| 186 | gvt_err("vgpu%d: total fence %d, access fence %d\n", | 186 | vgpu_fence_sz(vgpu), fence_num); |
| 187 | vgpu->id, vgpu_fence_sz(vgpu), | ||
| 188 | fence_num); | ||
| 189 | } | 187 | } |
| 190 | memset(p_data, 0, bytes); | 188 | memset(p_data, 0, bytes); |
| 191 | return -EINVAL; | 189 | return -EINVAL; |
| @@ -249,7 +247,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu, | |||
| 249 | break; | 247 | break; |
| 250 | default: | 248 | default: |
| 251 | /*should not hit here*/ | 249 | /*should not hit here*/ |
| 252 | gvt_err("invalid forcewake offset 0x%x\n", offset); | 250 | gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset); |
| 253 | return -EINVAL; | 251 | return -EINVAL; |
| 254 | } | 252 | } |
| 255 | } else { | 253 | } else { |
| @@ -530,7 +528,7 @@ static int check_fdi_rx_train_status(struct intel_vgpu *vgpu, | |||
| 530 | fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2; | 528 | fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2; |
| 531 | fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK; | 529 | fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK; |
| 532 | } else { | 530 | } else { |
| 533 | gvt_err("Invalid train pattern %d\n", train_pattern); | 531 | gvt_vgpu_err("Invalid train pattern %d\n", train_pattern); |
| 534 | return -EINVAL; | 532 | return -EINVAL; |
| 535 | } | 533 | } |
| 536 | 534 | ||
| @@ -588,7 +586,7 @@ static int update_fdi_rx_iir_status(struct intel_vgpu *vgpu, | |||
| 588 | else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX) | 586 | else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX) |
| 589 | index = FDI_RX_IMR_TO_PIPE(offset); | 587 | index = FDI_RX_IMR_TO_PIPE(offset); |
| 590 | else { | 588 | else { |
| 591 | gvt_err("Unsupport registers %x\n", offset); | 589 | gvt_vgpu_err("Unsupport registers %x\n", offset); |
| 592 | return -EINVAL; | 590 | return -EINVAL; |
| 593 | } | 591 | } |
| 594 | 592 | ||
| @@ -818,7 +816,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu, | |||
| 818 | u32 data; | 816 | u32 data; |
| 819 | 817 | ||
| 820 | if (!dpy_is_valid_port(port_index)) { | 818 | if (!dpy_is_valid_port(port_index)) { |
| 821 | gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id); | 819 | gvt_vgpu_err("Unsupported DP port access!\n"); |
| 822 | return 0; | 820 | return 0; |
| 823 | } | 821 | } |
| 824 | 822 | ||
| @@ -1016,8 +1014,7 @@ static void write_virtual_sbi_register(struct intel_vgpu *vgpu, | |||
| 1016 | 1014 | ||
| 1017 | if (i == num) { | 1015 | if (i == num) { |
| 1018 | if (num == SBI_REG_MAX) { | 1016 | if (num == SBI_REG_MAX) { |
| 1019 | gvt_err("vgpu%d: SBI caching meets maximum limits\n", | 1017 | gvt_vgpu_err("SBI caching meets maximum limits\n"); |
| 1020 | vgpu->id); | ||
| 1021 | return; | 1018 | return; |
| 1022 | } | 1019 | } |
| 1023 | display->sbi.number++; | 1020 | display->sbi.number++; |
| @@ -1097,7 +1094,7 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 1097 | break; | 1094 | break; |
| 1098 | } | 1095 | } |
| 1099 | if (invalid_read) | 1096 | if (invalid_read) |
| 1100 | gvt_err("invalid pvinfo read: [%x:%x] = %x\n", | 1097 | gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n", |
| 1101 | offset, bytes, *(u32 *)p_data); | 1098 | offset, bytes, *(u32 *)p_data); |
| 1102 | vgpu->pv_notified = true; | 1099 | vgpu->pv_notified = true; |
| 1103 | return 0; | 1100 | return 0; |
| @@ -1125,7 +1122,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) | |||
| 1125 | case 1: /* Remove this in guest driver. */ | 1122 | case 1: /* Remove this in guest driver. */ |
| 1126 | break; | 1123 | break; |
| 1127 | default: | 1124 | default: |
| 1128 | gvt_err("Invalid PV notification %d\n", notification); | 1125 | gvt_vgpu_err("Invalid PV notification %d\n", notification); |
| 1129 | } | 1126 | } |
| 1130 | return ret; | 1127 | return ret; |
| 1131 | } | 1128 | } |
| @@ -1181,7 +1178,7 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 1181 | enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); | 1178 | enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); |
| 1182 | break; | 1179 | break; |
| 1183 | default: | 1180 | default: |
| 1184 | gvt_err("invalid pvinfo write offset %x bytes %x data %x\n", | 1181 | gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n", |
| 1185 | offset, bytes, data); | 1182 | offset, bytes, data); |
| 1186 | break; | 1183 | break; |
| 1187 | } | 1184 | } |
| @@ -1415,7 +1412,8 @@ static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 1415 | if (execlist->elsp_dwords.index == 3) { | 1412 | if (execlist->elsp_dwords.index == 3) { |
| 1416 | ret = intel_vgpu_submit_execlist(vgpu, ring_id); | 1413 | ret = intel_vgpu_submit_execlist(vgpu, ring_id); |
| 1417 | if(ret) | 1414 | if(ret) |
| 1418 | gvt_err("fail submit workload on ring %d\n", ring_id); | 1415 | gvt_vgpu_err("fail submit workload on ring %d\n", |
| 1416 | ring_id); | ||
| 1419 | } | 1417 | } |
| 1420 | 1418 | ||
| 1421 | ++execlist->elsp_dwords.index; | 1419 | ++execlist->elsp_dwords.index; |
| @@ -2988,3 +2986,20 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 2988 | write_vreg(vgpu, offset, p_data, bytes); | 2986 | write_vreg(vgpu, offset, p_data, bytes); |
| 2989 | return 0; | 2987 | return 0; |
| 2990 | } | 2988 | } |
| 2989 | |||
| 2990 | /** | ||
| 2991 | * intel_gvt_in_force_nonpriv_whitelist - check whether an MMIO offset is | ||
| 2992 | * in the force-nonpriv whitelist | ||
| 2993 | * | ||
| 2994 | * @gvt: a GVT device | ||
| 2995 | * @offset: register offset | ||
| 2996 | * | ||
| 2997 | * Returns: | ||
| 2998 | * True if the register is in the force-nonpriv whitelist; | ||
| 2999 | * False otherwise. | ||
| 3000 | */ | ||
| 3001 | bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, | ||
| 3002 | unsigned int offset) | ||
| 3003 | { | ||
| 3004 | return in_whitelist(offset); | ||
| 3005 | } | ||
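intel_gvt_in_force_nonpriv_whitelist() only exports a static in_whitelist() helper whose body is outside this hunk. A plausible shape, assuming a small fixed table of offsets (names and contents here are illustrative, not taken from the patch):

    /* illustrative: the real table lives elsewhere in handlers.c */
    static const u32 force_nonpriv_white_list[] = {
            0x24d0, 0x24d4, 0x24d8, 0x24dc,   /* example offsets only */
    };

    static bool in_whitelist(unsigned int offset)
    {
            unsigned int i;

            for (i = 0; i < ARRAY_SIZE(force_nonpriv_white_list); i++)
                    if (offset == force_nonpriv_white_list[i])
                            return true;
            return false;
    }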
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 84d801638ede..1ea3eb270de8 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c | |||
| @@ -426,7 +426,7 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info, | |||
| 426 | 426 | ||
| 427 | static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) | 427 | static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) |
| 428 | { | 428 | { |
| 429 | struct intel_vgpu *vgpu; | 429 | struct intel_vgpu *vgpu = NULL; |
| 430 | struct intel_vgpu_type *type; | 430 | struct intel_vgpu_type *type; |
| 431 | struct device *pdev; | 431 | struct device *pdev; |
| 432 | void *gvt; | 432 | void *gvt; |
| @@ -437,7 +437,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) | |||
| 437 | 437 | ||
| 438 | type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); | 438 | type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); |
| 439 | if (!type) { | 439 | if (!type) { |
| 440 | gvt_err("failed to find type %s to create\n", | 440 | gvt_vgpu_err("failed to find type %s to create\n", |
| 441 | kobject_name(kobj)); | 441 | kobject_name(kobj)); |
| 442 | ret = -EINVAL; | 442 | ret = -EINVAL; |
| 443 | goto out; | 443 | goto out; |
| @@ -446,7 +446,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) | |||
| 446 | vgpu = intel_gvt_ops->vgpu_create(gvt, type); | 446 | vgpu = intel_gvt_ops->vgpu_create(gvt, type); |
| 447 | if (IS_ERR_OR_NULL(vgpu)) { | 447 | if (IS_ERR_OR_NULL(vgpu)) { |
| 448 | ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); | 448 | ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); |
| 449 | gvt_err("failed to create intel vgpu: %d\n", ret); | 449 | gvt_vgpu_err("failed to create intel vgpu: %d\n", ret); |
| 450 | goto out; | 450 | goto out; |
| 451 | } | 451 | } |
| 452 | 452 | ||
| @@ -526,7 +526,8 @@ static int intel_vgpu_open(struct mdev_device *mdev) | |||
| 526 | ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, | 526 | ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, |
| 527 | &vgpu->vdev.iommu_notifier); | 527 | &vgpu->vdev.iommu_notifier); |
| 528 | if (ret != 0) { | 528 | if (ret != 0) { |
| 529 | gvt_err("vfio_register_notifier for iommu failed: %d\n", ret); | 529 | gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n", |
| 530 | ret); | ||
| 530 | goto out; | 531 | goto out; |
| 531 | } | 532 | } |
| 532 | 533 | ||
| @@ -534,7 +535,8 @@ static int intel_vgpu_open(struct mdev_device *mdev) | |||
| 534 | ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, | 535 | ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, |
| 535 | &vgpu->vdev.group_notifier); | 536 | &vgpu->vdev.group_notifier); |
| 536 | if (ret != 0) { | 537 | if (ret != 0) { |
| 537 | gvt_err("vfio_register_notifier for group failed: %d\n", ret); | 538 | gvt_vgpu_err("vfio_register_notifier for group failed: %d\n", |
| 539 | ret); | ||
| 538 | goto undo_iommu; | 540 | goto undo_iommu; |
| 539 | } | 541 | } |
| 540 | 542 | ||
| @@ -635,7 +637,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, | |||
| 635 | 637 | ||
| 636 | 638 | ||
| 637 | if (index >= VFIO_PCI_NUM_REGIONS) { | 639 | if (index >= VFIO_PCI_NUM_REGIONS) { |
| 638 | gvt_err("invalid index: %u\n", index); | 640 | gvt_vgpu_err("invalid index: %u\n", index); |
| 639 | return -EINVAL; | 641 | return -EINVAL; |
| 640 | } | 642 | } |
| 641 | 643 | ||
| @@ -669,7 +671,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf, | |||
| 669 | case VFIO_PCI_VGA_REGION_INDEX: | 671 | case VFIO_PCI_VGA_REGION_INDEX: |
| 670 | case VFIO_PCI_ROM_REGION_INDEX: | 672 | case VFIO_PCI_ROM_REGION_INDEX: |
| 671 | default: | 673 | default: |
| 672 | gvt_err("unsupported region: %u\n", index); | 674 | gvt_vgpu_err("unsupported region: %u\n", index); |
| 673 | } | 675 | } |
| 674 | 676 | ||
| 675 | return ret == 0 ? count : ret; | 677 | return ret == 0 ? count : ret; |
| @@ -861,7 +863,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu, | |||
| 861 | 863 | ||
| 862 | trigger = eventfd_ctx_fdget(fd); | 864 | trigger = eventfd_ctx_fdget(fd); |
| 863 | if (IS_ERR(trigger)) { | 865 | if (IS_ERR(trigger)) { |
| 864 | gvt_err("eventfd_ctx_fdget failed\n"); | 866 | gvt_vgpu_err("eventfd_ctx_fdget failed\n"); |
| 865 | return PTR_ERR(trigger); | 867 | return PTR_ERR(trigger); |
| 866 | } | 868 | } |
| 867 | vgpu->vdev.msi_trigger = trigger; | 869 | vgpu->vdev.msi_trigger = trigger; |
| @@ -1120,7 +1122,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, | |||
| 1120 | ret = vfio_set_irqs_validate_and_prepare(&hdr, max, | 1122 | ret = vfio_set_irqs_validate_and_prepare(&hdr, max, |
| 1121 | VFIO_PCI_NUM_IRQS, &data_size); | 1123 | VFIO_PCI_NUM_IRQS, &data_size); |
| 1122 | if (ret) { | 1124 | if (ret) { |
| 1123 | gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); | 1125 | gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); |
| 1124 | return -EINVAL; | 1126 | return -EINVAL; |
| 1125 | } | 1127 | } |
| 1126 | if (data_size) { | 1128 | if (data_size) { |
| @@ -1310,7 +1312,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev) | |||
| 1310 | 1312 | ||
| 1311 | kvm = vgpu->vdev.kvm; | 1313 | kvm = vgpu->vdev.kvm; |
| 1312 | if (!kvm || kvm->mm != current->mm) { | 1314 | if (!kvm || kvm->mm != current->mm) { |
| 1313 | gvt_err("KVM is required to use Intel vGPU\n"); | 1315 | gvt_vgpu_err("KVM is required to use Intel vGPU\n"); |
| 1314 | return -ESRCH; | 1316 | return -ESRCH; |
| 1315 | } | 1317 | } |
| 1316 | 1318 | ||
| @@ -1337,8 +1339,10 @@ static int kvmgt_guest_init(struct mdev_device *mdev) | |||
| 1337 | 1339 | ||
| 1338 | static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) | 1340 | static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) |
| 1339 | { | 1341 | { |
| 1342 | struct intel_vgpu *vgpu = info->vgpu; | ||
| 1343 | |||
| 1340 | if (!info) { | 1344 | if (!info) { |
| 1341 | gvt_err("kvmgt_guest_info invalid\n"); | 1345 | gvt_vgpu_err("kvmgt_guest_info invalid\n"); |
| 1342 | return false; | 1346 | return false; |
| 1343 | } | 1347 | } |
| 1344 | 1348 | ||
| @@ -1383,12 +1387,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) | |||
| 1383 | unsigned long iova, pfn; | 1387 | unsigned long iova, pfn; |
| 1384 | struct kvmgt_guest_info *info; | 1388 | struct kvmgt_guest_info *info; |
| 1385 | struct device *dev; | 1389 | struct device *dev; |
| 1390 | struct intel_vgpu *vgpu; | ||
| 1386 | int rc; | 1391 | int rc; |
| 1387 | 1392 | ||
| 1388 | if (!handle_valid(handle)) | 1393 | if (!handle_valid(handle)) |
| 1389 | return INTEL_GVT_INVALID_ADDR; | 1394 | return INTEL_GVT_INVALID_ADDR; |
| 1390 | 1395 | ||
| 1391 | info = (struct kvmgt_guest_info *)handle; | 1396 | info = (struct kvmgt_guest_info *)handle; |
| 1397 | vgpu = info->vgpu; | ||
| 1392 | iova = gvt_cache_find(info->vgpu, gfn); | 1398 | iova = gvt_cache_find(info->vgpu, gfn); |
| 1393 | if (iova != INTEL_GVT_INVALID_ADDR) | 1399 | if (iova != INTEL_GVT_INVALID_ADDR) |
| 1394 | return iova; | 1400 | return iova; |
| @@ -1397,13 +1403,14 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn) | |||
| 1397 | dev = mdev_dev(info->vgpu->vdev.mdev); | 1403 | dev = mdev_dev(info->vgpu->vdev.mdev); |
| 1398 | rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); | 1404 | rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); |
| 1399 | if (rc != 1) { | 1405 | if (rc != 1) { |
| 1400 | gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); | 1406 | gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", |
| 1407 | gfn, rc); | ||
| 1401 | return INTEL_GVT_INVALID_ADDR; | 1408 | return INTEL_GVT_INVALID_ADDR; |
| 1402 | } | 1409 | } |
| 1403 | /* transfer to host iova for GFX to use DMA */ | 1410 | /* transfer to host iova for GFX to use DMA */ |
| 1404 | rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); | 1411 | rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); |
| 1405 | if (rc) { | 1412 | if (rc) { |
| 1406 | gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); | 1413 | gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); |
| 1407 | vfio_unpin_pages(dev, &gfn, 1); | 1414 | vfio_unpin_pages(dev, &gfn, 1); |
| 1408 | return INTEL_GVT_INVALID_ADDR; | 1415 | return INTEL_GVT_INVALID_ADDR; |
| 1409 | } | 1416 | } |
| @@ -1417,7 +1424,7 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, | |||
| 1417 | { | 1424 | { |
| 1418 | struct kvmgt_guest_info *info; | 1425 | struct kvmgt_guest_info *info; |
| 1419 | struct kvm *kvm; | 1426 | struct kvm *kvm; |
| 1420 | int ret; | 1427 | int idx, ret; |
| 1421 | bool kthread = current->mm == NULL; | 1428 | bool kthread = current->mm == NULL; |
| 1422 | 1429 | ||
| 1423 | if (!handle_valid(handle)) | 1430 | if (!handle_valid(handle)) |
| @@ -1429,8 +1436,10 @@ static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa, | |||
| 1429 | if (kthread) | 1436 | if (kthread) |
| 1430 | use_mm(kvm->mm); | 1437 | use_mm(kvm->mm); |
| 1431 | 1438 | ||
| 1439 | idx = srcu_read_lock(&kvm->srcu); | ||
| 1432 | ret = write ? kvm_write_guest(kvm, gpa, buf, len) : | 1440 | ret = write ? kvm_write_guest(kvm, gpa, buf, len) : |
| 1433 | kvm_read_guest(kvm, gpa, buf, len); | 1441 | kvm_read_guest(kvm, gpa, buf, len); |
| 1442 | srcu_read_unlock(&kvm->srcu, idx); | ||
| 1434 | 1443 | ||
| 1435 | if (kthread) | 1444 | if (kthread) |
| 1436 | unuse_mm(kvm->mm); | 1445 | unuse_mm(kvm->mm); |
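The two functional kvmgt.c changes compose into a single access pattern for touching guest memory from GVT's worker threads: a kthread has no mm of its own, so it temporarily adopts the guest's, and the memslot lookup inside kvm_read_guest()/kvm_write_guest() must run under kvm->srcu because KVM protects its memslot array with SRCU. Condensed from the hunks above:

    bool kthread = current->mm == NULL;
    int idx, ret;

    if (kthread)
            use_mm(kvm->mm);                /* borrow the guest process's mm */

    idx = srcu_read_lock(&kvm->srcu);       /* memslots are SRCU-protected */
    ret = write ? kvm_write_guest(kvm, gpa, buf, len) :
                  kvm_read_guest(kvm, gpa, buf, len);
    srcu_read_unlock(&kvm->srcu, idx);

    if (kthread)
            unuse_mm(kvm->mm);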
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c index 60b698cb8365..1ba3bdb09341 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.c +++ b/drivers/gpu/drm/i915/gvt/mmio.c | |||
| @@ -142,10 +142,10 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, | |||
| 142 | ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, | 142 | ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, |
| 143 | p_data, bytes); | 143 | p_data, bytes); |
| 144 | if (ret) { | 144 | if (ret) { |
| 145 | gvt_err("vgpu%d: guest page read error %d, " | 145 | gvt_vgpu_err("guest page read error %d, " |
| 146 | "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", | 146 | "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", |
| 147 | vgpu->id, ret, | 147 | ret, gp->gfn, pa, *(u32 *)p_data, |
| 148 | gp->gfn, pa, *(u32 *)p_data, bytes); | 148 | bytes); |
| 149 | } | 149 | } |
| 150 | mutex_unlock(&gvt->lock); | 150 | mutex_unlock(&gvt->lock); |
| 151 | return ret; | 151 | return ret; |
| @@ -200,14 +200,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, | |||
| 200 | ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); | 200 | ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); |
| 201 | 201 | ||
| 202 | if (!vgpu->mmio.disable_warn_untrack) { | 202 | if (!vgpu->mmio.disable_warn_untrack) { |
| 203 | gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", | 203 | gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n", |
| 204 | vgpu->id, offset, bytes, *(u32 *)p_data); | 204 | offset, bytes, *(u32 *)p_data); |
| 205 | 205 | ||
| 206 | if (offset == 0x206c) { | 206 | if (offset == 0x206c) { |
| 207 | gvt_err("------------------------------------------\n"); | 207 | gvt_vgpu_err("------------------------------------------\n"); |
| 208 | gvt_err("vgpu%d: likely triggers a gfx reset\n", | 208 | gvt_vgpu_err("likely triggers a gfx reset\n"); |
| 209 | vgpu->id); | 209 | gvt_vgpu_err("------------------------------------------\n"); |
| 210 | gvt_err("------------------------------------------\n"); | ||
| 211 | vgpu->mmio.disable_warn_untrack = true; | 210 | vgpu->mmio.disable_warn_untrack = true; |
| 212 | } | 211 | } |
| 213 | } | 212 | } |
| @@ -220,8 +219,8 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa, | |||
| 220 | mutex_unlock(&gvt->lock); | 219 | mutex_unlock(&gvt->lock); |
| 221 | return 0; | 220 | return 0; |
| 222 | err: | 221 | err: |
| 223 | gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n", | 222 | gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n", |
| 224 | vgpu->id, offset, bytes); | 223 | offset, bytes); |
| 225 | mutex_unlock(&gvt->lock); | 224 | mutex_unlock(&gvt->lock); |
| 226 | return ret; | 225 | return ret; |
| 227 | } | 226 | } |
| @@ -259,10 +258,11 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, | |||
| 259 | if (gp) { | 258 | if (gp) { |
| 260 | ret = gp->handler(gp, pa, p_data, bytes); | 259 | ret = gp->handler(gp, pa, p_data, bytes); |
| 261 | if (ret) { | 260 | if (ret) { |
| 262 | gvt_err("vgpu%d: guest page write error %d, " | 261 | gvt_err("guest page write error %d, " |
| 263 | "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", | 262 | "gfn 0x%lx, pa 0x%llx, " |
| 264 | vgpu->id, ret, | 263 | "var 0x%x, len %d\n", |
| 265 | gp->gfn, pa, *(u32 *)p_data, bytes); | 264 | ret, gp->gfn, pa, |
| 265 | *(u32 *)p_data, bytes); | ||
| 266 | } | 266 | } |
| 267 | mutex_unlock(&gvt->lock); | 267 | mutex_unlock(&gvt->lock); |
| 268 | return ret; | 268 | return ret; |
| @@ -329,8 +329,8 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa, | |||
| 329 | 329 | ||
| 330 | /* all register bits are RO. */ | 330 | /* all register bits are RO. */ |
| 331 | if (ro_mask == ~(u64)0) { | 331 | if (ro_mask == ~(u64)0) { |
| 332 | gvt_err("vgpu%d: try to write RO reg %x\n", | 332 | gvt_vgpu_err("try to write RO reg %x\n", |
| 333 | vgpu->id, offset); | 333 | offset); |
| 334 | ret = 0; | 334 | ret = 0; |
| 335 | goto out; | 335 | goto out; |
| 336 | } | 336 | } |
| @@ -360,8 +360,8 @@ out: | |||
| 360 | mutex_unlock(&gvt->lock); | 360 | mutex_unlock(&gvt->lock); |
| 361 | return 0; | 361 | return 0; |
| 362 | err: | 362 | err: |
| 363 | gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n", | 363 | gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset, |
| 364 | vgpu->id, offset, bytes); | 364 | bytes); |
| 365 | mutex_unlock(&gvt->lock); | 365 | mutex_unlock(&gvt->lock); |
| 366 | return ret; | 366 | return ret; |
| 367 | } | 367 | } |
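The ro_mask == ~(u64)0 early-out above is the degenerate all-bits-read-only case. For a partially read-only register the usual merge, which this hunk does not show, keeps the masked bits from the current vreg; a sketch under that assumption:

    u32 old = vgpu_vreg(vgpu, offset);
    u32 val = *(u32 *)p_data;

    /* writable bits come from the guest write, RO bits keep their value */
    vgpu_vreg(vgpu, offset) = (old & (u32)ro_mask) | (val & ~(u32)ro_mask);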
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h index 3bc620f56f35..a3a027025cd0 100644 --- a/drivers/gpu/drm/i915/gvt/mmio.h +++ b/drivers/gpu/drm/i915/gvt/mmio.h | |||
| @@ -107,4 +107,7 @@ int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, | |||
| 107 | void *p_data, unsigned int bytes); | 107 | void *p_data, unsigned int bytes); |
| 108 | int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, | 108 | int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, |
| 109 | void *p_data, unsigned int bytes); | 109 | void *p_data, unsigned int bytes); |
| 110 | |||
| 111 | bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt, | ||
| 112 | unsigned int offset); | ||
| 110 | #endif | 113 | #endif |
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c index 5d1caf9daba9..311799136d7f 100644 --- a/drivers/gpu/drm/i915/gvt/opregion.c +++ b/drivers/gpu/drm/i915/gvt/opregion.c | |||
| @@ -67,14 +67,15 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map) | |||
| 67 | mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va | 67 | mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va |
| 68 | + i * PAGE_SIZE); | 68 | + i * PAGE_SIZE); |
| 69 | if (mfn == INTEL_GVT_INVALID_ADDR) { | 69 | if (mfn == INTEL_GVT_INVALID_ADDR) { |
| 70 | gvt_err("fail to get MFN from VA\n"); | 70 | gvt_vgpu_err("fail to get MFN from VA\n"); |
| 71 | return -EINVAL; | 71 | return -EINVAL; |
| 72 | } | 72 | } |
| 73 | ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, | 73 | ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, |
| 74 | vgpu_opregion(vgpu)->gfn[i], | 74 | vgpu_opregion(vgpu)->gfn[i], |
| 75 | mfn, 1, map); | 75 | mfn, 1, map); |
| 76 | if (ret) { | 76 | if (ret) { |
| 77 | gvt_err("fail to map GFN to MFN, errno: %d\n", ret); | 77 | gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n", |
| 78 | ret); | ||
| 78 | return ret; | 79 | return ret; |
| 79 | } | 80 | } |
| 80 | } | 81 | } |
| @@ -287,7 +288,7 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) | |||
| 287 | parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; | 288 | parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM; |
| 288 | 289 | ||
| 289 | if (!(swsci & SWSCI_SCI_SELECT)) { | 290 | if (!(swsci & SWSCI_SCI_SELECT)) { |
| 290 | gvt_err("vgpu%d: requesting SMI service\n", vgpu->id); | 291 | gvt_vgpu_err("requesting SMI service\n"); |
| 291 | return 0; | 292 | return 0; |
| 292 | } | 293 | } |
| 293 | /* ignore non 0->1 transitions */ | 294 | /* ignore non 0->1 transitions */ |
| @@ -300,9 +301,8 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci) | |||
| 300 | func = GVT_OPREGION_FUNC(*scic); | 301 | func = GVT_OPREGION_FUNC(*scic); |
| 301 | subfunc = GVT_OPREGION_SUBFUNC(*scic); | 302 | subfunc = GVT_OPREGION_SUBFUNC(*scic); |
| 302 | if (!querying_capabilities(*scic)) { | 303 | if (!querying_capabilities(*scic)) { |
| 303 | gvt_err("vgpu%d: requesting runtime service: func \"%s\"," | 304 | gvt_vgpu_err("requesting runtime service: func \"%s\"," |
| 304 | " subfunc \"%s\"\n", | 305 | " subfunc \"%s\"\n", |
| 305 | vgpu->id, | ||
| 306 | opregion_func_name(func), | 306 | opregion_func_name(func), |
| 307 | opregion_subfunc_name(subfunc)); | 307 | opregion_subfunc_name(subfunc)); |
| 308 | /* | 308 | /* |
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c index 73f052a4f424..95ee091ce085 100644 --- a/drivers/gpu/drm/i915/gvt/render.c +++ b/drivers/gpu/drm/i915/gvt/render.c | |||
| @@ -167,7 +167,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) | |||
| 167 | I915_WRITE_FW(reg, 0x1); | 167 | I915_WRITE_FW(reg, 0x1); |
| 168 | 168 | ||
| 169 | if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) | 169 | if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) |
| 170 | gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id); | 170 | gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id); |
| 171 | else | 171 | else |
| 172 | vgpu_vreg(vgpu, regs[ring_id]) = 0; | 172 | vgpu_vreg(vgpu, regs[ring_id]) = 0; |
| 173 | 173 | ||
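handle_tlb_pending_event() is the write-one-then-poll-zero idiom: the invalidate register self-clears when the flush completes, and because the poll runs with forcewake held in atomic context it uses i915's busy-waiting wait_for_atomic(COND, timeout_ms) rather than a sleeping wait. The shape, with the semantics spelled out:

    I915_WRITE_FW(reg, 0x1);           /* kick the TLB invalidation */

    /* spin up to 50 ms for the bit to self-clear; no sleeping allowed */
    if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
            gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
    else
            vgpu_vreg(vgpu, regs[ring_id]) = 0;   /* ack in the vreg too */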
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 06c9584ac5f0..34b9acdf3479 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c | |||
| @@ -101,7 +101,7 @@ struct tbs_sched_data { | |||
| 101 | struct list_head runq_head; | 101 | struct list_head runq_head; |
| 102 | }; | 102 | }; |
| 103 | 103 | ||
| 104 | #define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000) | 104 | #define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1)) |
| 105 | 105 | ||
| 106 | static void tbs_sched_func(struct work_struct *work) | 106 | static void tbs_sched_func(struct work_struct *work) |
| 107 | { | 107 | { |
| @@ -223,7 +223,7 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) | |||
| 223 | return; | 223 | return; |
| 224 | 224 | ||
| 225 | list_add_tail(&vgpu_data->list, &sched_data->runq_head); | 225 | list_add_tail(&vgpu_data->list, &sched_data->runq_head); |
| 226 | schedule_delayed_work(&sched_data->work, sched_data->period); | 226 | schedule_delayed_work(&sched_data->work, 0); |
| 227 | } | 227 | } |
| 228 | 228 | ||
| 229 | static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) | 229 | static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) |
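The time-slice change is not cosmetic: open-coded HZ arithmetic truncates toward zero, so on a HZ=100 or HZ=250 kernel (1 * HZ / 1000) evaluates to 0 jiffies and the scheduler would rearm with no time budget at all, whereas msecs_to_jiffies() rounds up to at least one jiffy. For example:

    #include <linux/jiffies.h>

    /* with HZ == 250: */
    unsigned long bad  = 1 * HZ / 1000;        /* 250 / 1000 == 0 jiffies */
    unsigned long good = msecs_to_jiffies(1);  /* rounds up   == 1 jiffy  */

Relatedly, tbs_sched_start_schedule() now kicks the delayed work with a delay of 0, so a newly started vGPU is scheduled immediately instead of one period later.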
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index d3a56c949025..c4353ed86d4b 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c | |||
| @@ -84,7 +84,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) | |||
| 84 | (u32)((workload->ctx_desc.lrca + i) << | 84 | (u32)((workload->ctx_desc.lrca + i) << |
| 85 | GTT_PAGE_SHIFT)); | 85 | GTT_PAGE_SHIFT)); |
| 86 | if (context_gpa == INTEL_GVT_INVALID_ADDR) { | 86 | if (context_gpa == INTEL_GVT_INVALID_ADDR) { |
| 87 | gvt_err("Invalid guest context descriptor\n"); | 87 | gvt_vgpu_err("Invalid guest context descriptor\n"); |
| 88 | return -EINVAL; | 88 | return -EINVAL; |
| 89 | } | 89 | } |
| 90 | 90 | ||
| @@ -130,12 +130,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) | |||
| 130 | static int shadow_context_status_change(struct notifier_block *nb, | 130 | static int shadow_context_status_change(struct notifier_block *nb, |
| 131 | unsigned long action, void *data) | 131 | unsigned long action, void *data) |
| 132 | { | 132 | { |
| 133 | struct intel_vgpu *vgpu = container_of(nb, | 133 | struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data; |
| 134 | struct intel_vgpu, shadow_ctx_notifier_block); | 134 | struct intel_gvt *gvt = container_of(nb, struct intel_gvt, |
| 135 | struct drm_i915_gem_request *req = | 135 | shadow_ctx_notifier_block[req->engine->id]); |
| 136 | (struct drm_i915_gem_request *)data; | 136 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; |
| 137 | struct intel_gvt_workload_scheduler *scheduler = | ||
| 138 | &vgpu->gvt->scheduler; | ||
| 139 | struct intel_vgpu_workload *workload = | 137 | struct intel_vgpu_workload *workload = |
| 140 | scheduler->current_workload[req->engine->id]; | 138 | scheduler->current_workload[req->engine->id]; |
| 141 | 139 | ||
| @@ -175,7 +173,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) | |||
| 175 | int ring_id = workload->ring_id; | 173 | int ring_id = workload->ring_id; |
| 176 | struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; | 174 | struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; |
| 177 | struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; | 175 | struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; |
| 176 | struct intel_engine_cs *engine = dev_priv->engine[ring_id]; | ||
| 178 | struct drm_i915_gem_request *rq; | 177 | struct drm_i915_gem_request *rq; |
| 178 | struct intel_vgpu *vgpu = workload->vgpu; | ||
| 179 | int ret; | 179 | int ret; |
| 180 | 180 | ||
| 181 | gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", | 181 | gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", |
| @@ -187,9 +187,24 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) | |||
| 187 | 187 | ||
| 188 | mutex_lock(&dev_priv->drm.struct_mutex); | 188 | mutex_lock(&dev_priv->drm.struct_mutex); |
| 189 | 189 | ||
| 190 | /* pin the shadow context by gvt even though it will also be pinned | ||
| 191 | * when i915 allocates the request. That is because gvt will update | ||
| 192 | * the guest context from the shadow context when the workload is | ||
| 193 | * completed, and at that moment i915 may already have unpinned the | ||
| 194 | * shadow context, making the shadow_ctx pages invalid. So gvt needs | ||
| 195 | * to pin it itself. After updating the guest context, gvt can unpin | ||
| 196 | * the shadow_ctx safely. | ||
| 197 | ret = engine->context_pin(engine, shadow_ctx); | ||
| 198 | if (ret) { | ||
| 199 | gvt_vgpu_err("fail to pin shadow context\n"); | ||
| 200 | workload->status = ret; | ||
| 201 | mutex_unlock(&dev_priv->drm.struct_mutex); | ||
| 202 | return ret; | ||
| 203 | } | ||
| 204 | |||
| 190 | rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); | 205 | rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); |
| 191 | if (IS_ERR(rq)) { | 206 | if (IS_ERR(rq)) { |
| 192 | gvt_err("fail to allocate gem request\n"); | 207 | gvt_vgpu_err("fail to allocate gem request\n"); |
| 193 | ret = PTR_ERR(rq); | 208 | ret = PTR_ERR(rq); |
| 194 | goto out; | 209 | goto out; |
| 195 | } | 210 | } |
| @@ -202,9 +217,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) | |||
| 202 | if (ret) | 217 | if (ret) |
| 203 | goto out; | 218 | goto out; |
| 204 | 219 | ||
| 205 | ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); | 220 | if ((workload->ring_id == RCS) && |
| 206 | if (ret) | 221 | (workload->wa_ctx.indirect_ctx.size != 0)) { |
| 207 | goto out; | 222 | ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); |
| 223 | if (ret) | ||
| 224 | goto out; | ||
| 225 | } | ||
| 208 | 226 | ||
| 209 | ret = populate_shadow_context(workload); | 227 | ret = populate_shadow_context(workload); |
| 210 | if (ret) | 228 | if (ret) |
| @@ -227,6 +245,9 @@ out: | |||
| 227 | 245 | ||
| 228 | if (!IS_ERR_OR_NULL(rq)) | 246 | if (!IS_ERR_OR_NULL(rq)) |
| 229 | i915_add_request_no_flush(rq); | 247 | i915_add_request_no_flush(rq); |
| 248 | else | ||
| 249 | engine->context_unpin(engine, shadow_ctx); | ||
| 250 | |||
| 230 | mutex_unlock(&dev_priv->drm.struct_mutex); | 251 | mutex_unlock(&dev_priv->drm.struct_mutex); |
| 231 | return ret; | 252 | return ret; |
| 232 | } | 253 | } |
| @@ -322,7 +343,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) | |||
| 322 | (u32)((workload->ctx_desc.lrca + i) << | 343 | (u32)((workload->ctx_desc.lrca + i) << |
| 323 | GTT_PAGE_SHIFT)); | 344 | GTT_PAGE_SHIFT)); |
| 324 | if (context_gpa == INTEL_GVT_INVALID_ADDR) { | 345 | if (context_gpa == INTEL_GVT_INVALID_ADDR) { |
| 325 | gvt_err("invalid guest context descriptor\n"); | 346 | gvt_vgpu_err("invalid guest context descriptor\n"); |
| 326 | return; | 347 | return; |
| 327 | } | 348 | } |
| 328 | 349 | ||
| @@ -376,6 +397,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | |||
| 376 | * For the workload w/o request, directly complete the workload. | 397 | * For the workload w/o request, directly complete the workload. |
| 377 | */ | 398 | */ |
| 378 | if (workload->req) { | 399 | if (workload->req) { |
| 400 | struct drm_i915_private *dev_priv = | ||
| 401 | workload->vgpu->gvt->dev_priv; | ||
| 402 | struct intel_engine_cs *engine = | ||
| 403 | dev_priv->engine[workload->ring_id]; | ||
| 379 | wait_event(workload->shadow_ctx_status_wq, | 404 | wait_event(workload->shadow_ctx_status_wq, |
| 380 | !atomic_read(&workload->shadow_ctx_active)); | 405 | !atomic_read(&workload->shadow_ctx_active)); |
| 381 | 406 | ||
| @@ -388,6 +413,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) | |||
| 388 | INTEL_GVT_EVENT_MAX) | 413 | INTEL_GVT_EVENT_MAX) |
| 389 | intel_vgpu_trigger_virtual_event(vgpu, event); | 414 | intel_vgpu_trigger_virtual_event(vgpu, event); |
| 390 | } | 415 | } |
| 416 | mutex_lock(&dev_priv->drm.struct_mutex); | ||
| 417 | /* unpin shadow ctx as the shadow_ctx update is done */ | ||
| 418 | engine->context_unpin(engine, workload->vgpu->shadow_ctx); | ||
| 419 | mutex_unlock(&dev_priv->drm.struct_mutex); | ||
| 391 | } | 420 | } |
| 392 | 421 | ||
| 393 | gvt_dbg_sched("ring id %d complete workload %p status %d\n", | 422 | gvt_dbg_sched("ring id %d complete workload %p status %d\n", |
| @@ -417,6 +446,7 @@ static int workload_thread(void *priv) | |||
| 417 | int ring_id = p->ring_id; | 446 | int ring_id = p->ring_id; |
| 418 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; | 447 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; |
| 419 | struct intel_vgpu_workload *workload = NULL; | 448 | struct intel_vgpu_workload *workload = NULL; |
| 449 | struct intel_vgpu *vgpu = NULL; | ||
| 420 | int ret; | 450 | int ret; |
| 421 | bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); | 451 | bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); |
| 422 | DEFINE_WAIT_FUNC(wait, woken_wake_function); | 452 | DEFINE_WAIT_FUNC(wait, woken_wake_function); |
| @@ -459,25 +489,14 @@ static int workload_thread(void *priv) | |||
| 459 | mutex_unlock(&gvt->lock); | 489 | mutex_unlock(&gvt->lock); |
| 460 | 490 | ||
| 461 | if (ret) { | 491 | if (ret) { |
| 462 | gvt_err("fail to dispatch workload, skip\n"); | 492 | vgpu = workload->vgpu; |
| 493 | gvt_vgpu_err("fail to dispatch workload, skip\n"); | ||
| 463 | goto complete; | 494 | goto complete; |
| 464 | } | 495 | } |
| 465 | 496 | ||
| 466 | gvt_dbg_sched("ring id %d wait workload %p\n", | 497 | gvt_dbg_sched("ring id %d wait workload %p\n", |
| 467 | workload->ring_id, workload); | 498 | workload->ring_id, workload); |
| 468 | retry: | 499 | i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT); |
| 469 | i915_wait_request(workload->req, | ||
| 470 | 0, MAX_SCHEDULE_TIMEOUT); | ||
| 471 | /* I915 has replay mechanism and a request will be replayed | ||
| 472 | * if there is i915 reset. So the seqno will be updated anyway. | ||
| 473 | * If the seqno is not updated yet after waiting, which means | ||
| 474 | * the replay may still be in progress and we can wait again. | ||
| 475 | */ | ||
| 476 | if (!i915_gem_request_completed(workload->req)) { | ||
| 477 | gvt_dbg_sched("workload %p not completed, wait again\n", | ||
| 478 | workload); | ||
| 479 | goto retry; | ||
| 480 | } | ||
| 481 | 500 | ||
| 482 | complete: | 501 | complete: |
| 483 | gvt_dbg_sched("will complete workload %p, status: %d\n", | 502 | gvt_dbg_sched("will complete workload %p, status: %d\n", |
| @@ -513,15 +532,16 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu) | |||
| 513 | void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) | 532 | void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt) |
| 514 | { | 533 | { |
| 515 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; | 534 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; |
| 516 | int i; | 535 | struct intel_engine_cs *engine; |
| 536 | enum intel_engine_id i; | ||
| 517 | 537 | ||
| 518 | gvt_dbg_core("clean workload scheduler\n"); | 538 | gvt_dbg_core("clean workload scheduler\n"); |
| 519 | 539 | ||
| 520 | for (i = 0; i < I915_NUM_ENGINES; i++) { | 540 | for_each_engine(engine, gvt->dev_priv, i) { |
| 521 | if (scheduler->thread[i]) { | 541 | atomic_notifier_chain_unregister( |
| 522 | kthread_stop(scheduler->thread[i]); | 542 | &engine->context_status_notifier, |
| 523 | scheduler->thread[i] = NULL; | 543 | &gvt->shadow_ctx_notifier_block[i]); |
| 524 | } | 544 | kthread_stop(scheduler->thread[i]); |
| 525 | } | 545 | } |
| 526 | } | 546 | } |
| 527 | 547 | ||
| @@ -529,18 +549,15 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) | |||
| 529 | { | 549 | { |
| 530 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; | 550 | struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; |
| 531 | struct workload_thread_param *param = NULL; | 551 | struct workload_thread_param *param = NULL; |
| 552 | struct intel_engine_cs *engine; | ||
| 553 | enum intel_engine_id i; | ||
| 532 | int ret; | 554 | int ret; |
| 533 | int i; | ||
| 534 | 555 | ||
| 535 | gvt_dbg_core("init workload scheduler\n"); | 556 | gvt_dbg_core("init workload scheduler\n"); |
| 536 | 557 | ||
| 537 | init_waitqueue_head(&scheduler->workload_complete_wq); | 558 | init_waitqueue_head(&scheduler->workload_complete_wq); |
| 538 | 559 | ||
| 539 | for (i = 0; i < I915_NUM_ENGINES; i++) { | 560 | for_each_engine(engine, gvt->dev_priv, i) { |
| 540 | /* check ring mask at init time */ | ||
| 541 | if (!HAS_ENGINE(gvt->dev_priv, i)) | ||
| 542 | continue; | ||
| 543 | |||
| 544 | init_waitqueue_head(&scheduler->waitq[i]); | 561 | init_waitqueue_head(&scheduler->waitq[i]); |
| 545 | 562 | ||
| 546 | param = kzalloc(sizeof(*param), GFP_KERNEL); | 563 | param = kzalloc(sizeof(*param), GFP_KERNEL); |
| @@ -559,6 +576,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt) | |||
| 559 | ret = PTR_ERR(scheduler->thread[i]); | 576 | ret = PTR_ERR(scheduler->thread[i]); |
| 560 | goto err; | 577 | goto err; |
| 561 | } | 578 | } |
| 579 | |||
| 580 | gvt->shadow_ctx_notifier_block[i].notifier_call = | ||
| 581 | shadow_context_status_change; | ||
| 582 | atomic_notifier_chain_register(&engine->context_status_notifier, | ||
| 583 | &gvt->shadow_ctx_notifier_block[i]); | ||
| 562 | } | 584 | } |
| 563 | return 0; | 585 | return 0; |
| 564 | err: | 586 | err: |
| @@ -570,9 +592,6 @@ err: | |||
| 570 | 592 | ||
| 571 | void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu) | 593 | void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu) |
| 572 | { | 594 | { |
| 573 | atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier, | ||
| 574 | &vgpu->shadow_ctx_notifier_block); | ||
| 575 | |||
| 576 | i915_gem_context_put_unlocked(vgpu->shadow_ctx); | 595 | i915_gem_context_put_unlocked(vgpu->shadow_ctx); |
| 577 | } | 596 | } |
| 578 | 597 | ||
| @@ -587,10 +606,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu) | |||
| 587 | 606 | ||
| 588 | vgpu->shadow_ctx->engine[RCS].initialised = true; | 607 | vgpu->shadow_ctx->engine[RCS].initialised = true; |
| 589 | 608 | ||
| 590 | vgpu->shadow_ctx_notifier_block.notifier_call = | ||
| 591 | shadow_context_status_change; | ||
| 592 | |||
| 593 | atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier, | ||
| 594 | &vgpu->shadow_ctx_notifier_block); | ||
| 595 | return 0; | 609 | return 0; |
| 596 | } | 610 | } |
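Two structural points in the scheduler rework: for_each_engine() only visits engines actually present on the device, which is why the explicit HAS_ENGINE() check could be dropped, and keeping one notifier block per engine in a fixed array lets the callback recover the parent struct with container_of() on the indexed element, as shadow_context_status_change() above does. The registration shape:

    struct intel_engine_cs *engine;
    enum intel_engine_id i;

    for_each_engine(engine, gvt->dev_priv, i) {
            /* absent engines are skipped by the iterator itself */
            gvt->shadow_ctx_notifier_block[i].notifier_call =
                    shadow_context_status_change;
            atomic_notifier_chain_register(&engine->context_status_notifier,
                                           &gvt->shadow_ctx_notifier_block[i]);
    }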
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index e703556eba99..1c75402a59c1 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -248,6 +248,7 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
| 248 | case I915_PARAM_IRQ_ACTIVE: | 248 | case I915_PARAM_IRQ_ACTIVE: |
| 249 | case I915_PARAM_ALLOW_BATCHBUFFER: | 249 | case I915_PARAM_ALLOW_BATCHBUFFER: |
| 250 | case I915_PARAM_LAST_DISPATCH: | 250 | case I915_PARAM_LAST_DISPATCH: |
| 251 | case I915_PARAM_HAS_EXEC_CONSTANTS: | ||
| 251 | /* Reject all old ums/dri params. */ | 252 | /* Reject all old ums/dri params. */ |
| 252 | return -ENODEV; | 253 | return -ENODEV; |
| 253 | case I915_PARAM_CHIPSET_ID: | 254 | case I915_PARAM_CHIPSET_ID: |
| @@ -274,9 +275,6 @@ static int i915_getparam(struct drm_device *dev, void *data, | |||
| 274 | case I915_PARAM_HAS_BSD2: | 275 | case I915_PARAM_HAS_BSD2: |
| 275 | value = !!dev_priv->engine[VCS2]; | 276 | value = !!dev_priv->engine[VCS2]; |
| 276 | break; | 277 | break; |
| 277 | case I915_PARAM_HAS_EXEC_CONSTANTS: | ||
| 278 | value = INTEL_GEN(dev_priv) >= 4; | ||
| 279 | break; | ||
| 280 | case I915_PARAM_HAS_LLC: | 278 | case I915_PARAM_HAS_LLC: |
| 281 | value = HAS_LLC(dev_priv); | 279 | value = HAS_LLC(dev_priv); |
| 282 | break; | 280 | break; |
| @@ -1788,7 +1786,7 @@ void i915_reset(struct drm_i915_private *dev_priv) | |||
| 1788 | goto error; | 1786 | goto error; |
| 1789 | } | 1787 | } |
| 1790 | 1788 | ||
| 1791 | i915_gem_reset_finish(dev_priv); | 1789 | i915_gem_reset(dev_priv); |
| 1792 | intel_overlay_reset(dev_priv); | 1790 | intel_overlay_reset(dev_priv); |
| 1793 | 1791 | ||
| 1794 | /* Ok, now get things going again... */ | 1792 | /* Ok, now get things going again... */ |
| @@ -1814,6 +1812,7 @@ void i915_reset(struct drm_i915_private *dev_priv) | |||
| 1814 | i915_queue_hangcheck(dev_priv); | 1812 | i915_queue_hangcheck(dev_priv); |
| 1815 | 1813 | ||
| 1816 | wakeup: | 1814 | wakeup: |
| 1815 | i915_gem_reset_finish(dev_priv); | ||
| 1817 | enable_irq(dev_priv->drm.irq); | 1816 | enable_irq(dev_priv->drm.irq); |
| 1818 | wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS); | 1817 | wake_up_bit(&error->flags, I915_RESET_IN_PROGRESS); |
| 1819 | return; | 1818 | return; |
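With I915_PARAM_HAS_EXEC_CONSTANTS folded into the reject list, userspace probing that parameter now gets an ioctl error instead of a value. A hypothetical libdrm-style probe showing the visible difference (fd is assumed to be an open i915 node):

    #include <assert.h>
    #include <errno.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    int value = 0;
    struct drm_i915_getparam gp = {
            .param = I915_PARAM_HAS_EXEC_CONSTANTS,
            .value = &value,
    };

    /* before: 0 returned, value = (gen >= 4); now: -1 with errno == ENODEV */
    if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
            assert(errno == ENODEV);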
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 0a4b42d31391..1e53c31b6826 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -293,6 +293,7 @@ enum plane_id { | |||
| 293 | PLANE_PRIMARY, | 293 | PLANE_PRIMARY, |
| 294 | PLANE_SPRITE0, | 294 | PLANE_SPRITE0, |
| 295 | PLANE_SPRITE1, | 295 | PLANE_SPRITE1, |
| 296 | PLANE_SPRITE2, | ||
| 296 | PLANE_CURSOR, | 297 | PLANE_CURSOR, |
| 297 | I915_MAX_PLANES, | 298 | I915_MAX_PLANES, |
| 298 | }; | 299 | }; |
| @@ -1324,7 +1325,7 @@ struct intel_gen6_power_mgmt { | |||
| 1324 | unsigned boosts; | 1325 | unsigned boosts; |
| 1325 | 1326 | ||
| 1326 | /* manual wa residency calculations */ | 1327 | /* manual wa residency calculations */ |
| 1327 | struct intel_rps_ei up_ei, down_ei; | 1328 | struct intel_rps_ei ei; |
| 1328 | 1329 | ||
| 1329 | /* | 1330 | /* |
| 1330 | * Protects RPS/RC6 register access and PCU communication. | 1331 | * Protects RPS/RC6 register access and PCU communication. |
| @@ -2063,8 +2064,6 @@ struct drm_i915_private { | |||
| 2063 | 2064 | ||
| 2064 | const struct intel_device_info info; | 2065 | const struct intel_device_info info; |
| 2065 | 2066 | ||
| 2066 | int relative_constants_mode; | ||
| 2067 | |||
| 2068 | void __iomem *regs; | 2067 | void __iomem *regs; |
| 2069 | 2068 | ||
| 2070 | struct intel_uncore uncore; | 2069 | struct intel_uncore uncore; |
| @@ -3341,6 +3340,7 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error) | |||
| 3341 | } | 3340 | } |
| 3342 | 3341 | ||
| 3343 | int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); | 3342 | int i915_gem_reset_prepare(struct drm_i915_private *dev_priv); |
| 3343 | void i915_gem_reset(struct drm_i915_private *dev_priv); | ||
| 3344 | void i915_gem_reset_finish(struct drm_i915_private *dev_priv); | 3344 | void i915_gem_reset_finish(struct drm_i915_private *dev_priv); |
| 3345 | void i915_gem_set_wedged(struct drm_i915_private *dev_priv); | 3345 | void i915_gem_set_wedged(struct drm_i915_private *dev_priv); |
| 3346 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); | 3346 | void i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); |
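Read together with the i915_drv.c and i915_gem.c hunks, the new prototype completes a three-phase reset flow. A condensed sketch of the call order in i915_reset(), with error handling and the hardware re-init elided:

    static void i915_reset_flow_sketch(struct drm_i915_private *dev_priv)
    {
            i915_gem_reset_prepare(dev_priv);  /* kill + disable engine irq_tasklets */

            i915_gem_reset(dev_priv);          /* the old i915_gem_reset_finish() body */
            intel_overlay_reset(dev_priv);

            /* ... re-init hardware, queue hangcheck ... */

            i915_gem_reset_finish(dev_priv);   /* new: re-enable the tasklets */
    }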
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6908123162d1..67b1fc5a0331 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -1434,6 +1434,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | |||
| 1434 | 1434 | ||
| 1435 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); | 1435 | trace_i915_gem_object_pwrite(obj, args->offset, args->size); |
| 1436 | 1436 | ||
| 1437 | ret = -ENODEV; | ||
| 1438 | if (obj->ops->pwrite) | ||
| 1439 | ret = obj->ops->pwrite(obj, args); | ||
| 1440 | if (ret != -ENODEV) | ||
| 1441 | goto err; | ||
| 1442 | |||
| 1437 | ret = i915_gem_object_wait(obj, | 1443 | ret = i915_gem_object_wait(obj, |
| 1438 | I915_WAIT_INTERRUPTIBLE | | 1444 | I915_WAIT_INTERRUPTIBLE | |
| 1439 | I915_WAIT_ALL, | 1445 | I915_WAIT_ALL, |
| @@ -2119,6 +2125,7 @@ i915_gem_object_truncate(struct drm_i915_gem_object *obj) | |||
| 2119 | */ | 2125 | */ |
| 2120 | shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); | 2126 | shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); |
| 2121 | obj->mm.madv = __I915_MADV_PURGED; | 2127 | obj->mm.madv = __I915_MADV_PURGED; |
| 2128 | obj->mm.pages = ERR_PTR(-EFAULT); | ||
| 2122 | } | 2129 | } |
| 2123 | 2130 | ||
| 2124 | /* Try to discard unwanted pages */ | 2131 | /* Try to discard unwanted pages */ |
| @@ -2218,7 +2225,9 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj, | |||
| 2218 | 2225 | ||
| 2219 | __i915_gem_object_reset_page_iter(obj); | 2226 | __i915_gem_object_reset_page_iter(obj); |
| 2220 | 2227 | ||
| 2221 | obj->ops->put_pages(obj, pages); | 2228 | if (!IS_ERR(pages)) |
| 2229 | obj->ops->put_pages(obj, pages); | ||
| 2230 | |||
| 2222 | unlock: | 2231 | unlock: |
| 2223 | mutex_unlock(&obj->mm.lock); | 2232 | mutex_unlock(&obj->mm.lock); |
| 2224 | } | 2233 | } |
| @@ -2437,7 +2446,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj) | |||
| 2437 | if (err) | 2446 | if (err) |
| 2438 | return err; | 2447 | return err; |
| 2439 | 2448 | ||
| 2440 | if (unlikely(!obj->mm.pages)) { | 2449 | if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { |
| 2441 | err = ____i915_gem_object_get_pages(obj); | 2450 | err = ____i915_gem_object_get_pages(obj); |
| 2442 | if (err) | 2451 | if (err) |
| 2443 | goto unlock; | 2452 | goto unlock; |
| @@ -2515,7 +2524,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, | |||
| 2515 | 2524 | ||
| 2516 | pinned = true; | 2525 | pinned = true; |
| 2517 | if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { | 2526 | if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) { |
| 2518 | if (unlikely(!obj->mm.pages)) { | 2527 | if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) { |
| 2519 | ret = ____i915_gem_object_get_pages(obj); | 2528 | ret = ____i915_gem_object_get_pages(obj); |
| 2520 | if (ret) | 2529 | if (ret) |
| 2521 | goto err_unlock; | 2530 | goto err_unlock; |
| @@ -2563,6 +2572,75 @@ err_unlock: | |||
| 2563 | goto out_unlock; | 2572 | goto out_unlock; |
| 2564 | } | 2573 | } |
| 2565 | 2574 | ||
| 2575 | static int | ||
| 2576 | i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, | ||
| 2577 | const struct drm_i915_gem_pwrite *arg) | ||
| 2578 | { | ||
| 2579 | struct address_space *mapping = obj->base.filp->f_mapping; | ||
| 2580 | char __user *user_data = u64_to_user_ptr(arg->data_ptr); | ||
| 2581 | u64 remain, offset; | ||
| 2582 | unsigned int pg; | ||
| 2583 | |||
| 2584 | /* Before we instantiate/pin the backing store for our use, we | ||
| 2585 | * can prepopulate the shmemfs filp efficiently using a write into | ||
| 2586 | * the pagecache. We avoid the penalty of instantiating all the | ||
| 2587 | * pages, important if the user is just writing to a few and never | ||
| 2588 | * uses the object on the GPU, and using a direct write into shmemfs | ||
| 2589 | * allows it to avoid the cost of retrieving a page (either swapin | ||
| 2590 | * or clearing-before-use) before it is overwritten. | ||
| 2591 | */ | ||
| 2592 | if (READ_ONCE(obj->mm.pages)) | ||
| 2593 | return -ENODEV; | ||
| 2594 | |||
| 2595 | /* Before the pages are instantiated the object is treated as being | ||
| 2596 | * in the CPU domain. The pages will be clflushed as required before | ||
| 2597 | * use, and we can freely write into the pages directly. If userspace | ||
| 2598 | * races pwrite with any other operation; corruption will ensue - | ||
| 2599 | * that is userspace's prerogative! | ||
| 2600 | */ | ||
| 2601 | |||
| 2602 | remain = arg->size; | ||
| 2603 | offset = arg->offset; | ||
| 2604 | pg = offset_in_page(offset); | ||
| 2605 | |||
| 2606 | do { | ||
| 2607 | unsigned int len, unwritten; | ||
| 2608 | struct page *page; | ||
| 2609 | void *data, *vaddr; | ||
| 2610 | int err; | ||
| 2611 | |||
| 2612 | len = PAGE_SIZE - pg; | ||
| 2613 | if (len > remain) | ||
| 2614 | len = remain; | ||
| 2615 | |||
| 2616 | err = pagecache_write_begin(obj->base.filp, mapping, | ||
| 2617 | offset, len, 0, | ||
| 2618 | &page, &data); | ||
| 2619 | if (err < 0) | ||
| 2620 | return err; | ||
| 2621 | |||
| 2622 | vaddr = kmap(page); | ||
| 2623 | unwritten = copy_from_user(vaddr + pg, user_data, len); | ||
| 2624 | kunmap(page); | ||
| 2625 | |||
| 2626 | err = pagecache_write_end(obj->base.filp, mapping, | ||
| 2627 | offset, len, len - unwritten, | ||
| 2628 | page, data); | ||
| 2629 | if (err < 0) | ||
| 2630 | return err; | ||
| 2631 | |||
| 2632 | if (unwritten) | ||
| 2633 | return -EFAULT; | ||
| 2634 | |||
| 2635 | remain -= len; | ||
| 2636 | user_data += len; | ||
| 2637 | offset += len; | ||
| 2638 | pg = 0; | ||
| 2639 | } while (remain); | ||
| 2640 | |||
| 2641 | return 0; | ||
| 2642 | } | ||
| 2643 | |||
| 2566 | static bool ban_context(const struct i915_gem_context *ctx) | 2644 | static bool ban_context(const struct i915_gem_context *ctx) |
| 2567 | { | 2645 | { |
| 2568 | return (i915_gem_context_is_bannable(ctx) && | 2646 | return (i915_gem_context_is_bannable(ctx) && |
| @@ -2641,7 +2719,16 @@ int i915_gem_reset_prepare(struct drm_i915_private *dev_priv) | |||
| 2641 | for_each_engine(engine, dev_priv, id) { | 2719 | for_each_engine(engine, dev_priv, id) { |
| 2642 | struct drm_i915_gem_request *request; | 2720 | struct drm_i915_gem_request *request; |
| 2643 | 2721 | ||
| 2722 | /* Prevent request submission to the hardware until we have | ||
| 2723 | * completed the reset in i915_gem_reset_finish(). If a request | ||
| 2724 | * is completed by one engine, it may then queue a request | ||
| 2725 | * to a second via its engine->irq_tasklet *just* as we are | ||
| 2726 | * calling engine->init_hw() and also writing the ELSP. | ||
| 2727 | * Turning off the engine->irq_tasklet until the reset is over | ||
| 2728 | * prevents the race. | ||
| 2729 | */ | ||
| 2644 | tasklet_kill(&engine->irq_tasklet); | 2730 | tasklet_kill(&engine->irq_tasklet); |
| 2731 | tasklet_disable(&engine->irq_tasklet); | ||
| 2645 | 2732 | ||
| 2646 | if (engine_stalled(engine)) { | 2733 | if (engine_stalled(engine)) { |
| 2647 | request = i915_gem_find_active_request(engine); | 2734 | request = i915_gem_find_active_request(engine); |
| @@ -2756,7 +2843,7 @@ static void i915_gem_reset_engine(struct intel_engine_cs *engine) | |||
| 2756 | engine->reset_hw(engine, request); | 2843 | engine->reset_hw(engine, request); |
| 2757 | } | 2844 | } |
| 2758 | 2845 | ||
| 2759 | void i915_gem_reset_finish(struct drm_i915_private *dev_priv) | 2846 | void i915_gem_reset(struct drm_i915_private *dev_priv) |
| 2760 | { | 2847 | { |
| 2761 | struct intel_engine_cs *engine; | 2848 | struct intel_engine_cs *engine; |
| 2762 | enum intel_engine_id id; | 2849 | enum intel_engine_id id; |
| @@ -2778,6 +2865,17 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) | |||
| 2778 | } | 2865 | } |
| 2779 | } | 2866 | } |
| 2780 | 2867 | ||
| 2868 | void i915_gem_reset_finish(struct drm_i915_private *dev_priv) | ||
| 2869 | { | ||
| 2870 | struct intel_engine_cs *engine; | ||
| 2871 | enum intel_engine_id id; | ||
| 2872 | |||
| 2873 | lockdep_assert_held(&dev_priv->drm.struct_mutex); | ||
| 2874 | |||
| 2875 | for_each_engine(engine, dev_priv, id) | ||
| 2876 | tasklet_enable(&engine->irq_tasklet); | ||
| 2877 | } | ||
| 2878 | |||
| 2781 | static void nop_submit_request(struct drm_i915_gem_request *request) | 2879 | static void nop_submit_request(struct drm_i915_gem_request *request) |
| 2782 | { | 2880 | { |
| 2783 | dma_fence_set_error(&request->fence, -EIO); | 2881 | dma_fence_set_error(&request->fence, -EIO); |
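
The reset_prepare/reset_finish pair above brackets the whole reset window: tasklet_kill() flushes a tasklet that is already running, tasklet_disable() keeps it from being rescheduled while init_hw() and the ELSP writes are in flight, and the new i915_gem_reset_finish() re-enables it afterwards. A standalone sketch of that gate using a C11 atomic disable count (illustrative names; unlike tasklet_disable(), this sketch does not wait out a run already in progress, which is why the kernel code pairs it with tasklet_kill()):

/* Sketch: gate deferred work across a reset window, mirroring the
 * tasklet_disable()/tasklet_enable() pairing above. Standalone C11;
 * illustrative, not the kernel primitive.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct work_gate {
	atomic_int disable_count;	/* > 0 means "must not run" */
	atomic_bool pending;		/* work arrived while gated */
};

static void gate_disable(struct work_gate *g)
{
	atomic_fetch_add(&g->disable_count, 1);
}

static void gate_enable(struct work_gate *g)
{
	atomic_fetch_sub(&g->disable_count, 1);
}

/* Interrupt side: run the work only if the gate is open, otherwise
 * record it so the reset-finish path can kick it afterwards.
 */
static bool gate_try_run(struct work_gate *g, void (*fn)(void))
{
	if (atomic_load(&g->disable_count) > 0) {
		atomic_store(&g->pending, true);
		return false;
	}
	fn();
	return true;
}
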
| @@ -3029,6 +3127,16 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) | |||
| 3029 | args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); | 3127 | args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start)); |
| 3030 | if (args->timeout_ns < 0) | 3128 | if (args->timeout_ns < 0) |
| 3031 | args->timeout_ns = 0; | 3129 | args->timeout_ns = 0; |
| 3130 | |||
| 3131 | /* | ||
| 3132 | * Apparently ktime isn't accurate enough and occasionally has a | ||
| 3133 | * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch | ||
| 3134 | * things up to make the test happy. We allow up to 1 jiffy. | ||
| 3135 | * | ||
| 3136 | * This is a regression from the timespec->ktime conversion. | ||
| 3137 | */ | ||
| 3138 | if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns)) | ||
| 3139 | args->timeout_ns = 0; | ||
| 3032 | } | 3140 | } |
| 3033 | 3141 | ||
| 3034 | i915_gem_object_put(obj); | 3142 | i915_gem_object_put(obj); |
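
The added clamp compensates for integer truncation in the jiffies<->nsecs round trip: a leftover timeout shorter than one tick converts to zero jiffies, so the wait can return -ETIME while a few stray nanoseconds nominally remain. A quick standalone illustration of the truncation (HZ is assumed to be 250 purely for the example):

/* Sketch: why a sub-jiffy remainder rounds to zero. Standalone C;
 * HZ is assumed to be 250 here purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define HZ 250
#define NSEC_PER_SEC 1000000000ull

static uint64_t nsecs_to_jiffies(uint64_t ns)
{
	return ns * HZ / NSEC_PER_SEC;	/* truncating division */
}

int main(void)
{
	uint64_t leftover_ns = 123456;	/* ~0.12 ms nominally remaining */

	/* 123456 * 250 / 1e9 == 0: less than one tick remains, so the
	 * wait already slept as long as it usefully could; report the
	 * timeout as fully consumed rather than -ETIME.
	 */
	printf("%llu ns -> %llu jiffies\n",
	       (unsigned long long)leftover_ns,
	       (unsigned long long)nsecs_to_jiffies(leftover_ns));
	return 0;
}
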
| @@ -3974,8 +4082,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj, | |||
| 3974 | static const struct drm_i915_gem_object_ops i915_gem_object_ops = { | 4082 | static const struct drm_i915_gem_object_ops i915_gem_object_ops = { |
| 3975 | .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | | 4083 | .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | |
| 3976 | I915_GEM_OBJECT_IS_SHRINKABLE, | 4084 | I915_GEM_OBJECT_IS_SHRINKABLE, |
| 4085 | |||
| 3977 | .get_pages = i915_gem_object_get_pages_gtt, | 4086 | .get_pages = i915_gem_object_get_pages_gtt, |
| 3978 | .put_pages = i915_gem_object_put_pages_gtt, | 4087 | .put_pages = i915_gem_object_put_pages_gtt, |
| 4088 | |||
| 4089 | .pwrite = i915_gem_object_pwrite_gtt, | ||
| 3979 | }; | 4090 | }; |
| 3980 | 4091 | ||
| 3981 | struct drm_i915_gem_object * | 4092 | struct drm_i915_gem_object * |
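
Wiring .pwrite into the default object ops gives the pwrite ioctl an object-specific fast path that can decline and fall back to the generic route. A minimal sketch of that optional-callback dispatch; the -ENODEV "decline" convention and all names here are illustrative, not the driver's exact contract:

/* Sketch: optional per-object fast path with a generic fallback,
 * the dispatch implied by the new ops->pwrite hook. Illustrative
 * standalone C; the -ENODEV "decline" convention is an assumption.
 */
#include <errno.h>
#include <stdint.h>

struct obj;
struct pwrite_args { uint64_t offset, size; const void *data; };

struct obj_ops {
	/* Optional: may be NULL, may return -ENODEV to decline. */
	int (*pwrite)(struct obj *obj, const struct pwrite_args *args);
};

struct obj { const struct obj_ops *ops; };

static int generic_pwrite(struct obj *obj, const struct pwrite_args *args)
{
	(void)obj; (void)args;
	return 0;	/* stub for the generic slow path */
}

static int do_pwrite(struct obj *obj, const struct pwrite_args *args)
{
	if (obj->ops->pwrite) {
		int ret = obj->ops->pwrite(obj, args);

		if (ret != -ENODEV)
			return ret;	/* handled (or really failed) */
	}
	return generic_pwrite(obj, args);
}
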
| @@ -4583,8 +4694,6 @@ i915_gem_load_init(struct drm_i915_private *dev_priv) | |||
| 4583 | init_waitqueue_head(&dev_priv->gpu_error.wait_queue); | 4694 | init_waitqueue_head(&dev_priv->gpu_error.wait_queue); |
| 4584 | init_waitqueue_head(&dev_priv->gpu_error.reset_queue); | 4695 | init_waitqueue_head(&dev_priv->gpu_error.reset_queue); |
| 4585 | 4696 | ||
| 4586 | dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; | ||
| 4587 | |||
| 4588 | init_waitqueue_head(&dev_priv->pending_flip_queue); | 4697 | init_waitqueue_head(&dev_priv->pending_flip_queue); |
| 4589 | 4698 | ||
| 4590 | dev_priv->mm.interruptible = true; | 4699 | dev_priv->mm.interruptible = true; |
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 17f90c618208..e2d83b6d376b 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
| @@ -311,7 +311,6 @@ __create_hw_context(struct drm_i915_private *dev_priv, | |||
| 311 | ctx->ring_size = 4 * PAGE_SIZE; | 311 | ctx->ring_size = 4 * PAGE_SIZE; |
| 312 | ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << | 312 | ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) << |
| 313 | GEN8_CTX_ADDRESSING_MODE_SHIFT; | 313 | GEN8_CTX_ADDRESSING_MODE_SHIFT; |
| 314 | ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier); | ||
| 315 | 314 | ||
| 316 | /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not | 315 | /* GuC requires the ring to be placed above GUC_WOPCM_TOP. If GuC is not |
| 317 | * present or not in use we still need a small bias as ring wraparound | 316 | * present or not in use we still need a small bias as ring wraparound |
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h index 0ac750b90f3d..e9c008fe14b1 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.h +++ b/drivers/gpu/drm/i915/i915_gem_context.h | |||
| @@ -160,9 +160,6 @@ struct i915_gem_context { | |||
| 160 | /** desc_template: invariant fields for the HW context descriptor */ | 160 | /** desc_template: invariant fields for the HW context descriptor */ |
| 161 | u32 desc_template; | 161 | u32 desc_template; |
| 162 | 162 | ||
| 163 | /** status_notifier: list of callbacks for context-switch changes */ | ||
| 164 | struct atomic_notifier_head status_notifier; | ||
| 165 | |||
| 166 | /** guilty_count: How many times this context has caused a GPU hang. */ | 163 | /** guilty_count: How many times this context has caused a GPU hang. */ |
| 167 | unsigned int guilty_count; | 164 | unsigned int guilty_count; |
| 168 | /** | 165 | /** |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index c181b1bb3d2c..3be2503aa042 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
| @@ -293,12 +293,12 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, | |||
| 293 | * those as well to make room for our guard pages. | 293 | * those as well to make room for our guard pages. |
| 294 | */ | 294 | */ |
| 295 | if (check_color) { | 295 | if (check_color) { |
| 296 | if (vma->node.start + vma->node.size == node->start) { | 296 | if (node->start + node->size == target->start) { |
| 297 | if (vma->node.color == node->color) | 297 | if (node->color == target->color) |
| 298 | continue; | 298 | continue; |
| 299 | } | 299 | } |
| 300 | if (vma->node.start == node->start + node->size) { | 300 | if (node->start == target->start + target->size) { |
| 301 | if (vma->node.color == node->color) | 301 | if (node->color == target->color) |
| 302 | continue; | 302 | continue; |
| 303 | } | 303 | } |
| 304 | } | 304 | } |
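
The i915_gem_evict_for_node() fix compares each scanned node against the target range instead of against a stale vma, restoring the intended rule: a neighbour that touches either end of the target and shares its cache color needs no guard page and can be skipped. A standalone sketch of the corrected adjacency test (drm_mm_node-like fields, illustrative):

/* Sketch: the corrected coloring check, comparing each candidate
 * node against the target range rather than an unrelated vma.
 * Standalone C with drm_mm_node-like fields for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

struct mm_node { uint64_t start, size; unsigned long color; };

/* True when @node abuts @target and shares its color: no guard page
 * is needed between them, so eviction can skip it.
 */
static bool same_color_neighbour(const struct mm_node *node,
				 const struct mm_node *target)
{
	if (node->start + node->size == target->start &&
	    node->color == target->color)
		return true;	/* ends exactly where the target begins */

	if (node->start == target->start + target->size &&
	    node->color == target->color)
		return true;	/* begins exactly where the target ends */

	return false;
}
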
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d02cfaefe1c8..30e0675fd7da 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -1408,10 +1408,7 @@ execbuf_submit(struct i915_execbuffer_params *params, | |||
| 1408 | struct drm_i915_gem_execbuffer2 *args, | 1408 | struct drm_i915_gem_execbuffer2 *args, |
| 1409 | struct list_head *vmas) | 1409 | struct list_head *vmas) |
| 1410 | { | 1410 | { |
| 1411 | struct drm_i915_private *dev_priv = params->request->i915; | ||
| 1412 | u64 exec_start, exec_len; | 1411 | u64 exec_start, exec_len; |
| 1413 | int instp_mode; | ||
| 1414 | u32 instp_mask; | ||
| 1415 | int ret; | 1412 | int ret; |
| 1416 | 1413 | ||
| 1417 | ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas); | 1414 | ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas); |
| @@ -1422,56 +1419,11 @@ execbuf_submit(struct i915_execbuffer_params *params, | |||
| 1422 | if (ret) | 1419 | if (ret) |
| 1423 | return ret; | 1420 | return ret; |
| 1424 | 1421 | ||
| 1425 | instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; | 1422 | if (args->flags & I915_EXEC_CONSTANTS_MASK) { |
| 1426 | instp_mask = I915_EXEC_CONSTANTS_MASK; | 1423 | DRM_DEBUG("I915_EXEC_CONSTANTS_* unsupported\n"); |
| 1427 | switch (instp_mode) { | ||
| 1428 | case I915_EXEC_CONSTANTS_REL_GENERAL: | ||
| 1429 | case I915_EXEC_CONSTANTS_ABSOLUTE: | ||
| 1430 | case I915_EXEC_CONSTANTS_REL_SURFACE: | ||
| 1431 | if (instp_mode != 0 && params->engine->id != RCS) { | ||
| 1432 | DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); | ||
| 1433 | return -EINVAL; | ||
| 1434 | } | ||
| 1435 | |||
| 1436 | if (instp_mode != dev_priv->relative_constants_mode) { | ||
| 1437 | if (INTEL_INFO(dev_priv)->gen < 4) { | ||
| 1438 | DRM_DEBUG("no rel constants on pre-gen4\n"); | ||
| 1439 | return -EINVAL; | ||
| 1440 | } | ||
| 1441 | |||
| 1442 | if (INTEL_INFO(dev_priv)->gen > 5 && | ||
| 1443 | instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { | ||
| 1444 | DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); | ||
| 1445 | return -EINVAL; | ||
| 1446 | } | ||
| 1447 | |||
| 1448 | /* The HW changed the meaning on this bit on gen6 */ | ||
| 1449 | if (INTEL_INFO(dev_priv)->gen >= 6) | ||
| 1450 | instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE; | ||
| 1451 | } | ||
| 1452 | break; | ||
| 1453 | default: | ||
| 1454 | DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode); | ||
| 1455 | return -EINVAL; | 1424 | return -EINVAL; |
| 1456 | } | 1425 | } |
| 1457 | 1426 | ||
| 1458 | if (params->engine->id == RCS && | ||
| 1459 | instp_mode != dev_priv->relative_constants_mode) { | ||
| 1460 | struct intel_ring *ring = params->request->ring; | ||
| 1461 | |||
| 1462 | ret = intel_ring_begin(params->request, 4); | ||
| 1463 | if (ret) | ||
| 1464 | return ret; | ||
| 1465 | |||
| 1466 | intel_ring_emit(ring, MI_NOOP); | ||
| 1467 | intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); | ||
| 1468 | intel_ring_emit_reg(ring, INSTPM); | ||
| 1469 | intel_ring_emit(ring, instp_mask << 16 | instp_mode); | ||
| 1470 | intel_ring_advance(ring); | ||
| 1471 | |||
| 1472 | dev_priv->relative_constants_mode = instp_mode; | ||
| 1473 | } | ||
| 1474 | |||
| 1475 | if (args->flags & I915_EXEC_GEN7_SOL_RESET) { | 1427 | if (args->flags & I915_EXEC_GEN7_SOL_RESET) { |
| 1476 | ret = i915_reset_gen7_sol_offsets(params->request); | 1428 | ret = i915_reset_gen7_sol_offsets(params->request); |
| 1477 | if (ret) | 1429 | if (ret) |
diff --git a/drivers/gpu/drm/i915/i915_gem_object.h b/drivers/gpu/drm/i915/i915_gem_object.h index bf90b07163d1..76b80a0be797 100644 --- a/drivers/gpu/drm/i915/i915_gem_object.h +++ b/drivers/gpu/drm/i915/i915_gem_object.h | |||
| @@ -54,6 +54,9 @@ struct drm_i915_gem_object_ops { | |||
| 54 | struct sg_table *(*get_pages)(struct drm_i915_gem_object *); | 54 | struct sg_table *(*get_pages)(struct drm_i915_gem_object *); |
| 55 | void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *); | 55 | void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *); |
| 56 | 56 | ||
| 57 | int (*pwrite)(struct drm_i915_gem_object *, | ||
| 58 | const struct drm_i915_gem_pwrite *); | ||
| 59 | |||
| 57 | int (*dmabuf_export)(struct drm_i915_gem_object *); | 60 | int (*dmabuf_export)(struct drm_i915_gem_object *); |
| 58 | void (*release)(struct drm_i915_gem_object *); | 61 | void (*release)(struct drm_i915_gem_object *); |
| 59 | }; | 62 | }; |
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 401006b4c6a3..d5d2b4c6ed38 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c | |||
| @@ -263,7 +263,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) | |||
| 263 | I915_SHRINK_BOUND | | 263 | I915_SHRINK_BOUND | |
| 264 | I915_SHRINK_UNBOUND | | 264 | I915_SHRINK_UNBOUND | |
| 265 | I915_SHRINK_ACTIVE); | 265 | I915_SHRINK_ACTIVE); |
| 266 | rcu_barrier(); /* wait until our RCU delayed slab frees are completed */ | 266 | synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */ |
| 267 | 267 | ||
| 268 | return freed; | 268 | return freed; |
| 269 | } | 269 | } |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index e6ffef2f707a..b6c886ac901b 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
| @@ -1046,68 +1046,51 @@ static void vlv_c0_read(struct drm_i915_private *dev_priv, | |||
| 1046 | ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); | 1046 | ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); |
| 1047 | } | 1047 | } |
| 1048 | 1048 | ||
| 1049 | static bool vlv_c0_above(struct drm_i915_private *dev_priv, | ||
| 1050 | const struct intel_rps_ei *old, | ||
| 1051 | const struct intel_rps_ei *now, | ||
| 1052 | int threshold) | ||
| 1053 | { | ||
| 1054 | u64 time, c0; | ||
| 1055 | unsigned int mul = 100; | ||
| 1056 | |||
| 1057 | if (old->cz_clock == 0) | ||
| 1058 | return false; | ||
| 1059 | |||
| 1060 | if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) | ||
| 1061 | mul <<= 8; | ||
| 1062 | |||
| 1063 | time = now->cz_clock - old->cz_clock; | ||
| 1064 | time *= threshold * dev_priv->czclk_freq; | ||
| 1065 | |||
| 1066 | /* Workload can be split between render + media, e.g. SwapBuffers | ||
| 1067 | * being blitted in X after being rendered in mesa. To account for | ||
| 1068 | * this we need to combine both engines into our activity counter. | ||
| 1069 | */ | ||
| 1070 | c0 = now->render_c0 - old->render_c0; | ||
| 1071 | c0 += now->media_c0 - old->media_c0; | ||
| 1072 | c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC; | ||
| 1073 | |||
| 1074 | return c0 >= time; | ||
| 1075 | } | ||
| 1076 | |||
| 1077 | void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) | 1049 | void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) |
| 1078 | { | 1050 | { |
| 1079 | vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); | 1051 | memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei)); |
| 1080 | dev_priv->rps.up_ei = dev_priv->rps.down_ei; | ||
| 1081 | } | 1052 | } |
| 1082 | 1053 | ||
| 1083 | static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) | 1054 | static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) |
| 1084 | { | 1055 | { |
| 1056 | const struct intel_rps_ei *prev = &dev_priv->rps.ei; | ||
| 1085 | struct intel_rps_ei now; | 1057 | struct intel_rps_ei now; |
| 1086 | u32 events = 0; | 1058 | u32 events = 0; |
| 1087 | 1059 | ||
| 1088 | if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) | 1060 | if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) |
| 1089 | return 0; | 1061 | return 0; |
| 1090 | 1062 | ||
| 1091 | vlv_c0_read(dev_priv, &now); | 1063 | vlv_c0_read(dev_priv, &now); |
| 1092 | if (now.cz_clock == 0) | 1064 | if (now.cz_clock == 0) |
| 1093 | return 0; | 1065 | return 0; |
| 1094 | 1066 | ||
| 1095 | if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { | 1067 | if (prev->cz_clock) { |
| 1096 | if (!vlv_c0_above(dev_priv, | 1068 | u64 time, c0; |
| 1097 | &dev_priv->rps.down_ei, &now, | 1069 | unsigned int mul; |
| 1098 | dev_priv->rps.down_threshold)) | 1070 | |
| 1099 | events |= GEN6_PM_RP_DOWN_THRESHOLD; | 1071 | mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */ |
| 1100 | dev_priv->rps.down_ei = now; | 1072 | if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) |
| 1101 | } | 1073 | mul <<= 8; |
| 1102 | 1074 | ||
| 1103 | if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { | 1075 | time = now.cz_clock - prev->cz_clock; |
| 1104 | if (vlv_c0_above(dev_priv, | 1076 | time *= dev_priv->czclk_freq; |
| 1105 | &dev_priv->rps.up_ei, &now, | 1077 | |
| 1106 | dev_priv->rps.up_threshold)) | 1078 | /* Workload can be split between render + media, |
| 1107 | events |= GEN6_PM_RP_UP_THRESHOLD; | 1079 | * e.g. SwapBuffers being blitted in X after being rendered in |
| 1108 | dev_priv->rps.up_ei = now; | 1080 | * mesa. To account for this we need to combine both engines |
| 1081 | * into our activity counter. | ||
| 1082 | */ | ||
| 1083 | c0 = now.render_c0 - prev->render_c0; | ||
| 1084 | c0 += now.media_c0 - prev->media_c0; | ||
| 1085 | c0 *= mul; | ||
| 1086 | |||
| 1087 | if (c0 > time * dev_priv->rps.up_threshold) | ||
| 1088 | events = GEN6_PM_RP_UP_THRESHOLD; | ||
| 1089 | else if (c0 < time * dev_priv->rps.down_threshold) | ||
| 1090 | events = GEN6_PM_RP_DOWN_THRESHOLD; | ||
| 1109 | } | 1091 | } |
| 1110 | 1092 | ||
| 1093 | dev_priv->rps.ei = now; | ||
| 1111 | return events; | 1094 | return events; |
| 1112 | } | 1095 | } |
| 1113 | 1096 | ||
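
The vlv_wa_c0_ei() rework collapses the old vlv_c0_above() helper into one snapshot pair per EI window: a single combined render+media C0 delta is compared against the up and down thresholds by cross-multiplication, so no division is needed. A standalone sketch of that comparison, assuming both counters tick in the same units (the real code also scales for the CZ clock rate, which is dropped here for clarity):

/* Sketch: classify an EI window as up/down/none from C0 residency
 * deltas using only 64-bit multiplies, as the reworked handler does.
 * Standalone C; thresholds are percentages and both counters are
 * assumed to tick in the same units.
 */
#include <stdint.h>

struct ei_sample { uint64_t clk, render_c0, media_c0; };

enum rps_event { RPS_NONE, RPS_UP, RPS_DOWN };

static enum rps_event c0_decide(const struct ei_sample *prev,
				const struct ei_sample *now,
				unsigned int up_pct, unsigned int down_pct)
{
	uint64_t time, c0;

	if (!prev->clk)
		return RPS_NONE;	/* first window after reset_ei() */

	time = now->clk - prev->clk;

	/* Workloads split across render and media (e.g. a blit after
	 * a render) must be summed into one activity figure.
	 */
	c0 = (now->render_c0 - prev->render_c0) +
	     (now->media_c0 - prev->media_c0);

	/* busy% > up_pct  <=>  c0 * 100 > time * up_pct */
	if (c0 * 100 > time * up_pct)
		return RPS_UP;
	if (c0 * 100 < time * down_pct)
		return RPS_DOWN;
	return RPS_NONE;
}
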
| @@ -4228,7 +4211,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) | |||
| 4228 | /* Let's track the enabled rps events */ | 4211 | /* Let's track the enabled rps events */ |
| 4229 | if (IS_VALLEYVIEW(dev_priv)) | 4212 | if (IS_VALLEYVIEW(dev_priv)) |
| 4230 | /* WaGsvRC0ResidencyMethod:vlv */ | 4213 | /* WaGsvRC0ResidencyMethod:vlv */ |
| 4231 | dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; | 4214 | dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; |
| 4232 | else | 4215 | else |
| 4233 | dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; | 4216 | dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; |
| 4234 | 4217 | ||
| @@ -4266,6 +4249,16 @@ void intel_irq_init(struct drm_i915_private *dev_priv) | |||
| 4266 | if (!IS_GEN2(dev_priv)) | 4249 | if (!IS_GEN2(dev_priv)) |
| 4267 | dev->vblank_disable_immediate = true; | 4250 | dev->vblank_disable_immediate = true; |
| 4268 | 4251 | ||
| 4252 | /* Most platforms treat the display irq block as an always-on | ||
| 4253 | * power domain. vlv/chv can disable it at runtime and need | ||
| 4254 | * special care to avoid writing any of the display block registers | ||
| 4255 | * outside of the power domain. We defer setting up the display irqs | ||
| 4256 | * in this case to the runtime pm. | ||
| 4257 | */ | ||
| 4258 | dev_priv->display_irqs_enabled = true; | ||
| 4259 | if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) | ||
| 4260 | dev_priv->display_irqs_enabled = false; | ||
| 4261 | |||
| 4269 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; | 4262 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; |
| 4270 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; | 4263 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; |
| 4271 | 4264 | ||
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 155906e84812..df20e9bc1c0f 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c | |||
| @@ -512,10 +512,36 @@ err_unpin: | |||
| 512 | return ret; | 512 | return ret; |
| 513 | } | 513 | } |
| 514 | 514 | ||
| 515 | static void | ||
| 516 | i915_vma_remove(struct i915_vma *vma) | ||
| 517 | { | ||
| 518 | struct drm_i915_gem_object *obj = vma->obj; | ||
| 519 | |||
| 520 | GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); | ||
| 521 | GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); | ||
| 522 | |||
| 523 | drm_mm_remove_node(&vma->node); | ||
| 524 | list_move_tail(&vma->vm_link, &vma->vm->unbound_list); | ||
| 525 | |||
| 526 | /* Since the unbound list is global, only move to that list if | ||
| 527 | * no more VMAs exist. | ||
| 528 | */ | ||
| 529 | if (--obj->bind_count == 0) | ||
| 530 | list_move_tail(&obj->global_link, | ||
| 531 | &to_i915(obj->base.dev)->mm.unbound_list); | ||
| 532 | |||
| 533 | /* And finally now the object is completely decoupled from this vma, | ||
| 534 | * we can drop its hold on the backing storage and allow it to be | ||
| 535 | * reaped by the shrinker. | ||
| 536 | */ | ||
| 537 | i915_gem_object_unpin_pages(obj); | ||
| 538 | GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); | ||
| 539 | } | ||
| 540 | |||
| 515 | int __i915_vma_do_pin(struct i915_vma *vma, | 541 | int __i915_vma_do_pin(struct i915_vma *vma, |
| 516 | u64 size, u64 alignment, u64 flags) | 542 | u64 size, u64 alignment, u64 flags) |
| 517 | { | 543 | { |
| 518 | unsigned int bound = vma->flags; | 544 | const unsigned int bound = vma->flags; |
| 519 | int ret; | 545 | int ret; |
| 520 | 546 | ||
| 521 | lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); | 547 | lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); |
| @@ -524,18 +550,18 @@ int __i915_vma_do_pin(struct i915_vma *vma, | |||
| 524 | 550 | ||
| 525 | if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { | 551 | if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { |
| 526 | ret = -EBUSY; | 552 | ret = -EBUSY; |
| 527 | goto err; | 553 | goto err_unpin; |
| 528 | } | 554 | } |
| 529 | 555 | ||
| 530 | if ((bound & I915_VMA_BIND_MASK) == 0) { | 556 | if ((bound & I915_VMA_BIND_MASK) == 0) { |
| 531 | ret = i915_vma_insert(vma, size, alignment, flags); | 557 | ret = i915_vma_insert(vma, size, alignment, flags); |
| 532 | if (ret) | 558 | if (ret) |
| 533 | goto err; | 559 | goto err_unpin; |
| 534 | } | 560 | } |
| 535 | 561 | ||
| 536 | ret = i915_vma_bind(vma, vma->obj->cache_level, flags); | 562 | ret = i915_vma_bind(vma, vma->obj->cache_level, flags); |
| 537 | if (ret) | 563 | if (ret) |
| 538 | goto err; | 564 | goto err_remove; |
| 539 | 565 | ||
| 540 | if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) | 566 | if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) |
| 541 | __i915_vma_set_map_and_fenceable(vma); | 567 | __i915_vma_set_map_and_fenceable(vma); |
| @@ -544,7 +570,12 @@ int __i915_vma_do_pin(struct i915_vma *vma, | |||
| 544 | GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); | 570 | GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); |
| 545 | return 0; | 571 | return 0; |
| 546 | 572 | ||
| 547 | err: | 573 | err_remove: |
| 574 | if ((bound & I915_VMA_BIND_MASK) == 0) { | ||
| 575 | GEM_BUG_ON(vma->pages); | ||
| 576 | i915_vma_remove(vma); | ||
| 577 | } | ||
| 578 | err_unpin: | ||
| 548 | __i915_vma_unpin(vma); | 579 | __i915_vma_unpin(vma); |
| 549 | return ret; | 580 | return ret; |
| 550 | } | 581 | } |
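
The relabelled error paths in __i915_vma_do_pin() follow the usual staged-unwind discipline: each failure jumps to a label that releases exactly what this call acquired, so a bind failure removes the node only if this invocation inserted it. A distilled standalone sketch of the pattern (illustrative names and stub helpers):

/* Sketch: staged error unwind with one label per acquired resource,
 * the discipline behind err_remove/err_unpin above. Illustrative C
 * with stub helpers.
 */
#include <errno.h>
#include <stdbool.h>

struct vma_like { bool inserted; int pin_count; };

static int insert_node(struct vma_like *v) { v->inserted = true; return 0; }
static void remove_node(struct vma_like *v) { v->inserted = false; }
static int bind_node(struct vma_like *v) { (void)v; return -ENOSPC; /* stub */ }

static int do_pin(struct vma_like *v)
{
	const bool was_bound = v->inserted;	/* snapshot, like "bound" */
	int ret;

	v->pin_count++;

	if (!was_bound) {
		ret = insert_node(v);
		if (ret)
			goto err_unpin;	/* nothing inserted yet */
	}

	ret = bind_node(v);
	if (ret)
		goto err_remove;

	return 0;

err_remove:
	/* Tear the node down only if *this* call inserted it. */
	if (!was_bound)
		remove_node(v);
err_unpin:
	v->pin_count--;
	return ret;
}
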
| @@ -657,9 +688,6 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
| 657 | } | 688 | } |
| 658 | vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); | 689 | vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); |
| 659 | 690 | ||
| 660 | drm_mm_remove_node(&vma->node); | ||
| 661 | list_move_tail(&vma->vm_link, &vma->vm->unbound_list); | ||
| 662 | |||
| 663 | if (vma->pages != obj->mm.pages) { | 691 | if (vma->pages != obj->mm.pages) { |
| 664 | GEM_BUG_ON(!vma->pages); | 692 | GEM_BUG_ON(!vma->pages); |
| 665 | sg_free_table(vma->pages); | 693 | sg_free_table(vma->pages); |
| @@ -667,18 +695,7 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
| 667 | } | 695 | } |
| 668 | vma->pages = NULL; | 696 | vma->pages = NULL; |
| 669 | 697 | ||
| 670 | /* Since the unbound list is global, only move to that list if | 698 | i915_vma_remove(vma); |
| 671 | * no more VMAs exist. */ | ||
| 672 | if (--obj->bind_count == 0) | ||
| 673 | list_move_tail(&obj->global_link, | ||
| 674 | &to_i915(obj->base.dev)->mm.unbound_list); | ||
| 675 | |||
| 676 | /* And finally now the object is completely decoupled from this vma, | ||
| 677 | * we can drop its hold on the backing storage and allow it to be | ||
| 678 | * reaped by the shrinker. | ||
| 679 | */ | ||
| 680 | i915_gem_object_unpin_pages(obj); | ||
| 681 | GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count) < obj->bind_count); | ||
| 682 | 699 | ||
| 683 | destroy: | 700 | destroy: |
| 684 | if (unlikely(i915_vma_is_closed(vma))) | 701 | if (unlikely(i915_vma_is_closed(vma))) |
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 0085bc745f6a..de219b71fb76 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c | |||
| @@ -35,7 +35,6 @@ | |||
| 35 | */ | 35 | */ |
| 36 | 36 | ||
| 37 | #define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin" | 37 | #define I915_CSR_GLK "i915/glk_dmc_ver1_01.bin" |
| 38 | MODULE_FIRMWARE(I915_CSR_GLK); | ||
| 39 | #define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) | 38 | #define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 1) |
| 40 | 39 | ||
| 41 | #define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin" | 40 | #define I915_CSR_KBL "i915/kbl_dmc_ver1_01.bin" |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 01341670738f..ed1f4f272b4f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -3669,10 +3669,6 @@ static void intel_update_pipe_config(struct intel_crtc *crtc, | |||
| 3669 | /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ | 3669 | /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ |
| 3670 | crtc->base.mode = crtc->base.state->mode; | 3670 | crtc->base.mode = crtc->base.state->mode; |
| 3671 | 3671 | ||
| 3672 | DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n", | ||
| 3673 | old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h, | ||
| 3674 | pipe_config->pipe_src_w, pipe_config->pipe_src_h); | ||
| 3675 | |||
| 3676 | /* | 3672 | /* |
| 3677 | * Update pipe size and adjust fitter if needed: the reason for this is | 3673 | * Update pipe size and adjust fitter if needed: the reason for this is |
| 3678 | * that in compute_mode_changes we check the native mode (not the pfit | 3674 | * that in compute_mode_changes we check the native mode (not the pfit |
| @@ -4796,23 +4792,17 @@ static void skylake_pfit_enable(struct intel_crtc *crtc) | |||
| 4796 | struct intel_crtc_scaler_state *scaler_state = | 4792 | struct intel_crtc_scaler_state *scaler_state = |
| 4797 | &crtc->config->scaler_state; | 4793 | &crtc->config->scaler_state; |
| 4798 | 4794 | ||
| 4799 | DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config); | ||
| 4800 | |||
| 4801 | if (crtc->config->pch_pfit.enabled) { | 4795 | if (crtc->config->pch_pfit.enabled) { |
| 4802 | int id; | 4796 | int id; |
| 4803 | 4797 | ||
| 4804 | if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) { | 4798 | if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) |
| 4805 | DRM_ERROR("Requesting pfit without getting a scaler first\n"); | ||
| 4806 | return; | 4799 | return; |
| 4807 | } | ||
| 4808 | 4800 | ||
| 4809 | id = scaler_state->scaler_id; | 4801 | id = scaler_state->scaler_id; |
| 4810 | I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | | 4802 | I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | |
| 4811 | PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); | 4803 | PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); |
| 4812 | I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); | 4804 | I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); |
| 4813 | I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); | 4805 | I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); |
| 4814 | |||
| 4815 | DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id); | ||
| 4816 | } | 4806 | } |
| 4817 | } | 4807 | } |
| 4818 | 4808 | ||
| @@ -14379,6 +14369,24 @@ static void skl_update_crtcs(struct drm_atomic_state *state, | |||
| 14379 | } while (progress); | 14369 | } while (progress); |
| 14380 | } | 14370 | } |
| 14381 | 14371 | ||
| 14372 | static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) | ||
| 14373 | { | ||
| 14374 | struct intel_atomic_state *state, *next; | ||
| 14375 | struct llist_node *freed; | ||
| 14376 | |||
| 14377 | freed = llist_del_all(&dev_priv->atomic_helper.free_list); | ||
| 14378 | llist_for_each_entry_safe(state, next, freed, freed) | ||
| 14379 | drm_atomic_state_put(&state->base); | ||
| 14380 | } | ||
| 14381 | |||
| 14382 | static void intel_atomic_helper_free_state_worker(struct work_struct *work) | ||
| 14383 | { | ||
| 14384 | struct drm_i915_private *dev_priv = | ||
| 14385 | container_of(work, typeof(*dev_priv), atomic_helper.free_work); | ||
| 14386 | |||
| 14387 | intel_atomic_helper_free_state(dev_priv); | ||
| 14388 | } | ||
| 14389 | |||
| 14382 | static void intel_atomic_commit_tail(struct drm_atomic_state *state) | 14390 | static void intel_atomic_commit_tail(struct drm_atomic_state *state) |
| 14383 | { | 14391 | { |
| 14384 | struct drm_device *dev = state->dev; | 14392 | struct drm_device *dev = state->dev; |
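
intel_atomic_helper_free_state() drains a lock-free list: llist_del_all() detaches the entire chain in one atomic exchange, so producers can keep pushing while the detached states are released, and the same drain can now run both from the worker and synchronously at the end of commit_tail. A standalone C11 sketch of that push/detach-all idiom (simplified node type):

/* Sketch: lock-free push plus detach-all drain, the llist idiom in
 * intel_atomic_helper_free_state() above. Standalone C11.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdlib.h>

struct lnode { struct lnode *next; /* payload elided */ };

static _Atomic(struct lnode *) free_list;

static void llpush(struct lnode *n)
{
	struct lnode *head = atomic_load(&free_list);

	do {
		n->next = head;	/* re-done if the CAS below loses a race */
	} while (!atomic_compare_exchange_weak(&free_list, &head, n));
}

/* Detach the whole chain with one atomic exchange, then free it at
 * leisure; concurrent producers simply start a fresh chain.
 */
static void lldrain(void)
{
	struct lnode *n = atomic_exchange(&free_list, NULL);

	while (n) {
		struct lnode *next = n->next;

		free(n);
		n = next;
	}
}
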
| @@ -14545,6 +14553,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) | |||
| 14545 | * can happen also when the device is completely off. | 14553 | * can happen also when the device is completely off. |
| 14546 | */ | 14554 | */ |
| 14547 | intel_uncore_arm_unclaimed_mmio_detection(dev_priv); | 14555 | intel_uncore_arm_unclaimed_mmio_detection(dev_priv); |
| 14556 | |||
| 14557 | intel_atomic_helper_free_state(dev_priv); | ||
| 14548 | } | 14558 | } |
| 14549 | 14559 | ||
| 14550 | static void intel_atomic_commit_work(struct work_struct *work) | 14560 | static void intel_atomic_commit_work(struct work_struct *work) |
| @@ -14946,17 +14956,19 @@ static void intel_begin_crtc_commit(struct drm_crtc *crtc, | |||
| 14946 | to_intel_atomic_state(old_crtc_state->state); | 14956 | to_intel_atomic_state(old_crtc_state->state); |
| 14947 | bool modeset = needs_modeset(crtc->state); | 14957 | bool modeset = needs_modeset(crtc->state); |
| 14948 | 14958 | ||
| 14959 | if (!modeset && | ||
| 14960 | (intel_cstate->base.color_mgmt_changed || | ||
| 14961 | intel_cstate->update_pipe)) { | ||
| 14962 | intel_color_set_csc(crtc->state); | ||
| 14963 | intel_color_load_luts(crtc->state); | ||
| 14964 | } | ||
| 14965 | |||
| 14949 | /* Perform vblank evasion around commit operation */ | 14966 | /* Perform vblank evasion around commit operation */ |
| 14950 | intel_pipe_update_start(intel_crtc); | 14967 | intel_pipe_update_start(intel_crtc); |
| 14951 | 14968 | ||
| 14952 | if (modeset) | 14969 | if (modeset) |
| 14953 | goto out; | 14970 | goto out; |
| 14954 | 14971 | ||
| 14955 | if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) { | ||
| 14956 | intel_color_set_csc(crtc->state); | ||
| 14957 | intel_color_load_luts(crtc->state); | ||
| 14958 | } | ||
| 14959 | |||
| 14960 | if (intel_cstate->update_pipe) | 14972 | if (intel_cstate->update_pipe) |
| 14961 | intel_update_pipe_config(intel_crtc, old_intel_cstate); | 14973 | intel_update_pipe_config(intel_crtc, old_intel_cstate); |
| 14962 | else if (INTEL_GEN(dev_priv) >= 9) | 14974 | else if (INTEL_GEN(dev_priv) >= 9) |
| @@ -16599,18 +16611,6 @@ fail: | |||
| 16599 | drm_modeset_acquire_fini(&ctx); | 16611 | drm_modeset_acquire_fini(&ctx); |
| 16600 | } | 16612 | } |
| 16601 | 16613 | ||
| 16602 | static void intel_atomic_helper_free_state(struct work_struct *work) | ||
| 16603 | { | ||
| 16604 | struct drm_i915_private *dev_priv = | ||
| 16605 | container_of(work, typeof(*dev_priv), atomic_helper.free_work); | ||
| 16606 | struct intel_atomic_state *state, *next; | ||
| 16607 | struct llist_node *freed; | ||
| 16608 | |||
| 16609 | freed = llist_del_all(&dev_priv->atomic_helper.free_list); | ||
| 16610 | llist_for_each_entry_safe(state, next, freed, freed) | ||
| 16611 | drm_atomic_state_put(&state->base); | ||
| 16612 | } | ||
| 16613 | |||
| 16614 | int intel_modeset_init(struct drm_device *dev) | 16614 | int intel_modeset_init(struct drm_device *dev) |
| 16615 | { | 16615 | { |
| 16616 | struct drm_i915_private *dev_priv = to_i915(dev); | 16616 | struct drm_i915_private *dev_priv = to_i915(dev); |
| @@ -16631,7 +16631,7 @@ int intel_modeset_init(struct drm_device *dev) | |||
| 16631 | dev->mode_config.funcs = &intel_mode_funcs; | 16631 | dev->mode_config.funcs = &intel_mode_funcs; |
| 16632 | 16632 | ||
| 16633 | INIT_WORK(&dev_priv->atomic_helper.free_work, | 16633 | INIT_WORK(&dev_priv->atomic_helper.free_work, |
| 16634 | intel_atomic_helper_free_state); | 16634 | intel_atomic_helper_free_state_worker); |
| 16635 | 16635 | ||
| 16636 | intel_init_quirks(dev); | 16636 | intel_init_quirks(dev); |
| 16637 | 16637 | ||
| @@ -16696,12 +16696,11 @@ int intel_modeset_init(struct drm_device *dev) | |||
| 16696 | } | 16696 | } |
| 16697 | } | 16697 | } |
| 16698 | 16698 | ||
| 16699 | intel_update_czclk(dev_priv); | ||
| 16700 | intel_update_cdclk(dev_priv); | ||
| 16701 | dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; | ||
| 16702 | |||
| 16703 | intel_shared_dpll_init(dev); | 16699 | intel_shared_dpll_init(dev); |
| 16704 | 16700 | ||
| 16701 | intel_update_czclk(dev_priv); | ||
| 16702 | intel_modeset_init_hw(dev); | ||
| 16703 | |||
| 16705 | if (dev_priv->max_cdclk_freq == 0) | 16704 | if (dev_priv->max_cdclk_freq == 0) |
| 16706 | intel_update_max_cdclk(dev_priv); | 16705 | intel_update_max_cdclk(dev_priv); |
| 16707 | 16706 | ||
| @@ -17258,8 +17257,6 @@ void intel_modeset_gem_init(struct drm_device *dev) | |||
| 17258 | 17257 | ||
| 17259 | intel_init_gt_powersave(dev_priv); | 17258 | intel_init_gt_powersave(dev_priv); |
| 17260 | 17259 | ||
| 17261 | intel_modeset_init_hw(dev); | ||
| 17262 | |||
| 17263 | intel_setup_overlay(dev_priv); | 17260 | intel_setup_overlay(dev_priv); |
| 17264 | } | 17261 | } |
| 17265 | 17262 | ||
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 371acf109e34..ab1be5c80ea5 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
| @@ -105,6 +105,8 @@ intel_engine_setup(struct drm_i915_private *dev_priv, | |||
| 105 | /* Nothing to do here, execute in order of dependencies */ | 105 | /* Nothing to do here, execute in order of dependencies */ |
| 106 | engine->schedule = NULL; | 106 | engine->schedule = NULL; |
| 107 | 107 | ||
| 108 | ATOMIC_INIT_NOTIFIER_HEAD(&engine->context_status_notifier); | ||
| 109 | |||
| 108 | dev_priv->engine[id] = engine; | 110 | dev_priv->engine[id] = engine; |
| 109 | return 0; | 111 | return 0; |
| 110 | } | 112 | } |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 1b8ba2e77539..2d449fb5d1d2 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
| @@ -357,14 +357,13 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | |||
| 357 | bool *enabled, int width, int height) | 357 | bool *enabled, int width, int height) |
| 358 | { | 358 | { |
| 359 | struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); | 359 | struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); |
| 360 | unsigned long conn_configured, mask; | 360 | unsigned long conn_configured, conn_seq, mask; |
| 361 | unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); | 361 | unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); |
| 362 | int i, j; | 362 | int i, j; |
| 363 | bool *save_enabled; | 363 | bool *save_enabled; |
| 364 | bool fallback = true; | 364 | bool fallback = true; |
| 365 | int num_connectors_enabled = 0; | 365 | int num_connectors_enabled = 0; |
| 366 | int num_connectors_detected = 0; | 366 | int num_connectors_detected = 0; |
| 367 | int pass = 0; | ||
| 368 | 367 | ||
| 369 | save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL); | 368 | save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL); |
| 370 | if (!save_enabled) | 369 | if (!save_enabled) |
| @@ -374,6 +373,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | |||
| 374 | mask = BIT(count) - 1; | 373 | mask = BIT(count) - 1; |
| 375 | conn_configured = 0; | 374 | conn_configured = 0; |
| 376 | retry: | 375 | retry: |
| 376 | conn_seq = conn_configured; | ||
| 377 | for (i = 0; i < count; i++) { | 377 | for (i = 0; i < count; i++) { |
| 378 | struct drm_fb_helper_connector *fb_conn; | 378 | struct drm_fb_helper_connector *fb_conn; |
| 379 | struct drm_connector *connector; | 379 | struct drm_connector *connector; |
| @@ -387,7 +387,7 @@ retry: | |||
| 387 | if (conn_configured & BIT(i)) | 387 | if (conn_configured & BIT(i)) |
| 388 | continue; | 388 | continue; |
| 389 | 389 | ||
| 390 | if (pass == 0 && !connector->has_tile) | 390 | if (conn_seq == 0 && !connector->has_tile) |
| 391 | continue; | 391 | continue; |
| 392 | 392 | ||
| 393 | if (connector->status == connector_status_connected) | 393 | if (connector->status == connector_status_connected) |
| @@ -498,10 +498,8 @@ retry: | |||
| 498 | conn_configured |= BIT(i); | 498 | conn_configured |= BIT(i); |
| 499 | } | 499 | } |
| 500 | 500 | ||
| 501 | if ((conn_configured & mask) != mask) { | 501 | if ((conn_configured & mask) != mask && conn_configured != conn_seq) |
| 502 | pass++; | ||
| 503 | goto retry; | 502 | goto retry; |
| 504 | } | ||
| 505 | 503 | ||
| 506 | /* | 504 | /* |
| 507 | * If the BIOS didn't enable everything it could, fall back to have the | 505 | * If the BIOS didn't enable everything it could, fall back to have the |
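
Replacing the pass counter with conn_seq turns the fbdev retry into a fixed-point loop: snapshot the configured mask at the top, and retry only while a pass actually configured something new, which also preserves the old tiled-connectors-first behaviour via the conn_seq == 0 test. A standalone sketch of the loop shape (try_configure() is an illustrative stub):

/* Sketch: retry-until-no-progress over a bitmask, the conn_seq
 * pattern above. Standalone C; try_configure() is a stub.
 */
#include <stdbool.h>

static bool try_configure(unsigned int i, bool first_pass)
{
	/* Stub: pretend connector 0 is tiled and configurable on the
	 * first pass, everything else only on later passes.
	 */
	return first_pass ? i == 0 : true;
}

static unsigned long configure_all(unsigned int count)
{
	unsigned long mask = (1ul << count) - 1;
	unsigned long conn_configured = 0, conn_seq;

retry:
	conn_seq = conn_configured;	/* snapshot: progress detector */
	for (unsigned int i = 0; i < count; i++) {
		if (conn_configured & (1ul << i))
			continue;
		/* The first pass (nothing configured yet) handles only
		 * a preferred subset, like the has_tile test above.
		 */
		if (try_configure(i, conn_seq == 0))
			conn_configured |= 1ul << i;
	}

	/* Retry only if incomplete AND this pass made progress, so a
	 * connector that can never be configured cannot loop forever.
	 */
	if ((conn_configured & mask) != mask && conn_configured != conn_seq)
		goto retry;

	return conn_configured;
}
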
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c index d23c0fcff751..8c04eca84351 100644 --- a/drivers/gpu/drm/i915/intel_gvt.c +++ b/drivers/gpu/drm/i915/intel_gvt.c | |||
| @@ -77,6 +77,11 @@ int intel_gvt_init(struct drm_i915_private *dev_priv) | |||
| 77 | goto bail; | 77 | goto bail; |
| 78 | } | 78 | } |
| 79 | 79 | ||
| 80 | if (!i915.enable_execlists) { | ||
| 81 | DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n"); | ||
| 82 | goto bail; | ||
| 83 | } | ||
| 84 | |||
| 80 | /* | 85 | /* |
| 81 | * We're not in host or fail to find a MPT module, disable GVT-g | 86 | * We're not in host or fail to find a MPT module, disable GVT-g |
| 82 | */ | 87 | */ |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index ebae2bd83918..24b2fa5b6282 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
| @@ -1298,16 +1298,34 @@ intel_hdmi_mode_valid(struct drm_connector *connector, | |||
| 1298 | 1298 | ||
| 1299 | static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) | 1299 | static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) |
| 1300 | { | 1300 | { |
| 1301 | struct drm_device *dev = crtc_state->base.crtc->dev; | 1301 | struct drm_i915_private *dev_priv = |
| 1302 | to_i915(crtc_state->base.crtc->dev); | ||
| 1303 | struct drm_atomic_state *state = crtc_state->base.state; | ||
| 1304 | struct drm_connector_state *connector_state; | ||
| 1305 | struct drm_connector *connector; | ||
| 1306 | int i; | ||
| 1302 | 1307 | ||
| 1303 | if (HAS_GMCH_DISPLAY(to_i915(dev))) | 1308 | if (HAS_GMCH_DISPLAY(dev_priv)) |
| 1304 | return false; | 1309 | return false; |
| 1305 | 1310 | ||
| 1306 | /* | 1311 | /* |
| 1307 | * HDMI 12bpc affects the clocks, so it's only possible | 1312 | * HDMI 12bpc affects the clocks, so it's only possible |
| 1308 | * when not cloning with other encoder types. | 1313 | * when not cloning with other encoder types. |
| 1309 | */ | 1314 | */ |
| 1310 | return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI; | 1315 | if (crtc_state->output_types != 1 << INTEL_OUTPUT_HDMI) |
| 1316 | return false; | ||
| 1317 | |||
| 1318 | for_each_connector_in_state(state, connector, connector_state, i) { | ||
| 1319 | const struct drm_display_info *info = &connector->display_info; | ||
| 1320 | |||
| 1321 | if (connector_state->crtc != crtc_state->base.crtc) | ||
| 1322 | continue; | ||
| 1323 | |||
| 1324 | if ((info->edid_hdmi_dc_modes & DRM_EDID_HDMI_DC_36) == 0) | ||
| 1325 | return false; | ||
| 1326 | } | ||
| 1327 | |||
| 1328 | return true; | ||
| 1311 | } | 1329 | } |
| 1312 | 1330 | ||
| 1313 | bool intel_hdmi_compute_config(struct intel_encoder *encoder, | 1331 | bool intel_hdmi_compute_config(struct intel_encoder *encoder, |
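
The expanded hdmi_12bpc_possible() stops assuming every sink can take 12bpc: after confirming the crtc drives HDMI alone, it walks the connectors bound to this crtc in the atomic state and requires each EDID to advertise 36-bit deep color. A standalone sketch of that all-sinks-must-agree walk (simplified stand-ins for the atomic state):

/* Sketch: allow a mode upgrade only when every sink on the crtc
 * advertises support, as the reworked hdmi_12bpc_possible() does.
 * Standalone C with simplified stand-ins for the atomic state.
 */
#include <stdbool.h>
#include <stddef.h>

#define DC_36BIT (1u << 0)	/* stand-in for DRM_EDID_HDMI_DC_36 */

struct conn { int crtc_id; unsigned int edid_dc_modes; };

static bool all_sinks_support_12bpc(const struct conn *conns, size_t n,
				    int crtc_id)
{
	for (size_t i = 0; i < n; i++) {
		if (conns[i].crtc_id != crtc_id)
			continue;	/* bound to some other crtc */
		if (!(conns[i].edid_dc_modes & DC_36BIT))
			return false;	/* one sink vetoes deep color */
	}
	return true;
}
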
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index b62e3f8ad415..54208bef7a83 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c | |||
| @@ -219,7 +219,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) | |||
| 219 | } | 219 | } |
| 220 | } | 220 | } |
| 221 | } | 221 | } |
| 222 | if (dev_priv->display.hpd_irq_setup) | 222 | if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) |
| 223 | dev_priv->display.hpd_irq_setup(dev_priv); | 223 | dev_priv->display.hpd_irq_setup(dev_priv); |
| 224 | spin_unlock_irq(&dev_priv->irq_lock); | 224 | spin_unlock_irq(&dev_priv->irq_lock); |
| 225 | 225 | ||
| @@ -425,7 +425,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, | |||
| 425 | } | 425 | } |
| 426 | } | 426 | } |
| 427 | 427 | ||
| 428 | if (storm_detected) | 428 | if (storm_detected && dev_priv->display_irqs_enabled) |
| 429 | dev_priv->display.hpd_irq_setup(dev_priv); | 429 | dev_priv->display.hpd_irq_setup(dev_priv); |
| 430 | spin_unlock(&dev_priv->irq_lock); | 430 | spin_unlock(&dev_priv->irq_lock); |
| 431 | 431 | ||
| @@ -471,10 +471,12 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) | |||
| 471 | * Interrupt setup is already guaranteed to be single-threaded, this is | 471 | * Interrupt setup is already guaranteed to be single-threaded, this is |
| 472 | * just to make the assert_spin_locked checks happy. | 472 | * just to make the assert_spin_locked checks happy. |
| 473 | */ | 473 | */ |
| 474 | spin_lock_irq(&dev_priv->irq_lock); | 474 | if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup) { |
| 475 | if (dev_priv->display.hpd_irq_setup) | 475 | spin_lock_irq(&dev_priv->irq_lock); |
| 476 | dev_priv->display.hpd_irq_setup(dev_priv); | 476 | if (dev_priv->display_irqs_enabled) |
| 477 | spin_unlock_irq(&dev_priv->irq_lock); | 477 | dev_priv->display.hpd_irq_setup(dev_priv); |
| 478 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 479 | } | ||
| 478 | } | 480 | } |
| 479 | 481 | ||
| 480 | static void i915_hpd_poll_init_work(struct work_struct *work) | 482 | static void i915_hpd_poll_init_work(struct work_struct *work) |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index ebf8023d21e6..471af3b480ad 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
| @@ -345,7 +345,8 @@ execlists_context_status_change(struct drm_i915_gem_request *rq, | |||
| 345 | if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) | 345 | if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) |
| 346 | return; | 346 | return; |
| 347 | 347 | ||
| 348 | atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq); | 348 | atomic_notifier_call_chain(&rq->engine->context_status_notifier, |
| 349 | status, rq); | ||
| 349 | } | 350 | } |
| 350 | 351 | ||
| 351 | static void | 352 | static void |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 249623d45be0..6a29784d2b41 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -4891,6 +4891,12 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) | |||
| 4891 | break; | 4891 | break; |
| 4892 | } | 4892 | } |
| 4893 | 4893 | ||
| 4894 | /* When byt can survive without system hang with dynamic | ||
| 4895 | * sw freq adjustments, this restriction can be lifted. | ||
| 4896 | */ | ||
| 4897 | if (IS_VALLEYVIEW(dev_priv)) | ||
| 4898 | goto skip_hw_write; | ||
| 4899 | |||
| 4894 | I915_WRITE(GEN6_RP_UP_EI, | 4900 | I915_WRITE(GEN6_RP_UP_EI, |
| 4895 | GT_INTERVAL_FROM_US(dev_priv, ei_up)); | 4901 | GT_INTERVAL_FROM_US(dev_priv, ei_up)); |
| 4896 | I915_WRITE(GEN6_RP_UP_THRESHOLD, | 4902 | I915_WRITE(GEN6_RP_UP_THRESHOLD, |
| @@ -4911,6 +4917,7 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) | |||
| 4911 | GEN6_RP_UP_BUSY_AVG | | 4917 | GEN6_RP_UP_BUSY_AVG | |
| 4912 | GEN6_RP_DOWN_IDLE_AVG); | 4918 | GEN6_RP_DOWN_IDLE_AVG); |
| 4913 | 4919 | ||
| 4920 | skip_hw_write: | ||
| 4914 | dev_priv->rps.power = new_power; | 4921 | dev_priv->rps.power = new_power; |
| 4915 | dev_priv->rps.up_threshold = threshold_up; | 4922 | dev_priv->rps.up_threshold = threshold_up; |
| 4916 | dev_priv->rps.down_threshold = threshold_down; | 4923 | dev_priv->rps.down_threshold = threshold_down; |
| @@ -4921,8 +4928,9 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) | |||
| 4921 | { | 4928 | { |
| 4922 | u32 mask = 0; | 4929 | u32 mask = 0; |
| 4923 | 4930 | ||
| 4931 | /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */ | ||
| 4924 | if (val > dev_priv->rps.min_freq_softlimit) | 4932 | if (val > dev_priv->rps.min_freq_softlimit) |
| 4925 | mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; | 4933 | mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; |
| 4926 | if (val < dev_priv->rps.max_freq_softlimit) | 4934 | if (val < dev_priv->rps.max_freq_softlimit) |
| 4927 | mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; | 4935 | mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; |
| 4928 | 4936 | ||
| @@ -5032,7 +5040,7 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv) | |||
| 5032 | { | 5040 | { |
| 5033 | mutex_lock(&dev_priv->rps.hw_lock); | 5041 | mutex_lock(&dev_priv->rps.hw_lock); |
| 5034 | if (dev_priv->rps.enabled) { | 5042 | if (dev_priv->rps.enabled) { |
| 5035 | if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) | 5043 | if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED) |
| 5036 | gen6_rps_reset_ei(dev_priv); | 5044 | gen6_rps_reset_ei(dev_priv); |
| 5037 | I915_WRITE(GEN6_PMINTRMSK, | 5045 | I915_WRITE(GEN6_PMINTRMSK, |
| 5038 | gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); | 5046 | gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); |
| @@ -7916,10 +7924,10 @@ static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox, | |||
| 7916 | * @timeout_base_ms: timeout for polling with preemption enabled | 7924 | * @timeout_base_ms: timeout for polling with preemption enabled |
| 7917 | * | 7925 | * |
| 7918 | * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE | 7926 | * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE |
| 7919 | * reports an error or an overall timeout of @timeout_base_ms+10 ms expires. | 7927 | * reports an error or an overall timeout of @timeout_base_ms+50 ms expires. |
| 7920 | * The request is acknowledged once the PCODE reply dword equals @reply after | 7928 | * The request is acknowledged once the PCODE reply dword equals @reply after |
| 7921 | * applying @reply_mask. Polling is first attempted with preemption enabled | 7929 | * applying @reply_mask. Polling is first attempted with preemption enabled |
| 7922 | * for @timeout_base_ms and if this times out for another 10 ms with | 7930 | * for @timeout_base_ms and if this times out for another 50 ms with |
| 7923 | * preemption disabled. | 7931 | * preemption disabled. |
| 7924 | * | 7932 | * |
| 7925 | * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some | 7933 | * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some |
| @@ -7955,14 +7963,15 @@ int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request, | |||
| 7955 | * worst case) _and_ PCODE was busy for some reason even after a | 7963 | * worst case) _and_ PCODE was busy for some reason even after a |
| 7956 | * (queued) request and @timeout_base_ms delay. As a workaround retry | 7964 | * (queued) request and @timeout_base_ms delay. As a workaround retry |
| 7957 | * the poll with preemption disabled to maximize the number of | 7965 | * the poll with preemption disabled to maximize the number of |
| 7958 | * requests. Increase the timeout from @timeout_base_ms to 10ms to | 7966 | * requests. Increase the timeout from @timeout_base_ms to 50ms to |
| 7959 | * account for interrupts that could reduce the number of these | 7967 | * account for interrupts that could reduce the number of these |
| 7960 | * requests. | 7968 | * requests, and for any quirks of the PCODE firmware that delay |
| 7969 | * the request completion. | ||
| 7961 | */ | 7970 | */ |
| 7962 | DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n"); | 7971 | DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n"); |
| 7963 | WARN_ON_ONCE(timeout_base_ms > 3); | 7972 | WARN_ON_ONCE(timeout_base_ms > 3); |
| 7964 | preempt_disable(); | 7973 | preempt_disable(); |
| 7965 | ret = wait_for_atomic(COND, 10); | 7974 | ret = wait_for_atomic(COND, 50); |
| 7966 | preempt_enable(); | 7975 | preempt_enable(); |
| 7967 | 7976 | ||
| 7968 | out: | 7977 | out: |
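
skl_pcode_request() polls in two phases: a preemptible poll within the caller's budget, then, as the workaround described above, a short preemption-disabled burst now stretched from 10 ms to 50 ms. A standalone POSIX sketch of the same two-phase shape (pcode_done() is a stub; the sleep granularity is an assumption):

/* Sketch: two-phase condition polling, a polite phase with sleeps
 * followed by a short tight-loop retry phase, mirroring the shape of
 * skl_pcode_request(). Standalone POSIX C.
 */
#include <errno.h>
#include <stdbool.h>
#include <time.h>

static bool pcode_done(void)
{
	return false;	/* stub: PCODE stays busy, forcing the timeout */
}

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000ll + ts.tv_nsec / 1000000;
}

static int poll_two_phase(int timeout_base_ms)
{
	long long deadline = now_ms() + timeout_base_ms;

	/* Phase 1: polite polling with short sleeps (preemptible in
	 * the kernel version), within the caller's base budget.
	 */
	while (now_ms() < deadline) {
		if (pcode_done())
			return 0;
		nanosleep(&(struct timespec){ .tv_nsec = 500000 }, NULL);
	}

	/* Phase 2: the workaround, a tight 50 ms burst of retries,
	 * matching the wait_for_atomic(COND, 50) above.
	 */
	deadline = now_ms() + 50;
	while (now_ms() < deadline)
		if (pcode_done())
			return 0;

	return -ETIMEDOUT;
}
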
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 79c2b8d72322..13dccb18cd43 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
| @@ -403,6 +403,9 @@ struct intel_engine_cs { | |||
| 403 | */ | 403 | */ |
| 404 | struct i915_gem_context *legacy_active_context; | 404 | struct i915_gem_context *legacy_active_context; |
| 405 | 405 | ||
| 406 | /* status_notifier: list of callbacks for context-switch changes */ | ||
| 407 | struct atomic_notifier_head context_status_notifier; | ||
| 408 | |||
| 406 | struct intel_engine_hangcheck hangcheck; | 409 | struct intel_engine_hangcheck hangcheck; |
| 407 | 410 | ||
| 408 | bool needs_cmd_parser; | 411 | bool needs_cmd_parser; |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 9ef54688872a..9481ca9a3ae7 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
| @@ -254,9 +254,6 @@ skl_update_plane(struct drm_plane *drm_plane, | |||
| 254 | int scaler_id = plane_state->scaler_id; | 254 | int scaler_id = plane_state->scaler_id; |
| 255 | const struct intel_scaler *scaler; | 255 | const struct intel_scaler *scaler; |
| 256 | 256 | ||
| 257 | DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", | ||
| 258 | plane_id, PS_PLANE_SEL(plane_id)); | ||
| 259 | |||
| 260 | scaler = &crtc_state->scaler_state.scalers[scaler_id]; | 257 | scaler = &crtc_state->scaler_state.scalers[scaler_id]; |
| 261 | 258 | ||
| 262 | I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), | 259 | I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), |
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index abe08885a5ba..b7ff592b14f5 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
| @@ -119,6 +119,8 @@ fw_domains_get(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma | |||
| 119 | 119 | ||
| 120 | for_each_fw_domain_masked(d, fw_domains, dev_priv) | 120 | for_each_fw_domain_masked(d, fw_domains, dev_priv) |
| 121 | fw_domain_wait_ack(d); | 121 | fw_domain_wait_ack(d); |
| 122 | |||
| 123 | dev_priv->uncore.fw_domains_active |= fw_domains; | ||
| 122 | } | 124 | } |
| 123 | 125 | ||
| 124 | static void | 126 | static void |
| @@ -130,6 +132,8 @@ fw_domains_put(struct drm_i915_private *dev_priv, enum forcewake_domains fw_doma | |||
| 130 | fw_domain_put(d); | 132 | fw_domain_put(d); |
| 131 | fw_domain_posting_read(d); | 133 | fw_domain_posting_read(d); |
| 132 | } | 134 | } |
| 135 | |||
| 136 | dev_priv->uncore.fw_domains_active &= ~fw_domains; | ||
| 133 | } | 137 | } |
| 134 | 138 | ||
| 135 | static void | 139 | static void |
| @@ -240,10 +244,8 @@ intel_uncore_fw_release_timer(struct hrtimer *timer) | |||
| 240 | if (WARN_ON(domain->wake_count == 0)) | 244 | if (WARN_ON(domain->wake_count == 0)) |
| 241 | domain->wake_count++; | 245 | domain->wake_count++; |
| 242 | 246 | ||
| 243 | if (--domain->wake_count == 0) { | 247 | if (--domain->wake_count == 0) |
| 244 | dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask); | 248 | dev_priv->uncore.funcs.force_wake_put(dev_priv, domain->mask); |
| 245 | dev_priv->uncore.fw_domains_active &= ~domain->mask; | ||
| 246 | } | ||
| 247 | 249 | ||
| 248 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); | 250 | spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); |
| 249 | 251 | ||
| @@ -454,10 +456,8 @@ static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, | |||
| 454 | fw_domains &= ~domain->mask; | 456 | fw_domains &= ~domain->mask; |
| 455 | } | 457 | } |
| 456 | 458 | ||
| 457 | if (fw_domains) { | 459 | if (fw_domains) |
| 458 | dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); | 460 | dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); |
| 459 | dev_priv->uncore.fw_domains_active |= fw_domains; | ||
| 460 | } | ||
| 461 | } | 461 | } |
| 462 | 462 | ||
| 463 | /** | 463 | /** |
| @@ -968,7 +968,6 @@ static noinline void ___force_wake_auto(struct drm_i915_private *dev_priv, | |||
| 968 | fw_domain_arm_timer(domain); | 968 | fw_domain_arm_timer(domain); |
| 969 | 969 | ||
| 970 | dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); | 970 | dev_priv->uncore.funcs.force_wake_get(dev_priv, fw_domains); |
| 971 | dev_priv->uncore.fw_domains_active |= fw_domains; | ||
| 972 | } | 971 | } |
| 973 | 972 | ||
| 974 | static inline void __force_wake_auto(struct drm_i915_private *dev_priv, | 973 | static inline void __force_wake_auto(struct drm_i915_private *dev_priv, |
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index af267c35d813..ee5883f59be5 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c | |||
| @@ -147,9 +147,6 @@ static int omap_gem_dmabuf_mmap(struct dma_buf *buffer, | |||
| 147 | struct drm_gem_object *obj = buffer->priv; | 147 | struct drm_gem_object *obj = buffer->priv; |
| 148 | int ret = 0; | 148 | int ret = 0; |
| 149 | 149 | ||
| 150 | if (WARN_ON(!obj->filp)) | ||
| 151 | return -EINVAL; | ||
| 152 | |||
| 153 | ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); | 150 | ret = drm_gem_mmap_obj(obj, omap_gem_mmap_size(obj), vma); |
| 154 | if (ret < 0) | 151 | if (ret < 0) |
| 155 | return ret; | 152 | return ret; |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index d12b8978142f..c7af9fdd20c7 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
| @@ -2984,6 +2984,16 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
| 2984 | (rdev->pdev->device == 0x6667)) { | 2984 | (rdev->pdev->device == 0x6667)) { |
| 2985 | max_sclk = 75000; | 2985 | max_sclk = 75000; |
| 2986 | } | 2986 | } |
| 2987 | } else if (rdev->family == CHIP_OLAND) { | ||
| 2988 | if ((rdev->pdev->revision == 0xC7) || | ||
| 2989 | (rdev->pdev->revision == 0x80) || | ||
| 2990 | (rdev->pdev->revision == 0x81) || | ||
| 2991 | (rdev->pdev->revision == 0x83) || | ||
| 2992 | (rdev->pdev->revision == 0x87) || | ||
| 2993 | (rdev->pdev->device == 0x6604) || | ||
| 2994 | (rdev->pdev->device == 0x6605)) { | ||
| 2995 | max_sclk = 75000; | ||
| 2996 | } | ||
| 2987 | } | 2997 | } |
| 2988 | 2998 | ||
| 2989 | if (rps->vce_active) { | 2999 | if (rps->vce_active) { |
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index f80bf9385e41..d745e8b50fb8 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c | |||
| @@ -464,6 +464,7 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc) | |||
| 464 | { | 464 | { |
| 465 | struct drm_device *dev = crtc->dev; | 465 | struct drm_device *dev = crtc->dev; |
| 466 | struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); | 466 | struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); |
| 467 | unsigned long flags; | ||
| 467 | 468 | ||
| 468 | WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); | 469 | WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); |
| 469 | mutex_lock(&tilcdc_crtc->enable_lock); | 470 | mutex_lock(&tilcdc_crtc->enable_lock); |
| @@ -484,7 +485,17 @@ static void tilcdc_crtc_enable(struct drm_crtc *crtc) | |||
| 484 | tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG, | 485 | tilcdc_write_mask(dev, LCDC_RASTER_CTRL_REG, |
| 485 | LCDC_PALETTE_LOAD_MODE(DATA_ONLY), | 486 | LCDC_PALETTE_LOAD_MODE(DATA_ONLY), |
| 486 | LCDC_PALETTE_LOAD_MODE_MASK); | 487 | LCDC_PALETTE_LOAD_MODE_MASK); |
| 488 | |||
| 489 | /* There is no real chance for a race here as the time stamp | ||
| 490 | * is taken before the raster DMA is started. The spin-lock is | ||
| 491 | * taken to have a memory barrier after taking the time-stamp | ||
| 492 | * and to avoid a context switch between taking the stamp and | ||
| 493 | * enabling the raster. | ||
| 494 | */ | ||
| 495 | spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); | ||
| 496 | tilcdc_crtc->last_vblank = ktime_get(); | ||
| 487 | tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE); | 497 | tilcdc_set(dev, LCDC_RASTER_CTRL_REG, LCDC_RASTER_ENABLE); |
| 498 | spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); | ||
| 488 | 499 | ||
| 489 | drm_crtc_vblank_on(crtc); | 500 | drm_crtc_vblank_on(crtc); |
| 490 | 501 | ||
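
The tilcdc enable path now takes the vblank timestamp and sets LCDC_RASTER_ENABLE inside one irq_lock region, so the stamp is published with a barrier before DMA can start and no context switch can widen the gap between the two. A standalone sketch of the stamp-then-start ordering (a pthread mutex stands in for spin_lock_irqsave()):

/* Sketch: publish a timestamp and start the engine atomically, as
 * the tilcdc enable path above does. Standalone POSIX C; the mutex
 * stands in for spin_lock_irqsave() and start_raster() for the
 * LCDC_RASTER_ENABLE register write.
 */
#include <pthread.h>
#include <time.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static struct timespec last_vblank;

static void start_raster(void)
{
	/* stub for the raster-enable register write */
}

static void enable_with_stamp(void)
{
	pthread_mutex_lock(&irq_lock);
	/* Stamp first: once DMA is running, any vblank handler that
	 * takes the lock sees a timestamp no older than the moment
	 * the raster was started.
	 */
	clock_gettime(CLOCK_MONOTONIC, &last_vblank);
	start_raster();
	pthread_mutex_unlock(&irq_lock);
}
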
| @@ -539,7 +550,6 @@ static void tilcdc_crtc_off(struct drm_crtc *crtc, bool shutdown) | |||
| 539 | } | 550 | } |
| 540 | 551 | ||
| 541 | drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); | 552 | drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq); |
| 542 | tilcdc_crtc->last_vblank = 0; | ||
| 543 | 553 | ||
| 544 | tilcdc_crtc->enabled = false; | 554 | tilcdc_crtc->enabled = false; |
| 545 | mutex_unlock(&tilcdc_crtc->enable_lock); | 555 | mutex_unlock(&tilcdc_crtc->enable_lock); |
| @@ -602,7 +612,6 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc, | |||
| 602 | { | 612 | { |
| 603 | struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); | 613 | struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc); |
| 604 | struct drm_device *dev = crtc->dev; | 614 | struct drm_device *dev = crtc->dev; |
| 605 | unsigned long flags; | ||
| 606 | 615 | ||
| 607 | WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); | 616 | WARN_ON(!drm_modeset_is_locked(&crtc->mutex)); |
| 608 | 617 | ||
| @@ -614,28 +623,30 @@ int tilcdc_crtc_update_fb(struct drm_crtc *crtc, | |||
| 614 | drm_framebuffer_reference(fb); | 623 | drm_framebuffer_reference(fb); |
| 615 | 624 | ||
| 616 | crtc->primary->fb = fb; | 625 | crtc->primary->fb = fb; |
| 626 | tilcdc_crtc->event = event; | ||
| 617 | 627 | ||
| 618 | spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); | 628 | mutex_lock(&tilcdc_crtc->enable_lock); |
| 619 | 629 | ||
| 620 | if (crtc->hwmode.vrefresh && ktime_to_ns(tilcdc_crtc->last_vblank)) { | 630 | if (tilcdc_crtc->enabled) { |
| 631 | unsigned long flags; | ||
| 621 | ktime_t next_vblank; | 632 | ktime_t next_vblank; |
| 622 | s64 tdiff; | 633 | s64 tdiff; |
| 623 | 634 | ||
| 624 | next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, | 635 | spin_lock_irqsave(&tilcdc_crtc->irq_lock, flags); |
| 625 | 1000000 / crtc->hwmode.vrefresh); | ||
| 626 | 636 | ||
| 637 | next_vblank = ktime_add_us(tilcdc_crtc->last_vblank, | ||
| 638 | 1000000 / crtc->hwmode.vrefresh); | ||
| 627 | tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); | 639 | tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get())); |
| 628 | 640 | ||
| 629 | if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) | 641 | if (tdiff < TILCDC_VBLANK_SAFETY_THRESHOLD_US) |
| 630 | tilcdc_crtc->next_fb = fb; | 642 | tilcdc_crtc->next_fb = fb; |
| 631 | } | 643 | else |
| 632 | 644 | set_scanout(crtc, fb); | |
| 633 | if (tilcdc_crtc->next_fb != fb) | ||
| 634 | set_scanout(crtc, fb); | ||
| 635 | 645 | ||
| 636 | tilcdc_crtc->event = event; | 646 | spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); |
| 647 | } | ||
| 637 | 648 | ||
| 638 | spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); | 649 | mutex_unlock(&tilcdc_crtc->enable_lock); |
| 639 | 650 | ||
| 640 | return 0; | 651 | return 0; |
| 641 | } | 652 | } |
| @@ -1036,5 +1047,5 @@ int tilcdc_crtc_create(struct drm_device *dev) | |||
| 1036 | 1047 | ||
| 1037 | fail: | 1048 | fail: |
| 1038 | tilcdc_crtc_destroy(crtc); | 1049 | tilcdc_crtc_destroy(crtc); |
| 1039 | return -ENOMEM; | 1050 | return ret; |
| 1040 | } | 1051 | } |
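
The rework above ultimately rests on one computation: estimate the next vblank from the timestamp taken when the raster was enabled (stamped under irq_lock, so the stamp and the enable cannot be separated), and hand the flip to the vblank IRQ whenever it would land inside the safety window. A minimal sketch of that decision, assuming last_vblank is the enable-time stamp from the hunk above and vrefresh is in Hz; the helper name is illustrative:

    static bool tilcdc_flip_too_close(ktime_t last_vblank,
                                      unsigned int vrefresh, s64 safety_us)
    {
            /* One frame lasts 1000000 / vrefresh microseconds. */
            ktime_t next_vblank = ktime_add_us(last_vblank,
                                               1000000 / vrefresh);
            s64 tdiff = ktime_to_us(ktime_sub(next_vblank, ktime_get()));

            /* Inside the window: let the vblank IRQ latch next_fb. */
            return tdiff < safety_us;
    }
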
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 1aeb80e52424..8c54cb8f5d6d 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
| @@ -175,11 +175,11 @@ config HID_CHERRY | |||
| 175 | Support for Cherry Cymotion keyboard. | 175 | Support for Cherry Cymotion keyboard. |
| 176 | 176 | ||
| 177 | config HID_CHICONY | 177 | config HID_CHICONY |
| 178 | tristate "Chicony Tactical pad" | 178 | tristate "Chicony devices" |
| 179 | depends on HID | 179 | depends on HID |
| 180 | default !EXPERT | 180 | default !EXPERT |
| 181 | ---help--- | 181 | ---help--- |
| 182 | Support for Chicony Tactical pad. | 182 | Support for Chicony Tactical pad and special keys on Chicony keyboards. |
| 183 | 183 | ||
| 184 | config HID_CORSAIR | 184 | config HID_CORSAIR |
| 185 | tristate "Corsair devices" | 185 | tristate "Corsair devices" |
| @@ -190,6 +190,7 @@ config HID_CORSAIR | |||
| 190 | 190 | ||
| 191 | Supported devices: | 191 | Supported devices: |
| 192 | - Vengeance K90 | 192 | - Vengeance K90 |
| 193 | - Scimitar PRO RGB | ||
| 193 | 194 | ||
| 194 | config HID_PRODIKEYS | 195 | config HID_PRODIKEYS |
| 195 | tristate "Prodikeys PC-MIDI Keyboard support" | 196 | tristate "Prodikeys PC-MIDI Keyboard support" |
diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c index bc3cec199fee..f04ed9aabc3f 100644 --- a/drivers/hid/hid-chicony.c +++ b/drivers/hid/hid-chicony.c | |||
| @@ -86,6 +86,7 @@ static const struct hid_device_id ch_devices[] = { | |||
| 86 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, | 86 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, |
| 87 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, | 87 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, |
| 88 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, | 88 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, |
| 89 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, | ||
| 89 | { } | 90 | { } |
| 90 | }; | 91 | }; |
| 91 | MODULE_DEVICE_TABLE(hid, ch_devices); | 92 | MODULE_DEVICE_TABLE(hid, ch_devices); |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index e9e87d337446..3ceb4a2af381 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
| @@ -1870,6 +1870,7 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 1870 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, | 1870 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, |
| 1871 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, | 1871 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, |
| 1872 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, | 1872 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, |
| 1873 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, | ||
| 1873 | { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, | 1874 | { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, |
| 1874 | { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, | 1875 | { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, |
| 1875 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, | 1876 | { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, |
| @@ -1910,6 +1911,7 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
| 1910 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, | 1911 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, |
| 1911 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, | 1912 | { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, |
| 1912 | { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, | 1913 | { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, |
| 1914 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, | ||
| 1913 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, | 1915 | { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) }, |
| 1914 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, | 1916 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, |
| 1915 | { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, | 1917 | { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, |
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c index c0303f61c26a..9ba5d98a1180 100644 --- a/drivers/hid/hid-corsair.c +++ b/drivers/hid/hid-corsair.c | |||
| @@ -3,8 +3,10 @@ | |||
| 3 | * | 3 | * |
| 4 | * Supported devices: | 4 | * Supported devices: |
| 5 | * - Vengeance K90 Keyboard | 5 | * - Vengeance K90 Keyboard |
| 6 | * - Scimitar PRO RGB Gaming Mouse | ||
| 6 | * | 7 | * |
| 7 | * Copyright (c) 2015 Clement Vuchener | 8 | * Copyright (c) 2015 Clement Vuchener |
| 9 | * Copyright (c) 2017 Oscar Campos | ||
| 8 | */ | 10 | */ |
| 9 | 11 | ||
| 10 | /* | 12 | /* |
| @@ -670,10 +672,51 @@ static int corsair_input_mapping(struct hid_device *dev, | |||
| 670 | return 0; | 672 | return 0; |
| 671 | } | 673 | } |
| 672 | 674 | ||
| 675 | /* | ||
| 676 | * The report descriptor of the Corsair Scimitar RGB Pro gaming mouse is | ||
| 677 | * not parseable, as it defines two consecutive Logical Minimum items | ||
| 678 | * for the Usage Page (Consumer) at rdesc bytes 75 and 77; byte 77 is | ||
| 679 | * 0x16 where it should obviously be 0x26, a 16-bit Logical Maximum. | ||
| 680 | * This prevents proper parsing of the report descriptor, due to the | ||
| 681 | * Logical Minimum being larger than the Logical Maximum. | ||
| 682 | * | ||
| 683 | * This driver fixes the report descriptor for: | ||
| 684 | * - USB ID 1b1c:1b3e, sold as Scimitar RGB Pro Gaming mouse | ||
| 685 | */ | ||
| 686 | |||
| 687 | static __u8 *corsair_mouse_report_fixup(struct hid_device *hdev, __u8 *rdesc, | ||
| 688 | unsigned int *rsize) | ||
| 689 | { | ||
| 690 | struct usb_interface *intf = to_usb_interface(hdev->dev.parent); | ||
| 691 | |||
| 692 | if (intf->cur_altsetting->desc.bInterfaceNumber == 1) { | ||
| 693 | /* | ||
| 694 | * Corsair Scimitar RGB Pro report descriptor is broken and | ||
| 695 | * defines two different Logical Minimum items for the Consumer | ||
| 696 | * Application. Byte 77 should be 0x26, introducing a 16-bit | ||
| 697 | * integer Logical Maximum, but it is a 0x16 (Logical Minimum) | ||
| 698 | * instead. | ||
| 699 | */ | ||
| 700 | switch (hdev->product) { | ||
| 701 | case USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB: | ||
| 702 | if (*rsize >= 172 && rdesc[75] == 0x15 && rdesc[77] == 0x16 | ||
| 703 | && rdesc[78] == 0xff && rdesc[79] == 0x0f) { | ||
| 704 | hid_info(hdev, "Fixing up report descriptor\n"); | ||
| 705 | rdesc[77] = 0x26; | ||
| 706 | } | ||
| 707 | break; | ||
| 708 | } | ||
| 709 | |||
| 710 | } | ||
| 711 | return rdesc; | ||
| 712 | } | ||
| 713 | |||
| 673 | static const struct hid_device_id corsair_devices[] = { | 714 | static const struct hid_device_id corsair_devices[] = { |
| 674 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90), | 715 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90), |
| 675 | .driver_data = CORSAIR_USE_K90_MACRO | | 716 | .driver_data = CORSAIR_USE_K90_MACRO | |
| 676 | CORSAIR_USE_K90_BACKLIGHT }, | 717 | CORSAIR_USE_K90_BACKLIGHT }, |
| 718 | { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, | ||
| 719 | USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, | ||
| 677 | {} | 720 | {} |
| 678 | }; | 721 | }; |
| 679 | 722 | ||
| @@ -686,10 +729,14 @@ static struct hid_driver corsair_driver = { | |||
| 686 | .event = corsair_event, | 729 | .event = corsair_event, |
| 687 | .remove = corsair_remove, | 730 | .remove = corsair_remove, |
| 688 | .input_mapping = corsair_input_mapping, | 731 | .input_mapping = corsair_input_mapping, |
| 732 | .report_fixup = corsair_mouse_report_fixup, | ||
| 689 | }; | 733 | }; |
| 690 | 734 | ||
| 691 | module_hid_driver(corsair_driver); | 735 | module_hid_driver(corsair_driver); |
| 692 | 736 | ||
| 693 | MODULE_LICENSE("GPL"); | 737 | MODULE_LICENSE("GPL"); |
| 738 | /* Original K90 driver author */ | ||
| 694 | MODULE_AUTHOR("Clement Vuchener"); | 739 | MODULE_AUTHOR("Clement Vuchener"); |
| 740 | /* Scimitar PRO RGB driver author */ | ||
| 741 | MODULE_AUTHOR("Oscar Campos"); | ||
| 695 | MODULE_DESCRIPTION("HID driver for Corsair devices"); | 742 | MODULE_DESCRIPTION("HID driver for Corsair devices"); |
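
The patched byte is a HID short-item prefix: bits 1:0 encode the payload size, bits 3:2 the item type (1 = Global), and bits 7:4 the tag, so 0x16 is a Global Logical Minimum with a two-byte payload while 0x26 is the matching Global Logical Maximum. A small standalone decoder showing the two prefixes (illustrative, not driver code):

    #include <stdio.h>

    static void decode_hid_prefix(unsigned char prefix)
    {
            static const unsigned int sizes[] = { 0, 1, 2, 4 };

            printf("tag=0x%x type=%u size=%u byte(s)\n",
                   prefix >> 4, (prefix >> 2) & 0x3, sizes[prefix & 0x3]);
    }

    int main(void)
    {
            decode_hid_prefix(0x16); /* tag 0x1: Logical Minimum, Global, 2 bytes */
            decode_hid_prefix(0x26); /* tag 0x2: Logical Maximum, Global, 2 bytes */
            return 0;
    }
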
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 86c95d30ac80..0e2e7c571d22 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
| @@ -278,6 +278,9 @@ | |||
| 278 | #define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13 | 278 | #define USB_DEVICE_ID_CORSAIR_K70RGB 0x1b13 |
| 279 | #define USB_DEVICE_ID_CORSAIR_STRAFE 0x1b15 | 279 | #define USB_DEVICE_ID_CORSAIR_STRAFE 0x1b15 |
| 280 | #define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17 | 280 | #define USB_DEVICE_ID_CORSAIR_K65RGB 0x1b17 |
| 281 | #define USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE 0x1b38 | ||
| 282 | #define USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE 0x1b39 | ||
| 283 | #define USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB 0x1b3e | ||
| 281 | 284 | ||
| 282 | #define USB_VENDOR_ID_CREATIVELABS 0x041e | 285 | #define USB_VENDOR_ID_CREATIVELABS 0x041e |
| 283 | #define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c | 286 | #define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c |
| @@ -557,6 +560,7 @@ | |||
| 557 | 560 | ||
| 558 | #define USB_VENDOR_ID_JESS 0x0c45 | 561 | #define USB_VENDOR_ID_JESS 0x0c45 |
| 559 | #define USB_DEVICE_ID_JESS_YUREX 0x1010 | 562 | #define USB_DEVICE_ID_JESS_YUREX 0x1010 |
| 563 | #define USB_DEVICE_ID_JESS_ZEN_AIO_KBD 0x5112 | ||
| 560 | 564 | ||
| 561 | #define USB_VENDOR_ID_JESS2 0x0f30 | 565 | #define USB_VENDOR_ID_JESS2 0x0f30 |
| 562 | #define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111 | 566 | #define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111 |
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index f405b07d0381..740996f9bdd4 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c | |||
| @@ -2632,6 +2632,8 @@ err_stop: | |||
| 2632 | sony_leds_remove(sc); | 2632 | sony_leds_remove(sc); |
| 2633 | if (sc->quirks & SONY_BATTERY_SUPPORT) | 2633 | if (sc->quirks & SONY_BATTERY_SUPPORT) |
| 2634 | sony_battery_remove(sc); | 2634 | sony_battery_remove(sc); |
| 2635 | if (sc->touchpad) | ||
| 2636 | sony_unregister_touchpad(sc); | ||
| 2635 | sony_cancel_work_sync(sc); | 2637 | sony_cancel_work_sync(sc); |
| 2636 | kfree(sc->output_report_dmabuf); | 2638 | kfree(sc->output_report_dmabuf); |
| 2637 | sony_remove_dev_list(sc); | 2639 | sony_remove_dev_list(sc); |
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index d6847a664446..a69a3c88ab29 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
| @@ -80,6 +80,9 @@ static const struct hid_blacklist { | |||
| 80 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS }, | 80 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB, HID_QUIRK_NO_INIT_REPORTS }, |
| 81 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS }, | 81 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB, HID_QUIRK_NO_INIT_REPORTS }, |
| 82 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, | 82 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_STRAFE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, |
| 83 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, | ||
| 84 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, | ||
| 85 | { USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB, HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL }, | ||
| 83 | { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, | 86 | { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, |
| 84 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, | 87 | { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, |
| 85 | { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, | 88 | { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, |
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index be8f7e2a026f..994bddc55b82 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c | |||
| @@ -2579,7 +2579,9 @@ static void wacom_remove(struct hid_device *hdev) | |||
| 2579 | 2579 | ||
| 2580 | /* make sure we don't trigger the LEDs */ | 2580 | /* make sure we don't trigger the LEDs */ |
| 2581 | wacom_led_groups_release(wacom); | 2581 | wacom_led_groups_release(wacom); |
| 2582 | wacom_release_resources(wacom); | 2582 | |
| 2583 | if (wacom->wacom_wac.features.type != REMOTE) | ||
| 2584 | wacom_release_resources(wacom); | ||
| 2583 | 2585 | ||
| 2584 | hid_set_drvdata(hdev, NULL); | 2586 | hid_set_drvdata(hdev, NULL); |
| 2585 | } | 2587 | } |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 4aa3de9f1163..94250c293be2 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
| @@ -1959,8 +1959,10 @@ static void wacom_wac_pen_usage_mapping(struct hid_device *hdev, | |||
| 1959 | input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH); | 1959 | input_set_capability(input, EV_KEY, BTN_TOOL_BRUSH); |
| 1960 | input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL); | 1960 | input_set_capability(input, EV_KEY, BTN_TOOL_PENCIL); |
| 1961 | input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH); | 1961 | input_set_capability(input, EV_KEY, BTN_TOOL_AIRBRUSH); |
| 1962 | input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE); | 1962 | if (!(features->device_type & WACOM_DEVICETYPE_DIRECT)) { |
| 1963 | input_set_capability(input, EV_KEY, BTN_TOOL_LENS); | 1963 | input_set_capability(input, EV_KEY, BTN_TOOL_MOUSE); |
| 1964 | input_set_capability(input, EV_KEY, BTN_TOOL_LENS); | ||
| 1965 | } | ||
| 1964 | break; | 1966 | break; |
| 1965 | case WACOM_HID_WD_FINGERWHEEL: | 1967 | case WACOM_HID_WD_FINGERWHEEL: |
| 1966 | wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0); | 1968 | wacom_map_usage(input, usage, field, EV_ABS, ABS_WHEEL, 0); |
| @@ -4197,10 +4199,10 @@ static const struct wacom_features wacom_features_0x343 = | |||
| 4197 | WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; | 4199 | WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; |
| 4198 | static const struct wacom_features wacom_features_0x360 = | 4200 | static const struct wacom_features wacom_features_0x360 = |
| 4199 | { "Wacom Intuos Pro M", 44800, 29600, 8191, 63, | 4201 | { "Wacom Intuos Pro M", 44800, 29600, 8191, 63, |
| 4200 | INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; | 4202 | INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 }; |
| 4201 | static const struct wacom_features wacom_features_0x361 = | 4203 | static const struct wacom_features wacom_features_0x361 = |
| 4202 | { "Wacom Intuos Pro L", 62200, 43200, 8191, 63, | 4204 | { "Wacom Intuos Pro L", 62200, 43200, 8191, 63, |
| 4203 | INTUOSP2_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 9, .touch_max = 10 }; | 4205 | INTUOSP2_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 9, .touch_max = 10 }; |
| 4204 | 4206 | ||
| 4205 | static const struct wacom_features wacom_features_HID_ANY_ID = | 4207 | static const struct wacom_features wacom_features_HID_ANY_ID = |
| 4206 | { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID }; | 4208 | { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID }; |
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index bd0d1988feb2..321b8833fa6f 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c | |||
| @@ -502,12 +502,15 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle) | |||
| 502 | 502 | ||
| 503 | wait_for_completion(&info->waitevent); | 503 | wait_for_completion(&info->waitevent); |
| 504 | 504 | ||
| 505 | if (channel->rescind) { | ||
| 506 | ret = -ENODEV; | ||
| 507 | goto post_msg_err; | ||
| 508 | } | ||
| 509 | |||
| 510 | post_msg_err: | 505 | post_msg_err: |
| 506 | /* | ||
| 507 | * If the channel has been rescinded; | ||
| 508 | * we will be awakened by the rescind | ||
| 509 | * handler; set the error code to zero so we don't leak memory. | ||
| 510 | */ | ||
| 511 | if (channel->rescind) | ||
| 512 | ret = 0; | ||
| 513 | |||
| 511 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); | 514 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); |
| 512 | list_del(&info->msglistentry); | 515 | list_del(&info->msglistentry); |
| 513 | spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); | 516 | spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); |
| @@ -530,15 +533,13 @@ static int vmbus_close_internal(struct vmbus_channel *channel) | |||
| 530 | int ret; | 533 | int ret; |
| 531 | 534 | ||
| 532 | /* | 535 | /* |
| 533 | * vmbus_on_event(), running in the tasklet, can race | 536 | * vmbus_on_event(), running in the per-channel tasklet, can race |
| 534 | * with vmbus_close_internal() in the case of SMP guest, e.g., when | 537 | * with vmbus_close_internal() in the case of SMP guest, e.g., when |
| 535 | * the former is accessing channel->inbound.ring_buffer, the latter | 538 | * the former is accessing channel->inbound.ring_buffer, the latter |
| 536 | * could be freeing the ring_buffer pages. | 539 | * could be freeing the ring_buffer pages, so here we must stop it |
| 537 | * | 540 | * first. |
| 538 | * To resolve the race, we can serialize them by disabling the | ||
| 539 | * tasklet when the latter is running here. | ||
| 540 | */ | 541 | */ |
| 541 | hv_event_tasklet_disable(channel); | 542 | tasklet_disable(&channel->callback_event); |
| 542 | 543 | ||
| 543 | /* | 544 | /* |
| 544 | * In case a device driver's probe() fails (e.g., | 545 | * In case a device driver's probe() fails (e.g., |
| @@ -605,8 +606,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel) | |||
| 605 | get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); | 606 | get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); |
| 606 | 607 | ||
| 607 | out: | 608 | out: |
| 608 | hv_event_tasklet_enable(channel); | ||
| 609 | |||
| 610 | return ret; | 609 | return ret; |
| 611 | } | 610 | } |
| 612 | 611 | ||
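
With the helper pair gone, the close path leans directly on the tasklet API to serialize against the event handler. The shape of the fix, sketched with names from the hunks above (not the full function):

    tasklet_disable(&channel->callback_event); /* waits out a running handler */
    /* ... now safe to unmap and free the channel's ring-buffer pages ... */
    /* No tasklet_enable() here: the channel is being torn down, and
     * free_channel() finishes the job with tasklet_kill(). */
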
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index f33465d78a02..fbcb06352308 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c | |||
| @@ -350,7 +350,8 @@ static struct vmbus_channel *alloc_channel(void) | |||
| 350 | static void free_channel(struct vmbus_channel *channel) | 350 | static void free_channel(struct vmbus_channel *channel) |
| 351 | { | 351 | { |
| 352 | tasklet_kill(&channel->callback_event); | 352 | tasklet_kill(&channel->callback_event); |
| 353 | kfree(channel); | 353 | |
| 354 | kfree_rcu(channel, rcu); | ||
| 354 | } | 355 | } |
| 355 | 356 | ||
| 356 | static void percpu_channel_enq(void *arg) | 357 | static void percpu_channel_enq(void *arg) |
| @@ -359,14 +360,14 @@ static void percpu_channel_enq(void *arg) | |||
| 359 | struct hv_per_cpu_context *hv_cpu | 360 | struct hv_per_cpu_context *hv_cpu |
| 360 | = this_cpu_ptr(hv_context.cpu_context); | 361 | = this_cpu_ptr(hv_context.cpu_context); |
| 361 | 362 | ||
| 362 | list_add_tail(&channel->percpu_list, &hv_cpu->chan_list); | 363 | list_add_tail_rcu(&channel->percpu_list, &hv_cpu->chan_list); |
| 363 | } | 364 | } |
| 364 | 365 | ||
| 365 | static void percpu_channel_deq(void *arg) | 366 | static void percpu_channel_deq(void *arg) |
| 366 | { | 367 | { |
| 367 | struct vmbus_channel *channel = arg; | 368 | struct vmbus_channel *channel = arg; |
| 368 | 369 | ||
| 369 | list_del(&channel->percpu_list); | 370 | list_del_rcu(&channel->percpu_list); |
| 370 | } | 371 | } |
| 371 | 372 | ||
| 372 | 373 | ||
| @@ -381,19 +382,6 @@ static void vmbus_release_relid(u32 relid) | |||
| 381 | true); | 382 | true); |
| 382 | } | 383 | } |
| 383 | 384 | ||
| 384 | void hv_event_tasklet_disable(struct vmbus_channel *channel) | ||
| 385 | { | ||
| 386 | tasklet_disable(&channel->callback_event); | ||
| 387 | } | ||
| 388 | |||
| 389 | void hv_event_tasklet_enable(struct vmbus_channel *channel) | ||
| 390 | { | ||
| 391 | tasklet_enable(&channel->callback_event); | ||
| 392 | |||
| 393 | /* In case there is any pending event */ | ||
| 394 | tasklet_schedule(&channel->callback_event); | ||
| 395 | } | ||
| 396 | |||
| 397 | void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) | 385 | void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) |
| 398 | { | 386 | { |
| 399 | unsigned long flags; | 387 | unsigned long flags; |
| @@ -402,7 +390,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) | |||
| 402 | BUG_ON(!channel->rescind); | 390 | BUG_ON(!channel->rescind); |
| 403 | BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); | 391 | BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); |
| 404 | 392 | ||
| 405 | hv_event_tasklet_disable(channel); | ||
| 406 | if (channel->target_cpu != get_cpu()) { | 393 | if (channel->target_cpu != get_cpu()) { |
| 407 | put_cpu(); | 394 | put_cpu(); |
| 408 | smp_call_function_single(channel->target_cpu, | 395 | smp_call_function_single(channel->target_cpu, |
| @@ -411,7 +398,6 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) | |||
| 411 | percpu_channel_deq(channel); | 398 | percpu_channel_deq(channel); |
| 412 | put_cpu(); | 399 | put_cpu(); |
| 413 | } | 400 | } |
| 414 | hv_event_tasklet_enable(channel); | ||
| 415 | 401 | ||
| 416 | if (channel->primary_channel == NULL) { | 402 | if (channel->primary_channel == NULL) { |
| 417 | list_del(&channel->listentry); | 403 | list_del(&channel->listentry); |
| @@ -505,7 +491,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) | |||
| 505 | 491 | ||
| 506 | init_vp_index(newchannel, dev_type); | 492 | init_vp_index(newchannel, dev_type); |
| 507 | 493 | ||
| 508 | hv_event_tasklet_disable(newchannel); | ||
| 509 | if (newchannel->target_cpu != get_cpu()) { | 494 | if (newchannel->target_cpu != get_cpu()) { |
| 510 | put_cpu(); | 495 | put_cpu(); |
| 511 | smp_call_function_single(newchannel->target_cpu, | 496 | smp_call_function_single(newchannel->target_cpu, |
| @@ -515,7 +500,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) | |||
| 515 | percpu_channel_enq(newchannel); | 500 | percpu_channel_enq(newchannel); |
| 516 | put_cpu(); | 501 | put_cpu(); |
| 517 | } | 502 | } |
| 518 | hv_event_tasklet_enable(newchannel); | ||
| 519 | 503 | ||
| 520 | /* | 504 | /* |
| 521 | * This state is used to indicate a successful open | 505 | * This state is used to indicate a successful open |
| @@ -565,7 +549,6 @@ err_deq_chan: | |||
| 565 | list_del(&newchannel->listentry); | 549 | list_del(&newchannel->listentry); |
| 566 | mutex_unlock(&vmbus_connection.channel_mutex); | 550 | mutex_unlock(&vmbus_connection.channel_mutex); |
| 567 | 551 | ||
| 568 | hv_event_tasklet_disable(newchannel); | ||
| 569 | if (newchannel->target_cpu != get_cpu()) { | 552 | if (newchannel->target_cpu != get_cpu()) { |
| 570 | put_cpu(); | 553 | put_cpu(); |
| 571 | smp_call_function_single(newchannel->target_cpu, | 554 | smp_call_function_single(newchannel->target_cpu, |
| @@ -574,7 +557,6 @@ err_deq_chan: | |||
| 574 | percpu_channel_deq(newchannel); | 557 | percpu_channel_deq(newchannel); |
| 575 | put_cpu(); | 558 | put_cpu(); |
| 576 | } | 559 | } |
| 577 | hv_event_tasklet_enable(newchannel); | ||
| 578 | 560 | ||
| 579 | vmbus_release_relid(newchannel->offermsg.child_relid); | 561 | vmbus_release_relid(newchannel->offermsg.child_relid); |
| 580 | 562 | ||
| @@ -814,6 +796,7 @@ static void vmbus_onoffer(struct vmbus_channel_message_header *hdr) | |||
| 814 | /* Allocate the channel object and save this offer. */ | 796 | /* Allocate the channel object and save this offer. */ |
| 815 | newchannel = alloc_channel(); | 797 | newchannel = alloc_channel(); |
| 816 | if (!newchannel) { | 798 | if (!newchannel) { |
| 799 | vmbus_release_relid(offer->child_relid); | ||
| 817 | pr_err("Unable to allocate channel object\n"); | 800 | pr_err("Unable to allocate channel object\n"); |
| 818 | return; | 801 | return; |
| 819 | } | 802 | } |
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c index 9aee6014339d..a5596a642ed0 100644 --- a/drivers/hv/hv_fcopy.c +++ b/drivers/hv/hv_fcopy.c | |||
| @@ -71,7 +71,6 @@ static DECLARE_WORK(fcopy_send_work, fcopy_send_data); | |||
| 71 | static const char fcopy_devname[] = "vmbus/hv_fcopy"; | 71 | static const char fcopy_devname[] = "vmbus/hv_fcopy"; |
| 72 | static u8 *recv_buffer; | 72 | static u8 *recv_buffer; |
| 73 | static struct hvutil_transport *hvt; | 73 | static struct hvutil_transport *hvt; |
| 74 | static struct completion release_event; | ||
| 75 | /* | 74 | /* |
| 76 | * This state maintains the version number registered by the daemon. | 75 | * This state maintains the version number registered by the daemon. |
| 77 | */ | 76 | */ |
| @@ -331,7 +330,6 @@ static void fcopy_on_reset(void) | |||
| 331 | 330 | ||
| 332 | if (cancel_delayed_work_sync(&fcopy_timeout_work)) | 331 | if (cancel_delayed_work_sync(&fcopy_timeout_work)) |
| 333 | fcopy_respond_to_host(HV_E_FAIL); | 332 | fcopy_respond_to_host(HV_E_FAIL); |
| 334 | complete(&release_event); | ||
| 335 | } | 333 | } |
| 336 | 334 | ||
| 337 | int hv_fcopy_init(struct hv_util_service *srv) | 335 | int hv_fcopy_init(struct hv_util_service *srv) |
| @@ -339,7 +337,6 @@ int hv_fcopy_init(struct hv_util_service *srv) | |||
| 339 | recv_buffer = srv->recv_buffer; | 337 | recv_buffer = srv->recv_buffer; |
| 340 | fcopy_transaction.recv_channel = srv->channel; | 338 | fcopy_transaction.recv_channel = srv->channel; |
| 341 | 339 | ||
| 342 | init_completion(&release_event); | ||
| 343 | /* | 340 | /* |
| 344 | * When this driver loads, the user level daemon that | 341 | * When this driver loads, the user level daemon that |
| 345 | * processes the host requests may not yet be running. | 342 | * processes the host requests may not yet be running. |
| @@ -361,5 +358,4 @@ void hv_fcopy_deinit(void) | |||
| 361 | fcopy_transaction.state = HVUTIL_DEVICE_DYING; | 358 | fcopy_transaction.state = HVUTIL_DEVICE_DYING; |
| 362 | cancel_delayed_work_sync(&fcopy_timeout_work); | 359 | cancel_delayed_work_sync(&fcopy_timeout_work); |
| 363 | hvutil_transport_destroy(hvt); | 360 | hvutil_transport_destroy(hvt); |
| 364 | wait_for_completion(&release_event); | ||
| 365 | } | 361 | } |
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index de263712e247..a1adfe2cfb34 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c | |||
| @@ -101,7 +101,6 @@ static DECLARE_WORK(kvp_sendkey_work, kvp_send_key); | |||
| 101 | static const char kvp_devname[] = "vmbus/hv_kvp"; | 101 | static const char kvp_devname[] = "vmbus/hv_kvp"; |
| 102 | static u8 *recv_buffer; | 102 | static u8 *recv_buffer; |
| 103 | static struct hvutil_transport *hvt; | 103 | static struct hvutil_transport *hvt; |
| 104 | static struct completion release_event; | ||
| 105 | /* | 104 | /* |
| 106 | * Register the kernel component with the user-level daemon. | 105 | * Register the kernel component with the user-level daemon. |
| 107 | * As part of this registration, pass the LIC version number. | 106 | * As part of this registration, pass the LIC version number. |
| @@ -714,7 +713,6 @@ static void kvp_on_reset(void) | |||
| 714 | if (cancel_delayed_work_sync(&kvp_timeout_work)) | 713 | if (cancel_delayed_work_sync(&kvp_timeout_work)) |
| 715 | kvp_respond_to_host(NULL, HV_E_FAIL); | 714 | kvp_respond_to_host(NULL, HV_E_FAIL); |
| 716 | kvp_transaction.state = HVUTIL_DEVICE_INIT; | 715 | kvp_transaction.state = HVUTIL_DEVICE_INIT; |
| 717 | complete(&release_event); | ||
| 718 | } | 716 | } |
| 719 | 717 | ||
| 720 | int | 718 | int |
| @@ -723,7 +721,6 @@ hv_kvp_init(struct hv_util_service *srv) | |||
| 723 | recv_buffer = srv->recv_buffer; | 721 | recv_buffer = srv->recv_buffer; |
| 724 | kvp_transaction.recv_channel = srv->channel; | 722 | kvp_transaction.recv_channel = srv->channel; |
| 725 | 723 | ||
| 726 | init_completion(&release_event); | ||
| 727 | /* | 724 | /* |
| 728 | * When this driver loads, the user level daemon that | 725 | * When this driver loads, the user level daemon that |
| 729 | * processes the host requests may not yet be running. | 726 | * processes the host requests may not yet be running. |
| @@ -747,5 +744,4 @@ void hv_kvp_deinit(void) | |||
| 747 | cancel_delayed_work_sync(&kvp_timeout_work); | 744 | cancel_delayed_work_sync(&kvp_timeout_work); |
| 748 | cancel_work_sync(&kvp_sendkey_work); | 745 | cancel_work_sync(&kvp_sendkey_work); |
| 749 | hvutil_transport_destroy(hvt); | 746 | hvutil_transport_destroy(hvt); |
| 750 | wait_for_completion(&release_event); | ||
| 751 | } | 747 | } |
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c index bcc03f0748d6..e659d1b94a57 100644 --- a/drivers/hv/hv_snapshot.c +++ b/drivers/hv/hv_snapshot.c | |||
| @@ -79,7 +79,6 @@ static int dm_reg_value; | |||
| 79 | static const char vss_devname[] = "vmbus/hv_vss"; | 79 | static const char vss_devname[] = "vmbus/hv_vss"; |
| 80 | static __u8 *recv_buffer; | 80 | static __u8 *recv_buffer; |
| 81 | static struct hvutil_transport *hvt; | 81 | static struct hvutil_transport *hvt; |
| 82 | static struct completion release_event; | ||
| 83 | 82 | ||
| 84 | static void vss_timeout_func(struct work_struct *dummy); | 83 | static void vss_timeout_func(struct work_struct *dummy); |
| 85 | static void vss_handle_request(struct work_struct *dummy); | 84 | static void vss_handle_request(struct work_struct *dummy); |
| @@ -361,13 +360,11 @@ static void vss_on_reset(void) | |||
| 361 | if (cancel_delayed_work_sync(&vss_timeout_work)) | 360 | if (cancel_delayed_work_sync(&vss_timeout_work)) |
| 362 | vss_respond_to_host(HV_E_FAIL); | 361 | vss_respond_to_host(HV_E_FAIL); |
| 363 | vss_transaction.state = HVUTIL_DEVICE_INIT; | 362 | vss_transaction.state = HVUTIL_DEVICE_INIT; |
| 364 | complete(&release_event); | ||
| 365 | } | 363 | } |
| 366 | 364 | ||
| 367 | int | 365 | int |
| 368 | hv_vss_init(struct hv_util_service *srv) | 366 | hv_vss_init(struct hv_util_service *srv) |
| 369 | { | 367 | { |
| 370 | init_completion(&release_event); | ||
| 371 | if (vmbus_proto_version < VERSION_WIN8_1) { | 368 | if (vmbus_proto_version < VERSION_WIN8_1) { |
| 372 | pr_warn("Integration service 'Backup (volume snapshot)'" | 369 | pr_warn("Integration service 'Backup (volume snapshot)'" |
| 373 | " not supported on this host version.\n"); | 370 | " not supported on this host version.\n"); |
| @@ -400,5 +397,4 @@ void hv_vss_deinit(void) | |||
| 400 | cancel_delayed_work_sync(&vss_timeout_work); | 397 | cancel_delayed_work_sync(&vss_timeout_work); |
| 401 | cancel_work_sync(&vss_handle_request_work); | 398 | cancel_work_sync(&vss_handle_request_work); |
| 402 | hvutil_transport_destroy(hvt); | 399 | hvutil_transport_destroy(hvt); |
| 403 | wait_for_completion(&release_event); | ||
| 404 | } | 400 | } |
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c index 3042eaa13062..186b10083c55 100644 --- a/drivers/hv/hv_util.c +++ b/drivers/hv/hv_util.c | |||
| @@ -590,6 +590,8 @@ static int hv_timesync_init(struct hv_util_service *srv) | |||
| 590 | if (!hyperv_cs) | 590 | if (!hyperv_cs) |
| 591 | return -ENODEV; | 591 | return -ENODEV; |
| 592 | 592 | ||
| 593 | spin_lock_init(&host_ts.lock); | ||
| 594 | |||
| 593 | INIT_WORK(&wrk.work, hv_set_host_time); | 595 | INIT_WORK(&wrk.work, hv_set_host_time); |
| 594 | 596 | ||
| 595 | /* | 597 | /* |
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c index c235a9515267..4402a71e23f7 100644 --- a/drivers/hv/hv_utils_transport.c +++ b/drivers/hv/hv_utils_transport.c | |||
| @@ -182,10 +182,11 @@ static int hvt_op_release(struct inode *inode, struct file *file) | |||
| 182 | * connects back. | 182 | * connects back. |
| 183 | */ | 183 | */ |
| 184 | hvt_reset(hvt); | 184 | hvt_reset(hvt); |
| 185 | mutex_unlock(&hvt->lock); | ||
| 186 | 185 | ||
| 187 | if (mode_old == HVUTIL_TRANSPORT_DESTROY) | 186 | if (mode_old == HVUTIL_TRANSPORT_DESTROY) |
| 188 | hvt_transport_free(hvt); | 187 | complete(&hvt->release); |
| 188 | |||
| 189 | mutex_unlock(&hvt->lock); | ||
| 189 | 190 | ||
| 190 | return 0; | 191 | return 0; |
| 191 | } | 192 | } |
| @@ -304,6 +305,7 @@ struct hvutil_transport *hvutil_transport_init(const char *name, | |||
| 304 | 305 | ||
| 305 | init_waitqueue_head(&hvt->outmsg_q); | 306 | init_waitqueue_head(&hvt->outmsg_q); |
| 306 | mutex_init(&hvt->lock); | 307 | mutex_init(&hvt->lock); |
| 308 | init_completion(&hvt->release); | ||
| 307 | 309 | ||
| 308 | spin_lock(&hvt_list_lock); | 310 | spin_lock(&hvt_list_lock); |
| 309 | list_add(&hvt->list, &hvt_list); | 311 | list_add(&hvt->list, &hvt_list); |
| @@ -351,6 +353,8 @@ void hvutil_transport_destroy(struct hvutil_transport *hvt) | |||
| 351 | if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0) | 353 | if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0) |
| 352 | cn_del_callback(&hvt->cn_id); | 354 | cn_del_callback(&hvt->cn_id); |
| 353 | 355 | ||
| 354 | if (mode_old != HVUTIL_TRANSPORT_CHARDEV) | 356 | if (mode_old == HVUTIL_TRANSPORT_CHARDEV) |
| 355 | hvt_transport_free(hvt); | 357 | wait_for_completion(&hvt->release); |
| 358 | |||
| 359 | hvt_transport_free(hvt); | ||
| 356 | } | 360 | } |
diff --git a/drivers/hv/hv_utils_transport.h b/drivers/hv/hv_utils_transport.h index d98f5225c3e6..79afb626e166 100644 --- a/drivers/hv/hv_utils_transport.h +++ b/drivers/hv/hv_utils_transport.h | |||
| @@ -41,6 +41,7 @@ struct hvutil_transport { | |||
| 41 | int outmsg_len; /* its length */ | 41 | int outmsg_len; /* its length */ |
| 42 | wait_queue_head_t outmsg_q; /* poll/read wait queue */ | 42 | wait_queue_head_t outmsg_q; /* poll/read wait queue */ |
| 43 | struct mutex lock; /* protects struct members */ | 43 | struct mutex lock; /* protects struct members */ |
| 44 | struct completion release; /* synchronize with fd release */ | ||
| 44 | }; | 45 | }; |
| 45 | 46 | ||
| 46 | struct hvutil_transport *hvutil_transport_init(const char *name, | 47 | struct hvutil_transport *hvutil_transport_init(const char *name, |
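
Together with the hv_fcopy, hv_kvp and hv_snapshot hunks above, this moves the per-service release_event completions into the transport itself. The handshake, sketched with the names from these hunks:

    init_completion(&hvt->release);          /* hvutil_transport_init() */

    /* hvt_op_release(), with hvt->lock held: */
    if (mode_old == HVUTIL_TRANSPORT_DESTROY)
            complete(&hvt->release);

    /* hvutil_transport_destroy(): */
    if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
            wait_for_completion(&hvt->release);
    hvt_transport_free(hvt);                 /* no fd left to race with */
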
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index da6b59ba5940..8370b9dc6037 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c | |||
| @@ -939,8 +939,10 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu) | |||
| 939 | if (relid == 0) | 939 | if (relid == 0) |
| 940 | continue; | 940 | continue; |
| 941 | 941 | ||
| 942 | rcu_read_lock(); | ||
| 943 | |||
| 942 | /* Find channel based on relid */ | 944 | /* Find channel based on relid */ |
| 943 | list_for_each_entry(channel, &hv_cpu->chan_list, percpu_list) { | 945 | list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) { |
| 944 | if (channel->offermsg.child_relid != relid) | 946 | if (channel->offermsg.child_relid != relid) |
| 945 | continue; | 947 | continue; |
| 946 | 948 | ||
| @@ -956,6 +958,8 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu) | |||
| 956 | tasklet_schedule(&channel->callback_event); | 958 | tasklet_schedule(&channel->callback_event); |
| 957 | } | 959 | } |
| 958 | } | 960 | } |
| 961 | |||
| 962 | rcu_read_unlock(); | ||
| 959 | } | 963 | } |
| 960 | } | 964 | } |
| 961 | 965 | ||
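
The channel_mgmt and vmbus_drv hunks form one RCU conversion: the interrupt-time relid lookup now walks the per-CPU list under rcu_read_lock(), while removal uses list_del_rcu() plus kfree_rcu(), so the channel memory outlives any reader still traversing it. The pairing, sketched with names from the hunks (kfree_rcu() assumes struct vmbus_channel gained a struct rcu_head member named rcu):

    /* Reader, interrupt context: */
    rcu_read_lock();
    list_for_each_entry_rcu(channel, &hv_cpu->chan_list, percpu_list) {
            if (channel->offermsg.child_relid == relid)
                    tasklet_schedule(&channel->callback_event);
    }
    rcu_read_unlock();

    /* Writer, removal path: */
    list_del_rcu(&channel->percpu_list);
    tasklet_kill(&channel->callback_event);
    kfree_rcu(channel, rcu);        /* freed only after readers finish */
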
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c index cccef87963e0..975c43d446f8 100644 --- a/drivers/hwmon/asus_atk0110.c +++ b/drivers/hwmon/asus_atk0110.c | |||
| @@ -646,6 +646,9 @@ static int atk_read_value(struct atk_sensor_data *sensor, u64 *value) | |||
| 646 | else | 646 | else |
| 647 | err = atk_read_value_new(sensor, value); | 647 | err = atk_read_value_new(sensor, value); |
| 648 | 648 | ||
| 649 | if (err) | ||
| 650 | return err; | ||
| 651 | |||
| 649 | sensor->is_valid = true; | 652 | sensor->is_valid = true; |
| 650 | sensor->last_updated = jiffies; | 653 | sensor->last_updated = jiffies; |
| 651 | sensor->cached_value = *value; | 654 | sensor->cached_value = *value; |
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c index efb01c247e2d..4dfc7238313e 100644 --- a/drivers/hwmon/it87.c +++ b/drivers/hwmon/it87.c | |||
| @@ -3198,7 +3198,7 @@ static int __init sm_it87_init(void) | |||
| 3198 | { | 3198 | { |
| 3199 | int sioaddr[2] = { REG_2E, REG_4E }; | 3199 | int sioaddr[2] = { REG_2E, REG_4E }; |
| 3200 | struct it87_sio_data sio_data; | 3200 | struct it87_sio_data sio_data; |
| 3201 | unsigned short isa_address; | 3201 | unsigned short isa_address[2]; |
| 3202 | bool found = false; | 3202 | bool found = false; |
| 3203 | int i, err; | 3203 | int i, err; |
| 3204 | 3204 | ||
| @@ -3208,15 +3208,29 @@ static int __init sm_it87_init(void) | |||
| 3208 | 3208 | ||
| 3209 | for (i = 0; i < ARRAY_SIZE(sioaddr); i++) { | 3209 | for (i = 0; i < ARRAY_SIZE(sioaddr); i++) { |
| 3210 | memset(&sio_data, 0, sizeof(struct it87_sio_data)); | 3210 | memset(&sio_data, 0, sizeof(struct it87_sio_data)); |
| 3211 | isa_address = 0; | 3211 | isa_address[i] = 0; |
| 3212 | err = it87_find(sioaddr[i], &isa_address, &sio_data); | 3212 | err = it87_find(sioaddr[i], &isa_address[i], &sio_data); |
| 3213 | if (err || isa_address == 0) | 3213 | if (err || isa_address[i] == 0) |
| 3214 | continue; | 3214 | continue; |
| 3215 | /* | ||
| 3216 | * Don't register second chip if its ISA address matches | ||
| 3217 | * the first chip's ISA address. | ||
| 3218 | */ | ||
| 3219 | if (i && isa_address[i] == isa_address[0]) | ||
| 3220 | break; | ||
| 3215 | 3221 | ||
| 3216 | err = it87_device_add(i, isa_address, &sio_data); | 3222 | err = it87_device_add(i, isa_address[i], &sio_data); |
| 3217 | if (err) | 3223 | if (err) |
| 3218 | goto exit_dev_unregister; | 3224 | goto exit_dev_unregister; |
| 3225 | |||
| 3219 | found = true; | 3226 | found = true; |
| 3227 | |||
| 3228 | /* | ||
| 3229 | * IT8705F may respond on both SIO addresses. | ||
| 3230 | * Stop probing after finding one. | ||
| 3231 | */ | ||
| 3232 | if (sio_data.type == it87) | ||
| 3233 | break; | ||
| 3220 | } | 3234 | } |
| 3221 | 3235 | ||
| 3222 | if (!found) { | 3236 | if (!found) { |
diff --git a/drivers/hwmon/max31790.c b/drivers/hwmon/max31790.c index c1b9275978f9..281491cca510 100644 --- a/drivers/hwmon/max31790.c +++ b/drivers/hwmon/max31790.c | |||
| @@ -311,7 +311,7 @@ static int max31790_write_pwm(struct device *dev, u32 attr, int channel, | |||
| 311 | data->pwm[channel] = val << 8; | 311 | data->pwm[channel] = val << 8; |
| 312 | err = i2c_smbus_write_word_swapped(client, | 312 | err = i2c_smbus_write_word_swapped(client, |
| 313 | MAX31790_REG_PWMOUT(channel), | 313 | MAX31790_REG_PWMOUT(channel), |
| 314 | val); | 314 | data->pwm[channel]); |
| 315 | break; | 315 | break; |
| 316 | case hwmon_pwm_enable: | 316 | case hwmon_pwm_enable: |
| 317 | fan_config = data->fan_config[channel]; | 317 | fan_config = data->fan_config[channel]; |
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c index cdd9b3b26195..7563eceeaaea 100644 --- a/drivers/hwtracing/intel_th/core.c +++ b/drivers/hwtracing/intel_th/core.c | |||
| @@ -221,8 +221,10 @@ static int intel_th_output_activate(struct intel_th_device *thdev) | |||
| 221 | else | 221 | else |
| 222 | intel_th_trace_enable(thdev); | 222 | intel_th_trace_enable(thdev); |
| 223 | 223 | ||
| 224 | if (ret) | 224 | if (ret) { |
| 225 | pm_runtime_put(&thdev->dev); | 225 | pm_runtime_put(&thdev->dev); |
| 226 | module_put(thdrv->driver.owner); | ||
| 227 | } | ||
| 226 | 228 | ||
| 227 | return ret; | 229 | return ret; |
| 228 | } | 230 | } |
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 0bba3842336e..590cf90dd21a 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c | |||
| @@ -85,6 +85,16 @@ static const struct pci_device_id intel_th_pci_id_table[] = { | |||
| 85 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6), | 85 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6), |
| 86 | .driver_data = (kernel_ulong_t)0, | 86 | .driver_data = (kernel_ulong_t)0, |
| 87 | }, | 87 | }, |
| 88 | { | ||
| 89 | /* Denverton */ | ||
| 90 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x19e1), | ||
| 91 | .driver_data = (kernel_ulong_t)0, | ||
| 92 | }, | ||
| 93 | { | ||
| 94 | /* Gemini Lake */ | ||
| 95 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e), | ||
| 96 | .driver_data = (kernel_ulong_t)0, | ||
| 97 | }, | ||
| 88 | { 0 }, | 98 | { 0 }, |
| 89 | }; | 99 | }; |
| 90 | 100 | ||
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index ad9dec30bb30..4282ceca3d8f 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c | |||
| @@ -169,7 +169,9 @@ static irqreturn_t tiadc_irq_h(int irq, void *private) | |||
| 169 | { | 169 | { |
| 170 | struct iio_dev *indio_dev = private; | 170 | struct iio_dev *indio_dev = private; |
| 171 | struct tiadc_device *adc_dev = iio_priv(indio_dev); | 171 | struct tiadc_device *adc_dev = iio_priv(indio_dev); |
| 172 | unsigned int status, config; | 172 | unsigned int status, config, adc_fsm; |
| 173 | unsigned short count = 0; | ||
| 174 | |||
| 173 | status = tiadc_readl(adc_dev, REG_IRQSTATUS); | 175 | status = tiadc_readl(adc_dev, REG_IRQSTATUS); |
| 174 | 176 | ||
| 175 | /* | 177 | /* |
| @@ -183,6 +185,15 @@ static irqreturn_t tiadc_irq_h(int irq, void *private) | |||
| 183 | tiadc_writel(adc_dev, REG_CTRL, config); | 185 | tiadc_writel(adc_dev, REG_CTRL, config); |
| 184 | tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN | 186 | tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN |
| 185 | | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES); | 187 | | IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES); |
| 188 | |||
| 189 | /* Wait for the idle state: the ADC needs | ||
| 190 | * to finish the current conversion | ||
| 191 | * before the module is disabled. | ||
| 192 | */ | ||
| 193 | do { | ||
| 194 | adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM); | ||
| 195 | } while (adc_fsm != 0x10 && count++ < 100); | ||
| 196 | |||
| 186 | tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB)); | 197 | tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB)); |
| 187 | return IRQ_HANDLED; | 198 | return IRQ_HANDLED; |
| 188 | } else if (status & IRQENB_FIFO1THRES) { | 199 | } else if (status & IRQENB_FIFO1THRES) { |
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c index a3cce3a38300..ecf592d69043 100644 --- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c +++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c | |||
| @@ -51,8 +51,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) | |||
| 51 | st->report_state.report_id, | 51 | st->report_state.report_id, |
| 52 | st->report_state.index, | 52 | st->report_state.index, |
| 53 | HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM); | 53 | HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM); |
| 54 | |||
| 55 | poll_value = hid_sensor_read_poll_value(st); | ||
| 56 | } else { | 54 | } else { |
| 57 | int val; | 55 | int val; |
| 58 | 56 | ||
| @@ -89,7 +87,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state) | |||
| 89 | sensor_hub_get_feature(st->hsdev, st->power_state.report_id, | 87 | sensor_hub_get_feature(st->hsdev, st->power_state.report_id, |
| 90 | st->power_state.index, | 88 | st->power_state.index, |
| 91 | sizeof(state_val), &state_val); | 89 | sizeof(state_val), &state_val); |
| 92 | if (state && poll_value) | 90 | if (state) |
| 91 | poll_value = hid_sensor_read_poll_value(st); | ||
| 92 | if (poll_value > 0) | ||
| 93 | msleep_interruptible(poll_value * 2); | 93 | msleep_interruptible(poll_value * 2); |
| 94 | 94 | ||
| 95 | return 0; | 95 | return 0; |
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c index 78532ce07449..81b572d7699a 100644 --- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c +++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c | |||
| @@ -193,8 +193,8 @@ int st_lsm6dsx_update_watermark(struct st_lsm6dsx_sensor *sensor, u16 watermark) | |||
| 193 | if (err < 0) | 193 | if (err < 0) |
| 194 | goto out; | 194 | goto out; |
| 195 | 195 | ||
| 196 | fifo_watermark = ((data & ~ST_LSM6DSX_FIFO_TH_MASK) << 8) | | 196 | fifo_watermark = ((data << 8) & ~ST_LSM6DSX_FIFO_TH_MASK) | |
| 197 | (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK); | 197 | (fifo_watermark & ST_LSM6DSX_FIFO_TH_MASK); |
| 198 | 198 | ||
| 199 | wdata = cpu_to_le16(fifo_watermark); | 199 | wdata = cpu_to_le16(fifo_watermark); |
| 200 | err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR, | 200 | err = hw->tf->write(hw->dev, ST_LSM6DSX_REG_FIFO_THL_ADDR, |
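
The one-line change swaps mask-then-shift for shift-then-mask. Assuming the threshold mask covers bits 11:0 (a 12-bit watermark), the old expression masked the freshly read FIFO_CTRL2 byte down to zero before shifting, wiping the register's non-watermark flags; shifting first keeps them in bits 15:12. A standalone demonstration of the arithmetic:

    #include <stdio.h>

    #define FIFO_TH_MASK 0x0fff     /* assumed: watermark in bits 11:0 */

    int main(void)
    {
            unsigned int data = 0xa5;       /* FIFO_CTRL2 byte read back */
            unsigned int wm = 0x123;        /* requested watermark */
            unsigned int before, after;

            before = ((data & ~FIFO_TH_MASK) << 8) | (wm & FIFO_TH_MASK);
            after  = ((data << 8) & ~FIFO_TH_MASK) | (wm & FIFO_TH_MASK);

            /* before=0x0123: upper flags lost; after=0xa123: flags kept */
            printf("before=0x%04x after=0x%04x\n", before, after);
            return 0;
    }
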
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c index 6dd8cbd7ce95..e13370dc9b1c 100644 --- a/drivers/iio/magnetometer/ak8974.c +++ b/drivers/iio/magnetometer/ak8974.c | |||
| @@ -763,7 +763,7 @@ power_off: | |||
| 763 | return ret; | 763 | return ret; |
| 764 | } | 764 | } |
| 765 | 765 | ||
| 766 | static int __exit ak8974_remove(struct i2c_client *i2c) | 766 | static int ak8974_remove(struct i2c_client *i2c) |
| 767 | { | 767 | { |
| 768 | struct iio_dev *indio_dev = i2c_get_clientdata(i2c); | 768 | struct iio_dev *indio_dev = i2c_get_clientdata(i2c); |
| 769 | struct ak8974 *ak8974 = iio_priv(indio_dev); | 769 | struct ak8974 *ak8974 = iio_priv(indio_dev); |
| @@ -845,7 +845,7 @@ static struct i2c_driver ak8974_driver = { | |||
| 845 | .of_match_table = of_match_ptr(ak8974_of_match), | 845 | .of_match_table = of_match_ptr(ak8974_of_match), |
| 846 | }, | 846 | }, |
| 847 | .probe = ak8974_probe, | 847 | .probe = ak8974_probe, |
| 848 | .remove = __exit_p(ak8974_remove), | 848 | .remove = ak8974_remove, |
| 849 | .id_table = ak8974_id, | 849 | .id_table = ak8974_id, |
| 850 | }; | 850 | }; |
| 851 | module_i2c_driver(ak8974_driver); | 851 | module_i2c_driver(ak8974_driver); |
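
The ak8974 change fixes an annotation rather than logic: a remove() callback can run at any time through sysfs unbind, so it must not be placed in the __exit section, which is discarded for built-in code (and __exit_p() then degrades to NULL). The wrong and right shapes, sketched with an illustrative foo_remove():

    /* Wrong: discarded at link time when built in; __exit_p() yields
     * NULL, so runtime unbind is broken. */
    static int __exit foo_remove(struct i2c_client *i2c);
    .remove = __exit_p(foo_remove),

    /* Right: stays in normal .text, callable whenever the device is
     * unbound. */
    static int foo_remove(struct i2c_client *i2c);
    .remove = foo_remove,
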
diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c index e95510117a6d..f2ae75fa3128 100644 --- a/drivers/infiniband/core/cq.c +++ b/drivers/infiniband/core/cq.c | |||
| @@ -29,7 +29,13 @@ static int __ib_process_cq(struct ib_cq *cq, int budget) | |||
| 29 | { | 29 | { |
| 30 | int i, n, completed = 0; | 30 | int i, n, completed = 0; |
| 31 | 31 | ||
| 32 | while ((n = ib_poll_cq(cq, IB_POLL_BATCH, cq->wc)) > 0) { | 32 | /* |
| 33 | * budget might be (-1) if the caller does not | ||
| 34 | * want to bound this call, thus we need unsigned | ||
| 35 | * minimum here. | ||
| 36 | */ | ||
| 37 | while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH, | ||
| 38 | budget - completed), cq->wc)) > 0) { | ||
| 33 | for (i = 0; i < n; i++) { | 39 | for (i = 0; i < n; i++) { |
| 34 | struct ib_wc *wc = &cq->wc[i]; | 40 | struct ib_wc *wc = &cq->wc[i]; |
| 35 | 41 | ||
| @@ -196,7 +202,7 @@ void ib_free_cq(struct ib_cq *cq) | |||
| 196 | irq_poll_disable(&cq->iop); | 202 | irq_poll_disable(&cq->iop); |
| 197 | break; | 203 | break; |
| 198 | case IB_POLL_WORKQUEUE: | 204 | case IB_POLL_WORKQUEUE: |
| 199 | flush_work(&cq->work); | 205 | cancel_work_sync(&cq->work); |
| 200 | break; | 206 | break; |
| 201 | default: | 207 | default: |
| 202 | WARN_ON_ONCE(1); | 208 | WARN_ON_ONCE(1); |
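
The min_t(u32, ...) in the first hunk works because an unbounded budget of -1, reinterpreted as u32, becomes UINT_MAX and always loses the comparison against IB_POLL_BATCH (16 in this file). A standalone check of that arithmetic:

    #include <stdio.h>

    #define IB_POLL_BATCH 16

    static unsigned int poll_batch(int budget, int completed)
    {
            unsigned int want = (unsigned int)(budget - completed);

            return want < IB_POLL_BATCH ? want : IB_POLL_BATCH;
    }

    int main(void)
    {
            printf("%u\n", poll_batch(-1, 0)); /* 16: unbounded caller */
            printf("%u\n", poll_batch(8, 5));  /* 3: bounded remainder */
            return 0;
    }
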
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 593d2ce6ec7c..7c9e34d679d3 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
| @@ -336,12 +336,26 @@ int ib_register_device(struct ib_device *device, | |||
| 336 | struct device *parent = device->dev.parent; | 336 | struct device *parent = device->dev.parent; |
| 337 | 337 | ||
| 338 | WARN_ON_ONCE(!parent); | 338 | WARN_ON_ONCE(!parent); |
| 339 | if (!device->dev.dma_ops) | 339 | WARN_ON_ONCE(device->dma_device); |
| 340 | device->dev.dma_ops = parent->dma_ops; | 340 | if (device->dev.dma_ops) { |
| 341 | if (!device->dev.dma_mask) | 341 | /* |
| 342 | device->dev.dma_mask = parent->dma_mask; | 342 | * The caller provided custom DMA operations. Copy the |
| 343 | if (!device->dev.coherent_dma_mask) | 343 | * DMA-related fields that are used by e.g. dma_alloc_coherent() |
| 344 | device->dev.coherent_dma_mask = parent->coherent_dma_mask; | 344 | * into device->dev. |
| 345 | */ | ||
| 346 | device->dma_device = &device->dev; | ||
| 347 | if (!device->dev.dma_mask) | ||
| 348 | device->dev.dma_mask = parent->dma_mask; | ||
| 349 | if (!device->dev.coherent_dma_mask) | ||
| 350 | device->dev.coherent_dma_mask = | ||
| 351 | parent->coherent_dma_mask; | ||
| 352 | } else { | ||
| 353 | /* | ||
| 354 | * The caller did not provide custom DMA operations. Use the | ||
| 355 | * DMA mapping operations of the parent device. | ||
| 356 | */ | ||
| 357 | device->dma_device = parent; | ||
| 358 | } | ||
| 345 | 359 | ||
| 346 | mutex_lock(&device_mutex); | 360 | mutex_lock(&device_mutex); |
| 347 | 361 | ||
| @@ -1015,8 +1029,7 @@ static int __init ib_core_init(void) | |||
| 1015 | return -ENOMEM; | 1029 | return -ENOMEM; |
| 1016 | 1030 | ||
| 1017 | ib_comp_wq = alloc_workqueue("ib-comp-wq", | 1031 | ib_comp_wq = alloc_workqueue("ib-comp-wq", |
| 1018 | WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, | 1032 | WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0); |
| 1019 | WQ_UNBOUND_MAX_ACTIVE); | ||
| 1020 | if (!ib_comp_wq) { | 1033 | if (!ib_comp_wq) { |
| 1021 | ret = -ENOMEM; | 1034 | ret = -ENOMEM; |
| 1022 | goto err; | 1035 | goto err; |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 0f5d43d1f5fc..70c3e9e79508 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c | |||
| @@ -160,6 +160,9 @@ int i40iw_inetaddr_event(struct notifier_block *notifier, | |||
| 160 | return NOTIFY_DONE; | 160 | return NOTIFY_DONE; |
| 161 | 161 | ||
| 162 | iwdev = &hdl->device; | 162 | iwdev = &hdl->device; |
| 163 | if (iwdev->init_state < INET_NOTIFIER) | ||
| 164 | return NOTIFY_DONE; | ||
| 165 | |||
| 163 | netdev = iwdev->ldev->netdev; | 166 | netdev = iwdev->ldev->netdev; |
| 164 | upper_dev = netdev_master_upper_dev_get(netdev); | 167 | upper_dev = netdev_master_upper_dev_get(netdev); |
| 165 | if (netdev != event_netdev) | 168 | if (netdev != event_netdev) |
| @@ -214,6 +217,9 @@ int i40iw_inet6addr_event(struct notifier_block *notifier, | |||
| 214 | return NOTIFY_DONE; | 217 | return NOTIFY_DONE; |
| 215 | 218 | ||
| 216 | iwdev = &hdl->device; | 219 | iwdev = &hdl->device; |
| 220 | if (iwdev->init_state < INET_NOTIFIER) | ||
| 221 | return NOTIFY_DONE; | ||
| 222 | |||
| 217 | netdev = iwdev->ldev->netdev; | 223 | netdev = iwdev->ldev->netdev; |
| 218 | if (netdev != event_netdev) | 224 | if (netdev != event_netdev) |
| 219 | return NOTIFY_DONE; | 225 | return NOTIFY_DONE; |
| @@ -260,6 +266,8 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void * | |||
| 260 | if (!iwhdl) | 266 | if (!iwhdl) |
| 261 | return NOTIFY_DONE; | 267 | return NOTIFY_DONE; |
| 262 | iwdev = &iwhdl->device; | 268 | iwdev = &iwhdl->device; |
| 269 | if (iwdev->init_state < INET_NOTIFIER) | ||
| 270 | return NOTIFY_DONE; | ||
| 263 | p = (__be32 *)neigh->primary_key; | 271 | p = (__be32 *)neigh->primary_key; |
| 264 | i40iw_copy_ip_ntohl(local_ipaddr, p); | 272 | i40iw_copy_ip_ntohl(local_ipaddr, p); |
| 265 | if (neigh->nud_state & NUD_VALID) { | 273 | if (neigh->nud_state & NUD_VALID) { |
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index bc9fb144e57b..c52edeafd616 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | |||
| @@ -372,7 +372,7 @@ static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id, | |||
| 372 | return 0; | 372 | return 0; |
| 373 | } | 373 | } |
| 374 | 374 | ||
| 375 | static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id, | 375 | static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id, |
| 376 | bool dpp_pool) | 376 | bool dpp_pool) |
| 377 | { | 377 | { |
| 378 | int status; | 378 | int status; |
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 12c4208fd701..af9f596bb68b 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c | |||
| @@ -7068,7 +7068,7 @@ static void qib_7322_txchk_change(struct qib_devdata *dd, u32 start, | |||
| 7068 | unsigned long flags; | 7068 | unsigned long flags; |
| 7069 | 7069 | ||
| 7070 | while (wait) { | 7070 | while (wait) { |
| 7071 | unsigned long shadow; | 7071 | unsigned long shadow = 0; |
| 7072 | int cstart, previ = -1; | 7072 | int cstart, previ = -1; |
| 7073 | 7073 | ||
| 7074 | /* | 7074 | /* |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index 3cd96c1b9502..9fbe22d3467b 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | |||
| @@ -69,6 +69,9 @@ | |||
| 69 | */ | 69 | */ |
| 70 | #define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820 | 70 | #define PCI_DEVICE_ID_VMWARE_PVRDMA 0x0820 |
| 71 | 71 | ||
| 72 | #define PVRDMA_NUM_RING_PAGES 4 | ||
| 73 | #define PVRDMA_QP_NUM_HEADER_PAGES 1 | ||
| 74 | |||
| 72 | struct pvrdma_dev; | 75 | struct pvrdma_dev; |
| 73 | 76 | ||
| 74 | struct pvrdma_page_dir { | 77 | struct pvrdma_page_dir { |
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h index e69d6f3cae32..09078ccfaec7 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_dev_api.h | |||
| @@ -132,7 +132,7 @@ enum pvrdma_pci_resource { | |||
| 132 | 132 | ||
| 133 | enum pvrdma_device_ctl { | 133 | enum pvrdma_device_ctl { |
| 134 | PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */ | 134 | PVRDMA_DEVICE_CTL_ACTIVATE, /* Activate device. */ |
| 135 | PVRDMA_DEVICE_CTL_QUIESCE, /* Quiesce device. */ | 135 | PVRDMA_DEVICE_CTL_UNQUIESCE, /* Unquiesce device. */ |
| 136 | PVRDMA_DEVICE_CTL_RESET, /* Reset device. */ | 136 | PVRDMA_DEVICE_CTL_RESET, /* Reset device. */ |
| 137 | }; | 137 | }; |
| 138 | 138 | ||
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c index 100bea5c42ff..34ebc7615411 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c | |||
| @@ -56,7 +56,7 @@ | |||
| 56 | #include "pvrdma.h" | 56 | #include "pvrdma.h" |
| 57 | 57 | ||
| 58 | #define DRV_NAME "vmw_pvrdma" | 58 | #define DRV_NAME "vmw_pvrdma" |
| 59 | #define DRV_VERSION "1.0.0.0-k" | 59 | #define DRV_VERSION "1.0.1.0-k" |
| 60 | 60 | ||
| 61 | static DEFINE_MUTEX(pvrdma_device_list_lock); | 61 | static DEFINE_MUTEX(pvrdma_device_list_lock); |
| 62 | static LIST_HEAD(pvrdma_device_list); | 62 | static LIST_HEAD(pvrdma_device_list); |
| @@ -660,7 +660,16 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev, | |||
| 660 | pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); | 660 | pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR); |
| 661 | break; | 661 | break; |
| 662 | case NETDEV_UP: | 662 | case NETDEV_UP: |
| 663 | pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); | 663 | pvrdma_write_reg(dev, PVRDMA_REG_CTL, |
| 664 | PVRDMA_DEVICE_CTL_UNQUIESCE); | ||
| 665 | |||
| 666 | mb(); | ||
| 667 | |||
| 668 | if (pvrdma_read_reg(dev, PVRDMA_REG_ERR)) | ||
| 669 | dev_err(&dev->pdev->dev, | ||
| 670 | "failed to activate device during link up\n"); | ||
| 671 | else | ||
| 672 | pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE); | ||
| 664 | break; | 673 | break; |
| 665 | default: | 674 | default: |
| 666 | dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n", | 675 | dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n", |
| @@ -858,7 +867,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, | |||
| 858 | dev->dsr->resp_slot_dma = (u64)slot_dma; | 867 | dev->dsr->resp_slot_dma = (u64)slot_dma; |
| 859 | 868 | ||
| 860 | /* Async event ring */ | 869 | /* Async event ring */ |
| 861 | dev->dsr->async_ring_pages.num_pages = 4; | 870 | dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES; |
| 862 | ret = pvrdma_page_dir_init(dev, &dev->async_pdir, | 871 | ret = pvrdma_page_dir_init(dev, &dev->async_pdir, |
| 863 | dev->dsr->async_ring_pages.num_pages, true); | 872 | dev->dsr->async_ring_pages.num_pages, true); |
| 864 | if (ret) | 873 | if (ret) |
| @@ -867,7 +876,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev, | |||
| 867 | dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma; | 876 | dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma; |
| 868 | 877 | ||
| 869 | /* CQ notification ring */ | 878 | /* CQ notification ring */ |
| 870 | dev->dsr->cq_ring_pages.num_pages = 4; | 879 | dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES; |
| 871 | ret = pvrdma_page_dir_init(dev, &dev->cq_pdir, | 880 | ret = pvrdma_page_dir_init(dev, &dev->cq_pdir, |
| 872 | dev->dsr->cq_ring_pages.num_pages, true); | 881 | dev->dsr->cq_ring_pages.num_pages, true); |
| 873 | if (ret) | 882 | if (ret) |
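[Annotation] On NETDEV_UP the pvrdma driver now kicks the device out of quiescence and reports PORT_ACTIVE only if the device did not flag an error; the mb() orders the control-register write against the subsequent status read. A generic "kick, then verify" MMIO sketch under assumed register pointers (ctl_reg, err_reg) and a made-up command value, not the driver's actual register map:

    #include <linux/types.h>
    #include <linux/io.h>

    #define CTL_UNQUIESCE 1 /* hypothetical command value */

    static bool activate_device(void __iomem *ctl_reg, void __iomem *err_reg)
    {
        iowrite32(CTL_UNQUIESCE, ctl_reg);

        /* Order the command write before the status read below. */
        mb();

        /* The device reports failure through its error register. */
        return ioread32(err_reg) == 0;
    }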
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c index dbbfd35e7da7..30062aad3af1 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c | |||
| @@ -170,8 +170,9 @@ static int pvrdma_set_sq_size(struct pvrdma_dev *dev, struct ib_qp_cap *req_cap, | |||
| 170 | sizeof(struct pvrdma_sge) * | 170 | sizeof(struct pvrdma_sge) * |
| 171 | qp->sq.max_sg); | 171 | qp->sq.max_sg); |
| 172 | /* Note: one extra page for the header. */ | 172 | /* Note: one extra page for the header. */ |
| 173 | qp->npages_send = 1 + (qp->sq.wqe_cnt * qp->sq.wqe_size + | 173 | qp->npages_send = PVRDMA_QP_NUM_HEADER_PAGES + |
| 174 | PAGE_SIZE - 1) / PAGE_SIZE; | 174 | (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) / |
| 175 | PAGE_SIZE; | ||
| 175 | 176 | ||
| 176 | return 0; | 177 | return 0; |
| 177 | } | 178 | } |
| @@ -288,7 +289,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, | |||
| 288 | qp->npages = qp->npages_send + qp->npages_recv; | 289 | qp->npages = qp->npages_send + qp->npages_recv; |
| 289 | 290 | ||
| 290 | /* Skip header page. */ | 291 | /* Skip header page. */ |
| 291 | qp->sq.offset = PAGE_SIZE; | 292 | qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE; |
| 292 | 293 | ||
| 293 | /* Recv queue pages are after send pages. */ | 294 | /* Recv queue pages are after send pages. */ |
| 294 | qp->rq.offset = qp->npages_send * PAGE_SIZE; | 295 | qp->rq.offset = qp->npages_send * PAGE_SIZE; |
| @@ -341,7 +342,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd, | |||
| 341 | cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type); | 342 | cmd->qp_type = ib_qp_type_to_pvrdma(init_attr->qp_type); |
| 342 | cmd->access_flags = IB_ACCESS_LOCAL_WRITE; | 343 | cmd->access_flags = IB_ACCESS_LOCAL_WRITE; |
| 343 | cmd->total_chunks = qp->npages; | 344 | cmd->total_chunks = qp->npages; |
| 344 | cmd->send_chunks = qp->npages_send - 1; | 345 | cmd->send_chunks = qp->npages_send - PVRDMA_QP_NUM_HEADER_PAGES; |
| 345 | cmd->pdir_dma = qp->pdir.dir_dma; | 346 | cmd->pdir_dma = qp->pdir.dir_dma; |
| 346 | 347 | ||
| 347 | dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n", | 348 | dev_dbg(&dev->pdev->dev, "create queuepair with %d, %d, %d, %d\n", |
| @@ -554,13 +555,13 @@ out: | |||
| 554 | return ret; | 555 | return ret; |
| 555 | } | 556 | } |
| 556 | 557 | ||
| 557 | static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n) | 558 | static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n) |
| 558 | { | 559 | { |
| 559 | return pvrdma_page_dir_get_ptr(&qp->pdir, | 560 | return pvrdma_page_dir_get_ptr(&qp->pdir, |
| 560 | qp->sq.offset + n * qp->sq.wqe_size); | 561 | qp->sq.offset + n * qp->sq.wqe_size); |
| 561 | } | 562 | } |
| 562 | 563 | ||
| 563 | static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n) | 564 | static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n) |
| 564 | { | 565 | { |
| 565 | return pvrdma_page_dir_get_ptr(&qp->pdir, | 566 | return pvrdma_page_dir_get_ptr(&qp->pdir, |
| 566 | qp->rq.offset + n * qp->rq.wqe_size); | 567 | qp->rq.offset + n * qp->rq.wqe_size); |
| @@ -598,9 +599,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 598 | unsigned long flags; | 599 | unsigned long flags; |
| 599 | struct pvrdma_sq_wqe_hdr *wqe_hdr; | 600 | struct pvrdma_sq_wqe_hdr *wqe_hdr; |
| 600 | struct pvrdma_sge *sge; | 601 | struct pvrdma_sge *sge; |
| 601 | int i, index; | 602 | int i, ret; |
| 602 | int nreq; | ||
| 603 | int ret; | ||
| 604 | 603 | ||
| 605 | /* | 604 | /* |
| 606 | * In states lower than RTS, we can fail immediately. In other states, | 605 | * In states lower than RTS, we can fail immediately. In other states, |
| @@ -613,9 +612,8 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 613 | 612 | ||
| 614 | spin_lock_irqsave(&qp->sq.lock, flags); | 613 | spin_lock_irqsave(&qp->sq.lock, flags); |
| 615 | 614 | ||
| 616 | index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt); | 615 | while (wr) { |
| 617 | for (nreq = 0; wr; nreq++, wr = wr->next) { | 616 | unsigned int tail = 0; |
| 618 | unsigned int tail; | ||
| 619 | 617 | ||
| 620 | if (unlikely(!pvrdma_idx_ring_has_space( | 618 | if (unlikely(!pvrdma_idx_ring_has_space( |
| 621 | qp->sq.ring, qp->sq.wqe_cnt, &tail))) { | 619 | qp->sq.ring, qp->sq.wqe_cnt, &tail))) { |
| @@ -680,7 +678,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 680 | } | 678 | } |
| 681 | } | 679 | } |
| 682 | 680 | ||
| 683 | wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index); | 681 | wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail); |
| 684 | memset(wqe_hdr, 0, sizeof(*wqe_hdr)); | 682 | memset(wqe_hdr, 0, sizeof(*wqe_hdr)); |
| 685 | wqe_hdr->wr_id = wr->wr_id; | 683 | wqe_hdr->wr_id = wr->wr_id; |
| 686 | wqe_hdr->num_sge = wr->num_sge; | 684 | wqe_hdr->num_sge = wr->num_sge; |
| @@ -771,12 +769,11 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 771 | /* Make sure wqe is written before index update */ | 769 | /* Make sure wqe is written before index update */ |
| 772 | smp_wmb(); | 770 | smp_wmb(); |
| 773 | 771 | ||
| 774 | index++; | ||
| 775 | if (unlikely(index >= qp->sq.wqe_cnt)) | ||
| 776 | index = 0; | ||
| 777 | /* Update shared sq ring */ | 772 | /* Update shared sq ring */ |
| 778 | pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail, | 773 | pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail, |
| 779 | qp->sq.wqe_cnt); | 774 | qp->sq.wqe_cnt); |
| 775 | |||
| 776 | wr = wr->next; | ||
| 780 | } | 777 | } |
| 781 | 778 | ||
| 782 | ret = 0; | 779 | ret = 0; |
| @@ -806,7 +803,6 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
| 806 | struct pvrdma_qp *qp = to_vqp(ibqp); | 803 | struct pvrdma_qp *qp = to_vqp(ibqp); |
| 807 | struct pvrdma_rq_wqe_hdr *wqe_hdr; | 804 | struct pvrdma_rq_wqe_hdr *wqe_hdr; |
| 808 | struct pvrdma_sge *sge; | 805 | struct pvrdma_sge *sge; |
| 809 | int index, nreq; | ||
| 810 | int ret = 0; | 806 | int ret = 0; |
| 811 | int i; | 807 | int i; |
| 812 | 808 | ||
| @@ -821,9 +817,8 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
| 821 | 817 | ||
| 822 | spin_lock_irqsave(&qp->rq.lock, flags); | 818 | spin_lock_irqsave(&qp->rq.lock, flags); |
| 823 | 819 | ||
| 824 | index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt); | 820 | while (wr) { |
| 825 | for (nreq = 0; wr; nreq++, wr = wr->next) { | 821 | unsigned int tail = 0; |
| 826 | unsigned int tail; | ||
| 827 | 822 | ||
| 828 | if (unlikely(wr->num_sge > qp->rq.max_sg || | 823 | if (unlikely(wr->num_sge > qp->rq.max_sg || |
| 829 | wr->num_sge < 0)) { | 824 | wr->num_sge < 0)) { |
| @@ -843,7 +838,7 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
| 843 | goto out; | 838 | goto out; |
| 844 | } | 839 | } |
| 845 | 840 | ||
| 846 | wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index); | 841 | wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail); |
| 847 | wqe_hdr->wr_id = wr->wr_id; | 842 | wqe_hdr->wr_id = wr->wr_id; |
| 848 | wqe_hdr->num_sge = wr->num_sge; | 843 | wqe_hdr->num_sge = wr->num_sge; |
| 849 | wqe_hdr->total_len = 0; | 844 | wqe_hdr->total_len = 0; |
| @@ -859,12 +854,11 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
| 859 | /* Make sure wqe is written before index update */ | 854 | /* Make sure wqe is written before index update */ |
| 860 | smp_wmb(); | 855 | smp_wmb(); |
| 861 | 856 | ||
| 862 | index++; | ||
| 863 | if (unlikely(index >= qp->rq.wqe_cnt)) | ||
| 864 | index = 0; | ||
| 865 | /* Update shared rq ring */ | 857 | /* Update shared rq ring */ |
| 866 | pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail, | 858 | pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail, |
| 867 | qp->rq.wqe_cnt); | 859 | qp->rq.wqe_cnt); |
| 860 | |||
| 861 | wr = wr->next; | ||
| 868 | } | 862 | } |
| 869 | 863 | ||
| 870 | spin_unlock_irqrestore(&qp->rq.lock, flags); | 864 | spin_unlock_irqrestore(&qp->rq.lock, flags); |
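[Annotation] The pvrdma_qp.c rework drops the driver's private `index` counter: the slot to write comes from the space check itself, and the shared producer index advanced by pvrdma_idx_ring_inc() is the single source of truth, so the two can no longer drift apart. A loose userspace model of the scheme (the real driver additionally issues smp_wmb() before publishing each entry):

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE 8 /* power of two, illustrative */

    struct ring { unsigned int prod, cons; };

    /* Reports whether there is room, and which slot to fill. */
    static bool ring_has_space(const struct ring *r, unsigned int *tail)
    {
        if (r->prod - r->cons >= RING_SIZE)
            return false;
        *tail = r->prod % RING_SIZE;
        return true;
    }

    int main(void)
    {
        struct ring r = { 0, 0 };
        unsigned int tail;

        while (ring_has_space(&r, &tail)) {
            /* write the work request into slot 'tail', barrier, then: */
            printf("posted at slot %u\n", tail);
            r.prod++; /* shared index is the only write cursor */
        }
        return 0;
    }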
diff --git a/drivers/infiniband/sw/rdmavt/mmap.c b/drivers/infiniband/sw/rdmavt/mmap.c index e202b8142759..6b712eecbd37 100644 --- a/drivers/infiniband/sw/rdmavt/mmap.c +++ b/drivers/infiniband/sw/rdmavt/mmap.c | |||
| @@ -170,9 +170,9 @@ struct rvt_mmap_info *rvt_create_mmap_info(struct rvt_dev_info *rdi, | |||
| 170 | 170 | ||
| 171 | spin_lock_irq(&rdi->mmap_offset_lock); | 171 | spin_lock_irq(&rdi->mmap_offset_lock); |
| 172 | if (rdi->mmap_offset == 0) | 172 | if (rdi->mmap_offset == 0) |
| 173 | rdi->mmap_offset = PAGE_SIZE; | 173 | rdi->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA); |
| 174 | ip->offset = rdi->mmap_offset; | 174 | ip->offset = rdi->mmap_offset; |
| 175 | rdi->mmap_offset += size; | 175 | rdi->mmap_offset += ALIGN(size, SHMLBA); |
| 176 | spin_unlock_irq(&rdi->mmap_offset_lock); | 176 | spin_unlock_irq(&rdi->mmap_offset_lock); |
| 177 | 177 | ||
| 178 | INIT_LIST_HEAD(&ip->pending_mmaps); | 178 | INIT_LIST_HEAD(&ip->pending_mmaps); |
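[Annotation] rdmavt, and the matching rxe hunk below, now hand out mmap offsets in SHMLBA-sized steps so user mappings can be cache-colour aligned on VIPT-cache architectures. The arithmetic, standalone, using 16 KiB as an example SHMLBA value:

    #include <stdio.h>

    /* power-of-two alignment, as in the kernel's ALIGN() */
    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))
    #define SHMLBA_DEMO  (4 * 4096) /* e.g. 16 KiB on some ARM parts */

    int main(void)
    {
        unsigned long offset = ALIGN(4096, SHMLBA_DEMO); /* first offset: 16384 */

        /* each object consumes a whole number of SHMLBA units */
        offset += ALIGN(6000, SHMLBA_DEMO);
        printf("next offset: %lu\n", offset); /* 32768 */
        return 0;
    }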
diff --git a/drivers/infiniband/sw/rxe/Kconfig b/drivers/infiniband/sw/rxe/Kconfig index 7d1ac27ed251..6332dedc11e8 100644 --- a/drivers/infiniband/sw/rxe/Kconfig +++ b/drivers/infiniband/sw/rxe/Kconfig | |||
| @@ -22,4 +22,4 @@ config RDMA_RXE | |||
| 22 | To configure and work with soft-RoCE driver please use the | 22 | To configure and work with soft-RoCE driver please use the |
| 23 | following wiki page under "configure Soft-RoCE (RXE)" section: | 23 | following wiki page under "configure Soft-RoCE (RXE)" section: |
| 24 | 24 | ||
| 25 | https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home | 25 | https://github.com/linux-rdma/rdma-core/blob/master/Documentation/rxe.md |
diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c index c572a4c09359..bd812e00988e 100644 --- a/drivers/infiniband/sw/rxe/rxe_mmap.c +++ b/drivers/infiniband/sw/rxe/rxe_mmap.c | |||
| @@ -156,10 +156,10 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, | |||
| 156 | spin_lock_bh(&rxe->mmap_offset_lock); | 156 | spin_lock_bh(&rxe->mmap_offset_lock); |
| 157 | 157 | ||
| 158 | if (rxe->mmap_offset == 0) | 158 | if (rxe->mmap_offset == 0) |
| 159 | rxe->mmap_offset = PAGE_SIZE; | 159 | rxe->mmap_offset = ALIGN(PAGE_SIZE, SHMLBA); |
| 160 | 160 | ||
| 161 | ip->info.offset = rxe->mmap_offset; | 161 | ip->info.offset = rxe->mmap_offset; |
| 162 | rxe->mmap_offset += size; | 162 | rxe->mmap_offset += ALIGN(size, SHMLBA); |
| 163 | 163 | ||
| 164 | spin_unlock_bh(&rxe->mmap_offset_lock); | 164 | spin_unlock_bh(&rxe->mmap_offset_lock); |
| 165 | 165 | ||
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index dbfde0dc6ff7..9f95f50b2909 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c | |||
| @@ -729,11 +729,11 @@ next_wqe: | |||
| 729 | ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb); | 729 | ret = rxe_xmit_packet(to_rdev(qp->ibqp.device), qp, &pkt, skb); |
| 730 | if (ret) { | 730 | if (ret) { |
| 731 | qp->need_req_skb = 1; | 731 | qp->need_req_skb = 1; |
| 732 | kfree_skb(skb); | ||
| 733 | 732 | ||
| 734 | rollback_state(wqe, qp, &rollback_wqe, rollback_psn); | 733 | rollback_state(wqe, qp, &rollback_wqe, rollback_psn); |
| 735 | 734 | ||
| 736 | if (ret == -EAGAIN) { | 735 | if (ret == -EAGAIN) { |
| 736 | kfree_skb(skb); | ||
| 737 | rxe_run_task(&qp->req.task, 1); | 737 | rxe_run_task(&qp->req.task, 1); |
| 738 | goto exit; | 738 | goto exit; |
| 739 | } | 739 | } |
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index d404a8aba7af..c9dd385ce62e 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c | |||
| @@ -813,18 +813,17 @@ static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt) | |||
| 813 | WARN_ON_ONCE(1); | 813 | WARN_ON_ONCE(1); |
| 814 | } | 814 | } |
| 815 | 815 | ||
| 816 | /* We successfully processed this new request. */ | ||
| 817 | qp->resp.msn++; | ||
| 818 | |||
| 819 | /* next expected psn, read handles this separately */ | 816 | /* next expected psn, read handles this separately */ |
| 820 | qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; | 817 | qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK; |
| 821 | 818 | ||
| 822 | qp->resp.opcode = pkt->opcode; | 819 | qp->resp.opcode = pkt->opcode; |
| 823 | qp->resp.status = IB_WC_SUCCESS; | 820 | qp->resp.status = IB_WC_SUCCESS; |
| 824 | 821 | ||
| 825 | if (pkt->mask & RXE_COMP_MASK) | 822 | if (pkt->mask & RXE_COMP_MASK) { |
| 823 | /* We successfully processed this new request. */ | ||
| 824 | qp->resp.msn++; | ||
| 826 | return RESPST_COMPLETE; | 825 | return RESPST_COMPLETE; |
| 827 | else if (qp_type(qp) == IB_QPT_RC) | 826 | } else if (qp_type(qp) == IB_QPT_RC) |
| 828 | return RESPST_ACKNOWLEDGE; | 827 | return RESPST_ACKNOWLEDGE; |
| 829 | else | 828 | else |
| 830 | return RESPST_CLEANUP; | 829 | return RESPST_CLEANUP; |
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 9d0b22ad58c1..c1ae4aeae2f9 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h | |||
| @@ -430,6 +430,7 @@ struct iser_fr_desc { | |||
| 430 | struct list_head list; | 430 | struct list_head list; |
| 431 | struct iser_reg_resources rsc; | 431 | struct iser_reg_resources rsc; |
| 432 | struct iser_pi_context *pi_ctx; | 432 | struct iser_pi_context *pi_ctx; |
| 433 | struct list_head all_list; | ||
| 433 | }; | 434 | }; |
| 434 | 435 | ||
| 435 | /** | 436 | /** |
| @@ -443,6 +444,7 @@ struct iser_fr_pool { | |||
| 443 | struct list_head list; | 444 | struct list_head list; |
| 444 | spinlock_t lock; | 445 | spinlock_t lock; |
| 445 | int size; | 446 | int size; |
| 447 | struct list_head all_list; | ||
| 446 | }; | 448 | }; |
| 447 | 449 | ||
| 448 | /** | 450 | /** |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 30b622f2ab73..c538a38c91ce 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
| @@ -362,6 +362,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, | |||
| 362 | int i, ret; | 362 | int i, ret; |
| 363 | 363 | ||
| 364 | INIT_LIST_HEAD(&fr_pool->list); | 364 | INIT_LIST_HEAD(&fr_pool->list); |
| 365 | INIT_LIST_HEAD(&fr_pool->all_list); | ||
| 365 | spin_lock_init(&fr_pool->lock); | 366 | spin_lock_init(&fr_pool->lock); |
| 366 | fr_pool->size = 0; | 367 | fr_pool->size = 0; |
| 367 | for (i = 0; i < cmds_max; i++) { | 368 | for (i = 0; i < cmds_max; i++) { |
| @@ -373,6 +374,7 @@ int iser_alloc_fastreg_pool(struct ib_conn *ib_conn, | |||
| 373 | } | 374 | } |
| 374 | 375 | ||
| 375 | list_add_tail(&desc->list, &fr_pool->list); | 376 | list_add_tail(&desc->list, &fr_pool->list); |
| 377 | list_add_tail(&desc->all_list, &fr_pool->all_list); | ||
| 376 | fr_pool->size++; | 378 | fr_pool->size++; |
| 377 | } | 379 | } |
| 378 | 380 | ||
| @@ -392,13 +394,13 @@ void iser_free_fastreg_pool(struct ib_conn *ib_conn) | |||
| 392 | struct iser_fr_desc *desc, *tmp; | 394 | struct iser_fr_desc *desc, *tmp; |
| 393 | int i = 0; | 395 | int i = 0; |
| 394 | 396 | ||
| 395 | if (list_empty(&fr_pool->list)) | 397 | if (list_empty(&fr_pool->all_list)) |
| 396 | return; | 398 | return; |
| 397 | 399 | ||
| 398 | iser_info("freeing conn %p fr pool\n", ib_conn); | 400 | iser_info("freeing conn %p fr pool\n", ib_conn); |
| 399 | 401 | ||
| 400 | list_for_each_entry_safe(desc, tmp, &fr_pool->list, list) { | 402 | list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) { |
| 401 | list_del(&desc->list); | 403 | list_del(&desc->all_list); |
| 402 | iser_free_reg_res(&desc->rsc); | 404 | iser_free_reg_res(&desc->rsc); |
| 403 | if (desc->pi_ctx) | 405 | if (desc->pi_ctx) |
| 404 | iser_free_pi_ctx(desc->pi_ctx); | 406 | iser_free_pi_ctx(desc->pi_ctx); |
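[Annotation] The iser hunks add a second membership list: descriptors come and go on fr_pool->list as they are claimed for I/O, so tearing down by walking only that list would miss (and leak) whatever was in flight; all_list records every allocation permanently. A sketch of the two-list idea in kernel list terms, with illustrative names:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct desc {
        struct list_head list;     /* free-list linkage, transient */
        struct list_head all_list; /* permanent: set once at allocation */
    };

    struct pool {
        struct list_head free;
        struct list_head all;
    };

    static struct desc *pool_alloc_one(struct pool *p)
    {
        struct desc *d = kzalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
            return NULL;
        list_add_tail(&d->list, &p->free);
        list_add_tail(&d->all_list, &p->all);
        return d;
    }

    static void pool_destroy(struct pool *p)
    {
        struct desc *d, *tmp;

        /* walk all_list, not the free list: busy descriptors are
         * absent from the free list but must still be released */
        list_for_each_entry_safe(d, tmp, &p->all, all_list) {
            list_del(&d->all_list);
            kfree(d);
        }
    }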
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c index d96aa27dfcdc..db64adfbe1af 100644 --- a/drivers/input/joystick/iforce/iforce-usb.c +++ b/drivers/input/joystick/iforce/iforce-usb.c | |||
| @@ -141,6 +141,9 @@ static int iforce_usb_probe(struct usb_interface *intf, | |||
| 141 | 141 | ||
| 142 | interface = intf->cur_altsetting; | 142 | interface = intf->cur_altsetting; |
| 143 | 143 | ||
| 144 | if (interface->desc.bNumEndpoints < 2) | ||
| 145 | return -ENODEV; | ||
| 146 | |||
| 144 | epirq = &interface->endpoint[0].desc; | 147 | epirq = &interface->endpoint[0].desc; |
| 145 | epout = &interface->endpoint[1].desc; | 148 | epout = &interface->endpoint[1].desc; |
| 146 | 149 | ||
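[Annotation] This and the following input-driver hunks (cm109, ims-pcu, yealink, and later hanwang, kbtab, sur40) all close the same hole: probe paths indexed interface->endpoint[] at fixed positions without checking how many endpoints the, possibly malicious, device actually advertised. The defensive shape, sketched for a hypothetical driver needing one interrupt-in endpoint at index 0:

    #include <linux/errno.h>
    #include <linux/usb.h>

    static int sketch_probe(struct usb_interface *intf,
                            const struct usb_device_id *id)
    {
        struct usb_host_interface *alt = intf->cur_altsetting;
        struct usb_endpoint_descriptor *ep;

        /* Never trust descriptor-derived array bounds. */
        if (alt->desc.bNumEndpoints < 1)
            return -ENODEV;

        ep = &alt->endpoint[0].desc;
        if (!usb_endpoint_is_int_in(ep))
            return -ENODEV;

        /* ... normal setup continues here ... */
        return 0;
    }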
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c index 9cc6d057c302..23c191a2a071 100644 --- a/drivers/input/misc/cm109.c +++ b/drivers/input/misc/cm109.c | |||
| @@ -700,6 +700,10 @@ static int cm109_usb_probe(struct usb_interface *intf, | |||
| 700 | int error = -ENOMEM; | 700 | int error = -ENOMEM; |
| 701 | 701 | ||
| 702 | interface = intf->cur_altsetting; | 702 | interface = intf->cur_altsetting; |
| 703 | |||
| 704 | if (interface->desc.bNumEndpoints < 1) | ||
| 705 | return -ENODEV; | ||
| 706 | |||
| 703 | endpoint = &interface->endpoint[0].desc; | 707 | endpoint = &interface->endpoint[0].desc; |
| 704 | 708 | ||
| 705 | if (!usb_endpoint_is_int_in(endpoint)) | 709 | if (!usb_endpoint_is_int_in(endpoint)) |
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index 9c0ea36913b4..f4e8fbec6a94 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c | |||
| @@ -1667,6 +1667,10 @@ static int ims_pcu_parse_cdc_data(struct usb_interface *intf, struct ims_pcu *pc | |||
| 1667 | return -EINVAL; | 1667 | return -EINVAL; |
| 1668 | 1668 | ||
| 1669 | alt = pcu->ctrl_intf->cur_altsetting; | 1669 | alt = pcu->ctrl_intf->cur_altsetting; |
| 1670 | |||
| 1671 | if (alt->desc.bNumEndpoints < 1) | ||
| 1672 | return -ENODEV; | ||
| 1673 | |||
| 1670 | pcu->ep_ctrl = &alt->endpoint[0].desc; | 1674 | pcu->ep_ctrl = &alt->endpoint[0].desc; |
| 1671 | pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl); | 1675 | pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl); |
| 1672 | 1676 | ||
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c index 79c964c075f1..6e7ff9561d92 100644 --- a/drivers/input/misc/yealink.c +++ b/drivers/input/misc/yealink.c | |||
| @@ -875,6 +875,10 @@ static int usb_probe(struct usb_interface *intf, const struct usb_device_id *id) | |||
| 875 | int ret, pipe, i; | 875 | int ret, pipe, i; |
| 876 | 876 | ||
| 877 | interface = intf->cur_altsetting; | 877 | interface = intf->cur_altsetting; |
| 878 | |||
| 879 | if (interface->desc.bNumEndpoints < 1) | ||
| 880 | return -ENODEV; | ||
| 881 | |||
| 878 | endpoint = &interface->endpoint[0].desc; | 882 | endpoint = &interface->endpoint[0].desc; |
| 879 | if (!usb_endpoint_is_int_in(endpoint)) | 883 | if (!usb_endpoint_is_int_in(endpoint)) |
| 880 | return -ENODEV; | 884 | return -ENODEV; |
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 72b28ebfe360..f210e19ddba6 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c | |||
| @@ -1282,10 +1282,8 @@ static int alps_decode_ss4_v2(struct alps_fields *f, | |||
| 1282 | /* handle buttons */ | 1282 | /* handle buttons */ |
| 1283 | if (pkt_id == SS4_PACKET_ID_STICK) { | 1283 | if (pkt_id == SS4_PACKET_ID_STICK) { |
| 1284 | f->ts_left = !!(SS4_BTN_V2(p) & 0x01); | 1284 | f->ts_left = !!(SS4_BTN_V2(p) & 0x01); |
| 1285 | if (!(priv->flags & ALPS_BUTTONPAD)) { | 1285 | f->ts_right = !!(SS4_BTN_V2(p) & 0x02); |
| 1286 | f->ts_right = !!(SS4_BTN_V2(p) & 0x02); | 1286 | f->ts_middle = !!(SS4_BTN_V2(p) & 0x04); |
| 1287 | f->ts_middle = !!(SS4_BTN_V2(p) & 0x04); | ||
| 1288 | } | ||
| 1289 | } else { | 1287 | } else { |
| 1290 | f->left = !!(SS4_BTN_V2(p) & 0x01); | 1288 | f->left = !!(SS4_BTN_V2(p) & 0x01); |
| 1291 | if (!(priv->flags & ALPS_BUTTONPAD)) { | 1289 | if (!(priv->flags & ALPS_BUTTONPAD)) { |
| @@ -2462,14 +2460,34 @@ static int alps_update_device_area_ss4_v2(unsigned char otp[][4], | |||
| 2462 | int num_y_electrode; | 2460 | int num_y_electrode; |
| 2463 | int x_pitch, y_pitch, x_phys, y_phys; | 2461 | int x_pitch, y_pitch, x_phys, y_phys; |
| 2464 | 2462 | ||
| 2465 | num_x_electrode = SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F); | 2463 | if (IS_SS4PLUS_DEV(priv->dev_id)) { |
| 2466 | num_y_electrode = SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F); | 2464 | num_x_electrode = |
| 2465 | SS4PLUS_NUMSENSOR_XOFFSET + (otp[0][2] & 0x0F); | ||
| 2466 | num_y_electrode = | ||
| 2467 | SS4PLUS_NUMSENSOR_YOFFSET + ((otp[0][2] >> 4) & 0x0F); | ||
| 2468 | |||
| 2469 | priv->x_max = | ||
| 2470 | (num_x_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE; | ||
| 2471 | priv->y_max = | ||
| 2472 | (num_y_electrode - 1) * SS4PLUS_COUNT_PER_ELECTRODE; | ||
| 2467 | 2473 | ||
| 2468 | priv->x_max = (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE; | 2474 | x_pitch = (otp[0][1] & 0x0F) + SS4PLUS_MIN_PITCH_MM; |
| 2469 | priv->y_max = (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE; | 2475 | y_pitch = ((otp[0][1] >> 4) & 0x0F) + SS4PLUS_MIN_PITCH_MM; |
| 2470 | 2476 | ||
| 2471 | x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM; | 2477 | } else { |
| 2472 | y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM; | 2478 | num_x_electrode = |
| 2479 | SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F); | ||
| 2480 | num_y_electrode = | ||
| 2481 | SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F); | ||
| 2482 | |||
| 2483 | priv->x_max = | ||
| 2484 | (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE; | ||
| 2485 | priv->y_max = | ||
| 2486 | (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE; | ||
| 2487 | |||
| 2488 | x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM; | ||
| 2489 | y_pitch = ((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM; | ||
| 2490 | } | ||
| 2473 | 2491 | ||
| 2474 | x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */ | 2492 | x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */ |
| 2475 | y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */ | 2493 | y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */ |
| @@ -2485,7 +2503,10 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4], | |||
| 2485 | { | 2503 | { |
| 2486 | unsigned char is_btnless; | 2504 | unsigned char is_btnless; |
| 2487 | 2505 | ||
| 2488 | is_btnless = (otp[1][1] >> 3) & 0x01; | 2506 | if (IS_SS4PLUS_DEV(priv->dev_id)) |
| 2507 | is_btnless = (otp[1][0] >> 1) & 0x01; | ||
| 2508 | else | ||
| 2509 | is_btnless = (otp[1][1] >> 3) & 0x01; | ||
| 2489 | 2510 | ||
| 2490 | if (is_btnless) | 2511 | if (is_btnless) |
| 2491 | priv->flags |= ALPS_BUTTONPAD; | 2512 | priv->flags |= ALPS_BUTTONPAD; |
| @@ -2493,6 +2514,21 @@ static int alps_update_btn_info_ss4_v2(unsigned char otp[][4], | |||
| 2493 | return 0; | 2514 | return 0; |
| 2494 | } | 2515 | } |
| 2495 | 2516 | ||
| 2517 | static int alps_update_dual_info_ss4_v2(unsigned char otp[][4], | ||
| 2518 | struct alps_data *priv) | ||
| 2519 | { | ||
| 2520 | bool is_dual = false; | ||
| 2521 | |||
| 2522 | if (IS_SS4PLUS_DEV(priv->dev_id)) | ||
| 2523 | is_dual = (otp[0][0] >> 4) & 0x01; | ||
| 2524 | |||
| 2525 | if (is_dual) | ||
| 2526 | priv->flags |= ALPS_DUALPOINT | | ||
| 2527 | ALPS_DUALPOINT_WITH_PRESSURE; | ||
| 2528 | |||
| 2529 | return 0; | ||
| 2530 | } | ||
| 2531 | |||
| 2496 | static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, | 2532 | static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, |
| 2497 | struct alps_data *priv) | 2533 | struct alps_data *priv) |
| 2498 | { | 2534 | { |
| @@ -2508,6 +2544,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, | |||
| 2508 | 2544 | ||
| 2509 | alps_update_btn_info_ss4_v2(otp, priv); | 2545 | alps_update_btn_info_ss4_v2(otp, priv); |
| 2510 | 2546 | ||
| 2547 | alps_update_dual_info_ss4_v2(otp, priv); | ||
| 2548 | |||
| 2511 | return 0; | 2549 | return 0; |
| 2512 | } | 2550 | } |
| 2513 | 2551 | ||
| @@ -2753,10 +2791,6 @@ static int alps_set_protocol(struct psmouse *psmouse, | |||
| 2753 | if (alps_set_defaults_ss4_v2(psmouse, priv)) | 2791 | if (alps_set_defaults_ss4_v2(psmouse, priv)) |
| 2754 | return -EIO; | 2792 | return -EIO; |
| 2755 | 2793 | ||
| 2756 | if (priv->fw_ver[1] == 0x1) | ||
| 2757 | priv->flags |= ALPS_DUALPOINT | | ||
| 2758 | ALPS_DUALPOINT_WITH_PRESSURE; | ||
| 2759 | |||
| 2760 | break; | 2794 | break; |
| 2761 | } | 2795 | } |
| 2762 | 2796 | ||
| @@ -2827,10 +2861,7 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) | |||
| 2827 | ec[2] >= 0x90 && ec[2] <= 0x9d) { | 2861 | ec[2] >= 0x90 && ec[2] <= 0x9d) { |
| 2828 | protocol = &alps_v3_protocol_data; | 2862 | protocol = &alps_v3_protocol_data; |
| 2829 | } else if (e7[0] == 0x73 && e7[1] == 0x03 && | 2863 | } else if (e7[0] == 0x73 && e7[1] == 0x03 && |
| 2830 | e7[2] == 0x14 && ec[1] == 0x02) { | 2864 | (e7[2] == 0x14 || e7[2] == 0x28)) { |
| 2831 | protocol = &alps_v8_protocol_data; | ||
| 2832 | } else if (e7[0] == 0x73 && e7[1] == 0x03 && | ||
| 2833 | e7[2] == 0x28 && ec[1] == 0x01) { | ||
| 2834 | protocol = &alps_v8_protocol_data; | 2865 | protocol = &alps_v8_protocol_data; |
| 2835 | } else { | 2866 | } else { |
| 2836 | psmouse_dbg(psmouse, | 2867 | psmouse_dbg(psmouse, |
| @@ -2840,7 +2871,8 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) | |||
| 2840 | } | 2871 | } |
| 2841 | 2872 | ||
| 2842 | if (priv) { | 2873 | if (priv) { |
| 2843 | /* Save the Firmware version */ | 2874 | /* Save Device ID and Firmware version */ |
| 2875 | memcpy(priv->dev_id, e7, 3); | ||
| 2844 | memcpy(priv->fw_ver, ec, 3); | 2876 | memcpy(priv->fw_ver, ec, 3); |
| 2845 | error = alps_set_protocol(psmouse, priv, protocol); | 2877 | error = alps_set_protocol(psmouse, priv, protocol); |
| 2846 | if (error) | 2878 | if (error) |
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index 6d279aa27cb9..4334f2805d93 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h | |||
| @@ -54,6 +54,16 @@ enum SS4_PACKET_ID { | |||
| 54 | 54 | ||
| 55 | #define SS4_MASK_NORMAL_BUTTONS 0x07 | 55 | #define SS4_MASK_NORMAL_BUTTONS 0x07 |
| 56 | 56 | ||
| 57 | #define SS4PLUS_COUNT_PER_ELECTRODE 128 | ||
| 58 | #define SS4PLUS_NUMSENSOR_XOFFSET 16 | ||
| 59 | #define SS4PLUS_NUMSENSOR_YOFFSET 5 | ||
| 60 | #define SS4PLUS_MIN_PITCH_MM 37 | ||
| 61 | |||
| 62 | #define IS_SS4PLUS_DEV(_b) (((_b[0]) == 0x73) && \ | ||
| 63 | ((_b[1]) == 0x03) && \ | ||
| 64 | ((_b[2]) == 0x28) \ | ||
| 65 | ) | ||
| 66 | |||
| 57 | #define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \ | 67 | #define SS4_IS_IDLE_V2(_b) (((_b[0]) == 0x18) && \ |
| 58 | ((_b[1]) == 0x10) && \ | 68 | ((_b[1]) == 0x10) && \ |
| 59 | ((_b[2]) == 0x00) && \ | 69 | ((_b[2]) == 0x00) && \ |
| @@ -283,6 +293,7 @@ struct alps_data { | |||
| 283 | int addr_command; | 293 | int addr_command; |
| 284 | u16 proto_version; | 294 | u16 proto_version; |
| 285 | u8 byte0, mask0; | 295 | u8 byte0, mask0; |
| 296 | u8 dev_id[3]; | ||
| 286 | u8 fw_ver[3]; | 297 | u8 fw_ver[3]; |
| 287 | int flags; | 298 | int flags; |
| 288 | int x_max; | 299 | int x_max; |
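[Annotation] IS_SS4PLUS_DEV() keys the new OTP layout off the E7 report bytes that alps_identify() now saves into priv->dev_id, which is what lets a single v8 protocol entry cover both the 0x14 and 0x28 devices (my reading of these hunks). Exercising the macro against sample ID bytes, standalone:

    #include <stdio.h>

    #define IS_SS4PLUS_DEV(_b) (((_b)[0] == 0x73) && \
                                ((_b)[1] == 0x03) && \
                                ((_b)[2] == 0x28))

    int main(void)
    {
        unsigned char e7_plus[3] = { 0x73, 0x03, 0x28 }; /* SS4-PLUS */
        unsigned char e7_v8[3]   = { 0x73, 0x03, 0x14 }; /* plain SS4 */

        printf("%d %d\n", IS_SS4PLUS_DEV(e7_plus), IS_SS4PLUS_DEV(e7_v8)); /* 1 0 */
        return 0;
    }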
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 352050e9031d..d5ab9ddef3e3 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c | |||
| @@ -218,17 +218,19 @@ static int elan_query_product(struct elan_tp_data *data) | |||
| 218 | 218 | ||
| 219 | static int elan_check_ASUS_special_fw(struct elan_tp_data *data) | 219 | static int elan_check_ASUS_special_fw(struct elan_tp_data *data) |
| 220 | { | 220 | { |
| 221 | if (data->ic_type != 0x0E) | 221 | if (data->ic_type == 0x0E) { |
| 222 | return false; | 222 | switch (data->product_id) { |
| 223 | 223 | case 0x05 ... 0x07: | |
| 224 | switch (data->product_id) { | 224 | case 0x09: |
| 225 | case 0x05 ... 0x07: | 225 | case 0x13: |
| 226 | case 0x09: | 226 | return true; |
| 227 | case 0x13: | 227 | } |
| 228 | } else if (data->ic_type == 0x08 && data->product_id == 0x26) { | ||
| 229 | /* ASUS EeeBook X205TA */ | ||
| 228 | return true; | 230 | return true; |
| 229 | default: | ||
| 230 | return false; | ||
| 231 | } | 231 | } |
| 232 | |||
| 233 | return false; | ||
| 232 | } | 234 | } |
| 233 | 235 | ||
| 234 | static int __elan_initialize(struct elan_tp_data *data) | 236 | static int __elan_initialize(struct elan_tp_data *data) |
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c index 198678613382..34dfee555b20 100644 --- a/drivers/input/rmi4/rmi_f30.c +++ b/drivers/input/rmi4/rmi_f30.c | |||
| @@ -170,6 +170,10 @@ static int rmi_f30_config(struct rmi_function *fn) | |||
| 170 | rmi_get_platform_data(fn->rmi_dev); | 170 | rmi_get_platform_data(fn->rmi_dev); |
| 171 | int error; | 171 | int error; |
| 172 | 172 | ||
| 173 | /* can happen if f30_data.disable is set */ | ||
| 174 | if (!f30) | ||
| 175 | return 0; | ||
| 176 | |||
| 173 | if (pdata->f30_data.trackstick_buttons) { | 177 | if (pdata->f30_data.trackstick_buttons) { |
| 174 | /* Try [re-]establish link to F03. */ | 178 | /* Try [re-]establish link to F03. */ |
| 175 | f30->f03 = rmi_find_function(fn->rmi_dev, 0x03); | 179 | f30->f03 = rmi_find_function(fn->rmi_dev, 0x03); |
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 05afd16ea9c9..312bd6ca9198 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h | |||
| @@ -120,6 +120,13 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { | |||
| 120 | }, | 120 | }, |
| 121 | }, | 121 | }, |
| 122 | { | 122 | { |
| 123 | /* Dell Embedded Box PC 3000 */ | ||
| 124 | .matches = { | ||
| 125 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
| 126 | DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"), | ||
| 127 | }, | ||
| 128 | }, | ||
| 129 | { | ||
| 123 | /* OQO Model 01 */ | 130 | /* OQO Model 01 */ |
| 124 | .matches = { | 131 | .matches = { |
| 125 | DMI_MATCH(DMI_SYS_VENDOR, "OQO"), | 132 | DMI_MATCH(DMI_SYS_VENDOR, "OQO"), |
| @@ -513,6 +520,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { | |||
| 513 | DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), | 520 | DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"), |
| 514 | }, | 521 | }, |
| 515 | }, | 522 | }, |
| 523 | { | ||
| 524 | /* TUXEDO BU1406 */ | ||
| 525 | .matches = { | ||
| 526 | DMI_MATCH(DMI_SYS_VENDOR, "Notebook"), | ||
| 527 | DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), | ||
| 528 | }, | ||
| 529 | }, | ||
| 516 | { } | 530 | { } |
| 517 | }; | 531 | }; |
| 518 | 532 | ||
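[Annotation] Both i8042 additions follow the stock quirk-table pattern: an entry matches on DMI vendor/product strings and the driver disables the corresponding feature (AUX loopback for the Dell box, active multiplexing for the TUXEDO) on matching boards. The table shape, with a made-up machine for illustration:

    #include <linux/init.h>
    #include <linux/dmi.h>

    static const struct dmi_system_id __initconst example_nomux_table[] = {
        {
            /* Hypothetical Example Laptop X1 */
            .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
                DMI_MATCH(DMI_PRODUCT_NAME, "Laptop X1"),
            },
        },
        { } /* terminator */
    };

    /* at init time, roughly: if (dmi_check_system(example_nomux_table))
     *                            disable the quirky feature; */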
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c index cd852059b99e..df4bea96d7ed 100644 --- a/drivers/input/tablet/hanwang.c +++ b/drivers/input/tablet/hanwang.c | |||
| @@ -340,6 +340,9 @@ static int hanwang_probe(struct usb_interface *intf, const struct usb_device_id | |||
| 340 | int error; | 340 | int error; |
| 341 | int i; | 341 | int i; |
| 342 | 342 | ||
| 343 | if (intf->cur_altsetting->desc.bNumEndpoints < 1) | ||
| 344 | return -ENODEV; | ||
| 345 | |||
| 343 | hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL); | 346 | hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL); |
| 344 | input_dev = input_allocate_device(); | 347 | input_dev = input_allocate_device(); |
| 345 | if (!hanwang || !input_dev) { | 348 | if (!hanwang || !input_dev) { |
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c index e850d7e8afbc..4d9d64908b59 100644 --- a/drivers/input/tablet/kbtab.c +++ b/drivers/input/tablet/kbtab.c | |||
| @@ -122,6 +122,9 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i | |||
| 122 | struct input_dev *input_dev; | 122 | struct input_dev *input_dev; |
| 123 | int error = -ENOMEM; | 123 | int error = -ENOMEM; |
| 124 | 124 | ||
| 125 | if (intf->cur_altsetting->desc.bNumEndpoints < 1) | ||
| 126 | return -ENODEV; | ||
| 127 | |||
| 125 | kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); | 128 | kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL); |
| 126 | input_dev = input_allocate_device(); | 129 | input_dev = input_allocate_device(); |
| 127 | if (!kbtab || !input_dev) | 130 | if (!kbtab || !input_dev) |
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index aefb6e11f88a..4c0eecae065c 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c | |||
| @@ -527,6 +527,9 @@ static int sur40_probe(struct usb_interface *interface, | |||
| 527 | if (iface_desc->desc.bInterfaceClass != 0xFF) | 527 | if (iface_desc->desc.bInterfaceClass != 0xFF) |
| 528 | return -ENODEV; | 528 | return -ENODEV; |
| 529 | 529 | ||
| 530 | if (iface_desc->desc.bNumEndpoints < 5) | ||
| 531 | return -ENODEV; | ||
| 532 | |||
| 530 | /* Use endpoint #4 (0x86). */ | 533 | /* Use endpoint #4 (0x86). */ |
| 531 | endpoint = &iface_desc->endpoint[4].desc; | 534 | endpoint = &iface_desc->endpoint[4].desc; |
| 532 | if (endpoint->bEndpointAddress != TOUCH_ENDPOINT) | 535 | if (endpoint->bEndpointAddress != TOUCH_ENDPOINT) |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 98940d1392cb..63cacf5d6cf2 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
| @@ -1234,7 +1234,7 @@ static void __domain_flush_pages(struct protection_domain *domain, | |||
| 1234 | 1234 | ||
| 1235 | build_inv_iommu_pages(&cmd, address, size, domain->id, pde); | 1235 | build_inv_iommu_pages(&cmd, address, size, domain->id, pde); |
| 1236 | 1236 | ||
| 1237 | for (i = 0; i < amd_iommus_present; ++i) { | 1237 | for (i = 0; i < amd_iommu_get_num_iommus(); ++i) { |
| 1238 | if (!domain->dev_iommu[i]) | 1238 | if (!domain->dev_iommu[i]) |
| 1239 | continue; | 1239 | continue; |
| 1240 | 1240 | ||
| @@ -1278,7 +1278,7 @@ static void domain_flush_complete(struct protection_domain *domain) | |||
| 1278 | { | 1278 | { |
| 1279 | int i; | 1279 | int i; |
| 1280 | 1280 | ||
| 1281 | for (i = 0; i < amd_iommus_present; ++i) { | 1281 | for (i = 0; i < amd_iommu_get_num_iommus(); ++i) { |
| 1282 | if (domain && !domain->dev_iommu[i]) | 1282 | if (domain && !domain->dev_iommu[i]) |
| 1283 | continue; | 1283 | continue; |
| 1284 | 1284 | ||
| @@ -3202,7 +3202,7 @@ static void amd_iommu_get_resv_regions(struct device *dev, | |||
| 3202 | 3202 | ||
| 3203 | region = iommu_alloc_resv_region(MSI_RANGE_START, | 3203 | region = iommu_alloc_resv_region(MSI_RANGE_START, |
| 3204 | MSI_RANGE_END - MSI_RANGE_START + 1, | 3204 | MSI_RANGE_END - MSI_RANGE_START + 1, |
| 3205 | 0, IOMMU_RESV_RESERVED); | 3205 | 0, IOMMU_RESV_MSI); |
| 3206 | if (!region) | 3206 | if (!region) |
| 3207 | return; | 3207 | return; |
| 3208 | list_add_tail(®ion->list, head); | 3208 | list_add_tail(®ion->list, head); |
| @@ -3363,7 +3363,7 @@ static int __flush_pasid(struct protection_domain *domain, int pasid, | |||
| 3363 | * IOMMU TLB needs to be flushed before Device TLB to | 3363 | * IOMMU TLB needs to be flushed before Device TLB to |
| 3364 | * prevent device TLB refill from IOMMU TLB | 3364 | * prevent device TLB refill from IOMMU TLB |
| 3365 | */ | 3365 | */ |
| 3366 | for (i = 0; i < amd_iommus_present; ++i) { | 3366 | for (i = 0; i < amd_iommu_get_num_iommus(); ++i) { |
| 3367 | if (domain->dev_iommu[i] == 0) | 3367 | if (domain->dev_iommu[i] == 0) |
| 3368 | continue; | 3368 | continue; |
| 3369 | 3369 | ||
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 6130278c5d71..5a11328f4d98 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
| @@ -167,7 +167,9 @@ LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the | |||
| 167 | 167 | ||
| 168 | /* Array to assign indices to IOMMUs*/ | 168 | /* Array to assign indices to IOMMUs*/ |
| 169 | struct amd_iommu *amd_iommus[MAX_IOMMUS]; | 169 | struct amd_iommu *amd_iommus[MAX_IOMMUS]; |
| 170 | int amd_iommus_present; | 170 | |
| 171 | /* Number of IOMMUs present in the system */ | ||
| 172 | static int amd_iommus_present; | ||
| 171 | 173 | ||
| 172 | /* IOMMUs have a non-present cache? */ | 174 | /* IOMMUs have a non-present cache? */ |
| 173 | bool amd_iommu_np_cache __read_mostly; | 175 | bool amd_iommu_np_cache __read_mostly; |
| @@ -254,10 +256,6 @@ static int amd_iommu_enable_interrupts(void); | |||
| 254 | static int __init iommu_go_to_state(enum iommu_init_state state); | 256 | static int __init iommu_go_to_state(enum iommu_init_state state); |
| 255 | static void init_device_table_dma(void); | 257 | static void init_device_table_dma(void); |
| 256 | 258 | ||
| 257 | static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu, | ||
| 258 | u8 bank, u8 cntr, u8 fxn, | ||
| 259 | u64 *value, bool is_write); | ||
| 260 | |||
| 261 | static inline void update_last_devid(u16 devid) | 259 | static inline void update_last_devid(u16 devid) |
| 262 | { | 260 | { |
| 263 | if (devid > amd_iommu_last_bdf) | 261 | if (devid > amd_iommu_last_bdf) |
| @@ -272,6 +270,11 @@ static inline unsigned long tbl_size(int entry_size) | |||
| 272 | return 1UL << shift; | 270 | return 1UL << shift; |
| 273 | } | 271 | } |
| 274 | 272 | ||
| 273 | int amd_iommu_get_num_iommus(void) | ||
| 274 | { | ||
| 275 | return amd_iommus_present; | ||
| 276 | } | ||
| 277 | |||
| 275 | /* Access to l1 and l2 indexed register spaces */ | 278 | /* Access to l1 and l2 indexed register spaces */ |
| 276 | 279 | ||
| 277 | static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) | 280 | static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) |
| @@ -1336,7 +1339,7 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) | |||
| 1336 | 1339 | ||
| 1337 | /* Add IOMMU to internal data structures */ | 1340 | /* Add IOMMU to internal data structures */ |
| 1338 | list_add_tail(&iommu->list, &amd_iommu_list); | 1341 | list_add_tail(&iommu->list, &amd_iommu_list); |
| 1339 | iommu->index = amd_iommus_present++; | 1342 | iommu->index = amd_iommus_present++; |
| 1340 | 1343 | ||
| 1341 | if (unlikely(iommu->index >= MAX_IOMMUS)) { | 1344 | if (unlikely(iommu->index >= MAX_IOMMUS)) { |
| 1342 | WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n"); | 1345 | WARN(1, "AMD-Vi: System has more IOMMUs than supported by this driver\n"); |
| @@ -1477,6 +1480,8 @@ static int __init init_iommu_all(struct acpi_table_header *table) | |||
| 1477 | return 0; | 1480 | return 0; |
| 1478 | } | 1481 | } |
| 1479 | 1482 | ||
| 1483 | static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, | ||
| 1484 | u8 fxn, u64 *value, bool is_write); | ||
| 1480 | 1485 | ||
| 1481 | static void init_iommu_perf_ctr(struct amd_iommu *iommu) | 1486 | static void init_iommu_perf_ctr(struct amd_iommu *iommu) |
| 1482 | { | 1487 | { |
| @@ -1488,8 +1493,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu) | |||
| 1488 | amd_iommu_pc_present = true; | 1493 | amd_iommu_pc_present = true; |
| 1489 | 1494 | ||
| 1490 | /* Check if the performance counters can be written to */ | 1495 | /* Check if the performance counters can be written to */ |
| 1491 | if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) || | 1496 | if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) || |
| 1492 | (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) || | 1497 | (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) || |
| 1493 | (val != val2)) { | 1498 | (val != val2)) { |
| 1494 | pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n"); | 1499 | pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n"); |
| 1495 | amd_iommu_pc_present = false; | 1500 | amd_iommu_pc_present = false; |
| @@ -2711,6 +2716,18 @@ bool amd_iommu_v2_supported(void) | |||
| 2711 | } | 2716 | } |
| 2712 | EXPORT_SYMBOL(amd_iommu_v2_supported); | 2717 | EXPORT_SYMBOL(amd_iommu_v2_supported); |
| 2713 | 2718 | ||
| 2719 | struct amd_iommu *get_amd_iommu(unsigned int idx) | ||
| 2720 | { | ||
| 2721 | unsigned int i = 0; | ||
| 2722 | struct amd_iommu *iommu; | ||
| 2723 | |||
| 2724 | for_each_iommu(iommu) | ||
| 2725 | if (i++ == idx) | ||
| 2726 | return iommu; | ||
| 2727 | return NULL; | ||
| 2728 | } | ||
| 2729 | EXPORT_SYMBOL(get_amd_iommu); | ||
| 2730 | |||
| 2714 | /**************************************************************************** | 2731 | /**************************************************************************** |
| 2715 | * | 2732 | * |
| 2716 | * IOMMU EFR Performance Counter support functionality. This code allows | 2733 | * IOMMU EFR Performance Counter support functionality. This code allows |
| @@ -2718,17 +2735,14 @@ EXPORT_SYMBOL(amd_iommu_v2_supported); | |||
| 2718 | * | 2735 | * |
| 2719 | ****************************************************************************/ | 2736 | ****************************************************************************/ |
| 2720 | 2737 | ||
| 2721 | u8 amd_iommu_pc_get_max_banks(u16 devid) | 2738 | u8 amd_iommu_pc_get_max_banks(unsigned int idx) |
| 2722 | { | 2739 | { |
| 2723 | struct amd_iommu *iommu; | 2740 | struct amd_iommu *iommu = get_amd_iommu(idx); |
| 2724 | u8 ret = 0; | ||
| 2725 | 2741 | ||
| 2726 | /* locate the iommu governing the devid */ | ||
| 2727 | iommu = amd_iommu_rlookup_table[devid]; | ||
| 2728 | if (iommu) | 2742 | if (iommu) |
| 2729 | ret = iommu->max_banks; | 2743 | return iommu->max_banks; |
| 2730 | 2744 | ||
| 2731 | return ret; | 2745 | return 0; |
| 2732 | } | 2746 | } |
| 2733 | EXPORT_SYMBOL(amd_iommu_pc_get_max_banks); | 2747 | EXPORT_SYMBOL(amd_iommu_pc_get_max_banks); |
| 2734 | 2748 | ||
| @@ -2738,62 +2752,69 @@ bool amd_iommu_pc_supported(void) | |||
| 2738 | } | 2752 | } |
| 2739 | EXPORT_SYMBOL(amd_iommu_pc_supported); | 2753 | EXPORT_SYMBOL(amd_iommu_pc_supported); |
| 2740 | 2754 | ||
| 2741 | u8 amd_iommu_pc_get_max_counters(u16 devid) | 2755 | u8 amd_iommu_pc_get_max_counters(unsigned int idx) |
| 2742 | { | 2756 | { |
| 2743 | struct amd_iommu *iommu; | 2757 | struct amd_iommu *iommu = get_amd_iommu(idx); |
| 2744 | u8 ret = 0; | ||
| 2745 | 2758 | ||
| 2746 | /* locate the iommu governing the devid */ | ||
| 2747 | iommu = amd_iommu_rlookup_table[devid]; | ||
| 2748 | if (iommu) | 2759 | if (iommu) |
| 2749 | ret = iommu->max_counters; | 2760 | return iommu->max_counters; |
| 2750 | 2761 | ||
| 2751 | return ret; | 2762 | return 0; |
| 2752 | } | 2763 | } |
| 2753 | EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); | 2764 | EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); |
| 2754 | 2765 | ||
| 2755 | static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu, | 2766 | static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, |
| 2756 | u8 bank, u8 cntr, u8 fxn, | 2767 | u8 fxn, u64 *value, bool is_write) |
| 2757 | u64 *value, bool is_write) | ||
| 2758 | { | 2768 | { |
| 2759 | u32 offset; | 2769 | u32 offset; |
| 2760 | u32 max_offset_lim; | 2770 | u32 max_offset_lim; |
| 2761 | 2771 | ||
| 2772 | /* Make sure the IOMMU PC resource is available */ | ||
| 2773 | if (!amd_iommu_pc_present) | ||
| 2774 | return -ENODEV; | ||
| 2775 | |||
| 2762 | /* Check for valid iommu and pc register indexing */ | 2776 | /* Check for valid iommu and pc register indexing */ |
| 2763 | if (WARN_ON((fxn > 0x28) || (fxn & 7))) | 2777 | if (WARN_ON(!iommu || (fxn > 0x28) || (fxn & 7))) |
| 2764 | return -ENODEV; | 2778 | return -ENODEV; |
| 2765 | 2779 | ||
| 2766 | offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn); | 2780 | offset = (u32)(((0x40 | bank) << 12) | (cntr << 8) | fxn); |
| 2767 | 2781 | ||
| 2768 | /* Limit the offset to the hw defined mmio region aperture */ | 2782 | /* Limit the offset to the hw defined mmio region aperture */ |
| 2769 | max_offset_lim = (u32)(((0x40|iommu->max_banks) << 12) | | 2783 | max_offset_lim = (u32)(((0x40 | iommu->max_banks) << 12) | |
| 2770 | (iommu->max_counters << 8) | 0x28); | 2784 | (iommu->max_counters << 8) | 0x28); |
| 2771 | if ((offset < MMIO_CNTR_REG_OFFSET) || | 2785 | if ((offset < MMIO_CNTR_REG_OFFSET) || |
| 2772 | (offset > max_offset_lim)) | 2786 | (offset > max_offset_lim)) |
| 2773 | return -EINVAL; | 2787 | return -EINVAL; |
| 2774 | 2788 | ||
| 2775 | if (is_write) { | 2789 | if (is_write) { |
| 2776 | writel((u32)*value, iommu->mmio_base + offset); | 2790 | u64 val = *value & GENMASK_ULL(47, 0); |
| 2777 | writel((*value >> 32), iommu->mmio_base + offset + 4); | 2791 | |
| 2792 | writel((u32)val, iommu->mmio_base + offset); | ||
| 2793 | writel((val >> 32), iommu->mmio_base + offset + 4); | ||
| 2778 | } else { | 2794 | } else { |
| 2779 | *value = readl(iommu->mmio_base + offset + 4); | 2795 | *value = readl(iommu->mmio_base + offset + 4); |
| 2780 | *value <<= 32; | 2796 | *value <<= 32; |
| 2781 | *value = readl(iommu->mmio_base + offset); | 2797 | *value |= readl(iommu->mmio_base + offset); |
| 2798 | *value &= GENMASK_ULL(47, 0); | ||
| 2782 | } | 2799 | } |
| 2783 | 2800 | ||
| 2784 | return 0; | 2801 | return 0; |
| 2785 | } | 2802 | } |
| 2786 | EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val); | ||
| 2787 | 2803 | ||
| 2788 | int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, | 2804 | int amd_iommu_pc_get_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) |
| 2789 | u64 *value, bool is_write) | ||
| 2790 | { | 2805 | { |
| 2791 | struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; | 2806 | if (!iommu) |
| 2807 | return -EINVAL; | ||
| 2792 | 2808 | ||
| 2793 | /* Make sure the IOMMU PC resource is available */ | 2809 | return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, false); |
| 2794 | if (!amd_iommu_pc_present || iommu == NULL) | 2810 | } |
| 2795 | return -ENODEV; | 2811 | EXPORT_SYMBOL(amd_iommu_pc_get_reg); |
| 2812 | |||
| 2813 | int amd_iommu_pc_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, u8 fxn, u64 *value) | ||
| 2814 | { | ||
| 2815 | if (!iommu) | ||
| 2816 | return -EINVAL; | ||
| 2796 | 2817 | ||
| 2797 | return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn, | 2818 | return iommu_pc_get_set_reg(iommu, bank, cntr, fxn, value, true); |
| 2798 | value, is_write); | ||
| 2799 | } | 2819 | } |
| 2820 | EXPORT_SYMBOL(amd_iommu_pc_set_reg); | ||
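[Annotation] Besides the API rework, iommu_pc_get_set_reg() fixes a real read bug: the low half of the counter was assigned with '=' instead of OR-ed in, discarding the high 32 bits read just before, and both directions now clamp to the 48 bits the counters implement. The composition, standalone:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    int main(void)
    {
        uint32_t lo = 0xdeadbeef, hi = 0x1234; /* two halves of one counter */
        uint64_t bad, good;

        bad = hi;  bad <<= 32;  bad = lo;   /* '=' throws the high half away */
        good = hi; good <<= 32; good |= lo; /* '|=' keeps both halves */
        good &= GENMASK_ULL(47, 0);         /* counters are 48 bits wide */

        /* prints bad=0xdeadbeef good=0x1234deadbeef */
        printf("bad=%#llx good=%#llx\n",
               (unsigned long long)bad, (unsigned long long)good);
        return 0;
    }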
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h index 7eb60c15c582..466260f8a1df 100644 --- a/drivers/iommu/amd_iommu_proto.h +++ b/drivers/iommu/amd_iommu_proto.h | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | 21 | ||
| 22 | #include "amd_iommu_types.h" | 22 | #include "amd_iommu_types.h" |
| 23 | 23 | ||
| 24 | extern int amd_iommu_get_num_iommus(void); | ||
| 24 | extern int amd_iommu_init_dma_ops(void); | 25 | extern int amd_iommu_init_dma_ops(void); |
| 25 | extern int amd_iommu_init_passthrough(void); | 26 | extern int amd_iommu_init_passthrough(void); |
| 26 | extern irqreturn_t amd_iommu_int_thread(int irq, void *data); | 27 | extern irqreturn_t amd_iommu_int_thread(int irq, void *data); |
| @@ -56,13 +57,6 @@ extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid, | |||
| 56 | extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid); | 57 | extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid); |
| 57 | extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev); | 58 | extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev); |
| 58 | 59 | ||
| 59 | /* IOMMU Performance Counter functions */ | ||
| 60 | extern bool amd_iommu_pc_supported(void); | ||
| 61 | extern u8 amd_iommu_pc_get_max_banks(u16 devid); | ||
| 62 | extern u8 amd_iommu_pc_get_max_counters(u16 devid); | ||
| 63 | extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, | ||
| 64 | u64 *value, bool is_write); | ||
| 65 | |||
| 66 | #ifdef CONFIG_IRQ_REMAP | 60 | #ifdef CONFIG_IRQ_REMAP |
| 67 | extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu); | 61 | extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu); |
| 68 | #else | 62 | #else |
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 003f3ceb2661..4de8f4160bb8 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h | |||
| @@ -611,9 +611,6 @@ extern struct list_head amd_iommu_list; | |||
| 611 | */ | 611 | */ |
| 612 | extern struct amd_iommu *amd_iommus[MAX_IOMMUS]; | 612 | extern struct amd_iommu *amd_iommus[MAX_IOMMUS]; |
| 613 | 613 | ||
| 614 | /* Number of IOMMUs present in the system */ | ||
| 615 | extern int amd_iommus_present; | ||
| 616 | |||
| 617 | /* | 614 | /* |
| 618 | * Declarations for the global list of all protection domains | 615 | * Declarations for the global list of all protection domains |
| 619 | */ | 616 | */ |
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 5806a6acc94e..591bb96047c9 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c | |||
| @@ -1888,7 +1888,7 @@ static void arm_smmu_get_resv_regions(struct device *dev, | |||
| 1888 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; | 1888 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
| 1889 | 1889 | ||
| 1890 | region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, | 1890 | region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, |
| 1891 | prot, IOMMU_RESV_MSI); | 1891 | prot, IOMMU_RESV_SW_MSI); |
| 1892 | if (!region) | 1892 | if (!region) |
| 1893 | return; | 1893 | return; |
| 1894 | 1894 | ||
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index abf6496843a6..b493c99e17f7 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
| @@ -1608,7 +1608,7 @@ static void arm_smmu_get_resv_regions(struct device *dev, | |||
| 1608 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; | 1608 | int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; |
| 1609 | 1609 | ||
| 1610 | region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, | 1610 | region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, |
| 1611 | prot, IOMMU_RESV_MSI); | 1611 | prot, IOMMU_RESV_SW_MSI); |
| 1612 | if (!region) | 1612 | if (!region) |
| 1613 | return; | 1613 | return; |
| 1614 | 1614 | ||
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index a7e0821c9967..c01bfcdb2383 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
| @@ -512,7 +512,13 @@ static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data, | |||
| 512 | spin_lock_irqsave(&data->lock, flags); | 512 | spin_lock_irqsave(&data->lock, flags); |
| 513 | if (data->active && data->version >= MAKE_MMU_VER(3, 3)) { | 513 | if (data->active && data->version >= MAKE_MMU_VER(3, 3)) { |
| 514 | clk_enable(data->clk_master); | 514 | clk_enable(data->clk_master); |
| 515 | __sysmmu_tlb_invalidate_entry(data, iova, 1); | 515 | if (sysmmu_block(data)) { |
| 516 | if (data->version >= MAKE_MMU_VER(5, 0)) | ||
| 517 | __sysmmu_tlb_invalidate(data); | ||
| 518 | else | ||
| 519 | __sysmmu_tlb_invalidate_entry(data, iova, 1); | ||
| 520 | sysmmu_unblock(data); | ||
| 521 | } | ||
| 516 | clk_disable(data->clk_master); | 522 | clk_disable(data->clk_master); |
| 517 | } | 523 | } |
| 518 | spin_unlock_irqrestore(&data->lock, flags); | 524 | spin_unlock_irqrestore(&data->lock, flags); |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 238ad3447712..d412a313a372 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -916,7 +916,7 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf | |||
| 916 | * which we used for the IOMMU lookup. Strictly speaking | 916 | * which we used for the IOMMU lookup. Strictly speaking |
| 917 | * we could do this for all PCI devices; we only need to | 917 | * we could do this for all PCI devices; we only need to |
| 918 | * get the BDF# from the scope table for ACPI matches. */ | 918 | * get the BDF# from the scope table for ACPI matches. */ |
| 919 | if (pdev->is_virtfn) | 919 | if (pdev && pdev->is_virtfn) |
| 920 | goto got_pdev; | 920 | goto got_pdev; |
| 921 | 921 | ||
| 922 | *bus = drhd->devices[i].bus; | 922 | *bus = drhd->devices[i].bus; |
| @@ -5249,7 +5249,7 @@ static void intel_iommu_get_resv_regions(struct device *device, | |||
| 5249 | 5249 | ||
| 5250 | reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, | 5250 | reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, |
| 5251 | IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, | 5251 | IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, |
| 5252 | 0, IOMMU_RESV_RESERVED); | 5252 | 0, IOMMU_RESV_MSI); |
| 5253 | if (!reg) | 5253 | if (!reg) |
| 5254 | return; | 5254 | return; |
| 5255 | list_add_tail(®->list, head); | 5255 | list_add_tail(®->list, head); |
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 1c049e2e12bf..8d6ca28c3e1f 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c | |||
| @@ -422,8 +422,12 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, | |||
| 422 | pte |= ARM_V7S_ATTR_NS_TABLE; | 422 | pte |= ARM_V7S_ATTR_NS_TABLE; |
| 423 | 423 | ||
| 424 | __arm_v7s_set_pte(ptep, pte, 1, cfg); | 424 | __arm_v7s_set_pte(ptep, pte, 1, cfg); |
| 425 | } else { | 425 | } else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) { |
| 426 | cptep = iopte_deref(pte, lvl); | 426 | cptep = iopte_deref(pte, lvl); |
| 427 | } else { | ||
| 428 | /* We require an unmap first */ | ||
| 429 | WARN_ON(!selftest_running); | ||
| 430 | return -EEXIST; | ||
| 427 | } | 431 | } |
| 428 | 432 | ||
| 429 | /* Rinse, repeat */ | 433 | /* Rinse, repeat */ |
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index feacc54bec68..f9bc6ebb8140 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c | |||
| @@ -335,8 +335,12 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, | |||
| 335 | if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) | 335 | if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) |
| 336 | pte |= ARM_LPAE_PTE_NSTABLE; | 336 | pte |= ARM_LPAE_PTE_NSTABLE; |
| 337 | __arm_lpae_set_pte(ptep, pte, cfg); | 337 | __arm_lpae_set_pte(ptep, pte, cfg); |
| 338 | } else { | 338 | } else if (!iopte_leaf(pte, lvl)) { |
| 339 | cptep = iopte_deref(pte, data); | 339 | cptep = iopte_deref(pte, data); |
| 340 | } else { | ||
| 341 | /* We require an unmap first */ | ||
| 342 | WARN_ON(!selftest_running); | ||
| 343 | return -EEXIST; | ||
| 340 | } | 344 | } |
| 341 | 345 | ||
| 342 | /* Rinse, repeat */ | 346 | /* Rinse, repeat */ |
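Both page-table hunks above enforce the same new policy: when __arm_v7s_map()/__arm_lpae_map() walk onto an entry that is already a valid leaf, they now return -EEXIST (warning outside the selftests) instead of dereferencing it as a table pointer. A minimal standalone model of that policy, with an illustrative flag layout rather than the kernel's:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID 0x1u
#define PTE_TABLE 0x2u

static int map_leaf(uint32_t *pte, uint32_t new_leaf)
{
	if (!(*pte & PTE_VALID)) {
		*pte = new_leaf | PTE_VALID;	/* empty slot: install */
		return 0;
	}
	if (*pte & PTE_TABLE)
		return 0;			/* table entry: would descend */
	return -EEXIST;				/* live leaf: unmap first */
}

int main(void)
{
	uint32_t pte = 0;

	printf("first map:  %d\n", map_leaf(&pte, 0x1000));	/* 0 */
	printf("second map: %d\n", map_leaf(&pte, 0x2000));	/* -17 */
	return 0;
}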
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 8ea14f41a979..3b67144dead2 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
| @@ -72,6 +72,7 @@ static const char * const iommu_group_resv_type_string[] = { | |||
| 72 | [IOMMU_RESV_DIRECT] = "direct", | 72 | [IOMMU_RESV_DIRECT] = "direct", |
| 73 | [IOMMU_RESV_RESERVED] = "reserved", | 73 | [IOMMU_RESV_RESERVED] = "reserved", |
| 74 | [IOMMU_RESV_MSI] = "msi", | 74 | [IOMMU_RESV_MSI] = "msi", |
| 75 | [IOMMU_RESV_SW_MSI] = "msi", | ||
| 75 | }; | 76 | }; |
| 76 | 77 | ||
| 77 | #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ | 78 | #define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \ |
| @@ -1743,8 +1744,8 @@ void iommu_put_resv_regions(struct device *dev, struct list_head *list) | |||
| 1743 | } | 1744 | } |
| 1744 | 1745 | ||
| 1745 | struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, | 1746 | struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, |
| 1746 | size_t length, | 1747 | size_t length, int prot, |
| 1747 | int prot, int type) | 1748 | enum iommu_resv_type type) |
| 1748 | { | 1749 | { |
| 1749 | struct iommu_resv_region *region; | 1750 | struct iommu_resv_region *region; |
| 1750 | 1751 | ||
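With the signature change above, the final argument of iommu_alloc_resv_region() is the new enum iommu_resv_type rather than a bare int, and software-managed MSI windows are reported as IOMMU_RESV_SW_MSI. A hedged sketch of a driver-side get_resv_regions callback using the new type; the base, length and function names are illustrative:

#include <linux/iommu.h>

#define EXAMPLE_MSI_IOVA_BASE	0x08000000UL
#define EXAMPLE_MSI_IOVA_LENGTH	0x00100000UL

static void example_get_resv_regions(struct device *dev,
				     struct list_head *head)
{
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	struct iommu_resv_region *region;

	/* Software-managed MSI window: IOMMU_RESV_SW_MSI, not _MSI. */
	region = iommu_alloc_resv_region(EXAMPLE_MSI_IOVA_BASE,
					 EXAMPLE_MSI_IOVA_LENGTH,
					 prot, IOMMU_RESV_SW_MSI);
	if (!region)
		return;

	list_add_tail(&region->list, head);
}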
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 2b13117fb918..321ecac23027 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c | |||
| @@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots) | |||
| 777 | bm_lockres->flags |= DLM_LKF_NOQUEUE; | 777 | bm_lockres->flags |= DLM_LKF_NOQUEUE; |
| 778 | ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW); | 778 | ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW); |
| 779 | if (ret == -EAGAIN) { | 779 | if (ret == -EAGAIN) { |
| 780 | memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE); | ||
| 781 | s = read_resync_info(mddev, bm_lockres); | 780 | s = read_resync_info(mddev, bm_lockres); |
| 782 | if (s) { | 781 | if (s) { |
| 783 | pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n", | 782 | pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n", |
| @@ -974,6 +973,7 @@ static int leave(struct mddev *mddev) | |||
| 974 | lockres_free(cinfo->bitmap_lockres); | 973 | lockres_free(cinfo->bitmap_lockres); |
| 975 | unlock_all_bitmaps(mddev); | 974 | unlock_all_bitmaps(mddev); |
| 976 | dlm_release_lockspace(cinfo->lockspace, 2); | 975 | dlm_release_lockspace(cinfo->lockspace, 2); |
| 976 | kfree(cinfo); | ||
| 977 | return 0; | 977 | return 0; |
| 978 | } | 978 | } |
| 979 | 979 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index 548d1b8014f8..f6ae1d67bcd0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -440,14 +440,6 @@ void md_flush_request(struct mddev *mddev, struct bio *bio) | |||
| 440 | } | 440 | } |
| 441 | EXPORT_SYMBOL(md_flush_request); | 441 | EXPORT_SYMBOL(md_flush_request); |
| 442 | 442 | ||
| 443 | void md_unplug(struct blk_plug_cb *cb, bool from_schedule) | ||
| 444 | { | ||
| 445 | struct mddev *mddev = cb->data; | ||
| 446 | md_wakeup_thread(mddev->thread); | ||
| 447 | kfree(cb); | ||
| 448 | } | ||
| 449 | EXPORT_SYMBOL(md_unplug); | ||
| 450 | |||
| 451 | static inline struct mddev *mddev_get(struct mddev *mddev) | 443 | static inline struct mddev *mddev_get(struct mddev *mddev) |
| 452 | { | 444 | { |
| 453 | atomic_inc(&mddev->active); | 445 | atomic_inc(&mddev->active); |
| @@ -1887,7 +1879,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) | |||
| 1887 | } | 1879 | } |
| 1888 | sb = page_address(rdev->sb_page); | 1880 | sb = page_address(rdev->sb_page); |
| 1889 | sb->data_size = cpu_to_le64(num_sectors); | 1881 | sb->data_size = cpu_to_le64(num_sectors); |
| 1890 | sb->super_offset = rdev->sb_start; | 1882 | sb->super_offset = cpu_to_le64(rdev->sb_start); |
| 1891 | sb->sb_csum = calc_sb_1_csum(sb); | 1883 | sb->sb_csum = calc_sb_1_csum(sb); |
| 1892 | do { | 1884 | do { |
| 1893 | md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, | 1885 | md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, |
| @@ -2295,7 +2287,7 @@ static bool does_sb_need_changing(struct mddev *mddev) | |||
| 2295 | /* Check if any mddev parameters have changed */ | 2287 | /* Check if any mddev parameters have changed */ |
| 2296 | if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || | 2288 | if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || |
| 2297 | (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || | 2289 | (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || |
| 2298 | (mddev->layout != le64_to_cpu(sb->layout)) || | 2290 | (mddev->layout != le32_to_cpu(sb->layout)) || |
| 2299 | (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || | 2291 | (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || |
| 2300 | (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) | 2292 | (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) |
| 2301 | return true; | 2293 | return true; |
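The two md.c endianness fixes above (cpu_to_le64() for the __le64 super_offset store, le32_to_cpu() for the __le32 layout load) only bite on big-endian hosts, where a mismatched accessor width byte-swaps the wrong number of bytes. A standalone demonstration, modelling the big-endian behaviour of the accessors with explicit swaps:

#include <stdint.h>
#include <stdio.h>

/* What leXX_to_cpu does on a big-endian host: a byte swap. */
static uint32_t le32_to_cpu_be(uint32_t v) { return __builtin_bswap32(v); }
static uint64_t le64_to_cpu_be(uint64_t v) { return __builtin_bswap64(v); }

int main(void)
{
	/* Store the value 2 the way a __le32 field holds it on disk. */
	uint32_t layout_on_disk = le32_to_cpu_be(2);

	/* Matching width recovers the value: prints 2. */
	printf("le32_to_cpu: %u\n", le32_to_cpu_be(layout_on_disk));

	/* Wrong width zero-extends to 64 bits before swapping,
	 * scrambling the result: prints 8589934592, not 2. */
	printf("le64_to_cpu: %llu\n",
	       (unsigned long long)le64_to_cpu_be(layout_on_disk));
	return 0;
}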
| @@ -6458,11 +6450,10 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) | |||
| 6458 | mddev->layout = info->layout; | 6450 | mddev->layout = info->layout; |
| 6459 | mddev->chunk_sectors = info->chunk_size >> 9; | 6451 | mddev->chunk_sectors = info->chunk_size >> 9; |
| 6460 | 6452 | ||
| 6461 | mddev->max_disks = MD_SB_DISKS; | ||
| 6462 | |||
| 6463 | if (mddev->persistent) { | 6453 | if (mddev->persistent) { |
| 6464 | mddev->flags = 0; | 6454 | mddev->max_disks = MD_SB_DISKS; |
| 6465 | mddev->sb_flags = 0; | 6455 | mddev->flags = 0; |
| 6456 | mddev->sb_flags = 0; | ||
| 6466 | } | 6457 | } |
| 6467 | set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); | 6458 | set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); |
| 6468 | 6459 | ||
| @@ -6533,8 +6524,12 @@ static int update_size(struct mddev *mddev, sector_t num_sectors) | |||
| 6533 | return -ENOSPC; | 6524 | return -ENOSPC; |
| 6534 | } | 6525 | } |
| 6535 | rv = mddev->pers->resize(mddev, num_sectors); | 6526 | rv = mddev->pers->resize(mddev, num_sectors); |
| 6536 | if (!rv) | 6527 | if (!rv) { |
| 6537 | revalidate_disk(mddev->gendisk); | 6528 | if (mddev->queue) { |
| 6529 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
| 6530 | revalidate_disk(mddev->gendisk); | ||
| 6531 | } | ||
| 6532 | } | ||
| 6538 | return rv; | 6533 | return rv; |
| 6539 | } | 6534 | } |
| 6540 | 6535 | ||
diff --git a/drivers/md/md.h b/drivers/md/md.h index b8859cbf84b6..dde8ecb760c8 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
| @@ -676,16 +676,10 @@ extern void mddev_resume(struct mddev *mddev); | |||
| 676 | extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, | 676 | extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, |
| 677 | struct mddev *mddev); | 677 | struct mddev *mddev); |
| 678 | 678 | ||
| 679 | extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule); | ||
| 680 | extern void md_reload_sb(struct mddev *mddev, int raid_disk); | 679 | extern void md_reload_sb(struct mddev *mddev, int raid_disk); |
| 681 | extern void md_update_sb(struct mddev *mddev, int force); | 680 | extern void md_update_sb(struct mddev *mddev, int force); |
| 682 | extern void md_kick_rdev_from_array(struct md_rdev * rdev); | 681 | extern void md_kick_rdev_from_array(struct md_rdev * rdev); |
| 683 | struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); | 682 | struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr); |
| 684 | static inline int mddev_check_plugged(struct mddev *mddev) | ||
| 685 | { | ||
| 686 | return !!blk_check_plugged(md_unplug, mddev, | ||
| 687 | sizeof(struct blk_plug_cb)); | ||
| 688 | } | ||
| 689 | 683 | ||
| 690 | static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev) | 684 | static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev) |
| 691 | { | 685 | { |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index fbc2d7851b49..a34f58772022 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -1027,7 +1027,7 @@ static int get_unqueued_pending(struct r1conf *conf) | |||
| 1027 | static void freeze_array(struct r1conf *conf, int extra) | 1027 | static void freeze_array(struct r1conf *conf, int extra) |
| 1028 | { | 1028 | { |
| 1029 | /* Stop sync I/O and normal I/O and wait for everything to | 1029 | /* Stop sync I/O and normal I/O and wait for everything to |
| 1030 | * go quite. | 1030 | * go quiet. |
| 1031 | * This is called in two situations: | 1031 | * This is called in two situations: |
| 1032 | * 1) management command handlers (reshape, remove disk, quiesce). | 1032 | * 1) management command handlers (reshape, remove disk, quiesce). |
| 1033 | * 2) one normal I/O request failed. | 1033 | * 2) one normal I/O request failed. |
| @@ -1587,9 +1587,30 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio) | |||
| 1587 | split = bio; | 1587 | split = bio; |
| 1588 | } | 1588 | } |
| 1589 | 1589 | ||
| 1590 | if (bio_data_dir(split) == READ) | 1590 | if (bio_data_dir(split) == READ) { |
| 1591 | raid1_read_request(mddev, split); | 1591 | raid1_read_request(mddev, split); |
| 1592 | else | 1592 | |
| 1593 | /* | ||
| 1594 | * If a bio is split, the first part of the bio will | ||
| 1595 | * pass the barrier but the bio is queued in | ||
| 1596 | * current->bio_list (see generic_make_request). If | ||
| 1597 | * there is a raise_barrier() called here, the second | ||
| 1598 | * part of the bio can't pass the barrier. But since the | ||
| 1599 | * first part isn't dispatched to the underlying disks | ||
| 1600 | * yet, the barrier is never released, hence raise_barrier | ||
| 1601 | * will always wait. We have a deadlock. | ||
| 1602 | * Note, this only happens in the read path. For the write | ||
| 1603 | * path, the first part of the bio is dispatched in a | ||
| 1604 | * schedule() call (because of the blk plug) or offloaded | ||
| 1605 | * to raid1d. | ||
| 1606 | * Returning from the function immediately changes the bio | ||
| 1607 | * order queued in bio_list and avoids the deadlock. | ||
| 1608 | */ | ||
| 1609 | if (split != bio) { | ||
| 1610 | generic_make_request(bio); | ||
| 1611 | break; | ||
| 1612 | } | ||
| 1613 | } else | ||
| 1593 | raid1_write_request(mddev, split); | 1614 | raid1_write_request(mddev, split); |
| 1594 | } while (split != bio); | 1615 | } while (split != bio); |
| 1595 | } | 1616 | } |
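The comment added above describes the fix precisely: dispatch only the first fragment, hand the remainder back via generic_make_request(), and return, so the already-queued fragment can drain before the barrier is raised again. A standalone toy model of that dispatch-and-requeue loop; the chunk size and names are illustrative:

#include <stdio.h>

struct bio { int start, len; };

#define MAX_CHUNK 4	/* pretend each pass can service this much */

static void dispatch(struct bio b)
{
	printf("dispatch [%d, %d)\n", b.start, b.start + b.len);
}

/* Handle the first chunk; if anything is left, hand it back to the
 * caller for re-queueing (the kernel's generic_make_request) instead
 * of looping while the barrier is held. */
static int make_request(struct bio *b)
{
	struct bio split = *b;

	if (split.len > MAX_CHUNK)
		split.len = MAX_CHUNK;

	dispatch(split);

	b->start += split.len;
	b->len -= split.len;
	return b->len != 0;	/* nonzero: remainder must be re-queued */
}

int main(void)
{
	struct bio b = { 0, 10 };

	while (make_request(&b))
		;	/* the re-queue step */
	return 0;
}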
| @@ -3246,8 +3267,6 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors) | |||
| 3246 | return ret; | 3267 | return ret; |
| 3247 | } | 3268 | } |
| 3248 | md_set_array_sectors(mddev, newsize); | 3269 | md_set_array_sectors(mddev, newsize); |
| 3249 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
| 3250 | revalidate_disk(mddev->gendisk); | ||
| 3251 | if (sectors > mddev->dev_sectors && | 3270 | if (sectors > mddev->dev_sectors && |
| 3252 | mddev->recovery_cp > mddev->dev_sectors) { | 3271 | mddev->recovery_cp > mddev->dev_sectors) { |
| 3253 | mddev->recovery_cp = mddev->dev_sectors; | 3272 | mddev->recovery_cp = mddev->dev_sectors; |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0536658c9d40..e89a8d78a9ed 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -1478,11 +1478,24 @@ retry_write: | |||
| 1478 | mbio->bi_bdev = (void*)rdev; | 1478 | mbio->bi_bdev = (void*)rdev; |
| 1479 | 1479 | ||
| 1480 | atomic_inc(&r10_bio->remaining); | 1480 | atomic_inc(&r10_bio->remaining); |
| 1481 | |||
| 1482 | cb = blk_check_plugged(raid10_unplug, mddev, | ||
| 1483 | sizeof(*plug)); | ||
| 1484 | if (cb) | ||
| 1485 | plug = container_of(cb, struct raid10_plug_cb, | ||
| 1486 | cb); | ||
| 1487 | else | ||
| 1488 | plug = NULL; | ||
| 1481 | spin_lock_irqsave(&conf->device_lock, flags); | 1489 | spin_lock_irqsave(&conf->device_lock, flags); |
| 1482 | bio_list_add(&conf->pending_bio_list, mbio); | 1490 | if (plug) { |
| 1483 | conf->pending_count++; | 1491 | bio_list_add(&plug->pending, mbio); |
| 1492 | plug->pending_cnt++; | ||
| 1493 | } else { | ||
| 1494 | bio_list_add(&conf->pending_bio_list, mbio); | ||
| 1495 | conf->pending_count++; | ||
| 1496 | } | ||
| 1484 | spin_unlock_irqrestore(&conf->device_lock, flags); | 1497 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 1485 | if (!mddev_check_plugged(mddev)) | 1498 | if (!plug) |
| 1486 | md_wakeup_thread(mddev->thread); | 1499 | md_wakeup_thread(mddev->thread); |
| 1487 | } | 1500 | } |
| 1488 | } | 1501 | } |
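In the hunk above, blk_check_plugged() hands back a struct blk_plug_cb embedded inside the driver's raid10_plug_cb, which container_of() then recovers. A standalone model of that embed-and-recover idiom; the macro mirrors the kernel's container_of, and the structs are illustrative:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct blk_plug_cb { void *data; };

struct raid10_plug_cb {
	struct blk_plug_cb cb;	/* embedded callback header */
	int pending_cnt;
};

int main(void)
{
	struct raid10_plug_cb plug = { .pending_cnt = 3 };
	struct blk_plug_cb *cb = &plug.cb;	/* what the block layer sees */

	struct raid10_plug_cb *back =
		container_of(cb, struct raid10_plug_cb, cb);

	printf("pending_cnt = %d\n", back->pending_cnt);	/* 3 */
	return 0;
}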
| @@ -1572,7 +1585,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio) | |||
| 1572 | split = bio; | 1585 | split = bio; |
| 1573 | } | 1586 | } |
| 1574 | 1587 | ||
| 1588 | /* | ||
| 1589 | * If a bio is split, the first part of the bio will pass | ||
| 1590 | * the barrier but the bio is queued in current->bio_list (see | ||
| 1591 | * generic_make_request). If there is a raise_barrier() called | ||
| 1592 | * here, the second part of the bio can't pass the barrier. But | ||
| 1593 | * since the first part isn't dispatched to the underlying disks | ||
| 1594 | * yet, the barrier is never released, hence raise_barrier will | ||
| 1595 | * always wait. We have a deadlock. | ||
| 1596 | * Note, this only happens in the read path. For the write path, | ||
| 1597 | * the first part of the bio is dispatched in a schedule() call | ||
| 1598 | * (because of the blk plug) or offloaded to raid10d. | ||
| 1599 | * Returning from the function immediately changes the bio | ||
| 1600 | * order queued in bio_list and avoids the deadlock. | ||
| 1601 | */ | ||
| 1575 | __make_request(mddev, split); | 1602 | __make_request(mddev, split); |
| 1603 | if (split != bio && bio_data_dir(bio) == READ) { | ||
| 1604 | generic_make_request(bio); | ||
| 1605 | break; | ||
| 1606 | } | ||
| 1576 | } while (split != bio); | 1607 | } while (split != bio); |
| 1577 | 1608 | ||
| 1578 | /* In case raid10d snuck in to freeze_array */ | 1609 | /* In case raid10d snuck in to freeze_array */ |
| @@ -3944,10 +3975,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors) | |||
| 3944 | return ret; | 3975 | return ret; |
| 3945 | } | 3976 | } |
| 3946 | md_set_array_sectors(mddev, size); | 3977 | md_set_array_sectors(mddev, size); |
| 3947 | if (mddev->queue) { | ||
| 3948 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
| 3949 | revalidate_disk(mddev->gendisk); | ||
| 3950 | } | ||
| 3951 | if (sectors > mddev->dev_sectors && | 3978 | if (sectors > mddev->dev_sectors && |
| 3952 | mddev->recovery_cp > oldsize) { | 3979 | mddev->recovery_cp > oldsize) { |
| 3953 | mddev->recovery_cp = oldsize; | 3980 | mddev->recovery_cp = oldsize; |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 4fb09b3fcb41..ed5cd705b985 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -1401,7 +1401,8 @@ static int set_syndrome_sources(struct page **srcs, | |||
| 1401 | (test_bit(R5_Wantdrain, &dev->flags) || | 1401 | (test_bit(R5_Wantdrain, &dev->flags) || |
| 1402 | test_bit(R5_InJournal, &dev->flags))) || | 1402 | test_bit(R5_InJournal, &dev->flags))) || |
| 1403 | (srctype == SYNDROME_SRC_WRITTEN && | 1403 | (srctype == SYNDROME_SRC_WRITTEN && |
| 1404 | dev->written)) { | 1404 | (dev->written || |
| 1405 | test_bit(R5_InJournal, &dev->flags)))) { | ||
| 1405 | if (test_bit(R5_InJournal, &dev->flags)) | 1406 | if (test_bit(R5_InJournal, &dev->flags)) |
| 1406 | srcs[slot] = sh->dev[i].orig_page; | 1407 | srcs[slot] = sh->dev[i].orig_page; |
| 1407 | else | 1408 | else |
| @@ -7605,8 +7606,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors) | |||
| 7605 | return ret; | 7606 | return ret; |
| 7606 | } | 7607 | } |
| 7607 | md_set_array_sectors(mddev, newsize); | 7608 | md_set_array_sectors(mddev, newsize); |
| 7608 | set_capacity(mddev->gendisk, mddev->array_sectors); | ||
| 7609 | revalidate_disk(mddev->gendisk); | ||
| 7610 | if (sectors > mddev->dev_sectors && | 7609 | if (sectors > mddev->dev_sectors && |
| 7611 | mddev->recovery_cp > mddev->dev_sectors) { | 7610 | mddev->recovery_cp > mddev->dev_sectors) { |
| 7612 | mddev->recovery_cp = mddev->dev_sectors; | 7611 | mddev->recovery_cp = mddev->dev_sectors; |
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c index 67fd8ffa60a4..669a4c82f1ff 100644 --- a/drivers/media/platform/coda/imx-vdoa.c +++ b/drivers/media/platform/coda/imx-vdoa.c | |||
| @@ -321,7 +321,7 @@ static const struct of_device_id vdoa_dt_ids[] = { | |||
| 321 | }; | 321 | }; |
| 322 | MODULE_DEVICE_TABLE(of, vdoa_dt_ids); | 322 | MODULE_DEVICE_TABLE(of, vdoa_dt_ids); |
| 323 | 323 | ||
| 324 | static const struct platform_driver vdoa_driver = { | 324 | static struct platform_driver vdoa_driver = { |
| 325 | .probe = vdoa_probe, | 325 | .probe = vdoa_probe, |
| 326 | .remove = vdoa_remove, | 326 | .remove = vdoa_remove, |
| 327 | .driver = { | 327 | .driver = { |
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c index cbb03768f5d7..0f0c389f8897 100644 --- a/drivers/media/platform/exynos-gsc/gsc-core.c +++ b/drivers/media/platform/exynos-gsc/gsc-core.c | |||
| @@ -861,9 +861,7 @@ int gsc_prepare_addr(struct gsc_ctx *ctx, struct vb2_buffer *vb, | |||
| 861 | 861 | ||
| 862 | if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) || | 862 | if ((frame->fmt->pixelformat == V4L2_PIX_FMT_VYUY) || |
| 863 | (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) || | 863 | (frame->fmt->pixelformat == V4L2_PIX_FMT_YVYU) || |
| 864 | (frame->fmt->pixelformat == V4L2_PIX_FMT_NV61) || | ||
| 865 | (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) || | 864 | (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420) || |
| 866 | (frame->fmt->pixelformat == V4L2_PIX_FMT_NV21) || | ||
| 867 | (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M)) | 865 | (frame->fmt->pixelformat == V4L2_PIX_FMT_YVU420M)) |
| 868 | swap(addr->cb, addr->cr); | 866 | swap(addr->cb, addr->cr); |
| 869 | 867 | ||
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c index 823608112d89..7918b928f058 100644 --- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c +++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c | |||
| @@ -632,8 +632,8 @@ static int bdisp_open(struct file *file) | |||
| 632 | 632 | ||
| 633 | error_ctrls: | 633 | error_ctrls: |
| 634 | bdisp_ctrls_delete(ctx); | 634 | bdisp_ctrls_delete(ctx); |
| 635 | error_fh: | ||
| 636 | v4l2_fh_del(&ctx->fh); | 635 | v4l2_fh_del(&ctx->fh); |
| 636 | error_fh: | ||
| 637 | v4l2_fh_exit(&ctx->fh); | 637 | v4l2_fh_exit(&ctx->fh); |
| 638 | bdisp_hw_free_nodes(ctx); | 638 | bdisp_hw_free_nodes(ctx); |
| 639 | mem_ctx: | 639 | mem_ctx: |
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c index ab9866024ec7..04033efe7ad5 100644 --- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c +++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c | |||
| @@ -36,16 +36,18 @@ static int usb_cypress_writemem(struct usb_device *udev,u16 addr,u8 *data, u8 le | |||
| 36 | int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) | 36 | int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type) |
| 37 | { | 37 | { |
| 38 | struct hexline *hx; | 38 | struct hexline *hx; |
| 39 | u8 reset; | 39 | u8 *buf; |
| 40 | int ret,pos=0; | 40 | int ret, pos = 0; |
| 41 | u16 cpu_cs_register = cypress[type].cpu_cs_register; | ||
| 41 | 42 | ||
| 42 | hx = kmalloc(sizeof(*hx), GFP_KERNEL); | 43 | buf = kmalloc(sizeof(*hx), GFP_KERNEL); |
| 43 | if (!hx) | 44 | if (!buf) |
| 44 | return -ENOMEM; | 45 | return -ENOMEM; |
| 46 | hx = (struct hexline *)buf; | ||
| 45 | 47 | ||
| 46 | /* stop the CPU */ | 48 | /* stop the CPU */ |
| 47 | reset = 1; | 49 | buf[0] = 1; |
| 48 | if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1) | 50 | if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) |
| 49 | err("could not stop the USB controller CPU."); | 51 | err("could not stop the USB controller CPU."); |
| 50 | 52 | ||
| 51 | while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) { | 53 | while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) { |
| @@ -61,21 +63,21 @@ int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw | |||
| 61 | } | 63 | } |
| 62 | if (ret < 0) { | 64 | if (ret < 0) { |
| 63 | err("firmware download failed at %d with %d",pos,ret); | 65 | err("firmware download failed at %d with %d",pos,ret); |
| 64 | kfree(hx); | 66 | kfree(buf); |
| 65 | return ret; | 67 | return ret; |
| 66 | } | 68 | } |
| 67 | 69 | ||
| 68 | if (ret == 0) { | 70 | if (ret == 0) { |
| 69 | /* restart the CPU */ | 71 | /* restart the CPU */ |
| 70 | reset = 0; | 72 | buf[0] = 0; |
| 71 | if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) { | 73 | if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) { |
| 72 | err("could not restart the USB controller CPU."); | 74 | err("could not restart the USB controller CPU."); |
| 73 | ret = -EINVAL; | 75 | ret = -EINVAL; |
| 74 | } | 76 | } |
| 75 | } else | 77 | } else |
| 76 | ret = -EIO; | 78 | ret = -EIO; |
| 77 | 79 | ||
| 78 | kfree(hx); | 80 | kfree(buf); |
| 79 | 81 | ||
| 80 | return ret; | 82 | return ret; |
| 81 | } | 83 | } |
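The rewrite above exists because the old code passed a stack variable (&reset) to usb_cypress_writemem(), and USB transfer buffers must be DMA-able heap memory, never stack storage. A hedged sketch of the safe pattern; the vendor request and timeout values are illustrative:

#include <linux/slab.h>
#include <linux/usb.h>

static int example_write_one_byte(struct usb_device *udev, u16 addr, u8 val)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);	/* DMA-safe, unlike a stack byte */
	if (!buf)
		return -ENOMEM;

	buf[0] = val;
	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      0xa0, USB_TYPE_VENDOR, addr, 0,
			      buf, 1, 5000);
	kfree(buf);
	return ret;
}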
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index 5457c361ad58..bf0fe0137dfe 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c | |||
| @@ -1947,9 +1947,7 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev, | |||
| 1947 | if (!of_property_read_u32(child, "dma-channel", &val)) | 1947 | if (!of_property_read_u32(child, "dma-channel", &val)) |
| 1948 | gpmc_onenand_data->dma_channel = val; | 1948 | gpmc_onenand_data->dma_channel = val; |
| 1949 | 1949 | ||
| 1950 | gpmc_onenand_init(gpmc_onenand_data); | 1950 | return gpmc_onenand_init(gpmc_onenand_data); |
| 1951 | |||
| 1952 | return 0; | ||
| 1953 | } | 1951 | } |
| 1954 | #else | 1952 | #else |
| 1955 | static int gpmc_probe_onenand_child(struct platform_device *pdev, | 1953 | static int gpmc_probe_onenand_child(struct platform_device *pdev, |
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 91f645992c94..b27ea98b781f 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c | |||
| @@ -1792,15 +1792,14 @@ static pci_ers_result_t cxl_pci_error_detected(struct pci_dev *pdev, | |||
| 1792 | 1792 | ||
| 1793 | /* If we're permanently dead, give up. */ | 1793 | /* If we're permanently dead, give up. */ |
| 1794 | if (state == pci_channel_io_perm_failure) { | 1794 | if (state == pci_channel_io_perm_failure) { |
| 1795 | /* Tell the AFU drivers; but we don't care what they | ||
| 1796 | * say, we're going away. | ||
| 1797 | */ | ||
| 1798 | for (i = 0; i < adapter->slices; i++) { | 1795 | for (i = 0; i < adapter->slices; i++) { |
| 1799 | afu = adapter->afu[i]; | 1796 | afu = adapter->afu[i]; |
| 1800 | /* Only participate in EEH if we are on a virtual PHB */ | 1797 | /* |
| 1801 | if (afu->phb == NULL) | 1798 | * Tell the AFU drivers; but we don't care what they |
| 1802 | return PCI_ERS_RESULT_NONE; | 1799 | * say, we're going away. |
| 1803 | cxl_vphb_error_detected(afu, state); | 1800 | */ |
| 1801 | if (afu->phb != NULL) | ||
| 1802 | cxl_vphb_error_detected(afu, state); | ||
| 1804 | } | 1803 | } |
| 1805 | return PCI_ERS_RESULT_DISCONNECT; | 1804 | return PCI_ERS_RESULT_DISCONNECT; |
| 1806 | } | 1805 | } |
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c index 3600c9993a98..29f2daed37e0 100644 --- a/drivers/misc/mei/bus-fixup.c +++ b/drivers/misc/mei/bus-fixup.c | |||
| @@ -112,11 +112,9 @@ struct mkhi_msg { | |||
| 112 | 112 | ||
| 113 | static int mei_osver(struct mei_cl_device *cldev) | 113 | static int mei_osver(struct mei_cl_device *cldev) |
| 114 | { | 114 | { |
| 115 | int ret; | ||
| 116 | const size_t size = sizeof(struct mkhi_msg_hdr) + | 115 | const size_t size = sizeof(struct mkhi_msg_hdr) + |
| 117 | sizeof(struct mkhi_fwcaps) + | 116 | sizeof(struct mkhi_fwcaps) + |
| 118 | sizeof(struct mei_os_ver); | 117 | sizeof(struct mei_os_ver); |
| 119 | size_t length = 8; | ||
| 120 | char buf[size]; | 118 | char buf[size]; |
| 121 | struct mkhi_msg *req; | 119 | struct mkhi_msg *req; |
| 122 | struct mkhi_fwcaps *fwcaps; | 120 | struct mkhi_fwcaps *fwcaps; |
| @@ -137,15 +135,7 @@ static int mei_osver(struct mei_cl_device *cldev) | |||
| 137 | os_ver = (struct mei_os_ver *)fwcaps->data; | 135 | os_ver = (struct mei_os_ver *)fwcaps->data; |
| 138 | os_ver->os_type = OSTYPE_LINUX; | 136 | os_ver->os_type = OSTYPE_LINUX; |
| 139 | 137 | ||
| 140 | ret = __mei_cl_send(cldev->cl, buf, size, mode); | 138 | return __mei_cl_send(cldev->cl, buf, size, mode); |
| 141 | if (ret < 0) | ||
| 142 | return ret; | ||
| 143 | |||
| 144 | ret = __mei_cl_recv(cldev->cl, buf, length, 0); | ||
| 145 | if (ret < 0) | ||
| 146 | return ret; | ||
| 147 | |||
| 148 | return 0; | ||
| 149 | } | 139 | } |
| 150 | 140 | ||
| 151 | static void mei_mkhi_fix(struct mei_cl_device *cldev) | 141 | static void mei_mkhi_fix(struct mei_cl_device *cldev) |
| @@ -160,7 +150,7 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev) | |||
| 160 | return; | 150 | return; |
| 161 | 151 | ||
| 162 | ret = mei_osver(cldev); | 152 | ret = mei_osver(cldev); |
| 163 | if (ret) | 153 | if (ret < 0) |
| 164 | dev_err(&cldev->dev, "OS version command failed %d\n", ret); | 154 | dev_err(&cldev->dev, "OS version command failed %d\n", ret); |
| 165 | 155 | ||
| 166 | mei_cldev_disable(cldev); | 156 | mei_cldev_disable(cldev); |
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c index cfb1cdf176fa..13c55b8f9261 100644 --- a/drivers/misc/mei/init.c +++ b/drivers/misc/mei/init.c | |||
| @@ -124,8 +124,6 @@ int mei_reset(struct mei_device *dev) | |||
| 124 | 124 | ||
| 125 | mei_clear_interrupts(dev); | 125 | mei_clear_interrupts(dev); |
| 126 | 126 | ||
| 127 | mei_synchronize_irq(dev); | ||
| 128 | |||
| 129 | /* we're already in reset, cancel the init timer | 127 | /* we're already in reset, cancel the init timer |
| 130 | * if the reset was called due the hbm protocol error | 128 | * if the reset was called due the hbm protocol error |
| 131 | * we need to call it before hw start | 129 | * we need to call it before hw start |
| @@ -304,6 +302,9 @@ static void mei_reset_work(struct work_struct *work) | |||
| 304 | container_of(work, struct mei_device, reset_work); | 302 | container_of(work, struct mei_device, reset_work); |
| 305 | int ret; | 303 | int ret; |
| 306 | 304 | ||
| 305 | mei_clear_interrupts(dev); | ||
| 306 | mei_synchronize_irq(dev); | ||
| 307 | |||
| 307 | mutex_lock(&dev->device_lock); | 308 | mutex_lock(&dev->device_lock); |
| 308 | 309 | ||
| 309 | ret = mei_reset(dev); | 310 | ret = mei_reset(dev); |
| @@ -328,6 +329,9 @@ void mei_stop(struct mei_device *dev) | |||
| 328 | 329 | ||
| 329 | mei_cancel_work(dev); | 330 | mei_cancel_work(dev); |
| 330 | 331 | ||
| 332 | mei_clear_interrupts(dev); | ||
| 333 | mei_synchronize_irq(dev); | ||
| 334 | |||
| 331 | mutex_lock(&dev->device_lock); | 335 | mutex_lock(&dev->device_lock); |
| 332 | 336 | ||
| 333 | dev->dev_state = MEI_DEV_POWER_DOWN; | 337 | dev->dev_state = MEI_DEV_POWER_DOWN; |
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c index 9d659542a335..dad5abee656e 100644 --- a/drivers/misc/vmw_vmci/vmci_guest.c +++ b/drivers/misc/vmw_vmci/vmci_guest.c | |||
| @@ -566,10 +566,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, | |||
| 566 | */ | 566 | */ |
| 567 | error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS, | 567 | error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS, |
| 568 | PCI_IRQ_MSIX); | 568 | PCI_IRQ_MSIX); |
| 569 | if (error) { | 569 | if (error < 0) { |
| 570 | error = pci_alloc_irq_vectors(pdev, 1, 1, | 570 | error = pci_alloc_irq_vectors(pdev, 1, 1, |
| 571 | PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY); | 571 | PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY); |
| 572 | if (error) | 572 | if (error < 0) |
| 573 | goto err_remove_bitmap; | 573 | goto err_remove_bitmap; |
| 574 | } else { | 574 | } else { |
| 575 | vmci_dev->exclusive_vectors = true; | 575 | vmci_dev->exclusive_vectors = true; |
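pci_alloc_irq_vectors() returns the number of vectors allocated on success, so a bare `if (error)` treats every successful multi-vector allocation as a failure; only negative values are errors, which is exactly what the hunk above fixes. A hedged usage sketch:

#include <linux/pci.h>

static int example_setup_irqs(struct pci_dev *pdev)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 4,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI |
				     PCI_IRQ_LEGACY);
	if (nvec < 0)	/* not "if (nvec)": success is a positive count */
		return nvec;

	/* nvec vectors are now usable via pci_irq_vector(pdev, i). */
	return 0;
}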
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 1621fa08e206..ff3da960c473 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c | |||
| @@ -1560,11 +1560,8 @@ static bool mmc_blk_rw_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, | |||
| 1560 | struct mmc_blk_request *brq, struct request *req, | 1560 | struct mmc_blk_request *brq, struct request *req, |
| 1561 | bool old_req_pending) | 1561 | bool old_req_pending) |
| 1562 | { | 1562 | { |
| 1563 | struct mmc_queue_req *mq_rq; | ||
| 1564 | bool req_pending; | 1563 | bool req_pending; |
| 1565 | 1564 | ||
| 1566 | mq_rq = container_of(brq, struct mmc_queue_req, brq); | ||
| 1567 | |||
| 1568 | /* | 1565 | /* |
| 1569 | * If this is an SD card and we're writing, we can first | 1566 | * If this is an SD card and we're writing, we can first |
| 1570 | * mark the known good sectors as ok. | 1567 | * mark the known good sectors as ok. |
| @@ -1701,7 +1698,8 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req) | |||
| 1701 | case MMC_BLK_CMD_ERR: | 1698 | case MMC_BLK_CMD_ERR: |
| 1702 | req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending); | 1699 | req_pending = mmc_blk_rw_cmd_err(md, card, brq, old_req, req_pending); |
| 1703 | if (mmc_blk_reset(md, card->host, type)) { | 1700 | if (mmc_blk_reset(md, card->host, type)) { |
| 1704 | mmc_blk_rw_cmd_abort(card, old_req); | 1701 | if (req_pending) |
| 1702 | mmc_blk_rw_cmd_abort(card, old_req); | ||
| 1705 | mmc_blk_rw_try_restart(mq, new_req); | 1703 | mmc_blk_rw_try_restart(mq, new_req); |
| 1706 | return; | 1704 | return; |
| 1707 | } | 1705 | } |
| @@ -1817,6 +1815,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
| 1817 | mmc_blk_issue_flush(mq, req); | 1815 | mmc_blk_issue_flush(mq, req); |
| 1818 | } else { | 1816 | } else { |
| 1819 | mmc_blk_issue_rw_rq(mq, req); | 1817 | mmc_blk_issue_rw_rq(mq, req); |
| 1818 | card->host->context_info.is_waiting_last_req = false; | ||
| 1820 | } | 1819 | } |
| 1821 | 1820 | ||
| 1822 | out: | 1821 | out: |
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 7fd722868875..b502601df228 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
| @@ -1730,7 +1730,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, | |||
| 1730 | err = mmc_select_hs400(card); | 1730 | err = mmc_select_hs400(card); |
| 1731 | if (err) | 1731 | if (err) |
| 1732 | goto free_card; | 1732 | goto free_card; |
| 1733 | } else { | 1733 | } else if (!mmc_card_hs400es(card)) { |
| 1734 | /* Select the desired bus width optionally */ | 1734 | /* Select the desired bus width optionally */ |
| 1735 | err = mmc_select_bus_width(card); | 1735 | err = mmc_select_bus_width(card); |
| 1736 | if (err > 0 && mmc_card_hs(card)) { | 1736 | if (err > 0 && mmc_card_hs(card)) { |
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 8e32580c12b5..b235d8da0602 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c | |||
| @@ -580,7 +580,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) | |||
| 580 | } | 580 | } |
| 581 | } | 581 | } |
| 582 | sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV, | 582 | sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV, |
| 583 | (mode << 8) | (div % 0xff)); | 583 | (mode << 8) | div); |
| 584 | sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); | 584 | sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); |
| 585 | while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) | 585 | while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) |
| 586 | cpu_relax(); | 586 | cpu_relax(); |
| @@ -1559,7 +1559,7 @@ static int msdc_drv_probe(struct platform_device *pdev) | |||
| 1559 | host->src_clk_freq = clk_get_rate(host->src_clk); | 1559 | host->src_clk_freq = clk_get_rate(host->src_clk); |
| 1560 | /* Set host parameters to mmc */ | 1560 | /* Set host parameters to mmc */ |
| 1561 | mmc->ops = &mt_msdc_ops; | 1561 | mmc->ops = &mt_msdc_ops; |
| 1562 | mmc->f_min = host->src_clk_freq / (4 * 255); | 1562 | mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255); |
| 1563 | 1563 | ||
| 1564 | mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; | 1564 | mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; |
| 1565 | /* MMC core transfer sizes tunable parameters */ | 1565 | /* MMC core transfer sizes tunable parameters */ |
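The two mtk-sd changes fix truncation bugs: `div % 0xff` wrapped the divider value instead of programming it directly, and plain integer division could advertise an f_min lower than the divider can actually produce. The DIV_ROUND_UP side is easy to see standalone; the clock rate below is illustrative:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long src_clk_freq = 400001;	/* illustrative rate, Hz */
	unsigned long max_div = 4 * 255;

	/* Truncation reports 392 Hz, which the divider cannot reach;
	 * rounding up reports the achievable 393 Hz. */
	printf("truncated:  %lu\n", src_clk_freq / max_div);
	printf("rounded up: %lu\n", DIV_ROUND_UP(src_clk_freq, max_div));
	return 0;
}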
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index 410a55b1c25f..1cfd7f900339 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c | |||
| @@ -28,13 +28,9 @@ | |||
| 28 | #include "sdhci-pltfm.h" | 28 | #include "sdhci-pltfm.h" |
| 29 | #include <linux/of.h> | 29 | #include <linux/of.h> |
| 30 | 30 | ||
| 31 | #define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c | ||
| 32 | #define SDHCI_ARASAN_VENDOR_REGISTER 0x78 | 31 | #define SDHCI_ARASAN_VENDOR_REGISTER 0x78 |
| 33 | 32 | ||
| 34 | #define VENDOR_ENHANCED_STROBE BIT(0) | 33 | #define VENDOR_ENHANCED_STROBE BIT(0) |
| 35 | #define CLK_CTRL_TIMEOUT_SHIFT 16 | ||
| 36 | #define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT) | ||
| 37 | #define CLK_CTRL_TIMEOUT_MIN_EXP 13 | ||
| 38 | 34 | ||
| 39 | #define PHY_CLK_TOO_SLOW_HZ 400000 | 35 | #define PHY_CLK_TOO_SLOW_HZ 400000 |
| 40 | 36 | ||
| @@ -163,15 +159,15 @@ static int sdhci_arasan_syscon_write(struct sdhci_host *host, | |||
| 163 | 159 | ||
| 164 | static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host) | 160 | static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host) |
| 165 | { | 161 | { |
| 166 | u32 div; | ||
| 167 | unsigned long freq; | 162 | unsigned long freq; |
| 168 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 163 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
| 169 | 164 | ||
| 170 | div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET); | 165 | /* SDHCI timeout clock is in kHz */ |
| 171 | div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT; | 166 | freq = DIV_ROUND_UP(clk_get_rate(pltfm_host->clk), 1000); |
| 172 | 167 | ||
| 173 | freq = clk_get_rate(pltfm_host->clk); | 168 | /* or in MHz */ |
| 174 | freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div); | 169 | if (host->caps & SDHCI_TIMEOUT_CLK_UNIT) |
| 170 | freq = DIV_ROUND_UP(freq, 1000); | ||
| 175 | 171 | ||
| 176 | return freq; | 172 | return freq; |
| 177 | } | 173 | } |
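After the change above, the timeout clock is derived from the controller clock itself: SDHCI expects the value in kHz, or in MHz when the capabilities register sets SDHCI_TIMEOUT_CLK_UNIT. A standalone model of the conversion; the capability bit value here is an illustrative stand-in:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define TIMEOUT_CLK_UNIT 0x80	/* illustrative stand-in for the cap bit */

static unsigned long timeout_clock(unsigned long rate_hz, unsigned int caps)
{
	unsigned long freq = DIV_ROUND_UP(rate_hz, 1000);	/* kHz */

	if (caps & TIMEOUT_CLK_UNIT)
		freq = DIV_ROUND_UP(freq, 1000);		/* MHz */
	return freq;
}

int main(void)
{
	printf("%lu kHz\n", timeout_clock(200000000, 0));
	printf("%lu MHz\n", timeout_clock(200000000, TIMEOUT_CLK_UNIT));
	return 0;
}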
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 2f9ad213377a..7fd964256faa 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c | |||
| @@ -85,11 +85,30 @@ static void sdhci_at91_set_clock(struct sdhci_host *host, unsigned int clock) | |||
| 85 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); | 85 | sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); |
| 86 | } | 86 | } |
| 87 | 87 | ||
| 88 | /* | ||
| 89 | * In this specific implementation of the SDHCI controller, the power register | ||
| 90 | * needs to have a valid voltage set even when the power supply is managed by | ||
| 91 | * an external regulator. | ||
| 92 | */ | ||
| 93 | static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode, | ||
| 94 | unsigned short vdd) | ||
| 95 | { | ||
| 96 | if (!IS_ERR(host->mmc->supply.vmmc)) { | ||
| 97 | struct mmc_host *mmc = host->mmc; | ||
| 98 | |||
| 99 | spin_unlock_irq(&host->lock); | ||
| 100 | mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); | ||
| 101 | spin_lock_irq(&host->lock); | ||
| 102 | } | ||
| 103 | sdhci_set_power_noreg(host, mode, vdd); | ||
| 104 | } | ||
| 105 | |||
| 88 | static const struct sdhci_ops sdhci_at91_sama5d2_ops = { | 106 | static const struct sdhci_ops sdhci_at91_sama5d2_ops = { |
| 89 | .set_clock = sdhci_at91_set_clock, | 107 | .set_clock = sdhci_at91_set_clock, |
| 90 | .set_bus_width = sdhci_set_bus_width, | 108 | .set_bus_width = sdhci_set_bus_width, |
| 91 | .reset = sdhci_reset, | 109 | .reset = sdhci_reset, |
| 92 | .set_uhs_signaling = sdhci_set_uhs_signaling, | 110 | .set_uhs_signaling = sdhci_set_uhs_signaling, |
| 111 | .set_power = sdhci_at91_set_power, | ||
| 93 | }; | 112 | }; |
| 94 | 113 | ||
| 95 | static const struct sdhci_pltfm_data soc_data_sama5d2 = { | 114 | static const struct sdhci_pltfm_data soc_data_sama5d2 = { |
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 982b3e349426..86560d590786 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c | |||
| @@ -451,6 +451,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, | |||
| 451 | if (mode == MMC_POWER_OFF) | 451 | if (mode == MMC_POWER_OFF) |
| 452 | return; | 452 | return; |
| 453 | 453 | ||
| 454 | spin_unlock_irq(&host->lock); | ||
| 455 | |||
| 454 | /* | 456 | /* |
| 455 | * Bus power might not enable after D3 -> D0 transition due to the | 457 | * Bus power might not enable after D3 -> D0 transition due to the |
| 456 | * present state not yet having propagated. Retry for up to 2ms. | 458 | * present state not yet having propagated. Retry for up to 2ms. |
| @@ -463,6 +465,8 @@ static void sdhci_intel_set_power(struct sdhci_host *host, unsigned char mode, | |||
| 463 | reg |= SDHCI_POWER_ON; | 465 | reg |= SDHCI_POWER_ON; |
| 464 | sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); | 466 | sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); |
| 465 | } | 467 | } |
| 468 | |||
| 469 | spin_lock_irq(&host->lock); | ||
| 466 | } | 470 | } |
| 467 | 471 | ||
| 468 | static const struct sdhci_ops sdhci_intel_byt_ops = { | 472 | static const struct sdhci_ops sdhci_intel_byt_ops = { |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 6fdd7a70f229..9c1a099afbbe 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
| @@ -1362,7 +1362,9 @@ void sdhci_enable_clk(struct sdhci_host *host, u16 clk) | |||
| 1362 | return; | 1362 | return; |
| 1363 | } | 1363 | } |
| 1364 | timeout--; | 1364 | timeout--; |
| 1365 | mdelay(1); | 1365 | spin_unlock_irq(&host->lock); |
| 1366 | usleep_range(900, 1100); | ||
| 1367 | spin_lock_irq(&host->lock); | ||
| 1366 | } | 1368 | } |
| 1367 | 1369 | ||
| 1368 | clk |= SDHCI_CLOCK_CARD_EN; | 1370 | clk |= SDHCI_CLOCK_CARD_EN; |
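The three sdhci hunks above share one rule: anything that may sleep (mmc_regulator_set_ocr(), the power-on retry loop, usleep_range()) must not run under the spin_lock_irq-held host->lock, so the lock is dropped around the sleeping region and re-taken afterwards. A hedged skeleton of the pattern:

#include <linux/delay.h>
#include <linux/spinlock.h>

static void example_wait_unlocked(spinlock_t *lock)
{
	spin_lock_irq(lock);

	/* ... work that needs the lock ... */

	spin_unlock_irq(lock);
	usleep_range(900, 1100);	/* sleeping is only legal unlocked */
	spin_lock_irq(lock);

	/* ... re-check state: it may have changed while unlocked ... */

	spin_unlock_irq(lock);
}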
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c index d2c386f09d69..1d843357422e 100644 --- a/drivers/mmc/host/ushc.c +++ b/drivers/mmc/host/ushc.c | |||
| @@ -426,6 +426,9 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id | |||
| 426 | struct ushc_data *ushc; | 426 | struct ushc_data *ushc; |
| 427 | int ret; | 427 | int ret; |
| 428 | 428 | ||
| 429 | if (intf->cur_altsetting->desc.bNumEndpoints < 1) | ||
| 430 | return -ENODEV; | ||
| 431 | |||
| 429 | mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); | 432 | mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); |
| 430 | if (mmc == NULL) | 433 | if (mmc == NULL) |
| 431 | return -ENOMEM; | 434 | return -ENOMEM; |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 8a280e7d66bd..127adbeefb10 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h | |||
| @@ -984,29 +984,29 @@ | |||
| 984 | #define XP_ECC_CNT1_DESC_DED_WIDTH 8 | 984 | #define XP_ECC_CNT1_DESC_DED_WIDTH 8 |
| 985 | #define XP_ECC_CNT1_DESC_SEC_INDEX 0 | 985 | #define XP_ECC_CNT1_DESC_SEC_INDEX 0 |
| 986 | #define XP_ECC_CNT1_DESC_SEC_WIDTH 8 | 986 | #define XP_ECC_CNT1_DESC_SEC_WIDTH 8 |
| 987 | #define XP_ECC_IER_DESC_DED_INDEX 0 | 987 | #define XP_ECC_IER_DESC_DED_INDEX 5 |
| 988 | #define XP_ECC_IER_DESC_DED_WIDTH 1 | 988 | #define XP_ECC_IER_DESC_DED_WIDTH 1 |
| 989 | #define XP_ECC_IER_DESC_SEC_INDEX 1 | 989 | #define XP_ECC_IER_DESC_SEC_INDEX 4 |
| 990 | #define XP_ECC_IER_DESC_SEC_WIDTH 1 | 990 | #define XP_ECC_IER_DESC_SEC_WIDTH 1 |
| 991 | #define XP_ECC_IER_RX_DED_INDEX 2 | 991 | #define XP_ECC_IER_RX_DED_INDEX 3 |
| 992 | #define XP_ECC_IER_RX_DED_WIDTH 1 | 992 | #define XP_ECC_IER_RX_DED_WIDTH 1 |
| 993 | #define XP_ECC_IER_RX_SEC_INDEX 3 | 993 | #define XP_ECC_IER_RX_SEC_INDEX 2 |
| 994 | #define XP_ECC_IER_RX_SEC_WIDTH 1 | 994 | #define XP_ECC_IER_RX_SEC_WIDTH 1 |
| 995 | #define XP_ECC_IER_TX_DED_INDEX 4 | 995 | #define XP_ECC_IER_TX_DED_INDEX 1 |
| 996 | #define XP_ECC_IER_TX_DED_WIDTH 1 | 996 | #define XP_ECC_IER_TX_DED_WIDTH 1 |
| 997 | #define XP_ECC_IER_TX_SEC_INDEX 5 | 997 | #define XP_ECC_IER_TX_SEC_INDEX 0 |
| 998 | #define XP_ECC_IER_TX_SEC_WIDTH 1 | 998 | #define XP_ECC_IER_TX_SEC_WIDTH 1 |
| 999 | #define XP_ECC_ISR_DESC_DED_INDEX 0 | 999 | #define XP_ECC_ISR_DESC_DED_INDEX 5 |
| 1000 | #define XP_ECC_ISR_DESC_DED_WIDTH 1 | 1000 | #define XP_ECC_ISR_DESC_DED_WIDTH 1 |
| 1001 | #define XP_ECC_ISR_DESC_SEC_INDEX 1 | 1001 | #define XP_ECC_ISR_DESC_SEC_INDEX 4 |
| 1002 | #define XP_ECC_ISR_DESC_SEC_WIDTH 1 | 1002 | #define XP_ECC_ISR_DESC_SEC_WIDTH 1 |
| 1003 | #define XP_ECC_ISR_RX_DED_INDEX 2 | 1003 | #define XP_ECC_ISR_RX_DED_INDEX 3 |
| 1004 | #define XP_ECC_ISR_RX_DED_WIDTH 1 | 1004 | #define XP_ECC_ISR_RX_DED_WIDTH 1 |
| 1005 | #define XP_ECC_ISR_RX_SEC_INDEX 3 | 1005 | #define XP_ECC_ISR_RX_SEC_INDEX 2 |
| 1006 | #define XP_ECC_ISR_RX_SEC_WIDTH 1 | 1006 | #define XP_ECC_ISR_RX_SEC_WIDTH 1 |
| 1007 | #define XP_ECC_ISR_TX_DED_INDEX 4 | 1007 | #define XP_ECC_ISR_TX_DED_INDEX 1 |
| 1008 | #define XP_ECC_ISR_TX_DED_WIDTH 1 | 1008 | #define XP_ECC_ISR_TX_DED_WIDTH 1 |
| 1009 | #define XP_ECC_ISR_TX_SEC_INDEX 5 | 1009 | #define XP_ECC_ISR_TX_SEC_INDEX 0 |
| 1010 | #define XP_ECC_ISR_TX_SEC_WIDTH 1 | 1010 | #define XP_ECC_ISR_TX_SEC_WIDTH 1 |
| 1011 | #define XP_I2C_MUTEX_BUSY_INDEX 31 | 1011 | #define XP_I2C_MUTEX_BUSY_INDEX 31 |
| 1012 | #define XP_I2C_MUTEX_BUSY_WIDTH 1 | 1012 | #define XP_I2C_MUTEX_BUSY_WIDTH 1 |
| @@ -1148,8 +1148,8 @@ | |||
| 1148 | #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 | 1148 | #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 |
| 1149 | #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 | 1149 | #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 |
| 1150 | #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 | 1150 | #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 |
| 1151 | #define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2 | 1151 | #define RX_PACKET_ATTRIBUTES_LAST_INDEX 2 |
| 1152 | #define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1 | 1152 | #define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1 |
| 1153 | #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 | 1153 | #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 |
| 1154 | #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 | 1154 | #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 |
| 1155 | #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 | 1155 | #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 |
| @@ -1158,6 +1158,8 @@ | |||
| 1158 | #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 | 1158 | #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 |
| 1159 | #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 | 1159 | #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 |
| 1160 | #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 | 1160 | #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 |
| 1161 | #define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7 | ||
| 1162 | #define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1 | ||
| 1161 | 1163 | ||
| 1162 | #define RX_NORMAL_DESC0_OVT_INDEX 0 | 1164 | #define RX_NORMAL_DESC0_OVT_INDEX 0 |
| 1163 | #define RX_NORMAL_DESC0_OVT_WIDTH 16 | 1165 | #define RX_NORMAL_DESC0_OVT_WIDTH 16 |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index 937f37a5dcb2..24a687ce4388 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c | |||
| @@ -1896,10 +1896,15 @@ static int xgbe_dev_read(struct xgbe_channel *channel) | |||
| 1896 | 1896 | ||
| 1897 | /* Get the header length */ | 1897 | /* Get the header length */ |
| 1898 | if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { | 1898 | if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) { |
| 1899 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, | ||
| 1900 | FIRST, 1); | ||
| 1899 | rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, | 1901 | rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2, |
| 1900 | RX_NORMAL_DESC2, HL); | 1902 | RX_NORMAL_DESC2, HL); |
| 1901 | if (rdata->rx.hdr_len) | 1903 | if (rdata->rx.hdr_len) |
| 1902 | pdata->ext_stats.rx_split_header_packets++; | 1904 | pdata->ext_stats.rx_split_header_packets++; |
| 1905 | } else { | ||
| 1906 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, | ||
| 1907 | FIRST, 0); | ||
| 1903 | } | 1908 | } |
| 1904 | 1909 | ||
| 1905 | /* Get the RSS hash */ | 1910 | /* Get the RSS hash */ |
| @@ -1922,19 +1927,16 @@ static int xgbe_dev_read(struct xgbe_channel *channel) | |||
| 1922 | } | 1927 | } |
| 1923 | } | 1928 | } |
| 1924 | 1929 | ||
| 1925 | /* Get the packet length */ | 1930 | /* Not all the data has been transferred for this packet */ |
| 1926 | rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); | 1931 | if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) |
| 1927 | |||
| 1928 | if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) { | ||
| 1929 | /* Not all the data has been transferred for this packet */ | ||
| 1930 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, | ||
| 1931 | INCOMPLETE, 1); | ||
| 1932 | return 0; | 1932 | return 0; |
| 1933 | } | ||
| 1934 | 1933 | ||
| 1935 | /* This is the last of the data for this packet */ | 1934 | /* This is the last of the data for this packet */ |
| 1936 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, | 1935 | XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
| 1937 | INCOMPLETE, 0); | 1936 | LAST, 1); |
| 1937 | |||
| 1938 | /* Get the packet length */ | ||
| 1939 | rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL); | ||
| 1938 | 1940 | ||
| 1939 | /* Set checksum done indicator as appropriate */ | 1941 | /* Set checksum done indicator as appropriate */ |
| 1940 | if (netdev->features & NETIF_F_RXCSUM) | 1942 | if (netdev->features & NETIF_F_RXCSUM) |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index ffea9859f5a7..a713abd9d03e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c | |||
| @@ -1971,13 +1971,12 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, | |||
| 1971 | { | 1971 | { |
| 1972 | struct sk_buff *skb; | 1972 | struct sk_buff *skb; |
| 1973 | u8 *packet; | 1973 | u8 *packet; |
| 1974 | unsigned int copy_len; | ||
| 1975 | 1974 | ||
| 1976 | skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); | 1975 | skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len); |
| 1977 | if (!skb) | 1976 | if (!skb) |
| 1978 | return NULL; | 1977 | return NULL; |
| 1979 | 1978 | ||
| 1980 | /* Start with the header buffer which may contain just the header | 1979 | /* Pull in the header buffer which may contain just the header |
| 1981 | * or the header plus data | 1980 | * or the header plus data |
| 1982 | */ | 1981 | */ |
| 1983 | dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, | 1982 | dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base, |
| @@ -1986,30 +1985,49 @@ static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata, | |||
| 1986 | 1985 | ||
| 1987 | packet = page_address(rdata->rx.hdr.pa.pages) + | 1986 | packet = page_address(rdata->rx.hdr.pa.pages) + |
| 1988 | rdata->rx.hdr.pa.pages_offset; | 1987 | rdata->rx.hdr.pa.pages_offset; |
| 1989 | copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len; | 1988 | skb_copy_to_linear_data(skb, packet, len); |
| 1990 | copy_len = min(rdata->rx.hdr.dma_len, copy_len); | 1989 | skb_put(skb, len); |
| 1991 | skb_copy_to_linear_data(skb, packet, copy_len); | ||
| 1992 | skb_put(skb, copy_len); | ||
| 1993 | |||
| 1994 | len -= copy_len; | ||
| 1995 | if (len) { | ||
| 1996 | /* Add the remaining data as a frag */ | ||
| 1997 | dma_sync_single_range_for_cpu(pdata->dev, | ||
| 1998 | rdata->rx.buf.dma_base, | ||
| 1999 | rdata->rx.buf.dma_off, | ||
| 2000 | rdata->rx.buf.dma_len, | ||
| 2001 | DMA_FROM_DEVICE); | ||
| 2002 | |||
| 2003 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, | ||
| 2004 | rdata->rx.buf.pa.pages, | ||
| 2005 | rdata->rx.buf.pa.pages_offset, | ||
| 2006 | len, rdata->rx.buf.dma_len); | ||
| 2007 | rdata->rx.buf.pa.pages = NULL; | ||
| 2008 | } | ||
| 2009 | 1990 | ||
| 2010 | return skb; | 1991 | return skb; |
| 2011 | } | 1992 | } |
| 2012 | 1993 | ||
| 1994 | static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata, | ||
| 1995 | struct xgbe_packet_data *packet) | ||
| 1996 | { | ||
| 1997 | /* Always zero if not the first descriptor */ | ||
| 1998 | if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST)) | ||
| 1999 | return 0; | ||
| 2000 | |||
| 2001 | /* First descriptor with split header, return header length */ | ||
| 2002 | if (rdata->rx.hdr_len) | ||
| 2003 | return rdata->rx.hdr_len; | ||
| 2004 | |||
| 2005 | /* First descriptor but not the last descriptor and no split header, | ||
| 2006 | * so the full buffer was used | ||
| 2007 | */ | ||
| 2008 | if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) | ||
| 2009 | return rdata->rx.hdr.dma_len; | ||
| 2010 | |||
| 2011 | /* First descriptor and last descriptor and no split header, so | ||
| 2012 | * calculate how much of the buffer was used | ||
| 2013 | */ | ||
| 2014 | return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len); | ||
| 2015 | } | ||
| 2016 | |||
| 2017 | static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata, | ||
| 2018 | struct xgbe_packet_data *packet, | ||
| 2019 | unsigned int len) | ||
| 2020 | { | ||
| 2021 | /* Always the full buffer if not the last descriptor */ | ||
| 2022 | if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST)) | ||
| 2023 | return rdata->rx.buf.dma_len; | ||
| 2024 | |||
| 2025 | /* Last descriptor so calculate how much of the buffer was used | ||
| 2026 | * for the last bit of data | ||
| 2027 | */ | ||
| 2028 | return rdata->rx.len - len; | ||
| 2029 | } | ||
| 2030 | |||
| 2013 | static int xgbe_tx_poll(struct xgbe_channel *channel) | 2031 | static int xgbe_tx_poll(struct xgbe_channel *channel) |
| 2014 | { | 2032 | { |
| 2015 | struct xgbe_prv_data *pdata = channel->pdata; | 2033 | struct xgbe_prv_data *pdata = channel->pdata; |
| @@ -2092,8 +2110,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) | |||
| 2092 | struct napi_struct *napi; | 2110 | struct napi_struct *napi; |
| 2093 | struct sk_buff *skb; | 2111 | struct sk_buff *skb; |
| 2094 | struct skb_shared_hwtstamps *hwtstamps; | 2112 | struct skb_shared_hwtstamps *hwtstamps; |
| 2095 | unsigned int incomplete, error, context_next, context; | 2113 | unsigned int last, error, context_next, context; |
| 2096 | unsigned int len, rdesc_len, max_len; | 2114 | unsigned int len, buf1_len, buf2_len, max_len; |
| 2097 | unsigned int received = 0; | 2115 | unsigned int received = 0; |
| 2098 | int packet_count = 0; | 2116 | int packet_count = 0; |
| 2099 | 2117 | ||
| @@ -2103,7 +2121,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget) | |||
| 2103 | if (!ring) | 2121 | if (!ring) |
| 2104 | return 0; | 2122 | return 0; |
| 2105 | 2123 | ||
| 2106 | incomplete = 0; | 2124 | last = 0; |
| 2107 | context_next = 0; | 2125 | context_next = 0; |
| 2108 | 2126 | ||
| 2109 | napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; | 2127 | napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi; |
| @@ -2137,9 +2155,8 @@ read_again: | |||
| 2137 | received++; | 2155 | received++; |
| 2138 | ring->cur++; | 2156 | ring->cur++; |
| 2139 | 2157 | ||
| 2140 | incomplete = XGMAC_GET_BITS(packet->attributes, | 2158 | last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, |
| 2141 | RX_PACKET_ATTRIBUTES, | 2159 | LAST); |
| 2142 | INCOMPLETE); | ||
| 2143 | context_next = XGMAC_GET_BITS(packet->attributes, | 2160 | context_next = XGMAC_GET_BITS(packet->attributes, |
| 2144 | RX_PACKET_ATTRIBUTES, | 2161 | RX_PACKET_ATTRIBUTES, |
| 2145 | CONTEXT_NEXT); | 2162 | CONTEXT_NEXT); |
| @@ -2148,7 +2165,7 @@ read_again: | |||
| 2148 | CONTEXT); | 2165 | CONTEXT); |
| 2149 | 2166 | ||
| 2150 | /* Earlier error, just drain the remaining data */ | 2167 | /* Earlier error, just drain the remaining data */ |
| 2151 | if ((incomplete || context_next) && error) | 2168 | if ((!last || context_next) && error) |
| 2152 | goto read_again; | 2169 | goto read_again; |
| 2153 | 2170 | ||
| 2154 | if (error || packet->errors) { | 2171 | if (error || packet->errors) { |
| @@ -2160,16 +2177,22 @@ read_again: | |||
| 2160 | } | 2177 | } |
| 2161 | 2178 | ||
| 2162 | if (!context) { | 2179 | if (!context) { |
| 2163 | /* Length is cumulative, get this descriptor's length */ | 2180 | /* Get the data length in the descriptor buffers */ |
| 2164 | rdesc_len = rdata->rx.len - len; | 2181 | buf1_len = xgbe_rx_buf1_len(rdata, packet); |
| 2165 | len += rdesc_len; | 2182 | len += buf1_len; |
| 2183 | buf2_len = xgbe_rx_buf2_len(rdata, packet, len); | ||
| 2184 | len += buf2_len; | ||
| 2166 | 2185 | ||
| 2167 | if (rdesc_len && !skb) { | 2186 | if (!skb) { |
| 2168 | skb = xgbe_create_skb(pdata, napi, rdata, | 2187 | skb = xgbe_create_skb(pdata, napi, rdata, |
| 2169 | rdesc_len); | 2188 | buf1_len); |
| 2170 | if (!skb) | 2189 | if (!skb) { |
| 2171 | error = 1; | 2190 | error = 1; |
| 2172 | } else if (rdesc_len) { | 2191 | goto skip_data; |
| 2192 | } | ||
| 2193 | } | ||
| 2194 | |||
| 2195 | if (buf2_len) { | ||
| 2173 | dma_sync_single_range_for_cpu(pdata->dev, | 2196 | dma_sync_single_range_for_cpu(pdata->dev, |
| 2174 | rdata->rx.buf.dma_base, | 2197 | rdata->rx.buf.dma_base, |
| 2175 | rdata->rx.buf.dma_off, | 2198 | rdata->rx.buf.dma_off, |
| @@ -2179,13 +2202,14 @@ read_again: | |||
| 2179 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, | 2202 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, |
| 2180 | rdata->rx.buf.pa.pages, | 2203 | rdata->rx.buf.pa.pages, |
| 2181 | rdata->rx.buf.pa.pages_offset, | 2204 | rdata->rx.buf.pa.pages_offset, |
| 2182 | rdesc_len, | 2205 | buf2_len, |
| 2183 | rdata->rx.buf.dma_len); | 2206 | rdata->rx.buf.dma_len); |
| 2184 | rdata->rx.buf.pa.pages = NULL; | 2207 | rdata->rx.buf.pa.pages = NULL; |
| 2185 | } | 2208 | } |
| 2186 | } | 2209 | } |
| 2187 | 2210 | ||
| 2188 | if (incomplete || context_next) | 2211 | skip_data: |
| 2212 | if (!last || context_next) | ||
| 2189 | goto read_again; | 2213 | goto read_again; |
| 2190 | 2214 | ||
| 2191 | if (!skb) | 2215 | if (!skb) |
| @@ -2243,7 +2267,7 @@ next_packet: | |||
| 2243 | } | 2267 | } |
| 2244 | 2268 | ||
| 2245 | /* Check if we need to save state before leaving */ | 2269 | /* Check if we need to save state before leaving */ |
| 2246 | if (received && (incomplete || context_next)) { | 2270 | if (received && (!last || context_next)) { |
| 2247 | rdata = XGBE_GET_DESC_DATA(ring, ring->cur); | 2271 | rdata = XGBE_GET_DESC_DATA(ring, ring->cur); |
| 2248 | rdata->state_saved = 1; | 2272 | rdata->state_saved = 1; |
| 2249 | rdata->state.skb = skb; | 2273 | rdata->state.skb = skb; |
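The xgbe hunk above replaces the cumulative-length INCOMPLETE tracking with a LAST flag plus explicit per-buffer lengths (buf1/buf2), and the new skip_data label keeps consuming descriptors even when skb allocation fails mid-packet, so the ring cannot stall on an allocation error. A minimal userspace sketch of that drain-until-LAST shape, with hypothetical helpers standing in for xgbe_create_skb() and the descriptor accessors:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct desc { size_t len; bool last; };

static void *alloc_skb_or_null(size_t len)
{
	return malloc(len);          /* may fail, like xgbe_create_skb() */
}

static size_t rx_packet(const struct desc *ring, size_t n)
{
	void *skb = NULL;
	bool error = false;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!error && !skb) {
			skb = alloc_skb_or_null(ring[i].len);
			if (!skb)
				error = true;  /* keep draining descriptors anyway */
		}
		if (ring[i].last)
			break;                 /* packet boundary reached */
	}
	if (error)
		printf("packet dropped, %zu descriptors drained\n", i + 1);
	free(skb);
	return i + 1;                  /* descriptors consumed */
}

int main(void)
{
	struct desc ring[] = { {2048, false}, {2048, false}, {512, true} };
	rx_packet(ring, 3);
	return 0;
}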
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index dad63623be6a..d05fbfdce5e5 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c | |||
| @@ -98,6 +98,7 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu) | |||
| 98 | 98 | ||
| 99 | if (err < 0) | 99 | if (err < 0) |
| 100 | goto err_exit; | 100 | goto err_exit; |
| 101 | ndev->mtu = new_mtu; | ||
| 101 | 102 | ||
| 102 | if (netif_running(ndev)) { | 103 | if (netif_running(ndev)) { |
| 103 | aq_ndev_close(ndev); | 104 | aq_ndev_close(ndev); |
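The aq_main.c change records the new MTU in the netdev only after the hardware call succeeded; previously the assignment was missing, so the stack and the NIC could disagree. A minimal sketch of that ordering, with set_hw_mtu() as a stand-in for the driver's aq_nic_set_mtu() (not the actual API):

#include <errno.h>
#include <stdio.h>

static int set_hw_mtu(int mtu)
{
	return (mtu >= 68 && mtu <= 9000) ? 0 : -EINVAL;
}

static int change_mtu(int *dev_mtu, int new_mtu)
{
	int err = set_hw_mtu(new_mtu);

	if (err < 0)
		return err;         /* leave *dev_mtu untouched on failure */
	*dev_mtu = new_mtu;         /* hardware accepted it; record the change */
	return 0;
}

int main(void)
{
	int mtu = 1500;
	printf("rc=%d mtu=%d\n", change_mtu(&mtu, 9000), mtu);
	printf("rc=%d mtu=%d\n", change_mtu(&mtu, 100000), mtu);
	return 0;
}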
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h index 1093ea18823a..0592a0330cf0 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h | |||
| @@ -137,6 +137,7 @@ static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = { | |||
| 137 | .tx_rings = HW_ATL_A0_TX_RINGS, | 137 | .tx_rings = HW_ATL_A0_TX_RINGS, |
| 138 | .rx_rings = HW_ATL_A0_RX_RINGS, | 138 | .rx_rings = HW_ATL_A0_RX_RINGS, |
| 139 | .hw_features = NETIF_F_HW_CSUM | | 139 | .hw_features = NETIF_F_HW_CSUM | |
| 140 | NETIF_F_RXCSUM | | ||
| 140 | NETIF_F_RXHASH | | 141 | NETIF_F_RXHASH | |
| 141 | NETIF_F_SG | | 142 | NETIF_F_SG | |
| 142 | NETIF_F_TSO, | 143 | NETIF_F_TSO, |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index 8bdee3ddd5a0..f3957e930340 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | |||
| @@ -188,6 +188,7 @@ static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = { | |||
| 188 | .tx_rings = HW_ATL_B0_TX_RINGS, | 188 | .tx_rings = HW_ATL_B0_TX_RINGS, |
| 189 | .rx_rings = HW_ATL_B0_RX_RINGS, | 189 | .rx_rings = HW_ATL_B0_RX_RINGS, |
| 190 | .hw_features = NETIF_F_HW_CSUM | | 190 | .hw_features = NETIF_F_HW_CSUM | |
| 191 | NETIF_F_RXCSUM | | ||
| 191 | NETIF_F_RXHASH | | 192 | NETIF_F_RXHASH | |
| 192 | NETIF_F_SG | | 193 | NETIF_F_SG | |
| 193 | NETIF_F_TSO | | 194 | NETIF_F_TSO | |
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 69015fa50f20..365895ed3c3e 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c | |||
| @@ -3481,7 +3481,8 @@ static int bcmgenet_suspend(struct device *d) | |||
| 3481 | 3481 | ||
| 3482 | bcmgenet_netif_stop(dev); | 3482 | bcmgenet_netif_stop(dev); |
| 3483 | 3483 | ||
| 3484 | phy_suspend(priv->phydev); | 3484 | if (!device_may_wakeup(d)) |
| 3485 | phy_suspend(priv->phydev); | ||
| 3485 | 3486 | ||
| 3486 | netif_device_detach(dev); | 3487 | netif_device_detach(dev); |
| 3487 | 3488 | ||
| @@ -3578,7 +3579,8 @@ static int bcmgenet_resume(struct device *d) | |||
| 3578 | 3579 | ||
| 3579 | netif_device_attach(dev); | 3580 | netif_device_attach(dev); |
| 3580 | 3581 | ||
| 3581 | phy_resume(priv->phydev); | 3582 | if (!device_may_wakeup(d)) |
| 3583 | phy_resume(priv->phydev); | ||
| 3582 | 3584 | ||
| 3583 | if (priv->eee.eee_enabled) | 3585 | if (priv->eee.eee_enabled) |
| 3584 | bcmgenet_eee_enable_set(dev, true); | 3586 | bcmgenet_eee_enable_set(dev, true); |
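The bcmgenet suspend/resume hunks gate phy_suspend()/phy_resume() on !device_may_wakeup(): a PHY that has to detect wake-on-LAN traffic must stay powered across suspend. A sketch of that guard, assuming a simplified device model (device_may_wakeup() is the real kernel helper; the surrounding suspend() is illustrative):

#include <stdbool.h>
#include <stdio.h>

struct device { bool may_wakeup; };

static bool device_may_wakeup(const struct device *d)
{
	return d->may_wakeup;
}

static void phy_suspend(void) { puts("PHY powered down"); }

static void suspend(const struct device *d)
{
	if (!device_may_wakeup(d))
		phy_suspend();          /* WoL needs the PHY to stay up */
	else
		puts("PHY left powered for wake-on-LAN");
}

int main(void)
{
	struct device wol = { true }, plain = { false };
	suspend(&plain);
	suspend(&wol);
	return 0;
}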
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index e87607621e62..2f9281936f0e 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c | |||
| @@ -220,20 +220,6 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable) | |||
| 220 | udelay(60); | 220 | udelay(60); |
| 221 | } | 221 | } |
| 222 | 222 | ||
| 223 | static void bcmgenet_internal_phy_setup(struct net_device *dev) | ||
| 224 | { | ||
| 225 | struct bcmgenet_priv *priv = netdev_priv(dev); | ||
| 226 | u32 reg; | ||
| 227 | |||
| 228 | /* Power up PHY */ | ||
| 229 | bcmgenet_phy_power_set(dev, true); | ||
| 230 | /* enable APD */ | ||
| 231 | reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); | ||
| 232 | reg |= EXT_PWR_DN_EN_LD; | ||
| 233 | bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); | ||
| 234 | bcmgenet_mii_reset(dev); | ||
| 235 | } | ||
| 236 | |||
| 237 | static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) | 223 | static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv) |
| 238 | { | 224 | { |
| 239 | u32 reg; | 225 | u32 reg; |
| @@ -281,7 +267,6 @@ int bcmgenet_mii_config(struct net_device *dev) | |||
| 281 | 267 | ||
| 282 | if (priv->internal_phy) { | 268 | if (priv->internal_phy) { |
| 283 | phy_name = "internal PHY"; | 269 | phy_name = "internal PHY"; |
| 284 | bcmgenet_internal_phy_setup(dev); | ||
| 285 | } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { | 270 | } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) { |
| 286 | phy_name = "MoCA"; | 271 | phy_name = "MoCA"; |
| 287 | bcmgenet_moca_phy_setup(priv); | 272 | bcmgenet_moca_phy_setup(priv); |
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c index 05c1c1dd7751..cebfe3bd086e 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c +++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c | |||
| @@ -325,7 +325,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf, | |||
| 325 | return PTR_ERR(kern_buf); | 325 | return PTR_ERR(kern_buf); |
| 326 | 326 | ||
| 327 | rc = sscanf(kern_buf, "%x:%x", &addr, &len); | 327 | rc = sscanf(kern_buf, "%x:%x", &addr, &len); |
| 328 | if (rc < 2) { | 328 | if (rc < 2 || len > UINT_MAX >> 2) { |
| 329 | netdev_warn(bnad->netdev, "failed to read user buffer\n"); | 329 | netdev_warn(bnad->netdev, "failed to read user buffer\n"); |
| 330 | kfree(kern_buf); | 330 | kfree(kern_buf); |
| 331 | return -EINVAL; | 331 | return -EINVAL; |
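The bnad_debugfs bound of UINT_MAX >> 2 exists because the register-read path sizes its buffer as len words of 4 bytes; any len above UINT_MAX / 4 would wrap the 32-bit size computation and under-allocate. A minimal demonstration of the wrap the check prevents:

#include <limits.h>
#include <stdio.h>

static int validate_len(unsigned int len)
{
	if (len > UINT_MAX >> 2)
		return -1;              /* len * 4 would overflow */
	return 0;
}

int main(void)
{
	unsigned int bad = 0x40000001;  /* * 4 wraps to 4 */

	printf("len=%u -> %u bytes (wrapped)\n", bad, bad * 4u);
	printf("validate: %d\n", validate_len(bad));
	return 0;
}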
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5f11b4dc95d2..b23d6545f835 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -1257,6 +1257,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) | |||
| 1257 | release_sub_crq_queue(adapter, | 1257 | release_sub_crq_queue(adapter, |
| 1258 | adapter->tx_scrq[i]); | 1258 | adapter->tx_scrq[i]); |
| 1259 | } | 1259 | } |
| 1260 | kfree(adapter->tx_scrq); | ||
| 1260 | adapter->tx_scrq = NULL; | 1261 | adapter->tx_scrq = NULL; |
| 1261 | } | 1262 | } |
| 1262 | 1263 | ||
| @@ -1269,6 +1270,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter) | |||
| 1269 | release_sub_crq_queue(adapter, | 1270 | release_sub_crq_queue(adapter, |
| 1270 | adapter->rx_scrq[i]); | 1271 | adapter->rx_scrq[i]); |
| 1271 | } | 1272 | } |
| 1273 | kfree(adapter->rx_scrq); | ||
| 1272 | adapter->rx_scrq = NULL; | 1274 | adapter->rx_scrq = NULL; |
| 1273 | } | 1275 | } |
| 1274 | } | 1276 | } |
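The ibmvnic hunks close a leak: release_sub_crqs() freed each sub-CRQ but never the pointer arrays (tx_scrq/rx_scrq) holding them before NULLing the fields. The pattern in miniature, as a userspace analogue with free() standing in for the queue release:

#include <stdlib.h>

struct queue { int id; };

static void release_queues(struct queue ***queues, int n)
{
	int i;

	if (!*queues)
		return;
	for (i = 0; i < n; i++)
		free((*queues)[i]);     /* release each sub-CRQ */
	free(*queues);                  /* previously missing: the array itself */
	*queues = NULL;
}

int main(void)
{
	struct queue **q = calloc(4, sizeof(*q));

	for (int i = 0; i < 4; i++)
		q[i] = calloc(1, sizeof(**q));
	release_queues(&q, 4);
	return 0;
}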
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index e8c105164931..0e0fa7030565 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c | |||
| @@ -2305,6 +2305,17 @@ static int sync_toggles(struct mlx4_dev *dev) | |||
| 2305 | rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)); | 2305 | rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)); |
| 2306 | if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) { | 2306 | if (wr_toggle == 0xffffffff || rd_toggle == 0xffffffff) { |
| 2307 | /* PCI might be offline */ | 2307 | /* PCI might be offline */ |
| 2308 | |||
| 2309 | /* If device removal has been requested, | ||
| 2310 | * do not continue retrying. | ||
| 2311 | */ | ||
| 2312 | if (dev->persist->interface_state & | ||
| 2313 | MLX4_INTERFACE_STATE_NOWAIT) { | ||
| 2314 | mlx4_warn(dev, | ||
| 2315 | "communication channel is offline\n"); | ||
| 2316 | return -EIO; | ||
| 2317 | } | ||
| 2318 | |||
| 2308 | msleep(100); | 2319 | msleep(100); |
| 2309 | wr_toggle = swab32(readl(&priv->mfunc.comm-> | 2320 | wr_toggle = swab32(readl(&priv->mfunc.comm-> |
| 2310 | slave_write)); | 2321 | slave_write)); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 21377c315083..703205475524 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c | |||
| @@ -1940,6 +1940,14 @@ static int mlx4_comm_check_offline(struct mlx4_dev *dev) | |||
| 1940 | (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); | 1940 | (u32)(1 << COMM_CHAN_OFFLINE_OFFSET)); |
| 1941 | if (!offline_bit) | 1941 | if (!offline_bit) |
| 1942 | return 0; | 1942 | return 0; |
| 1943 | |||
| 1944 | /* If device removal has been requested, | ||
| 1945 | * do not continue retrying. | ||
| 1946 | */ | ||
| 1947 | if (dev->persist->interface_state & | ||
| 1948 | MLX4_INTERFACE_STATE_NOWAIT) | ||
| 1949 | break; | ||
| 1950 | |||
| 1943 | /* There are cases as part of AER/Reset flow that PF needs | 1951 | /* There are cases as part of AER/Reset flow that PF needs |
| 1944 | * around 100 msec to load. We therefore sleep for 100 msec | 1952 | * around 100 msec to load. We therefore sleep for 100 msec |
| 1945 | * to allow other tasks to make use of that CPU during this | 1953 | * to allow other tasks to make use of that CPU during this |
| @@ -3955,6 +3963,9 @@ static void mlx4_remove_one(struct pci_dev *pdev) | |||
| 3955 | struct devlink *devlink = priv_to_devlink(priv); | 3963 | struct devlink *devlink = priv_to_devlink(priv); |
| 3956 | int active_vfs = 0; | 3964 | int active_vfs = 0; |
| 3957 | 3965 | ||
| 3966 | if (mlx4_is_slave(dev)) | ||
| 3967 | persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT; | ||
| 3968 | |||
| 3958 | mutex_lock(&persist->interface_state_mutex); | 3969 | mutex_lock(&persist->interface_state_mutex); |
| 3959 | persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; | 3970 | persist->interface_state |= MLX4_INTERFACE_STATE_DELETION; |
| 3960 | mutex_unlock(&persist->interface_state_mutex); | 3971 | mutex_unlock(&persist->interface_state_mutex); |
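Both mlx4 hunks follow one idea: mlx4_remove_one() sets MLX4_INTERFACE_STATE_NOWAIT for slave functions, and the communication-channel pollers check it so removal is not blocked behind an indefinitely-offline channel. A sketch of that shutdown-aware retry loop, assuming an always-offline channel for illustration (flag name mirrors the patch; the loop itself is not driver code):

#include <stdbool.h>
#include <stdio.h>

#define STATE_NOWAIT (1u << 0)

static unsigned int interface_state;
static bool channel_offline(void) { return true; }   /* stand-in */

static int wait_for_channel(int max_tries)
{
	while (max_tries--) {
		if (!channel_offline())
			return 0;
		if (interface_state & STATE_NOWAIT) {
			puts("removal requested, giving up");
			return -5;      /* -EIO */
		}
		/* msleep(100) in the real driver */
	}
	return -110;                    /* -ETIMEDOUT */
}

int main(void)
{
	interface_state |= STATE_NOWAIT;   /* set by the remove path */
	printf("rc=%d\n", wait_for_channel(10));
	return 0;
}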
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index caa837e5e2b9..a380353a78c2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
| @@ -361,6 +361,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, | |||
| 361 | case MLX5_CMD_OP_QUERY_VPORT_COUNTER: | 361 | case MLX5_CMD_OP_QUERY_VPORT_COUNTER: |
| 362 | case MLX5_CMD_OP_ALLOC_Q_COUNTER: | 362 | case MLX5_CMD_OP_ALLOC_Q_COUNTER: |
| 363 | case MLX5_CMD_OP_QUERY_Q_COUNTER: | 363 | case MLX5_CMD_OP_QUERY_Q_COUNTER: |
| 364 | case MLX5_CMD_OP_SET_RATE_LIMIT: | ||
| 365 | case MLX5_CMD_OP_QUERY_RATE_LIMIT: | ||
| 364 | case MLX5_CMD_OP_ALLOC_PD: | 366 | case MLX5_CMD_OP_ALLOC_PD: |
| 365 | case MLX5_CMD_OP_ALLOC_UAR: | 367 | case MLX5_CMD_OP_ALLOC_UAR: |
| 366 | case MLX5_CMD_OP_CONFIG_INT_MODERATION: | 368 | case MLX5_CMD_OP_CONFIG_INT_MODERATION: |
| @@ -497,6 +499,8 @@ const char *mlx5_command_str(int command) | |||
| 497 | MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); | 499 | MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); |
| 498 | MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); | 500 | MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); |
| 499 | MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); | 501 | MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); |
| 502 | MLX5_COMMAND_STR_CASE(SET_RATE_LIMIT); | ||
| 503 | MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT); | ||
| 500 | MLX5_COMMAND_STR_CASE(ALLOC_PD); | 504 | MLX5_COMMAND_STR_CASE(ALLOC_PD); |
| 501 | MLX5_COMMAND_STR_CASE(DEALLOC_PD); | 505 | MLX5_COMMAND_STR_CASE(DEALLOC_PD); |
| 502 | MLX5_COMMAND_STR_CASE(ALLOC_UAR); | 506 | MLX5_COMMAND_STR_CASE(ALLOC_UAR); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index f6a6ded204f6..dc52053128bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
| @@ -928,10 +928,6 @@ void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv); | |||
| 928 | int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); | 928 | int mlx5e_attach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); |
| 929 | void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); | 929 | void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev); |
| 930 | u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); | 930 | u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout); |
| 931 | void mlx5e_add_vxlan_port(struct net_device *netdev, | ||
| 932 | struct udp_tunnel_info *ti); | ||
| 933 | void mlx5e_del_vxlan_port(struct net_device *netdev, | ||
| 934 | struct udp_tunnel_info *ti); | ||
| 935 | 931 | ||
| 936 | int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, | 932 | int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, |
| 937 | void *sp); | 933 | void *sp); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8ef64c4db2c2..66c133757a5e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -3100,8 +3100,8 @@ static int mlx5e_get_vf_stats(struct net_device *dev, | |||
| 3100 | vf_stats); | 3100 | vf_stats); |
| 3101 | } | 3101 | } |
| 3102 | 3102 | ||
| 3103 | void mlx5e_add_vxlan_port(struct net_device *netdev, | 3103 | static void mlx5e_add_vxlan_port(struct net_device *netdev, |
| 3104 | struct udp_tunnel_info *ti) | 3104 | struct udp_tunnel_info *ti) |
| 3105 | { | 3105 | { |
| 3106 | struct mlx5e_priv *priv = netdev_priv(netdev); | 3106 | struct mlx5e_priv *priv = netdev_priv(netdev); |
| 3107 | 3107 | ||
| @@ -3114,8 +3114,8 @@ void mlx5e_add_vxlan_port(struct net_device *netdev, | |||
| 3114 | mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); | 3114 | mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1); |
| 3115 | } | 3115 | } |
| 3116 | 3116 | ||
| 3117 | void mlx5e_del_vxlan_port(struct net_device *netdev, | 3117 | static void mlx5e_del_vxlan_port(struct net_device *netdev, |
| 3118 | struct udp_tunnel_info *ti) | 3118 | struct udp_tunnel_info *ti) |
| 3119 | { | 3119 | { |
| 3120 | struct mlx5e_priv *priv = netdev_priv(netdev); | 3120 | struct mlx5e_priv *priv = netdev_priv(netdev); |
| 3121 | 3121 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 2c864574a9d5..f621373bd7a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
| @@ -393,8 +393,6 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = { | |||
| 393 | .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, | 393 | .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, |
| 394 | .ndo_setup_tc = mlx5e_rep_ndo_setup_tc, | 394 | .ndo_setup_tc = mlx5e_rep_ndo_setup_tc, |
| 395 | .ndo_get_stats64 = mlx5e_rep_get_stats, | 395 | .ndo_get_stats64 = mlx5e_rep_get_stats, |
| 396 | .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, | ||
| 397 | .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, | ||
| 398 | .ndo_has_offload_stats = mlx5e_has_offload_stats, | 396 | .ndo_has_offload_stats = mlx5e_has_offload_stats, |
| 399 | .ndo_get_offload_stats = mlx5e_get_offload_stats, | 397 | .ndo_get_offload_stats = mlx5e_get_offload_stats, |
| 400 | }; | 398 | }; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3d371688fbbb..bafcb349a50c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
| @@ -601,6 +601,10 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, | |||
| 601 | if (lro_num_seg > 1) { | 601 | if (lro_num_seg > 1) { |
| 602 | mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); | 602 | mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); |
| 603 | skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); | 603 | skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); |
| 604 | /* Subtract one since we already counted this as one | ||
| 605 | * "regular" packet in mlx5e_complete_rx_cqe() | ||
| 606 | */ | ||
| 607 | rq->stats.packets += lro_num_seg - 1; | ||
| 604 | rq->stats.lro_packets++; | 608 | rq->stats.lro_packets++; |
| 605 | rq->stats.lro_bytes += cqe_bcnt; | 609 | rq->stats.lro_bytes += cqe_bcnt; |
| 606 | } | 610 | } |
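The en_rx.c hunk fixes double counting: the completion path already counted the LRO aggregate as one packet, so a session of N coalesced segments must add only N - 1 more for the packet counter to stay segment-accurate. The arithmetic in miniature:

#include <stdio.h>

int main(void)
{
	unsigned long packets = 0;
	unsigned int lro_num_seg = 4;

	packets++;                         /* mlx5e_complete_rx_cqe() */
	if (lro_num_seg > 1)
		packets += lro_num_seg - 1;    /* the patch's correction */
	printf("packets=%lu (expected %u)\n", packets, lro_num_seg);
	return 0;
}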
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 79481f4cf264..fade7233dac5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
| @@ -133,6 +133,23 @@ err_create_ft: | |||
| 133 | return rule; | 133 | return rule; |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, | ||
| 137 | struct mlx5e_tc_flow *flow) | ||
| 138 | { | ||
| 139 | struct mlx5_fc *counter = NULL; | ||
| 140 | |||
| 141 | if (!IS_ERR(flow->rule)) { | ||
| 142 | counter = mlx5_flow_rule_counter(flow->rule); | ||
| 143 | mlx5_del_flow_rules(flow->rule); | ||
| 144 | mlx5_fc_destroy(priv->mdev, counter); | ||
| 145 | } | ||
| 146 | |||
| 147 | if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { | ||
| 148 | mlx5_destroy_flow_table(priv->fs.tc.t); | ||
| 149 | priv->fs.tc.t = NULL; | ||
| 150 | } | ||
| 151 | } | ||
| 152 | |||
| 136 | static struct mlx5_flow_handle * | 153 | static struct mlx5_flow_handle * |
| 137 | mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, | 154 | mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, |
| 138 | struct mlx5_flow_spec *spec, | 155 | struct mlx5_flow_spec *spec, |
| @@ -149,7 +166,24 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, | |||
| 149 | } | 166 | } |
| 150 | 167 | ||
| 151 | static void mlx5e_detach_encap(struct mlx5e_priv *priv, | 168 | static void mlx5e_detach_encap(struct mlx5e_priv *priv, |
| 152 | struct mlx5e_tc_flow *flow) { | 169 | struct mlx5e_tc_flow *flow); |
| 170 | |||
| 171 | static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, | ||
| 172 | struct mlx5e_tc_flow *flow) | ||
| 173 | { | ||
| 174 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | ||
| 175 | |||
| 176 | mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr); | ||
| 177 | |||
| 178 | mlx5_eswitch_del_vlan_action(esw, flow->attr); | ||
| 179 | |||
| 180 | if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) | ||
| 181 | mlx5e_detach_encap(priv, flow); | ||
| 182 | } | ||
| 183 | |||
| 184 | static void mlx5e_detach_encap(struct mlx5e_priv *priv, | ||
| 185 | struct mlx5e_tc_flow *flow) | ||
| 186 | { | ||
| 153 | struct list_head *next = flow->encap.next; | 187 | struct list_head *next = flow->encap.next; |
| 154 | 188 | ||
| 155 | list_del(&flow->encap); | 189 | list_del(&flow->encap); |
| @@ -173,25 +207,10 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, | |||
| 173 | static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, | 207 | static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, |
| 174 | struct mlx5e_tc_flow *flow) | 208 | struct mlx5e_tc_flow *flow) |
| 175 | { | 209 | { |
| 176 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | 210 | if (flow->flags & MLX5E_TC_FLOW_ESWITCH) |
| 177 | struct mlx5_fc *counter = NULL; | 211 | mlx5e_tc_del_fdb_flow(priv, flow); |
| 178 | 212 | else | |
| 179 | if (!IS_ERR(flow->rule)) { | 213 | mlx5e_tc_del_nic_flow(priv, flow); |
| 180 | counter = mlx5_flow_rule_counter(flow->rule); | ||
| 181 | mlx5_del_flow_rules(flow->rule); | ||
| 182 | mlx5_fc_destroy(priv->mdev, counter); | ||
| 183 | } | ||
| 184 | |||
| 185 | if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { | ||
| 186 | mlx5_eswitch_del_vlan_action(esw, flow->attr); | ||
| 187 | if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) | ||
| 188 | mlx5e_detach_encap(priv, flow); | ||
| 189 | } | ||
| 190 | |||
| 191 | if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) { | ||
| 192 | mlx5_destroy_flow_table(priv->fs.tc.t); | ||
| 193 | priv->fs.tc.t = NULL; | ||
| 194 | } | ||
| 195 | } | 214 | } |
| 196 | 215 | ||
| 197 | static void parse_vxlan_attr(struct mlx5_flow_spec *spec, | 216 | static void parse_vxlan_attr(struct mlx5_flow_spec *spec, |
| @@ -248,12 +267,15 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, | |||
| 248 | skb_flow_dissector_target(f->dissector, | 267 | skb_flow_dissector_target(f->dissector, |
| 249 | FLOW_DISSECTOR_KEY_ENC_PORTS, | 268 | FLOW_DISSECTOR_KEY_ENC_PORTS, |
| 250 | f->mask); | 269 | f->mask); |
| 270 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | ||
| 271 | struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw); | ||
| 272 | struct mlx5e_priv *up_priv = netdev_priv(up_dev); | ||
| 251 | 273 | ||
| 252 | /* Full udp dst port must be given */ | 274 | /* Full udp dst port must be given */ |
| 253 | if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) | 275 | if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) |
| 254 | goto vxlan_match_offload_err; | 276 | goto vxlan_match_offload_err; |
| 255 | 277 | ||
| 256 | if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) && | 278 | if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) && |
| 257 | MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) | 279 | MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) |
| 258 | parse_vxlan_attr(spec, f); | 280 | parse_vxlan_attr(spec, f); |
| 259 | else { | 281 | else { |
| @@ -976,6 +998,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv, | |||
| 976 | struct mlx5_esw_flow_attr *attr) | 998 | struct mlx5_esw_flow_attr *attr) |
| 977 | { | 999 | { |
| 978 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | 1000 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; |
| 1001 | struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw); | ||
| 1002 | struct mlx5e_priv *up_priv = netdev_priv(up_dev); | ||
| 979 | unsigned short family = ip_tunnel_info_af(tun_info); | 1003 | unsigned short family = ip_tunnel_info_af(tun_info); |
| 980 | struct ip_tunnel_key *key = &tun_info->key; | 1004 | struct ip_tunnel_key *key = &tun_info->key; |
| 981 | struct mlx5_encap_entry *e; | 1005 | struct mlx5_encap_entry *e; |
| @@ -996,7 +1020,7 @@ vxlan_encap_offload_err: | |||
| 996 | return -EOPNOTSUPP; | 1020 | return -EOPNOTSUPP; |
| 997 | } | 1021 | } |
| 998 | 1022 | ||
| 999 | if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) && | 1023 | if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) && |
| 1000 | MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { | 1024 | MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { |
| 1001 | tunnel_type = MLX5_HEADER_TYPE_VXLAN; | 1025 | tunnel_type = MLX5_HEADER_TYPE_VXLAN; |
| 1002 | } else { | 1026 | } else { |
| @@ -1112,14 +1136,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
| 1112 | } | 1136 | } |
| 1113 | 1137 | ||
| 1114 | if (is_tcf_vlan(a)) { | 1138 | if (is_tcf_vlan(a)) { |
| 1115 | if (tcf_vlan_action(a) == VLAN_F_POP) { | 1139 | if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { |
| 1116 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; | 1140 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; |
| 1117 | } else if (tcf_vlan_action(a) == VLAN_F_PUSH) { | 1141 | } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { |
| 1118 | if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) | 1142 | if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q)) |
| 1119 | return -EOPNOTSUPP; | 1143 | return -EOPNOTSUPP; |
| 1120 | 1144 | ||
| 1121 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; | 1145 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; |
| 1122 | attr->vlan = tcf_vlan_push_vid(a); | 1146 | attr->vlan = tcf_vlan_push_vid(a); |
| 1147 | } else { /* action is TCA_VLAN_ACT_MODIFY */ | ||
| 1148 | return -EOPNOTSUPP; | ||
| 1123 | } | 1149 | } |
| 1124 | continue; | 1150 | continue; |
| 1125 | } | 1151 | } |
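Besides splitting flow deletion into NIC and FDB paths, the en_tc.c hunk corrects the VLAN action comparison to use the TCA_VLAN_ACT_* constants that tcf_vlan_action() actually returns, and explicitly rejects TCA_VLAN_ACT_MODIFY instead of silently falling through. A sketch of the resulting dispatch (constant values below follow the uapi tc_vlan header):

#include <stdio.h>

#define TCA_VLAN_ACT_POP    1
#define TCA_VLAN_ACT_PUSH   2
#define TCA_VLAN_ACT_MODIFY 3

static int parse_vlan_action(int act)
{
	switch (act) {
	case TCA_VLAN_ACT_POP:
		return 0;               /* offload as VLAN_POP */
	case TCA_VLAN_ACT_PUSH:
		return 0;               /* offload as VLAN_PUSH */
	default:
		return -95;             /* -EOPNOTSUPP, includes MODIFY */
	}
}

int main(void)
{
	printf("%d\n", parse_vlan_action(TCA_VLAN_ACT_MODIFY));
	return 0;
}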
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index f193128bac4b..57f5e2d7ebd1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
| @@ -274,15 +274,18 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) | |||
| 274 | sq->stats.tso_bytes += skb->len - ihs; | 274 | sq->stats.tso_bytes += skb->len - ihs; |
| 275 | } | 275 | } |
| 276 | 276 | ||
| 277 | sq->stats.packets += skb_shinfo(skb)->gso_segs; | ||
| 277 | num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; | 278 | num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs; |
| 278 | } else { | 279 | } else { |
| 279 | bf = sq->bf_budget && | 280 | bf = sq->bf_budget && |
| 280 | !skb->xmit_more && | 281 | !skb->xmit_more && |
| 281 | !skb_shinfo(skb)->nr_frags; | 282 | !skb_shinfo(skb)->nr_frags; |
| 282 | ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); | 283 | ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); |
| 284 | sq->stats.packets++; | ||
| 283 | num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); | 285 | num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); |
| 284 | } | 286 | } |
| 285 | 287 | ||
| 288 | sq->stats.bytes += num_bytes; | ||
| 286 | wi->num_bytes = num_bytes; | 289 | wi->num_bytes = num_bytes; |
| 287 | 290 | ||
| 288 | ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; | 291 | ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS; |
| @@ -381,8 +384,6 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb) | |||
| 381 | if (bf) | 384 | if (bf) |
| 382 | sq->bf_budget--; | 385 | sq->bf_budget--; |
| 383 | 386 | ||
| 384 | sq->stats.packets++; | ||
| 385 | sq->stats.bytes += num_bytes; | ||
| 386 | return NETDEV_TX_OK; | 387 | return NETDEV_TX_OK; |
| 387 | 388 | ||
| 388 | dma_unmap_wqe_err: | 389 | dma_unmap_wqe_err: |
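The en_tx.c hunk moves the TX counters up and, for TSO, counts one packet per GSO segment rather than one per super-skb, so ethtool packet counts reflect what actually goes on the wire. The counting fix in miniature (gso_segs plays the role of skb_shinfo(skb)->gso_segs):

#include <stdbool.h>
#include <stdio.h>

static unsigned long tx_packets;

static void xmit(bool tso, unsigned int gso_segs)
{
	if (tso)
		tx_packets += gso_segs;   /* one wire packet per segment */
	else
		tx_packets++;
}

int main(void)
{
	xmit(true, 44);               /* ~64KB TSO at MSS 1460 -> 44 segs */
	xmit(false, 0);
	printf("tx_packets=%lu\n", tx_packets);
	return 0;
}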
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 5b78883d5654..ad329b1680b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | |||
| @@ -209,6 +209,7 @@ struct mlx5_esw_offload { | |||
| 209 | struct mlx5_eswitch_rep *vport_reps; | 209 | struct mlx5_eswitch_rep *vport_reps; |
| 210 | DECLARE_HASHTABLE(encap_tbl, 8); | 210 | DECLARE_HASHTABLE(encap_tbl, 8); |
| 211 | u8 inline_mode; | 211 | u8 inline_mode; |
| 212 | u64 num_flows; | ||
| 212 | }; | 213 | }; |
| 213 | 214 | ||
| 214 | struct mlx5_eswitch { | 215 | struct mlx5_eswitch { |
| @@ -271,6 +272,11 @@ struct mlx5_flow_handle * | |||
| 271 | mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, | 272 | mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, |
| 272 | struct mlx5_flow_spec *spec, | 273 | struct mlx5_flow_spec *spec, |
| 273 | struct mlx5_esw_flow_attr *attr); | 274 | struct mlx5_esw_flow_attr *attr); |
| 275 | void | ||
| 276 | mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, | ||
| 277 | struct mlx5_flow_handle *rule, | ||
| 278 | struct mlx5_esw_flow_attr *attr); | ||
| 279 | |||
| 274 | struct mlx5_flow_handle * | 280 | struct mlx5_flow_handle * |
| 275 | mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn); | 281 | mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn); |
| 276 | 282 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 4f5b0d47d5f3..307ec6c5fd3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
| @@ -93,10 +93,27 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, | |||
| 93 | spec, &flow_act, dest, i); | 93 | spec, &flow_act, dest, i); |
| 94 | if (IS_ERR(rule)) | 94 | if (IS_ERR(rule)) |
| 95 | mlx5_fc_destroy(esw->dev, counter); | 95 | mlx5_fc_destroy(esw->dev, counter); |
| 96 | else | ||
| 97 | esw->offloads.num_flows++; | ||
| 96 | 98 | ||
| 97 | return rule; | 99 | return rule; |
| 98 | } | 100 | } |
| 99 | 101 | ||
| 102 | void | ||
| 103 | mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw, | ||
| 104 | struct mlx5_flow_handle *rule, | ||
| 105 | struct mlx5_esw_flow_attr *attr) | ||
| 106 | { | ||
| 107 | struct mlx5_fc *counter = NULL; | ||
| 108 | |||
| 109 | if (!IS_ERR(rule)) { | ||
| 110 | counter = mlx5_flow_rule_counter(rule); | ||
| 111 | mlx5_del_flow_rules(rule); | ||
| 112 | mlx5_fc_destroy(esw->dev, counter); | ||
| 113 | esw->offloads.num_flows--; | ||
| 114 | } | ||
| 115 | } | ||
| 116 | |||
| 100 | static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) | 117 | static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val) |
| 101 | { | 118 | { |
| 102 | struct mlx5_eswitch_rep *rep; | 119 | struct mlx5_eswitch_rep *rep; |
| @@ -908,6 +925,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode) | |||
| 908 | MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) | 925 | MLX5_CAP_INLINE_MODE_VPORT_CONTEXT) |
| 909 | return -EOPNOTSUPP; | 926 | return -EOPNOTSUPP; |
| 910 | 927 | ||
| 928 | if (esw->offloads.num_flows > 0) { | ||
| 929 | esw_warn(dev, "Can't set inline mode when flows are configured\n"); | ||
| 930 | return -EOPNOTSUPP; | ||
| 931 | } | ||
| 932 | |||
| 911 | err = esw_inline_mode_from_devlink(mode, &mlx5_mode); | 933 | err = esw_inline_mode_from_devlink(mode, &mlx5_mode); |
| 912 | if (err) | 934 | if (err) |
| 913 | goto out; | 935 | goto out; |
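The eswitch hunks add a num_flows counter maintained by the offloaded-rule add/del paths, and use it to refuse an inline-mode change while flows are configured, since already-offloaded rules would be invalidated by the new mode. The guard in miniature:

#include <stdio.h>

static unsigned long long num_flows;

static int set_inline_mode(int mode)
{
	if (num_flows > 0)
		return -95;     /* -EOPNOTSUPP: flows are configured */
	(void)mode;             /* apply the mode here */
	return 0;
}

int main(void)
{
	num_flows = 3;
	printf("%d\n", set_inline_mode(1));
	num_flows = 0;
	printf("%d\n", set_inline_mode(1));
	return 0;
}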
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index e2bd600d19de..60154a175bd3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
| @@ -87,7 +87,7 @@ static struct mlx5_profile profile[] = { | |||
| 87 | [2] = { | 87 | [2] = { |
| 88 | .mask = MLX5_PROF_MASK_QP_SIZE | | 88 | .mask = MLX5_PROF_MASK_QP_SIZE | |
| 89 | MLX5_PROF_MASK_MR_CACHE, | 89 | MLX5_PROF_MASK_MR_CACHE, |
| 90 | .log_max_qp = 17, | 90 | .log_max_qp = 18, |
| 91 | .mr_cache[0] = { | 91 | .mr_cache[0] = { |
| 92 | .size = 500, | 92 | .size = 500, |
| 93 | .limit = 250 | 93 | .limit = 250 |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 334bcc6df6b2..50d28261b6b9 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
| @@ -2404,7 +2404,7 @@ static void efx_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *t | |||
| 2404 | tnl.type = (u16)efx_tunnel_type; | 2404 | tnl.type = (u16)efx_tunnel_type; |
| 2405 | tnl.port = ti->port; | 2405 | tnl.port = ti->port; |
| 2406 | 2406 | ||
| 2407 | if (efx->type->udp_tnl_add_port) | 2407 | if (efx->type->udp_tnl_del_port) |
| 2408 | (void)efx->type->udp_tnl_del_port(efx, tnl); | 2408 | (void)efx->type->udp_tnl_del_port(efx, tnl); |
| 2409 | } | 2409 | } |
| 2410 | 2410 | ||
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 296c8efd0038..9e631952b86f 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig | |||
| @@ -74,15 +74,21 @@ config TI_CPSW | |||
| 74 | will be called cpsw. | 74 | will be called cpsw. |
| 75 | 75 | ||
| 76 | config TI_CPTS | 76 | config TI_CPTS |
| 77 | tristate "TI Common Platform Time Sync (CPTS) Support" | 77 | bool "TI Common Platform Time Sync (CPTS) Support" |
| 78 | depends on TI_CPSW || TI_KEYSTONE_NETCP | 78 | depends on TI_CPSW || TI_KEYSTONE_NETCP |
| 79 | imply PTP_1588_CLOCK | 79 | depends on PTP_1588_CLOCK |
| 80 | ---help--- | 80 | ---help--- |
| 81 | This driver supports the Common Platform Time Sync unit of | 81 | This driver supports the Common Platform Time Sync unit of |
| 82 | the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem. | 82 | the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem. |
| 83 | The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the | 83 | The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the |
| 84 | driver offers a PTP Hardware Clock. | 84 | driver offers a PTP Hardware Clock. |
| 85 | 85 | ||
| 86 | config TI_CPTS_MOD | ||
| 87 | tristate | ||
| 88 | depends on TI_CPTS | ||
| 89 | default y if TI_CPSW=y || TI_KEYSTONE_NETCP=y | ||
| 90 | default m | ||
| 91 | |||
| 86 | config TI_KEYSTONE_NETCP | 92 | config TI_KEYSTONE_NETCP |
| 87 | tristate "TI Keystone NETCP Core Support" | 93 | tristate "TI Keystone NETCP Core Support" |
| 88 | select TI_CPSW_ALE | 94 | select TI_CPSW_ALE |
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index 1e7c10bf8713..10e6b0ce51ba 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile | |||
| @@ -12,7 +12,7 @@ obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o | |||
| 12 | obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o | 12 | obj-$(CONFIG_TI_DAVINCI_CPDMA) += davinci_cpdma.o |
| 13 | obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o | 13 | obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o |
| 14 | obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o | 14 | obj-$(CONFIG_TI_CPSW_ALE) += cpsw_ale.o |
| 15 | obj-$(CONFIG_TI_CPTS) += cpts.o | 15 | obj-$(CONFIG_TI_CPTS_MOD) += cpts.o |
| 16 | obj-$(CONFIG_TI_CPSW) += ti_cpsw.o | 16 | obj-$(CONFIG_TI_CPSW) += ti_cpsw.o |
| 17 | ti_cpsw-y := cpsw.o | 17 | ti_cpsw-y := cpsw.o |
| 18 | 18 | ||
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c index b75d9cdcfb0c..ae48c809bac9 100644 --- a/drivers/net/fjes/fjes_main.c +++ b/drivers/net/fjes/fjes_main.c | |||
| @@ -45,6 +45,8 @@ MODULE_DESCRIPTION("FUJITSU Extended Socket Network Device Driver"); | |||
| 45 | MODULE_LICENSE("GPL"); | 45 | MODULE_LICENSE("GPL"); |
| 46 | MODULE_VERSION(DRV_VERSION); | 46 | MODULE_VERSION(DRV_VERSION); |
| 47 | 47 | ||
| 48 | #define ACPI_MOTHERBOARD_RESOURCE_HID "PNP0C02" | ||
| 49 | |||
| 48 | static int fjes_request_irq(struct fjes_adapter *); | 50 | static int fjes_request_irq(struct fjes_adapter *); |
| 49 | static void fjes_free_irq(struct fjes_adapter *); | 51 | static void fjes_free_irq(struct fjes_adapter *); |
| 50 | 52 | ||
| @@ -78,7 +80,7 @@ static void fjes_rx_irq(struct fjes_adapter *, int); | |||
| 78 | static int fjes_poll(struct napi_struct *, int); | 80 | static int fjes_poll(struct napi_struct *, int); |
| 79 | 81 | ||
| 80 | static const struct acpi_device_id fjes_acpi_ids[] = { | 82 | static const struct acpi_device_id fjes_acpi_ids[] = { |
| 81 | {"PNP0C02", 0}, | 83 | {ACPI_MOTHERBOARD_RESOURCE_HID, 0}, |
| 82 | {"", 0}, | 84 | {"", 0}, |
| 83 | }; | 85 | }; |
| 84 | MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids); | 86 | MODULE_DEVICE_TABLE(acpi, fjes_acpi_ids); |
| @@ -115,18 +117,17 @@ static struct resource fjes_resource[] = { | |||
| 115 | }, | 117 | }, |
| 116 | }; | 118 | }; |
| 117 | 119 | ||
| 118 | static int fjes_acpi_add(struct acpi_device *device) | 120 | static bool is_extended_socket_device(struct acpi_device *device) |
| 119 | { | 121 | { |
| 120 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; | 122 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; |
| 121 | char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1]; | 123 | char str_buf[sizeof(FJES_ACPI_SYMBOL) + 1]; |
| 122 | struct platform_device *plat_dev; | ||
| 123 | union acpi_object *str; | 124 | union acpi_object *str; |
| 124 | acpi_status status; | 125 | acpi_status status; |
| 125 | int result; | 126 | int result; |
| 126 | 127 | ||
| 127 | status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer); | 128 | status = acpi_evaluate_object(device->handle, "_STR", NULL, &buffer); |
| 128 | if (ACPI_FAILURE(status)) | 129 | if (ACPI_FAILURE(status)) |
| 129 | return -ENODEV; | 130 | return false; |
| 130 | 131 | ||
| 131 | str = buffer.pointer; | 132 | str = buffer.pointer; |
| 132 | result = utf16s_to_utf8s((wchar_t *)str->string.pointer, | 133 | result = utf16s_to_utf8s((wchar_t *)str->string.pointer, |
| @@ -136,10 +137,42 @@ static int fjes_acpi_add(struct acpi_device *device) | |||
| 136 | 137 | ||
| 137 | if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) { | 138 | if (strncmp(FJES_ACPI_SYMBOL, str_buf, strlen(FJES_ACPI_SYMBOL)) != 0) { |
| 138 | kfree(buffer.pointer); | 139 | kfree(buffer.pointer); |
| 139 | return -ENODEV; | 140 | return false; |
| 140 | } | 141 | } |
| 141 | kfree(buffer.pointer); | 142 | kfree(buffer.pointer); |
| 142 | 143 | ||
| 144 | return true; | ||
| 145 | } | ||
| 146 | |||
| 147 | static int acpi_check_extended_socket_status(struct acpi_device *device) | ||
| 148 | { | ||
| 149 | unsigned long long sta; | ||
| 150 | acpi_status status; | ||
| 151 | |||
| 152 | status = acpi_evaluate_integer(device->handle, "_STA", NULL, &sta); | ||
| 153 | if (ACPI_FAILURE(status)) | ||
| 154 | return -ENODEV; | ||
| 155 | |||
| 156 | if (!((sta & ACPI_STA_DEVICE_PRESENT) && | ||
| 157 | (sta & ACPI_STA_DEVICE_ENABLED) && | ||
| 158 | (sta & ACPI_STA_DEVICE_UI) && | ||
| 159 | (sta & ACPI_STA_DEVICE_FUNCTIONING))) | ||
| 160 | return -ENODEV; | ||
| 161 | |||
| 162 | return 0; | ||
| 163 | } | ||
| 164 | |||
| 165 | static int fjes_acpi_add(struct acpi_device *device) | ||
| 166 | { | ||
| 167 | struct platform_device *plat_dev; | ||
| 168 | acpi_status status; | ||
| 169 | |||
| 170 | if (!is_extended_socket_device(device)) | ||
| 171 | return -ENODEV; | ||
| 172 | |||
| 173 | if (acpi_check_extended_socket_status(device)) | ||
| 174 | return -ENODEV; | ||
| 175 | |||
| 143 | status = acpi_walk_resources(device->handle, METHOD_NAME__CRS, | 176 | status = acpi_walk_resources(device->handle, METHOD_NAME__CRS, |
| 144 | fjes_get_acpi_resource, fjes_resource); | 177 | fjes_get_acpi_resource, fjes_resource); |
| 145 | if (ACPI_FAILURE(status)) | 178 | if (ACPI_FAILURE(status)) |
| @@ -1316,7 +1349,7 @@ static void fjes_netdev_setup(struct net_device *netdev) | |||
| 1316 | netdev->min_mtu = fjes_support_mtu[0]; | 1349 | netdev->min_mtu = fjes_support_mtu[0]; |
| 1317 | netdev->max_mtu = fjes_support_mtu[3]; | 1350 | netdev->max_mtu = fjes_support_mtu[3]; |
| 1318 | netdev->flags |= IFF_BROADCAST; | 1351 | netdev->flags |= IFF_BROADCAST; |
| 1319 | netdev->features |= NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_FILTER; | 1352 | netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; |
| 1320 | } | 1353 | } |
| 1321 | 1354 | ||
| 1322 | static void fjes_irq_watch_task(struct work_struct *work) | 1355 | static void fjes_irq_watch_task(struct work_struct *work) |
| @@ -1473,11 +1506,44 @@ static void fjes_watch_unshare_task(struct work_struct *work) | |||
| 1473 | } | 1506 | } |
| 1474 | } | 1507 | } |
| 1475 | 1508 | ||
| 1509 | static acpi_status | ||
| 1510 | acpi_find_extended_socket_device(acpi_handle obj_handle, u32 level, | ||
| 1511 | void *context, void **return_value) | ||
| 1512 | { | ||
| 1513 | struct acpi_device *device; | ||
| 1514 | bool *found = context; | ||
| 1515 | int result; | ||
| 1516 | |||
| 1517 | result = acpi_bus_get_device(obj_handle, &device); | ||
| 1518 | if (result) | ||
| 1519 | return AE_OK; | ||
| 1520 | |||
| 1521 | if (strcmp(acpi_device_hid(device), ACPI_MOTHERBOARD_RESOURCE_HID)) | ||
| 1522 | return AE_OK; | ||
| 1523 | |||
| 1524 | if (!is_extended_socket_device(device)) | ||
| 1525 | return AE_OK; | ||
| 1526 | |||
| 1527 | if (acpi_check_extended_socket_status(device)) | ||
| 1528 | return AE_OK; | ||
| 1529 | |||
| 1530 | *found = true; | ||
| 1531 | return AE_CTRL_TERMINATE; | ||
| 1532 | } | ||
| 1533 | |||
| 1476 | /* fjes_init_module - Driver Registration Routine */ | 1534 | /* fjes_init_module - Driver Registration Routine */ |
| 1477 | static int __init fjes_init_module(void) | 1535 | static int __init fjes_init_module(void) |
| 1478 | { | 1536 | { |
| 1537 | bool found = false; | ||
| 1479 | int result; | 1538 | int result; |
| 1480 | 1539 | ||
| 1540 | acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, ACPI_UINT32_MAX, | ||
| 1541 | acpi_find_extended_socket_device, NULL, &found, | ||
| 1542 | NULL); | ||
| 1543 | |||
| 1544 | if (!found) | ||
| 1545 | return -ENODEV; | ||
| 1546 | |||
| 1481 | pr_info("%s - version %s - %s\n", | 1547 | pr_info("%s - version %s - %s\n", |
| 1482 | fjes_driver_string, fjes_driver_version, fjes_copyright); | 1548 | fjes_driver_string, fjes_driver_version, fjes_copyright); |
| 1483 | 1549 | ||
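The fjes rework stops claiming every PNP0C02 motherboard resource: _STR must identify an Extended Socket device, _STA must report it usable, and module init now walks the ACPI namespace and bails with -ENODEV when no such device exists. The _STA gate in miniature; the bit values follow the ACPI specification (present=0x1, enabled=0x2, shown in UI=0x4, functioning=0x8):

#include <stdio.h>

#define ACPI_STA_DEVICE_PRESENT     0x1ULL
#define ACPI_STA_DEVICE_ENABLED     0x2ULL
#define ACPI_STA_DEVICE_UI          0x4ULL
#define ACPI_STA_DEVICE_FUNCTIONING 0x8ULL

static int check_status(unsigned long long sta)
{
	if (!((sta & ACPI_STA_DEVICE_PRESENT) &&
	      (sta & ACPI_STA_DEVICE_ENABLED) &&
	      (sta & ACPI_STA_DEVICE_UI) &&
	      (sta & ACPI_STA_DEVICE_FUNCTIONING)))
		return -19;             /* -ENODEV */
	return 0;
}

int main(void)
{
	printf("%d\n", check_status(0xF));   /* fully functional */
	printf("%d\n", check_status(0xB));   /* hidden from UI */
	return 0;
}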
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 4c1d8cca247b..8dd0b8770328 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c | |||
| @@ -1231,8 +1231,11 @@ void netvsc_channel_cb(void *context) | |||
| 1231 | return; | 1231 | return; |
| 1232 | 1232 | ||
| 1233 | net_device = net_device_to_netvsc_device(ndev); | 1233 | net_device = net_device_to_netvsc_device(ndev); |
| 1234 | if (unlikely(net_device->destroy) && | 1234 | if (unlikely(!net_device)) |
| 1235 | netvsc_channel_idle(net_device, q_idx)) | 1235 | return; |
| 1236 | |||
| 1237 | if (unlikely(net_device->destroy && | ||
| 1238 | netvsc_channel_idle(net_device, q_idx))) | ||
| 1236 | return; | 1239 | return; |
| 1237 | 1240 | ||
| 1238 | /* commit_rd_index() -> hv_signal_on_read() needs this. */ | 1241 | /* commit_rd_index() -> hv_signal_on_read() needs this. */ |
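The netvsc hunk adds the missing NULL check before dereferencing net_device and pulls the whole destroy-and-idle condition inside one unlikely() hint. The shape of the fix, simplified (unlikely() expanded to the usual GCC builtin):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

struct net_device_ctx { bool destroy; };

static bool channel_idle(void) { return true; }

static void channel_cb(struct net_device_ctx *nd)
{
	if (unlikely(!nd))
		return;                         /* device already gone */
	if (unlikely(nd->destroy && channel_idle()))
		return;                         /* nothing left to do */
	puts("process ring buffer");
}

int main(void)
{
	channel_cb(NULL);
	struct net_device_ctx live = { false };
	channel_cb(&live);
	return 0;
}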
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 34cc3c590aa5..cc88cd7856f5 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
| @@ -1931,6 +1931,8 @@ static int set_offload(struct tun_struct *tun, unsigned long arg) | |||
| 1931 | return -EINVAL; | 1931 | return -EINVAL; |
| 1932 | 1932 | ||
| 1933 | tun->set_features = features; | 1933 | tun->set_features = features; |
| 1934 | tun->dev->wanted_features &= ~TUN_USER_FEATURES; | ||
| 1935 | tun->dev->wanted_features |= features; | ||
| 1934 | netdev_update_features(tun->dev); | 1936 | netdev_update_features(tun->dev); |
| 1935 | 1937 | ||
| 1936 | return 0; | 1938 | return 0; |
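The tun hunk keeps wanted_features in sync with the user's TUNSETOFFLOAD request: clear the user-controllable bits, re-set them from the requested mask, then let netdev_update_features() re-derive the active set. The mask manipulation in miniature (TUN_USER_FEATURES below is an illustrative placeholder, not the real kernel value):

#include <stdio.h>

#define TUN_USER_FEATURES 0x00ffu   /* hypothetical placeholder mask */

int main(void)
{
	unsigned int wanted = 0xff0fu;  /* mix of user + fixed features */
	unsigned int requested = 0x0005u;

	wanted &= ~TUN_USER_FEATURES;   /* drop old user choices */
	wanted |= requested;            /* record the new ones */
	printf("wanted=0x%04x\n", wanted);
	return 0;
}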
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 805674550683..156f7f85e486 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -580,6 +580,10 @@ static const struct usb_device_id products[] = { | |||
| 580 | USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), | 580 | USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 0x01, 0x69), |
| 581 | .driver_info = (unsigned long)&qmi_wwan_info, | 581 | .driver_info = (unsigned long)&qmi_wwan_info, |
| 582 | }, | 582 | }, |
| 583 | { /* Motorola Mapphone devices with MDM6600 */ | ||
| 584 | USB_VENDOR_AND_INTERFACE_INFO(0x22b8, USB_CLASS_VENDOR_SPEC, 0xfb, 0xff), | ||
| 585 | .driver_info = (unsigned long)&qmi_wwan_info, | ||
| 586 | }, | ||
| 583 | 587 | ||
| 584 | /* 2. Combined interface devices matching on class+protocol */ | 588 | /* 2. Combined interface devices matching on class+protocol */ |
| 585 | { /* Huawei E367 and possibly others in "Windows mode" */ | 589 | { /* Huawei E367 and possibly others in "Windows mode" */ |
| @@ -925,6 +929,8 @@ static const struct usb_device_id products[] = { | |||
| 925 | {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ | 929 | {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ |
| 926 | {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ | 930 | {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ |
| 927 | {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ | 931 | {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ |
| 932 | {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ | ||
| 933 | {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ | ||
| 928 | {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ | 934 | {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ |
| 929 | {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ | 935 | {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ |
| 930 | {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ | 936 | {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 986243c932cc..0b1b9188625d 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
| @@ -32,7 +32,7 @@ | |||
| 32 | #define NETNEXT_VERSION "08" | 32 | #define NETNEXT_VERSION "08" |
| 33 | 33 | ||
| 34 | /* Information for net */ | 34 | /* Information for net */ |
| 35 | #define NET_VERSION "8" | 35 | #define NET_VERSION "9" |
| 36 | 36 | ||
| 37 | #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION | 37 | #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION |
| 38 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" | 38 | #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" |
| @@ -501,6 +501,8 @@ enum rtl_register_content { | |||
| 501 | #define RTL8153_RMS RTL8153_MAX_PACKET | 501 | #define RTL8153_RMS RTL8153_MAX_PACKET |
| 502 | #define RTL8152_TX_TIMEOUT (5 * HZ) | 502 | #define RTL8152_TX_TIMEOUT (5 * HZ) |
| 503 | #define RTL8152_NAPI_WEIGHT 64 | 503 | #define RTL8152_NAPI_WEIGHT 64 |
| 504 | #define rx_reserved_size(x) ((x) + VLAN_ETH_HLEN + CRC_SIZE + \ | ||
| 505 | sizeof(struct rx_desc) + RX_ALIGN) | ||
| 504 | 506 | ||
| 505 | /* rtl8152 flags */ | 507 | /* rtl8152 flags */ |
| 506 | enum rtl8152_flags { | 508 | enum rtl8152_flags { |
| @@ -1362,6 +1364,7 @@ static int alloc_all_mem(struct r8152 *tp) | |||
| 1362 | spin_lock_init(&tp->rx_lock); | 1364 | spin_lock_init(&tp->rx_lock); |
| 1363 | spin_lock_init(&tp->tx_lock); | 1365 | spin_lock_init(&tp->tx_lock); |
| 1364 | INIT_LIST_HEAD(&tp->tx_free); | 1366 | INIT_LIST_HEAD(&tp->tx_free); |
| 1367 | INIT_LIST_HEAD(&tp->rx_done); | ||
| 1365 | skb_queue_head_init(&tp->tx_queue); | 1368 | skb_queue_head_init(&tp->tx_queue); |
| 1366 | skb_queue_head_init(&tp->rx_queue); | 1369 | skb_queue_head_init(&tp->rx_queue); |
| 1367 | 1370 | ||
| @@ -2252,8 +2255,7 @@ static void r8153_set_rx_early_timeout(struct r8152 *tp) | |||
| 2252 | 2255 | ||
| 2253 | static void r8153_set_rx_early_size(struct r8152 *tp) | 2256 | static void r8153_set_rx_early_size(struct r8152 *tp) |
| 2254 | { | 2257 | { |
| 2255 | u32 mtu = tp->netdev->mtu; | 2258 | u32 ocp_data = (agg_buf_sz - rx_reserved_size(tp->netdev->mtu)) / 4; |
| 2256 | u32 ocp_data = (agg_buf_sz - mtu - VLAN_ETH_HLEN - VLAN_HLEN) / 8; | ||
| 2257 | 2259 | ||
| 2258 | ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data); | 2260 | ocp_write_word(tp, MCU_TYPE_USB, USB_RX_EARLY_SIZE, ocp_data); |
| 2259 | } | 2261 | } |
| @@ -2898,7 +2900,8 @@ static void r8153_first_init(struct r8152 *tp) | |||
| 2898 | 2900 | ||
| 2899 | rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); | 2901 | rtl_rx_vlan_en(tp, tp->netdev->features & NETIF_F_HW_VLAN_CTAG_RX); |
| 2900 | 2902 | ||
| 2901 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); | 2903 | ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE; |
| 2904 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); | ||
| 2902 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); | 2905 | ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO); |
| 2903 | 2906 | ||
| 2904 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); | 2907 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0); |
| @@ -2950,7 +2953,8 @@ static void r8153_enter_oob(struct r8152 *tp) | |||
| 2950 | usleep_range(1000, 2000); | 2953 | usleep_range(1000, 2000); |
| 2951 | } | 2954 | } |
| 2952 | 2955 | ||
| 2953 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS); | 2956 | ocp_data = tp->netdev->mtu + VLAN_ETH_HLEN + CRC_SIZE; |
| 2957 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, ocp_data); | ||
| 2954 | 2958 | ||
| 2955 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG); | 2959 | ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG); |
| 2956 | ocp_data &= ~TEREDO_WAKE_MASK; | 2960 | ocp_data &= ~TEREDO_WAKE_MASK; |
| @@ -4200,8 +4204,14 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu) | |||
| 4200 | 4204 | ||
| 4201 | dev->mtu = new_mtu; | 4205 | dev->mtu = new_mtu; |
| 4202 | 4206 | ||
| 4203 | if (netif_running(dev) && netif_carrier_ok(dev)) | 4207 | if (netif_running(dev)) { |
| 4204 | r8153_set_rx_early_size(tp); | 4208 | u32 rms = new_mtu + VLAN_ETH_HLEN + CRC_SIZE; |
| 4209 | |||
| 4210 | ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, rms); | ||
| 4211 | |||
| 4212 | if (netif_carrier_ok(dev)) | ||
| 4213 | r8153_set_rx_early_size(tp); | ||
| 4214 | } | ||
| 4205 | 4215 | ||
| 4206 | mutex_unlock(&tp->control); | 4216 | mutex_unlock(&tp->control); |
| 4207 | 4217 | ||
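The r8152 changes make both the receive maximum size (PLA_RMS) and the early-RX threshold track the MTU instead of the fixed jumbo maximum, and the new rx_reserved_size() macro reserves room for everything the chip adds per packet: VLAN Ethernet header, CRC, the RX descriptor, and alignment slack. The sizing rule sketched with assumed stand-in constants for CRC_SIZE/RX_ALIGN/sizeof(struct rx_desc):

#include <stdio.h>

#define AGG_BUF_SZ    16384u
#define VLAN_ETH_HLEN    18u
#define CRC_SIZE          4u
#define RX_DESC_SIZE     24u    /* assumed sizeof(struct rx_desc) */
#define RX_ALIGN          8u    /* assumed alignment slack */

static unsigned int rx_reserved_size(unsigned int mtu)
{
	return mtu + VLAN_ETH_HLEN + CRC_SIZE + RX_DESC_SIZE + RX_ALIGN;
}

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int early = (AGG_BUF_SZ - rx_reserved_size(mtu)) / 4;

	printf("early-rx size = %u units\n", early);
	return 0;
}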
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index fea687f35b5a..d6988db1930d 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c | |||
| @@ -462,8 +462,10 @@ static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf) | |||
| 462 | } | 462 | } |
| 463 | 463 | ||
| 464 | if (rt6_local) { | 464 | if (rt6_local) { |
| 465 | if (rt6_local->rt6i_idev) | 465 | if (rt6_local->rt6i_idev) { |
| 466 | in6_dev_put(rt6_local->rt6i_idev); | 466 | in6_dev_put(rt6_local->rt6i_idev); |
| 467 | rt6_local->rt6i_idev = NULL; | ||
| 468 | } | ||
| 467 | 469 | ||
| 468 | dst = &rt6_local->dst; | 470 | dst = &rt6_local->dst; |
| 469 | dev_put(dst->dev); | 471 | dev_put(dst->dev); |
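The vrf hunk NULLs rt6i_idev immediately after dropping the reference, so a second release of the same route cannot put the refcount twice. The hygiene pattern as a userspace analogue, with free() standing in for in6_dev_put():

#include <stdlib.h>

struct rt6 { void *idev; };

static void rt6_release(struct rt6 *rt)
{
	if (rt->idev) {
		free(rt->idev);     /* in6_dev_put() in the driver */
		rt->idev = NULL;    /* guard against a repeated release */
	}
}

int main(void)
{
	struct rt6 rt = { malloc(16) };

	rt6_release(&rt);
	rt6_release(&rt);       /* now harmless */
	return 0;
}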
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index 33fb26833cd0..d9f37ee4bfdd 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c | |||
| @@ -51,7 +51,7 @@ const struct ath10k_hw_regs qca6174_regs = { | |||
| 51 | .rtc_soc_base_address = 0x00000800, | 51 | .rtc_soc_base_address = 0x00000800, |
| 52 | .rtc_wmac_base_address = 0x00001000, | 52 | .rtc_wmac_base_address = 0x00001000, |
| 53 | .soc_core_base_address = 0x0003a000, | 53 | .soc_core_base_address = 0x0003a000, |
| 54 | .wlan_mac_base_address = 0x00020000, | 54 | .wlan_mac_base_address = 0x00010000, |
| 55 | .ce_wrapper_base_address = 0x00034000, | 55 | .ce_wrapper_base_address = 0x00034000, |
| 56 | .ce0_base_address = 0x00034400, | 56 | .ce0_base_address = 0x00034400, |
| 57 | .ce1_base_address = 0x00034800, | 57 | .ce1_base_address = 0x00034800, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index d37b1695c64e..6927caecd48e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
| @@ -2319,7 +2319,7 @@ iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, | |||
| 2319 | { | 2319 | { |
| 2320 | struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); | 2320 | struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); |
| 2321 | 2321 | ||
| 2322 | /* Called when we need to transmit (a) frame(s) from agg queue */ | 2322 | /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ |
| 2323 | 2323 | ||
| 2324 | iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, | 2324 | iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, |
| 2325 | tids, more_data, true); | 2325 | tids, more_data, true); |
| @@ -2338,7 +2338,8 @@ static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, | |||
| 2338 | for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { | 2338 | for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { |
| 2339 | struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; | 2339 | struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; |
| 2340 | 2340 | ||
| 2341 | if (tid_data->state != IWL_AGG_ON && | 2341 | if (!iwl_mvm_is_dqa_supported(mvm) && |
| 2342 | tid_data->state != IWL_AGG_ON && | ||
| 2342 | tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) | 2343 | tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA) |
| 2343 | continue; | 2344 | continue; |
| 2344 | 2345 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index bd1dcc863d8f..b51a2853cc80 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c | |||
| @@ -3135,7 +3135,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, | |||
| 3135 | struct ieee80211_sta *sta, | 3135 | struct ieee80211_sta *sta, |
| 3136 | enum ieee80211_frame_release_type reason, | 3136 | enum ieee80211_frame_release_type reason, |
| 3137 | u16 cnt, u16 tids, bool more_data, | 3137 | u16 cnt, u16 tids, bool more_data, |
| 3138 | bool agg) | 3138 | bool single_sta_queue) |
| 3139 | { | 3139 | { |
| 3140 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); | 3140 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
| 3141 | struct iwl_mvm_add_sta_cmd cmd = { | 3141 | struct iwl_mvm_add_sta_cmd cmd = { |
| @@ -3155,14 +3155,14 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, | |||
| 3155 | for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) | 3155 | for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) |
| 3156 | cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); | 3156 | cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); |
| 3157 | 3157 | ||
| 3158 | /* If we're releasing frames from aggregation queues then check if the | 3158 | /* If we're releasing frames from aggregation or dqa queues then check |
| 3159 | * all queues combined that we're releasing frames from have | 3159 | * if all the queues that we're releasing frames from, combined, have: |
| 3160 | * - more frames than the service period, in which case more_data | 3160 | * - more frames than the service period, in which case more_data |
| 3161 | * needs to be set | 3161 | * needs to be set |
| 3162 | * - fewer than 'cnt' frames, in which case we need to adjust the | 3162 | * - fewer than 'cnt' frames, in which case we need to adjust the |
| 3163 | * firmware command (but do that unconditionally) | 3163 | * firmware command (but do that unconditionally) |
| 3164 | */ | 3164 | */ |
| 3165 | if (agg) { | 3165 | if (single_sta_queue) { |
| 3166 | int remaining = cnt; | 3166 | int remaining = cnt; |
| 3167 | int sleep_tx_count; | 3167 | int sleep_tx_count; |
| 3168 | 3168 | ||
| @@ -3172,7 +3172,8 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, | |||
| 3172 | u16 n_queued; | 3172 | u16 n_queued; |
| 3173 | 3173 | ||
| 3174 | tid_data = &mvmsta->tid_data[tid]; | 3174 | tid_data = &mvmsta->tid_data[tid]; |
| 3175 | if (WARN(tid_data->state != IWL_AGG_ON && | 3175 | if (WARN(!iwl_mvm_is_dqa_supported(mvm) && |
| 3176 | tid_data->state != IWL_AGG_ON && | ||
| 3176 | tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA, | 3177 | tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA, |
| 3177 | "TID %d state is %d\n", | 3178 | "TID %d state is %d\n", |
| 3178 | tid, tid_data->state)) { | 3179 | tid, tid_data->state)) { |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 4be34f902278..1927ce607798 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h | |||
| @@ -547,7 +547,7 @@ void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, | |||
| 547 | struct ieee80211_sta *sta, | 547 | struct ieee80211_sta *sta, |
| 548 | enum ieee80211_frame_release_type reason, | 548 | enum ieee80211_frame_release_type reason, |
| 549 | u16 cnt, u16 tids, bool more_data, | 549 | u16 cnt, u16 tids, bool more_data, |
| 550 | bool agg); | 550 | bool single_sta_queue); |
| 551 | int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, | 551 | int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, |
| 552 | bool drain); | 552 | bool drain); |
| 553 | void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, | 553 | void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index dd2b4a300819..3f37075f4cde 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c | |||
| @@ -7,7 +7,7 @@ | |||
| 7 | * | 7 | * |
| 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 8 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
| 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 9 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 10 | * Copyright(c) 2016 Intel Deutschland GmbH | 10 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
| 11 | * | 11 | * |
| 12 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
| 13 | * it under the terms of version 2 of the GNU General Public License as | 13 | * it under the terms of version 2 of the GNU General Public License as |
| @@ -34,6 +34,7 @@ | |||
| 34 | * | 34 | * |
| 35 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. | 35 | * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. |
| 36 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 36 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
| 37 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH | ||
| 37 | * All rights reserved. | 38 | * All rights reserved. |
| 38 | * | 39 | * |
| 39 | * Redistribution and use in source and binary forms, with or without | 40 | * Redistribution and use in source and binary forms, with or without |
| @@ -628,8 +629,10 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) | |||
| 628 | * values. | 629 | * values. |
| 629 | * Note that we don't need to make sure it isn't agg'd, since we're | 630 | * Note that we don't need to make sure it isn't agg'd, since we're |
| 630 | * TXing non-sta | 631 | * TXing non-sta |
| 632 | * For DQA mode - we shouldn't increase it though | ||
| 631 | */ | 633 | */ |
| 632 | atomic_inc(&mvm->pending_frames[sta_id]); | 634 | if (!iwl_mvm_is_dqa_supported(mvm)) |
| 635 | atomic_inc(&mvm->pending_frames[sta_id]); | ||
| 633 | 636 | ||
| 634 | return 0; | 637 | return 0; |
| 635 | } | 638 | } |
| @@ -1005,11 +1008,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, | |||
| 1005 | 1008 | ||
| 1006 | spin_unlock(&mvmsta->lock); | 1009 | spin_unlock(&mvmsta->lock); |
| 1007 | 1010 | ||
| 1008 | /* Increase pending frames count if this isn't AMPDU */ | 1011 | /* Increase pending frames count if this isn't AMPDU or DQA queue */ |
| 1009 | if ((iwl_mvm_is_dqa_supported(mvm) && | 1012 | if (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu) |
| 1010 | mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_ON && | ||
| 1011 | mvmsta->tid_data[tx_cmd->tid_tspec].state != IWL_AGG_STARTING) || | ||
| 1012 | (!iwl_mvm_is_dqa_supported(mvm) && !is_ampdu)) | ||
| 1013 | atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); | 1013 | atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); |
| 1014 | 1014 | ||
| 1015 | return 0; | 1015 | return 0; |
| @@ -1079,12 +1079,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm, | |||
| 1079 | lockdep_assert_held(&mvmsta->lock); | 1079 | lockdep_assert_held(&mvmsta->lock); |
| 1080 | 1080 | ||
| 1081 | if ((tid_data->state == IWL_AGG_ON || | 1081 | if ((tid_data->state == IWL_AGG_ON || |
| 1082 | tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) && | 1082 | tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA || |
| 1083 | iwl_mvm_is_dqa_supported(mvm)) && | ||
| 1083 | iwl_mvm_tid_queued(tid_data) == 0) { | 1084 | iwl_mvm_tid_queued(tid_data) == 0) { |
| 1084 | /* | 1085 | /* |
| 1085 | * Now that this aggregation queue is empty tell mac80211 so it | 1086 | * Now that this aggregation or DQA queue is empty tell |
| 1086 | * knows we no longer have frames buffered for the station on | 1087 | * mac80211 so it knows we no longer have frames buffered for |
| 1087 | * this TID (for the TIM bitmap calculation.) | 1088 | * the station on this TID (for the TIM bitmap calculation.) |
| 1088 | */ | 1089 | */ |
| 1089 | ieee80211_sta_set_buffered(sta, tid, false); | 1090 | ieee80211_sta_set_buffered(sta, tid, false); |
| 1090 | } | 1091 | } |
| @@ -1257,7 +1258,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, | |||
| 1257 | u8 skb_freed = 0; | 1258 | u8 skb_freed = 0; |
| 1258 | u16 next_reclaimed, seq_ctl; | 1259 | u16 next_reclaimed, seq_ctl; |
| 1259 | bool is_ndp = false; | 1260 | bool is_ndp = false; |
| 1260 | bool txq_agg = false; /* Is this TXQ aggregated */ | ||
| 1261 | 1261 | ||
| 1262 | __skb_queue_head_init(&skbs); | 1262 | __skb_queue_head_init(&skbs); |
| 1263 | 1263 | ||
| @@ -1283,6 +1283,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, | |||
| 1283 | info->flags |= IEEE80211_TX_STAT_ACK; | 1283 | info->flags |= IEEE80211_TX_STAT_ACK; |
| 1284 | break; | 1284 | break; |
| 1285 | case TX_STATUS_FAIL_DEST_PS: | 1285 | case TX_STATUS_FAIL_DEST_PS: |
| 1286 | /* In DQA, the FW should have stopped the queue and not | ||
| 1287 | * returned this status | ||
| 1288 | */ | ||
| 1289 | WARN_ON(iwl_mvm_is_dqa_supported(mvm)); | ||
| 1286 | info->flags |= IEEE80211_TX_STAT_TX_FILTERED; | 1290 | info->flags |= IEEE80211_TX_STAT_TX_FILTERED; |
| 1287 | break; | 1291 | break; |
| 1288 | default: | 1292 | default: |
| @@ -1387,15 +1391,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, | |||
| 1387 | bool send_eosp_ndp = false; | 1391 | bool send_eosp_ndp = false; |
| 1388 | 1392 | ||
| 1389 | spin_lock_bh(&mvmsta->lock); | 1393 | spin_lock_bh(&mvmsta->lock); |
| 1390 | if (iwl_mvm_is_dqa_supported(mvm)) { | ||
| 1391 | enum iwl_mvm_agg_state state; | ||
| 1392 | |||
| 1393 | state = mvmsta->tid_data[tid].state; | ||
| 1394 | txq_agg = (state == IWL_AGG_ON || | ||
| 1395 | state == IWL_EMPTYING_HW_QUEUE_DELBA); | ||
| 1396 | } else { | ||
| 1397 | txq_agg = txq_id >= mvm->first_agg_queue; | ||
| 1398 | } | ||
| 1399 | 1394 | ||
| 1400 | if (!is_ndp) { | 1395 | if (!is_ndp) { |
| 1401 | tid_data->next_reclaimed = next_reclaimed; | 1396 | tid_data->next_reclaimed = next_reclaimed; |
| @@ -1452,11 +1447,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, | |||
| 1452 | * If the txq is not an AMPDU queue, there is no chance we freed | 1447 | * If the txq is not an AMPDU queue, there is no chance we freed |
| 1453 | * several skbs. Check that out... | 1448 | * several skbs. Check that out... |
| 1454 | */ | 1449 | */ |
| 1455 | if (txq_agg) | 1450 | if (iwl_mvm_is_dqa_supported(mvm) || txq_id >= mvm->first_agg_queue) |
| 1456 | goto out; | 1451 | goto out; |
| 1457 | 1452 | ||
| 1458 | /* We can't free more than one frame at once on a shared queue */ | 1453 | /* We can't free more than one frame at once on a shared queue */ |
| 1459 | WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1)); | 1454 | WARN_ON(skb_freed > 1); |
| 1460 | 1455 | ||
| 1461 | /* If we have still frames for this STA nothing to do here */ | 1456 | /* If we have still frames for this STA nothing to do here */ |
| 1462 | if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) | 1457 | if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) |
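Net effect of the tx.c changes: the legacy pending_frames counter is maintained only when DQA is not supported, since DQA-capable firmware tracks per-queue occupancy itself. The invariant, reduced to two hypothetical helpers (tx_account()/reclaim_account() are illustrative, not driver symbols):

#include <linux/atomic.h>

/* Sketch of the pending-frames rule after this patch. */
static void tx_account(struct iwl_mvm *mvm, u8 sta_id)
{
	if (!iwl_mvm_is_dqa_supported(mvm))
		atomic_inc(&mvm->pending_frames[sta_id]);
}

/* Returns true when the last pending frame for the station was freed
 * (only meaningful in non-DQA mode; DQA never touches the counter). */
static bool reclaim_account(struct iwl_mvm *mvm, u8 sta_id, int freed)
{
	if (iwl_mvm_is_dqa_supported(mvm))
		return false;
	return atomic_sub_and_test(freed, &mvm->pending_frames[sta_id]);
}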
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c index 5ebca1d0cfc7..b62e03d11c2e 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.c +++ b/drivers/net/wireless/marvell/mwifiex/main.c | |||
| @@ -57,8 +57,8 @@ MODULE_PARM_DESC(mfg_mode, "manufacturing mode enable:1, disable:0"); | |||
| 57 | * In case of any errors during initialization, this function also ensures | 57 | * In case of any errors during initialization, this function also ensures |
| 58 | * proper cleanup before exiting. | 58 | * proper cleanup before exiting. |
| 59 | */ | 59 | */ |
| 60 | static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, | 60 | static int mwifiex_register(void *card, struct device *dev, |
| 61 | void **padapter) | 61 | struct mwifiex_if_ops *if_ops, void **padapter) |
| 62 | { | 62 | { |
| 63 | struct mwifiex_adapter *adapter; | 63 | struct mwifiex_adapter *adapter; |
| 64 | int i; | 64 | int i; |
| @@ -68,6 +68,7 @@ static int mwifiex_register(void *card, struct mwifiex_if_ops *if_ops, | |||
| 68 | return -ENOMEM; | 68 | return -ENOMEM; |
| 69 | 69 | ||
| 70 | *padapter = adapter; | 70 | *padapter = adapter; |
| 71 | adapter->dev = dev; | ||
| 71 | adapter->card = card; | 72 | adapter->card = card; |
| 72 | 73 | ||
| 73 | /* Save interface specific operations in adapter */ | 74 | /* Save interface specific operations in adapter */ |
| @@ -1568,12 +1569,11 @@ mwifiex_add_card(void *card, struct completion *fw_done, | |||
| 1568 | { | 1569 | { |
| 1569 | struct mwifiex_adapter *adapter; | 1570 | struct mwifiex_adapter *adapter; |
| 1570 | 1571 | ||
| 1571 | if (mwifiex_register(card, if_ops, (void **)&adapter)) { | 1572 | if (mwifiex_register(card, dev, if_ops, (void **)&adapter)) { |
| 1572 | pr_err("%s: software init failed\n", __func__); | 1573 | pr_err("%s: software init failed\n", __func__); |
| 1573 | goto err_init_sw; | 1574 | goto err_init_sw; |
| 1574 | } | 1575 | } |
| 1575 | 1576 | ||
| 1576 | adapter->dev = dev; | ||
| 1577 | mwifiex_probe_of(adapter); | 1577 | mwifiex_probe_of(adapter); |
| 1578 | 1578 | ||
| 1579 | adapter->iface_type = iface_type; | 1579 | adapter->iface_type = iface_type; |
| @@ -1718,6 +1718,9 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter) | |||
| 1718 | wiphy_unregister(adapter->wiphy); | 1718 | wiphy_unregister(adapter->wiphy); |
| 1719 | wiphy_free(adapter->wiphy); | 1719 | wiphy_free(adapter->wiphy); |
| 1720 | 1720 | ||
| 1721 | if (adapter->irq_wakeup >= 0) | ||
| 1722 | device_init_wakeup(adapter->dev, false); | ||
| 1723 | |||
| 1721 | /* Unregister device */ | 1724 | /* Unregister device */ |
| 1722 | mwifiex_dbg(adapter, INFO, | 1725 | mwifiex_dbg(adapter, INFO, |
| 1723 | "info: unregister device\n"); | 1726 | "info: unregister device\n"); |
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c index a0d918094889..b8c990d10d6e 100644 --- a/drivers/net/wireless/marvell/mwifiex/pcie.c +++ b/drivers/net/wireless/marvell/mwifiex/pcie.c | |||
| @@ -2739,6 +2739,21 @@ static void mwifiex_pcie_device_dump(struct mwifiex_adapter *adapter) | |||
| 2739 | schedule_work(&card->work); | 2739 | schedule_work(&card->work); |
| 2740 | } | 2740 | } |
| 2741 | 2741 | ||
| 2742 | static void mwifiex_pcie_free_buffers(struct mwifiex_adapter *adapter) | ||
| 2743 | { | ||
| 2744 | struct pcie_service_card *card = adapter->card; | ||
| 2745 | const struct mwifiex_pcie_card_reg *reg = card->pcie.reg; | ||
| 2746 | |||
| 2747 | if (reg->sleep_cookie) | ||
| 2748 | mwifiex_pcie_delete_sleep_cookie_buf(adapter); | ||
| 2749 | |||
| 2750 | mwifiex_pcie_delete_cmdrsp_buf(adapter); | ||
| 2751 | mwifiex_pcie_delete_evtbd_ring(adapter); | ||
| 2752 | mwifiex_pcie_delete_rxbd_ring(adapter); | ||
| 2753 | mwifiex_pcie_delete_txbd_ring(adapter); | ||
| 2754 | card->cmdrsp_buf = NULL; | ||
| 2755 | } | ||
| 2756 | |||
| 2742 | /* | 2757 | /* |
| 2743 | * This function initializes the PCI-E host memory space, WCB rings, etc. | 2758 | * This function initializes the PCI-E host memory space, WCB rings, etc. |
| 2744 | * | 2759 | * |
| @@ -2850,13 +2865,6 @@ err_enable_dev: | |||
| 2850 | 2865 | ||
| 2851 | /* | 2866 | /* |
| 2852 | * This function cleans up the allocated card buffers. | 2867 | * This function cleans up the allocated card buffers. |
| 2853 | * | ||
| 2854 | * The following are freed by this function - | ||
| 2855 | * - TXBD ring buffers | ||
| 2856 | * - RXBD ring buffers | ||
| 2857 | * - Event BD ring buffers | ||
| 2858 | * - Command response ring buffer | ||
| 2859 | * - Sleep cookie buffer | ||
| 2860 | */ | 2868 | */ |
| 2861 | static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) | 2869 | static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) |
| 2862 | { | 2870 | { |
| @@ -2875,6 +2883,8 @@ static void mwifiex_cleanup_pcie(struct mwifiex_adapter *adapter) | |||
| 2875 | "Failed to write driver not-ready signature\n"); | 2883 | "Failed to write driver not-ready signature\n"); |
| 2876 | } | 2884 | } |
| 2877 | 2885 | ||
| 2886 | mwifiex_pcie_free_buffers(adapter); | ||
| 2887 | |||
| 2878 | if (pdev) { | 2888 | if (pdev) { |
| 2879 | pci_iounmap(pdev, card->pci_mmap); | 2889 | pci_iounmap(pdev, card->pci_mmap); |
| 2880 | pci_iounmap(pdev, card->pci_mmap1); | 2890 | pci_iounmap(pdev, card->pci_mmap1); |
| @@ -3126,10 +3136,7 @@ err_cre_txbd: | |||
| 3126 | pci_iounmap(pdev, card->pci_mmap1); | 3136 | pci_iounmap(pdev, card->pci_mmap1); |
| 3127 | } | 3137 | } |
| 3128 | 3138 | ||
| 3129 | /* This function cleans up the PCI-E host memory space. | 3139 | /* This function cleans up the PCI-E host memory space. */ |
| 3130 | * Some code is extracted from mwifiex_unregister_dev() | ||
| 3131 | * | ||
| 3132 | */ | ||
| 3133 | static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) | 3140 | static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) |
| 3134 | { | 3141 | { |
| 3135 | struct pcie_service_card *card = adapter->card; | 3142 | struct pcie_service_card *card = adapter->card; |
| @@ -3140,14 +3147,7 @@ static void mwifiex_pcie_down_dev(struct mwifiex_adapter *adapter) | |||
| 3140 | 3147 | ||
| 3141 | adapter->seq_num = 0; | 3148 | adapter->seq_num = 0; |
| 3142 | 3149 | ||
| 3143 | if (reg->sleep_cookie) | 3150 | mwifiex_pcie_free_buffers(adapter); |
| 3144 | mwifiex_pcie_delete_sleep_cookie_buf(adapter); | ||
| 3145 | |||
| 3146 | mwifiex_pcie_delete_cmdrsp_buf(adapter); | ||
| 3147 | mwifiex_pcie_delete_evtbd_ring(adapter); | ||
| 3148 | mwifiex_pcie_delete_rxbd_ring(adapter); | ||
| 3149 | mwifiex_pcie_delete_txbd_ring(adapter); | ||
| 3150 | card->cmdrsp_buf = NULL; | ||
| 3151 | } | 3151 | } |
| 3152 | 3152 | ||
| 3153 | static struct mwifiex_if_ops pcie_ops = { | 3153 | static struct mwifiex_if_ops pcie_ops = { |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 779f516e7a4e..47a479f26e5d 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
| @@ -343,8 +343,6 @@ static int __nvme_rdma_init_request(struct nvme_rdma_ctrl *ctrl, | |||
| 343 | struct ib_device *ibdev = dev->dev; | 343 | struct ib_device *ibdev = dev->dev; |
| 344 | int ret; | 344 | int ret; |
| 345 | 345 | ||
| 346 | BUG_ON(queue_idx >= ctrl->queue_count); | ||
| 347 | |||
| 348 | ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command), | 346 | ret = nvme_rdma_alloc_qe(ibdev, &req->sqe, sizeof(struct nvme_command), |
| 349 | DMA_TO_DEVICE); | 347 | DMA_TO_DEVICE); |
| 350 | if (ret) | 348 | if (ret) |
| @@ -652,8 +650,22 @@ out_free_queues: | |||
| 652 | 650 | ||
| 653 | static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) | 651 | static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) |
| 654 | { | 652 | { |
| 653 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; | ||
| 654 | unsigned int nr_io_queues; | ||
| 655 | int i, ret; | 655 | int i, ret; |
| 656 | 656 | ||
| 657 | nr_io_queues = min(opts->nr_io_queues, num_online_cpus()); | ||
| 658 | ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); | ||
| 659 | if (ret) | ||
| 660 | return ret; | ||
| 661 | |||
| 662 | ctrl->queue_count = nr_io_queues + 1; | ||
| 663 | if (ctrl->queue_count < 2) | ||
| 664 | return 0; | ||
| 665 | |||
| 666 | dev_info(ctrl->ctrl.device, | ||
| 667 | "creating %d I/O queues.\n", nr_io_queues); | ||
| 668 | |||
| 657 | for (i = 1; i < ctrl->queue_count; i++) { | 669 | for (i = 1; i < ctrl->queue_count; i++) { |
| 658 | ret = nvme_rdma_init_queue(ctrl, i, | 670 | ret = nvme_rdma_init_queue(ctrl, i, |
| 659 | ctrl->ctrl.opts->queue_size); | 671 | ctrl->ctrl.opts->queue_size); |
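Two details in the new nvme_rdma_init_io_queues() are worth spelling out: the requested count is first clamped to num_online_cpus(), since queues beyond the CPU count buy nothing, and nvme_set_queue_count() (NVMe Set Features, Number of Queues) may lower it further to what the controller actually grants. queue_count then includes the admin queue at index 0, hence the +1, and queue_count < 2 simply means no I/O queues were granted.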
| @@ -1791,20 +1803,8 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { | |||
| 1791 | 1803 | ||
| 1792 | static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl) | 1804 | static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl) |
| 1793 | { | 1805 | { |
| 1794 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; | ||
| 1795 | int ret; | 1806 | int ret; |
| 1796 | 1807 | ||
| 1797 | ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); | ||
| 1798 | if (ret) | ||
| 1799 | return ret; | ||
| 1800 | |||
| 1801 | ctrl->queue_count = opts->nr_io_queues + 1; | ||
| 1802 | if (ctrl->queue_count < 2) | ||
| 1803 | return 0; | ||
| 1804 | |||
| 1805 | dev_info(ctrl->ctrl.device, | ||
| 1806 | "creating %d I/O queues.\n", opts->nr_io_queues); | ||
| 1807 | |||
| 1808 | ret = nvme_rdma_init_io_queues(ctrl); | 1808 | ret = nvme_rdma_init_io_queues(ctrl); |
| 1809 | if (ret) | 1809 | if (ret) |
| 1810 | return ret; | 1810 | return ret; |
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 11b0a0a5f661..798653b329b2 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
| @@ -425,6 +425,13 @@ void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, | |||
| 425 | ctrl->sqs[qid] = sq; | 425 | ctrl->sqs[qid] = sq; |
| 426 | } | 426 | } |
| 427 | 427 | ||
| 428 | static void nvmet_confirm_sq(struct percpu_ref *ref) | ||
| 429 | { | ||
| 430 | struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref); | ||
| 431 | |||
| 432 | complete(&sq->confirm_done); | ||
| 433 | } | ||
| 434 | |||
| 428 | void nvmet_sq_destroy(struct nvmet_sq *sq) | 435 | void nvmet_sq_destroy(struct nvmet_sq *sq) |
| 429 | { | 436 | { |
| 430 | /* | 437 | /* |
| @@ -433,7 +440,8 @@ void nvmet_sq_destroy(struct nvmet_sq *sq) | |||
| 433 | */ | 440 | */ |
| 434 | if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq) | 441 | if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq) |
| 435 | nvmet_async_events_free(sq->ctrl); | 442 | nvmet_async_events_free(sq->ctrl); |
| 436 | percpu_ref_kill(&sq->ref); | 443 | percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq); |
| 444 | wait_for_completion(&sq->confirm_done); | ||
| 437 | wait_for_completion(&sq->free_done); | 445 | wait_for_completion(&sq->free_done); |
| 438 | percpu_ref_exit(&sq->ref); | 446 | percpu_ref_exit(&sq->ref); |
| 439 | 447 | ||
| @@ -461,6 +469,7 @@ int nvmet_sq_init(struct nvmet_sq *sq) | |||
| 461 | return ret; | 469 | return ret; |
| 462 | } | 470 | } |
| 463 | init_completion(&sq->free_done); | 471 | init_completion(&sq->free_done); |
| 472 | init_completion(&sq->confirm_done); | ||
| 464 | 473 | ||
| 465 | return 0; | 474 | return 0; |
| 466 | } | 475 | } |
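The confirm_done/free_done pair gives nvmet_sq_destroy() a two-phase quiesce: percpu_ref_kill_and_confirm() switches the ref to atomic mode and runs the confirm callback once no CPU can still succeed in percpu_ref_tryget_live() on it (after an RCU grace period), while free_done is completed later by the ref's release function when the last in-flight reference drops. The pattern in miniature, with sq_* as placeholder names:

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

struct sq {
	struct percpu_ref ref;
	struct completion confirm_done;	/* no new tryget can succeed */
	struct completion free_done;	/* last reference dropped */
};

static void sq_kill_confirmed(struct percpu_ref *ref)
{
	struct sq *sq = container_of(ref, struct sq, ref);

	complete(&sq->confirm_done);
}

static void sq_teardown(struct sq *sq)
{
	percpu_ref_kill_and_confirm(&sq->ref, sq_kill_confirmed);
	wait_for_completion(&sq->confirm_done);	/* phase 1: entry closed */
	wait_for_completion(&sq->free_done);	/* phase 2: drain in-flight */
	percpu_ref_exit(&sq->ref);
}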
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index d1f06e7768ff..22f7bc6bac7f 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c | |||
| @@ -223,8 +223,6 @@ static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx) | |||
| 223 | static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, | 223 | static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, |
| 224 | struct nvme_loop_iod *iod, unsigned int queue_idx) | 224 | struct nvme_loop_iod *iod, unsigned int queue_idx) |
| 225 | { | 225 | { |
| 226 | BUG_ON(queue_idx >= ctrl->queue_count); | ||
| 227 | |||
| 228 | iod->req.cmd = &iod->cmd; | 226 | iod->req.cmd = &iod->cmd; |
| 229 | iod->req.rsp = &iod->rsp; | 227 | iod->req.rsp = &iod->rsp; |
| 230 | iod->queue = &ctrl->queues[queue_idx]; | 228 | iod->queue = &ctrl->queues[queue_idx]; |
| @@ -288,9 +286,9 @@ static struct blk_mq_ops nvme_loop_admin_mq_ops = { | |||
| 288 | 286 | ||
| 289 | static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) | 287 | static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) |
| 290 | { | 288 | { |
| 289 | nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); | ||
| 291 | blk_cleanup_queue(ctrl->ctrl.admin_q); | 290 | blk_cleanup_queue(ctrl->ctrl.admin_q); |
| 292 | blk_mq_free_tag_set(&ctrl->admin_tag_set); | 291 | blk_mq_free_tag_set(&ctrl->admin_tag_set); |
| 293 | nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); | ||
| 294 | } | 292 | } |
| 295 | 293 | ||
| 296 | static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) | 294 | static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl) |
| @@ -314,6 +312,43 @@ free_ctrl: | |||
| 314 | kfree(ctrl); | 312 | kfree(ctrl); |
| 315 | } | 313 | } |
| 316 | 314 | ||
| 315 | static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) | ||
| 316 | { | ||
| 317 | int i; | ||
| 318 | |||
| 319 | for (i = 1; i < ctrl->queue_count; i++) | ||
| 320 | nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); | ||
| 321 | } | ||
| 322 | |||
| 323 | static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) | ||
| 324 | { | ||
| 325 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; | ||
| 326 | unsigned int nr_io_queues; | ||
| 327 | int ret, i; | ||
| 328 | |||
| 329 | nr_io_queues = min(opts->nr_io_queues, num_online_cpus()); | ||
| 330 | ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); | ||
| 331 | if (ret || !nr_io_queues) | ||
| 332 | return ret; | ||
| 333 | |||
| 334 | dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); | ||
| 335 | |||
| 336 | for (i = 1; i <= nr_io_queues; i++) { | ||
| 337 | ctrl->queues[i].ctrl = ctrl; | ||
| 338 | ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); | ||
| 339 | if (ret) | ||
| 340 | goto out_destroy_queues; | ||
| 341 | |||
| 342 | ctrl->queue_count++; | ||
| 343 | } | ||
| 344 | |||
| 345 | return 0; | ||
| 346 | |||
| 347 | out_destroy_queues: | ||
| 348 | nvme_loop_destroy_io_queues(ctrl); | ||
| 349 | return ret; | ||
| 350 | } | ||
| 351 | |||
| 317 | static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) | 352 | static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) |
| 318 | { | 353 | { |
| 319 | int error; | 354 | int error; |
| @@ -385,17 +420,13 @@ out_free_sq: | |||
| 385 | 420 | ||
| 386 | static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) | 421 | static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) |
| 387 | { | 422 | { |
| 388 | int i; | ||
| 389 | |||
| 390 | nvme_stop_keep_alive(&ctrl->ctrl); | 423 | nvme_stop_keep_alive(&ctrl->ctrl); |
| 391 | 424 | ||
| 392 | if (ctrl->queue_count > 1) { | 425 | if (ctrl->queue_count > 1) { |
| 393 | nvme_stop_queues(&ctrl->ctrl); | 426 | nvme_stop_queues(&ctrl->ctrl); |
| 394 | blk_mq_tagset_busy_iter(&ctrl->tag_set, | 427 | blk_mq_tagset_busy_iter(&ctrl->tag_set, |
| 395 | nvme_cancel_request, &ctrl->ctrl); | 428 | nvme_cancel_request, &ctrl->ctrl); |
| 396 | 429 | nvme_loop_destroy_io_queues(ctrl); | |
| 397 | for (i = 1; i < ctrl->queue_count; i++) | ||
| 398 | nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); | ||
| 399 | } | 430 | } |
| 400 | 431 | ||
| 401 | if (ctrl->ctrl.state == NVME_CTRL_LIVE) | 432 | if (ctrl->ctrl.state == NVME_CTRL_LIVE) |
| @@ -467,19 +498,14 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) | |||
| 467 | if (ret) | 498 | if (ret) |
| 468 | goto out_disable; | 499 | goto out_disable; |
| 469 | 500 | ||
| 470 | for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { | 501 | ret = nvme_loop_init_io_queues(ctrl); |
| 471 | ctrl->queues[i].ctrl = ctrl; | 502 | if (ret) |
| 472 | ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); | 503 | goto out_destroy_admin; |
| 473 | if (ret) | ||
| 474 | goto out_free_queues; | ||
| 475 | |||
| 476 | ctrl->queue_count++; | ||
| 477 | } | ||
| 478 | 504 | ||
| 479 | for (i = 1; i <= ctrl->ctrl.opts->nr_io_queues; i++) { | 505 | for (i = 1; i < ctrl->queue_count; i++) { |
| 480 | ret = nvmf_connect_io_queue(&ctrl->ctrl, i); | 506 | ret = nvmf_connect_io_queue(&ctrl->ctrl, i); |
| 481 | if (ret) | 507 | if (ret) |
| 482 | goto out_free_queues; | 508 | goto out_destroy_io; |
| 483 | } | 509 | } |
| 484 | 510 | ||
| 485 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); | 511 | changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); |
| @@ -492,9 +518,9 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) | |||
| 492 | 518 | ||
| 493 | return; | 519 | return; |
| 494 | 520 | ||
| 495 | out_free_queues: | 521 | out_destroy_io: |
| 496 | for (i = 1; i < ctrl->queue_count; i++) | 522 | nvme_loop_destroy_io_queues(ctrl); |
| 497 | nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); | 523 | out_destroy_admin: |
| 498 | nvme_loop_destroy_admin_queue(ctrl); | 524 | nvme_loop_destroy_admin_queue(ctrl); |
| 499 | out_disable: | 525 | out_disable: |
| 500 | dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); | 526 | dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); |
| @@ -533,25 +559,12 @@ static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = { | |||
| 533 | 559 | ||
| 534 | static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) | 560 | static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) |
| 535 | { | 561 | { |
| 536 | struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; | ||
| 537 | int ret, i; | 562 | int ret, i; |
| 538 | 563 | ||
| 539 | ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); | 564 | ret = nvme_loop_init_io_queues(ctrl); |
| 540 | if (ret || !opts->nr_io_queues) | 565 | if (ret) |
| 541 | return ret; | 566 | return ret; |
| 542 | 567 | ||
| 543 | dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", | ||
| 544 | opts->nr_io_queues); | ||
| 545 | |||
| 546 | for (i = 1; i <= opts->nr_io_queues; i++) { | ||
| 547 | ctrl->queues[i].ctrl = ctrl; | ||
| 548 | ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); | ||
| 549 | if (ret) | ||
| 550 | goto out_destroy_queues; | ||
| 551 | |||
| 552 | ctrl->queue_count++; | ||
| 553 | } | ||
| 554 | |||
| 555 | memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); | 568 | memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); |
| 556 | ctrl->tag_set.ops = &nvme_loop_mq_ops; | 569 | ctrl->tag_set.ops = &nvme_loop_mq_ops; |
| 557 | ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; | 570 | ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; |
| @@ -575,7 +588,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) | |||
| 575 | goto out_free_tagset; | 588 | goto out_free_tagset; |
| 576 | } | 589 | } |
| 577 | 590 | ||
| 578 | for (i = 1; i <= opts->nr_io_queues; i++) { | 591 | for (i = 1; i < ctrl->queue_count; i++) { |
| 579 | ret = nvmf_connect_io_queue(&ctrl->ctrl, i); | 592 | ret = nvmf_connect_io_queue(&ctrl->ctrl, i); |
| 580 | if (ret) | 593 | if (ret) |
| 581 | goto out_cleanup_connect_q; | 594 | goto out_cleanup_connect_q; |
| @@ -588,8 +601,7 @@ out_cleanup_connect_q: | |||
| 588 | out_free_tagset: | 601 | out_free_tagset: |
| 589 | blk_mq_free_tag_set(&ctrl->tag_set); | 602 | blk_mq_free_tag_set(&ctrl->tag_set); |
| 590 | out_destroy_queues: | 603 | out_destroy_queues: |
| 591 | for (i = 1; i < ctrl->queue_count; i++) | 604 | nvme_loop_destroy_io_queues(ctrl); |
| 592 | nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); | ||
| 593 | return ret; | 605 | return ret; |
| 594 | } | 606 | } |
| 595 | 607 | ||
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 1370eee0a3c0..f7ff15f17ca9 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h | |||
| @@ -73,6 +73,7 @@ struct nvmet_sq { | |||
| 73 | u16 qid; | 73 | u16 qid; |
| 74 | u16 size; | 74 | u16 size; |
| 75 | struct completion free_done; | 75 | struct completion free_done; |
| 76 | struct completion confirm_done; | ||
| 76 | }; | 77 | }; |
| 77 | 78 | ||
| 78 | /** | 79 | /** |
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index 9aa1da3778b3..ecc4fe862561 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c | |||
| @@ -703,11 +703,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue, | |||
| 703 | { | 703 | { |
| 704 | u16 status; | 704 | u16 status; |
| 705 | 705 | ||
| 706 | cmd->queue = queue; | ||
| 707 | cmd->n_rdma = 0; | ||
| 708 | cmd->req.port = queue->port; | ||
| 709 | |||
| 710 | |||
| 711 | ib_dma_sync_single_for_cpu(queue->dev->device, | 706 | ib_dma_sync_single_for_cpu(queue->dev->device, |
| 712 | cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, | 707 | cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, |
| 713 | DMA_FROM_DEVICE); | 708 | DMA_FROM_DEVICE); |
| @@ -760,9 +755,12 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) | |||
| 760 | 755 | ||
| 761 | cmd->queue = queue; | 756 | cmd->queue = queue; |
| 762 | rsp = nvmet_rdma_get_rsp(queue); | 757 | rsp = nvmet_rdma_get_rsp(queue); |
| 758 | rsp->queue = queue; | ||
| 763 | rsp->cmd = cmd; | 759 | rsp->cmd = cmd; |
| 764 | rsp->flags = 0; | 760 | rsp->flags = 0; |
| 765 | rsp->req.cmd = cmd->nvme_cmd; | 761 | rsp->req.cmd = cmd->nvme_cmd; |
| 762 | rsp->req.port = queue->port; | ||
| 763 | rsp->n_rdma = 0; | ||
| 766 | 764 | ||
| 767 | if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { | 765 | if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { |
| 768 | unsigned long flags; | 766 | unsigned long flags; |
diff --git a/drivers/parport/share.c b/drivers/parport/share.c index bc090daa850a..5dc53d420ca8 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c | |||
| @@ -939,8 +939,10 @@ parport_register_dev_model(struct parport *port, const char *name, | |||
| 939 | * pardevice fields. -arca | 939 | * pardevice fields. -arca |
| 940 | */ | 940 | */ |
| 941 | port->ops->init_state(par_dev, par_dev->state); | 941 | port->ops->init_state(par_dev, par_dev->state); |
| 942 | port->proc_device = par_dev; | 942 | if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) { |
| 943 | parport_device_proc_register(par_dev); | 943 | port->proc_device = par_dev; |
| 944 | parport_device_proc_register(par_dev); | ||
| 945 | } | ||
| 944 | 946 | ||
| 945 | return par_dev; | 947 | return par_dev; |
| 946 | 948 | ||
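The guard works because test_and_set_bit() atomically sets the bit and returns its previous value, so exactly one registration path per port can win. The idiom in isolation (register_proc_for() is a hypothetical stand-in for the two guarded lines above):

#include <linux/bitops.h>
#include <linux/parport.h>

static unsigned long devflags;		/* stands in for port->devflags */
#define DEVPROC_REGISTERED 0

static void maybe_register_proc(struct pardevice *par_dev)
{
	/* old bit value is 0 only for the first caller */
	if (!test_and_set_bit(DEVPROC_REGISTERED, &devflags))
		register_proc_for(par_dev);	/* hypothetical helper */
}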
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index dc5277ad1b5a..005cadb7a3f8 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig | |||
| @@ -449,6 +449,7 @@ config PHY_QCOM_UFS | |||
| 449 | config PHY_QCOM_USB_HS | 449 | config PHY_QCOM_USB_HS |
| 450 | tristate "Qualcomm USB HS PHY module" | 450 | tristate "Qualcomm USB HS PHY module" |
| 451 | depends on USB_ULPI_BUS | 451 | depends on USB_ULPI_BUS |
| 452 | depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in | ||
| 452 | select GENERIC_PHY | 453 | select GENERIC_PHY |
| 453 | help | 454 | help |
| 454 | Support for the USB high-speed ULPI compliant phy on Qualcomm | 455 | Support for the USB high-speed ULPI compliant phy on Qualcomm |
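The added dependency uses a standard Kconfig idiom: with tristate logic (!y = n, !n = y, !m = m), EXTCON || !EXTCON evaluates to y when EXTCON is y or n, but to m when EXTCON=m. The PHY driver is then itself restricted to m and can never be built into a kernel whose extcon support is a module, which would leave its extcon symbol references unresolved.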
| @@ -510,12 +511,4 @@ config PHY_MESON8B_USB2 | |||
| 510 | and GXBB SoCs. | 511 | and GXBB SoCs. |
| 511 | If unsure, say N. | 512 | If unsure, say N. |
| 512 | 513 | ||
| 513 | config PHY_NSP_USB3 | ||
| 514 | tristate "Broadcom NorthStar plus USB3 PHY driver" | ||
| 515 | depends on OF && (ARCH_BCM_NSP || COMPILE_TEST) | ||
| 516 | select GENERIC_PHY | ||
| 517 | default ARCH_BCM_NSP | ||
| 518 | help | ||
| 519 | Enable this to support the Broadcom Northstar plus USB3 PHY. | ||
| 520 | If unsure, say N. | ||
| 521 | endmenu | 514 | endmenu |
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index e7b0feb1e125..dd8f3b5d2918 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile | |||
| @@ -62,4 +62,3 @@ obj-$(CONFIG_PHY_CYGNUS_PCIE) += phy-bcm-cygnus-pcie.o | |||
| 62 | obj-$(CONFIG_ARCH_TEGRA) += tegra/ | 62 | obj-$(CONFIG_ARCH_TEGRA) += tegra/ |
| 63 | obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o | 63 | obj-$(CONFIG_PHY_NS2_PCIE) += phy-bcm-ns2-pcie.o |
| 64 | obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o | 64 | obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o |
| 65 | obj-$(CONFIG_PHY_NSP_USB3) += phy-bcm-nsp-usb3.o | ||
diff --git a/drivers/phy/phy-bcm-nsp-usb3.c b/drivers/phy/phy-bcm-nsp-usb3.c deleted file mode 100644 index 49024eaa5545..000000000000 --- a/drivers/phy/phy-bcm-nsp-usb3.c +++ /dev/null | |||
| @@ -1,177 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Copyright (C) 2016 Broadcom | ||
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or | ||
| 5 | * modify it under the terms of the GNU General Public License as | ||
| 6 | * published by the Free Software Foundation version 2. | ||
| 7 | * | ||
| 8 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
| 9 | * kind, whether express or implied; without even the implied warranty | ||
| 10 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 11 | * GNU General Public License for more details. | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/delay.h> | ||
| 15 | #include <linux/io.h> | ||
| 16 | #include <linux/kernel.h> | ||
| 17 | #include <linux/mfd/syscon.h> | ||
| 18 | #include <linux/mdio.h> | ||
| 19 | #include <linux/module.h> | ||
| 20 | #include <linux/of.h> | ||
| 21 | #include <linux/of_address.h> | ||
| 22 | #include <linux/phy/phy.h> | ||
| 23 | #include <linux/regmap.h> | ||
| 24 | |||
| 25 | #define NSP_USB3_RST_CTRL_OFFSET 0x3f8 | ||
| 26 | |||
| 27 | /* mdio reg access */ | ||
| 28 | #define NSP_USB3_PHY_BASE_ADDR_REG 0x1f | ||
| 29 | |||
| 30 | #define NSP_USB3_PHY_PLL30_BLOCK 0x8000 | ||
| 31 | #define NSP_USB3_PLL_CONTROL 0x01 | ||
| 32 | #define NSP_USB3_PLLA_CONTROL0 0x0a | ||
| 33 | #define NSP_USB3_PLLA_CONTROL1 0x0b | ||
| 34 | |||
| 35 | #define NSP_USB3_PHY_TX_PMD_BLOCK 0x8040 | ||
| 36 | #define NSP_USB3_TX_PMD_CONTROL1 0x01 | ||
| 37 | |||
| 38 | #define NSP_USB3_PHY_PIPE_BLOCK 0x8060 | ||
| 39 | #define NSP_USB3_LFPS_CMP 0x02 | ||
| 40 | #define NSP_USB3_LFPS_DEGLITCH 0x03 | ||
| 41 | |||
| 42 | struct nsp_usb3_phy { | ||
| 43 | struct regmap *usb3_ctrl; | ||
| 44 | struct phy *phy; | ||
| 45 | struct mdio_device *mdiodev; | ||
| 46 | }; | ||
| 47 | |||
| 48 | static int nsp_usb3_phy_init(struct phy *phy) | ||
| 49 | { | ||
| 50 | struct nsp_usb3_phy *iphy = phy_get_drvdata(phy); | ||
| 51 | struct mii_bus *bus = iphy->mdiodev->bus; | ||
| 52 | int addr = iphy->mdiodev->addr; | ||
| 53 | u32 data; | ||
| 54 | int rc; | ||
| 55 | |||
| 56 | rc = regmap_read(iphy->usb3_ctrl, 0, &data); | ||
| 57 | if (rc) | ||
| 58 | return rc; | ||
| 59 | data |= 1; | ||
| 60 | rc = regmap_write(iphy->usb3_ctrl, 0, data); | ||
| 61 | if (rc) | ||
| 62 | return rc; | ||
| 63 | |||
| 64 | rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 1); | ||
| 65 | if (rc) | ||
| 66 | return rc; | ||
| 67 | |||
| 68 | rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG, | ||
| 69 | NSP_USB3_PHY_PLL30_BLOCK); | ||
| 70 | if (rc) | ||
| 71 | return rc; | ||
| 72 | |||
| 73 | rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x1000); | ||
| 74 | if (rc) | ||
| 75 | return rc; | ||
| 76 | |||
| 77 | rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL0, 0x6400); | ||
| 78 | if (rc) | ||
| 79 | return rc; | ||
| 80 | |||
| 81 | rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0xc000); | ||
| 82 | if (rc) | ||
| 83 | return rc; | ||
| 84 | |||
| 85 | rc = mdiobus_write(bus, addr, NSP_USB3_PLLA_CONTROL1, 0x8000); | ||
| 86 | if (rc) | ||
| 87 | return rc; | ||
| 88 | |||
| 89 | rc = regmap_write(iphy->usb3_ctrl, NSP_USB3_RST_CTRL_OFFSET, 0); | ||
| 90 | if (rc) | ||
| 91 | return rc; | ||
| 92 | |||
| 93 | rc = mdiobus_write(bus, addr, NSP_USB3_PLL_CONTROL, 0x9000); | ||
| 94 | if (rc) | ||
| 95 | return rc; | ||
| 96 | |||
| 97 | rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG, | ||
| 98 | NSP_USB3_PHY_PIPE_BLOCK); | ||
| 99 | if (rc) | ||
| 100 | return rc; | ||
| 101 | |||
| 102 | rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_CMP, 0xf30d); | ||
| 103 | if (rc) | ||
| 104 | return rc; | ||
| 105 | |||
| 106 | rc = mdiobus_write(bus, addr, NSP_USB3_LFPS_DEGLITCH, 0x6302); | ||
| 107 | if (rc) | ||
| 108 | return rc; | ||
| 109 | |||
| 110 | rc = mdiobus_write(bus, addr, NSP_USB3_PHY_BASE_ADDR_REG, | ||
| 111 | NSP_USB3_PHY_TX_PMD_BLOCK); | ||
| 112 | if (rc) | ||
| 113 | return rc; | ||
| 114 | |||
| 115 | rc = mdiobus_write(bus, addr, NSP_USB3_TX_PMD_CONTROL1, 0x1003); | ||
| 116 | |||
| 117 | return rc; | ||
| 118 | } | ||
| 119 | |||
| 120 | static struct phy_ops nsp_usb3_phy_ops = { | ||
| 121 | .init = nsp_usb3_phy_init, | ||
| 122 | .owner = THIS_MODULE, | ||
| 123 | }; | ||
| 124 | |||
| 125 | static int nsp_usb3_phy_probe(struct mdio_device *mdiodev) | ||
| 126 | { | ||
| 127 | struct device *dev = &mdiodev->dev; | ||
| 128 | struct phy_provider *provider; | ||
| 129 | struct nsp_usb3_phy *iphy; | ||
| 130 | |||
| 131 | iphy = devm_kzalloc(dev, sizeof(*iphy), GFP_KERNEL); | ||
| 132 | if (!iphy) | ||
| 133 | return -ENOMEM; | ||
| 134 | iphy->mdiodev = mdiodev; | ||
| 135 | |||
| 136 | iphy->usb3_ctrl = syscon_regmap_lookup_by_phandle(dev->of_node, | ||
| 137 | "usb3-ctrl-syscon"); | ||
| 138 | if (IS_ERR(iphy->usb3_ctrl)) | ||
| 139 | return PTR_ERR(iphy->usb3_ctrl); | ||
| 140 | |||
| 141 | iphy->phy = devm_phy_create(dev, dev->of_node, &nsp_usb3_phy_ops); | ||
| 142 | if (IS_ERR(iphy->phy)) { | ||
| 143 | dev_err(dev, "failed to create PHY\n"); | ||
| 144 | return PTR_ERR(iphy->phy); | ||
| 145 | } | ||
| 146 | |||
| 147 | phy_set_drvdata(iphy->phy, iphy); | ||
| 148 | |||
| 149 | provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); | ||
| 150 | if (IS_ERR(provider)) { | ||
| 151 | dev_err(dev, "could not register PHY provider\n"); | ||
| 152 | return PTR_ERR(provider); | ||
| 153 | } | ||
| 154 | |||
| 155 | return 0; | ||
| 156 | } | ||
| 157 | |||
| 158 | static const struct of_device_id nsp_usb3_phy_of_match[] = { | ||
| 159 | {.compatible = "brcm,nsp-usb3-phy",}, | ||
| 160 | { /* sentinel */ } | ||
| 161 | }; | ||
| 162 | |||
| 163 | static struct mdio_driver nsp_usb3_phy_driver = { | ||
| 164 | .mdiodrv = { | ||
| 165 | .driver = { | ||
| 166 | .name = "nsp-usb3-phy", | ||
| 167 | .of_match_table = nsp_usb3_phy_of_match, | ||
| 168 | }, | ||
| 169 | }, | ||
| 170 | .probe = nsp_usb3_phy_probe, | ||
| 171 | }; | ||
| 172 | |||
| 173 | mdio_module_driver(nsp_usb3_phy_driver); | ||
| 174 | |||
| 175 | MODULE_DESCRIPTION("Broadcom NSP USB3 PHY driver"); | ||
| 176 | MODULE_LICENSE("GPL v2"); | ||
| 177 | MODULE_AUTHOR("Yendapally Reddy Dhananjaya Reddy <yendapally.reddy@broadcom.com"); | ||
diff --git a/drivers/phy/phy-exynos-pcie.c b/drivers/phy/phy-exynos-pcie.c index 4f60b83641d5..60baf25d98e2 100644 --- a/drivers/phy/phy-exynos-pcie.c +++ b/drivers/phy/phy-exynos-pcie.c | |||
| @@ -254,8 +254,8 @@ static int exynos_pcie_phy_probe(struct platform_device *pdev) | |||
| 254 | 254 | ||
| 255 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 255 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
| 256 | exynos_phy->blk_base = devm_ioremap_resource(dev, res); | 256 | exynos_phy->blk_base = devm_ioremap_resource(dev, res); |
| 257 | if (IS_ERR(exynos_phy->phy_base)) | 257 | if (IS_ERR(exynos_phy->blk_base)) |
| 258 | return PTR_ERR(exynos_phy->phy_base); | 258 | return PTR_ERR(exynos_phy->blk_base); |
| 259 | 259 | ||
| 260 | exynos_phy->drv_data = drv_data; | 260 | exynos_phy->drv_data = drv_data; |
| 261 | 261 | ||
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c index 7671424d46cb..31a3a98d067c 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c +++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c | |||
| @@ -667,11 +667,11 @@ static const char * const uart_ao_b_groups[] = { | |||
| 667 | }; | 667 | }; |
| 668 | 668 | ||
| 669 | static const char * const i2c_ao_groups[] = { | 669 | static const char * const i2c_ao_groups[] = { |
| 670 | "i2c_sdk_ao", "i2c_sda_ao", | 670 | "i2c_sck_ao", "i2c_sda_ao", |
| 671 | }; | 671 | }; |
| 672 | 672 | ||
| 673 | static const char * const i2c_slave_ao_groups[] = { | 673 | static const char * const i2c_slave_ao_groups[] = { |
| 674 | "i2c_slave_sdk_ao", "i2c_slave_sda_ao", | 674 | "i2c_slave_sck_ao", "i2c_slave_sda_ao", |
| 675 | }; | 675 | }; |
| 676 | 676 | ||
| 677 | static const char * const remote_input_ao_groups[] = { | 677 | static const char * const remote_input_ao_groups[] = { |
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index 676efcc032d2..3ae8066bc127 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c | |||
| @@ -1285,6 +1285,22 @@ static void st_gpio_irq_unmask(struct irq_data *d) | |||
| 1285 | writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK); | 1285 | writel(BIT(d->hwirq), bank->base + REG_PIO_SET_PMASK); |
| 1286 | } | 1286 | } |
| 1287 | 1287 | ||
| 1288 | static int st_gpio_irq_request_resources(struct irq_data *d) | ||
| 1289 | { | ||
| 1290 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
| 1291 | |||
| 1292 | st_gpio_direction_input(gc, d->hwirq); | ||
| 1293 | |||
| 1294 | return gpiochip_lock_as_irq(gc, d->hwirq); | ||
| 1295 | } | ||
| 1296 | |||
| 1297 | static void st_gpio_irq_release_resources(struct irq_data *d) | ||
| 1298 | { | ||
| 1299 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | ||
| 1300 | |||
| 1301 | gpiochip_unlock_as_irq(gc, d->hwirq); | ||
| 1302 | } | ||
| 1303 | |||
| 1288 | static int st_gpio_irq_set_type(struct irq_data *d, unsigned type) | 1304 | static int st_gpio_irq_set_type(struct irq_data *d, unsigned type) |
| 1289 | { | 1305 | { |
| 1290 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); | 1306 | struct gpio_chip *gc = irq_data_get_irq_chip_data(d); |
| @@ -1438,12 +1454,14 @@ static struct gpio_chip st_gpio_template = { | |||
| 1438 | }; | 1454 | }; |
| 1439 | 1455 | ||
| 1440 | static struct irq_chip st_gpio_irqchip = { | 1456 | static struct irq_chip st_gpio_irqchip = { |
| 1441 | .name = "GPIO", | 1457 | .name = "GPIO", |
| 1442 | .irq_disable = st_gpio_irq_mask, | 1458 | .irq_request_resources = st_gpio_irq_request_resources, |
| 1443 | .irq_mask = st_gpio_irq_mask, | 1459 | .irq_release_resources = st_gpio_irq_release_resources, |
| 1444 | .irq_unmask = st_gpio_irq_unmask, | 1460 | .irq_disable = st_gpio_irq_mask, |
| 1445 | .irq_set_type = st_gpio_irq_set_type, | 1461 | .irq_mask = st_gpio_irq_mask, |
| 1446 | .flags = IRQCHIP_SKIP_SET_WAKE, | 1462 | .irq_unmask = st_gpio_irq_unmask, |
| 1463 | .irq_set_type = st_gpio_irq_set_type, | ||
| 1464 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
| 1447 | }; | 1465 | }; |
| 1448 | 1466 | ||
| 1449 | static int st_gpiolib_register_bank(struct st_pinctrl *info, | 1467 | static int st_gpiolib_register_bank(struct st_pinctrl *info, |
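Hooking irq_request_resources lets the irqchip prepare the pad the moment the line is claimed as an interrupt: the pin is switched to input and then locked via gpiochip_lock_as_irq(), which stops gpiolib from flipping it back to output while a handler depends on it; irq_release_resources undoes the lock when the IRQ is freed.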
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c index b68ae424cee2..743d1f458205 100644 --- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c | |||
| @@ -405,6 +405,36 @@ static const struct msm_pingroup ipq4019_groups[] = { | |||
| 405 | PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | 405 | PINGROUP(67, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), |
| 406 | PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | 406 | PINGROUP(68, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), |
| 407 | PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | 407 | PINGROUP(69, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), |
| 408 | PINGROUP(70, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 409 | PINGROUP(71, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 410 | PINGROUP(72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 411 | PINGROUP(73, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 412 | PINGROUP(74, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 413 | PINGROUP(75, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 414 | PINGROUP(76, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 415 | PINGROUP(77, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 416 | PINGROUP(78, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 417 | PINGROUP(79, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 418 | PINGROUP(80, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 419 | PINGROUP(81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 420 | PINGROUP(82, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 421 | PINGROUP(83, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 422 | PINGROUP(84, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 423 | PINGROUP(85, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 424 | PINGROUP(86, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 425 | PINGROUP(87, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 426 | PINGROUP(88, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 427 | PINGROUP(89, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 428 | PINGROUP(90, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 429 | PINGROUP(91, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 430 | PINGROUP(92, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 431 | PINGROUP(93, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 432 | PINGROUP(94, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 433 | PINGROUP(95, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 434 | PINGROUP(96, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 435 | PINGROUP(97, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 436 | PINGROUP(98, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 437 | PINGROUP(99, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA), | ||
| 408 | }; | 438 | }; |
| 409 | 439 | ||
| 410 | static const struct msm_pinctrl_soc_data ipq4019_pinctrl = { | 440 | static const struct msm_pinctrl_soc_data ipq4019_pinctrl = { |
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index c978be5eb9eb..273badd92561 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c | |||
| @@ -609,10 +609,6 @@ static void msm_gpio_irq_unmask(struct irq_data *d) | |||
| 609 | 609 | ||
| 610 | raw_spin_lock_irqsave(&pctrl->lock, flags); | 610 | raw_spin_lock_irqsave(&pctrl->lock, flags); |
| 611 | 611 | ||
| 612 | val = readl(pctrl->regs + g->intr_status_reg); | ||
| 613 | val &= ~BIT(g->intr_status_bit); | ||
| 614 | writel(val, pctrl->regs + g->intr_status_reg); | ||
| 615 | |||
| 616 | val = readl(pctrl->regs + g->intr_cfg_reg); | 612 | val = readl(pctrl->regs + g->intr_cfg_reg); |
| 617 | val |= BIT(g->intr_enable_bit); | 613 | val |= BIT(g->intr_enable_bit); |
| 618 | writel(val, pctrl->regs + g->intr_cfg_reg); | 614 | writel(val, pctrl->regs + g->intr_cfg_reg); |
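Dropping the status write from msm_gpio_irq_unmask() closes a subtle window: clearing the latched status register while unmasking discards an edge that fired while the interrupt was masked. Unmask now only re-enables delivery and leaves any pending status to be serviced normally.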
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index f9ddba7decc1..d7aa22cff480 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c | |||
| @@ -988,9 +988,16 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, | |||
| 988 | 988 | ||
| 989 | for (i = 0; i < ctrl->nr_ext_resources + 1; i++) { | 989 | for (i = 0; i < ctrl->nr_ext_resources + 1; i++) { |
| 990 | res = platform_get_resource(pdev, IORESOURCE_MEM, i); | 990 | res = platform_get_resource(pdev, IORESOURCE_MEM, i); |
| 991 | virt_base[i] = devm_ioremap_resource(&pdev->dev, res); | 991 | if (!res) { |
| 992 | if (IS_ERR(virt_base[i])) | 992 | dev_err(&pdev->dev, "failed to get mem%d resource\n", i); |
| 993 | return ERR_CAST(virt_base[i]); | 993 | return ERR_PTR(-EINVAL); |
| 994 | } | ||
| 995 | virt_base[i] = devm_ioremap(&pdev->dev, res->start, | ||
| 996 | resource_size(res)); | ||
| 997 | if (!virt_base[i]) { | ||
| 998 | dev_err(&pdev->dev, "failed to ioremap %pR\n", res); | ||
| 999 | return ERR_PTR(-EIO); | ||
| 1000 | } | ||
| 994 | } | 1001 | } |
| 995 | 1002 | ||
| 996 | bank = d->pin_banks; | 1003 | bank = d->pin_banks; |
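The switch from devm_ioremap_resource() to a bare devm_ioremap() is not cosmetic: devm_ioremap_resource() also calls devm_request_mem_region(), which claims the range exclusively and fails when two devices map overlapping windows, as Samsung pin-controller register blocks apparently can; plain devm_ioremap() maps without claiming, at the cost of the explicit NULL checks now done by hand.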
diff --git a/drivers/pinctrl/ti/Kconfig b/drivers/pinctrl/ti/Kconfig index 815a88673d38..542077069391 100644 --- a/drivers/pinctrl/ti/Kconfig +++ b/drivers/pinctrl/ti/Kconfig | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | config PINCTRL_TI_IODELAY | 1 | config PINCTRL_TI_IODELAY |
| 2 | tristate "TI IODelay Module pinconf driver" | 2 | tristate "TI IODelay Module pinconf driver" |
| 3 | depends on OF | 3 | depends on OF && (SOC_DRA7XX || COMPILE_TEST) |
| 4 | select GENERIC_PINCTRL_GROUPS | 4 | select GENERIC_PINCTRL_GROUPS |
| 5 | select GENERIC_PINMUX_FUNCTIONS | 5 | select GENERIC_PINMUX_FUNCTIONS |
| 6 | select GENERIC_PINCONF | 6 | select GENERIC_PINCONF |
diff --git a/drivers/ptp/ptp_kvm.c b/drivers/ptp/ptp_kvm.c index 09b4df74291e..bb865695d7a6 100644 --- a/drivers/ptp/ptp_kvm.c +++ b/drivers/ptp/ptp_kvm.c | |||
| @@ -193,10 +193,7 @@ static int __init ptp_kvm_init(void) | |||
| 193 | 193 | ||
| 194 | kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL); | 194 | kvm_ptp_clock.ptp_clock = ptp_clock_register(&kvm_ptp_clock.caps, NULL); |
| 195 | 195 | ||
| 196 | if (IS_ERR(kvm_ptp_clock.ptp_clock)) | 196 | return PTR_ERR_OR_ZERO(kvm_ptp_clock.ptp_clock); |
| 197 | return PTR_ERR(kvm_ptp_clock.ptp_clock); | ||
| 198 | |||
| 199 | return 0; | ||
| 200 | } | 197 | } |
| 201 | 198 | ||
| 202 | module_init(ptp_kvm_init); | 199 | module_init(ptp_kvm_init); |
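PTR_ERR_OR_ZERO() is the canonical contraction of this open-coded check; include/linux/err.h defines it essentially as:

static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}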
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index 65f86bc24c07..1dc43fc5f65f 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig | |||
| @@ -76,7 +76,7 @@ config QCOM_ADSP_PIL | |||
| 76 | depends on OF && ARCH_QCOM | 76 | depends on OF && ARCH_QCOM |
| 77 | depends on REMOTEPROC | 77 | depends on REMOTEPROC |
| 78 | depends on QCOM_SMEM | 78 | depends on QCOM_SMEM |
| 79 | depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) | 79 | depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) |
| 80 | select MFD_SYSCON | 80 | select MFD_SYSCON |
| 81 | select QCOM_MDT_LOADER | 81 | select QCOM_MDT_LOADER |
| 82 | select QCOM_RPROC_COMMON | 82 | select QCOM_RPROC_COMMON |
| @@ -93,7 +93,7 @@ config QCOM_Q6V5_PIL | |||
| 93 | depends on OF && ARCH_QCOM | 93 | depends on OF && ARCH_QCOM |
| 94 | depends on QCOM_SMEM | 94 | depends on QCOM_SMEM |
| 95 | depends on REMOTEPROC | 95 | depends on REMOTEPROC |
| 96 | depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) | 96 | depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) |
| 97 | select MFD_SYSCON | 97 | select MFD_SYSCON |
| 98 | select QCOM_RPROC_COMMON | 98 | select QCOM_RPROC_COMMON |
| 99 | select QCOM_SCM | 99 | select QCOM_SCM |
| @@ -104,7 +104,7 @@ config QCOM_Q6V5_PIL | |||
| 104 | config QCOM_WCNSS_PIL | 104 | config QCOM_WCNSS_PIL |
| 105 | tristate "Qualcomm WCNSS Peripheral Image Loader" | 105 | tristate "Qualcomm WCNSS Peripheral Image Loader" |
| 106 | depends on OF && ARCH_QCOM | 106 | depends on OF && ARCH_QCOM |
| 107 | depends on QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n) | 107 | depends on RPMSG_QCOM_SMD || QCOM_SMD || (COMPILE_TEST && QCOM_SMD=n && RPMSG_QCOM_SMD=n) |
| 108 | depends on QCOM_SMEM | 108 | depends on QCOM_SMEM |
| 109 | depends on REMOTEPROC | 109 | depends on REMOTEPROC |
| 110 | select QCOM_MDT_LOADER | 110 | select QCOM_MDT_LOADER |
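The reworked dependency accepts either SMD transport, the legacy QCOM_SMD driver or its rpmsg-based replacement RPMSG_QCOM_SMD. The COMPILE_TEST arm additionally requires both to be n, so the SMD calls fall back to the header stubs and a built-in PIL driver can never reference a modular transport.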
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 4bf55b5d78be..3c52867dfe28 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
| @@ -1253,20 +1253,6 @@ config SCSI_LPFC_DEBUG_FS | |||
| 1253 | This makes debugging information from the lpfc driver | 1253 | This makes debugging information from the lpfc driver |
| 1254 | available via the debugfs filesystem. | 1254 | available via the debugfs filesystem. |
| 1255 | 1255 | ||
| 1256 | config LPFC_NVME_INITIATOR | ||
| 1257 | bool "Emulex LightPulse Fibre Channel NVME Initiator Support" | ||
| 1258 | depends on SCSI_LPFC && NVME_FC | ||
| 1259 | ---help--- | ||
| 1260 | This enables NVME Initiator support in the Emulex lpfc driver. | ||
| 1261 | |||
| 1262 | config LPFC_NVME_TARGET | ||
| 1263 | bool "Emulex LightPulse Fibre Channel NVME Initiator Support" | ||
| 1264 | depends on SCSI_LPFC && NVME_TARGET_FC | ||
| 1265 | ---help--- | ||
| 1266 | This enables NVME Target support in the Emulex lpfc driver. | ||
| 1267 | Target enablement must still be enabled on a per adapter | ||
| 1268 | basis by module parameters. | ||
| 1269 | |||
| 1270 | config SCSI_SIM710 | 1256 | config SCSI_SIM710 |
| 1271 | tristate "Simple 53c710 SCSI support (Compaq, NCR machines)" | 1257 | tristate "Simple 53c710 SCSI support (Compaq, NCR machines)" |
| 1272 | depends on (EISA || MCA) && SCSI | 1258 | depends on (EISA || MCA) && SCSI |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 524a0c755ed7..0d0be7754a65 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
| @@ -2956,7 +2956,7 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, | |||
| 2956 | /* fill_cmd can't fail here, no data buffer to map. */ | 2956 | /* fill_cmd can't fail here, no data buffer to map. */ |
| 2957 | (void) fill_cmd(c, reset_type, h, NULL, 0, 0, | 2957 | (void) fill_cmd(c, reset_type, h, NULL, 0, 0, |
| 2958 | scsi3addr, TYPE_MSG); | 2958 | scsi3addr, TYPE_MSG); |
| 2959 | rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, DEFAULT_TIMEOUT); | 2959 | rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT); |
| 2960 | if (rc) { | 2960 | if (rc) { |
| 2961 | dev_warn(&h->pdev->dev, "Failed to send reset command\n"); | 2961 | dev_warn(&h->pdev->dev, "Failed to send reset command\n"); |
| 2962 | goto out; | 2962 | goto out; |
| @@ -3714,7 +3714,7 @@ exit_failed: | |||
| 3714 | * # (integer code indicating one of several NOT READY states | 3714 | * # (integer code indicating one of several NOT READY states |
| 3715 | * describing why a volume is to be kept offline) | 3715 | * describing why a volume is to be kept offline) |
| 3716 | */ | 3716 | */ |
| 3717 | static int hpsa_volume_offline(struct ctlr_info *h, | 3717 | static unsigned char hpsa_volume_offline(struct ctlr_info *h, |
| 3718 | unsigned char scsi3addr[]) | 3718 | unsigned char scsi3addr[]) |
| 3719 | { | 3719 | { |
| 3720 | struct CommandList *c; | 3720 | struct CommandList *c; |
| @@ -3735,7 +3735,7 @@ static int hpsa_volume_offline(struct ctlr_info *h, | |||
| 3735 | DEFAULT_TIMEOUT); | 3735 | DEFAULT_TIMEOUT); |
| 3736 | if (rc) { | 3736 | if (rc) { |
| 3737 | cmd_free(h, c); | 3737 | cmd_free(h, c); |
| 3738 | return 0; | 3738 | return HPSA_VPD_LV_STATUS_UNSUPPORTED; |
| 3739 | } | 3739 | } |
| 3740 | sense = c->err_info->SenseInfo; | 3740 | sense = c->err_info->SenseInfo; |
| 3741 | if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) | 3741 | if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo)) |
| @@ -3746,19 +3746,13 @@ static int hpsa_volume_offline(struct ctlr_info *h, | |||
| 3746 | cmd_status = c->err_info->CommandStatus; | 3746 | cmd_status = c->err_info->CommandStatus; |
| 3747 | scsi_status = c->err_info->ScsiStatus; | 3747 | scsi_status = c->err_info->ScsiStatus; |
| 3748 | cmd_free(h, c); | 3748 | cmd_free(h, c); |
| 3749 | /* Is the volume 'not ready'? */ | ||
| 3750 | if (cmd_status != CMD_TARGET_STATUS || | ||
| 3751 | scsi_status != SAM_STAT_CHECK_CONDITION || | ||
| 3752 | sense_key != NOT_READY || | ||
| 3753 | asc != ASC_LUN_NOT_READY) { | ||
| 3754 | return 0; | ||
| 3755 | } | ||
| 3756 | 3749 | ||
| 3757 | /* Determine the reason for not ready state */ | 3750 | /* Determine the reason for not ready state */ |
| 3758 | ldstat = hpsa_get_volume_status(h, scsi3addr); | 3751 | ldstat = hpsa_get_volume_status(h, scsi3addr); |
| 3759 | 3752 | ||
| 3760 | /* Keep volume offline in certain cases: */ | 3753 | /* Keep volume offline in certain cases: */ |
| 3761 | switch (ldstat) { | 3754 | switch (ldstat) { |
| 3755 | case HPSA_LV_FAILED: | ||
| 3762 | case HPSA_LV_UNDERGOING_ERASE: | 3756 | case HPSA_LV_UNDERGOING_ERASE: |
| 3763 | case HPSA_LV_NOT_AVAILABLE: | 3757 | case HPSA_LV_NOT_AVAILABLE: |
| 3764 | case HPSA_LV_UNDERGOING_RPI: | 3758 | case HPSA_LV_UNDERGOING_RPI: |
| @@ -3780,7 +3774,7 @@ static int hpsa_volume_offline(struct ctlr_info *h, | |||
| 3780 | default: | 3774 | default: |
| 3781 | break; | 3775 | break; |
| 3782 | } | 3776 | } |
| 3783 | return 0; | 3777 | return HPSA_LV_OK; |
| 3784 | } | 3778 | } |
| 3785 | 3779 | ||
| 3786 | /* | 3780 | /* |
| @@ -3853,10 +3847,10 @@ static int hpsa_update_device_info(struct ctlr_info *h, | |||
| 3853 | /* Do an inquiry to the device to see what it is. */ | 3847 | /* Do an inquiry to the device to see what it is. */ |
| 3854 | if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, | 3848 | if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, |
| 3855 | (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { | 3849 | (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { |
| 3856 | /* Inquiry failed (msg printed already) */ | ||
| 3857 | dev_err(&h->pdev->dev, | 3850 | dev_err(&h->pdev->dev, |
| 3858 | "hpsa_update_device_info: inquiry failed\n"); | 3851 | "%s: inquiry failed, device will be skipped.\n", |
| 3859 | rc = -EIO; | 3852 | __func__); |
| 3853 | rc = HPSA_INQUIRY_FAILED; | ||
| 3860 | goto bail_out; | 3854 | goto bail_out; |
| 3861 | } | 3855 | } |
| 3862 | 3856 | ||
| @@ -3885,15 +3879,19 @@ static int hpsa_update_device_info(struct ctlr_info *h, | |||
| 3885 | if ((this_device->devtype == TYPE_DISK || | 3879 | if ((this_device->devtype == TYPE_DISK || |
| 3886 | this_device->devtype == TYPE_ZBC) && | 3880 | this_device->devtype == TYPE_ZBC) && |
| 3887 | is_logical_dev_addr_mode(scsi3addr)) { | 3881 | is_logical_dev_addr_mode(scsi3addr)) { |
| 3888 | int volume_offline; | 3882 | unsigned char volume_offline; |
| 3889 | 3883 | ||
| 3890 | hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); | 3884 | hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); |
| 3891 | if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) | 3885 | if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) |
| 3892 | hpsa_get_ioaccel_status(h, scsi3addr, this_device); | 3886 | hpsa_get_ioaccel_status(h, scsi3addr, this_device); |
| 3893 | volume_offline = hpsa_volume_offline(h, scsi3addr); | 3887 | volume_offline = hpsa_volume_offline(h, scsi3addr); |
| 3894 | if (volume_offline < 0 || volume_offline > 0xff) | 3888 | if (volume_offline == HPSA_LV_FAILED) { |
| 3895 | volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; | 3889 | rc = HPSA_LV_FAILED; |
| 3896 | this_device->volume_offline = volume_offline & 0xff; | 3890 | dev_err(&h->pdev->dev, |
| 3891 | "%s: LV failed, device will be skipped.\n", | ||
| 3892 | __func__); | ||
| 3893 | goto bail_out; | ||
| 3894 | } | ||
| 3897 | } else { | 3895 | } else { |
| 3898 | this_device->raid_level = RAID_UNKNOWN; | 3896 | this_device->raid_level = RAID_UNKNOWN; |
| 3899 | this_device->offload_config = 0; | 3897 | this_device->offload_config = 0; |
| @@ -4379,8 +4377,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h) | |||
| 4379 | goto out; | 4377 | goto out; |
| 4380 | } | 4378 | } |
| 4381 | if (rc) { | 4379 | if (rc) { |
| 4382 | dev_warn(&h->pdev->dev, | 4380 | h->drv_req_rescan = 1; |
| 4383 | "Inquiry failed, skipping device.\n"); | ||
| 4384 | continue; | 4381 | continue; |
| 4385 | } | 4382 | } |
| 4386 | 4383 | ||
| @@ -5558,7 +5555,7 @@ static void hpsa_scan_complete(struct ctlr_info *h) | |||
| 5558 | 5555 | ||
| 5559 | spin_lock_irqsave(&h->scan_lock, flags); | 5556 | spin_lock_irqsave(&h->scan_lock, flags); |
| 5560 | h->scan_finished = 1; | 5557 | h->scan_finished = 1; |
| 5561 | wake_up_all(&h->scan_wait_queue); | 5558 | wake_up(&h->scan_wait_queue); |
| 5562 | spin_unlock_irqrestore(&h->scan_lock, flags); | 5559 | spin_unlock_irqrestore(&h->scan_lock, flags); |
| 5563 | } | 5560 | } |
| 5564 | 5561 | ||
| @@ -5576,11 +5573,23 @@ static void hpsa_scan_start(struct Scsi_Host *sh) | |||
| 5576 | if (unlikely(lockup_detected(h))) | 5573 | if (unlikely(lockup_detected(h))) |
| 5577 | return hpsa_scan_complete(h); | 5574 | return hpsa_scan_complete(h); |
| 5578 | 5575 | ||
| 5576 | /* | ||
| 5577 | * If a scan is already waiting to run, no need to add another | ||
| 5578 | */ | ||
| 5579 | spin_lock_irqsave(&h->scan_lock, flags); | ||
| 5580 | if (h->scan_waiting) { | ||
| 5581 | spin_unlock_irqrestore(&h->scan_lock, flags); | ||
| 5582 | return; | ||
| 5583 | } | ||
| 5584 | |||
| 5585 | spin_unlock_irqrestore(&h->scan_lock, flags); | ||
| 5586 | |||
| 5579 | /* wait until any scan already in progress is finished. */ | 5587 | /* wait until any scan already in progress is finished. */ |
| 5580 | while (1) { | 5588 | while (1) { |
| 5581 | spin_lock_irqsave(&h->scan_lock, flags); | 5589 | spin_lock_irqsave(&h->scan_lock, flags); |
| 5582 | if (h->scan_finished) | 5590 | if (h->scan_finished) |
| 5583 | break; | 5591 | break; |
| 5592 | h->scan_waiting = 1; | ||
| 5584 | spin_unlock_irqrestore(&h->scan_lock, flags); | 5593 | spin_unlock_irqrestore(&h->scan_lock, flags); |
| 5585 | wait_event(h->scan_wait_queue, h->scan_finished); | 5594 | wait_event(h->scan_wait_queue, h->scan_finished); |
| 5586 | /* Note: We don't need to worry about a race between this | 5595 | /* Note: We don't need to worry about a race between this |
| @@ -5590,6 +5599,7 @@ static void hpsa_scan_start(struct Scsi_Host *sh) | |||
| 5590 | */ | 5599 | */ |
| 5591 | } | 5600 | } |
| 5592 | h->scan_finished = 0; /* mark scan as in progress */ | 5601 | h->scan_finished = 0; /* mark scan as in progress */ |
| 5602 | h->scan_waiting = 0; | ||
| 5593 | spin_unlock_irqrestore(&h->scan_lock, flags); | 5603 | spin_unlock_irqrestore(&h->scan_lock, flags); |
| 5594 | 5604 | ||
| 5595 | if (unlikely(lockup_detected(h))) | 5605 | if (unlikely(lockup_detected(h))) |
| @@ -8792,6 +8802,7 @@ reinit_after_soft_reset: | |||
| 8792 | init_waitqueue_head(&h->event_sync_wait_queue); | 8802 | init_waitqueue_head(&h->event_sync_wait_queue); |
| 8793 | mutex_init(&h->reset_mutex); | 8803 | mutex_init(&h->reset_mutex); |
| 8794 | h->scan_finished = 1; /* no scan currently in progress */ | 8804 | h->scan_finished = 1; /* no scan currently in progress */ |
| 8805 | h->scan_waiting = 0; | ||
| 8795 | 8806 | ||
| 8796 | pci_set_drvdata(pdev, h); | 8807 | pci_set_drvdata(pdev, h); |
| 8797 | h->ndevices = 0; | 8808 | h->ndevices = 0; |
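The hpsa change above serializes rescans with a single-waiter rule: if one scan is running and a second is already queued behind it, any further hpsa_scan_start() call returns immediately, since the queued scan will observe the same device state anyway. A minimal sketch of the pattern, assuming only a generic controller struct (names here are illustrative, not the driver's):

	#include <linux/spinlock.h>
	#include <linux/wait.h>

	struct ctlr {
		spinlock_t scan_lock;
		wait_queue_head_t scan_wq;
		int scan_finished;	/* no scan currently running */
		u8 scan_waiting:1;	/* one scan already queued   */
	};

	static void scan_start(struct ctlr *c)
	{
		unsigned long flags;

		/* A queued scan will rescan everything this request
		 * would have seen, so a second waiter is pointless. */
		spin_lock_irqsave(&c->scan_lock, flags);
		if (c->scan_waiting) {
			spin_unlock_irqrestore(&c->scan_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&c->scan_lock, flags);

		/* Wait for the scan in flight, advertising ourselves
		 * as the (single) waiter. */
		for (;;) {
			spin_lock_irqsave(&c->scan_lock, flags);
			if (c->scan_finished)
				break;
			c->scan_waiting = 1;
			spin_unlock_irqrestore(&c->scan_lock, flags);
			wait_event(c->scan_wq, c->scan_finished);
		}
		c->scan_finished = 0;	/* scan now in progress */
		c->scan_waiting = 0;
		spin_unlock_irqrestore(&c->scan_lock, flags);

		/* ... rescan, then set scan_finished = 1 under the lock
		 * and wake_up(&c->scan_wq), as hpsa_scan_complete() does. */
	}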
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h index bf6cdc106654..6f04f2ad4125 100644 --- a/drivers/scsi/hpsa.h +++ b/drivers/scsi/hpsa.h | |||
| @@ -201,6 +201,7 @@ struct ctlr_info { | |||
| 201 | dma_addr_t errinfo_pool_dhandle; | 201 | dma_addr_t errinfo_pool_dhandle; |
| 202 | unsigned long *cmd_pool_bits; | 202 | unsigned long *cmd_pool_bits; |
| 203 | int scan_finished; | 203 | int scan_finished; |
| 204 | u8 scan_waiting : 1; | ||
| 204 | spinlock_t scan_lock; | 205 | spinlock_t scan_lock; |
| 205 | wait_queue_head_t scan_wait_queue; | 206 | wait_queue_head_t scan_wait_queue; |
| 206 | 207 | ||
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h index a584cdf07058..5961705eef76 100644 --- a/drivers/scsi/hpsa_cmd.h +++ b/drivers/scsi/hpsa_cmd.h | |||
| @@ -156,6 +156,7 @@ | |||
| 156 | #define CFGTBL_BusType_Fibre2G 0x00000200l | 156 | #define CFGTBL_BusType_Fibre2G 0x00000200l |
| 157 | 157 | ||
| 158 | /* VPD Inquiry types */ | 158 | /* VPD Inquiry types */ |
| 159 | #define HPSA_INQUIRY_FAILED 0x02 | ||
| 159 | #define HPSA_VPD_SUPPORTED_PAGES 0x00 | 160 | #define HPSA_VPD_SUPPORTED_PAGES 0x00 |
| 160 | #define HPSA_VPD_LV_DEVICE_ID 0x83 | 161 | #define HPSA_VPD_LV_DEVICE_ID 0x83 |
| 161 | #define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1 | 162 | #define HPSA_VPD_LV_DEVICE_GEOMETRY 0xC1 |
| @@ -166,6 +167,7 @@ | |||
| 166 | /* Logical volume states */ | 167 | /* Logical volume states */ |
| 167 | #define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff | 168 | #define HPSA_VPD_LV_STATUS_UNSUPPORTED 0xff |
| 168 | #define HPSA_LV_OK 0x0 | 169 | #define HPSA_LV_OK 0x0 |
| 170 | #define HPSA_LV_FAILED 0x01 | ||
| 169 | #define HPSA_LV_NOT_AVAILABLE 0x0b | 171 | #define HPSA_LV_NOT_AVAILABLE 0x0b |
| 170 | #define HPSA_LV_UNDERGOING_ERASE 0x0F | 172 | #define HPSA_LV_UNDERGOING_ERASE 0x0F |
| 171 | #define HPSA_LV_UNDERGOING_RPI 0x12 | 173 | #define HPSA_LV_UNDERGOING_RPI 0x12 |
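With hpsa_volume_offline() returning an unsigned char, its result is always a one-byte HPSA_LV_*/HPSA_VPD_* code, so the old clamp of an out-of-range int disappears. A hedged sketch of the caller-side branching implied by the hpsa.c hunk above; the surrounding device-update flow is abbreviated:

	unsigned char volume_offline = hpsa_volume_offline(h, scsi3addr);

	switch (volume_offline) {
	case HPSA_LV_OK:
		break;				/* volume is usable         */
	case HPSA_LV_FAILED:
		rc = HPSA_LV_FAILED;		/* skip the device entirely */
		goto bail_out;
	default:
		/* Any other NOT READY reason: keep the volume offline and
		 * remember why (mirrors the removed "& 0xff" assignment,
		 * which the one-byte return type makes unnecessary). */
		this_device->volume_offline = volume_offline;
		break;
	}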
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 5c3be3e6f5e2..22819afbaef5 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
| @@ -3315,9 +3315,9 @@ LPFC_ATTR_R(nvmet_mrq_post, LPFC_DEF_MRQ_POST, | |||
| 3315 | * lpfc_enable_fc4_type: Defines what FC4 types are supported. | 3315 | * lpfc_enable_fc4_type: Defines what FC4 types are supported. |
| 3316 | * Supported Values: 1 - register just FCP | 3316 | * Supported Values: 1 - register just FCP |
| 3317 | * 3 - register both FCP and NVME | 3317 | * 3 - register both FCP and NVME |
| 3318 | * Supported values are [1,3]. Default value is 3 | 3318 | * Supported values are [1,3]. Default value is 1 |
| 3319 | */ | 3319 | */ |
| 3320 | LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH, | 3320 | LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP, |
| 3321 | LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, | 3321 | LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH, |
| 3322 | "Define fc4 type to register with fabric."); | 3322 | "Define fc4 type to register with fabric."); |
| 3323 | 3323 | ||
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 2697d49da4d7..6cc561b04211 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
| @@ -5891,10 +5891,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
| 5891 | /* Check to see if it matches any module parameter */ | 5891 | /* Check to see if it matches any module parameter */ |
| 5892 | for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { | 5892 | for (i = 0; i < lpfc_enable_nvmet_cnt; i++) { |
| 5893 | if (wwn == lpfc_enable_nvmet[i]) { | 5893 | if (wwn == lpfc_enable_nvmet[i]) { |
| 5894 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) | ||
| 5894 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 5895 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 5895 | "6017 NVME Target %016llx\n", | 5896 | "6017 NVME Target %016llx\n", |
| 5896 | wwn); | 5897 | wwn); |
| 5897 | phba->nvmet_support = 1; /* a match */ | 5898 | phba->nvmet_support = 1; /* a match */ |
| 5899 | #else | ||
| 5900 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | ||
| 5901 | "6021 Can't enable NVME Target." | ||
| 5902 | " NVME_TARGET_FC infrastructure" | ||
| 5903 | " is not in kernel\n"); | ||
| 5904 | #endif | ||
| 5898 | } | 5905 | } |
| 5899 | } | 5906 | } |
| 5900 | } | 5907 | } |
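The lpfc hunks swap private CONFIG_LPFC_NVME_* ifdefs for IS_ENABLED() tests on the generic NVMe-over-FC symbols. IS_ENABLED(CONFIG_FOO) is true for both =y and =m, whereas #ifdef CONFIG_FOO misses the modular build (which defines CONFIG_FOO_MODULE instead). A small sketch of the idiom; do_register() is a placeholder, not a real transport call:

	#include <linux/kconfig.h>
	#include <linux/printk.h>

	static int register_nvmet_port(struct device *dev)
	{
	#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
		/* transport is built-in or modular: safe to call into it */
		return do_register(dev);
	#else
		/* compiled out: fail loudly rather than reference symbols
		 * that do not exist in this kernel */
		pr_err("NVME_TARGET_FC infrastructure is not in kernel\n");
		return -ENODEV;
	#endif
	}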
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 0a4c19081409..0024de1c6c1f 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c | |||
| @@ -2149,7 +2149,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) | |||
| 2149 | /* localport is allocated from the stack, but the registration | 2149 | /* localport is allocated from the stack, but the registration |
| 2150 | * call allocates heap memory as well as the private area. | 2150 | * call allocates heap memory as well as the private area. |
| 2151 | */ | 2151 | */ |
| 2152 | #ifdef CONFIG_LPFC_NVME_INITIATOR | 2152 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
| 2153 | ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, | 2153 | ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template, |
| 2154 | &vport->phba->pcidev->dev, &localport); | 2154 | &vport->phba->pcidev->dev, &localport); |
| 2155 | #else | 2155 | #else |
| @@ -2190,7 +2190,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) | |||
| 2190 | void | 2190 | void |
| 2191 | lpfc_nvme_destroy_localport(struct lpfc_vport *vport) | 2191 | lpfc_nvme_destroy_localport(struct lpfc_vport *vport) |
| 2192 | { | 2192 | { |
| 2193 | #ifdef CONFIG_LPFC_NVME_INITIATOR | 2193 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
| 2194 | struct nvme_fc_local_port *localport; | 2194 | struct nvme_fc_local_port *localport; |
| 2195 | struct lpfc_nvme_lport *lport; | 2195 | struct lpfc_nvme_lport *lport; |
| 2196 | struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL; | 2196 | struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL; |
| @@ -2274,7 +2274,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport) | |||
| 2274 | int | 2274 | int |
| 2275 | lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | 2275 | lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
| 2276 | { | 2276 | { |
| 2277 | #ifdef CONFIG_LPFC_NVME_INITIATOR | 2277 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
| 2278 | int ret = 0; | 2278 | int ret = 0; |
| 2279 | struct nvme_fc_local_port *localport; | 2279 | struct nvme_fc_local_port *localport; |
| 2280 | struct lpfc_nvme_lport *lport; | 2280 | struct lpfc_nvme_lport *lport; |
| @@ -2403,7 +2403,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
| 2403 | void | 2403 | void |
| 2404 | lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | 2404 | lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) |
| 2405 | { | 2405 | { |
| 2406 | #ifdef CONFIG_LPFC_NVME_INITIATOR | 2406 | #if (IS_ENABLED(CONFIG_NVME_FC)) |
| 2407 | int ret; | 2407 | int ret; |
| 2408 | struct nvme_fc_local_port *localport; | 2408 | struct nvme_fc_local_port *localport; |
| 2409 | struct lpfc_nvme_lport *lport; | 2409 | struct lpfc_nvme_lport *lport; |
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index b7739a554fe0..7ca868f394da 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c | |||
| @@ -671,7 +671,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) | |||
| 671 | lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | | 671 | lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP | |
| 672 | NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED; | 672 | NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED; |
| 673 | 673 | ||
| 674 | #ifdef CONFIG_LPFC_NVME_TARGET | 674 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
| 675 | error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, | 675 | error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate, |
| 676 | &phba->pcidev->dev, | 676 | &phba->pcidev->dev, |
| 677 | &phba->targetport); | 677 | &phba->targetport); |
| @@ -756,7 +756,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, | |||
| 756 | void | 756 | void |
| 757 | lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) | 757 | lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) |
| 758 | { | 758 | { |
| 759 | #ifdef CONFIG_LPFC_NVME_TARGET | 759 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
| 760 | struct lpfc_nvmet_tgtport *tgtp; | 760 | struct lpfc_nvmet_tgtport *tgtp; |
| 761 | 761 | ||
| 762 | if (phba->nvmet_support == 0) | 762 | if (phba->nvmet_support == 0) |
| @@ -788,7 +788,7 @@ static void | |||
| 788 | lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, | 788 | lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, |
| 789 | struct hbq_dmabuf *nvmebuf) | 789 | struct hbq_dmabuf *nvmebuf) |
| 790 | { | 790 | { |
| 791 | #ifdef CONFIG_LPFC_NVME_TARGET | 791 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
| 792 | struct lpfc_nvmet_tgtport *tgtp; | 792 | struct lpfc_nvmet_tgtport *tgtp; |
| 793 | struct fc_frame_header *fc_hdr; | 793 | struct fc_frame_header *fc_hdr; |
| 794 | struct lpfc_nvmet_rcv_ctx *ctxp; | 794 | struct lpfc_nvmet_rcv_ctx *ctxp; |
| @@ -891,7 +891,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, | |||
| 891 | struct rqb_dmabuf *nvmebuf, | 891 | struct rqb_dmabuf *nvmebuf, |
| 892 | uint64_t isr_timestamp) | 892 | uint64_t isr_timestamp) |
| 893 | { | 893 | { |
| 894 | #ifdef CONFIG_LPFC_NVME_TARGET | 894 | #if (IS_ENABLED(CONFIG_NVME_TARGET_FC)) |
| 895 | struct lpfc_nvmet_rcv_ctx *ctxp; | 895 | struct lpfc_nvmet_rcv_ctx *ctxp; |
| 896 | struct lpfc_nvmet_tgtport *tgtp; | 896 | struct lpfc_nvmet_tgtport *tgtp; |
| 897 | struct fc_frame_header *fc_hdr; | 897 | struct fc_frame_header *fc_hdr; |
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index e7e5974e1a2c..2b209bbb4c91 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
| @@ -35,8 +35,8 @@ | |||
| 35 | /* | 35 | /* |
| 36 | * MegaRAID SAS Driver meta data | 36 | * MegaRAID SAS Driver meta data |
| 37 | */ | 37 | */ |
| 38 | #define MEGASAS_VERSION "07.701.16.00-rc1" | 38 | #define MEGASAS_VERSION "07.701.17.00-rc1" |
| 39 | #define MEGASAS_RELDATE "February 2, 2017" | 39 | #define MEGASAS_RELDATE "March 2, 2017" |
| 40 | 40 | ||
| 41 | /* | 41 | /* |
| 42 | * Device IDs | 42 | * Device IDs |
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 7ac9a9ee9bd4..0016f12cc563 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
| @@ -1963,6 +1963,9 @@ scan_target: | |||
| 1963 | if (!mr_device_priv_data) | 1963 | if (!mr_device_priv_data) |
| 1964 | return -ENOMEM; | 1964 | return -ENOMEM; |
| 1965 | sdev->hostdata = mr_device_priv_data; | 1965 | sdev->hostdata = mr_device_priv_data; |
| 1966 | |||
| 1967 | atomic_set(&mr_device_priv_data->r1_ldio_hint, | ||
| 1968 | instance->r1_ldio_hint_default); | ||
| 1966 | return 0; | 1969 | return 0; |
| 1967 | } | 1970 | } |
| 1968 | 1971 | ||
| @@ -5034,10 +5037,12 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) | |||
| 5034 | &instance->irq_context[j]); | 5037 | &instance->irq_context[j]); |
| 5035 | /* Retry irq register for IO_APIC*/ | 5038 | /* Retry irq register for IO_APIC*/ |
| 5036 | instance->msix_vectors = 0; | 5039 | instance->msix_vectors = 0; |
| 5037 | if (is_probe) | 5040 | if (is_probe) { |
| 5041 | pci_free_irq_vectors(instance->pdev); | ||
| 5038 | return megasas_setup_irqs_ioapic(instance); | 5042 | return megasas_setup_irqs_ioapic(instance); |
| 5039 | else | 5043 | } else { |
| 5040 | return -1; | 5044 | return -1; |
| 5045 | } | ||
| 5041 | } | 5046 | } |
| 5042 | } | 5047 | } |
| 5043 | return 0; | 5048 | return 0; |
| @@ -5277,9 +5282,11 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
| 5277 | MPI2_REPLY_POST_HOST_INDEX_OFFSET); | 5282 | MPI2_REPLY_POST_HOST_INDEX_OFFSET); |
| 5278 | } | 5283 | } |
| 5279 | 5284 | ||
| 5280 | i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); | 5285 | if (!instance->msix_vectors) { |
| 5281 | if (i < 0) | 5286 | i = pci_alloc_irq_vectors(instance->pdev, 1, 1, PCI_IRQ_LEGACY); |
| 5282 | goto fail_setup_irqs; | 5287 | if (i < 0) |
| 5288 | goto fail_setup_irqs; | ||
| 5289 | } | ||
| 5283 | 5290 | ||
| 5284 | dev_info(&instance->pdev->dev, | 5291 | dev_info(&instance->pdev->dev, |
| 5285 | "firmware supports msix\t: (%d)", fw_msix_count); | 5292 | "firmware supports msix\t: (%d)", fw_msix_count); |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 29650ba669da..f990ab4d45e1 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
| @@ -2159,7 +2159,7 @@ megasas_set_raidflag_cpu_affinity(union RAID_CONTEXT_UNION *praid_context, | |||
| 2159 | cpu_sel = MR_RAID_CTX_CPUSEL_1; | 2159 | cpu_sel = MR_RAID_CTX_CPUSEL_1; |
| 2160 | 2160 | ||
| 2161 | if (is_stream_detected(rctx_g35) && | 2161 | if (is_stream_detected(rctx_g35) && |
| 2162 | (raid->level == 5) && | 2162 | ((raid->level == 5) || (raid->level == 6)) && |
| 2163 | (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) && | 2163 | (raid->writeMode == MR_RL_WRITE_THROUGH_MODE) && |
| 2164 | (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)) | 2164 | (cpu_sel == MR_RAID_CTX_CPUSEL_FCFS)) |
| 2165 | cpu_sel = MR_RAID_CTX_CPUSEL_0; | 2165 | cpu_sel = MR_RAID_CTX_CPUSEL_0; |
| @@ -2338,7 +2338,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
| 2338 | fp_possible = false; | 2338 | fp_possible = false; |
| 2339 | atomic_dec(&instance->fw_outstanding); | 2339 | atomic_dec(&instance->fw_outstanding); |
| 2340 | } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || | 2340 | } else if ((scsi_buff_len > MR_LARGE_IO_MIN_SIZE) || |
| 2341 | atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint)) { | 2341 | (atomic_dec_if_positive(&mrdev_priv->r1_ldio_hint) > 0)) { |
| 2342 | fp_possible = false; | 2342 | fp_possible = false; |
| 2343 | atomic_dec(&instance->fw_outstanding); | 2343 | atomic_dec(&instance->fw_outstanding); |
| 2344 | if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) | 2344 | if (scsi_buff_len > MR_LARGE_IO_MIN_SIZE) |
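The fusion hunk is a truth-value fix. atomic_dec_if_positive() returns the old value minus one and only stores it when the result is non-negative:

	counter == 2  ->  returns  1, counter becomes 1
	counter == 1  ->  returns  0, counter becomes 0
	counter == 0  ->  returns -1, counter unchanged

Used as a bare boolean, the exhausted case (-1) is truthy, so the branch kept firing after the R1 write-hint credits ran out; comparing with "> 0" takes it only while credits remain. As a tiny sketch:

	#include <linux/atomic.h>

	/* True only while the credit pool stays positive after this
	 * decrement; an exhausted pool no longer counts as a hit. */
	static bool consume_credit(atomic_t *credits)
	{
		return atomic_dec_if_positive(credits) > 0;
	}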
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig index 67c0d5aa3212..de952935b5d2 100644 --- a/drivers/scsi/qla2xxx/Kconfig +++ b/drivers/scsi/qla2xxx/Kconfig | |||
| @@ -3,6 +3,7 @@ config SCSI_QLA_FC | |||
| 3 | depends on PCI && SCSI | 3 | depends on PCI && SCSI |
| 4 | depends on SCSI_FC_ATTRS | 4 | depends on SCSI_FC_ATTRS |
| 5 | select FW_LOADER | 5 | select FW_LOADER |
| 6 | select BTREE | ||
| 6 | ---help--- | 7 | ---help--- |
| 7 | This qla2xxx driver supports all QLogic Fibre Channel | 8 | This qla2xxx driver supports all QLogic Fibre Channel |
| 8 | PCI and PCIe host adapters. | 9 | PCI and PCIe host adapters. |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index f610103994af..435ff7fd6384 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
| @@ -2154,8 +2154,6 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
| 2154 | "Timer for the VP[%d] has stopped\n", vha->vp_idx); | 2154 | "Timer for the VP[%d] has stopped\n", vha->vp_idx); |
| 2155 | } | 2155 | } |
| 2156 | 2156 | ||
| 2157 | BUG_ON(atomic_read(&vha->vref_count)); | ||
| 2158 | |||
| 2159 | qla2x00_free_fcports(vha); | 2157 | qla2x00_free_fcports(vha); |
| 2160 | 2158 | ||
| 2161 | mutex_lock(&ha->vport_lock); | 2159 | mutex_lock(&ha->vport_lock); |
| @@ -2166,7 +2164,7 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) | |||
| 2166 | dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, | 2164 | dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, |
| 2167 | vha->gnl.ldma); | 2165 | vha->gnl.ldma); |
| 2168 | 2166 | ||
| 2169 | if (vha->qpair->vp_idx == vha->vp_idx) { | 2167 | if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { |
| 2170 | if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS) | 2168 | if (qla2xxx_delete_qpair(vha, vha->qpair) != QLA_SUCCESS) |
| 2171 | ql_log(ql_log_warn, vha, 0x7087, | 2169 | ql_log(ql_log_warn, vha, 0x7087, |
| 2172 | "Queue Pair delete failed.\n"); | 2170 | "Queue Pair delete failed.\n"); |
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index e1fc4e66966a..c6bffe929fe7 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h | |||
| @@ -348,6 +348,7 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...); | |||
| 348 | #define ql_dbg_tgt 0x00004000 /* Target mode */ | 348 | #define ql_dbg_tgt 0x00004000 /* Target mode */ |
| 349 | #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ | 349 | #define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */ |
| 350 | #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ | 350 | #define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */ |
| 351 | #define ql_dbg_tgt_dif 0x00000800 /* Target mode dif */ | ||
| 351 | 352 | ||
| 352 | extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, | 353 | extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *, |
| 353 | uint32_t, void **); | 354 | uint32_t, void **); |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 625d438e3cce..ae119018dfaa 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/firmware.h> | 25 | #include <linux/firmware.h> |
| 26 | #include <linux/aer.h> | 26 | #include <linux/aer.h> |
| 27 | #include <linux/mutex.h> | 27 | #include <linux/mutex.h> |
| 28 | #include <linux/btree.h> | ||
| 28 | 29 | ||
| 29 | #include <scsi/scsi.h> | 30 | #include <scsi/scsi.h> |
| 30 | #include <scsi/scsi_host.h> | 31 | #include <scsi/scsi_host.h> |
| @@ -395,11 +396,15 @@ struct srb_iocb { | |||
| 395 | struct completion comp; | 396 | struct completion comp; |
| 396 | } abt; | 397 | } abt; |
| 397 | struct ct_arg ctarg; | 398 | struct ct_arg ctarg; |
| 399 | #define MAX_IOCB_MB_REG 28 | ||
| 400 | #define SIZEOF_IOCB_MB_REG (MAX_IOCB_MB_REG * sizeof(uint16_t)) | ||
| 398 | struct { | 401 | struct { |
| 399 | __le16 in_mb[28]; /* fr fw */ | 402 | __le16 in_mb[MAX_IOCB_MB_REG]; /* from FW */ |
| 400 | __le16 out_mb[28]; /* to fw */ | 403 | __le16 out_mb[MAX_IOCB_MB_REG]; /* to FW */ |
| 401 | void *out, *in; | 404 | void *out, *in; |
| 402 | dma_addr_t out_dma, in_dma; | 405 | dma_addr_t out_dma, in_dma; |
| 406 | struct completion comp; | ||
| 407 | int rc; | ||
| 403 | } mbx; | 408 | } mbx; |
| 404 | struct { | 409 | struct { |
| 405 | struct imm_ntfy_from_isp *ntfy; | 410 | struct imm_ntfy_from_isp *ntfy; |
| @@ -437,7 +442,7 @@ typedef struct srb { | |||
| 437 | uint32_t handle; | 442 | uint32_t handle; |
| 438 | uint16_t flags; | 443 | uint16_t flags; |
| 439 | uint16_t type; | 444 | uint16_t type; |
| 440 | char *name; | 445 | const char *name; |
| 441 | int iocbs; | 446 | int iocbs; |
| 442 | struct qla_qpair *qpair; | 447 | struct qla_qpair *qpair; |
| 443 | u32 gen1; /* scratch */ | 448 | u32 gen1; /* scratch */ |
| @@ -2300,6 +2305,8 @@ typedef struct fc_port { | |||
| 2300 | struct ct_sns_desc ct_desc; | 2305 | struct ct_sns_desc ct_desc; |
| 2301 | enum discovery_state disc_state; | 2306 | enum discovery_state disc_state; |
| 2302 | enum login_state fw_login_state; | 2307 | enum login_state fw_login_state; |
| 2308 | unsigned long plogi_nack_done_deadline; | ||
| 2309 | |||
| 2303 | u32 login_gen, last_login_gen; | 2310 | u32 login_gen, last_login_gen; |
| 2304 | u32 rscn_gen, last_rscn_gen; | 2311 | u32 rscn_gen, last_rscn_gen; |
| 2305 | u32 chip_reset; | 2312 | u32 chip_reset; |
| @@ -3106,6 +3113,16 @@ struct qla_chip_state_84xx { | |||
| 3106 | uint32_t gold_fw_version; | 3113 | uint32_t gold_fw_version; |
| 3107 | }; | 3114 | }; |
| 3108 | 3115 | ||
| 3116 | struct qla_dif_statistics { | ||
| 3117 | uint64_t dif_input_bytes; | ||
| 3118 | uint64_t dif_output_bytes; | ||
| 3119 | uint64_t dif_input_requests; | ||
| 3120 | uint64_t dif_output_requests; | ||
| 3121 | uint32_t dif_guard_err; | ||
| 3122 | uint32_t dif_ref_tag_err; | ||
| 3123 | uint32_t dif_app_tag_err; | ||
| 3124 | }; | ||
| 3125 | |||
| 3109 | struct qla_statistics { | 3126 | struct qla_statistics { |
| 3110 | uint32_t total_isp_aborts; | 3127 | uint32_t total_isp_aborts; |
| 3111 | uint64_t input_bytes; | 3128 | uint64_t input_bytes; |
| @@ -3118,6 +3135,8 @@ struct qla_statistics { | |||
| 3118 | uint32_t stat_max_pend_cmds; | 3135 | uint32_t stat_max_pend_cmds; |
| 3119 | uint32_t stat_max_qfull_cmds_alloc; | 3136 | uint32_t stat_max_qfull_cmds_alloc; |
| 3120 | uint32_t stat_max_qfull_cmds_dropped; | 3137 | uint32_t stat_max_qfull_cmds_dropped; |
| 3138 | |||
| 3139 | struct qla_dif_statistics qla_dif_stats; | ||
| 3121 | }; | 3140 | }; |
| 3122 | 3141 | ||
| 3123 | struct bidi_statistics { | 3142 | struct bidi_statistics { |
| @@ -3125,6 +3144,16 @@ struct bidi_statistics { | |||
| 3125 | unsigned long long transfer_bytes; | 3144 | unsigned long long transfer_bytes; |
| 3126 | }; | 3145 | }; |
| 3127 | 3146 | ||
| 3147 | struct qla_tc_param { | ||
| 3148 | struct scsi_qla_host *vha; | ||
| 3149 | uint32_t blk_sz; | ||
| 3150 | uint32_t bufflen; | ||
| 3151 | struct scatterlist *sg; | ||
| 3152 | struct scatterlist *prot_sg; | ||
| 3153 | struct crc_context *ctx; | ||
| 3154 | uint8_t *ctx_dsd_alloced; | ||
| 3155 | }; | ||
| 3156 | |||
| 3128 | /* Multi queue support */ | 3157 | /* Multi queue support */ |
| 3129 | #define MBC_INITIALIZE_MULTIQ 0x1f | 3158 | #define MBC_INITIALIZE_MULTIQ 0x1f |
| 3130 | #define QLA_QUE_PAGE 0X1000 | 3159 | #define QLA_QUE_PAGE 0X1000 |
| @@ -3272,6 +3301,8 @@ struct qlt_hw_data { | |||
| 3272 | uint8_t tgt_node_name[WWN_SIZE]; | 3301 | uint8_t tgt_node_name[WWN_SIZE]; |
| 3273 | 3302 | ||
| 3274 | struct dentry *dfs_tgt_sess; | 3303 | struct dentry *dfs_tgt_sess; |
| 3304 | struct dentry *dfs_tgt_port_database; | ||
| 3305 | |||
| 3275 | struct list_head q_full_list; | 3306 | struct list_head q_full_list; |
| 3276 | uint32_t num_pend_cmds; | 3307 | uint32_t num_pend_cmds; |
| 3277 | uint32_t num_qfull_cmds_alloc; | 3308 | uint32_t num_qfull_cmds_alloc; |
| @@ -3281,6 +3312,7 @@ struct qlt_hw_data { | |||
| 3281 | spinlock_t sess_lock; | 3312 | spinlock_t sess_lock; |
| 3282 | int rspq_vector_cpuid; | 3313 | int rspq_vector_cpuid; |
| 3283 | spinlock_t atio_lock ____cacheline_aligned; | 3314 | spinlock_t atio_lock ____cacheline_aligned; |
| 3315 | struct btree_head32 host_map; | ||
| 3284 | }; | 3316 | }; |
| 3285 | 3317 | ||
| 3286 | #define MAX_QFULL_CMDS_ALLOC 8192 | 3318 | #define MAX_QFULL_CMDS_ALLOC 8192 |
| @@ -3290,6 +3322,10 @@ struct qlt_hw_data { | |||
| 3290 | 3322 | ||
| 3291 | #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */ | 3323 | #define LEAK_EXCHG_THRESH_HOLD_PERCENT 75 /* 75 percent */ |
| 3292 | 3324 | ||
| 3325 | #define QLA_EARLY_LINKUP(_ha) \ | ||
| 3326 | ((_ha->flags.n2n_ae || _ha->flags.lip_ae) && \ | ||
| 3327 | _ha->flags.fw_started && !_ha->flags.fw_init_done) | ||
| 3328 | |||
| 3293 | /* | 3329 | /* |
| 3294 | * Qlogic host adapter specific data structure. | 3330 | * Qlogic host adapter specific data structure. |
| 3295 | */ | 3331 | */ |
| @@ -3339,7 +3375,11 @@ struct qla_hw_data { | |||
| 3339 | uint32_t fawwpn_enabled:1; | 3375 | uint32_t fawwpn_enabled:1; |
| 3340 | uint32_t exlogins_enabled:1; | 3376 | uint32_t exlogins_enabled:1; |
| 3341 | uint32_t exchoffld_enabled:1; | 3377 | uint32_t exchoffld_enabled:1; |
| 3342 | /* 35 bits */ | 3378 | |
| 3379 | uint32_t lip_ae:1; | ||
| 3380 | uint32_t n2n_ae:1; | ||
| 3381 | uint32_t fw_started:1; | ||
| 3382 | uint32_t fw_init_done:1; | ||
| 3343 | } flags; | 3383 | } flags; |
| 3344 | 3384 | ||
| 3345 | /* This spinlock is used to protect "io transactions", you must | 3385 | /* This spinlock is used to protect "io transactions", you must |
| @@ -3432,7 +3472,6 @@ struct qla_hw_data { | |||
| 3432 | #define P2P_LOOP 3 | 3472 | #define P2P_LOOP 3 |
| 3433 | uint8_t interrupts_on; | 3473 | uint8_t interrupts_on; |
| 3434 | uint32_t isp_abort_cnt; | 3474 | uint32_t isp_abort_cnt; |
| 3435 | |||
| 3436 | #define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532 | 3475 | #define PCI_DEVICE_ID_QLOGIC_ISP2532 0x2532 |
| 3437 | #define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432 | 3476 | #define PCI_DEVICE_ID_QLOGIC_ISP8432 0x8432 |
| 3438 | #define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001 | 3477 | #define PCI_DEVICE_ID_QLOGIC_ISP8001 0x8001 |
| @@ -3913,6 +3952,7 @@ typedef struct scsi_qla_host { | |||
| 3913 | struct list_head vp_fcports; /* list of fcports */ | 3952 | struct list_head vp_fcports; /* list of fcports */ |
| 3914 | struct list_head work_list; | 3953 | struct list_head work_list; |
| 3915 | spinlock_t work_lock; | 3954 | spinlock_t work_lock; |
| 3955 | struct work_struct iocb_work; | ||
| 3916 | 3956 | ||
| 3917 | /* Commonly used flags and state information. */ | 3957 | /* Commonly used flags and state information. */ |
| 3918 | struct Scsi_Host *host; | 3958 | struct Scsi_Host *host; |
| @@ -4076,6 +4116,7 @@ typedef struct scsi_qla_host { | |||
| 4076 | /* Count of active session/fcport */ | 4116 | /* Count of active session/fcport */ |
| 4077 | int fcport_count; | 4117 | int fcport_count; |
| 4078 | wait_queue_head_t fcport_waitQ; | 4118 | wait_queue_head_t fcport_waitQ; |
| 4119 | wait_queue_head_t vref_waitq; | ||
| 4079 | } scsi_qla_host_t; | 4120 | } scsi_qla_host_t; |
| 4080 | 4121 | ||
| 4081 | struct qla27xx_image_status { | 4122 | struct qla27xx_image_status { |
| @@ -4131,14 +4172,17 @@ struct qla2_sgx { | |||
| 4131 | mb(); \ | 4172 | mb(); \ |
| 4132 | if (__vha->flags.delete_progress) { \ | 4173 | if (__vha->flags.delete_progress) { \ |
| 4133 | atomic_dec(&__vha->vref_count); \ | 4174 | atomic_dec(&__vha->vref_count); \ |
| 4175 | wake_up(&__vha->vref_waitq); \ | ||
| 4134 | __bail = 1; \ | 4176 | __bail = 1; \ |
| 4135 | } else { \ | 4177 | } else { \ |
| 4136 | __bail = 0; \ | 4178 | __bail = 0; \ |
| 4137 | } \ | 4179 | } \ |
| 4138 | } while (0) | 4180 | } while (0) |
| 4139 | 4181 | ||
| 4140 | #define QLA_VHA_MARK_NOT_BUSY(__vha) \ | 4182 | #define QLA_VHA_MARK_NOT_BUSY(__vha) do { \ |
| 4141 | atomic_dec(&__vha->vref_count); \ | 4183 | atomic_dec(&__vha->vref_count); \ |
| 4184 | wake_up(&__vha->vref_waitq); \ | ||
| 4185 | } while (0) \ | ||
| 4142 | 4186 | ||
| 4143 | #define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \ | 4187 | #define QLA_QPAIR_MARK_BUSY(__qpair, __bail) do { \ |
| 4144 | atomic_inc(&__qpair->ref_count); \ | 4188 | atomic_inc(&__qpair->ref_count); \ |
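Besides the flag and statistics additions, qla_def.h now embeds a struct btree_head32 host_map in qlt_hw_data (which is why the Kconfig hunk adds "select BTREE"): target mode can key hosts by 24-bit FC port id in a kernel B+tree rather than a flat AL_PA map. A sketch of the lib/btree typed API, hedged since the real qlt_update_host_map() body is not part of this diff:

	#include <linux/btree.h>

	static int host_map_demo(struct btree_head32 *map, u32 port_id,
				 void *host)
	{
		int rc = btree_init32(map);	/* allocates the mempool */
		if (rc)
			return rc;

		rc = btree_insert32(map, port_id, host, GFP_KERNEL);
		if (rc)
			goto out;		/* e.g. -ENOMEM */

		if (btree_lookup32(map, port_id) != host) {  /* O(log n) */
			rc = -ENOENT;
			goto out;
		}
		btree_remove32(map, port_id);
	out:
		btree_destroy32(map);
		return rc;
	}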
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c index b48cce696bac..989e17b0758c 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c | |||
| @@ -19,11 +19,11 @@ qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused) | |||
| 19 | struct qla_hw_data *ha = vha->hw; | 19 | struct qla_hw_data *ha = vha->hw; |
| 20 | unsigned long flags; | 20 | unsigned long flags; |
| 21 | struct fc_port *sess = NULL; | 21 | struct fc_port *sess = NULL; |
| 22 | struct qla_tgt *tgt= vha->vha_tgt.qla_tgt; | 22 | struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; |
| 23 | 23 | ||
| 24 | seq_printf(s, "%s\n",vha->host_str); | 24 | seq_printf(s, "%s\n", vha->host_str); |
| 25 | if (tgt) { | 25 | if (tgt) { |
| 26 | seq_printf(s, "Port ID Port Name Handle\n"); | 26 | seq_puts(s, "Port ID Port Name Handle\n"); |
| 27 | 27 | ||
| 28 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); | 28 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); |
| 29 | list_for_each_entry(sess, &vha->vp_fcports, list) | 29 | list_for_each_entry(sess, &vha->vp_fcports, list) |
| @@ -44,7 +44,6 @@ qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file) | |||
| 44 | return single_open(file, qla2x00_dfs_tgt_sess_show, vha); | 44 | return single_open(file, qla2x00_dfs_tgt_sess_show, vha); |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | |||
| 48 | static const struct file_operations dfs_tgt_sess_ops = { | 47 | static const struct file_operations dfs_tgt_sess_ops = { |
| 49 | .open = qla2x00_dfs_tgt_sess_open, | 48 | .open = qla2x00_dfs_tgt_sess_open, |
| 50 | .read = seq_read, | 49 | .read = seq_read, |
| @@ -53,6 +52,78 @@ static const struct file_operations dfs_tgt_sess_ops = { | |||
| 53 | }; | 52 | }; |
| 54 | 53 | ||
| 55 | static int | 54 | static int |
| 55 | qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) | ||
| 56 | { | ||
| 57 | scsi_qla_host_t *vha = s->private; | ||
| 58 | struct qla_hw_data *ha = vha->hw; | ||
| 59 | struct gid_list_info *gid_list; | ||
| 60 | dma_addr_t gid_list_dma; | ||
| 61 | fc_port_t fc_port; | ||
| 62 | char *id_iter; | ||
| 63 | int rc, i; | ||
| 64 | uint16_t entries, loop_id; | ||
| 65 | struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; | ||
| 66 | |||
| 67 | seq_printf(s, "%s\n", vha->host_str); | ||
| 68 | if (tgt) { | ||
| 69 | gid_list = dma_alloc_coherent(&ha->pdev->dev, | ||
| 70 | qla2x00_gid_list_size(ha), | ||
| 71 | &gid_list_dma, GFP_KERNEL); | ||
| 72 | if (!gid_list) { | ||
| 73 | ql_dbg(ql_dbg_user, vha, 0x705c, | ||
| 74 | "DMA allocation failed for %u\n", | ||
| 75 | qla2x00_gid_list_size(ha)); | ||
| 76 | return 0; | ||
| 77 | } | ||
| 78 | |||
| 79 | rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, | ||
| 80 | &entries); | ||
| 81 | if (rc != QLA_SUCCESS) | ||
| 82 | goto out_free_id_list; | ||
| 83 | |||
| 84 | id_iter = (char *)gid_list; | ||
| 85 | |||
| 86 | seq_puts(s, "Port Name Port ID Loop ID\n"); | ||
| 87 | |||
| 88 | for (i = 0; i < entries; i++) { | ||
| 89 | struct gid_list_info *gid = | ||
| 90 | (struct gid_list_info *)id_iter; | ||
| 91 | loop_id = le16_to_cpu(gid->loop_id); | ||
| 92 | memset(&fc_port, 0, sizeof(fc_port_t)); | ||
| 93 | |||
| 94 | fc_port.loop_id = loop_id; | ||
| 95 | |||
| 96 | rc = qla24xx_gpdb_wait(vha, &fc_port, 0); | ||
| 97 | seq_printf(s, "%8phC %02x%02x%02x %d\n", | ||
| 98 | fc_port.port_name, fc_port.d_id.b.domain, | ||
| 99 | fc_port.d_id.b.area, fc_port.d_id.b.al_pa, | ||
| 100 | fc_port.loop_id); | ||
| 101 | id_iter += ha->gid_list_info_size; | ||
| 102 | } | ||
| 103 | out_free_id_list: | ||
| 104 | dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), | ||
| 105 | gid_list, gid_list_dma); | ||
| 106 | } | ||
| 107 | |||
| 108 | return 0; | ||
| 109 | } | ||
| 110 | |||
| 111 | static int | ||
| 112 | qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file) | ||
| 113 | { | ||
| 114 | scsi_qla_host_t *vha = inode->i_private; | ||
| 115 | |||
| 116 | return single_open(file, qla2x00_dfs_tgt_port_database_show, vha); | ||
| 117 | } | ||
| 118 | |||
| 119 | static const struct file_operations dfs_tgt_port_database_ops = { | ||
| 120 | .open = qla2x00_dfs_tgt_port_database_open, | ||
| 121 | .read = seq_read, | ||
| 122 | .llseek = seq_lseek, | ||
| 123 | .release = single_release, | ||
| 124 | }; | ||
| 125 | |||
| 126 | static int | ||
| 56 | qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) | 127 | qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused) |
| 57 | { | 128 | { |
| 58 | struct scsi_qla_host *vha = s->private; | 129 | struct scsi_qla_host *vha = s->private; |
| @@ -114,6 +185,21 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused) | |||
| 114 | seq_printf(s, "num Q full sent = %lld\n", | 185 | seq_printf(s, "num Q full sent = %lld\n", |
| 115 | vha->tgt_counters.num_q_full_sent); | 186 | vha->tgt_counters.num_q_full_sent); |
| 116 | 187 | ||
| 188 | /* DIF stats */ | ||
| 189 | seq_printf(s, "DIF Inp Bytes = %lld\n", | ||
| 190 | vha->qla_stats.qla_dif_stats.dif_input_bytes); | ||
| 191 | seq_printf(s, "DIF Outp Bytes = %lld\n", | ||
| 192 | vha->qla_stats.qla_dif_stats.dif_output_bytes); | ||
| 193 | seq_printf(s, "DIF Inp Req = %lld\n", | ||
| 194 | vha->qla_stats.qla_dif_stats.dif_input_requests); | ||
| 195 | seq_printf(s, "DIF Outp Req = %lld\n", | ||
| 196 | vha->qla_stats.qla_dif_stats.dif_output_requests); | ||
| 197 | seq_printf(s, "DIF Guard err = %d\n", | ||
| 198 | vha->qla_stats.qla_dif_stats.dif_guard_err); | ||
| 199 | seq_printf(s, "DIF Ref tag err = %d\n", | ||
| 200 | vha->qla_stats.qla_dif_stats.dif_ref_tag_err); | ||
| 201 | seq_printf(s, "DIF App tag err = %d\n", | ||
| 202 | vha->qla_stats.qla_dif_stats.dif_app_tag_err); | ||
| 117 | return 0; | 203 | return 0; |
| 118 | } | 204 | } |
| 119 | 205 | ||
| @@ -281,6 +367,14 @@ create_nodes: | |||
| 281 | goto out; | 367 | goto out; |
| 282 | } | 368 | } |
| 283 | 369 | ||
| 370 | ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database", | ||
| 371 | S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops); | ||
| 372 | if (!ha->tgt.dfs_tgt_port_database) { | ||
| 373 | ql_log(ql_log_warn, vha, 0xffff, | ||
| 374 | "Unable to create debugFS tgt_port_database node.\n"); | ||
| 375 | goto out; | ||
| 376 | } | ||
| 377 | |||
| 284 | ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, | 378 | ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha, |
| 285 | &dfs_fce_ops); | 379 | &dfs_fce_ops); |
| 286 | if (!ha->dfs_fce) { | 380 | if (!ha->dfs_fce) { |
| @@ -311,6 +405,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha) | |||
| 311 | ha->tgt.dfs_tgt_sess = NULL; | 405 | ha->tgt.dfs_tgt_sess = NULL; |
| 312 | } | 406 | } |
| 313 | 407 | ||
| 408 | if (ha->tgt.dfs_tgt_port_database) { | ||
| 409 | debugfs_remove(ha->tgt.dfs_tgt_port_database); | ||
| 410 | ha->tgt.dfs_tgt_port_database = NULL; | ||
| 411 | } | ||
| 412 | |||
| 314 | if (ha->dfs_fw_resource_cnt) { | 413 | if (ha->dfs_fw_resource_cnt) { |
| 315 | debugfs_remove(ha->dfs_fw_resource_cnt); | 414 | debugfs_remove(ha->dfs_fw_resource_cnt); |
| 316 | ha->dfs_fw_resource_cnt = NULL; | 415 | ha->dfs_fw_resource_cnt = NULL; |
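The new tgt_port_database node is the stock single-shot seq_file recipe: a show() callback, a single_open() wrapper handing over inode->i_private, and file_operations built from seq_read/seq_lseek/single_release. Note the show() above returns 0 even when the DMA buffer cannot be allocated, so a debugfs read then simply yields the header line. The reusable skeleton:

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static int example_show(struct seq_file *s, void *unused)
	{
		/* s->private is whatever was passed to
		 * debugfs_create_file() at creation time. */
		seq_puts(s, "Port Name              Port ID   Loop ID\n");
		/* ... one seq_printf() per entry ... */
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, inode->i_private);
	}

	static const struct file_operations example_fops = {
		.open    = example_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = single_release,
	};

	/* debugfs_create_file("tgt_port_database", S_IRUSR, parent_dir,
	 *		       private_ptr, &example_fops); */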
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index b3d6441d1d90..5b2451745e9f 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h | |||
| @@ -193,6 +193,7 @@ extern int qla24xx_post_upd_fcport_work(struct scsi_qla_host *, fc_port_t *); | |||
| 193 | void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, | 193 | void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, |
| 194 | uint16_t *); | 194 | uint16_t *); |
| 195 | int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); | 195 | int qla24xx_post_gnl_work(struct scsi_qla_host *, fc_port_t *); |
| 196 | int qla24xx_async_abort_cmd(srb_t *); | ||
| 196 | 197 | ||
| 197 | /* | 198 | /* |
| 198 | * Global Functions in qla_mid.c source file. | 199 | * Global Functions in qla_mid.c source file. |
| @@ -256,11 +257,11 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *); | |||
| 256 | extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); | 257 | extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *); |
| 257 | extern int qla2x00_issue_marker(scsi_qla_host_t *, int); | 258 | extern int qla2x00_issue_marker(scsi_qla_host_t *, int); |
| 258 | extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, | 259 | extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *, |
| 259 | uint32_t *, uint16_t, struct qla_tgt_cmd *); | 260 | uint32_t *, uint16_t, struct qla_tc_param *); |
| 260 | extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, | 261 | extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *, |
| 261 | uint32_t *, uint16_t, struct qla_tgt_cmd *); | 262 | uint32_t *, uint16_t, struct qla_tc_param *); |
| 262 | extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, | 263 | extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *, |
| 263 | uint32_t *, uint16_t, struct qla_tgt_cmd *); | 264 | uint32_t *, uint16_t, struct qla_tc_param *); |
| 264 | extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *); | 265 | extern int qla24xx_get_one_block_sg(uint32_t, struct qla2_sgx *, uint32_t *); |
| 265 | extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *); | 266 | extern int qla24xx_configure_prot_mode(srb_t *, uint16_t *); |
| 266 | extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *, | 267 | extern int qla24xx_build_scsi_crc_2_iocbs(srb_t *, |
| @@ -368,7 +369,7 @@ qla2x00_get_link_status(scsi_qla_host_t *, uint16_t, struct link_statistics *, | |||
| 368 | 369 | ||
| 369 | extern int | 370 | extern int |
| 370 | qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, | 371 | qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *, |
| 371 | dma_addr_t, uint); | 372 | dma_addr_t, uint16_t); |
| 372 | 373 | ||
| 373 | extern int qla24xx_abort_command(srb_t *); | 374 | extern int qla24xx_abort_command(srb_t *); |
| 374 | extern int qla24xx_async_abort_command(srb_t *); | 375 | extern int qla24xx_async_abort_command(srb_t *); |
| @@ -472,6 +473,13 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t); | |||
| 472 | extern int | 473 | extern int |
| 473 | qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint); | 474 | qla26xx_dport_diagnostics(scsi_qla_host_t *, void *, uint, uint); |
| 474 | 475 | ||
| 476 | int qla24xx_send_mb_cmd(struct scsi_qla_host *, mbx_cmd_t *); | ||
| 477 | int qla24xx_gpdb_wait(struct scsi_qla_host *, fc_port_t *, u8); | ||
| 478 | int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t, | ||
| 479 | uint16_t *); | ||
| 480 | int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *, | ||
| 481 | struct port_database_24xx *); | ||
| 482 | |||
| 475 | /* | 483 | /* |
| 476 | * Global Function Prototypes in qla_isr.c source file. | 484 | * Global Function Prototypes in qla_isr.c source file. |
| 477 | */ | 485 | */ |
| @@ -846,5 +854,7 @@ extern struct fc_port *qlt_find_sess_invalidate_other(scsi_qla_host_t *, | |||
| 846 | uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **); | 854 | uint64_t wwn, port_id_t port_id, uint16_t loop_id, struct fc_port **); |
| 847 | void qla24xx_delete_sess_fn(struct work_struct *); | 855 | void qla24xx_delete_sess_fn(struct work_struct *); |
| 848 | void qlt_unknown_atio_work_fn(struct work_struct *); | 856 | void qlt_unknown_atio_work_fn(struct work_struct *); |
| 857 | void qlt_update_host_map(struct scsi_qla_host *, port_id_t); | ||
| 858 | void qlt_remove_target_resources(struct qla_hw_data *); | ||
| 849 | 859 | ||
| 850 | #endif /* _QLA_GBL_H */ | 860 | #endif /* _QLA_GBL_H */ |
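The qla24xx_*_wait() prototypes above, together with the "struct completion comp; int rc;" added to the mbx member of srb_iocb in qla_def.h, are the usual recipe for running an async mailbox IOCB synchronously: the completion callback parks the result and signals, the caller sleeps until it does. A minimal sketch of the recipe; queue_mb_iocb() and the struct name are hypothetical:

	#include <linux/completion.h>

	struct mb_req {
		struct completion comp;
		int rc;
	};

	static void mb_done(struct mb_req *req, int res)
	{
		req->rc = res;		/* publish before waking */
		complete(&req->comp);
	}

	static int mb_send_and_wait(struct mb_req *req)
	{
		init_completion(&req->comp);
		/* queue_mb_iocb(req, mb_done);  -- fire the async op */
		wait_for_completion(&req->comp);
		return req->rc;
	}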
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 32fb9007f137..f9d2fe7b1ade 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
| @@ -629,7 +629,6 @@ void qla24xx_async_gpdb_sp_done(void *s, int res) | |||
| 629 | struct srb *sp = s; | 629 | struct srb *sp = s; |
| 630 | struct scsi_qla_host *vha = sp->vha; | 630 | struct scsi_qla_host *vha = sp->vha; |
| 631 | struct qla_hw_data *ha = vha->hw; | 631 | struct qla_hw_data *ha = vha->hw; |
| 632 | uint64_t zero = 0; | ||
| 633 | struct port_database_24xx *pd; | 632 | struct port_database_24xx *pd; |
| 634 | fc_port_t *fcport = sp->fcport; | 633 | fc_port_t *fcport = sp->fcport; |
| 635 | u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb; | 634 | u16 *mb = sp->u.iocb_cmd.u.mbx.in_mb; |
| @@ -649,48 +648,7 @@ void qla24xx_async_gpdb_sp_done(void *s, int res) | |||
| 649 | 648 | ||
| 650 | pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; | 649 | pd = (struct port_database_24xx *)sp->u.iocb_cmd.u.mbx.in; |
| 651 | 650 | ||
| 652 | /* Check for logged in state. */ | 651 | rval = __qla24xx_parse_gpdb(vha, fcport, pd); |
| 653 | if (pd->current_login_state != PDS_PRLI_COMPLETE && | ||
| 654 | pd->last_login_state != PDS_PRLI_COMPLETE) { | ||
| 655 | ql_dbg(ql_dbg_mbx, vha, 0xffff, | ||
| 656 | "Unable to verify login-state (%x/%x) for " | ||
| 657 | "loop_id %x.\n", pd->current_login_state, | ||
| 658 | pd->last_login_state, fcport->loop_id); | ||
| 659 | rval = QLA_FUNCTION_FAILED; | ||
| 660 | goto gpd_error_out; | ||
| 661 | } | ||
| 662 | |||
| 663 | if (fcport->loop_id == FC_NO_LOOP_ID || | ||
| 664 | (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && | ||
| 665 | memcmp(fcport->port_name, pd->port_name, 8))) { | ||
| 666 | /* We lost the device mid way. */ | ||
| 667 | rval = QLA_NOT_LOGGED_IN; | ||
| 668 | goto gpd_error_out; | ||
| 669 | } | ||
| 670 | |||
| 671 | /* Names are little-endian. */ | ||
| 672 | memcpy(fcport->node_name, pd->node_name, WWN_SIZE); | ||
| 673 | |||
| 674 | /* Get port_id of device. */ | ||
| 675 | fcport->d_id.b.domain = pd->port_id[0]; | ||
| 676 | fcport->d_id.b.area = pd->port_id[1]; | ||
| 677 | fcport->d_id.b.al_pa = pd->port_id[2]; | ||
| 678 | fcport->d_id.b.rsvd_1 = 0; | ||
| 679 | |||
| 680 | /* If not target must be initiator or unknown type. */ | ||
| 681 | if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) | ||
| 682 | fcport->port_type = FCT_INITIATOR; | ||
| 683 | else | ||
| 684 | fcport->port_type = FCT_TARGET; | ||
| 685 | |||
| 686 | /* Passback COS information. */ | ||
| 687 | fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? | ||
| 688 | FC_COS_CLASS2 : FC_COS_CLASS3; | ||
| 689 | |||
| 690 | if (pd->prli_svc_param_word_3[0] & BIT_7) { | ||
| 691 | fcport->flags |= FCF_CONF_COMP_SUPPORTED; | ||
| 692 | fcport->conf_compl_supported = 1; | ||
| 693 | } | ||
| 694 | 652 | ||
| 695 | gpd_error_out: | 653 | gpd_error_out: |
| 696 | memset(&ea, 0, sizeof(ea)); | 654 | memset(&ea, 0, sizeof(ea)); |
| @@ -876,10 +834,14 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) | |||
| 876 | fcport->login_retry--; | 834 | fcport->login_retry--; |
| 877 | 835 | ||
| 878 | if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || | 836 | if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || |
| 879 | (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || | ||
| 880 | (fcport->fw_login_state == DSC_LS_PRLI_PEND)) | 837 | (fcport->fw_login_state == DSC_LS_PRLI_PEND)) |
| 881 | return 0; | 838 | return 0; |
| 882 | 839 | ||
| 840 | if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { | ||
| 841 | if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) | ||
| 842 | return 0; | ||
| 843 | } | ||
| 844 | |||
| 883 | /* for pure Target Mode. Login will not be initiated */ | 845 | /* for pure Target Mode. Login will not be initiated */ |
| 884 | if (vha->host->active_mode == MODE_TARGET) | 846 | if (vha->host->active_mode == MODE_TARGET) |
| 885 | return 0; | 847 | return 0; |
| @@ -1041,10 +1003,14 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, | |||
| 1041 | fcport->flags); | 1003 | fcport->flags); |
| 1042 | 1004 | ||
| 1043 | if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || | 1005 | if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || |
| 1044 | (fcport->fw_login_state == DSC_LS_PLOGI_COMP) || | ||
| 1045 | (fcport->fw_login_state == DSC_LS_PRLI_PEND)) | 1006 | (fcport->fw_login_state == DSC_LS_PRLI_PEND)) |
| 1046 | return; | 1007 | return; |
| 1047 | 1008 | ||
| 1009 | if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { | ||
| 1010 | if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) | ||
| 1011 | return; | ||
| 1012 | } | ||
| 1013 | |||
| 1048 | if (fcport->flags & FCF_ASYNC_SENT) { | 1014 | if (fcport->flags & FCF_ASYNC_SENT) { |
| 1049 | fcport->login_retry++; | 1015 | fcport->login_retry++; |
| 1050 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); | 1016 | set_bit(RELOGIN_NEEDED, &vha->dpc_flags); |
| @@ -1258,7 +1224,7 @@ qla24xx_abort_sp_done(void *ptr, int res) | |||
| 1258 | complete(&abt->u.abt.comp); | 1224 | complete(&abt->u.abt.comp); |
| 1259 | } | 1225 | } |
| 1260 | 1226 | ||
| 1261 | static int | 1227 | int |
| 1262 | qla24xx_async_abort_cmd(srb_t *cmd_sp) | 1228 | qla24xx_async_abort_cmd(srb_t *cmd_sp) |
| 1263 | { | 1229 | { |
| 1264 | scsi_qla_host_t *vha = cmd_sp->vha; | 1230 | scsi_qla_host_t *vha = cmd_sp->vha; |
| @@ -3212,6 +3178,7 @@ next_check: | |||
| 3212 | } else { | 3178 | } else { |
| 3213 | ql_dbg(ql_dbg_init, vha, 0x00d3, | 3179 | ql_dbg(ql_dbg_init, vha, 0x00d3, |
| 3214 | "Init Firmware -- success.\n"); | 3180 | "Init Firmware -- success.\n"); |
| 3181 | ha->flags.fw_started = 1; | ||
| 3215 | } | 3182 | } |
| 3216 | 3183 | ||
| 3217 | return (rval); | 3184 | return (rval); |
| @@ -3374,8 +3341,8 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) | |||
| 3374 | uint8_t domain; | 3341 | uint8_t domain; |
| 3375 | char connect_type[22]; | 3342 | char connect_type[22]; |
| 3376 | struct qla_hw_data *ha = vha->hw; | 3343 | struct qla_hw_data *ha = vha->hw; |
| 3377 | unsigned long flags; | ||
| 3378 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); | 3344 | scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); |
| 3345 | port_id_t id; | ||
| 3379 | 3346 | ||
| 3380 | /* Get host addresses. */ | 3347 | /* Get host addresses. */ |
| 3381 | rval = qla2x00_get_adapter_id(vha, | 3348 | rval = qla2x00_get_adapter_id(vha, |
| @@ -3453,13 +3420,11 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) | |||
| 3453 | 3420 | ||
| 3454 | /* Save Host port and loop ID. */ | 3421 | /* Save Host port and loop ID. */ |
| 3455 | /* byte order - Big Endian */ | 3422 | /* byte order - Big Endian */ |
| 3456 | vha->d_id.b.domain = domain; | 3423 | id.b.domain = domain; |
| 3457 | vha->d_id.b.area = area; | 3424 | id.b.area = area; |
| 3458 | vha->d_id.b.al_pa = al_pa; | 3425 | id.b.al_pa = al_pa; |
| 3459 | 3426 | id.b.rsvd_1 = 0; | |
| 3460 | spin_lock_irqsave(&ha->vport_slock, flags); | 3427 | qlt_update_host_map(vha, id); |
| 3461 | qlt_update_vp_map(vha, SET_AL_PA); | ||
| 3462 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 3463 | 3428 | ||
| 3464 | if (!vha->flags.init_done) | 3429 | if (!vha->flags.init_done) |
| 3465 | ql_log(ql_log_info, vha, 0x2010, | 3430 | ql_log(ql_log_info, vha, 0x2010, |
| @@ -4036,6 +4001,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) | |||
| 4036 | atomic_set(&vha->loop_state, LOOP_READY); | 4001 | atomic_set(&vha->loop_state, LOOP_READY); |
| 4037 | ql_dbg(ql_dbg_disc, vha, 0x2069, | 4002 | ql_dbg(ql_dbg_disc, vha, 0x2069, |
| 4038 | "LOOP READY.\n"); | 4003 | "LOOP READY.\n"); |
| 4004 | ha->flags.fw_init_done = 1; | ||
| 4039 | 4005 | ||
| 4040 | /* | 4006 | /* |
| 4041 | * Process any ATIO queue entries that came in | 4007 | * Process any ATIO queue entries that came in |
| @@ -5148,6 +5114,7 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha) | |||
| 5148 | } | 5114 | } |
| 5149 | } | 5115 | } |
| 5150 | atomic_dec(&vha->vref_count); | 5116 | atomic_dec(&vha->vref_count); |
| 5117 | wake_up(&vha->vref_waitq); | ||
| 5151 | } | 5118 | } |
| 5152 | spin_unlock_irqrestore(&ha->vport_slock, flags); | 5119 | spin_unlock_irqrestore(&ha->vport_slock, flags); |
| 5153 | } | 5120 | } |
| @@ -5526,6 +5493,11 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) | |||
| 5526 | if (!(IS_P3P_TYPE(ha))) | 5493 | if (!(IS_P3P_TYPE(ha))) |
| 5527 | ha->isp_ops->reset_chip(vha); | 5494 | ha->isp_ops->reset_chip(vha); |
| 5528 | 5495 | ||
| 5496 | ha->flags.n2n_ae = 0; | ||
| 5497 | ha->flags.lip_ae = 0; | ||
| 5498 | ha->current_topology = 0; | ||
| 5499 | ha->flags.fw_started = 0; | ||
| 5500 | ha->flags.fw_init_done = 0; | ||
| 5529 | ha->chip_reset++; | 5501 | ha->chip_reset++; |
| 5530 | 5502 | ||
| 5531 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); | 5503 | atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); |
| @@ -6802,6 +6774,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) | |||
| 6802 | return; | 6774 | return; |
| 6803 | if (!ha->fw_major_version) | 6775 | if (!ha->fw_major_version) |
| 6804 | return; | 6776 | return; |
| 6777 | if (!ha->flags.fw_started) | ||
| 6778 | return; | ||
| 6805 | 6779 | ||
| 6806 | ret = qla2x00_stop_firmware(vha); | 6780 | ret = qla2x00_stop_firmware(vha); |
| 6807 | for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && | 6781 | for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && |
| @@ -6815,6 +6789,9 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha) | |||
| 6815 | "Attempting retry of stop-firmware command.\n"); | 6789 | "Attempting retry of stop-firmware command.\n"); |
| 6816 | ret = qla2x00_stop_firmware(vha); | 6790 | ret = qla2x00_stop_firmware(vha); |
| 6817 | } | 6791 | } |
| 6792 | |||
| 6793 | ha->flags.fw_started = 0; | ||
| 6794 | ha->flags.fw_init_done = 0; | ||
| 6818 | } | 6795 | } |
| 6819 | 6796 | ||
| 6820 | int | 6797 | int |
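Two related qla_init.c changes above deserve a note: the firmware life-cycle flags (fw_started/fw_init_done) gate qla2x00_try_to_stop_firmware() so a stop is never sent to firmware that never started, and the relogin paths replace the unconditional "PLOGI complete, do nothing" bail-out with a deadline, plogi_nack_done_deadline, so a stale state cannot suppress relogin forever. The deadline uses the wraparound-safe jiffies helpers; a sketch, where the 3-second arming value is illustrative rather than the driver's:

	#include <linux/jiffies.h>

	/* Arming side, e.g. when a PLOGI is NACKed: */
	fcport->plogi_nack_done_deadline = jiffies + msecs_to_jiffies(3000);

	/* Checking side: time_before_eq() is safe across jiffies
	 * wraparound, unlike a raw "<=" comparison. */
	if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
	    time_before_eq(jiffies, fcport->plogi_nack_done_deadline))
		return 0;	/* still inside the hold-off window */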
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 535079280288..ea027f6a7fd4 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c | |||
| @@ -889,7 +889,7 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx, | |||
| 889 | 889 | ||
| 890 | int | 890 | int |
| 891 | qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, | 891 | qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, |
| 892 | uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) | 892 | uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) |
| 893 | { | 893 | { |
| 894 | void *next_dsd; | 894 | void *next_dsd; |
| 895 | uint8_t avail_dsds = 0; | 895 | uint8_t avail_dsds = 0; |
| @@ -898,7 +898,6 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp, | |||
| 898 | struct scatterlist *sg_prot; | 898 | struct scatterlist *sg_prot; |
| 899 | uint32_t *cur_dsd = dsd; | 899 | uint32_t *cur_dsd = dsd; |
| 900 | uint16_t used_dsds = tot_dsds; | 900 | uint16_t used_dsds = tot_dsds; |
| 901 | |||
| 902 | uint32_t prot_int; /* protection interval */ | 901 | uint32_t prot_int; /* protection interval */ |
| 903 | uint32_t partial; | 902 | uint32_t partial; |
| 904 | struct qla2_sgx sgx; | 903 | struct qla2_sgx sgx; |
| @@ -966,7 +965,7 @@ alloc_and_fill: | |||
| 966 | } else { | 965 | } else { |
| 967 | list_add_tail(&dsd_ptr->list, | 966 | list_add_tail(&dsd_ptr->list, |
| 968 | &(tc->ctx->dsd_list)); | 967 | &(tc->ctx->dsd_list)); |
| 969 | tc->ctx_dsd_alloced = 1; | 968 | *tc->ctx_dsd_alloced = 1; |
| 970 | } | 969 | } |
| 971 | 970 | ||
| 972 | 971 | ||
| @@ -1005,7 +1004,7 @@ alloc_and_fill: | |||
| 1005 | 1004 | ||
| 1006 | int | 1005 | int |
| 1007 | qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | 1006 | qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, |
| 1008 | uint16_t tot_dsds, struct qla_tgt_cmd *tc) | 1007 | uint16_t tot_dsds, struct qla_tc_param *tc) |
| 1009 | { | 1008 | { |
| 1010 | void *next_dsd; | 1009 | void *next_dsd; |
| 1011 | uint8_t avail_dsds = 0; | 1010 | uint8_t avail_dsds = 0; |
| @@ -1066,7 +1065,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | |||
| 1066 | } else { | 1065 | } else { |
| 1067 | list_add_tail(&dsd_ptr->list, | 1066 | list_add_tail(&dsd_ptr->list, |
| 1068 | &(tc->ctx->dsd_list)); | 1067 | &(tc->ctx->dsd_list)); |
| 1069 | tc->ctx_dsd_alloced = 1; | 1068 | *tc->ctx_dsd_alloced = 1; |
| 1070 | } | 1069 | } |
| 1071 | 1070 | ||
| 1072 | /* add new list to cmd iocb or last list */ | 1071 | /* add new list to cmd iocb or last list */ |
| @@ -1092,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd, | |||
| 1092 | 1091 | ||
| 1093 | int | 1092 | int |
| 1094 | qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, | 1093 | qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, |
| 1095 | uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc) | 1094 | uint32_t *dsd, uint16_t tot_dsds, struct qla_tc_param *tc) |
| 1096 | { | 1095 | { |
| 1097 | void *next_dsd; | 1096 | void *next_dsd; |
| 1098 | uint8_t avail_dsds = 0; | 1097 | uint8_t avail_dsds = 0; |
| @@ -1158,7 +1157,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, | |||
| 1158 | } else { | 1157 | } else { |
| 1159 | list_add_tail(&dsd_ptr->list, | 1158 | list_add_tail(&dsd_ptr->list, |
| 1160 | &(tc->ctx->dsd_list)); | 1159 | &(tc->ctx->dsd_list)); |
| 1161 | tc->ctx_dsd_alloced = 1; | 1160 | *tc->ctx_dsd_alloced = 1; |
| 1162 | } | 1161 | } |
| 1163 | 1162 | ||
| 1164 | /* add new list to cmd iocb or last list */ | 1163 | /* add new list to cmd iocb or last list */ |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 3c66ea29de27..3203367a4f42 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
| @@ -708,6 +708,8 @@ skip_rio: | |||
| 708 | "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); | 708 | "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx); |
| 709 | 709 | ||
| 710 | ha->isp_ops->fw_dump(vha, 1); | 710 | ha->isp_ops->fw_dump(vha, 1); |
| 711 | ha->flags.fw_init_done = 0; | ||
| 712 | ha->flags.fw_started = 0; | ||
| 711 | 713 | ||
| 712 | if (IS_FWI2_CAPABLE(ha)) { | 714 | if (IS_FWI2_CAPABLE(ha)) { |
| 713 | if (mb[1] == 0 && mb[2] == 0) { | 715 | if (mb[1] == 0 && mb[2] == 0) { |
| @@ -761,6 +763,9 @@ skip_rio: | |||
| 761 | break; | 763 | break; |
| 762 | 764 | ||
| 763 | case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ | 765 | case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ |
| 766 | ha->flags.lip_ae = 1; | ||
| 767 | ha->flags.n2n_ae = 0; | ||
| 768 | |||
| 764 | ql_dbg(ql_dbg_async, vha, 0x5009, | 769 | ql_dbg(ql_dbg_async, vha, 0x5009, |
| 765 | "LIP occurred (%x).\n", mb[1]); | 770 | "LIP occurred (%x).\n", mb[1]); |
| 766 | 771 | ||
| @@ -797,6 +802,10 @@ skip_rio: | |||
| 797 | break; | 802 | break; |
| 798 | 803 | ||
| 799 | case MBA_LOOP_DOWN: /* Loop Down Event */ | 804 | case MBA_LOOP_DOWN: /* Loop Down Event */ |
| 805 | ha->flags.n2n_ae = 0; | ||
| 806 | ha->flags.lip_ae = 0; | ||
| 807 | ha->current_topology = 0; | ||
| 808 | |||
| 800 | mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) | 809 | mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha)) |
| 801 | ? RD_REG_WORD(®24->mailbox4) : 0; | 810 | ? RD_REG_WORD(®24->mailbox4) : 0; |
| 802 | mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(®82->mailbox_out[4]) | 811 | mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(®82->mailbox_out[4]) |
| @@ -866,6 +875,9 @@ skip_rio: | |||
| 866 | 875 | ||
| 867 | /* case MBA_DCBX_COMPLETE: */ | 876 | /* case MBA_DCBX_COMPLETE: */ |
| 868 | case MBA_POINT_TO_POINT: /* Point-to-Point */ | 877 | case MBA_POINT_TO_POINT: /* Point-to-Point */ |
| 878 | ha->flags.lip_ae = 0; | ||
| 879 | ha->flags.n2n_ae = 1; | ||
| 880 | |||
| 869 | if (IS_QLA2100(ha)) | 881 | if (IS_QLA2100(ha)) |
| 870 | break; | 882 | break; |
| 871 | 883 | ||
| @@ -1620,9 +1632,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
| 1620 | QLA_LOGIO_LOGIN_RETRIED : 0; | 1632 | QLA_LOGIO_LOGIN_RETRIED : 0; |
| 1621 | if (logio->entry_status) { | 1633 | if (logio->entry_status) { |
| 1622 | ql_log(ql_log_warn, fcport->vha, 0x5034, | 1634 | ql_log(ql_log_warn, fcport->vha, 0x5034, |
| 1623 | "Async-%s error entry - hdl=%x" | 1635 | "Async-%s error entry - %8phC hdl=%x" |
| 1624 | "portid=%02x%02x%02x entry-status=%x.\n", | 1636 | "portid=%02x%02x%02x entry-status=%x.\n", |
| 1625 | type, sp->handle, fcport->d_id.b.domain, | 1637 | type, fcport->port_name, sp->handle, fcport->d_id.b.domain, |
| 1626 | fcport->d_id.b.area, fcport->d_id.b.al_pa, | 1638 | fcport->d_id.b.area, fcport->d_id.b.al_pa, |
| 1627 | logio->entry_status); | 1639 | logio->entry_status); |
| 1628 | ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, | 1640 | ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d, |
| @@ -1633,8 +1645,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
| 1633 | 1645 | ||
| 1634 | if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { | 1646 | if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) { |
| 1635 | ql_dbg(ql_dbg_async, fcport->vha, 0x5036, | 1647 | ql_dbg(ql_dbg_async, fcport->vha, 0x5036, |
| 1636 | "Async-%s complete - hdl=%x portid=%02x%02x%02x " | 1648 | "Async-%s complete - %8phC hdl=%x portid=%02x%02x%02x " |
| 1637 | "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain, | 1649 | "iop0=%x.\n", type, fcport->port_name, sp->handle, |
| 1650 | fcport->d_id.b.domain, | ||
| 1638 | fcport->d_id.b.area, fcport->d_id.b.al_pa, | 1651 | fcport->d_id.b.area, fcport->d_id.b.al_pa, |
| 1639 | le32_to_cpu(logio->io_parameter[0])); | 1652 | le32_to_cpu(logio->io_parameter[0])); |
| 1640 | 1653 | ||
| @@ -1674,6 +1687,17 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
| 1674 | case LSC_SCODE_NPORT_USED: | 1687 | case LSC_SCODE_NPORT_USED: |
| 1675 | data[0] = MBS_LOOP_ID_USED; | 1688 | data[0] = MBS_LOOP_ID_USED; |
| 1676 | break; | 1689 | break; |
| 1690 | case LSC_SCODE_CMD_FAILED: | ||
| 1691 | if (iop[1] == 0x0606) { | ||
| 1692 | /* | ||
| 1693 | * PLOGI/PRLI completed. We must have received a PLOGI/PRLI; | ||
| 1694 | * the target side ACKed. | ||
| 1695 | */ | ||
| 1696 | data[0] = MBS_COMMAND_COMPLETE; | ||
| 1697 | goto logio_done; | ||
| 1698 | } | ||
| 1699 | data[0] = MBS_COMMAND_ERROR; | ||
| 1700 | break; | ||
| 1677 | case LSC_SCODE_NOXCB: | 1701 | case LSC_SCODE_NOXCB: |
| 1678 | vha->hw->exch_starvation++; | 1702 | vha->hw->exch_starvation++; |
| 1679 | if (vha->hw->exch_starvation > 5) { | 1703 | if (vha->hw->exch_starvation > 5) { |
| @@ -1695,8 +1719,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
| 1695 | } | 1719 | } |
| 1696 | 1720 | ||
| 1697 | ql_dbg(ql_dbg_async, fcport->vha, 0x5037, | 1721 | ql_dbg(ql_dbg_async, fcport->vha, 0x5037, |
| 1698 | "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x " | 1722 | "Async-%s failed - %8phC hdl=%x portid=%02x%02x%02x comp=%x " |
| 1699 | "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain, | 1723 | "iop0=%x iop1=%x.\n", type, fcport->port_name, |
| 1724 | sp->handle, fcport->d_id.b.domain, | ||
| 1700 | fcport->d_id.b.area, fcport->d_id.b.al_pa, | 1725 | fcport->d_id.b.area, fcport->d_id.b.al_pa, |
| 1701 | le16_to_cpu(logio->comp_status), | 1726 | le16_to_cpu(logio->comp_status), |
| 1702 | le32_to_cpu(logio->io_parameter[0]), | 1727 | le32_to_cpu(logio->io_parameter[0]), |
| @@ -2679,7 +2704,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, | |||
| 2679 | return; | 2704 | return; |
| 2680 | 2705 | ||
| 2681 | abt = &sp->u.iocb_cmd; | 2706 | abt = &sp->u.iocb_cmd; |
| 2682 | abt->u.abt.comp_status = le32_to_cpu(pkt->nport_handle); | 2707 | abt->u.abt.comp_status = le16_to_cpu(pkt->nport_handle); |
| 2683 | sp->done(sp, 0); | 2708 | sp->done(sp, 0); |
| 2684 | } | 2709 | } |
| 2685 | 2710 | ||
| @@ -2693,7 +2718,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, | |||
| 2693 | struct sts_entry_24xx *pkt; | 2718 | struct sts_entry_24xx *pkt; |
| 2694 | struct qla_hw_data *ha = vha->hw; | 2719 | struct qla_hw_data *ha = vha->hw; |
| 2695 | 2720 | ||
| 2696 | if (!vha->flags.online) | 2721 | if (!ha->flags.fw_started) |
| 2697 | return; | 2722 | return; |
| 2698 | 2723 | ||
| 2699 | while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { | 2724 | while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) { |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 35079f417417..a113ab3592a7 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
| @@ -10,6 +10,28 @@ | |||
| 10 | #include <linux/delay.h> | 10 | #include <linux/delay.h> |
| 11 | #include <linux/gfp.h> | 11 | #include <linux/gfp.h> |
| 12 | 12 | ||
| 13 | static struct mb_cmd_name { | ||
| 14 | uint16_t cmd; | ||
| 15 | const char *str; | ||
| 16 | } mb_str[] = { | ||
| 17 | {MBC_GET_PORT_DATABASE, "GPDB"}, | ||
| 18 | {MBC_GET_ID_LIST, "GIDList"}, | ||
| 19 | {MBC_GET_LINK_PRIV_STATS, "Stats"}, | ||
| 20 | }; | ||
| 21 | |||
| 22 | static const char *mb_to_str(uint16_t cmd) | ||
| 23 | { | ||
| 24 | int i; | ||
| 25 | struct mb_cmd_name *e; | ||
| 26 | |||
| 27 | for (i = 0; i < ARRAY_SIZE(mb_str); i++) { | ||
| 28 | e = mb_str + i; | ||
| 29 | if (cmd == e->cmd) | ||
| 30 | return e->str; | ||
| 31 | } | ||
| 32 | return "unknown"; | ||
| 33 | } | ||
| 34 | |||
| 13 | static struct rom_cmd { | 35 | static struct rom_cmd { |
| 14 | uint16_t cmd; | 36 | uint16_t cmd; |
| 15 | } rom_cmds[] = { | 37 | } rom_cmds[] = { |
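The mb_to_str() helper added above is a plain linear scan over a small opcode-to-name table, used only to label async mailbox SRBs in debug output. A standalone sketch of the same pattern (the opcode values shown are illustrative; the real MBC_* constants are defined in the driver headers):

	struct cmd_name {
		unsigned short cmd;
		const char *str;
	};

	static const struct cmd_name names[] = {
		{ 0x64, "GPDB" },	/* illustrative opcode values only */
		{ 0x7c, "GIDList" },
		{ 0x6d, "Stats" },
	};

	static const char *cmd_to_str(unsigned short cmd)
	{
		unsigned int i;

		for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
			if (names[i].cmd == cmd)
				return names[i].str;
		return "unknown";	/* same fallback as mb_to_str() */
	}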
| @@ -2818,7 +2840,7 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id, | |||
| 2818 | 2840 | ||
| 2819 | int | 2841 | int |
| 2820 | qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, | 2842 | qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, |
| 2821 | dma_addr_t stats_dma, uint options) | 2843 | dma_addr_t stats_dma, uint16_t options) |
| 2822 | { | 2844 | { |
| 2823 | int rval; | 2845 | int rval; |
| 2824 | mbx_cmd_t mc; | 2846 | mbx_cmd_t mc; |
| @@ -2828,19 +2850,17 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats, | |||
| 2828 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, | 2850 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088, |
| 2829 | "Entered %s.\n", __func__); | 2851 | "Entered %s.\n", __func__); |
| 2830 | 2852 | ||
| 2831 | mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; | 2853 | memset(&mc, 0, sizeof(mc)); |
| 2832 | mcp->mb[2] = MSW(stats_dma); | 2854 | mc.mb[0] = MBC_GET_LINK_PRIV_STATS; |
| 2833 | mcp->mb[3] = LSW(stats_dma); | 2855 | mc.mb[2] = MSW(stats_dma); |
| 2834 | mcp->mb[6] = MSW(MSD(stats_dma)); | 2856 | mc.mb[3] = LSW(stats_dma); |
| 2835 | mcp->mb[7] = LSW(MSD(stats_dma)); | 2857 | mc.mb[6] = MSW(MSD(stats_dma)); |
| 2836 | mcp->mb[8] = sizeof(struct link_statistics) / 4; | 2858 | mc.mb[7] = LSW(MSD(stats_dma)); |
| 2837 | mcp->mb[9] = vha->vp_idx; | 2859 | mc.mb[8] = sizeof(struct link_statistics) / 4; |
| 2838 | mcp->mb[10] = options; | 2860 | mc.mb[9] = cpu_to_le16(vha->vp_idx); |
| 2839 | mcp->out_mb = MBX_10|MBX_9|MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0; | 2861 | mc.mb[10] = cpu_to_le16(options); |
| 2840 | mcp->in_mb = MBX_2|MBX_1|MBX_0; | 2862 | |
| 2841 | mcp->tov = MBX_TOV_SECONDS; | 2863 | rval = qla24xx_send_mb_cmd(vha, &mc); |
| 2842 | mcp->flags = IOCTL_CMD; | ||
| 2843 | rval = qla2x00_mailbox_command(vha, mcp); | ||
| 2844 | 2864 | ||
| 2845 | if (rval == QLA_SUCCESS) { | 2865 | if (rval == QLA_SUCCESS) { |
| 2846 | if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { | 2866 | if (mcp->mb[0] != MBS_COMMAND_COMPLETE) { |
| @@ -3603,6 +3623,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
| 3603 | scsi_qla_host_t *vp = NULL; | 3623 | scsi_qla_host_t *vp = NULL; |
| 3604 | unsigned long flags; | 3624 | unsigned long flags; |
| 3605 | int found; | 3625 | int found; |
| 3626 | port_id_t id; | ||
| 3606 | 3627 | ||
| 3607 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, | 3628 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6, |
| 3608 | "Entered %s.\n", __func__); | 3629 | "Entered %s.\n", __func__); |
| @@ -3610,28 +3631,27 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
| 3610 | if (rptid_entry->entry_status != 0) | 3631 | if (rptid_entry->entry_status != 0) |
| 3611 | return; | 3632 | return; |
| 3612 | 3633 | ||
| 3634 | id.b.domain = rptid_entry->port_id[2]; | ||
| 3635 | id.b.area = rptid_entry->port_id[1]; | ||
| 3636 | id.b.al_pa = rptid_entry->port_id[0]; | ||
| 3637 | id.b.rsvd_1 = 0; | ||
| 3638 | |||
| 3613 | if (rptid_entry->format == 0) { | 3639 | if (rptid_entry->format == 0) { |
| 3614 | /* loop */ | 3640 | /* loop */ |
| 3615 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7, | 3641 | ql_dbg(ql_dbg_async, vha, 0x10b7, |
| 3616 | "Format 0 : Number of VPs setup %d, number of " | 3642 | "Format 0 : Number of VPs setup %d, number of " |
| 3617 | "VPs acquired %d.\n", rptid_entry->vp_setup, | 3643 | "VPs acquired %d.\n", rptid_entry->vp_setup, |
| 3618 | rptid_entry->vp_acquired); | 3644 | rptid_entry->vp_acquired); |
| 3619 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8, | 3645 | ql_dbg(ql_dbg_async, vha, 0x10b8, |
| 3620 | "Primary port id %02x%02x%02x.\n", | 3646 | "Primary port id %02x%02x%02x.\n", |
| 3621 | rptid_entry->port_id[2], rptid_entry->port_id[1], | 3647 | rptid_entry->port_id[2], rptid_entry->port_id[1], |
| 3622 | rptid_entry->port_id[0]); | 3648 | rptid_entry->port_id[0]); |
| 3623 | 3649 | ||
| 3624 | vha->d_id.b.domain = rptid_entry->port_id[2]; | 3650 | qlt_update_host_map(vha, id); |
| 3625 | vha->d_id.b.area = rptid_entry->port_id[1]; | ||
| 3626 | vha->d_id.b.al_pa = rptid_entry->port_id[0]; | ||
| 3627 | |||
| 3628 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 3629 | qlt_update_vp_map(vha, SET_AL_PA); | ||
| 3630 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 3631 | 3651 | ||
| 3632 | } else if (rptid_entry->format == 1) { | 3652 | } else if (rptid_entry->format == 1) { |
| 3633 | /* fabric */ | 3653 | /* fabric */ |
| 3634 | ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9, | 3654 | ql_dbg(ql_dbg_async, vha, 0x10b9, |
| 3635 | "Format 1: VP[%d] enabled - status %d - with " | 3655 | "Format 1: VP[%d] enabled - status %d - with " |
| 3636 | "port id %02x%02x%02x.\n", rptid_entry->vp_idx, | 3656 | "port id %02x%02x%02x.\n", rptid_entry->vp_idx, |
| 3637 | rptid_entry->vp_status, | 3657 | rptid_entry->vp_status, |
| @@ -3653,12 +3673,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
| 3653 | WWN_SIZE); | 3673 | WWN_SIZE); |
| 3654 | } | 3674 | } |
| 3655 | 3675 | ||
| 3656 | vha->d_id.b.domain = rptid_entry->port_id[2]; | 3676 | qlt_update_host_map(vha, id); |
| 3657 | vha->d_id.b.area = rptid_entry->port_id[1]; | ||
| 3658 | vha->d_id.b.al_pa = rptid_entry->port_id[0]; | ||
| 3659 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 3660 | qlt_update_vp_map(vha, SET_AL_PA); | ||
| 3661 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 3662 | } | 3677 | } |
| 3663 | 3678 | ||
| 3664 | fc_host_port_name(vha->host) = | 3679 | fc_host_port_name(vha->host) = |
| @@ -3694,12 +3709,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, | |||
| 3694 | if (!found) | 3709 | if (!found) |
| 3695 | return; | 3710 | return; |
| 3696 | 3711 | ||
| 3697 | vp->d_id.b.domain = rptid_entry->port_id[2]; | 3712 | qlt_update_host_map(vp, id); |
| 3698 | vp->d_id.b.area = rptid_entry->port_id[1]; | ||
| 3699 | vp->d_id.b.al_pa = rptid_entry->port_id[0]; | ||
| 3700 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 3701 | qlt_update_vp_map(vp, SET_AL_PA); | ||
| 3702 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 3703 | 3713 | ||
| 3704 | /* | 3714 | /* |
| 3705 | * Cannot configure here as we are still sitting on the | 3715 | * Cannot configure here as we are still sitting on the |
| @@ -5827,3 +5837,225 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha, | |||
| 5827 | 5837 | ||
| 5828 | return rval; | 5838 | return rval; |
| 5829 | } | 5839 | } |
| 5840 | |||
| 5841 | static void qla2x00_async_mb_sp_done(void *s, int res) | ||
| 5842 | { | ||
| 5843 | struct srb *sp = s; | ||
| 5844 | |||
| 5845 | sp->u.iocb_cmd.u.mbx.rc = res; | ||
| 5846 | |||
| 5847 | complete(&sp->u.iocb_cmd.u.mbx.comp); | ||
| 5848 | /* don't free sp here. Let the caller do the free */ | ||
| 5849 | } | ||
| 5850 | |||
| 5851 | /* | ||
| 5852 | * This mailbox uses the IOCB interface to send a mailbox command. | ||
| 5853 | * This allows non-critical (non chip-setup) commands to go | ||
| 5854 | * out in parallel. | ||
| 5855 | */ | ||
| 5856 | int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) | ||
| 5857 | { | ||
| 5858 | int rval = QLA_FUNCTION_FAILED; | ||
| 5859 | srb_t *sp; | ||
| 5860 | struct srb_iocb *c; | ||
| 5861 | |||
| 5862 | if (!vha->hw->flags.fw_started) | ||
| 5863 | goto done; | ||
| 5864 | |||
| 5865 | sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL); | ||
| 5866 | if (!sp) | ||
| 5867 | goto done; | ||
| 5868 | |||
| 5869 | sp->type = SRB_MB_IOCB; | ||
| 5870 | sp->name = mb_to_str(mcp->mb[0]); | ||
| 5871 | |||
| 5872 | qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2); | ||
| 5873 | |||
| 5874 | memcpy(sp->u.iocb_cmd.u.mbx.out_mb, mcp->mb, SIZEOF_IOCB_MB_REG); | ||
| 5875 | |||
| 5876 | c = &sp->u.iocb_cmd; | ||
| 5877 | c->timeout = qla2x00_async_iocb_timeout; | ||
| 5878 | init_completion(&c->u.mbx.comp); | ||
| 5879 | |||
| 5880 | sp->done = qla2x00_async_mb_sp_done; | ||
| 5881 | |||
| 5882 | rval = qla2x00_start_sp(sp); | ||
| 5883 | if (rval != QLA_SUCCESS) { | ||
| 5884 | ql_dbg(ql_dbg_mbx, vha, 0xffff, | ||
| 5885 | "%s: %s Failed submission. %x.\n", | ||
| 5886 | __func__, sp->name, rval); | ||
| 5887 | goto done_free_sp; | ||
| 5888 | } | ||
| 5889 | |||
| 5890 | ql_dbg(ql_dbg_mbx, vha, 0xffff, "MB:%s hndl %x submitted\n", | ||
| 5891 | sp->name, sp->handle); | ||
| 5892 | |||
| 5893 | wait_for_completion(&c->u.mbx.comp); | ||
| 5894 | memcpy(mcp->mb, sp->u.iocb_cmd.u.mbx.in_mb, SIZEOF_IOCB_MB_REG); | ||
| 5895 | |||
| 5896 | rval = c->u.mbx.rc; | ||
| 5897 | switch (rval) { | ||
| 5898 | case QLA_FUNCTION_TIMEOUT: | ||
| 5899 | ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Timeout. %x.\n", | ||
| 5900 | __func__, sp->name, rval); | ||
| 5901 | break; | ||
| 5902 | case QLA_SUCCESS: | ||
| 5903 | ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s done.\n", | ||
| 5904 | __func__, sp->name); | ||
| 5905 | sp->free(sp); | ||
| 5906 | break; | ||
| 5907 | default: | ||
| 5908 | ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %s Failed. %x.\n", | ||
| 5909 | __func__, sp->name, rval); | ||
| 5910 | sp->free(sp); | ||
| 5911 | break; | ||
| 5912 | } | ||
| 5913 | |||
| 5914 | return rval; | ||
| 5915 | |||
| 5916 | done_free_sp: | ||
| 5917 | sp->free(sp); | ||
| 5918 | done: | ||
| 5919 | return rval; | ||
| 5920 | } | ||
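qla24xx_send_mb_cmd() above is a synchronous wrapper: the submitter blocks on a struct completion that the SRB's done callback (qla2x00_async_mb_sp_done) signals when the response arrives. A minimal sketch of that handshake, with hypothetical demo_* names standing in for the driver's types:

	#include <linux/completion.h>

	struct demo_mb_ctx {
		struct completion comp;
		int rc;
	};

	/* completion side: runs when the mailbox IOCB finishes */
	static void demo_mb_done(struct demo_mb_ctx *ctx, int res)
	{
		ctx->rc = res;
		complete(&ctx->comp);		/* wake the submitter */
	}

	/* submit side: process context only */
	static int demo_mb_submit_and_wait(struct demo_mb_ctx *ctx)
	{
		init_completion(&ctx->comp);
		/* ... hand the IOCB to the firmware here ... */
		wait_for_completion(&ctx->comp);
		return ctx->rc;
	}

This is also why the routines below carry "do not call from the DPC thread" notes: a caller that sleeps in wait_for_completion() from the DPC thread would stall other deferred driver work while it waits.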
| 5921 | |||
| 5922 | /* | ||
| 5923 | * qla24xx_gpdb_wait | ||
| 5924 | * NOTE: Do not call this routine from the DPC thread | ||
| 5925 | */ | ||
| 5926 | int qla24xx_gpdb_wait(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) | ||
| 5927 | { | ||
| 5928 | int rval = QLA_FUNCTION_FAILED; | ||
| 5929 | dma_addr_t pd_dma; | ||
| 5930 | struct port_database_24xx *pd; | ||
| 5931 | struct qla_hw_data *ha = vha->hw; | ||
| 5932 | mbx_cmd_t mc; | ||
| 5933 | |||
| 5934 | if (!vha->hw->flags.fw_started) | ||
| 5935 | goto done; | ||
| 5936 | |||
| 5937 | pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); | ||
| 5938 | if (pd == NULL) { | ||
| 5939 | ql_log(ql_log_warn, vha, 0xffff, | ||
| 5940 | "Failed to allocate port database structure.\n"); | ||
| 5941 | goto done_free_sp; | ||
| 5942 | } | ||
| 5943 | memset(pd, 0, max(PORT_DATABASE_SIZE, PORT_DATABASE_24XX_SIZE)); | ||
| 5944 | |||
| 5945 | memset(&mc, 0, sizeof(mc)); | ||
| 5946 | mc.mb[0] = MBC_GET_PORT_DATABASE; | ||
| 5947 | mc.mb[1] = cpu_to_le16(fcport->loop_id); | ||
| 5948 | mc.mb[2] = MSW(pd_dma); | ||
| 5949 | mc.mb[3] = LSW(pd_dma); | ||
| 5950 | mc.mb[6] = MSW(MSD(pd_dma)); | ||
| 5951 | mc.mb[7] = LSW(MSD(pd_dma)); | ||
| 5952 | mc.mb[9] = cpu_to_le16(vha->vp_idx); | ||
| 5953 | mc.mb[10] = cpu_to_le16((uint16_t)opt); | ||
| 5954 | |||
| 5955 | rval = qla24xx_send_mb_cmd(vha, &mc); | ||
| 5956 | if (rval != QLA_SUCCESS) { | ||
| 5957 | ql_dbg(ql_dbg_mbx, vha, 0xffff, | ||
| 5958 | "%s: %8phC fail\n", __func__, fcport->port_name); | ||
| 5959 | goto done_free_sp; | ||
| 5960 | } | ||
| 5961 | |||
| 5962 | rval = __qla24xx_parse_gpdb(vha, fcport, pd); | ||
| 5963 | |||
| 5964 | ql_dbg(ql_dbg_mbx, vha, 0xffff, "%s: %8phC done\n", | ||
| 5965 | __func__, fcport->port_name); | ||
| 5966 | |||
| 5967 | done_free_sp: | ||
| 5968 | if (pd) | ||
| 5969 | dma_pool_free(ha->s_dma_pool, pd, pd_dma); | ||
| 5970 | done: | ||
| 5971 | return rval; | ||
| 5972 | } | ||
| 5973 | |||
| 5974 | int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, | ||
| 5975 | struct port_database_24xx *pd) | ||
| 5976 | { | ||
| 5977 | int rval = QLA_SUCCESS; | ||
| 5978 | uint64_t zero = 0; | ||
| 5979 | |||
| 5980 | /* Check for logged in state. */ | ||
| 5981 | if (pd->current_login_state != PDS_PRLI_COMPLETE && | ||
| 5982 | pd->last_login_state != PDS_PRLI_COMPLETE) { | ||
| 5983 | ql_dbg(ql_dbg_mbx, vha, 0xffff, | ||
| 5984 | "Unable to verify login-state (%x/%x) for " | ||
| 5985 | "loop_id %x.\n", pd->current_login_state, | ||
| 5986 | pd->last_login_state, fcport->loop_id); | ||
| 5987 | rval = QLA_FUNCTION_FAILED; | ||
| 5988 | goto gpd_error_out; | ||
| 5989 | } | ||
| 5990 | |||
| 5991 | if (fcport->loop_id == FC_NO_LOOP_ID || | ||
| 5992 | (memcmp(fcport->port_name, (uint8_t *)&zero, 8) && | ||
| 5993 | memcmp(fcport->port_name, pd->port_name, 8))) { | ||
| 5994 | /* We lost the device mid way. */ | ||
| 5995 | rval = QLA_NOT_LOGGED_IN; | ||
| 5996 | goto gpd_error_out; | ||
| 5997 | } | ||
| 5998 | |||
| 5999 | /* Names are little-endian. */ | ||
| 6000 | memcpy(fcport->node_name, pd->node_name, WWN_SIZE); | ||
| 6001 | memcpy(fcport->port_name, pd->port_name, WWN_SIZE); | ||
| 6002 | |||
| 6003 | /* Get port_id of device. */ | ||
| 6004 | fcport->d_id.b.domain = pd->port_id[0]; | ||
| 6005 | fcport->d_id.b.area = pd->port_id[1]; | ||
| 6006 | fcport->d_id.b.al_pa = pd->port_id[2]; | ||
| 6007 | fcport->d_id.b.rsvd_1 = 0; | ||
| 6008 | |||
| 6009 | /* If not a target, it must be an initiator or of unknown type. */ | ||
| 6010 | if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) | ||
| 6011 | fcport->port_type = FCT_INITIATOR; | ||
| 6012 | else | ||
| 6013 | fcport->port_type = FCT_TARGET; | ||
| 6014 | |||
| 6015 | /* Passback COS information. */ | ||
| 6016 | fcport->supported_classes = (pd->flags & PDF_CLASS_2) ? | ||
| 6017 | FC_COS_CLASS2 : FC_COS_CLASS3; | ||
| 6018 | |||
| 6019 | if (pd->prli_svc_param_word_3[0] & BIT_7) { | ||
| 6020 | fcport->flags |= FCF_CONF_COMP_SUPPORTED; | ||
| 6021 | fcport->conf_compl_supported = 1; | ||
| 6022 | } | ||
| 6023 | |||
| 6024 | gpd_error_out: | ||
| 6025 | return rval; | ||
| 6026 | } | ||
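One detail worth flagging in __qla24xx_parse_gpdb(): the port database presents the 24-bit port ID domain-first (port_id[0] is the domain), whereas rptid_entry->port_id[] in qla24xx_report_id_acquisition() above is consumed AL_PA-first (port_id[2] is the domain). A sketch of the domain-first unpacking used here:

	/* Illustrative only: assemble a 24-bit FC address from the
	 * domain-first byte order of port_database_24xx::port_id[]. */
	static inline unsigned int demo_pdb_portid(const unsigned char *p)
	{
		return ((unsigned int)p[0] << 16) |	/* domain */
		       ((unsigned int)p[1] << 8)  |	/* area   */
			(unsigned int)p[2];		/* AL_PA  */
	}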
| 6027 | |||
| 6028 | /* | ||
| 6029 | * qla24xx_gidlist_wait | ||
| 6030 | * NOTE: don't call this routine from the DPC thread. | ||
| 6031 | */ | ||
| 6032 | int qla24xx_gidlist_wait(struct scsi_qla_host *vha, | ||
| 6033 | void *id_list, dma_addr_t id_list_dma, uint16_t *entries) | ||
| 6034 | { | ||
| 6035 | int rval = QLA_FUNCTION_FAILED; | ||
| 6036 | mbx_cmd_t mc; | ||
| 6037 | |||
| 6038 | if (!vha->hw->flags.fw_started) | ||
| 6039 | goto done; | ||
| 6040 | |||
| 6041 | memset(&mc, 0, sizeof(mc)); | ||
| 6042 | mc.mb[0] = MBC_GET_ID_LIST; | ||
| 6043 | mc.mb[2] = MSW(id_list_dma); | ||
| 6044 | mc.mb[3] = LSW(id_list_dma); | ||
| 6045 | mc.mb[6] = MSW(MSD(id_list_dma)); | ||
| 6046 | mc.mb[7] = LSW(MSD(id_list_dma)); | ||
| 6047 | mc.mb[8] = 0; | ||
| 6048 | mc.mb[9] = cpu_to_le16(vha->vp_idx); | ||
| 6049 | |||
| 6050 | rval = qla24xx_send_mb_cmd(vha, &mc); | ||
| 6051 | if (rval != QLA_SUCCESS) { | ||
| 6052 | ql_dbg(ql_dbg_mbx, vha, 0xffff, | ||
| 6053 | "%s: fail\n", __func__); | ||
| 6054 | } else { | ||
| 6055 | *entries = mc.mb[1]; | ||
| 6056 | ql_dbg(ql_dbg_mbx, vha, 0xffff, | ||
| 6057 | "%s: done\n", __func__); | ||
| 6058 | } | ||
| 6059 | done: | ||
| 6060 | return rval; | ||
| 6061 | } | ||
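Both wrappers above scatter a 64-bit DMA address across four 16-bit mailbox registers. Assuming the usual qla2xxx macro definitions (LSW/MSW take the low/high 16 bits of a 32-bit value, LSD/MSD the low/high 32 bits of a 64-bit one), the packing works out as:

	dma_addr_t dma = id_list_dma;	/* 64-bit bus address */

	mc.mb[2] = MSW(dma);		/* bits 31:16 */
	mc.mb[3] = LSW(dma);		/* bits 15:0  */
	mc.mb[6] = MSW(MSD(dma));	/* bits 63:48 */
	mc.mb[7] = LSW(MSD(dma));	/* bits 47:32 */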
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c6d6f0d912ff..09a490c98763 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c | |||
| @@ -74,13 +74,14 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) | |||
| 74 | * ensures no active vp_list traversal while the vport is removed | 74 | * ensures no active vp_list traversal while the vport is removed |
| 75 | * from the queue) | 75 | * from the queue) |
| 76 | */ | 76 | */ |
| 77 | spin_lock_irqsave(&ha->vport_slock, flags); | 77 | wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count), |
| 78 | while (atomic_read(&vha->vref_count)) { | 78 | 10*HZ); |
| 79 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 80 | |||
| 81 | msleep(500); | ||
| 82 | 79 | ||
| 83 | spin_lock_irqsave(&ha->vport_slock, flags); | 80 | spin_lock_irqsave(&ha->vport_slock, flags); |
| 81 | if (atomic_read(&vha->vref_count)) { | ||
| 82 | ql_dbg(ql_dbg_vport, vha, 0xfffa, | ||
| 83 | "vha->vref_count=%u timeout\n", vha->vref_count.counter); | ||
| 84 | vha->vref_count = (atomic_t)ATOMIC_INIT(0); | ||
| 84 | } | 85 | } |
| 85 | list_del(&vha->list); | 86 | list_del(&vha->list); |
| 86 | qlt_update_vp_map(vha, RESET_VP_IDX); | 87 | qlt_update_vp_map(vha, RESET_VP_IDX); |
| @@ -269,6 +270,7 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) | |||
| 269 | 270 | ||
| 270 | spin_lock_irqsave(&ha->vport_slock, flags); | 271 | spin_lock_irqsave(&ha->vport_slock, flags); |
| 271 | atomic_dec(&vha->vref_count); | 272 | atomic_dec(&vha->vref_count); |
| 273 | wake_up(&vha->vref_waitq); | ||
| 272 | } | 274 | } |
| 273 | i++; | 275 | i++; |
| 274 | } | 276 | } |
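The qla_mid.c hunks above replace a 500 ms polling loop with a waitqueue: every path that drops a vref wakes vref_waitq, and the vport deleter sleeps until the count drains to zero (capped at 10 seconds, after which the count is forcibly reset and logged). Condensed:

	/* reference holder, on its release path */
	atomic_dec(&vha->vref_count);
	wake_up(&vha->vref_waitq);

	/* vport deleter: sleep until all references drain, 10 s cap */
	wait_event_timeout(vha->vref_waitq,
			   !atomic_read(&vha->vref_count), 10 * HZ);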
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1fed235a1b4a..41d5b09f7326 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -2560,6 +2560,20 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time) | |||
| 2560 | return atomic_read(&vha->loop_state) == LOOP_READY; | 2560 | return atomic_read(&vha->loop_state) == LOOP_READY; |
| 2561 | } | 2561 | } |
| 2562 | 2562 | ||
| 2563 | static void qla2x00_iocb_work_fn(struct work_struct *work) | ||
| 2564 | { | ||
| 2565 | struct scsi_qla_host *vha = container_of(work, | ||
| 2566 | struct scsi_qla_host, iocb_work); | ||
| 2567 | int cnt = 0; | ||
| 2568 | |||
| 2569 | while (!list_empty(&vha->work_list)) { | ||
| 2570 | qla2x00_do_work(vha); | ||
| 2571 | cnt++; | ||
| 2572 | if (cnt > 10) | ||
| 2573 | break; | ||
| 2574 | } | ||
| 2575 | } | ||
| 2576 | |||
| 2563 | /* | 2577 | /* |
| 2564 | * PCI driver interface | 2578 | * PCI driver interface |
| 2565 | */ | 2579 | */ |
| @@ -3078,6 +3092,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 3078 | */ | 3092 | */ |
| 3079 | qla2xxx_wake_dpc(base_vha); | 3093 | qla2xxx_wake_dpc(base_vha); |
| 3080 | 3094 | ||
| 3095 | INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); | ||
| 3081 | INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); | 3096 | INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); |
| 3082 | 3097 | ||
| 3083 | if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { | 3098 | if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { |
| @@ -3469,6 +3484,7 @@ qla2x00_remove_one(struct pci_dev *pdev) | |||
| 3469 | qla2x00_free_sysfs_attr(base_vha, true); | 3484 | qla2x00_free_sysfs_attr(base_vha, true); |
| 3470 | 3485 | ||
| 3471 | fc_remove_host(base_vha->host); | 3486 | fc_remove_host(base_vha->host); |
| 3487 | qlt_remove_target_resources(ha); | ||
| 3472 | 3488 | ||
| 3473 | scsi_remove_host(base_vha->host); | 3489 | scsi_remove_host(base_vha->host); |
| 3474 | 3490 | ||
| @@ -4268,6 +4284,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, | |||
| 4268 | spin_lock_init(&vha->work_lock); | 4284 | spin_lock_init(&vha->work_lock); |
| 4269 | spin_lock_init(&vha->cmd_list_lock); | 4285 | spin_lock_init(&vha->cmd_list_lock); |
| 4270 | init_waitqueue_head(&vha->fcport_waitQ); | 4286 | init_waitqueue_head(&vha->fcport_waitQ); |
| 4287 | init_waitqueue_head(&vha->vref_waitq); | ||
| 4271 | 4288 | ||
| 4272 | vha->gnl.size = sizeof(struct get_name_list_extended) * | 4289 | vha->gnl.size = sizeof(struct get_name_list_extended) * |
| 4273 | (ha->max_loop_id + 1); | 4290 | (ha->max_loop_id + 1); |
| @@ -4319,7 +4336,11 @@ qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e) | |||
| 4319 | spin_lock_irqsave(&vha->work_lock, flags); | 4336 | spin_lock_irqsave(&vha->work_lock, flags); |
| 4320 | list_add_tail(&e->list, &vha->work_list); | 4337 | list_add_tail(&e->list, &vha->work_list); |
| 4321 | spin_unlock_irqrestore(&vha->work_lock, flags); | 4338 | spin_unlock_irqrestore(&vha->work_lock, flags); |
| 4322 | qla2xxx_wake_dpc(vha); | 4339 | |
| 4340 | if (QLA_EARLY_LINKUP(vha->hw)) | ||
| 4341 | schedule_work(&vha->iocb_work); | ||
| 4342 | else | ||
| 4343 | qla2xxx_wake_dpc(vha); | ||
| 4323 | 4344 | ||
| 4324 | return QLA_SUCCESS; | 4345 | return QLA_SUCCESS; |
| 4325 | } | 4346 | } |
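The qla_os.c changes give the driver a lighter-weight path for draining vha->work_list before the topology is fully up. The plumbing, gathered in one place (both calls are standard workqueue API):

	/* once, at probe time */
	INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn);

	/* posting side: during early link-up, kick the work item
	 * directly instead of waking the heavier DPC thread */
	if (QLA_EARLY_LINKUP(vha->hw))
		schedule_work(&vha->iocb_work);
	else
		qla2xxx_wake_dpc(vha);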
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 45f5077684f0..0e03ca2ab3e5 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c | |||
| @@ -130,6 +130,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha, | |||
| 130 | static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, | 130 | static struct fc_port *qlt_create_sess(struct scsi_qla_host *vha, |
| 131 | fc_port_t *fcport, bool local); | 131 | fc_port_t *fcport, bool local); |
| 132 | void qlt_unreg_sess(struct fc_port *sess); | 132 | void qlt_unreg_sess(struct fc_port *sess); |
| 133 | static void qlt_24xx_handle_abts(struct scsi_qla_host *, | ||
| 134 | struct abts_recv_from_24xx *); | ||
| 135 | |||
| 133 | /* | 136 | /* |
| 134 | * Global Variables | 137 | * Global Variables |
| 135 | */ | 138 | */ |
| @@ -140,6 +143,20 @@ static struct workqueue_struct *qla_tgt_wq; | |||
| 140 | static DEFINE_MUTEX(qla_tgt_mutex); | 143 | static DEFINE_MUTEX(qla_tgt_mutex); |
| 141 | static LIST_HEAD(qla_tgt_glist); | 144 | static LIST_HEAD(qla_tgt_glist); |
| 142 | 145 | ||
| 146 | static const char *prot_op_str(u32 prot_op) | ||
| 147 | { | ||
| 148 | switch (prot_op) { | ||
| 149 | case TARGET_PROT_NORMAL: return "NORMAL"; | ||
| 150 | case TARGET_PROT_DIN_INSERT: return "DIN_INSERT"; | ||
| 151 | case TARGET_PROT_DOUT_INSERT: return "DOUT_INSERT"; | ||
| 152 | case TARGET_PROT_DIN_STRIP: return "DIN_STRIP"; | ||
| 153 | case TARGET_PROT_DOUT_STRIP: return "DOUT_STRIP"; | ||
| 154 | case TARGET_PROT_DIN_PASS: return "DIN_PASS"; | ||
| 155 | case TARGET_PROT_DOUT_PASS: return "DOUT_PASS"; | ||
| 156 | default: return "UNKNOWN"; | ||
| 157 | } | ||
| 158 | } | ||
| 159 | |||
| 143 | /* This API intentionally takes dest as a parameter, rather than returning | 160 | /* This API intentionally takes dest as a parameter, rather than returning |
| 144 | * int value to avoid caller forgetting to issue wmb() after the store */ | 161 | * int value to avoid caller forgetting to issue wmb() after the store */ |
| 145 | void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) | 162 | void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest) |
| @@ -170,21 +187,23 @@ static inline | |||
| 170 | struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, | 187 | struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, |
| 171 | uint8_t *d_id) | 188 | uint8_t *d_id) |
| 172 | { | 189 | { |
| 173 | struct qla_hw_data *ha = vha->hw; | 190 | struct scsi_qla_host *host; |
| 174 | uint8_t vp_idx; | 191 | uint32_t key = 0; |
| 175 | |||
| 176 | if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0])) | ||
| 177 | return NULL; | ||
| 178 | 192 | ||
| 179 | if (vha->d_id.b.al_pa == d_id[2]) | 193 | if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) && |
| 194 | (vha->d_id.b.al_pa == d_id[2])) | ||
| 180 | return vha; | 195 | return vha; |
| 181 | 196 | ||
| 182 | BUG_ON(ha->tgt.tgt_vp_map == NULL); | 197 | key = (uint32_t)d_id[0] << 16; |
| 183 | vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx; | 198 | key |= (uint32_t)d_id[1] << 8; |
| 184 | if (likely(test_bit(vp_idx, ha->vp_idx_map))) | 199 | key |= (uint32_t)d_id[2]; |
| 185 | return ha->tgt.tgt_vp_map[vp_idx].vha; | ||
| 186 | 200 | ||
| 187 | return NULL; | 201 | host = btree_lookup32(&vha->hw->tgt.host_map, key); |
| 202 | if (!host) | ||
| 203 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, | ||
| 204 | "Unable to find host %06x\n", key); | ||
| 205 | |||
| 206 | return host; | ||
| 188 | } | 207 | } |
| 189 | 208 | ||
| 190 | static inline | 209 | static inline |
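The rewritten qlt_find_host_by_d_id() above drops the per-AL_PA vp_map indexing in favor of a btree keyed on the full 24-bit S_ID, so vports are resolved by their complete address rather than the last byte alone. The key packing, standalone:

	/* Sketch: 32-bit btree key from a 3-byte FC source ID
	 * (d_id[0]=domain, d_id[1]=area, d_id[2]=AL_PA, as above). */
	static inline u32 demo_sid_to_key(const u8 *d_id)
	{
		return ((u32)d_id[0] << 16) |
		       ((u32)d_id[1] << 8)  |
			(u32)d_id[2];
	}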
| @@ -389,6 +408,8 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, | |||
| 389 | (struct abts_recv_from_24xx *)atio; | 408 | (struct abts_recv_from_24xx *)atio; |
| 390 | struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, | 409 | struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha, |
| 391 | entry->vp_index); | 410 | entry->vp_index); |
| 411 | unsigned long flags; | ||
| 412 | |||
| 392 | if (unlikely(!host)) { | 413 | if (unlikely(!host)) { |
| 393 | ql_dbg(ql_dbg_tgt, vha, 0xffff, | 414 | ql_dbg(ql_dbg_tgt, vha, 0xffff, |
| 394 | "qla_target(%d): Response pkt (ABTS_RECV_24XX) " | 415 | "qla_target(%d): Response pkt (ABTS_RECV_24XX) " |
| @@ -396,9 +417,12 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, | |||
| 396 | vha->vp_idx, entry->vp_index); | 417 | vha->vp_idx, entry->vp_index); |
| 397 | break; | 418 | break; |
| 398 | } | 419 | } |
| 399 | qlt_response_pkt(host, (response_t *)atio); | 420 | if (!ha_locked) |
| 421 | spin_lock_irqsave(&host->hw->hardware_lock, flags); | ||
| 422 | qlt_24xx_handle_abts(host, (struct abts_recv_from_24xx *)atio); | ||
| 423 | if (!ha_locked) | ||
| 424 | spin_unlock_irqrestore(&host->hw->hardware_lock, flags); | ||
| 400 | break; | 425 | break; |
| 401 | |||
| 402 | } | 426 | } |
| 403 | 427 | ||
| 404 | /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ | 428 | /* case PUREX_IOCB_TYPE: ql2xmvasynctoatio */ |
| @@ -554,6 +578,7 @@ void qla2x00_async_nack_sp_done(void *s, int res) | |||
| 554 | sp->fcport->login_gen++; | 578 | sp->fcport->login_gen++; |
| 555 | sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; | 579 | sp->fcport->fw_login_state = DSC_LS_PLOGI_COMP; |
| 556 | sp->fcport->logout_on_delete = 1; | 580 | sp->fcport->logout_on_delete = 1; |
| 581 | sp->fcport->plogi_nack_done_deadline = jiffies + HZ; | ||
| 557 | break; | 582 | break; |
| 558 | 583 | ||
| 559 | case SRB_NACK_PRLI: | 584 | case SRB_NACK_PRLI: |
| @@ -613,6 +638,7 @@ int qla24xx_async_notify_ack(scsi_qla_host_t *vha, fc_port_t *fcport, | |||
| 613 | break; | 638 | break; |
| 614 | case SRB_NACK_PRLI: | 639 | case SRB_NACK_PRLI: |
| 615 | fcport->fw_login_state = DSC_LS_PRLI_PEND; | 640 | fcport->fw_login_state = DSC_LS_PRLI_PEND; |
| 641 | fcport->deleted = 0; | ||
| 616 | c = "PRLI"; | 642 | c = "PRLI"; |
| 617 | break; | 643 | break; |
| 618 | case SRB_NACK_LOGO: | 644 | case SRB_NACK_LOGO: |
| @@ -1215,7 +1241,7 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, | |||
| 1215 | } | 1241 | } |
| 1216 | 1242 | ||
| 1217 | /* Get list of logged in devices */ | 1243 | /* Get list of logged in devices */ |
| 1218 | rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries); | 1244 | rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma, &entries); |
| 1219 | if (rc != QLA_SUCCESS) { | 1245 | if (rc != QLA_SUCCESS) { |
| 1220 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, | 1246 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045, |
| 1221 | "qla_target(%d): get_id_list() failed: %x\n", | 1247 | "qla_target(%d): get_id_list() failed: %x\n", |
| @@ -1551,6 +1577,9 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha, | |||
| 1551 | request_t *pkt; | 1577 | request_t *pkt; |
| 1552 | struct nack_to_isp *nack; | 1578 | struct nack_to_isp *nack; |
| 1553 | 1579 | ||
| 1580 | if (!ha->flags.fw_started) | ||
| 1581 | return; | ||
| 1582 | |||
| 1554 | ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); | 1583 | ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha); |
| 1555 | 1584 | ||
| 1556 | /* Send marker if required */ | 1585 | /* Send marker if required */ |
| @@ -2013,6 +2042,70 @@ void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) | |||
| 2013 | } | 2042 | } |
| 2014 | EXPORT_SYMBOL(qlt_free_mcmd); | 2043 | EXPORT_SYMBOL(qlt_free_mcmd); |
| 2015 | 2044 | ||
| 2045 | /* | ||
| 2046 | * ha->hardware_lock supposed to be held on entry. Might drop it, then | ||
| 2047 | * reacquire | ||
| 2048 | */ | ||
| 2049 | void qlt_send_resp_ctio(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd, | ||
| 2050 | uint8_t scsi_status, uint8_t sense_key, uint8_t asc, uint8_t ascq) | ||
| 2051 | { | ||
| 2052 | struct atio_from_isp *atio = &cmd->atio; | ||
| 2053 | struct ctio7_to_24xx *ctio; | ||
| 2054 | uint16_t temp; | ||
| 2055 | |||
| 2056 | ql_dbg(ql_dbg_tgt_dif, vha, 0x3066, | ||
| 2057 | "Sending response CTIO7 (vha=%p, atio=%p, scsi_status=%02x, " | ||
| 2058 | "sense_key=%02x, asc=%02x, ascq=%02x", | ||
| 2059 | vha, atio, scsi_status, sense_key, asc, ascq); | ||
| 2060 | |||
| 2061 | ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL); | ||
| 2062 | if (!ctio) { | ||
| 2063 | ql_dbg(ql_dbg_async, vha, 0x3067, | ||
| 2064 | "qla2x00t(%ld): %s failed: unable to allocate request packet", | ||
| 2065 | vha->host_no, __func__); | ||
| 2066 | goto out; | ||
| 2067 | } | ||
| 2068 | |||
| 2069 | ctio->entry_type = CTIO_TYPE7; | ||
| 2070 | ctio->entry_count = 1; | ||
| 2071 | ctio->handle = QLA_TGT_SKIP_HANDLE; | ||
| 2072 | ctio->nport_handle = cmd->sess->loop_id; | ||
| 2073 | ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); | ||
| 2074 | ctio->vp_index = vha->vp_idx; | ||
| 2075 | ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; | ||
| 2076 | ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; | ||
| 2077 | ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; | ||
| 2078 | ctio->exchange_addr = atio->u.isp24.exchange_addr; | ||
| 2079 | ctio->u.status1.flags = (atio->u.isp24.attr << 9) | | ||
| 2080 | cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS); | ||
| 2081 | temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id); | ||
| 2082 | ctio->u.status1.ox_id = cpu_to_le16(temp); | ||
| 2083 | ctio->u.status1.scsi_status = | ||
| 2084 | cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID | scsi_status); | ||
| 2085 | ctio->u.status1.response_len = cpu_to_le16(18); | ||
| 2086 | ctio->u.status1.residual = cpu_to_le32(get_datalen_for_atio(atio)); | ||
| 2087 | |||
| 2088 | if (ctio->u.status1.residual != 0) | ||
| 2089 | ctio->u.status1.scsi_status |= | ||
| 2090 | cpu_to_le16(SS_RESIDUAL_UNDER); | ||
| 2091 | |||
| 2092 | /* Response code and sense key */ | ||
| 2093 | put_unaligned_le32(((0x70 << 24) | (sense_key << 8)), | ||
| 2094 | (&ctio->u.status1.sense_data)[0]); | ||
| 2095 | /* Additional sense length */ | ||
| 2096 | put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]); | ||
| 2097 | /* ASC and ASCQ */ | ||
| 2098 | put_unaligned_le32(((asc << 24) | (ascq << 16)), | ||
| 2099 | (&ctio->u.status1.sense_data)[3]); | ||
| 2100 | |||
| 2101 | /* Memory Barrier */ | ||
| 2102 | wmb(); | ||
| 2103 | |||
| 2104 | qla2x00_start_iocbs(vha, vha->req); | ||
| 2105 | out: | ||
| 2106 | return; | ||
| 2107 | } | ||
| 2108 | |||
| 2016 | /* callback from target fabric module code */ | 2109 | /* callback from target fabric module code */ |
| 2017 | void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) | 2110 | void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd) |
| 2018 | { | 2111 | { |
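The three put_unaligned_le32() stores in qlt_send_resp_ctio() above encode an 18-byte SPC fixed-format sense payload into the CTIO's sense_data words. For orientation, the logical byte layout being produced (shown as plain byte stores; the word-level packing in the IOCB follows the firmware's conventions):

	/* sense_key/asc/ascq as passed to qlt_send_resp_ctio() */
	unsigned char sense[18] = { 0 };

	sense[0]  = 0x70;	/* current error, fixed format     */
	sense[2]  = sense_key;	/* sense key                       */
	sense[7]  = 0x0a;	/* additional sense length: 10     */
	sense[12] = asc;	/* additional sense code           */
	sense[13] = ascq;	/* additional sense code qualifier */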
| @@ -2261,7 +2354,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm, | |||
| 2261 | */ | 2354 | */ |
| 2262 | return -EAGAIN; | 2355 | return -EAGAIN; |
| 2263 | } else | 2356 | } else |
| 2264 | ha->tgt.cmds[h-1] = prm->cmd; | 2357 | ha->tgt.cmds[h - 1] = prm->cmd; |
| 2265 | 2358 | ||
| 2266 | pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; | 2359 | pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; |
| 2267 | pkt->nport_handle = prm->cmd->loop_id; | 2360 | pkt->nport_handle = prm->cmd->loop_id; |
| @@ -2391,6 +2484,50 @@ static inline int qlt_has_data(struct qla_tgt_cmd *cmd) | |||
| 2391 | return cmd->bufflen > 0; | 2484 | return cmd->bufflen > 0; |
| 2392 | } | 2485 | } |
| 2393 | 2486 | ||
| 2487 | static void qlt_print_dif_err(struct qla_tgt_prm *prm) | ||
| 2488 | { | ||
| 2489 | struct qla_tgt_cmd *cmd; | ||
| 2490 | struct scsi_qla_host *vha; | ||
| 2491 | |||
| 2492 | /* asc 0x10=dif error */ | ||
| 2493 | if (prm->sense_buffer && (prm->sense_buffer[12] == 0x10)) { | ||
| 2494 | cmd = prm->cmd; | ||
| 2495 | vha = cmd->vha; | ||
| 2496 | /* ASCQ */ | ||
| 2497 | switch (prm->sense_buffer[13]) { | ||
| 2498 | case 1: | ||
| 2499 | ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, | ||
| 2500 | "BE detected Guard TAG ERR: lba[0x%llx|%lld] len[0x%x] " | ||
| 2501 | "se_cmd=%p tag[%x]", | ||
| 2502 | cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, | ||
| 2503 | cmd->atio.u.isp24.exchange_addr); | ||
| 2504 | break; | ||
| 2505 | case 2: | ||
| 2506 | ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, | ||
| 2507 | "BE detected APP TAG ERR: lba[0x%llx|%lld] len[0x%x] " | ||
| 2508 | "se_cmd=%p tag[%x]", | ||
| 2509 | cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, | ||
| 2510 | cmd->atio.u.isp24.exchange_addr); | ||
| 2511 | break; | ||
| 2512 | case 3: | ||
| 2513 | ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, | ||
| 2514 | "BE detected REF TAG ERR: lba[0x%llx|%lld] len[0x%x] " | ||
| 2515 | "se_cmd=%p tag[%x]", | ||
| 2516 | cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, | ||
| 2517 | cmd->atio.u.isp24.exchange_addr); | ||
| 2518 | break; | ||
| 2519 | default: | ||
| 2520 | ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, | ||
| 2521 | "BE detected Dif ERR: lba[%llx|%lld] len[%x] " | ||
| 2522 | "se_cmd=%p tag[%x]", | ||
| 2523 | cmd->lba, cmd->lba, cmd->num_blks, &cmd->se_cmd, | ||
| 2524 | cmd->atio.u.isp24.exchange_addr); | ||
| 2525 | break; | ||
| 2526 | } | ||
| 2527 | ql_dump_buffer(ql_dbg_tgt_dif, vha, 0xffff, cmd->cdb, 16); | ||
| 2528 | } | ||
| 2529 | } | ||
| 2530 | |||
| 2394 | /* | 2531 | /* |
| 2395 | * Called without ha->hardware_lock held | 2532 | * Called without ha->hardware_lock held |
| 2396 | */ | 2533 | */ |
| @@ -2512,18 +2649,9 @@ skip_explict_conf: | |||
| 2512 | for (i = 0; i < prm->sense_buffer_len/4; i++) | 2649 | for (i = 0; i < prm->sense_buffer_len/4; i++) |
| 2513 | ((uint32_t *)ctio->u.status1.sense_data)[i] = | 2650 | ((uint32_t *)ctio->u.status1.sense_data)[i] = |
| 2514 | cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); | 2651 | cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]); |
| 2515 | #if 0 | 2652 | |
| 2516 | if (unlikely((prm->sense_buffer_len % 4) != 0)) { | 2653 | qlt_print_dif_err(prm); |
| 2517 | static int q; | 2654 | |
| 2518 | if (q < 10) { | ||
| 2519 | ql_dbg(ql_dbg_tgt, vha, 0xe04f, | ||
| 2520 | "qla_target(%d): %d bytes of sense " | ||
| 2521 | "lost", prm->tgt->ha->vp_idx, | ||
| 2522 | prm->sense_buffer_len % 4); | ||
| 2523 | q++; | ||
| 2524 | } | ||
| 2525 | } | ||
| 2526 | #endif | ||
| 2527 | } else { | 2655 | } else { |
| 2528 | ctio->u.status1.flags &= | 2656 | ctio->u.status1.flags &= |
| 2529 | ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); | 2657 | ~cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0); |
| @@ -2537,19 +2665,9 @@ skip_explict_conf: | |||
| 2537 | /* Sense with len > 24, is it possible ??? */ | 2665 | /* Sense with len > 24, is it possible ??? */ |
| 2538 | } | 2666 | } |
| 2539 | 2667 | ||
| 2540 | |||
| 2541 | |||
| 2542 | /* diff */ | ||
| 2543 | static inline int | 2668 | static inline int |
| 2544 | qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) | 2669 | qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) |
| 2545 | { | 2670 | { |
| 2546 | /* | ||
| 2547 | * Uncomment when corresponding SCSI changes are done. | ||
| 2548 | * | ||
| 2549 | if (!sp->cmd->prot_chk) | ||
| 2550 | return 0; | ||
| 2551 | * | ||
| 2552 | */ | ||
| 2553 | switch (se_cmd->prot_op) { | 2671 | switch (se_cmd->prot_op) { |
| 2554 | case TARGET_PROT_DOUT_INSERT: | 2672 | case TARGET_PROT_DOUT_INSERT: |
| 2555 | case TARGET_PROT_DIN_STRIP: | 2673 | case TARGET_PROT_DIN_STRIP: |
| @@ -2570,16 +2688,38 @@ qlt_hba_err_chk_enabled(struct se_cmd *se_cmd) | |||
| 2570 | return 0; | 2688 | return 0; |
| 2571 | } | 2689 | } |
| 2572 | 2690 | ||
| 2691 | static inline int | ||
| 2692 | qla_tgt_ref_mask_check(struct se_cmd *se_cmd) | ||
| 2693 | { | ||
| 2694 | switch (se_cmd->prot_op) { | ||
| 2695 | case TARGET_PROT_DIN_INSERT: | ||
| 2696 | case TARGET_PROT_DOUT_INSERT: | ||
| 2697 | case TARGET_PROT_DIN_STRIP: | ||
| 2698 | case TARGET_PROT_DOUT_STRIP: | ||
| 2699 | case TARGET_PROT_DIN_PASS: | ||
| 2700 | case TARGET_PROT_DOUT_PASS: | ||
| 2701 | return 1; | ||
| 2702 | default: | ||
| 2703 | return 0; | ||
| 2704 | } | ||
| 2705 | return 0; | ||
| 2706 | } | ||
| 2707 | |||
| 2573 | /* | 2708 | /* |
| 2574 | * qla24xx_set_t10dif_tags_from_cmd - Extract Ref and App tags from SCSI command | 2709 | * qla_tgt_set_dif_tags - Extract Ref and App tags from SCSI command |
| 2575 | * | ||
| 2576 | */ | 2710 | */ |
| 2577 | static inline void | 2711 | static void |
| 2578 | qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) | 2712 | qla_tgt_set_dif_tags(struct qla_tgt_cmd *cmd, struct crc_context *ctx, |
| 2713 | uint16_t *pfw_prot_opts) | ||
| 2579 | { | 2714 | { |
| 2715 | struct se_cmd *se_cmd = &cmd->se_cmd; | ||
| 2580 | uint32_t lba = 0xffffffff & se_cmd->t_task_lba; | 2716 | uint32_t lba = 0xffffffff & se_cmd->t_task_lba; |
| 2717 | scsi_qla_host_t *vha = cmd->tgt->vha; | ||
| 2718 | struct qla_hw_data *ha = vha->hw; | ||
| 2719 | uint32_t t32 = 0; | ||
| 2581 | 2720 | ||
| 2582 | /* wait til Mode Sense/Select cmd, modepage Ah, subpage 2 | 2721 | /* |
| 2722 | * wait till Mode Sense/Select cmd, modepage Ah, subpage 2 | ||
| 2583 | * have been implemented by TCM, before AppTag is available. | 2723 | * have been implemented by TCM, before AppTag is available. |
| 2584 | * Look for modesense_handlers[] | 2724 | * Look for modesense_handlers[] |
| 2585 | */ | 2725 | */ |
| @@ -2587,65 +2727,73 @@ qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx) | |||
| 2587 | ctx->app_tag_mask[0] = 0x0; | 2727 | ctx->app_tag_mask[0] = 0x0; |
| 2588 | ctx->app_tag_mask[1] = 0x0; | 2728 | ctx->app_tag_mask[1] = 0x0; |
| 2589 | 2729 | ||
| 2730 | if (IS_PI_UNINIT_CAPABLE(ha)) { | ||
| 2731 | if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) || | ||
| 2732 | (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)) | ||
| 2733 | *pfw_prot_opts |= PO_DIS_VALD_APP_ESC; | ||
| 2734 | else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT) | ||
| 2735 | *pfw_prot_opts |= PO_DIS_VALD_APP_REF_ESC; | ||
| 2736 | } | ||
| 2737 | |||
| 2738 | t32 = ha->tgt.tgt_ops->get_dif_tags(cmd, pfw_prot_opts); | ||
| 2739 | |||
| 2590 | switch (se_cmd->prot_type) { | 2740 | switch (se_cmd->prot_type) { |
| 2591 | case TARGET_DIF_TYPE0_PROT: | 2741 | case TARGET_DIF_TYPE0_PROT: |
| 2592 | /* | 2742 | /* |
| 2593 | * No check for ql2xenablehba_err_chk, as it would be an | 2743 | * No check for ql2xenablehba_err_chk, as it |
| 2594 | * I/O error if hba tag generation is not done. | 2744 | * would be an I/O error if hba tag generation |
| 2745 | * is not done. | ||
| 2595 | */ | 2746 | */ |
| 2596 | ctx->ref_tag = cpu_to_le32(lba); | 2747 | ctx->ref_tag = cpu_to_le32(lba); |
| 2597 | |||
| 2598 | if (!qlt_hba_err_chk_enabled(se_cmd)) | ||
| 2599 | break; | ||
| 2600 | |||
| 2601 | /* enable ALL bytes of the ref tag */ | 2748 | /* enable ALL bytes of the ref tag */ |
| 2602 | ctx->ref_tag_mask[0] = 0xff; | 2749 | ctx->ref_tag_mask[0] = 0xff; |
| 2603 | ctx->ref_tag_mask[1] = 0xff; | 2750 | ctx->ref_tag_mask[1] = 0xff; |
| 2604 | ctx->ref_tag_mask[2] = 0xff; | 2751 | ctx->ref_tag_mask[2] = 0xff; |
| 2605 | ctx->ref_tag_mask[3] = 0xff; | 2752 | ctx->ref_tag_mask[3] = 0xff; |
| 2606 | break; | 2753 | break; |
| 2607 | /* | ||
| 2608 | * For TYpe 1 protection: 16 bit GUARD tag, 32 bit REF tag, and | ||
| 2609 | * 16 bit app tag. | ||
| 2610 | */ | ||
| 2611 | case TARGET_DIF_TYPE1_PROT: | 2754 | case TARGET_DIF_TYPE1_PROT: |
| 2612 | ctx->ref_tag = cpu_to_le32(lba); | 2755 | /* |
| 2613 | 2756 | * For TYPE 1 protection: 16 bit GUARD tag, 32 bit | |
| 2614 | if (!qlt_hba_err_chk_enabled(se_cmd)) | 2757 | * REF tag, and 16 bit app tag. |
| 2615 | break; | 2758 | */ |
| 2616 | 2759 | ctx->ref_tag = cpu_to_le32(lba); | |
| 2617 | /* enable ALL bytes of the ref tag */ | 2760 | if (!qla_tgt_ref_mask_check(se_cmd) || |
| 2618 | ctx->ref_tag_mask[0] = 0xff; | 2761 | !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { |
| 2619 | ctx->ref_tag_mask[1] = 0xff; | 2762 | *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; |
| 2620 | ctx->ref_tag_mask[2] = 0xff; | 2763 | break; |
| 2621 | ctx->ref_tag_mask[3] = 0xff; | 2764 | } |
| 2622 | break; | 2765 | /* enable ALL bytes of the ref tag */ |
| 2623 | /* | 2766 | ctx->ref_tag_mask[0] = 0xff; |
| 2624 | * For TYPE 2 protection: 16 bit GUARD + 32 bit REF tag has to | 2767 | ctx->ref_tag_mask[1] = 0xff; |
| 2625 | * match LBA in CDB + N | 2768 | ctx->ref_tag_mask[2] = 0xff; |
| 2626 | */ | 2769 | ctx->ref_tag_mask[3] = 0xff; |
| 2770 | break; | ||
| 2627 | case TARGET_DIF_TYPE2_PROT: | 2771 | case TARGET_DIF_TYPE2_PROT: |
| 2628 | ctx->ref_tag = cpu_to_le32(lba); | 2772 | /* |
| 2629 | 2773 | * For TYPE 2 protection: 16 bit GUARD + 32 bit REF | |
| 2630 | if (!qlt_hba_err_chk_enabled(se_cmd)) | 2774 | * tag has to match LBA in CDB + N |
| 2631 | break; | 2775 | */ |
| 2632 | 2776 | ctx->ref_tag = cpu_to_le32(lba); | |
| 2633 | /* enable ALL bytes of the ref tag */ | 2777 | if (!qla_tgt_ref_mask_check(se_cmd) || |
| 2634 | ctx->ref_tag_mask[0] = 0xff; | 2778 | !(ha->tgt.tgt_ops->chk_dif_tags(t32))) { |
| 2635 | ctx->ref_tag_mask[1] = 0xff; | 2779 | *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; |
| 2636 | ctx->ref_tag_mask[2] = 0xff; | 2780 | break; |
| 2637 | ctx->ref_tag_mask[3] = 0xff; | 2781 | } |
| 2638 | break; | 2782 | /* enable ALL bytes of the ref tag */ |
| 2639 | 2783 | ctx->ref_tag_mask[0] = 0xff; | |
| 2640 | /* For Type 3 protection: 16 bit GUARD only */ | 2784 | ctx->ref_tag_mask[1] = 0xff; |
| 2785 | ctx->ref_tag_mask[2] = 0xff; | ||
| 2786 | ctx->ref_tag_mask[3] = 0xff; | ||
| 2787 | break; | ||
| 2641 | case TARGET_DIF_TYPE3_PROT: | 2788 | case TARGET_DIF_TYPE3_PROT: |
| 2642 | ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = | 2789 | /* For TYPE 3 protection: 16 bit GUARD only */ |
| 2643 | ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; | 2790 | *pfw_prot_opts |= PO_DIS_REF_TAG_VALD; |
| 2644 | break; | 2791 | ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] = |
| 2792 | ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00; | ||
| 2793 | break; | ||
| 2645 | } | 2794 | } |
| 2646 | } | 2795 | } |
| 2647 | 2796 | ||
| 2648 | |||
| 2649 | static inline int | 2797 | static inline int |
| 2650 | qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) | 2798 | qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) |
| 2651 | { | 2799 | { |
| @@ -2664,6 +2812,7 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) | |||
| 2664 | struct se_cmd *se_cmd = &cmd->se_cmd; | 2812 | struct se_cmd *se_cmd = &cmd->se_cmd; |
| 2665 | uint32_t h; | 2813 | uint32_t h; |
| 2666 | struct atio_from_isp *atio = &prm->cmd->atio; | 2814 | struct atio_from_isp *atio = &prm->cmd->atio; |
| 2815 | struct qla_tc_param tc; | ||
| 2667 | uint16_t t16; | 2816 | uint16_t t16; |
| 2668 | 2817 | ||
| 2669 | ha = vha->hw; | 2818 | ha = vha->hw; |
| @@ -2689,16 +2838,15 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) | |||
| 2689 | case TARGET_PROT_DIN_INSERT: | 2838 | case TARGET_PROT_DIN_INSERT: |
| 2690 | case TARGET_PROT_DOUT_STRIP: | 2839 | case TARGET_PROT_DOUT_STRIP: |
| 2691 | transfer_length = data_bytes; | 2840 | transfer_length = data_bytes; |
| 2692 | data_bytes += dif_bytes; | 2841 | if (cmd->prot_sg_cnt) |
| 2842 | data_bytes += dif_bytes; | ||
| 2693 | break; | 2843 | break; |
| 2694 | |||
| 2695 | case TARGET_PROT_DIN_STRIP: | 2844 | case TARGET_PROT_DIN_STRIP: |
| 2696 | case TARGET_PROT_DOUT_INSERT: | 2845 | case TARGET_PROT_DOUT_INSERT: |
| 2697 | case TARGET_PROT_DIN_PASS: | 2846 | case TARGET_PROT_DIN_PASS: |
| 2698 | case TARGET_PROT_DOUT_PASS: | 2847 | case TARGET_PROT_DOUT_PASS: |
| 2699 | transfer_length = data_bytes + dif_bytes; | 2848 | transfer_length = data_bytes + dif_bytes; |
| 2700 | break; | 2849 | break; |
| 2701 | |||
| 2702 | default: | 2850 | default: |
| 2703 | BUG(); | 2851 | BUG(); |
| 2704 | break; | 2852 | break; |
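The dif_bytes term in the transfer-length math above represents the T10 protection bytes — 8 per logical block in the standard PI layout. A worked example under an assumed 512-byte block size:

	u32 data_bytes = 4096;				/* one 4 KiB transfer */
	u32 blk_sz     = 512;				/* assumed block size */
	u32 dif_bytes  = (data_bytes / blk_sz) * 8;	/* 8 blocks * 8 = 64  */

	/* DIN_INSERT / DOUT_STRIP: FC transfer length is data only */
	u32 xfer_insert = data_bytes;			/* 4096 */
	/* DIN_STRIP / DOUT_INSERT / *_PASS: PI travels on the wire too */
	u32 xfer_pass   = data_bytes + dif_bytes;	/* 4160 */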
| @@ -2734,7 +2882,6 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) | |||
| 2734 | break; | 2882 | break; |
| 2735 | } | 2883 | } |
| 2736 | 2884 | ||
| 2737 | |||
| 2738 | /* ---- PKT ---- */ | 2885 | /* ---- PKT ---- */ |
| 2739 | /* Update entry type to indicate Command Type CRC_2 IOCB */ | 2886 | /* Update entry type to indicate Command Type CRC_2 IOCB */ |
| 2740 | pkt->entry_type = CTIO_CRC2; | 2887 | pkt->entry_type = CTIO_CRC2; |
| @@ -2752,9 +2899,8 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) | |||
| 2752 | } else | 2899 | } else |
| 2753 | ha->tgt.cmds[h-1] = prm->cmd; | 2900 | ha->tgt.cmds[h-1] = prm->cmd; |
| 2754 | 2901 | ||
| 2755 | |||
| 2756 | pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; | 2902 | pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK; |
| 2757 | pkt->nport_handle = prm->cmd->loop_id; | 2903 | pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); |
| 2758 | pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); | 2904 | pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); |
| 2759 | pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; | 2905 | pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; |
| 2760 | pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; | 2906 | pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; |
| @@ -2775,12 +2921,10 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) | |||
| 2775 | else if (cmd->dma_data_direction == DMA_FROM_DEVICE) | 2921 | else if (cmd->dma_data_direction == DMA_FROM_DEVICE) |
| 2776 | pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); | 2922 | pkt->flags = cpu_to_le16(CTIO7_FLAGS_DATA_OUT); |
| 2777 | 2923 | ||
| 2778 | |||
| 2779 | pkt->dseg_count = prm->tot_dsds; | 2924 | pkt->dseg_count = prm->tot_dsds; |
| 2780 | /* Fibre channel byte count */ | 2925 | /* Fibre channel byte count */ |
| 2781 | pkt->transfer_length = cpu_to_le32(transfer_length); | 2926 | pkt->transfer_length = cpu_to_le32(transfer_length); |
| 2782 | 2927 | ||
| 2783 | |||
| 2784 | /* ----- CRC context -------- */ | 2928 | /* ----- CRC context -------- */ |
| 2785 | 2929 | ||
| 2786 | /* Allocate CRC context from global pool */ | 2930 | /* Allocate CRC context from global pool */ |
| @@ -2800,13 +2944,12 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) | |||
| 2800 | /* Set handle */ | 2944 | /* Set handle */ |
| 2801 | crc_ctx_pkt->handle = pkt->handle; | 2945 | crc_ctx_pkt->handle = pkt->handle; |
| 2802 | 2946 | ||
| 2803 | qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt); | 2947 | qla_tgt_set_dif_tags(cmd, crc_ctx_pkt, &fw_prot_opts); |
| 2804 | 2948 | ||
| 2805 | pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); | 2949 | pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma)); |
| 2806 | pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); | 2950 | pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma)); |
| 2807 | pkt->crc_context_len = CRC_CONTEXT_LEN_FW; | 2951 | pkt->crc_context_len = CRC_CONTEXT_LEN_FW; |
| 2808 | 2952 | ||
| 2809 | |||
| 2810 | if (!bundling) { | 2953 | if (!bundling) { |
| 2811 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; | 2954 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address; |
| 2812 | } else { | 2955 | } else { |
| @@ -2827,16 +2970,24 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) | |||
| 2827 | crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); | 2970 | crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes); |
| 2828 | crc_ctx_pkt->guard_seed = cpu_to_le16(0); | 2971 | crc_ctx_pkt->guard_seed = cpu_to_le16(0); |
| 2829 | 2972 | ||
| 2973 | memset((uint8_t *)&tc, 0 , sizeof(tc)); | ||
| 2974 | tc.vha = vha; | ||
| 2975 | tc.blk_sz = cmd->blk_sz; | ||
| 2976 | tc.bufflen = cmd->bufflen; | ||
| 2977 | tc.sg = cmd->sg; | ||
| 2978 | tc.prot_sg = cmd->prot_sg; | ||
| 2979 | tc.ctx = crc_ctx_pkt; | ||
| 2980 | tc.ctx_dsd_alloced = &cmd->ctx_dsd_alloced; | ||
| 2830 | 2981 | ||
| 2831 | /* Walks data segments */ | 2982 | /* Walks data segments */ |
| 2832 | pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); | 2983 | pkt->flags |= cpu_to_le16(CTIO7_FLAGS_DSD_PTR); |
| 2833 | 2984 | ||
| 2834 | if (!bundling && prm->prot_seg_cnt) { | 2985 | if (!bundling && prm->prot_seg_cnt) { |
| 2835 | if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, | 2986 | if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd, |
| 2836 | prm->tot_dsds, cmd)) | 2987 | prm->tot_dsds, &tc)) |
| 2837 | goto crc_queuing_error; | 2988 | goto crc_queuing_error; |
| 2838 | } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, | 2989 | } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd, |
| 2839 | (prm->tot_dsds - prm->prot_seg_cnt), cmd)) | 2990 | (prm->tot_dsds - prm->prot_seg_cnt), &tc)) |
| 2840 | goto crc_queuing_error; | 2991 | goto crc_queuing_error; |
| 2841 | 2992 | ||
| 2842 | if (bundling && prm->prot_seg_cnt) { | 2993 | if (bundling && prm->prot_seg_cnt) { |
| @@ -2845,18 +2996,18 @@ qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha) | |||
| 2845 | 2996 | ||
| 2846 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; | 2997 | cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address; |
| 2847 | if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, | 2998 | if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd, |
| 2848 | prm->prot_seg_cnt, cmd)) | 2999 | prm->prot_seg_cnt, &tc)) |
| 2849 | goto crc_queuing_error; | 3000 | goto crc_queuing_error; |
| 2850 | } | 3001 | } |
| 2851 | return QLA_SUCCESS; | 3002 | return QLA_SUCCESS; |
| 2852 | 3003 | ||
| 2853 | crc_queuing_error: | 3004 | crc_queuing_error: |
| 2854 | /* Cleanup will be performed by the caller */ | 3005 | /* Cleanup will be performed by the caller */ |
| 3006 | vha->hw->tgt.cmds[h - 1] = NULL; | ||
| 2855 | 3007 | ||
| 2856 | return QLA_FUNCTION_FAILED; | 3008 | return QLA_FUNCTION_FAILED; |
| 2857 | } | 3009 | } |
| 2858 | 3010 | ||
| 2859 | |||
| 2860 | /* | 3011 | /* |
| 2861 | * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and | 3012 | * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and |
| 2862 | * QLA_TGT_XMIT_STATUS for >= 24xx silicon | 3013 | * QLA_TGT_XMIT_STATUS for >= 24xx silicon |
| @@ -2906,7 +3057,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, | |||
| 2906 | else | 3057 | else |
| 2907 | vha->tgt_counters.core_qla_que_buf++; | 3058 | vha->tgt_counters.core_qla_que_buf++; |
| 2908 | 3059 | ||
| 2909 | if (!vha->flags.online || cmd->reset_count != ha->chip_reset) { | 3060 | if (!ha->flags.fw_started || cmd->reset_count != ha->chip_reset) { |
| 2910 | /* | 3061 | /* |
| 2911 | * Either the port is not online or this request was from | 3062 | * Either the port is not online or this request was from |
| 2912 | * previous life, just abort the processing. | 3063 | * previous life, just abort the processing. |
| @@ -3047,7 +3198,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd) | |||
| 3047 | 3198 | ||
| 3048 | spin_lock_irqsave(&ha->hardware_lock, flags); | 3199 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 3049 | 3200 | ||
| 3050 | if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) || | 3201 | if (!ha->flags.fw_started || (cmd->reset_count != ha->chip_reset) || |
| 3051 | (cmd->sess && cmd->sess->deleted)) { | 3202 | (cmd->sess && cmd->sess->deleted)) { |
| 3052 | /* | 3203 | /* |
| 3053 | * Either the port is not online or this request was from | 3204 | * Either the port is not online or this request was from |
| @@ -3104,139 +3255,113 @@ EXPORT_SYMBOL(qlt_rdy_to_xfer); | |||
| 3104 | 3255 | ||
| 3105 | 3256 | ||
| 3106 | /* | 3257 | /* |
| 3107 | * Checks the guard or meta-data for the type of error | 3258 | * It is assumed either hardware_lock or qpair lock is held. |
| 3108 | * detected by the HBA. | ||
| 3109 | */ | 3259 | */ |
| 3110 | static inline int | 3260 | static void |
| 3111 | qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, | 3261 | qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd, |
| 3112 | struct ctio_crc_from_fw *sts) | 3262 | struct ctio_crc_from_fw *sts) |
| 3113 | { | 3263 | { |
| 3114 | uint8_t *ap = &sts->actual_dif[0]; | 3264 | uint8_t *ap = &sts->actual_dif[0]; |
| 3115 | uint8_t *ep = &sts->expected_dif[0]; | 3265 | uint8_t *ep = &sts->expected_dif[0]; |
| 3116 | uint32_t e_ref_tag, a_ref_tag; | ||
| 3117 | uint16_t e_app_tag, a_app_tag; | ||
| 3118 | uint16_t e_guard, a_guard; | ||
| 3119 | uint64_t lba = cmd->se_cmd.t_task_lba; | 3266 | uint64_t lba = cmd->se_cmd.t_task_lba; |
| 3267 | uint8_t scsi_status, sense_key, asc, ascq; | ||
| 3268 | unsigned long flags; | ||
| 3120 | 3269 | ||
| 3121 | a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); | 3270 | cmd->trc_flags |= TRC_DIF_ERR; |
| 3122 | a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); | ||
| 3123 | a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); | ||
| 3124 | |||
| 3125 | e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); | ||
| 3126 | e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); | ||
| 3127 | e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); | ||
| 3128 | |||
| 3129 | ql_dbg(ql_dbg_tgt, vha, 0xe075, | ||
| 3130 | "iocb(s) %p Returned STATUS.\n", sts); | ||
| 3131 | |||
| 3132 | ql_dbg(ql_dbg_tgt, vha, 0xf075, | ||
| 3133 | "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n", | ||
| 3134 | cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, | ||
| 3135 | a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard); | ||
| 3136 | |||
| 3137 | /* | ||
| 3138 | * Ignore sector if: | ||
| 3139 | * For type 3: ref & app tag is all 'f's | ||
| 3140 | * For type 0,1,2: app tag is all 'f's | ||
| 3141 | */ | ||
| 3142 | if ((a_app_tag == 0xffff) && | ||
| 3143 | ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) || | ||
| 3144 | (a_ref_tag == 0xffffffff))) { | ||
| 3145 | uint32_t blocks_done; | ||
| 3146 | |||
| 3147 | /* 2TB boundary case covered automatically with this */ | ||
| 3148 | blocks_done = e_ref_tag - (uint32_t)lba + 1; | ||
| 3149 | cmd->se_cmd.bad_sector = e_ref_tag; | ||
| 3150 | cmd->se_cmd.pi_err = 0; | ||
| 3151 | ql_dbg(ql_dbg_tgt, vha, 0xf074, | ||
| 3152 | "need to return scsi good\n"); | ||
| 3153 | |||
| 3154 | /* Update protection tag */ | ||
| 3155 | if (cmd->prot_sg_cnt) { | ||
| 3156 | uint32_t i, k = 0, num_ent; | ||
| 3157 | struct scatterlist *sg, *sgl; | ||
| 3158 | |||
| 3159 | |||
| 3160 | sgl = cmd->prot_sg; | ||
| 3161 | |||
| 3162 | /* Patch the corresponding protection tags */ | ||
| 3163 | for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) { | ||
| 3164 | num_ent = sg_dma_len(sg) / 8; | ||
| 3165 | if (k + num_ent < blocks_done) { | ||
| 3166 | k += num_ent; | ||
| 3167 | continue; | ||
| 3168 | } | ||
| 3169 | k = blocks_done; | ||
| 3170 | break; | ||
| 3171 | } | ||
| 3172 | 3271 | ||
| 3173 | if (k != blocks_done) { | 3272 | cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); |
| 3174 | ql_log(ql_log_warn, vha, 0xf076, | 3273 | cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); |
| 3175 | "unexpected tag values tag:lba=%u:%llu)\n", | 3274 | cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); |
| 3176 | e_ref_tag, (unsigned long long)lba); | ||
| 3177 | goto out; | ||
| 3178 | } | ||
| 3179 | 3275 | ||
| 3180 | #if 0 | 3276 | cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); |
| 3181 | struct sd_dif_tuple *spt; | 3277 | cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); |
| 3182 | /* TODO: | 3278 | cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); |
| 3183 | * This section came from initiator. Is it valid here? | ||
| 3184 | * should ulp be override with actual val??? | ||
| 3185 | */ | ||
| 3186 | spt = page_address(sg_page(sg)) + sg->offset; | ||
| 3187 | spt += j; | ||
| 3188 | 3279 | ||
| 3189 | spt->app_tag = 0xffff; | 3280 | ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, |
| 3190 | if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3) | 3281 | "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); |
| 3191 | spt->ref_tag = 0xffffffff; | ||
| 3192 | #endif | ||
| 3193 | } | ||
| 3194 | 3282 | ||
| 3195 | return 0; | 3283 | scsi_status = sense_key = asc = ascq = 0; |
| 3196 | } | ||
| 3197 | 3284 | ||
| 3198 | /* check guard */ | 3285 | /* check appl tag */ |
| 3199 | if (e_guard != a_guard) { | 3286 | if (cmd->e_app_tag != cmd->a_app_tag) { |
| 3200 | cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; | 3287 | ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, |
| 3201 | cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; | 3288 | "App Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " |
| 3202 | 3289 | "Ref[%x|%x], App[%x|%x], " | |
| 3203 | ql_log(ql_log_warn, vha, 0xe076, | 3290 | "Guard [%x|%x] cmd=%p ox_id[%04x]", |
| 3204 | "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", | 3291 | cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, |
| 3205 | cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, | 3292 | cmd->a_ref_tag, cmd->e_ref_tag, |
| 3206 | a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, | 3293 | cmd->a_app_tag, cmd->e_app_tag, |
| 3207 | a_guard, e_guard, cmd); | 3294 | cmd->a_guard, cmd->e_guard, |
| 3208 | goto out; | 3295 | cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); |
| 3296 | |||
| 3297 | cmd->dif_err_code = DIF_ERR_APP; | ||
| 3298 | scsi_status = SAM_STAT_CHECK_CONDITION; | ||
| 3299 | sense_key = ABORTED_COMMAND; | ||
| 3300 | asc = 0x10; | ||
| 3301 | ascq = 0x2; | ||
| 3209 | } | 3302 | } |
| 3210 | 3303 | ||
| 3211 | /* check ref tag */ | 3304 | /* check ref tag */ |
| 3212 | if (e_ref_tag != a_ref_tag) { | 3305 | if (cmd->e_ref_tag != cmd->a_ref_tag) { |
| 3213 | cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; | 3306 | ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, |
| 3214 | cmd->se_cmd.bad_sector = e_ref_tag; | 3307 | "Ref Tag ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " |
| 3215 | 3308 | "Ref[%x|%x], App[%x|%x], " | |
| 3216 | ql_log(ql_log_warn, vha, 0xe077, | 3309 | "Guard[%x|%x] cmd=%p ox_id[%04x] ", |
| 3217 | "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", | 3310 | cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, |
| 3218 | cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, | 3311 | cmd->a_ref_tag, cmd->e_ref_tag, |
| 3219 | a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, | 3312 | cmd->a_app_tag, cmd->e_app_tag, |
| 3220 | a_guard, e_guard, cmd); | 3313 | cmd->a_guard, cmd->e_guard, |
| 3314 | cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); | ||
| 3315 | |||
| 3316 | cmd->dif_err_code = DIF_ERR_REF; | ||
| 3317 | scsi_status = SAM_STAT_CHECK_CONDITION; | ||
| 3318 | sense_key = ABORTED_COMMAND; | ||
| 3319 | asc = 0x10; | ||
| 3320 | ascq = 0x3; | ||
| 3221 | goto out; | 3321 | goto out; |
| 3222 | } | 3322 | } |
| 3223 | 3323 | ||
| 3224 | /* check appl tag */ | 3324 | /* check guard */ |
| 3225 | if (e_app_tag != a_app_tag) { | 3325 | if (cmd->e_guard != cmd->a_guard) { |
| 3226 | cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; | 3326 | ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, |
| 3227 | cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba; | 3327 | "Guard ERR: cdb[%x] lba[%llx %llx] blks[%x] [Actual|Expected] " |
| 3228 | 3328 | "Ref[%x|%x], App[%x|%x], " | |
| 3229 | ql_log(ql_log_warn, vha, 0xe078, | 3329 | "Guard [%x|%x] cmd=%p ox_id[%04x]", |
| 3230 | "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n", | 3330 | cmd->cdb[0], lba, (lba+cmd->num_blks), cmd->num_blks, |
| 3231 | cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba, | 3331 | cmd->a_ref_tag, cmd->e_ref_tag, |
| 3232 | a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, | 3332 | cmd->a_app_tag, cmd->e_app_tag, |
| 3233 | a_guard, e_guard, cmd); | 3333 | cmd->a_guard, cmd->e_guard, |
| 3234 | goto out; | 3334 | cmd, cmd->atio.u.isp24.fcp_hdr.ox_id); |
| 3335 | cmd->dif_err_code = DIF_ERR_GRD; | ||
| 3336 | scsi_status = SAM_STAT_CHECK_CONDITION; | ||
| 3337 | sense_key = ABORTED_COMMAND; | ||
| 3338 | asc = 0x10; | ||
| 3339 | ascq = 0x1; | ||
| 3235 | } | 3340 | } |
| 3236 | out: | 3341 | out: |
| 3237 | return 1; | 3342 | switch (cmd->state) { |
| 3238 | } | 3343 | case QLA_TGT_STATE_NEED_DATA: |
| 3344 | /* handle_data will load DIF error code */ | ||
| 3345 | cmd->state = QLA_TGT_STATE_DATA_IN; | ||
| 3346 | vha->hw->tgt.tgt_ops->handle_data(cmd); | ||
| 3347 | break; | ||
| 3348 | default: | ||
| 3349 | spin_lock_irqsave(&cmd->cmd_lock, flags); | ||
| 3350 | if (cmd->aborted) { | ||
| 3351 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 3352 | vha->hw->tgt.tgt_ops->free_cmd(cmd); | ||
| 3353 | break; | ||
| 3354 | } | ||
| 3355 | spin_unlock_irqrestore(&cmd->cmd_lock, flags); | ||
| 3239 | 3356 | ||
| 3357 | qlt_send_resp_ctio(vha, cmd, scsi_status, sense_key, asc, ascq); | ||
| 3358 | /* Assume SCSI status gets out on the wire. | ||
| 3359 | * Will not wait for completion. | ||
| 3360 | */ | ||
| 3361 | vha->hw->tgt.tgt_ops->free_cmd(cmd); | ||
| 3362 | break; | ||
| 3363 | } | ||
| 3364 | } | ||
| 3240 | 3365 | ||
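The rewritten qlt_handle_dif_error() no longer returns a verdict to the CTIO completion path; it latches the actual/expected tags on the command and answers with a CHECK CONDITION built from the T10 protection-information sense family (ABORTED COMMAND, ASC 0x10). A condensed restatement of the mapping coded above; the helper name is illustrative only:

    /* T10 protection failures share ASC 0x10; the ASCQ selects the tag,
     * with the same values the function above assigns. */
    static void dif_err_to_sense(int8_t dif_err_code, uint8_t *asc, uint8_t *ascq)
    {
            *asc = 0x10;
            switch (dif_err_code) {
            case DIF_ERR_GRD:
                    *ascq = 0x1;    /* LOGICAL BLOCK GUARD CHECK FAILED */
                    break;
            case DIF_ERR_APP:
                    *ascq = 0x2;    /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
                    break;
            case DIF_ERR_REF:
                    *ascq = 0x3;    /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
                    break;
            default:
                    *asc = *ascq = 0;       /* DIF_ERR_NONE: no sense data */
                    break;
            }
    }

For WRITEs still in QLA_TGT_STATE_NEED_DATA the error is instead carried in cmd->dif_err_code and surfaced by the fabric module's handle_data() path; the matching consumer is the tcm_qla2xxx_handle_data_work() hunk later in this diff.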
| 3241 | /* If hardware_lock held on entry, might drop it, then reacquire */ | 3366 | /* If hardware_lock held on entry, might drop it, then reacquire */ |
| 3242 | /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ | 3367 | /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ |
| @@ -3251,7 +3376,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha, | |||
| 3251 | ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, | 3376 | ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c, |
| 3252 | "Sending TERM ELS CTIO (ha=%p)\n", ha); | 3377 | "Sending TERM ELS CTIO (ha=%p)\n", ha); |
| 3253 | 3378 | ||
| 3254 | pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL); | 3379 | pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL); |
| 3255 | if (pkt == NULL) { | 3380 | if (pkt == NULL) { |
| 3256 | ql_dbg(ql_dbg_tgt, vha, 0xe080, | 3381 | ql_dbg(ql_dbg_tgt, vha, 0xe080, |
| 3257 | "qla_target(%d): %s failed: unable to allocate " | 3382 | "qla_target(%d): %s failed: unable to allocate " |
| @@ -3543,6 +3668,16 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio, | |||
| 3543 | { | 3668 | { |
| 3544 | int term = 0; | 3669 | int term = 0; |
| 3545 | 3670 | ||
| 3671 | if (cmd->se_cmd.prot_op) | ||
| 3672 | ql_dbg(ql_dbg_tgt_dif, vha, 0xffff, | ||
| 3673 | "Term DIF cmd: lba[0x%llx|%lld] len[0x%x] " | ||
| 3674 | "se_cmd=%p tag[%x] op %#x/%s", | ||
| 3675 | cmd->lba, cmd->lba, | ||
| 3676 | cmd->num_blks, &cmd->se_cmd, | ||
| 3677 | cmd->atio.u.isp24.exchange_addr, | ||
| 3678 | cmd->se_cmd.prot_op, | ||
| 3679 | prot_op_str(cmd->se_cmd.prot_op)); | ||
| 3680 | |||
| 3546 | if (ctio != NULL) { | 3681 | if (ctio != NULL) { |
| 3547 | struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; | 3682 | struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio; |
| 3548 | term = !(c->flags & | 3683 | term = !(c->flags & |
| @@ -3760,32 +3895,15 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, | |||
| 3760 | struct ctio_crc_from_fw *crc = | 3895 | struct ctio_crc_from_fw *crc = |
| 3761 | (struct ctio_crc_from_fw *)ctio; | 3896 | (struct ctio_crc_from_fw *)ctio; |
| 3762 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, | 3897 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073, |
| 3763 | "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n", | 3898 | "qla_target(%d): CTIO with DIF_ERROR status %x " |
| 3899 | "received (state %x, ulp_cmd %p) actual_dif[0x%llx] " | ||
| 3900 | "expect_dif[0x%llx]\n", | ||
| 3764 | vha->vp_idx, status, cmd->state, se_cmd, | 3901 | vha->vp_idx, status, cmd->state, se_cmd, |
| 3765 | *((u64 *)&crc->actual_dif[0]), | 3902 | *((u64 *)&crc->actual_dif[0]), |
| 3766 | *((u64 *)&crc->expected_dif[0])); | 3903 | *((u64 *)&crc->expected_dif[0])); |
| 3767 | 3904 | ||
| 3768 | if (qlt_handle_dif_error(vha, cmd, ctio)) { | 3905 | qlt_handle_dif_error(vha, cmd, ctio); |
| 3769 | if (cmd->state == QLA_TGT_STATE_NEED_DATA) { | 3906 | return; |
| 3770 | /* scsi Write/xfer rdy complete */ | ||
| 3771 | goto skip_term; | ||
| 3772 | } else { | ||
| 3773 | /* scsi read/xmit respond complete | ||
| 3774 | * call handle dif to send scsi status | ||
| 3775 | * rather than terminate exchange. | ||
| 3776 | */ | ||
| 3777 | cmd->state = QLA_TGT_STATE_PROCESSED; | ||
| 3778 | ha->tgt.tgt_ops->handle_dif_err(cmd); | ||
| 3779 | return; | ||
| 3780 | } | ||
| 3781 | } else { | ||
| 3782 | /* Need to generate a SCSI good completion. | ||
| 3783 | * because FW did not send scsi status. | ||
| 3784 | */ | ||
| 3785 | status = 0; | ||
| 3786 | goto skip_term; | ||
| 3787 | } | ||
| 3788 | break; | ||
| 3789 | } | 3907 | } |
| 3790 | default: | 3908 | default: |
| 3791 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, | 3909 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b, |
| @@ -3808,7 +3926,6 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle, | |||
| 3808 | return; | 3926 | return; |
| 3809 | } | 3927 | } |
| 3810 | } | 3928 | } |
| 3811 | skip_term: | ||
| 3812 | 3929 | ||
| 3813 | if (cmd->state == QLA_TGT_STATE_PROCESSED) { | 3930 | if (cmd->state == QLA_TGT_STATE_PROCESSED) { |
| 3814 | cmd->trc_flags |= TRC_CTIO_DONE; | 3931 | cmd->trc_flags |= TRC_CTIO_DONE; |
| @@ -4584,7 +4701,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, | |||
| 4584 | } | 4701 | } |
| 4585 | 4702 | ||
| 4586 | if (sess != NULL) { | 4703 | if (sess != NULL) { |
| 4587 | if (sess->fw_login_state == DSC_LS_PLOGI_PEND) { | 4704 | if (sess->fw_login_state != DSC_LS_PLOGI_PEND && |
| 4705 | sess->fw_login_state != DSC_LS_PLOGI_COMP) { | ||
| 4588 | /* | 4706 | /* |
| 4589 | * Impatient initiator sent PRLI before last | 4707 | * Impatient initiator sent PRLI before last |
| 4590 | * PLOGI could finish. Will force him to re-try, | 4708 | * PLOGI could finish. Will force him to re-try, |
| @@ -4623,15 +4741,23 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, | |||
| 4623 | 4741 | ||
| 4624 | /* Make session global (not used in fabric mode) */ | 4742 | /* Make session global (not used in fabric mode) */ |
| 4625 | if (ha->current_topology != ISP_CFG_F) { | 4743 | if (ha->current_topology != ISP_CFG_F) { |
| 4626 | set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); | 4744 | if (sess) { |
| 4627 | set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); | 4745 | ql_dbg(ql_dbg_disc, vha, 0xffff, |
| 4628 | qla2xxx_wake_dpc(vha); | 4746 | "%s %d %8phC post nack\n", |
| 4747 | __func__, __LINE__, sess->port_name); | ||
| 4748 | qla24xx_post_nack_work(vha, sess, iocb, | ||
| 4749 | SRB_NACK_PRLI); | ||
| 4750 | res = 0; | ||
| 4751 | } else { | ||
| 4752 | set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); | ||
| 4753 | set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); | ||
| 4754 | qla2xxx_wake_dpc(vha); | ||
| 4755 | } | ||
| 4629 | } else { | 4756 | } else { |
| 4630 | if (sess) { | 4757 | if (sess) { |
| 4631 | ql_dbg(ql_dbg_disc, vha, 0xffff, | 4758 | ql_dbg(ql_dbg_disc, vha, 0xffff, |
| 4632 | "%s %d %8phC post nack\n", | 4759 | "%s %d %8phC post nack\n", |
| 4633 | __func__, __LINE__, sess->port_name); | 4760 | __func__, __LINE__, sess->port_name); |
| 4634 | |||
| 4635 | qla24xx_post_nack_work(vha, sess, iocb, | 4761 | qla24xx_post_nack_work(vha, sess, iocb, |
| 4636 | SRB_NACK_PRLI); | 4762 | SRB_NACK_PRLI); |
| 4637 | res = 0; | 4763 | res = 0; |
| @@ -4639,7 +4765,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, | |||
| 4639 | } | 4765 | } |
| 4640 | break; | 4766 | break; |
| 4641 | 4767 | ||
| 4642 | |||
| 4643 | case ELS_TPRLO: | 4768 | case ELS_TPRLO: |
| 4644 | if (le16_to_cpu(iocb->u.isp24.flags) & | 4769 | if (le16_to_cpu(iocb->u.isp24.flags) & |
| 4645 | NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { | 4770 | NOTIFY24XX_FLAGS_GLOBAL_TPRLO) { |
| @@ -5079,16 +5204,22 @@ qlt_send_busy(struct scsi_qla_host *vha, | |||
| 5079 | 5204 | ||
| 5080 | static int | 5205 | static int |
| 5081 | qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, | 5206 | qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha, |
| 5082 | struct atio_from_isp *atio) | 5207 | struct atio_from_isp *atio, bool ha_locked) |
| 5083 | { | 5208 | { |
| 5084 | struct qla_hw_data *ha = vha->hw; | 5209 | struct qla_hw_data *ha = vha->hw; |
| 5085 | uint16_t status; | 5210 | uint16_t status; |
| 5211 | unsigned long flags; | ||
| 5086 | 5212 | ||
| 5087 | if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) | 5213 | if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha)) |
| 5088 | return 0; | 5214 | return 0; |
| 5089 | 5215 | ||
| 5216 | if (!ha_locked) | ||
| 5217 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
| 5090 | status = temp_sam_status; | 5218 | status = temp_sam_status; |
| 5091 | qlt_send_busy(vha, atio, status); | 5219 | qlt_send_busy(vha, atio, status); |
| 5220 | if (!ha_locked) | ||
| 5221 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
| 5222 | |||
| 5092 | return 1; | 5223 | return 1; |
| 5093 | } | 5224 | } |
| 5094 | 5225 | ||
| @@ -5103,7 +5234,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, | |||
| 5103 | unsigned long flags; | 5234 | unsigned long flags; |
| 5104 | 5235 | ||
| 5105 | if (unlikely(tgt == NULL)) { | 5236 | if (unlikely(tgt == NULL)) { |
| 5106 | ql_dbg(ql_dbg_io, vha, 0x3064, | 5237 | ql_dbg(ql_dbg_tgt, vha, 0x3064, |
| 5107 | "ATIO pkt, but no tgt (ha %p)", ha); | 5238 | "ATIO pkt, but no tgt (ha %p)", ha); |
| 5108 | return; | 5239 | return; |
| 5109 | } | 5240 | } |
| @@ -5133,7 +5264,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha, | |||
| 5133 | 5264 | ||
| 5134 | 5265 | ||
| 5135 | if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { | 5266 | if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) { |
| 5136 | rc = qlt_chk_qfull_thresh_hold(vha, atio); | 5267 | rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked); |
| 5137 | if (rc != 0) { | 5268 | if (rc != 0) { |
| 5138 | tgt->atio_irq_cmd_count--; | 5269 | tgt->atio_irq_cmd_count--; |
| 5139 | return; | 5270 | return; |
| @@ -5256,7 +5387,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt) | |||
| 5256 | break; | 5387 | break; |
| 5257 | } | 5388 | } |
| 5258 | 5389 | ||
| 5259 | rc = qlt_chk_qfull_thresh_hold(vha, atio); | 5390 | rc = qlt_chk_qfull_thresh_hold(vha, atio, true); |
| 5260 | if (rc != 0) { | 5391 | if (rc != 0) { |
| 5261 | tgt->irq_cmd_count--; | 5392 | tgt->irq_cmd_count--; |
| 5262 | return; | 5393 | return; |
| @@ -5531,7 +5662,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, | |||
| 5531 | 5662 | ||
| 5532 | fcport->loop_id = loop_id; | 5663 | fcport->loop_id = loop_id; |
| 5533 | 5664 | ||
| 5534 | rc = qla2x00_get_port_database(vha, fcport, 0); | 5665 | rc = qla24xx_gpdb_wait(vha, fcport, 0); |
| 5535 | if (rc != QLA_SUCCESS) { | 5666 | if (rc != QLA_SUCCESS) { |
| 5536 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, | 5667 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070, |
| 5537 | "qla_target(%d): Failed to retrieve fcport " | 5668 | "qla_target(%d): Failed to retrieve fcport " |
| @@ -5713,30 +5844,23 @@ static void qlt_abort_work(struct qla_tgt *tgt, | |||
| 5713 | } | 5844 | } |
| 5714 | } | 5845 | } |
| 5715 | 5846 | ||
| 5716 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
| 5717 | |||
| 5718 | if (tgt->tgt_stop) | ||
| 5719 | goto out_term; | ||
| 5720 | |||
| 5721 | rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); | 5847 | rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess); |
| 5848 | ha->tgt.tgt_ops->put_sess(sess); | ||
| 5849 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); | ||
| 5850 | |||
| 5722 | if (rc != 0) | 5851 | if (rc != 0) |
| 5723 | goto out_term; | 5852 | goto out_term; |
| 5724 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
| 5725 | if (sess) | ||
| 5726 | ha->tgt.tgt_ops->put_sess(sess); | ||
| 5727 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); | ||
| 5728 | return; | 5853 | return; |
| 5729 | 5854 | ||
| 5730 | out_term2: | 5855 | out_term2: |
| 5731 | spin_lock_irqsave(&ha->hardware_lock, flags); | 5856 | if (sess) |
| 5857 | ha->tgt.tgt_ops->put_sess(sess); | ||
| 5858 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); | ||
| 5732 | 5859 | ||
| 5733 | out_term: | 5860 | out_term: |
| 5861 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
| 5734 | qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); | 5862 | qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false); |
| 5735 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 5863 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 5736 | |||
| 5737 | if (sess) | ||
| 5738 | ha->tgt.tgt_ops->put_sess(sess); | ||
| 5739 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); | ||
| 5740 | } | 5864 | } |
| 5741 | 5865 | ||
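The reordered qlt_abort_work() error paths release the session reference and tgt.sess_lock before hardware_lock is ever taken, so the two locks are no longer nested on the termination path. The resulting shape, in outline (a sketch, not the verbatim function):

    rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
    ha->tgt.tgt_ops->put_sess(sess);
    spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);

    if (rc != 0) {
            /* only now take hardware_lock, solely for the reject response */
            spin_lock_irqsave(&ha->hardware_lock, flags);
            qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
            spin_unlock_irqrestore(&ha->hardware_lock, flags);
    }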
| 5742 | static void qlt_tmr_work(struct qla_tgt *tgt, | 5866 | static void qlt_tmr_work(struct qla_tgt *tgt, |
| @@ -5756,7 +5880,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, | |||
| 5756 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); | 5880 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); |
| 5757 | 5881 | ||
| 5758 | if (tgt->tgt_stop) | 5882 | if (tgt->tgt_stop) |
| 5759 | goto out_term; | 5883 | goto out_term2; |
| 5760 | 5884 | ||
| 5761 | s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; | 5885 | s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id; |
| 5762 | sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); | 5886 | sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); |
| @@ -5768,11 +5892,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt, | |||
| 5768 | 5892 | ||
| 5769 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); | 5893 | spin_lock_irqsave(&ha->tgt.sess_lock, flags); |
| 5770 | if (!sess) | 5894 | if (!sess) |
| 5771 | goto out_term; | 5895 | goto out_term2; |
| 5772 | } else { | 5896 | } else { |
| 5773 | if (sess->deleted) { | 5897 | if (sess->deleted) { |
| 5774 | sess = NULL; | 5898 | sess = NULL; |
| 5775 | goto out_term; | 5899 | goto out_term2; |
| 5776 | } | 5900 | } |
| 5777 | 5901 | ||
| 5778 | if (!kref_get_unless_zero(&sess->sess_kref)) { | 5902 | if (!kref_get_unless_zero(&sess->sess_kref)) { |
| @@ -5780,7 +5904,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, | |||
| 5780 | "%s: kref_get fail %8phC\n", | 5904 | "%s: kref_get fail %8phC\n", |
| 5781 | __func__, sess->port_name); | 5905 | __func__, sess->port_name); |
| 5782 | sess = NULL; | 5906 | sess = NULL; |
| 5783 | goto out_term; | 5907 | goto out_term2; |
| 5784 | } | 5908 | } |
| 5785 | } | 5909 | } |
| 5786 | 5910 | ||
| @@ -5790,17 +5914,19 @@ static void qlt_tmr_work(struct qla_tgt *tgt, | |||
| 5790 | unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); | 5914 | unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun); |
| 5791 | 5915 | ||
| 5792 | rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); | 5916 | rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); |
| 5793 | if (rc != 0) | ||
| 5794 | goto out_term; | ||
| 5795 | |||
| 5796 | ha->tgt.tgt_ops->put_sess(sess); | 5917 | ha->tgt.tgt_ops->put_sess(sess); |
| 5797 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); | 5918 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); |
| 5919 | |||
| 5920 | if (rc != 0) | ||
| 5921 | goto out_term; | ||
| 5798 | return; | 5922 | return; |
| 5799 | 5923 | ||
| 5924 | out_term2: | ||
| 5925 | if (sess) | ||
| 5926 | ha->tgt.tgt_ops->put_sess(sess); | ||
| 5927 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); | ||
| 5800 | out_term: | 5928 | out_term: |
| 5801 | qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); | 5929 | qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0); |
| 5802 | ha->tgt.tgt_ops->put_sess(sess); | ||
| 5803 | spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); | ||
| 5804 | } | 5930 | } |
| 5805 | 5931 | ||
| 5806 | static void qlt_sess_work_fn(struct work_struct *work) | 5932 | static void qlt_sess_work_fn(struct work_struct *work) |
| @@ -5893,13 +6019,13 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha) | |||
| 5893 | tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; | 6019 | tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX; |
| 5894 | tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; | 6020 | tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX; |
| 5895 | 6021 | ||
| 5896 | if (base_vha->fc_vport) | ||
| 5897 | return 0; | ||
| 5898 | |||
| 5899 | mutex_lock(&qla_tgt_mutex); | 6022 | mutex_lock(&qla_tgt_mutex); |
| 5900 | list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); | 6023 | list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist); |
| 5901 | mutex_unlock(&qla_tgt_mutex); | 6024 | mutex_unlock(&qla_tgt_mutex); |
| 5902 | 6025 | ||
| 6026 | if (ha->tgt.tgt_ops && ha->tgt.tgt_ops->add_target) | ||
| 6027 | ha->tgt.tgt_ops->add_target(base_vha); | ||
| 6028 | |||
| 5903 | return 0; | 6029 | return 0; |
| 5904 | } | 6030 | } |
| 5905 | 6031 | ||
| @@ -5928,6 +6054,17 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha) | |||
| 5928 | return 0; | 6054 | return 0; |
| 5929 | } | 6055 | } |
| 5930 | 6056 | ||
| 6057 | void qlt_remove_target_resources(struct qla_hw_data *ha) | ||
| 6058 | { | ||
| 6059 | struct scsi_qla_host *node; | ||
| 6060 | u32 key = 0; | ||
| 6061 | |||
| 6062 | btree_for_each_safe32(&ha->tgt.host_map, key, node) | ||
| 6063 | btree_remove32(&ha->tgt.host_map, key); | ||
| 6064 | |||
| 6065 | btree_destroy32(&ha->tgt.host_map); | ||
| 6066 | } | ||
| 6067 | |||
| 5931 | static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, | 6068 | static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, |
| 5932 | unsigned char *b) | 6069 | unsigned char *b) |
| 5933 | { | 6070 | { |
| @@ -6234,7 +6371,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) | |||
| 6234 | struct atio_from_isp *pkt; | 6371 | struct atio_from_isp *pkt; |
| 6235 | int cnt, i; | 6372 | int cnt, i; |
| 6236 | 6373 | ||
| 6237 | if (!vha->flags.online) | 6374 | if (!ha->flags.fw_started) |
| 6238 | return; | 6375 | return; |
| 6239 | 6376 | ||
| 6240 | while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || | 6377 | while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) || |
| @@ -6581,6 +6718,8 @@ qlt_modify_vp_config(struct scsi_qla_host *vha, | |||
| 6581 | void | 6718 | void |
| 6582 | qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) | 6719 | qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) |
| 6583 | { | 6720 | { |
| 6721 | int rc; | ||
| 6722 | |||
| 6584 | if (!QLA_TGT_MODE_ENABLED()) | 6723 | if (!QLA_TGT_MODE_ENABLED()) |
| 6585 | return; | 6724 | return; |
| 6586 | 6725 | ||
| @@ -6600,6 +6739,13 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha) | |||
| 6600 | qlt_unknown_atio_work_fn); | 6739 | qlt_unknown_atio_work_fn); |
| 6601 | 6740 | ||
| 6602 | qlt_clear_mode(base_vha); | 6741 | qlt_clear_mode(base_vha); |
| 6742 | |||
| 6743 | rc = btree_init32(&ha->tgt.host_map); | ||
| 6744 | if (rc) | ||
| 6745 | ql_log(ql_log_info, base_vha, 0xffff, | ||
| 6746 | "Unable to initialize ha->host_map btree\n"); | ||
| 6747 | |||
| 6748 | qlt_update_vp_map(base_vha, SET_VP_IDX); | ||
| 6603 | } | 6749 | } |
| 6604 | 6750 | ||
| 6605 | irqreturn_t | 6751 | irqreturn_t |
| @@ -6642,6 +6788,8 @@ qlt_handle_abts_recv_work(struct work_struct *work) | |||
| 6642 | spin_lock_irqsave(&ha->hardware_lock, flags); | 6788 | spin_lock_irqsave(&ha->hardware_lock, flags); |
| 6643 | qlt_response_pkt_all_vps(vha, (response_t *)&op->atio); | 6789 | qlt_response_pkt_all_vps(vha, (response_t *)&op->atio); |
| 6644 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 6790 | spin_unlock_irqrestore(&ha->hardware_lock, flags); |
| 6791 | |||
| 6792 | kfree(op); | ||
| 6645 | } | 6793 | } |
| 6646 | 6794 | ||
| 6647 | void | 6795 | void |
| @@ -6706,25 +6854,69 @@ qlt_mem_free(struct qla_hw_data *ha) | |||
| 6706 | void | 6854 | void |
| 6707 | qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) | 6855 | qlt_update_vp_map(struct scsi_qla_host *vha, int cmd) |
| 6708 | { | 6856 | { |
| 6857 | void *slot; | ||
| 6858 | u32 key; | ||
| 6859 | int rc; | ||
| 6860 | |||
| 6709 | if (!QLA_TGT_MODE_ENABLED()) | 6861 | if (!QLA_TGT_MODE_ENABLED()) |
| 6710 | return; | 6862 | return; |
| 6711 | 6863 | ||
| 6864 | key = vha->d_id.b24; | ||
| 6865 | |||
| 6712 | switch (cmd) { | 6866 | switch (cmd) { |
| 6713 | case SET_VP_IDX: | 6867 | case SET_VP_IDX: |
| 6714 | vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; | 6868 | vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha; |
| 6715 | break; | 6869 | break; |
| 6716 | case SET_AL_PA: | 6870 | case SET_AL_PA: |
| 6717 | vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx; | 6871 | slot = btree_lookup32(&vha->hw->tgt.host_map, key); |
| 6872 | if (!slot) { | ||
| 6873 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, | ||
| 6874 | "Save vha in host_map %p %06x\n", vha, key); | ||
| 6875 | rc = btree_insert32(&vha->hw->tgt.host_map, | ||
| 6876 | key, vha, GFP_ATOMIC); | ||
| 6877 | if (rc) | ||
| 6878 | ql_log(ql_log_info, vha, 0xffff, | ||
| 6879 | "Unable to insert s_id into host_map: %06x\n", | ||
| 6880 | key); | ||
| 6881 | return; | ||
| 6882 | } | ||
| 6883 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, | ||
| 6884 | "replace existing vha in host_map %p %06x\n", vha, key); | ||
| 6885 | btree_update32(&vha->hw->tgt.host_map, key, vha); | ||
| 6718 | break; | 6886 | break; |
| 6719 | case RESET_VP_IDX: | 6887 | case RESET_VP_IDX: |
| 6720 | vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; | 6888 | vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL; |
| 6721 | break; | 6889 | break; |
| 6722 | case RESET_AL_PA: | 6890 | case RESET_AL_PA: |
| 6723 | vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0; | 6891 | ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff, |
| 6892 | "clear vha in host_map %p %06x\n", vha, key); | ||
| 6893 | slot = btree_lookup32(&vha->hw->tgt.host_map, key); | ||
| 6894 | if (slot) | ||
| 6895 | btree_remove32(&vha->hw->tgt.host_map, key); | ||
| 6896 | vha->d_id.b24 = 0; | ||
| 6724 | break; | 6897 | break; |
| 6725 | } | 6898 | } |
| 6726 | } | 6899 | } |
| 6727 | 6900 | ||
| 6901 | void qlt_update_host_map(struct scsi_qla_host *vha, port_id_t id) | ||
| 6902 | { | ||
| 6903 | unsigned long flags; | ||
| 6904 | struct qla_hw_data *ha = vha->hw; | ||
| 6905 | |||
| 6906 | if (!vha->d_id.b24) { | ||
| 6907 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 6908 | vha->d_id = id; | ||
| 6909 | qlt_update_vp_map(vha, SET_AL_PA); | ||
| 6910 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 6911 | } else if (vha->d_id.b24 != id.b24) { | ||
| 6912 | spin_lock_irqsave(&ha->vport_slock, flags); | ||
| 6913 | qlt_update_vp_map(vha, RESET_AL_PA); | ||
| 6914 | vha->d_id = id; | ||
| 6915 | qlt_update_vp_map(vha, SET_AL_PA); | ||
| 6916 | spin_unlock_irqrestore(&ha->vport_slock, flags); | ||
| 6917 | } | ||
| 6918 | } | ||
| 6919 | |||
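Taken together, the host_map hunks replace the al_pa-indexed array with a 32-bit B+tree from lib/btree, keyed on the full 24-bit port ID in place of the 8-bit al_pa index. The lifecycle condensed into one sketch (error handling trimmed; treat as illustration only):

    #include <linux/btree.h>

    static void *host_map_demo(struct btree_head32 *host_map,
                               struct scsi_qla_host *vha, u32 key)
    {
            void *found;

            btree_init32(host_map);                         /* qlt_probe_one_stage1() */
            btree_insert32(host_map, key, vha, GFP_ATOMIC); /* SET_AL_PA */
            found = btree_lookup32(host_map, key);          /* s_id -> vha resolution */
            btree_remove32(host_map, key);                  /* RESET_AL_PA */
            btree_destroy32(host_map);                      /* qlt_remove_target_resources() */
            return found;
    }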
| 6728 | static int __init qlt_parse_ini_mode(void) | 6920 | static int __init qlt_parse_ini_mode(void) |
| 6729 | { | 6921 | { |
| 6730 | if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) | 6922 | if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0) |
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index a7f90dcaae37..d64420251194 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h | |||
| @@ -378,6 +378,14 @@ static inline void adjust_corrupted_atio(struct atio_from_isp *atio) | |||
| 378 | atio->u.isp24.fcp_cmnd.add_cdb_len = 0; | 378 | atio->u.isp24.fcp_cmnd.add_cdb_len = 0; |
| 379 | } | 379 | } |
| 380 | 380 | ||
| 381 | static inline int get_datalen_for_atio(struct atio_from_isp *atio) | ||
| 382 | { | ||
| 383 | int len = atio->u.isp24.fcp_cmnd.add_cdb_len; | ||
| 384 | |||
| 385 | return (be32_to_cpu(get_unaligned((uint32_t *) | ||
| 386 | &atio->u.isp24.fcp_cmnd.add_cdb[len * 4]))); | ||
| 387 | } | ||
| 388 | |||
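get_datalen_for_atio() extracts the FCP_DL (expected data length) field from the FCP_CMND payload: add_cdb_len counts the additional CDB in 4-byte words, and the big-endian 32-bit length follows immediately after it, hence the add_cdb[len * 4] offset. A hedged usage sketch; this exact call site is an assumption:

    /* size the data phase from the wire-format command */
    cmd->bufflen = get_datalen_for_atio(&cmd->atio);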
| 381 | #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ | 389 | #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ |
| 382 | 390 | ||
| 383 | /* | 391 | /* |
| @@ -667,7 +675,6 @@ struct qla_tgt_func_tmpl { | |||
| 667 | int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, | 675 | int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *, |
| 668 | unsigned char *, uint32_t, int, int, int); | 676 | unsigned char *, uint32_t, int, int, int); |
| 669 | void (*handle_data)(struct qla_tgt_cmd *); | 677 | void (*handle_data)(struct qla_tgt_cmd *); |
| 670 | void (*handle_dif_err)(struct qla_tgt_cmd *); | ||
| 671 | int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t, | 678 | int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint16_t, |
| 672 | uint32_t); | 679 | uint32_t); |
| 673 | void (*free_cmd)(struct qla_tgt_cmd *); | 680 | void (*free_cmd)(struct qla_tgt_cmd *); |
| @@ -684,6 +691,9 @@ struct qla_tgt_func_tmpl { | |||
| 684 | void (*clear_nacl_from_fcport_map)(struct fc_port *); | 691 | void (*clear_nacl_from_fcport_map)(struct fc_port *); |
| 685 | void (*put_sess)(struct fc_port *); | 692 | void (*put_sess)(struct fc_port *); |
| 686 | void (*shutdown_sess)(struct fc_port *); | 693 | void (*shutdown_sess)(struct fc_port *); |
| 694 | int (*get_dif_tags)(struct qla_tgt_cmd *cmd, uint16_t *pfw_prot_opts); | ||
| 695 | int (*chk_dif_tags)(uint32_t tag); | ||
| 696 | void (*add_target)(struct scsi_qla_host *); | ||
| 687 | }; | 697 | }; |
| 688 | 698 | ||
| 689 | int qla2x00_wait_for_hba_online(struct scsi_qla_host *); | 699 | int qla2x00_wait_for_hba_online(struct scsi_qla_host *); |
| @@ -720,8 +730,8 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *); | |||
| 720 | #define QLA_TGT_ABORT_ALL 0xFFFE | 730 | #define QLA_TGT_ABORT_ALL 0xFFFE |
| 721 | #define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD | 731 | #define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD |
| 722 | #define QLA_TGT_NEXUS_LOSS 0xFFFC | 732 | #define QLA_TGT_NEXUS_LOSS 0xFFFC |
| 723 | #define QLA_TGT_ABTS 0xFFFB | 733 | #define QLA_TGT_ABTS 0xFFFB |
| 724 | #define QLA_TGT_2G_ABORT_TASK 0xFFFA | 734 | #define QLA_TGT_2G_ABORT_TASK 0xFFFA |
| 725 | 735 | ||
| 726 | /* Notify Acknowledge flags */ | 736 | /* Notify Acknowledge flags */ |
| 727 | #define NOTIFY_ACK_RES_COUNT BIT_8 | 737 | #define NOTIFY_ACK_RES_COUNT BIT_8 |
| @@ -845,6 +855,7 @@ enum trace_flags { | |||
| 845 | TRC_CMD_FREE = BIT_17, | 855 | TRC_CMD_FREE = BIT_17, |
| 846 | TRC_DATA_IN = BIT_18, | 856 | TRC_DATA_IN = BIT_18, |
| 847 | TRC_ABORT = BIT_19, | 857 | TRC_ABORT = BIT_19, |
| 858 | TRC_DIF_ERR = BIT_20, | ||
| 848 | }; | 859 | }; |
| 849 | 860 | ||
| 850 | struct qla_tgt_cmd { | 861 | struct qla_tgt_cmd { |
| @@ -862,7 +873,6 @@ struct qla_tgt_cmd { | |||
| 862 | unsigned int sg_mapped:1; | 873 | unsigned int sg_mapped:1; |
| 863 | unsigned int free_sg:1; | 874 | unsigned int free_sg:1; |
| 864 | unsigned int write_data_transferred:1; | 875 | unsigned int write_data_transferred:1; |
| 865 | unsigned int ctx_dsd_alloced:1; | ||
| 866 | unsigned int q_full:1; | 876 | unsigned int q_full:1; |
| 867 | unsigned int term_exchg:1; | 877 | unsigned int term_exchg:1; |
| 868 | unsigned int cmd_sent_to_fw:1; | 878 | unsigned int cmd_sent_to_fw:1; |
| @@ -885,11 +895,25 @@ struct qla_tgt_cmd { | |||
| 885 | struct list_head cmd_list; | 895 | struct list_head cmd_list; |
| 886 | 896 | ||
| 887 | struct atio_from_isp atio; | 897 | struct atio_from_isp atio; |
| 888 | /* t10dif */ | 898 | |
| 899 | uint8_t ctx_dsd_alloced; | ||
| 900 | |||
| 901 | /* T10-DIF */ | ||
| 902 | #define DIF_ERR_NONE 0 | ||
| 903 | #define DIF_ERR_GRD 1 | ||
| 904 | #define DIF_ERR_REF 2 | ||
| 905 | #define DIF_ERR_APP 3 | ||
| 906 | int8_t dif_err_code; | ||
| 889 | struct scatterlist *prot_sg; | 907 | struct scatterlist *prot_sg; |
| 890 | uint32_t prot_sg_cnt; | 908 | uint32_t prot_sg_cnt; |
| 891 | uint32_t blk_sz; | 909 | uint32_t blk_sz, num_blks; |
| 910 | uint8_t scsi_status, sense_key, asc, ascq; | ||
| 911 | |||
| 892 | struct crc_context *ctx; | 912 | struct crc_context *ctx; |
| 913 | uint8_t *cdb; | ||
| 914 | uint64_t lba; | ||
| 915 | uint16_t a_guard, e_guard, a_app_tag, e_app_tag; | ||
| 916 | uint32_t a_ref_tag, e_ref_tag; | ||
| 893 | 917 | ||
| 894 | uint64_t jiffies_at_alloc; | 918 | uint64_t jiffies_at_alloc; |
| 895 | uint64_t jiffies_at_free; | 919 | uint64_t jiffies_at_free; |
| @@ -1053,4 +1077,7 @@ extern int qlt_free_qfull_cmds(struct scsi_qla_host *); | |||
| 1053 | extern void qlt_logo_completion_handler(fc_port_t *, int); | 1077 | extern void qlt_logo_completion_handler(fc_port_t *, int); |
| 1054 | extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); | 1078 | extern void qlt_do_generation_tick(struct scsi_qla_host *, int *); |
| 1055 | 1079 | ||
| 1080 | void qlt_send_resp_ctio(scsi_qla_host_t *, struct qla_tgt_cmd *, uint8_t, | ||
| 1081 | uint8_t, uint8_t, uint8_t); | ||
| 1082 | |||
| 1056 | #endif /* __QLA_TARGET_H */ | 1083 | #endif /* __QLA_TARGET_H */ |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index 3cb1964b7786..45bc84e8e3bf 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
| @@ -7,9 +7,9 @@ | |||
| 7 | /* | 7 | /* |
| 8 | * Driver version | 8 | * Driver version |
| 9 | */ | 9 | */ |
| 10 | #define QLA2XXX_VERSION "8.07.00.38-k" | 10 | #define QLA2XXX_VERSION "9.00.00.00-k" |
| 11 | 11 | ||
| 12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 9 |
| 13 | #define QLA_DRIVER_MINOR_VER 7 | 13 | #define QLA_DRIVER_MINOR_VER 0 |
| 14 | #define QLA_DRIVER_PATCH_VER 0 | 14 | #define QLA_DRIVER_PATCH_VER 0 |
| 15 | #define QLA_DRIVER_BETA_VER 0 | 15 | #define QLA_DRIVER_BETA_VER 0 |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 8e8ab0fa9672..7443e4efa3ae 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
| @@ -531,6 +531,24 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work) | |||
| 531 | return; | 531 | return; |
| 532 | } | 532 | } |
| 533 | 533 | ||
| 534 | switch (cmd->dif_err_code) { | ||
| 535 | case DIF_ERR_GRD: | ||
| 536 | cmd->se_cmd.pi_err = | ||
| 537 | TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED; | ||
| 538 | break; | ||
| 539 | case DIF_ERR_REF: | ||
| 540 | cmd->se_cmd.pi_err = | ||
| 541 | TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED; | ||
| 542 | break; | ||
| 543 | case DIF_ERR_APP: | ||
| 544 | cmd->se_cmd.pi_err = | ||
| 545 | TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED; | ||
| 546 | break; | ||
| 547 | case DIF_ERR_NONE: | ||
| 548 | default: | ||
| 549 | break; | ||
| 550 | } | ||
| 551 | |||
| 534 | if (cmd->se_cmd.pi_err) | 552 | if (cmd->se_cmd.pi_err) |
| 535 | transport_generic_request_failure(&cmd->se_cmd, | 553 | transport_generic_request_failure(&cmd->se_cmd, |
| 536 | cmd->se_cmd.pi_err); | 554 | cmd->se_cmd.pi_err); |
| @@ -555,25 +573,23 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd) | |||
| 555 | queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); | 573 | queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); |
| 556 | } | 574 | } |
| 557 | 575 | ||
| 558 | static void tcm_qla2xxx_handle_dif_work(struct work_struct *work) | 576 | static int tcm_qla2xxx_chk_dif_tags(uint32_t tag) |
| 559 | { | 577 | { |
| 560 | struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); | 578 | return 0; |
| 561 | |||
| 562 | /* take an extra kref to prevent cmd free too early. | ||
| 563 | * need to wait for SCSI status/check condition to | ||
| 564 | * finish responding generate by transport_generic_request_failure. | ||
| 565 | */ | ||
| 566 | kref_get(&cmd->se_cmd.cmd_kref); | ||
| 567 | transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err); | ||
| 568 | } | 579 | } |
| 569 | 580 | ||
| 570 | /* | 581 | static int tcm_qla2xxx_dif_tags(struct qla_tgt_cmd *cmd, |
| 571 | * Called from qla_target.c:qlt_do_ctio_completion() | 582 | uint16_t *pfw_prot_opts) |
| 572 | */ | ||
| 573 | static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd) | ||
| 574 | { | 583 | { |
| 575 | INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work); | 584 | struct se_cmd *se_cmd = &cmd->se_cmd; |
| 576 | queue_work(tcm_qla2xxx_free_wq, &cmd->work); | 585 | |
| 586 | if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD)) | ||
| 587 | *pfw_prot_opts |= PO_DISABLE_GUARD_CHECK; | ||
| 588 | |||
| 589 | if (!(se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG)) | ||
| 590 | *pfw_prot_opts |= PO_DIS_APP_TAG_VALD; | ||
| 591 | |||
| 592 | return 0; | ||
| 577 | } | 593 | } |
| 578 | 594 | ||
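tcm_qla2xxx_dif_tags() is the fabric half of the new get_dif_tags hook: it folds the target core's per-command prot_checks into firmware protection-option bits so that checks the core has disabled are not enforced by the HBA either. A sketch of the driver-side caller, assuming the plumbing sits in qla_tgt_set_dif_tags() (referenced earlier in this diff, body not shown):

    uint16_t fw_prot_opts = 0;

    if (ha->tgt.tgt_ops->get_dif_tags)
            ha->tgt.tgt_ops->get_dif_tags(cmd, &fw_prot_opts);
    /* fw_prot_opts may now carry PO_DISABLE_GUARD_CHECK and/or
     * PO_DIS_APP_TAG_VALD, to be written into the CRC context */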
| 579 | /* | 595 | /* |
| @@ -1610,7 +1626,6 @@ static void tcm_qla2xxx_update_sess(struct fc_port *sess, port_id_t s_id, | |||
| 1610 | static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { | 1626 | static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { |
| 1611 | .handle_cmd = tcm_qla2xxx_handle_cmd, | 1627 | .handle_cmd = tcm_qla2xxx_handle_cmd, |
| 1612 | .handle_data = tcm_qla2xxx_handle_data, | 1628 | .handle_data = tcm_qla2xxx_handle_data, |
| 1613 | .handle_dif_err = tcm_qla2xxx_handle_dif_err, | ||
| 1614 | .handle_tmr = tcm_qla2xxx_handle_tmr, | 1629 | .handle_tmr = tcm_qla2xxx_handle_tmr, |
| 1615 | .free_cmd = tcm_qla2xxx_free_cmd, | 1630 | .free_cmd = tcm_qla2xxx_free_cmd, |
| 1616 | .free_mcmd = tcm_qla2xxx_free_mcmd, | 1631 | .free_mcmd = tcm_qla2xxx_free_mcmd, |
| @@ -1622,6 +1637,8 @@ static struct qla_tgt_func_tmpl tcm_qla2xxx_template = { | |||
| 1622 | .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, | 1637 | .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map, |
| 1623 | .put_sess = tcm_qla2xxx_put_sess, | 1638 | .put_sess = tcm_qla2xxx_put_sess, |
| 1624 | .shutdown_sess = tcm_qla2xxx_shutdown_sess, | 1639 | .shutdown_sess = tcm_qla2xxx_shutdown_sess, |
| 1640 | .get_dif_tags = tcm_qla2xxx_dif_tags, | ||
| 1641 | .chk_dif_tags = tcm_qla2xxx_chk_dif_tags, | ||
| 1625 | }; | 1642 | }; |
| 1626 | 1643 | ||
| 1627 | static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) | 1644 | static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport) |
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 1359913bf840..e8c26e6e6237 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
| @@ -7642,7 +7642,7 @@ static inline ssize_t ufshcd_pm_lvl_store(struct device *dev, | |||
| 7642 | if (kstrtoul(buf, 0, &value)) | 7642 | if (kstrtoul(buf, 0, &value)) |
| 7643 | return -EINVAL; | 7643 | return -EINVAL; |
| 7644 | 7644 | ||
| 7645 | if ((value < UFS_PM_LVL_0) || (value >= UFS_PM_LVL_MAX)) | 7645 | if (value >= UFS_PM_LVL_MAX) |
| 7646 | return -EINVAL; | 7646 | return -EINVAL; |
| 7647 | 7647 | ||
| 7648 | spin_lock_irqsave(hba->host->host_lock, flags); | 7648 | spin_lock_irqsave(hba->host->host_lock, flags); |
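The dropped lower bound in ufshcd_pm_lvl_store() was dead code: value comes from kstrtoul() and is unsigned, while UFS_PM_LVL_0 is the lowest enumerator (0), so the comparison could never be true. The general shape of the bug:

    unsigned long value;

    if (value < 0)          /* always false for an unsigned type;  */
            return -EINVAL; /* gcc flags this under -Wtype-limits  */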
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index f5e330099bfc..fd7c16a7ca6e 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c | |||
| @@ -43,7 +43,7 @@ | |||
| 43 | #include "target_core_ua.h" | 43 | #include "target_core_ua.h" |
| 44 | 44 | ||
| 45 | static sense_reason_t core_alua_check_transition(int state, int valid, | 45 | static sense_reason_t core_alua_check_transition(int state, int valid, |
| 46 | int *primary); | 46 | int *primary, int explicit); |
| 47 | static int core_alua_set_tg_pt_secondary_state( | 47 | static int core_alua_set_tg_pt_secondary_state( |
| 48 | struct se_lun *lun, int explicit, int offline); | 48 | struct se_lun *lun, int explicit, int offline); |
| 49 | 49 | ||
| @@ -335,8 +335,8 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) | |||
| 335 | * the state is a primary or secondary target port asymmetric | 335 | * the state is a primary or secondary target port asymmetric |
| 336 | * access state. | 336 | * access state. |
| 337 | */ | 337 | */ |
| 338 | rc = core_alua_check_transition(alua_access_state, | 338 | rc = core_alua_check_transition(alua_access_state, valid_states, |
| 339 | valid_states, &primary); | 339 | &primary, 1); |
| 340 | if (rc) { | 340 | if (rc) { |
| 341 | /* | 341 | /* |
| 342 | * If the SET TARGET PORT GROUPS attempts to establish | 342 | * If the SET TARGET PORT GROUPS attempts to establish |
| @@ -691,7 +691,7 @@ target_alua_state_check(struct se_cmd *cmd) | |||
| 691 | 691 | ||
| 692 | if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) | 692 | if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE) |
| 693 | return 0; | 693 | return 0; |
| 694 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) | 694 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) |
| 695 | return 0; | 695 | return 0; |
| 696 | 696 | ||
| 697 | /* | 697 | /* |
| @@ -762,7 +762,7 @@ target_alua_state_check(struct se_cmd *cmd) | |||
| 762 | * Check implicit and explicit ALUA state change request. | 762 | * Check implicit and explicit ALUA state change request. |
| 763 | */ | 763 | */ |
| 764 | static sense_reason_t | 764 | static sense_reason_t |
| 765 | core_alua_check_transition(int state, int valid, int *primary) | 765 | core_alua_check_transition(int state, int valid, int *primary, int explicit) |
| 766 | { | 766 | { |
| 767 | /* | 767 | /* |
| 768 | * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are | 768 | * OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are |
| @@ -804,11 +804,14 @@ core_alua_check_transition(int state, int valid, int *primary) | |||
| 804 | *primary = 0; | 804 | *primary = 0; |
| 805 | break; | 805 | break; |
| 806 | case ALUA_ACCESS_STATE_TRANSITION: | 806 | case ALUA_ACCESS_STATE_TRANSITION: |
| 807 | /* | 807 | if (!(valid & ALUA_T_SUP) || explicit) |
| 808 | * Transitioning is set internally, and | 808 | /* |
| 809 | * cannot be selected manually. | 809 | * Transitioning is set internally and by tcmu daemon, |
| 810 | */ | 810 | * and cannot be selected through a STPG. |
| 811 | goto not_supported; | 811 | */ |
| 812 | goto not_supported; | ||
| 813 | *primary = 0; | ||
| 814 | break; | ||
| 812 | default: | 815 | default: |
| 813 | pr_err("Unknown ALUA access state: 0x%02x\n", state); | 816 | pr_err("Unknown ALUA access state: 0x%02x\n", state); |
| 814 | return TCM_INVALID_PARAMETER_LIST; | 817 | return TCM_INVALID_PARAMETER_LIST; |
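With the added explicit flag, ALUA_ACCESS_STATE_TRANSITION becomes a legal request for implicit (in-kernel or tcmu-initiated) changes on target port groups that advertise transition support, while an explicit SET TARGET PORT GROUPS still gets rejected. Reduced to a predicate (ALUA_T_SUP being the T_SUP bit of the supported-states mask):

    /* accept TRANSITION only when the TPG advertises T_SUP
     * and the request did not arrive via an explicit STPG */
    bool allowed = (valid & ALUA_T_SUP) && !explicit;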
| @@ -1013,7 +1016,7 @@ static void core_alua_queue_state_change_ua(struct t10_alua_tg_pt_gp *tg_pt_gp) | |||
| 1013 | static void core_alua_do_transition_tg_pt_work(struct work_struct *work) | 1016 | static void core_alua_do_transition_tg_pt_work(struct work_struct *work) |
| 1014 | { | 1017 | { |
| 1015 | struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, | 1018 | struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(work, |
| 1016 | struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work.work); | 1019 | struct t10_alua_tg_pt_gp, tg_pt_gp_transition_work); |
| 1017 | struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; | 1020 | struct se_device *dev = tg_pt_gp->tg_pt_gp_dev; |
| 1018 | bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == | 1021 | bool explicit = (tg_pt_gp->tg_pt_gp_alua_access_status == |
| 1019 | ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); | 1022 | ALUA_STATUS_ALTERED_BY_EXPLICIT_STPG); |
| @@ -1070,32 +1073,19 @@ static int core_alua_do_transition_tg_pt( | |||
| 1070 | if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) | 1073 | if (atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == new_state) |
| 1071 | return 0; | 1074 | return 0; |
| 1072 | 1075 | ||
| 1073 | if (new_state == ALUA_ACCESS_STATE_TRANSITION) | 1076 | if (explicit && new_state == ALUA_ACCESS_STATE_TRANSITION) |
| 1074 | return -EAGAIN; | 1077 | return -EAGAIN; |
| 1075 | 1078 | ||
| 1076 | /* | 1079 | /* |
| 1077 | * Flush any pending transitions | 1080 | * Flush any pending transitions |
| 1078 | */ | 1081 | */ |
| 1079 | if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs && | 1082 | if (!explicit) |
| 1080 | atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state) == | 1083 | flush_work(&tg_pt_gp->tg_pt_gp_transition_work); |
| 1081 | ALUA_ACCESS_STATE_TRANSITION) { | ||
| 1082 | /* Just in case */ | ||
| 1083 | tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; | ||
| 1084 | tg_pt_gp->tg_pt_gp_transition_complete = &wait; | ||
| 1085 | flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); | ||
| 1086 | wait_for_completion(&wait); | ||
| 1087 | tg_pt_gp->tg_pt_gp_transition_complete = NULL; | ||
| 1088 | return 0; | ||
| 1089 | } | ||
| 1090 | 1084 | ||
| 1091 | /* | 1085 | /* |
| 1092 | * Save the old primary ALUA access state, and set the current state | 1086 | * Save the old primary ALUA access state, and set the current state |
| 1093 | * to ALUA_ACCESS_STATE_TRANSITION. | 1087 | * to ALUA_ACCESS_STATE_TRANSITION. |
| 1094 | */ | 1088 | */ |
| 1095 | tg_pt_gp->tg_pt_gp_alua_previous_state = | ||
| 1096 | atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); | ||
| 1097 | tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; | ||
| 1098 | |||
| 1099 | atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, | 1089 | atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, |
| 1100 | ALUA_ACCESS_STATE_TRANSITION); | 1090 | ALUA_ACCESS_STATE_TRANSITION); |
| 1101 | tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? | 1091 | tg_pt_gp->tg_pt_gp_alua_access_status = (explicit) ? |
| @@ -1104,6 +1094,13 @@ static int core_alua_do_transition_tg_pt( | |||
| 1104 | 1094 | ||
| 1105 | core_alua_queue_state_change_ua(tg_pt_gp); | 1095 | core_alua_queue_state_change_ua(tg_pt_gp); |
| 1106 | 1096 | ||
| 1097 | if (new_state == ALUA_ACCESS_STATE_TRANSITION) | ||
| 1098 | return 0; | ||
| 1099 | |||
| 1100 | tg_pt_gp->tg_pt_gp_alua_previous_state = | ||
| 1101 | atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state); | ||
| 1102 | tg_pt_gp->tg_pt_gp_alua_pending_state = new_state; | ||
| 1103 | |||
| 1107 | /* | 1104 | /* |
| 1108 | * Check for the optional ALUA primary state transition delay | 1105 | * Check for the optional ALUA primary state transition delay |
| 1109 | */ | 1106 | */ |
| @@ -1117,17 +1114,9 @@ static int core_alua_do_transition_tg_pt( | |||
| 1117 | atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); | 1114 | atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); |
| 1118 | spin_unlock(&dev->t10_alua.tg_pt_gps_lock); | 1115 | spin_unlock(&dev->t10_alua.tg_pt_gps_lock); |
| 1119 | 1116 | ||
| 1120 | if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { | 1117 | schedule_work(&tg_pt_gp->tg_pt_gp_transition_work); |
| 1121 | unsigned long transition_tmo; | 1118 | if (explicit) { |
| 1122 | |||
| 1123 | transition_tmo = tg_pt_gp->tg_pt_gp_implicit_trans_secs * HZ; | ||
| 1124 | queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, | ||
| 1125 | &tg_pt_gp->tg_pt_gp_transition_work, | ||
| 1126 | transition_tmo); | ||
| 1127 | } else { | ||
| 1128 | tg_pt_gp->tg_pt_gp_transition_complete = &wait; | 1119 | tg_pt_gp->tg_pt_gp_transition_complete = &wait; |
| 1129 | queue_delayed_work(tg_pt_gp->tg_pt_gp_dev->tmr_wq, | ||
| 1130 | &tg_pt_gp->tg_pt_gp_transition_work, 0); | ||
| 1131 | wait_for_completion(&wait); | 1120 | wait_for_completion(&wait); |
| 1132 | tg_pt_gp->tg_pt_gp_transition_complete = NULL; | 1121 | tg_pt_gp->tg_pt_gp_transition_complete = NULL; |
| 1133 | } | 1122 | } |
| @@ -1149,8 +1138,12 @@ int core_alua_do_port_transition( | |||
| 1149 | struct t10_alua_tg_pt_gp *tg_pt_gp; | 1138 | struct t10_alua_tg_pt_gp *tg_pt_gp; |
| 1150 | int primary, valid_states, rc = 0; | 1139 | int primary, valid_states, rc = 0; |
| 1151 | 1140 | ||
| 1141 | if (l_dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA) | ||
| 1142 | return -ENODEV; | ||
| 1143 | |||
| 1152 | valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; | 1144 | valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; |
| 1153 | if (core_alua_check_transition(new_state, valid_states, &primary) != 0) | 1145 | if (core_alua_check_transition(new_state, valid_states, &primary, |
| 1146 | explicit) != 0) | ||
| 1154 | return -EINVAL; | 1147 | return -EINVAL; |
| 1155 | 1148 | ||
| 1156 | local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; | 1149 | local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem; |
| @@ -1695,8 +1688,8 @@ struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(struct se_device *dev, | |||
| 1695 | mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); | 1688 | mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex); |
| 1696 | spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); | 1689 | spin_lock_init(&tg_pt_gp->tg_pt_gp_lock); |
| 1697 | atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); | 1690 | atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0); |
| 1698 | INIT_DELAYED_WORK(&tg_pt_gp->tg_pt_gp_transition_work, | 1691 | INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work, |
| 1699 | core_alua_do_transition_tg_pt_work); | 1692 | core_alua_do_transition_tg_pt_work); |
| 1700 | tg_pt_gp->tg_pt_gp_dev = dev; | 1693 | tg_pt_gp->tg_pt_gp_dev = dev; |
| 1701 | atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, | 1694 | atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, |
| 1702 | ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); | 1695 | ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED); |
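Because the transition handler no longer arms a timeout for itself, tg_pt_gp_transition_work shrinks from a delayed_work to a plain work_struct; note the matching container_of() change in core_alua_do_transition_tg_pt_work() above, which now resolves the work_struct directly instead of the .work member of a delayed_work. The conversion pattern in miniature:

    #include <linux/workqueue.h>

    /* before: INIT_DELAYED_WORK(&gp->tg_pt_gp_transition_work, fn);
     *         queue_delayed_work(wq, &gp->tg_pt_gp_transition_work, tmo);
     *         flush_delayed_work(&gp->tg_pt_gp_transition_work);
     * after: */
    INIT_WORK(&tg_pt_gp->tg_pt_gp_transition_work,
              core_alua_do_transition_tg_pt_work);
    schedule_work(&tg_pt_gp->tg_pt_gp_transition_work);
    flush_work(&tg_pt_gp->tg_pt_gp_transition_work);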
| @@ -1804,7 +1797,7 @@ void core_alua_free_tg_pt_gp( | |||
| 1804 | dev->t10_alua.alua_tg_pt_gps_counter--; | 1797 | dev->t10_alua.alua_tg_pt_gps_counter--; |
| 1805 | spin_unlock(&dev->t10_alua.tg_pt_gps_lock); | 1798 | spin_unlock(&dev->t10_alua.tg_pt_gps_lock); |
| 1806 | 1799 | ||
| 1807 | flush_delayed_work(&tg_pt_gp->tg_pt_gp_transition_work); | 1800 | flush_work(&tg_pt_gp->tg_pt_gp_transition_work); |
| 1808 | 1801 | ||
| 1809 | /* | 1802 | /* |
| 1810 | * Allow a struct t10_alua_tg_pt_gp_member * referenced by | 1803 | * Allow a struct t10_alua_tg_pt_gp_member * referenced by |
| @@ -1973,7 +1966,7 @@ ssize_t core_alua_store_tg_pt_gp_info( | |||
| 1973 | unsigned char buf[TG_PT_GROUP_NAME_BUF]; | 1966 | unsigned char buf[TG_PT_GROUP_NAME_BUF]; |
| 1974 | int move = 0; | 1967 | int move = 0; |
| 1975 | 1968 | ||
| 1976 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || | 1969 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || |
| 1977 | (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) | 1970 | (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) |
| 1978 | return -ENODEV; | 1971 | return -ENODEV; |
| 1979 | 1972 | ||
| @@ -2230,7 +2223,7 @@ ssize_t core_alua_store_offline_bit( | |||
| 2230 | unsigned long tmp; | 2223 | unsigned long tmp; |
| 2231 | int ret; | 2224 | int ret; |
| 2232 | 2225 | ||
| 2233 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH || | 2226 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA || |
| 2234 | (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) | 2227 | (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) |
| 2235 | return -ENODEV; | 2228 | return -ENODEV; |
| 2236 | 2229 | ||
| @@ -2316,7 +2309,8 @@ ssize_t core_alua_store_secondary_write_metadata( | |||
| 2316 | 2309 | ||
| 2317 | int core_setup_alua(struct se_device *dev) | 2310 | int core_setup_alua(struct se_device *dev) |
| 2318 | { | 2311 | { |
| 2319 | if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && | 2312 | if (!(dev->transport->transport_flags & |
| 2313 | TRANSPORT_FLAG_PASSTHROUGH_ALUA) && | ||
| 2320 | !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { | 2314 | !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) { |
| 2321 | struct t10_alua_lu_gp_member *lu_gp_mem; | 2315 | struct t10_alua_lu_gp_member *lu_gp_mem; |
| 2322 | 2316 | ||
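The ALUA hunks above replace the group's delayed_work with a plain work_struct: the implicit-transition delay is no longer applied through queue_delayed_work() at this call site, explicit callers block on a completion, and teardown switches from flush_delayed_work() to flush_work(). A hedged kernel-style sketch of that pattern (the names foo and foo_work_fn are illustrative, not from the patch):

    struct foo {
            struct work_struct work;
            struct completion *done;        /* non-NULL when a caller waits */
    };

    static void foo_work_fn(struct work_struct *w)
    {
            struct foo *f = container_of(w, struct foo, work);

            /* ... perform the state transition ... */
            if (f->done)
                    complete(f->done);
    }

    /* setup: INIT_WORK() pairs with schedule_work()/flush_work() */
    INIT_WORK(&f->work, foo_work_fn);

    /* trigger: a synchronous caller publishes its completion first */
    DECLARE_COMPLETION_ONSTACK(wait);
    if (explicit)
            f->done = &wait;
    schedule_work(&f->work);
    if (explicit) {
            wait_for_completion(&wait);
            f->done = NULL;
    }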
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 54b36c9835be..38b5025e4c7a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
| @@ -421,6 +421,10 @@ static int target_fabric_tf_ops_check(const struct target_core_fabric_ops *tfo) | |||
| 421 | pr_err("Missing tfo->aborted_task()\n"); | 421 | pr_err("Missing tfo->aborted_task()\n"); |
| 422 | return -EINVAL; | 422 | return -EINVAL; |
| 423 | } | 423 | } |
| 424 | if (!tfo->check_stop_free) { | ||
| 425 | pr_err("Missing tfo->check_stop_free()\n"); | ||
| 426 | return -EINVAL; | ||
| 427 | } | ||
| 424 | /* | 428 | /* |
| 425 | * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() | 429 | * We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn() |
| 426 | * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in | 430 | * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in |
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index a8f8e53f2f57..94cda7991e80 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
| @@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, | |||
| 154 | 154 | ||
| 155 | buf = kzalloc(12, GFP_KERNEL); | 155 | buf = kzalloc(12, GFP_KERNEL); |
| 156 | if (!buf) | 156 | if (!buf) |
| 157 | return; | 157 | goto out_free; |
| 158 | 158 | ||
| 159 | memset(cdb, 0, MAX_COMMAND_SIZE); | 159 | memset(cdb, 0, MAX_COMMAND_SIZE); |
| 160 | cdb[0] = MODE_SENSE; | 160 | cdb[0] = MODE_SENSE; |
| @@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, | |||
| 169 | * If MODE_SENSE still returns zero, set the default value to 1024. | 169 | * If MODE_SENSE still returns zero, set the default value to 1024. |
| 170 | */ | 170 | */ |
| 171 | sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); | 171 | sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); |
| 172 | out_free: | ||
| 172 | if (!sdev->sector_size) | 173 | if (!sdev->sector_size) |
| 173 | sdev->sector_size = 1024; | 174 | sdev->sector_size = 1024; |
| 174 | out_free: | 175 | |
| 175 | kfree(buf); | 176 | kfree(buf); |
| 176 | } | 177 | } |
| 177 | 178 | ||
| @@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, | |||
| 314 | sd->lun, sd->queue_depth); | 315 | sd->lun, sd->queue_depth); |
| 315 | } | 316 | } |
| 316 | 317 | ||
| 317 | dev->dev_attrib.hw_block_size = sd->sector_size; | 318 | dev->dev_attrib.hw_block_size = |
| 319 | min_not_zero((int)sd->sector_size, 512); | ||
| 318 | dev->dev_attrib.hw_max_sectors = | 320 | dev->dev_attrib.hw_max_sectors = |
| 319 | min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q)); | 321 | min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q)); |
| 320 | dev->dev_attrib.hw_queue_depth = sd->queue_depth; | 322 | dev->dev_attrib.hw_queue_depth = sd->queue_depth; |
| 321 | 323 | ||
| 322 | /* | 324 | /* |
| @@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev, | |||
| 339 | /* | 341 | /* |
| 340 | * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. | 342 | * For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE. |
| 341 | */ | 343 | */ |
| 342 | if (sd->type == TYPE_TAPE) | 344 | if (sd->type == TYPE_TAPE) { |
| 343 | pscsi_tape_read_blocksize(dev, sd); | 345 | pscsi_tape_read_blocksize(dev, sd); |
| 346 | dev->dev_attrib.hw_block_size = sd->sector_size; | ||
| 347 | } | ||
| 344 | return 0; | 348 | return 0; |
| 345 | } | 349 | } |
| 346 | 350 | ||
| @@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) | |||
| 406 | /* | 410 | /* |
| 407 | * Called with struct Scsi_Host->host_lock called. | 411 | * Called with struct Scsi_Host->host_lock called. |
| 408 | */ | 412 | */ |
| 409 | static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) | 413 | static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd) |
| 410 | __releases(sh->host_lock) | 414 | __releases(sh->host_lock) |
| 411 | { | 415 | { |
| 412 | struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; | 416 | struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; |
| @@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd) | |||
| 433 | return 0; | 437 | return 0; |
| 434 | } | 438 | } |
| 435 | 439 | ||
| 436 | /* | ||
| 437 | * Called with struct Scsi_Host->host_lock called. | ||
| 438 | */ | ||
| 439 | static int pscsi_create_type_other(struct se_device *dev, | ||
| 440 | struct scsi_device *sd) | ||
| 441 | __releases(sh->host_lock) | ||
| 442 | { | ||
| 443 | struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; | ||
| 444 | struct Scsi_Host *sh = sd->host; | ||
| 445 | int ret; | ||
| 446 | |||
| 447 | spin_unlock_irq(sh->host_lock); | ||
| 448 | ret = pscsi_add_device_to_list(dev, sd); | ||
| 449 | if (ret) | ||
| 450 | return ret; | ||
| 451 | |||
| 452 | pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n", | ||
| 453 | phv->phv_host_id, scsi_device_type(sd->type), sh->host_no, | ||
| 454 | sd->channel, sd->id, sd->lun); | ||
| 455 | return 0; | ||
| 456 | } | ||
| 457 | |||
| 458 | static int pscsi_configure_device(struct se_device *dev) | 440 | static int pscsi_configure_device(struct se_device *dev) |
| 459 | { | 441 | { |
| 460 | struct se_hba *hba = dev->se_hba; | 442 | struct se_hba *hba = dev->se_hba; |
| @@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev) | |||
| 542 | case TYPE_DISK: | 524 | case TYPE_DISK: |
| 543 | ret = pscsi_create_type_disk(dev, sd); | 525 | ret = pscsi_create_type_disk(dev, sd); |
| 544 | break; | 526 | break; |
| 545 | case TYPE_ROM: | ||
| 546 | ret = pscsi_create_type_rom(dev, sd); | ||
| 547 | break; | ||
| 548 | default: | 527 | default: |
| 549 | ret = pscsi_create_type_other(dev, sd); | 528 | ret = pscsi_create_type_nondisk(dev, sd); |
| 550 | break; | 529 | break; |
| 551 | } | 530 | } |
| 552 | 531 | ||
| @@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev) | |||
| 611 | else if (pdv->pdv_lld_host) | 590 | else if (pdv->pdv_lld_host) |
| 612 | scsi_host_put(pdv->pdv_lld_host); | 591 | scsi_host_put(pdv->pdv_lld_host); |
| 613 | 592 | ||
| 614 | if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM)) | 593 | scsi_device_put(sd); |
| 615 | scsi_device_put(sd); | ||
| 616 | 594 | ||
| 617 | pdv->pdv_sd = NULL; | 595 | pdv->pdv_sd = NULL; |
| 618 | } | 596 | } |
| @@ -1064,7 +1042,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev) | |||
| 1064 | if (pdv->pdv_bd && pdv->pdv_bd->bd_part) | 1042 | if (pdv->pdv_bd && pdv->pdv_bd->bd_part) |
| 1065 | return pdv->pdv_bd->bd_part->nr_sects; | 1043 | return pdv->pdv_bd->bd_part->nr_sects; |
| 1066 | 1044 | ||
| 1067 | dump_stack(); | ||
| 1068 | return 0; | 1045 | return 0; |
| 1069 | } | 1046 | } |
| 1070 | 1047 | ||
| @@ -1103,7 +1080,8 @@ static void pscsi_req_done(struct request *req, int uptodate) | |||
| 1103 | static const struct target_backend_ops pscsi_ops = { | 1080 | static const struct target_backend_ops pscsi_ops = { |
| 1104 | .name = "pscsi", | 1081 | .name = "pscsi", |
| 1105 | .owner = THIS_MODULE, | 1082 | .owner = THIS_MODULE, |
| 1106 | .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, | 1083 | .transport_flags = TRANSPORT_FLAG_PASSTHROUGH | |
| 1084 | TRANSPORT_FLAG_PASSTHROUGH_ALUA, | ||
| 1107 | .attach_hba = pscsi_attach_hba, | 1085 | .attach_hba = pscsi_attach_hba, |
| 1108 | .detach_hba = pscsi_detach_hba, | 1086 | .detach_hba = pscsi_detach_hba, |
| 1109 | .pmode_enable_hba = pscsi_pmode_enable_hba, | 1087 | .pmode_enable_hba = pscsi_pmode_enable_hba, |
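The hw_block_size and hw_max_sectors hunks above both lean on min_not_zero(), which picks the smaller operand but treats a zero operand as "no limit" rather than as a minimum. A small standalone illustration, with the macro restated in userspace for clarity:

    #include <stdio.h>

    /* Userspace restatement of the kernel's min_not_zero(): the smaller
     * of two values, except that a zero operand is ignored entirely. */
    #define min_not_zero(x, y) ({                   \
            typeof(x) _x = (x);                     \
            typeof(y) _y = (y);                     \
            _x == 0 ? _y : (_y == 0 ? _x :          \
                    (_x < _y ? _x : _y)); })

    int main(void)
    {
            printf("%d\n", min_not_zero(0, 512));    /* 512: zero ignored */
            printf("%d\n", min_not_zero(4096, 512)); /* 512: ordinary min */
            return 0;
    }

That is why a scsi_device reporting sector_size == 0 now yields a usable 512-byte hw_block_size instead of zero.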
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 68d8aef7ab78..c194063f169b 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c | |||
| @@ -1105,9 +1105,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) | |||
| 1105 | return ret; | 1105 | return ret; |
| 1106 | break; | 1106 | break; |
| 1107 | case VERIFY: | 1107 | case VERIFY: |
| 1108 | case VERIFY_16: | ||
| 1108 | size = 0; | 1109 | size = 0; |
| 1109 | sectors = transport_get_sectors_10(cdb); | 1110 | if (cdb[0] == VERIFY) { |
| 1110 | cmd->t_task_lba = transport_lba_32(cdb); | 1111 | sectors = transport_get_sectors_10(cdb); |
| 1112 | cmd->t_task_lba = transport_lba_32(cdb); | ||
| 1113 | } else { | ||
| 1114 | sectors = transport_get_sectors_16(cdb); | ||
| 1115 | cmd->t_task_lba = transport_lba_64(cdb); | ||
| 1116 | } | ||
| 1111 | cmd->execute_cmd = sbc_emulate_noop; | 1117 | cmd->execute_cmd = sbc_emulate_noop; |
| 1112 | goto check_lba; | 1118 | goto check_lba; |
| 1113 | case REZERO_UNIT: | 1119 | case REZERO_UNIT: |
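VERIFY uses the 10-byte CDB layout (2-byte transfer length, 4-byte LBA) while VERIFY_16 carries a 4-byte length and an 8-byte LBA, which is why the hunk above switches helpers on cdb[0]. A hedged sketch of the big-endian extraction those helpers perform, with byte offsets per the SBC command layouts (the helper names here are illustrative):

    #include <stdint.h>

    /* Pull an n-byte big-endian field out of a CDB. */
    static uint64_t get_be(const uint8_t *p, int n)
    {
            uint64_t v = 0;

            while (n--)
                    v = (v << 8) | *p++;
            return v;
    }

    /* 10-byte CDB: LBA in bytes 2-5, length in bytes 7-8. */
    static uint64_t lba_32(const uint8_t *cdb)     { return get_be(cdb + 2, 4); }
    static uint32_t sectors_10(const uint8_t *cdb) { return get_be(cdb + 7, 2); }

    /* 16-byte CDB: LBA in bytes 2-9, length in bytes 10-13. */
    static uint64_t lba_64(const uint8_t *cdb)     { return get_be(cdb + 2, 8); }
    static uint32_t sectors_16(const uint8_t *cdb) { return get_be(cdb + 10, 4); }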
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index c0dbfa016575..6fb191914f45 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
| @@ -602,7 +602,8 @@ int core_tpg_add_lun( | |||
| 602 | if (ret) | 602 | if (ret) |
| 603 | goto out_kill_ref; | 603 | goto out_kill_ref; |
| 604 | 604 | ||
| 605 | if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) && | 605 | if (!(dev->transport->transport_flags & |
| 606 | TRANSPORT_FLAG_PASSTHROUGH_ALUA) && | ||
| 606 | !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) | 607 | !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) |
| 607 | target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); | 608 | target_attach_tg_pt_gp(lun, dev->t10_alua.default_tg_pt_gp); |
| 608 | 609 | ||
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 434d9d693989..b1a3cdb29468 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
| @@ -636,8 +636,7 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) | |||
| 636 | * Fabric modules are expected to return '1' here if the se_cmd being | 636 | * Fabric modules are expected to return '1' here if the se_cmd being |
| 637 | * passed is released at this point, or zero if not being released. | 637 | * passed is released at this point, or zero if not being released. |
| 638 | */ | 638 | */ |
| 639 | return cmd->se_tfo->check_stop_free ? cmd->se_tfo->check_stop_free(cmd) | 639 | return cmd->se_tfo->check_stop_free(cmd); |
| 640 | : 0; | ||
| 641 | } | 640 | } |
| 642 | 641 | ||
| 643 | static void transport_lun_remove_cmd(struct se_cmd *cmd) | 642 | static void transport_lun_remove_cmd(struct se_cmd *cmd) |
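The target_core_configfs.c and target_core_transport.c hunks are a pair: fabric registration now fails loudly when check_stop_free() is missing, and that invariant is what lets the per-command path drop its NULL test. In sketch form, validate once at registration so the hot path can assume:

    /* registration time: reject incomplete ops tables */
    if (!tfo->check_stop_free)
            return -EINVAL;

    /* per-command fast path: the pointer is now guaranteed non-NULL */
    return cmd->se_tfo->check_stop_free(cmd);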
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index c3adefe95e50..c6874c38a10b 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/stringify.h> | 28 | #include <linux/stringify.h> |
| 29 | #include <linux/bitops.h> | 29 | #include <linux/bitops.h> |
| 30 | #include <linux/highmem.h> | 30 | #include <linux/highmem.h> |
| 31 | #include <linux/configfs.h> | ||
| 31 | #include <net/genetlink.h> | 32 | #include <net/genetlink.h> |
| 32 | #include <scsi/scsi_common.h> | 33 | #include <scsi/scsi_common.h> |
| 33 | #include <scsi/scsi_proto.h> | 34 | #include <scsi/scsi_proto.h> |
| @@ -112,6 +113,7 @@ struct tcmu_dev { | |||
| 112 | spinlock_t commands_lock; | 113 | spinlock_t commands_lock; |
| 113 | 114 | ||
| 114 | struct timer_list timeout; | 115 | struct timer_list timeout; |
| 116 | unsigned int cmd_time_out; | ||
| 115 | 117 | ||
| 116 | char dev_config[TCMU_CONFIG_LEN]; | 118 | char dev_config[TCMU_CONFIG_LEN]; |
| 117 | }; | 119 | }; |
| @@ -172,7 +174,9 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) | |||
| 172 | 174 | ||
| 173 | tcmu_cmd->se_cmd = se_cmd; | 175 | tcmu_cmd->se_cmd = se_cmd; |
| 174 | tcmu_cmd->tcmu_dev = udev; | 176 | tcmu_cmd->tcmu_dev = udev; |
| 175 | tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT); | 177 | if (udev->cmd_time_out) |
| 178 | tcmu_cmd->deadline = jiffies + | ||
| 179 | msecs_to_jiffies(udev->cmd_time_out); | ||
| 176 | 180 | ||
| 177 | idr_preload(GFP_KERNEL); | 181 | idr_preload(GFP_KERNEL); |
| 178 | spin_lock_irq(&udev->commands_lock); | 182 | spin_lock_irq(&udev->commands_lock); |
| @@ -451,7 +455,11 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
| 451 | 455 | ||
| 452 | pr_debug("sleeping for ring space\n"); | 456 | pr_debug("sleeping for ring space\n"); |
| 453 | spin_unlock_irq(&udev->cmdr_lock); | 457 | spin_unlock_irq(&udev->cmdr_lock); |
| 454 | ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); | 458 | if (udev->cmd_time_out) |
| 459 | ret = schedule_timeout( | ||
| 460 | msecs_to_jiffies(udev->cmd_time_out)); | ||
| 461 | else | ||
| 462 | ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); | ||
| 455 | finish_wait(&udev->wait_cmdr, &__wait); | 463 | finish_wait(&udev->wait_cmdr, &__wait); |
| 456 | if (!ret) { | 464 | if (!ret) { |
| 457 | pr_warn("tcmu: command timed out\n"); | 465 | pr_warn("tcmu: command timed out\n"); |
| @@ -526,8 +534,9 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) | |||
| 526 | /* TODO: only if FLUSH and FUA? */ | 534 | /* TODO: only if FLUSH and FUA? */ |
| 527 | uio_event_notify(&udev->uio_info); | 535 | uio_event_notify(&udev->uio_info); |
| 528 | 536 | ||
| 529 | mod_timer(&udev->timeout, | 537 | if (udev->cmd_time_out) |
| 530 | round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT))); | 538 | mod_timer(&udev->timeout, round_jiffies_up(jiffies + |
| 539 | msecs_to_jiffies(udev->cmd_time_out))); | ||
| 531 | 540 | ||
| 532 | return TCM_NO_SENSE; | 541 | return TCM_NO_SENSE; |
| 533 | } | 542 | } |
| @@ -742,6 +751,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |||
| 742 | } | 751 | } |
| 743 | 752 | ||
| 744 | udev->hba = hba; | 753 | udev->hba = hba; |
| 754 | udev->cmd_time_out = TCMU_TIME_OUT; | ||
| 745 | 755 | ||
| 746 | init_waitqueue_head(&udev->wait_cmdr); | 756 | init_waitqueue_head(&udev->wait_cmdr); |
| 747 | spin_lock_init(&udev->cmdr_lock); | 757 | spin_lock_init(&udev->cmdr_lock); |
| @@ -960,7 +970,8 @@ static int tcmu_configure_device(struct se_device *dev) | |||
| 960 | if (dev->dev_attrib.hw_block_size == 0) | 970 | if (dev->dev_attrib.hw_block_size == 0) |
| 961 | dev->dev_attrib.hw_block_size = 512; | 971 | dev->dev_attrib.hw_block_size = 512; |
| 962 | /* Other attributes can be configured in userspace */ | 972 | /* Other attributes can be configured in userspace */ |
| 963 | dev->dev_attrib.hw_max_sectors = 128; | 973 | if (!dev->dev_attrib.hw_max_sectors) |
| 974 | dev->dev_attrib.hw_max_sectors = 128; | ||
| 964 | dev->dev_attrib.hw_queue_depth = 128; | 975 | dev->dev_attrib.hw_queue_depth = 128; |
| 965 | 976 | ||
| 966 | ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, | 977 | ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, |
| @@ -997,6 +1008,11 @@ static void tcmu_dev_call_rcu(struct rcu_head *p) | |||
| 997 | kfree(udev); | 1008 | kfree(udev); |
| 998 | } | 1009 | } |
| 999 | 1010 | ||
| 1011 | static bool tcmu_dev_configured(struct tcmu_dev *udev) | ||
| 1012 | { | ||
| 1013 | return udev->uio_info.uio_dev ? true : false; | ||
| 1014 | } | ||
| 1015 | |||
| 1000 | static void tcmu_free_device(struct se_device *dev) | 1016 | static void tcmu_free_device(struct se_device *dev) |
| 1001 | { | 1017 | { |
| 1002 | struct tcmu_dev *udev = TCMU_DEV(dev); | 1018 | struct tcmu_dev *udev = TCMU_DEV(dev); |
| @@ -1018,8 +1034,7 @@ static void tcmu_free_device(struct se_device *dev) | |||
| 1018 | spin_unlock_irq(&udev->commands_lock); | 1034 | spin_unlock_irq(&udev->commands_lock); |
| 1019 | WARN_ON(!all_expired); | 1035 | WARN_ON(!all_expired); |
| 1020 | 1036 | ||
| 1021 | /* Device was configured */ | 1037 | if (tcmu_dev_configured(udev)) { |
| 1022 | if (udev->uio_info.uio_dev) { | ||
| 1023 | tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name, | 1038 | tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name, |
| 1024 | udev->uio_info.uio_dev->minor); | 1039 | udev->uio_info.uio_dev->minor); |
| 1025 | 1040 | ||
| @@ -1031,16 +1046,42 @@ static void tcmu_free_device(struct se_device *dev) | |||
| 1031 | } | 1046 | } |
| 1032 | 1047 | ||
| 1033 | enum { | 1048 | enum { |
| 1034 | Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err, | 1049 | Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors, |
| 1050 | Opt_err, | ||
| 1035 | }; | 1051 | }; |
| 1036 | 1052 | ||
| 1037 | static match_table_t tokens = { | 1053 | static match_table_t tokens = { |
| 1038 | {Opt_dev_config, "dev_config=%s"}, | 1054 | {Opt_dev_config, "dev_config=%s"}, |
| 1039 | {Opt_dev_size, "dev_size=%u"}, | 1055 | {Opt_dev_size, "dev_size=%u"}, |
| 1040 | {Opt_hw_block_size, "hw_block_size=%u"}, | 1056 | {Opt_hw_block_size, "hw_block_size=%u"}, |
| 1057 | {Opt_hw_max_sectors, "hw_max_sectors=%u"}, | ||
| 1041 | {Opt_err, NULL} | 1058 | {Opt_err, NULL} |
| 1042 | }; | 1059 | }; |
| 1043 | 1060 | ||
| 1061 | static int tcmu_set_dev_attrib(substring_t *arg, u32 *dev_attrib) | ||
| 1062 | { | ||
| 1063 | unsigned long tmp_ul; | ||
| 1064 | char *arg_p; | ||
| 1065 | int ret; | ||
| 1066 | |||
| 1067 | arg_p = match_strdup(arg); | ||
| 1068 | if (!arg_p) | ||
| 1069 | return -ENOMEM; | ||
| 1070 | |||
| 1071 | ret = kstrtoul(arg_p, 0, &tmp_ul); | ||
| 1072 | kfree(arg_p); | ||
| 1073 | if (ret < 0) { | ||
| 1074 | pr_err("kstrtoul() failed for dev attrib\n"); | ||
| 1075 | return ret; | ||
| 1076 | } | ||
| 1077 | if (!tmp_ul) { | ||
| 1078 | pr_err("dev attrib must be nonzero\n"); | ||
| 1079 | return -EINVAL; | ||
| 1080 | } | ||
| 1081 | *dev_attrib = tmp_ul; | ||
| 1082 | return 0; | ||
| 1083 | } | ||
| 1084 | |||
| 1044 | static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, | 1085 | static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, |
| 1045 | const char *page, ssize_t count) | 1086 | const char *page, ssize_t count) |
| 1046 | { | 1087 | { |
| @@ -1048,7 +1089,6 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, | |||
| 1048 | char *orig, *ptr, *opts, *arg_p; | 1089 | char *orig, *ptr, *opts, *arg_p; |
| 1049 | substring_t args[MAX_OPT_ARGS]; | 1090 | substring_t args[MAX_OPT_ARGS]; |
| 1050 | int ret = 0, token; | 1091 | int ret = 0, token; |
| 1051 | unsigned long tmp_ul; | ||
| 1052 | 1092 | ||
| 1053 | opts = kstrdup(page, GFP_KERNEL); | 1093 | opts = kstrdup(page, GFP_KERNEL); |
| 1054 | if (!opts) | 1094 | if (!opts) |
| @@ -1082,26 +1122,19 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev, | |||
| 1082 | pr_err("kstrtoul() failed for dev_size=\n"); | 1122 | pr_err("kstrtoul() failed for dev_size=\n"); |
| 1083 | break; | 1123 | break; |
| 1084 | case Opt_hw_block_size: | 1124 | case Opt_hw_block_size: |
| 1085 | arg_p = match_strdup(&args[0]); | 1125 | ret = tcmu_set_dev_attrib(&args[0], |
| 1086 | if (!arg_p) { | 1126 | &(dev->dev_attrib.hw_block_size)); |
| 1087 | ret = -ENOMEM; | 1127 | break; |
| 1088 | break; | 1128 | case Opt_hw_max_sectors: |
| 1089 | } | 1129 | ret = tcmu_set_dev_attrib(&args[0], |
| 1090 | ret = kstrtoul(arg_p, 0, &tmp_ul); | 1130 | &(dev->dev_attrib.hw_max_sectors)); |
| 1091 | kfree(arg_p); | ||
| 1092 | if (ret < 0) { | ||
| 1093 | pr_err("kstrtoul() failed for hw_block_size=\n"); | ||
| 1094 | break; | ||
| 1095 | } | ||
| 1096 | if (!tmp_ul) { | ||
| 1097 | pr_err("hw_block_size must be nonzero\n"); | ||
| 1098 | break; | ||
| 1099 | } | ||
| 1100 | dev->dev_attrib.hw_block_size = tmp_ul; | ||
| 1101 | break; | 1131 | break; |
| 1102 | default: | 1132 | default: |
| 1103 | break; | 1133 | break; |
| 1104 | } | 1134 | } |
| 1135 | |||
| 1136 | if (ret) | ||
| 1137 | break; | ||
| 1105 | } | 1138 | } |
| 1106 | 1139 | ||
| 1107 | kfree(orig); | 1140 | kfree(orig); |
| @@ -1134,7 +1167,48 @@ tcmu_parse_cdb(struct se_cmd *cmd) | |||
| 1134 | return passthrough_parse_cdb(cmd, tcmu_queue_cmd); | 1167 | return passthrough_parse_cdb(cmd, tcmu_queue_cmd); |
| 1135 | } | 1168 | } |
| 1136 | 1169 | ||
| 1137 | static const struct target_backend_ops tcmu_ops = { | 1170 | static ssize_t tcmu_cmd_time_out_show(struct config_item *item, char *page) |
| 1171 | { | ||
| 1172 | struct se_dev_attrib *da = container_of(to_config_group(item), | ||
| 1173 | struct se_dev_attrib, da_group); | ||
| 1174 | struct tcmu_dev *udev = container_of(da->da_dev, | ||
| 1175 | struct tcmu_dev, se_dev); | ||
| 1176 | |||
| 1177 | return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC); | ||
| 1178 | } | ||
| 1179 | |||
| 1180 | static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *page, | ||
| 1181 | size_t count) | ||
| 1182 | { | ||
| 1183 | struct se_dev_attrib *da = container_of(to_config_group(item), | ||
| 1184 | struct se_dev_attrib, da_group); | ||
| 1185 | struct tcmu_dev *udev = container_of(da->da_dev, | ||
| 1186 | struct tcmu_dev, se_dev); | ||
| 1187 | u32 val; | ||
| 1188 | int ret; | ||
| 1189 | |||
| 1190 | if (da->da_dev->export_count) { | ||
| 1191 | pr_err("Unable to set tcmu cmd_time_out while exports exist\n"); | ||
| 1192 | return -EINVAL; | ||
| 1193 | } | ||
| 1194 | |||
| 1195 | ret = kstrtou32(page, 0, &val); | ||
| 1196 | if (ret < 0) | ||
| 1197 | return ret; | ||
| 1198 | |||
| 1199 | if (!val) { | ||
| 1200 | pr_err("Illegal value for cmd_time_out\n"); | ||
| 1201 | return -EINVAL; | ||
| 1202 | } | ||
| 1203 | |||
| 1204 | udev->cmd_time_out = val * MSEC_PER_SEC; | ||
| 1205 | return count; | ||
| 1206 | } | ||
| 1207 | CONFIGFS_ATTR(tcmu_, cmd_time_out); | ||
| 1208 | |||
| 1209 | static struct configfs_attribute **tcmu_attrs; | ||
| 1210 | |||
| 1211 | static struct target_backend_ops tcmu_ops = { | ||
| 1138 | .name = "user", | 1212 | .name = "user", |
| 1139 | .owner = THIS_MODULE, | 1213 | .owner = THIS_MODULE, |
| 1140 | .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, | 1214 | .transport_flags = TRANSPORT_FLAG_PASSTHROUGH, |
| @@ -1148,12 +1222,12 @@ static const struct target_backend_ops tcmu_ops = { | |||
| 1148 | .show_configfs_dev_params = tcmu_show_configfs_dev_params, | 1222 | .show_configfs_dev_params = tcmu_show_configfs_dev_params, |
| 1149 | .get_device_type = sbc_get_device_type, | 1223 | .get_device_type = sbc_get_device_type, |
| 1150 | .get_blocks = tcmu_get_blocks, | 1224 | .get_blocks = tcmu_get_blocks, |
| 1151 | .tb_dev_attrib_attrs = passthrough_attrib_attrs, | 1225 | .tb_dev_attrib_attrs = NULL, |
| 1152 | }; | 1226 | }; |
| 1153 | 1227 | ||
| 1154 | static int __init tcmu_module_init(void) | 1228 | static int __init tcmu_module_init(void) |
| 1155 | { | 1229 | { |
| 1156 | int ret; | 1230 | int ret, i, len = 0; |
| 1157 | 1231 | ||
| 1158 | BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); | 1232 | BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); |
| 1159 | 1233 | ||
| @@ -1175,12 +1249,31 @@ static int __init tcmu_module_init(void) | |||
| 1175 | goto out_unreg_device; | 1249 | goto out_unreg_device; |
| 1176 | } | 1250 | } |
| 1177 | 1251 | ||
| 1252 | for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { | ||
| 1253 | len += sizeof(struct configfs_attribute *); | ||
| 1254 | } | ||
| 1255 | len += sizeof(struct configfs_attribute *) * 2; | ||
| 1256 | |||
| 1257 | tcmu_attrs = kzalloc(len, GFP_KERNEL); | ||
| 1258 | if (!tcmu_attrs) { | ||
| 1259 | ret = -ENOMEM; | ||
| 1260 | goto out_unreg_genl; | ||
| 1261 | } | ||
| 1262 | |||
| 1263 | for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { | ||
| 1264 | tcmu_attrs[i] = passthrough_attrib_attrs[i]; | ||
| 1265 | } | ||
| 1266 | tcmu_attrs[i] = &tcmu_attr_cmd_time_out; | ||
| 1267 | tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; | ||
| 1268 | |||
| 1178 | ret = transport_backend_register(&tcmu_ops); | 1269 | ret = transport_backend_register(&tcmu_ops); |
| 1179 | if (ret) | 1270 | if (ret) |
| 1180 | goto out_unreg_genl; | 1271 | goto out_attrs; |
| 1181 | 1272 | ||
| 1182 | return 0; | 1273 | return 0; |
| 1183 | 1274 | ||
| 1275 | out_attrs: | ||
| 1276 | kfree(tcmu_attrs); | ||
| 1184 | out_unreg_genl: | 1277 | out_unreg_genl: |
| 1185 | genl_unregister_family(&tcmu_genl_family); | 1278 | genl_unregister_family(&tcmu_genl_family); |
| 1186 | out_unreg_device: | 1279 | out_unreg_device: |
| @@ -1194,6 +1287,7 @@ out_free_cache: | |||
| 1194 | static void __exit tcmu_module_exit(void) | 1287 | static void __exit tcmu_module_exit(void) |
| 1195 | { | 1288 | { |
| 1196 | target_backend_unregister(&tcmu_ops); | 1289 | target_backend_unregister(&tcmu_ops); |
| 1290 | kfree(tcmu_attrs); | ||
| 1197 | genl_unregister_family(&tcmu_genl_family); | 1291 | genl_unregister_family(&tcmu_genl_family); |
| 1198 | root_device_unregister(tcmu_root_device); | 1292 | root_device_unregister(tcmu_root_device); |
| 1199 | kmem_cache_destroy(tcmu_cmd_cache); | 1293 | kmem_cache_destroy(tcmu_cmd_cache); |
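Because tcmu now appends its own cmd_time_out attribute, module init builds a NULL-terminated copy of passthrough_attrib_attrs with two extra pointer slots: one for the new attribute and one for the terminating NULL (the `* 2` in the length computation above). A standalone sketch of that array-extension idiom, with a simplified attr type standing in for struct configfs_attribute:

    #include <stdlib.h>

    struct attr { const char *name; };

    /* Copy the NULL-terminated array 'base' and append 'extra',
     * keeping the NULL sentinel at the end. */
    static struct attr **extend_attrs(struct attr *const *base,
                                      struct attr *extra)
    {
            size_t n = 0, i;
            struct attr **out;

            while (base[n])
                    n++;
            out = calloc(n + 2, sizeof(*out)); /* +1 new attr, +1 NULL */
            if (!out)
                    return NULL;
            for (i = 0; i < n; i++)
                    out[i] = (struct attr *)base[i];
            out[n] = extra;                    /* out[n + 1] stays NULL */
            return out;
    }

Note the matching kfree(tcmu_attrs) on both the module-exit path and the transport_backend_register() failure path.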
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 6ee55a2d47bb..e65808c482f1 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c | |||
| @@ -257,7 +257,7 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, | |||
| 257 | { | 257 | { |
| 258 | unsigned int baud = tty_termios_baud_rate(termios); | 258 | unsigned int baud = tty_termios_baud_rate(termios); |
| 259 | struct dw8250_data *d = p->private_data; | 259 | struct dw8250_data *d = p->private_data; |
| 260 | unsigned int rate; | 260 | long rate; |
| 261 | int ret; | 261 | int ret; |
| 262 | 262 | ||
| 263 | if (IS_ERR(d->clk) || !old) | 263 | if (IS_ERR(d->clk) || !old) |
| @@ -265,7 +265,12 @@ static void dw8250_set_termios(struct uart_port *p, struct ktermios *termios, | |||
| 265 | 265 | ||
| 266 | clk_disable_unprepare(d->clk); | 266 | clk_disable_unprepare(d->clk); |
| 267 | rate = clk_round_rate(d->clk, baud * 16); | 267 | rate = clk_round_rate(d->clk, baud * 16); |
| 268 | ret = clk_set_rate(d->clk, rate); | 268 | if (rate < 0) |
| 269 | ret = rate; | ||
| 270 | else if (rate == 0) | ||
| 271 | ret = -ENOENT; | ||
| 272 | else | ||
| 273 | ret = clk_set_rate(d->clk, rate); | ||
| 269 | clk_prepare_enable(d->clk); | 274 | clk_prepare_enable(d->clk); |
| 270 | 275 | ||
| 271 | if (!ret) | 276 | if (!ret) |
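clk_round_rate() returns a long: negative values are error codes and zero means no usable rate, so storing the result in an unsigned int (as the old code did) silently turned errors into huge positive "rates". A two-line demonstration of the truncation the type change avoids:

    #include <stdio.h>

    int main(void)
    {
            long rate = -34;          /* e.g. -ERANGE from clk_round_rate() */
            unsigned int old = rate;  /* the old declaration rewrapped it */

            printf("as unsigned int: %u (error lost)\n", old);
            printf("as long:         %ld (error preserved)\n", rate);
            return 0;
    }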
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 8789ea423ccf..56f92d7348bf 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c | |||
| @@ -2373,7 +2373,7 @@ static int __init pl011_console_match(struct console *co, char *name, int idx, | |||
| 2373 | if (strcmp(name, "qdf2400_e44") == 0) { | 2373 | if (strcmp(name, "qdf2400_e44") == 0) { |
| 2374 | pr_info_once("UART: Working around QDF2400 SoC erratum 44"); | 2374 | pr_info_once("UART: Working around QDF2400 SoC erratum 44"); |
| 2375 | qdf2400_e44_present = true; | 2375 | qdf2400_e44_present = true; |
| 2376 | } else if (strcmp(name, "pl011") != 0 || strcmp(name, "ttyAMA") != 0) { | 2376 | } else if (strcmp(name, "pl011") != 0) { |
| 2377 | return -ENODEV; | 2377 | return -ENODEV; |
| 2378 | } | 2378 | } |
| 2379 | 2379 | ||
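The replaced condition above was a De Morgan slip: strcmp(name, "pl011") != 0 || strcmp(name, "ttyAMA") != 0 holds for every input, since no string equals both, so console matching always returned -ENODEV. A quick truth check:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *names[] = { "pl011", "ttyAMA", "other" };

            for (int i = 0; i < 3; i++) {
                    const char *name = names[i];
                    /* old test: true for all inputs -> always -ENODEV */
                    int old = strcmp(name, "pl011") != 0 ||
                              strcmp(name, "ttyAMA") != 0;
                    /* fixed test: true only when name is not "pl011" */
                    int fixed = strcmp(name, "pl011") != 0;

                    printf("%-6s old=%d fixed=%d\n", name, old, fixed);
            }
            return 0;
    }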
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c index bcf1d33e6ffe..c334bcc59c64 100644 --- a/drivers/tty/serial/st-asc.c +++ b/drivers/tty/serial/st-asc.c | |||
| @@ -575,12 +575,13 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios, | |||
| 575 | pinctrl_select_state(ascport->pinctrl, | 575 | pinctrl_select_state(ascport->pinctrl, |
| 576 | ascport->states[NO_HW_FLOWCTRL]); | 576 | ascport->states[NO_HW_FLOWCTRL]); |
| 577 | 577 | ||
| 578 | gpiod = devm_get_gpiod_from_child(port->dev, "rts", | 578 | gpiod = devm_fwnode_get_gpiod_from_child(port->dev, |
| 579 | &np->fwnode); | 579 | "rts", |
| 580 | if (!IS_ERR(gpiod)) { | 580 | &np->fwnode, |
| 581 | gpiod_direction_output(gpiod, 0); | 581 | GPIOD_OUT_LOW, |
| 582 | np->name); | ||
| 583 | if (!IS_ERR(gpiod)) | ||
| 582 | ascport->rts = gpiod; | 584 | ascport->rts = gpiod; |
| 583 | } | ||
| 584 | } | 585 | } |
| 585 | } | 586 | } |
| 586 | 587 | ||
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 68947f6de5ad..b0500a0a87b8 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c | |||
| @@ -271,10 +271,13 @@ const struct file_operations tty_ldiscs_proc_fops = { | |||
| 271 | 271 | ||
| 272 | struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) | 272 | struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) |
| 273 | { | 273 | { |
| 274 | struct tty_ldisc *ld; | ||
| 275 | |||
| 274 | ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT); | 276 | ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT); |
| 275 | if (!tty->ldisc) | 277 | ld = tty->ldisc; |
| 278 | if (!ld) | ||
| 276 | ldsem_up_read(&tty->ldisc_sem); | 279 | ldsem_up_read(&tty->ldisc_sem); |
| 277 | return tty->ldisc; | 280 | return ld; |
| 278 | } | 281 | } |
| 279 | EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); | 282 | EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); |
| 280 | 283 | ||
| @@ -489,41 +492,6 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld) | |||
| 489 | } | 492 | } |
| 490 | 493 | ||
| 491 | /** | 494 | /** |
| 492 | * tty_ldisc_restore - helper for tty ldisc change | ||
| 493 | * @tty: tty to recover | ||
| 494 | * @old: previous ldisc | ||
| 495 | * | ||
| 496 | * Restore the previous line discipline or N_TTY when a line discipline | ||
| 497 | * change fails due to an open error | ||
| 498 | */ | ||
| 499 | |||
| 500 | static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) | ||
| 501 | { | ||
| 502 | struct tty_ldisc *new_ldisc; | ||
| 503 | int r; | ||
| 504 | |||
| 505 | /* There is an outstanding reference here so this is safe */ | ||
| 506 | old = tty_ldisc_get(tty, old->ops->num); | ||
| 507 | WARN_ON(IS_ERR(old)); | ||
| 508 | tty->ldisc = old; | ||
| 509 | tty_set_termios_ldisc(tty, old->ops->num); | ||
| 510 | if (tty_ldisc_open(tty, old) < 0) { | ||
| 511 | tty_ldisc_put(old); | ||
| 512 | /* This driver is always present */ | ||
| 513 | new_ldisc = tty_ldisc_get(tty, N_TTY); | ||
| 514 | if (IS_ERR(new_ldisc)) | ||
| 515 | panic("n_tty: get"); | ||
| 516 | tty->ldisc = new_ldisc; | ||
| 517 | tty_set_termios_ldisc(tty, N_TTY); | ||
| 518 | r = tty_ldisc_open(tty, new_ldisc); | ||
| 519 | if (r < 0) | ||
| 520 | panic("Couldn't open N_TTY ldisc for " | ||
| 521 | "%s --- error %d.", | ||
| 522 | tty_name(tty), r); | ||
| 523 | } | ||
| 524 | } | ||
| 525 | |||
| 526 | /** | ||
| 527 | * tty_set_ldisc - set line discipline | 495 | * tty_set_ldisc - set line discipline |
| 528 | * @tty: the terminal to set | 496 | * @tty: the terminal to set |
| 529 | * @ldisc: the line discipline | 497 | * @ldisc: the line discipline |
| @@ -536,12 +504,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old) | |||
| 536 | 504 | ||
| 537 | int tty_set_ldisc(struct tty_struct *tty, int disc) | 505 | int tty_set_ldisc(struct tty_struct *tty, int disc) |
| 538 | { | 506 | { |
| 539 | int retval; | 507 | int retval, old_disc; |
| 540 | struct tty_ldisc *old_ldisc, *new_ldisc; | ||
| 541 | |||
| 542 | new_ldisc = tty_ldisc_get(tty, disc); | ||
| 543 | if (IS_ERR(new_ldisc)) | ||
| 544 | return PTR_ERR(new_ldisc); | ||
| 545 | 508 | ||
| 546 | tty_lock(tty); | 509 | tty_lock(tty); |
| 547 | retval = tty_ldisc_lock(tty, 5 * HZ); | 510 | retval = tty_ldisc_lock(tty, 5 * HZ); |
| @@ -554,7 +517,8 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) | |||
| 554 | } | 517 | } |
| 555 | 518 | ||
| 556 | /* Check the no-op case */ | 519 | /* Check the no-op case */ |
| 557 | if (tty->ldisc->ops->num == disc) | 520 | old_disc = tty->ldisc->ops->num; |
| 521 | if (old_disc == disc) | ||
| 558 | goto out; | 522 | goto out; |
| 559 | 523 | ||
| 560 | if (test_bit(TTY_HUPPED, &tty->flags)) { | 524 | if (test_bit(TTY_HUPPED, &tty->flags)) { |
| @@ -563,34 +527,25 @@ int tty_set_ldisc(struct tty_struct *tty, int disc) | |||
| 563 | goto out; | 527 | goto out; |
| 564 | } | 528 | } |
| 565 | 529 | ||
| 566 | old_ldisc = tty->ldisc; | 530 | retval = tty_ldisc_reinit(tty, disc); |
| 567 | |||
| 568 | /* Shutdown the old discipline. */ | ||
| 569 | tty_ldisc_close(tty, old_ldisc); | ||
| 570 | |||
| 571 | /* Now set up the new line discipline. */ | ||
| 572 | tty->ldisc = new_ldisc; | ||
| 573 | tty_set_termios_ldisc(tty, disc); | ||
| 574 | |||
| 575 | retval = tty_ldisc_open(tty, new_ldisc); | ||
| 576 | if (retval < 0) { | 531 | if (retval < 0) { |
| 577 | /* Back to the old one or N_TTY if we can't */ | 532 | /* Back to the old one or N_TTY if we can't */ |
| 578 | tty_ldisc_put(new_ldisc); | 533 | if (tty_ldisc_reinit(tty, old_disc) < 0) { |
| 579 | tty_ldisc_restore(tty, old_ldisc); | 534 | pr_err("tty: TIOCSETD failed, reinitializing N_TTY\n"); |
| 535 | if (tty_ldisc_reinit(tty, N_TTY) < 0) { | ||
| 536 | /* At this point we have tty->ldisc == NULL. */ | ||
| 537 | pr_err("tty: reinitializing N_TTY failed\n"); | ||
| 538 | } | ||
| 539 | } | ||
| 580 | } | 540 | } |
| 581 | 541 | ||
| 582 | if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc) { | 542 | if (tty->ldisc && tty->ldisc->ops->num != old_disc && |
| 543 | tty->ops->set_ldisc) { | ||
| 583 | down_read(&tty->termios_rwsem); | 544 | down_read(&tty->termios_rwsem); |
| 584 | tty->ops->set_ldisc(tty); | 545 | tty->ops->set_ldisc(tty); |
| 585 | up_read(&tty->termios_rwsem); | 546 | up_read(&tty->termios_rwsem); |
| 586 | } | 547 | } |
| 587 | 548 | ||
| 588 | /* At this point we hold a reference to the new ldisc and a | ||
| 589 | reference to the old ldisc, or we hold two references to | ||
| 590 | the old ldisc (if it was restored as part of error cleanup | ||
| 591 | above). In either case, releasing a single reference from | ||
| 592 | the old ldisc is correct. */ | ||
| 593 | new_ldisc = old_ldisc; | ||
| 594 | out: | 549 | out: |
| 595 | tty_ldisc_unlock(tty); | 550 | tty_ldisc_unlock(tty); |
| 596 | 551 | ||
| @@ -598,7 +553,6 @@ out: | |||
| 598 | already running */ | 553 | already running */ |
| 599 | tty_buffer_restart_work(tty->port); | 554 | tty_buffer_restart_work(tty->port); |
| 600 | err: | 555 | err: |
| 601 | tty_ldisc_put(new_ldisc); /* drop the extra reference */ | ||
| 602 | tty_unlock(tty); | 556 | tty_unlock(tty); |
| 603 | return retval; | 557 | return retval; |
| 604 | } | 558 | } |
| @@ -659,10 +613,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) | |||
| 659 | int retval; | 613 | int retval; |
| 660 | 614 | ||
| 661 | ld = tty_ldisc_get(tty, disc); | 615 | ld = tty_ldisc_get(tty, disc); |
| 662 | if (IS_ERR(ld)) { | 616 | if (IS_ERR(ld)) |
| 663 | BUG_ON(disc == N_TTY); | ||
| 664 | return PTR_ERR(ld); | 617 | return PTR_ERR(ld); |
| 665 | } | ||
| 666 | 618 | ||
| 667 | if (tty->ldisc) { | 619 | if (tty->ldisc) { |
| 668 | tty_ldisc_close(tty, tty->ldisc); | 620 | tty_ldisc_close(tty, tty->ldisc); |
| @@ -674,10 +626,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) | |||
| 674 | tty_set_termios_ldisc(tty, disc); | 626 | tty_set_termios_ldisc(tty, disc); |
| 675 | retval = tty_ldisc_open(tty, tty->ldisc); | 627 | retval = tty_ldisc_open(tty, tty->ldisc); |
| 676 | if (retval) { | 628 | if (retval) { |
| 677 | if (!WARN_ON(disc == N_TTY)) { | 629 | tty_ldisc_put(tty->ldisc); |
| 678 | tty_ldisc_put(tty->ldisc); | 630 | tty->ldisc = NULL; |
| 679 | tty->ldisc = NULL; | ||
| 680 | } | ||
| 681 | } | 631 | } |
| 682 | return retval; | 632 | return retval; |
| 683 | } | 633 | } |
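Two independent fixes above: tty_set_ldisc() now routes error recovery through tty_ldisc_reinit() with a disc -> old_disc -> N_TTY fallback chain (accepting tty->ldisc == NULL only when even N_TTY fails), and tty_ldisc_ref_wait() reads tty->ldisc exactly once so the NULL test and the returned pointer come from the same snapshot. A compact model of the read-once rule, assuming a pointer other threads may rewrite:

    struct ldisc;
    extern struct ldisc *volatile shared;   /* concurrently updated */

    static void drop_lock(void) { /* stand-in for ldsem_up_read() */ }

    /* Racy: the tested value and the returned value are separate loads. */
    struct ldisc *ref_racy(void)
    {
            if (!shared)
                    drop_lock();
            return shared;          /* may differ from what was tested */
    }

    /* Fixed: one load feeds both the decision and the result. */
    struct ldisc *ref_once(void)
    {
            struct ldisc *ld = shared;

            if (!ld)
                    drop_lock();
            return ld;
    }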
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index f03692ec5520..8fb309a0ff6b 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c | |||
| @@ -1381,7 +1381,7 @@ static int usbtmc_probe(struct usb_interface *intf, | |||
| 1381 | 1381 | ||
| 1382 | dev_dbg(&intf->dev, "%s called\n", __func__); | 1382 | dev_dbg(&intf->dev, "%s called\n", __func__); |
| 1383 | 1383 | ||
| 1384 | data = kmalloc(sizeof(*data), GFP_KERNEL); | 1384 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
| 1385 | if (!data) | 1385 | if (!data) |
| 1386 | return -ENOMEM; | 1386 | return -ENOMEM; |
| 1387 | 1387 | ||
| @@ -1444,6 +1444,13 @@ static int usbtmc_probe(struct usb_interface *intf, | |||
| 1444 | break; | 1444 | break; |
| 1445 | } | 1445 | } |
| 1446 | } | 1446 | } |
| 1447 | |||
| 1448 | if (!data->bulk_out || !data->bulk_in) { | ||
| 1449 | dev_err(&intf->dev, "bulk endpoints not found\n"); | ||
| 1450 | retcode = -ENODEV; | ||
| 1451 | goto err_put; | ||
| 1452 | } | ||
| 1453 | |||
| 1447 | /* Find int endpoint */ | 1454 | /* Find int endpoint */ |
| 1448 | for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) { | 1455 | for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) { |
| 1449 | endpoint = &iface_desc->endpoint[n].desc; | 1456 | endpoint = &iface_desc->endpoint[n].desc; |
| @@ -1469,8 +1476,10 @@ static int usbtmc_probe(struct usb_interface *intf, | |||
| 1469 | if (data->iin_ep_present) { | 1476 | if (data->iin_ep_present) { |
| 1470 | /* allocate int urb */ | 1477 | /* allocate int urb */ |
| 1471 | data->iin_urb = usb_alloc_urb(0, GFP_KERNEL); | 1478 | data->iin_urb = usb_alloc_urb(0, GFP_KERNEL); |
| 1472 | if (!data->iin_urb) | 1479 | if (!data->iin_urb) { |
| 1480 | retcode = -ENOMEM; | ||
| 1473 | goto error_register; | 1481 | goto error_register; |
| 1482 | } | ||
| 1474 | 1483 | ||
| 1475 | /* Protect interrupt in endpoint data until iin_urb is freed */ | 1484 | /* Protect interrupt in endpoint data until iin_urb is freed */ |
| 1476 | kref_get(&data->kref); | 1485 | kref_get(&data->kref); |
| @@ -1478,8 +1487,10 @@ static int usbtmc_probe(struct usb_interface *intf, | |||
| 1478 | /* allocate buffer for interrupt in */ | 1487 | /* allocate buffer for interrupt in */ |
| 1479 | data->iin_buffer = kmalloc(data->iin_wMaxPacketSize, | 1488 | data->iin_buffer = kmalloc(data->iin_wMaxPacketSize, |
| 1480 | GFP_KERNEL); | 1489 | GFP_KERNEL); |
| 1481 | if (!data->iin_buffer) | 1490 | if (!data->iin_buffer) { |
| 1491 | retcode = -ENOMEM; | ||
| 1482 | goto error_register; | 1492 | goto error_register; |
| 1493 | } | ||
| 1483 | 1494 | ||
| 1484 | /* fill interrupt urb */ | 1495 | /* fill interrupt urb */ |
| 1485 | usb_fill_int_urb(data->iin_urb, data->usb_dev, | 1496 | usb_fill_int_urb(data->iin_urb, data->usb_dev, |
| @@ -1512,6 +1523,7 @@ error_register: | |||
| 1512 | sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp); | 1523 | sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp); |
| 1513 | sysfs_remove_group(&intf->dev.kobj, &data_attr_grp); | 1524 | sysfs_remove_group(&intf->dev.kobj, &data_attr_grp); |
| 1514 | usbtmc_free_int(data); | 1525 | usbtmc_free_int(data); |
| 1526 | err_put: | ||
| 1515 | kref_put(&data->kref, usbtmc_delete); | 1527 | kref_put(&data->kref, usbtmc_delete); |
| 1516 | return retcode; | 1528 | return retcode; |
| 1517 | } | 1529 | } |
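Two hardening changes in usbtmc_probe(): kzalloc() guarantees bulk_in/bulk_out start out zero, which is what makes the new "both bulk endpoints found" test meaningful, and the -ENODEV bail-out (plus the explicit -ENOMEM settings) keeps a device with a short or malformed descriptor set from being half-configured. A hedged sketch of the probe-time validation idiom, using the generic usb_endpoint_is_bulk_in/out() helpers rather than this driver's exact loop:

    data = kzalloc(sizeof(*data), GFP_KERNEL);  /* unset endpoints == 0 */
    if (!data)
            return -ENOMEM;

    for (n = 0; n < iface_desc->desc.bNumEndpoints; n++) {
            endpoint = &iface_desc->endpoint[n].desc;
            if (usb_endpoint_is_bulk_in(endpoint))
                    data->bulk_in = endpoint->bEndpointAddress;
            else if (usb_endpoint_is_bulk_out(endpoint))
                    data->bulk_out = endpoint->bEndpointAddress;
    }
    if (!data->bulk_in || !data->bulk_out) {
            retcode = -ENODEV;      /* descriptors are device-controlled */
            goto err_put;           /* single kref_put() cleans up */
    }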
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 25dbd8c7aec7..4be52c602e9b 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c | |||
| @@ -280,6 +280,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, | |||
| 280 | 280 | ||
| 281 | /* | 281 | /* |
| 282 | * Adjust bInterval for quirked devices. | 282 | * Adjust bInterval for quirked devices. |
| 283 | */ | ||
| 284 | /* | ||
| 285 | * This quirk fixes bIntervals reported in ms. | ||
| 286 | */ | ||
| 287 | if (to_usb_device(ddev)->quirks & | ||
| 288 | USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) { | ||
| 289 | n = clamp(fls(d->bInterval) + 3, i, j); | ||
| 290 | i = j = n; | ||
| 291 | } | ||
| 292 | /* | ||
| 283 | * This quirk fixes bIntervals reported in | 293 | * This quirk fixes bIntervals reported in |
| 284 | * linear microframes. | 294 | * linear microframes. |
| 285 | */ | 295 | */ |
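The new quirk rescales a bInterval that buggy devices report in milliseconds into the 2^(bInterval-1) encoding the core expects; for a high-speed interrupt endpoint the period is 2^(bInterval-1) microframes of 125 us, so bInterval = fls(ms) + 3 recovers the intended period (exactly, for power-of-two values), clamped to the endpoint's legal [i, j] range. A checkable worked example:

    #include <stdio.h>

    /* fls(): 1-based index of the highest set bit, 0 for 0 --
     * a userspace stand-in for the kernel helper. */
    static int fls_(unsigned int v)
    {
            int n = 0;

            while (v) {
                    n++;
                    v >>= 1;
            }
            return n;
    }

    int main(void)
    {
            for (unsigned int ms = 1; ms <= 16; ms <<= 1) {
                    int n = fls_(ms) + 3;

                    printf("%2u ms -> bInterval %d -> %d microframes (%g ms)\n",
                           ms, n, 1 << (n - 1), (1 << (n - 1)) / 8.0);
            }
            return 0;   /* 8 ms -> bInterval 7 -> 64 microframes = 8 ms */
    }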
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index f0dd08198d74..5286bf67869a 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
| @@ -4275,7 +4275,7 @@ static void hub_set_initial_usb2_lpm_policy(struct usb_device *udev) | |||
| 4275 | struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); | 4275 | struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent); |
| 4276 | int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN; | 4276 | int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN; |
| 4277 | 4277 | ||
| 4278 | if (!udev->usb2_hw_lpm_capable) | 4278 | if (!udev->usb2_hw_lpm_capable || !udev->bos) |
| 4279 | return; | 4279 | return; |
| 4280 | 4280 | ||
| 4281 | if (hub) | 4281 | if (hub) |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 24f9f98968a5..96b21b0dac1e 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
| @@ -170,6 +170,14 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 170 | /* M-Systems Flash Disk Pioneers */ | 170 | /* M-Systems Flash Disk Pioneers */ |
| 171 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, | 171 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, |
| 172 | 172 | ||
| 173 | /* Baum Vario Ultra */ | ||
| 174 | { USB_DEVICE(0x0904, 0x6101), .driver_info = | ||
| 175 | USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, | ||
| 176 | { USB_DEVICE(0x0904, 0x6102), .driver_info = | ||
| 177 | USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, | ||
| 178 | { USB_DEVICE(0x0904, 0x6103), .driver_info = | ||
| 179 | USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL }, | ||
| 180 | |||
| 173 | /* Keytouch QWERTY Panel keyboard */ | 181 | /* Keytouch QWERTY Panel keyboard */ |
| 174 | { USB_DEVICE(0x0926, 0x3333), .driver_info = | 182 | { USB_DEVICE(0x0926, 0x3333), .driver_info = |
| 175 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 183 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 0d75158e43fe..79e7a3480d51 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
| @@ -171,6 +171,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, | |||
| 171 | int status) | 171 | int status) |
| 172 | { | 172 | { |
| 173 | struct dwc3 *dwc = dep->dwc; | 173 | struct dwc3 *dwc = dep->dwc; |
| 174 | unsigned int unmap_after_complete = false; | ||
| 174 | 175 | ||
| 175 | req->started = false; | 176 | req->started = false; |
| 176 | list_del(&req->list); | 177 | list_del(&req->list); |
| @@ -180,11 +181,19 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, | |||
| 180 | if (req->request.status == -EINPROGRESS) | 181 | if (req->request.status == -EINPROGRESS) |
| 181 | req->request.status = status; | 182 | req->request.status = status; |
| 182 | 183 | ||
| 183 | if (dwc->ep0_bounced && dep->number <= 1) | 184 | /* |
| 185 | * NOTICE we don't want to unmap before calling ->complete() if we're | ||
| 186 | * dealing with a bounced ep0 request. If we unmap it here, we would end | ||
| 187 | * up overwriting the contents of req->buf and this could confuse the | ||
| 188 | * gadget driver. | ||
| 189 | */ | ||
| 190 | if (dwc->ep0_bounced && dep->number <= 1) { | ||
| 184 | dwc->ep0_bounced = false; | 191 | dwc->ep0_bounced = false; |
| 185 | 192 | unmap_after_complete = true; | |
| 186 | usb_gadget_unmap_request_by_dev(dwc->sysdev, | 193 | } else { |
| 187 | &req->request, req->direction); | 194 | usb_gadget_unmap_request_by_dev(dwc->sysdev, |
| 195 | &req->request, req->direction); | ||
| 196 | } | ||
| 188 | 197 | ||
| 189 | trace_dwc3_gadget_giveback(req); | 198 | trace_dwc3_gadget_giveback(req); |
| 190 | 199 | ||
| @@ -192,6 +201,10 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, | |||
| 192 | usb_gadget_giveback_request(&dep->endpoint, &req->request); | 201 | usb_gadget_giveback_request(&dep->endpoint, &req->request); |
| 193 | spin_lock(&dwc->lock); | 202 | spin_lock(&dwc->lock); |
| 194 | 203 | ||
| 204 | if (unmap_after_complete) | ||
| 205 | usb_gadget_unmap_request_by_dev(dwc->sysdev, | ||
| 206 | &req->request, req->direction); | ||
| 207 | |||
| 195 | if (dep->number > 1) | 208 | if (dep->number > 1) |
| 196 | pm_runtime_put(dwc->dev); | 209 | pm_runtime_put(dwc->dev); |
| 197 | } | 210 | } |
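The dwc3 fix above reorders unmap against the completion callback for bounced ep0 requests: unmapping first would overwrite req->buf before the gadget driver's ->complete() has read it, so a flag defers the unmap until afterwards. The shape of the pattern, condensed from the hunk:

    bool unmap_after_complete = false;

    if (dwc->ep0_bounced && dep->number <= 1) {
            dwc->ep0_bounced = false;
            unmap_after_complete = true;    /* keep req->buf intact */
    } else {
            usb_gadget_unmap_request_by_dev(dwc->sysdev,
                                            &req->request, req->direction);
    }

    /* ->complete() runs here and may read req->buf ... */

    if (unmap_after_complete)
            usb_gadget_unmap_request_by_dev(dwc->sysdev,
                                            &req->request, req->direction);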
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c index a30766ca4226..5e3828d9dac7 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c | |||
| @@ -535,13 +535,15 @@ static int acm_notify_serial_state(struct f_acm *acm) | |||
| 535 | { | 535 | { |
| 536 | struct usb_composite_dev *cdev = acm->port.func.config->cdev; | 536 | struct usb_composite_dev *cdev = acm->port.func.config->cdev; |
| 537 | int status; | 537 | int status; |
| 538 | __le16 serial_state; | ||
| 538 | 539 | ||
| 539 | spin_lock(&acm->lock); | 540 | spin_lock(&acm->lock); |
| 540 | if (acm->notify_req) { | 541 | if (acm->notify_req) { |
| 541 | dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n", | 542 | dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n", |
| 542 | acm->port_num, acm->serial_state); | 543 | acm->port_num, acm->serial_state); |
| 544 | serial_state = cpu_to_le16(acm->serial_state); | ||
| 543 | status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, | 545 | status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE, |
| 544 | 0, &acm->serial_state, sizeof(acm->serial_state)); | 546 | 0, &serial_state, sizeof(acm->serial_state)); |
| 545 | } else { | 547 | } else { |
| 546 | acm->pending = true; | 548 | acm->pending = true; |
| 547 | status = 0; | 549 | status = 0; |
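serial_state lives in CPU byte order inside the driver, but the CDC SERIAL_STATE notification carries a little-endian 16-bit bitmap on the wire, so the hunk converts into a local __le16 before handing the buffer to acm_cdc_notify(); on big-endian hosts the old code sent the two bytes swapped. A tiny standalone demonstration of the conversion:

    #include <stdio.h>
    #include <stdint.h>

    /* cpu_to_le16() stand-in: emit the little-endian byte layout
     * regardless of host endianness. */
    static void cpu_to_le16_bytes(uint16_t v, uint8_t out[2])
    {
            out[0] = v & 0xff;      /* least significant byte first */
            out[1] = v >> 8;
    }

    int main(void)
    {
            uint8_t wire[2];

            cpu_to_le16_bytes(0x0003, wire);  /* e.g. DCD | DSR bits */
            printf("wire bytes: %02x %02x\n", wire[0], wire[1]); /* 03 00 */
            return 0;
    }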
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 89b48bcc377a..5eea44823ca0 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c | |||
| @@ -367,7 +367,7 @@ try_again: | |||
| 367 | count = min_t(unsigned, count, hidg->report_length); | 367 | count = min_t(unsigned, count, hidg->report_length); |
| 368 | 368 | ||
| 369 | spin_unlock_irqrestore(&hidg->write_spinlock, flags); | 369 | spin_unlock_irqrestore(&hidg->write_spinlock, flags); |
| 370 | status = copy_from_user(hidg->req->buf, buffer, count); | 370 | status = copy_from_user(req->buf, buffer, count); |
| 371 | 371 | ||
| 372 | if (status != 0) { | 372 | if (status != 0) { |
| 373 | ERROR(hidg->func.config->cdev, | 373 | ERROR(hidg->func.config->cdev, |
| @@ -378,9 +378,9 @@ try_again: | |||
| 378 | 378 | ||
| 379 | spin_lock_irqsave(&hidg->write_spinlock, flags); | 379 | spin_lock_irqsave(&hidg->write_spinlock, flags); |
| 380 | 380 | ||
| 381 | /* we our function has been disabled by host */ | 381 | /* when our function has been disabled by host */ |
| 382 | if (!hidg->req) { | 382 | if (!hidg->req) { |
| 383 | free_ep_req(hidg->in_ep, hidg->req); | 383 | free_ep_req(hidg->in_ep, req); |
| 384 | /* | 384 | /* |
| 385 | * TODO | 385 | * TODO |
| 386 | * Should we fail with error here? | 386 | * Should we fail with error here? |
| @@ -394,7 +394,7 @@ try_again: | |||
| 394 | req->complete = f_hidg_req_complete; | 394 | req->complete = f_hidg_req_complete; |
| 395 | req->context = hidg; | 395 | req->context = hidg; |
| 396 | 396 | ||
| 397 | status = usb_ep_queue(hidg->in_ep, hidg->req, GFP_ATOMIC); | 397 | status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); |
| 398 | if (status < 0) { | 398 | if (status < 0) { |
| 399 | ERROR(hidg->func.config->cdev, | 399 | ERROR(hidg->func.config->cdev, |
| 400 | "usb_ep_queue error on int endpoint %zd\n", status); | 400 | "usb_ep_queue error on int endpoint %zd\n", status); |
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index 29b41b5dee04..f8a1881609a2 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c | |||
| @@ -594,6 +594,14 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) | |||
| 594 | opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U); | 594 | opts->streaming_maxpacket = clamp(opts->streaming_maxpacket, 1U, 3072U); |
| 595 | opts->streaming_maxburst = min(opts->streaming_maxburst, 15U); | 595 | opts->streaming_maxburst = min(opts->streaming_maxburst, 15U); |
| 596 | 596 | ||
| 597 | /* For SS, wMaxPacketSize has to be 1024 if bMaxBurst is not 0 */ | ||
| 598 | if (opts->streaming_maxburst && | ||
| 599 | (opts->streaming_maxpacket % 1024) != 0) { | ||
| 600 | opts->streaming_maxpacket = roundup(opts->streaming_maxpacket, 1024); | ||
| 601 | INFO(cdev, "overriding streaming_maxpacket to %d\n", | ||
| 602 | opts->streaming_maxpacket); | ||
| 603 | } | ||
| 604 | |||
| 597 | /* Fill in the FS/HS/SS Video Streaming specific descriptors from the | 605 | /* Fill in the FS/HS/SS Video Streaming specific descriptors from the |
| 598 | * module parameters. | 606 | * module parameters. |
| 599 | * | 607 | * |
| @@ -625,7 +633,7 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) | |||
| 625 | uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst; | 633 | uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst; |
| 626 | uvc_ss_streaming_comp.wBytesPerInterval = | 634 | uvc_ss_streaming_comp.wBytesPerInterval = |
| 627 | cpu_to_le16(max_packet_size * max_packet_mult * | 635 | cpu_to_le16(max_packet_size * max_packet_mult * |
| 628 | opts->streaming_maxburst); | 636 | (opts->streaming_maxburst + 1)); |
| 629 | 637 | ||
| 630 | /* Allocate endpoints. */ | 638 | /* Allocate endpoints. */ |
| 631 | ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep); | 639 | ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep); |
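bMaxBurst is zero-based (0 already means one packet per burst), so the SuperSpeed companion's wBytesPerInterval has to multiply by streaming_maxburst + 1; the earlier hunk likewise rounds streaming_maxpacket up to a 1024 multiple because SS requires wMaxPacketSize == 1024 whenever bursting is enabled. Worked numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int max_packet_size = 1024, max_packet_mult = 1;
            unsigned int maxburst = 2;      /* zero-based: 3 packets/burst */

            unsigned int old   = max_packet_size * max_packet_mult * maxburst;
            unsigned int fixed = max_packet_size * max_packet_mult *
                                 (maxburst + 1);

            /* old: 2048 (one packet short), fixed: 3072 */
            printf("old=%u fixed=%u\n", old, fixed);
            return 0;
    }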
diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c index a97da645c1b9..8a365aad66fe 100644 --- a/drivers/usb/gadget/udc/pch_udc.c +++ b/drivers/usb/gadget/udc/pch_udc.c | |||
| @@ -1523,7 +1523,6 @@ static void pch_udc_free_dma_chain(struct pch_udc_dev *dev, | |||
| 1523 | td = phys_to_virt(addr); | 1523 | td = phys_to_virt(addr); |
| 1524 | addr2 = (dma_addr_t)td->next; | 1524 | addr2 = (dma_addr_t)td->next; |
| 1525 | pci_pool_free(dev->data_requests, td, addr); | 1525 | pci_pool_free(dev->data_requests, td, addr); |
| 1526 | td->next = 0x00; | ||
| 1527 | addr = addr2; | 1526 | addr = addr2; |
| 1528 | } | 1527 | } |
| 1529 | req->chain_len = 1; | 1528 | req->chain_len = 1; |
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c index 8b9fd7534f69..502bfe30a077 100644 --- a/drivers/usb/misc/idmouse.c +++ b/drivers/usb/misc/idmouse.c | |||
| @@ -347,6 +347,9 @@ static int idmouse_probe(struct usb_interface *interface, | |||
| 347 | if (iface_desc->desc.bInterfaceClass != 0x0A) | 347 | if (iface_desc->desc.bInterfaceClass != 0x0A) |
| 348 | return -ENODEV; | 348 | return -ENODEV; |
| 349 | 349 | ||
| 350 | if (iface_desc->desc.bNumEndpoints < 1) | ||
| 351 | return -ENODEV; | ||
| 352 | |||
| 350 | /* allocate memory for our device state and initialize it */ | 353 | /* allocate memory for our device state and initialize it */ |
| 351 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | 354 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
| 352 | if (dev == NULL) | 355 | if (dev == NULL) |
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c index 77176511658f..d3d124753266 100644 --- a/drivers/usb/misc/lvstest.c +++ b/drivers/usb/misc/lvstest.c | |||
| @@ -366,6 +366,10 @@ static int lvs_rh_probe(struct usb_interface *intf, | |||
| 366 | 366 | ||
| 367 | hdev = interface_to_usbdev(intf); | 367 | hdev = interface_to_usbdev(intf); |
| 368 | desc = intf->cur_altsetting; | 368 | desc = intf->cur_altsetting; |
| 369 | |||
| 370 | if (desc->desc.bNumEndpoints < 1) | ||
| 371 | return -ENODEV; | ||
| 372 | |||
| 369 | endpoint = &desc->endpoint[0].desc; | 373 | endpoint = &desc->endpoint[0].desc; |
| 370 | 374 | ||
| 371 | /* valid only for SS root hub */ | 375 | /* valid only for SS root hub */ |
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c index e45a3a680db8..07014cad6dbe 100644 --- a/drivers/usb/misc/uss720.c +++ b/drivers/usb/misc/uss720.c | |||
| @@ -709,6 +709,11 @@ static int uss720_probe(struct usb_interface *intf, | |||
| 709 | 709 | ||
| 710 | interface = intf->cur_altsetting; | 710 | interface = intf->cur_altsetting; |
| 711 | 711 | ||
| 712 | if (interface->desc.bNumEndpoints < 3) { | ||
| 713 | usb_put_dev(usbdev); | ||
| 714 | return -ENODEV; | ||
| 715 | } | ||
| 716 | |||
| 712 | /* | 717 | /* |
| 713 | * Allocate parport interface | 718 | * Allocate parport interface |
| 714 | */ | 719 | */ |
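idmouse, lvstest and uss720 all gain the same guard: check bNumEndpoints before indexing into the endpoint array, since a crafted or broken device can expose an interface with fewer endpoints than the driver expects and turn endpoint[0] (or endpoint[2]) into an out-of-bounds read. The generic shape of the guard (NEEDED is illustrative; each driver hard-codes its own count):

    if (intf->cur_altsetting->desc.bNumEndpoints < NEEDED)
            return -ENODEV;         /* bail before touching descriptors */

    endpoint = &intf->cur_altsetting->endpoint[0].desc;

uss720 additionally drops the usb_put_dev() reference it had already taken before returning.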
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index d8bae6ca8904..0c3664ab705e 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
| @@ -2490,8 +2490,8 @@ static int musb_remove(struct platform_device *pdev) | |||
| 2490 | musb_host_cleanup(musb); | 2490 | musb_host_cleanup(musb); |
| 2491 | musb_gadget_cleanup(musb); | 2491 | musb_gadget_cleanup(musb); |
| 2492 | 2492 | ||
| 2493 | spin_lock_irqsave(&musb->lock, flags); | ||
| 2494 | musb_platform_disable(musb); | 2493 | musb_platform_disable(musb); |
| 2494 | spin_lock_irqsave(&musb->lock, flags); | ||
| 2495 | musb_disable_interrupts(musb); | 2495 | musb_disable_interrupts(musb); |
| 2496 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | 2496 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); |
| 2497 | spin_unlock_irqrestore(&musb->lock, flags); | 2497 | spin_unlock_irqrestore(&musb->lock, flags); |
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index 00e272bfee39..355655f8a3fb 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c | |||
| @@ -238,8 +238,27 @@ static void cppi41_dma_callback(void *private_data, | |||
| 238 | transferred < cppi41_channel->packet_sz) | 238 | transferred < cppi41_channel->packet_sz) |
| 239 | cppi41_channel->prog_len = 0; | 239 | cppi41_channel->prog_len = 0; |
| 240 | 240 | ||
| 241 | if (cppi41_channel->is_tx) | 241 | if (cppi41_channel->is_tx) { |
| 242 | empty = musb_is_tx_fifo_empty(hw_ep); | 242 | u8 type; |
| 243 | |||
| 244 | if (is_host_active(musb)) | ||
| 245 | type = hw_ep->out_qh->type; | ||
| 246 | else | ||
| 247 | type = hw_ep->ep_in.type; | ||
| 248 | |||
| 249 | if (type == USB_ENDPOINT_XFER_ISOC) | ||
| 250 | /* | ||
| 251 | * Don't use the early-TX-interrupt workaround below | ||
| 252 | * for Isoch transfer. Since Isoch is a periodic | ||
| 253 | * transfer, by the time the next transfer is | ||
| 254 | * scheduled, the current one should be done already. | ||
| 255 | * | ||
| 256 | * This avoids an audio playback underrun issue. | ||
| 257 | */ | ||
| 258 | empty = true; | ||
| 259 | else | ||
| 260 | empty = musb_is_tx_fifo_empty(hw_ep); | ||
| 261 | } | ||
| 243 | 262 | ||
| 244 | if (!cppi41_channel->is_tx || empty) { | 263 | if (!cppi41_channel->is_tx || empty) { |
| 245 | cppi41_trans_done(cppi41_channel); | 264 | cppi41_trans_done(cppi41_channel); |
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 7c047c4a2565..9c7ee26ef388 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c | |||
| @@ -933,7 +933,7 @@ static int dsps_probe(struct platform_device *pdev) | |||
| 933 | if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) { | 933 | if (usb_get_dr_mode(&pdev->dev) == USB_DR_MODE_PERIPHERAL) { |
| 934 | ret = dsps_setup_optional_vbus_irq(pdev, glue); | 934 | ret = dsps_setup_optional_vbus_irq(pdev, glue); |
| 935 | if (ret) | 935 | if (ret) |
| 936 | return ret; | 936 | goto err_iounmap; |
| 937 | } | 937 | } |
| 938 | 938 | ||
| 939 | platform_set_drvdata(pdev, glue); | 939 | platform_set_drvdata(pdev, glue); |
| @@ -946,6 +946,8 @@ static int dsps_probe(struct platform_device *pdev) | |||
| 946 | 946 | ||
| 947 | err: | 947 | err: |
| 948 | pm_runtime_disable(&pdev->dev); | 948 | pm_runtime_disable(&pdev->dev); |
| 949 | err_iounmap: | ||
| 950 | iounmap(glue->usbss_base); | ||
| 949 | return ret; | 951 | return ret; |
| 950 | } | 952 | } |
| 951 | 953 | ||
| @@ -956,6 +958,7 @@ static int dsps_remove(struct platform_device *pdev) | |||
| 956 | platform_device_unregister(glue->musb); | 958 | platform_device_unregister(glue->musb); |
| 957 | 959 | ||
| 958 | pm_runtime_disable(&pdev->dev); | 960 | pm_runtime_disable(&pdev->dev); |
| 961 | iounmap(glue->usbss_base); | ||
| 959 | 962 | ||
| 960 | return 0; | 963 | return 0; |
| 961 | } | 964 | } |
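The two dsps hunks close a leak: the vbus-IRQ failure path used to return directly, skipping the iounmap() of usbss_base, and dsps_remove() never unmapped it at all. Both fixes are instances of the standard goto-unwind idiom, sketched below with hypothetical resources acquired in order and released in reverse (the example_* names and EXAMPLE_RES_* constants are invented):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/platform_device.h>

#define EXAMPLE_RES_START	0x40000000	/* invented address */
#define EXAMPLE_RES_LEN		0x1000

int example_enable_clocks(struct platform_device *pdev);
void example_disable_clocks(struct platform_device *pdev);
int example_register_children(struct platform_device *pdev);

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;
	int ret;

	base = ioremap(EXAMPLE_RES_START, EXAMPLE_RES_LEN);
	if (!base)
		return -ENOMEM;		/* nothing to unwind yet */

	ret = example_enable_clocks(pdev);
	if (ret)
		goto err_iounmap;	/* undo only what was acquired */

	ret = example_register_children(pdev);
	if (ret)
		goto err_clocks;

	return 0;

err_clocks:
	example_disable_clocks(pdev);
err_iounmap:
	iounmap(base);
	return ret;
}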
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 42cc72e54c05..af67a0de6b5d 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
| @@ -233,6 +233,14 @@ static void option_instat_callback(struct urb *urb); | |||
| 233 | #define BANDRICH_PRODUCT_1012 0x1012 | 233 | #define BANDRICH_PRODUCT_1012 0x1012 |
| 234 | 234 | ||
| 235 | #define QUALCOMM_VENDOR_ID 0x05C6 | 235 | #define QUALCOMM_VENDOR_ID 0x05C6 |
| 236 | /* These Quectel products use Qualcomm's vendor ID */ | ||
| 237 | #define QUECTEL_PRODUCT_UC20 0x9003 | ||
| 238 | #define QUECTEL_PRODUCT_UC15 0x9090 | ||
| 239 | |||
| 240 | #define QUECTEL_VENDOR_ID 0x2c7c | ||
| 241 | /* These Quectel products use Quectel's vendor ID */ | ||
| 242 | #define QUECTEL_PRODUCT_EC21 0x0121 | ||
| 243 | #define QUECTEL_PRODUCT_EC25 0x0125 | ||
| 236 | 244 | ||
| 237 | #define CMOTECH_VENDOR_ID 0x16d8 | 245 | #define CMOTECH_VENDOR_ID 0x16d8 |
| 238 | #define CMOTECH_PRODUCT_6001 0x6001 | 246 | #define CMOTECH_PRODUCT_6001 0x6001 |
| @@ -1161,7 +1169,14 @@ static const struct usb_device_id option_ids[] = { | |||
| 1161 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ | 1169 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ |
| 1162 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ | 1170 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ |
| 1163 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ | 1171 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ |
| 1164 | { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */ | 1172 | /* Quectel products using Qualcomm vendor ID */ |
| 1173 | { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, | ||
| 1174 | { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), | ||
| 1175 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | ||
| 1176 | /* Quectel products using Quectel vendor ID */ | ||
| 1177 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21), | ||
| 1178 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | ||
| 1179 | { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25), | ||
| 1165 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 1180 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
| 1166 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, | 1181 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, |
| 1167 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, | 1182 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, |
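The option.c hunks replace a bare hex product ID with named constants and add the EC21/EC25, which sit under Quectel's own vendor ID. Structurally, each entry is a USB_DEVICE() match that can carry per-device quirks via .driver_info; here that is the interface-4 blacklist keeping this serial driver off the modem's network-function interface. A sketch of the shape of such a table, with invented IDs and a hypothetical quirk object:

#include <linux/module.h>
#include <linux/usb.h>

#define EXAMPLE_VENDOR_ID	0x1234	/* invented for illustration */
#define EXAMPLE_PRODUCT_AA	0x0001

static const int example_quirks;	/* stand-in for a quirk/blacklist struct */

static const struct usb_device_id example_ids[] = {
	{ USB_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_PRODUCT_AA),
	  .driver_info = (kernel_ulong_t)&example_quirks },
	{ }	/* all-zero terminator is mandatory */
};
MODULE_DEVICE_TABLE(usb, example_ids);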
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 696458db7e3c..38b3f0d8cd58 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
| @@ -169,6 +169,8 @@ static const struct usb_device_id id_table[] = { | |||
| 169 | {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ | 169 | {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ |
| 170 | {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ | 170 | {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ |
| 171 | {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ | 171 | {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ |
| 172 | {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ | ||
| 173 | {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ | ||
| 172 | 174 | ||
| 173 | /* Huawei devices */ | 175 | /* Huawei devices */ |
| 174 | {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ | 176 | {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ |
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c index 252c7bd9218a..d01496fd27fe 100644 --- a/drivers/usb/wusbcore/wa-hc.c +++ b/drivers/usb/wusbcore/wa-hc.c | |||
| @@ -39,6 +39,9 @@ int wa_create(struct wahc *wa, struct usb_interface *iface, | |||
| 39 | int result; | 39 | int result; |
| 40 | struct device *dev = &iface->dev; | 40 | struct device *dev = &iface->dev; |
| 41 | 41 | ||
| 42 | if (iface->cur_altsetting->desc.bNumEndpoints < 3) | ||
| 43 | return -ENODEV; | ||
| 44 | |||
| 42 | result = wa_rpipes_create(wa); | 45 | result = wa_rpipes_create(wa); |
| 43 | if (result < 0) | 46 | if (result < 0) |
| 44 | goto error_rpipes_create; | 47 | goto error_rpipes_create; |
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c index 0aa6c3c29d17..35a1e777b449 100644 --- a/drivers/uwb/hwa-rc.c +++ b/drivers/uwb/hwa-rc.c | |||
| @@ -823,6 +823,9 @@ static int hwarc_probe(struct usb_interface *iface, | |||
| 823 | struct hwarc *hwarc; | 823 | struct hwarc *hwarc; |
| 824 | struct device *dev = &iface->dev; | 824 | struct device *dev = &iface->dev; |
| 825 | 825 | ||
| 826 | if (iface->cur_altsetting->desc.bNumEndpoints < 1) | ||
| 827 | return -ENODEV; | ||
| 828 | |||
| 826 | result = -ENOMEM; | 829 | result = -ENOMEM; |
| 827 | uwb_rc = uwb_rc_alloc(); | 830 | uwb_rc = uwb_rc_alloc(); |
| 828 | if (uwb_rc == NULL) { | 831 | if (uwb_rc == NULL) { |
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c index 2bfc846ac071..6345e85822a4 100644 --- a/drivers/uwb/i1480/dfu/usb.c +++ b/drivers/uwb/i1480/dfu/usb.c | |||
| @@ -362,6 +362,9 @@ int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id) | |||
| 362 | result); | 362 | result); |
| 363 | } | 363 | } |
| 364 | 364 | ||
| 365 | if (iface->cur_altsetting->desc.bNumEndpoints < 1) | ||
| 366 | return -ENODEV; | ||
| 367 | |||
| 365 | result = -ENOMEM; | 368 | result = -ENOMEM; |
| 366 | i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL); | 369 | i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL); |
| 367 | if (i1480_usb == NULL) { | 370 | if (i1480_usb == NULL) { |
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 609f4f982c74..561084ab387f 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c | |||
| @@ -403,6 +403,7 @@ static void vfio_group_release(struct kref *kref) | |||
| 403 | struct iommu_group *iommu_group = group->iommu_group; | 403 | struct iommu_group *iommu_group = group->iommu_group; |
| 404 | 404 | ||
| 405 | WARN_ON(!list_empty(&group->device_list)); | 405 | WARN_ON(!list_empty(&group->device_list)); |
| 406 | WARN_ON(group->notifier.head); | ||
| 406 | 407 | ||
| 407 | list_for_each_entry_safe(unbound, tmp, | 408 | list_for_each_entry_safe(unbound, tmp, |
| 408 | &group->unbound_list, unbound_next) { | 409 | &group->unbound_list, unbound_next) { |
| @@ -1573,6 +1574,10 @@ static int vfio_group_fops_open(struct inode *inode, struct file *filep) | |||
| 1573 | return -EBUSY; | 1574 | return -EBUSY; |
| 1574 | } | 1575 | } |
| 1575 | 1576 | ||
| 1577 | /* Warn if the previous user didn't clean up, and re-init to drop them */ | ||
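Placeholder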
| 1578 | if (WARN_ON(group->notifier.head)) | ||
| 1579 | BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier); | ||
| 1580 | |||
| 1576 | filep->private_data = group; | 1581 | filep->private_data = group; |
| 1577 | 1582 | ||
| 1578 | return 0; | 1583 | return 0; |
| @@ -1584,9 +1589,6 @@ static int vfio_group_fops_release(struct inode *inode, struct file *filep) | |||
| 1584 | 1589 | ||
| 1585 | filep->private_data = NULL; | 1590 | filep->private_data = NULL; |
| 1586 | 1591 | ||
| 1587 | /* Any user didn't unregister? */ | ||
| 1588 | WARN_ON(group->notifier.head); | ||
| 1589 | |||
| 1590 | vfio_group_try_dissolve_container(group); | 1592 | vfio_group_try_dissolve_container(group); |
| 1591 | 1593 | ||
| 1592 | atomic_dec(&group->opened); | 1594 | atomic_dec(&group->opened); |
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index c26fa1f3ed86..32d2633092a3 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c | |||
| @@ -1182,8 +1182,7 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain, | |||
| 1182 | return NULL; | 1182 | return NULL; |
| 1183 | } | 1183 | } |
| 1184 | 1184 | ||
| 1185 | static bool vfio_iommu_has_resv_msi(struct iommu_group *group, | 1185 | static bool vfio_iommu_has_sw_msi(struct iommu_group *group, phys_addr_t *base) |
| 1186 | phys_addr_t *base) | ||
| 1187 | { | 1186 | { |
| 1188 | struct list_head group_resv_regions; | 1187 | struct list_head group_resv_regions; |
| 1189 | struct iommu_resv_region *region, *next; | 1188 | struct iommu_resv_region *region, *next; |
| @@ -1192,7 +1191,7 @@ static bool vfio_iommu_has_resv_msi(struct iommu_group *group, | |||
| 1192 | INIT_LIST_HEAD(&group_resv_regions); | 1191 | INIT_LIST_HEAD(&group_resv_regions); |
| 1193 | iommu_get_group_resv_regions(group, &group_resv_regions); | 1192 | iommu_get_group_resv_regions(group, &group_resv_regions); |
| 1194 | list_for_each_entry(region, &group_resv_regions, list) { | 1193 | list_for_each_entry(region, &group_resv_regions, list) { |
| 1195 | if (region->type & IOMMU_RESV_MSI) { | 1194 | if (region->type == IOMMU_RESV_SW_MSI) { |
| 1196 | *base = region->start; | 1195 | *base = region->start; |
| 1197 | ret = true; | 1196 | ret = true; |
| 1198 | goto out; | 1197 | goto out; |
| @@ -1283,7 +1282,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, | |||
| 1283 | if (ret) | 1282 | if (ret) |
| 1284 | goto out_domain; | 1283 | goto out_domain; |
| 1285 | 1284 | ||
| 1286 | resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base); | 1285 | resv_msi = vfio_iommu_has_sw_msi(iommu_group, &resv_msi_base); |
| 1287 | 1286 | ||
| 1288 | INIT_LIST_HEAD(&domain->group_list); | 1287 | INIT_LIST_HEAD(&domain->group_list); |
| 1289 | list_add(&group->next, &domain->group_list); | 1288 | list_add(&group->next, &domain->group_list); |
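Besides the rename, the functional change above is the comparison: iommu_resv_region::type holds enum values, so the old bitwise test (type & IOMMU_RESV_MSI) is the wrong shape for asking whether this is a software-MSI region, and the new code compares for equality with IOMMU_RESV_SW_MSI. The same point in miniature (enum and helper invented for illustration):

#include <linux/types.h>

enum example_resv_type { EX_RESV_DIRECT, EX_RESV_RESERVED, EX_RESV_MSI_SW };

/* Enum members are plain values, not flag bits, so test with ==;
 * a & test treats unrelated enum values as overlapping masks. */
static bool example_is_sw_msi(enum example_resv_type t)
{
	return t == EX_RESV_MSI_SW;
}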
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index ce5e63d2c66a..44eed8eb0725 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c | |||
| @@ -223,6 +223,46 @@ vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt) | |||
| 223 | return len; | 223 | return len; |
| 224 | } | 224 | } |
| 225 | 225 | ||
| 226 | static int | ||
| 227 | vhost_transport_cancel_pkt(struct vsock_sock *vsk) | ||
| 228 | { | ||
| 229 | struct vhost_vsock *vsock; | ||
| 230 | struct virtio_vsock_pkt *pkt, *n; | ||
| 231 | int cnt = 0; | ||
| 232 | LIST_HEAD(freeme); | ||
| 233 | |||
| 234 | /* Find the vhost_vsock according to guest context id */ | ||
| 235 | vsock = vhost_vsock_get(vsk->remote_addr.svm_cid); | ||
| 236 | if (!vsock) | ||
| 237 | return -ENODEV; | ||
| 238 | |||
| 239 | spin_lock_bh(&vsock->send_pkt_list_lock); | ||
| 240 | list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { | ||
| 241 | if (pkt->vsk != vsk) | ||
| 242 | continue; | ||
| 243 | list_move(&pkt->list, &freeme); | ||
| 244 | } | ||
| 245 | spin_unlock_bh(&vsock->send_pkt_list_lock); | ||
| 246 | |||
| 247 | list_for_each_entry_safe(pkt, n, &freeme, list) { | ||
| 248 | if (pkt->reply) | ||
| 249 | cnt++; | ||
| 250 | list_del(&pkt->list); | ||
| 251 | virtio_transport_free_pkt(pkt); | ||
| 252 | } | ||
| 253 | |||
| 254 | if (cnt) { | ||
| 255 | struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX]; | ||
| 256 | int new_cnt; | ||
| 257 | |||
| 258 | new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); | ||
| 259 | if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num) | ||
| 260 | vhost_poll_queue(&tx_vq->poll); | ||
| 261 | } | ||
| 262 | |||
| 263 | return 0; | ||
| 264 | } | ||
| 265 | |||
| 226 | static struct virtio_vsock_pkt * | 266 | static struct virtio_vsock_pkt * |
| 227 | vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, | 267 | vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq, |
| 228 | unsigned int out, unsigned int in) | 268 | unsigned int out, unsigned int in) |
| @@ -675,6 +715,7 @@ static struct virtio_transport vhost_transport = { | |||
| 675 | .release = virtio_transport_release, | 715 | .release = virtio_transport_release, |
| 676 | .connect = virtio_transport_connect, | 716 | .connect = virtio_transport_connect, |
| 677 | .shutdown = virtio_transport_shutdown, | 717 | .shutdown = virtio_transport_shutdown, |
| 718 | .cancel_pkt = vhost_transport_cancel_pkt, | ||
| 678 | 719 | ||
| 679 | .dgram_enqueue = virtio_transport_dgram_enqueue, | 720 | .dgram_enqueue = virtio_transport_dgram_enqueue, |
| 680 | .dgram_dequeue = virtio_transport_dgram_dequeue, | 721 | .dgram_dequeue = virtio_transport_dgram_dequeue, |
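vhost_transport_cancel_pkt() shows a common lock-scoping idiom: matching packets are only moved onto a private freeme list while send_pkt_list_lock is held, and the actual freeing (plus the reply accounting) runs after the lock is dropped. A generic sketch of the idiom (struct example_item and the queue type are hypothetical):

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_item {
	struct list_head link;
	void *owner;
};

struct example_queue {
	spinlock_t lock;
	struct list_head items;
};

static void example_purge(struct example_queue *q, void *owner)
{
	struct example_item *it, *n;
	LIST_HEAD(freeme);

	spin_lock_bh(&q->lock);
	list_for_each_entry_safe(it, n, &q->items, link) {
		if (it->owner == owner)
			list_move(&it->link, &freeme);	/* unlink under the lock */
	}
	spin_unlock_bh(&q->lock);

	/* Free outside the lock: keeps the critical section short and
	 * avoids calling the allocator while a spinlock is held. */
	list_for_each_entry_safe(it, n, &freeme, link) {
		list_del(&it->link);
		kfree(it);
	}
}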
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 4e1191508228..34adf9b9c053 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c | |||
| @@ -242,11 +242,11 @@ static inline void update_stat(struct virtio_balloon *vb, int idx, | |||
| 242 | 242 | ||
| 243 | #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) | 243 | #define pages_to_bytes(x) ((u64)(x) << PAGE_SHIFT) |
| 244 | 244 | ||
| 245 | static void update_balloon_stats(struct virtio_balloon *vb) | 245 | static unsigned int update_balloon_stats(struct virtio_balloon *vb) |
| 246 | { | 246 | { |
| 247 | unsigned long events[NR_VM_EVENT_ITEMS]; | 247 | unsigned long events[NR_VM_EVENT_ITEMS]; |
| 248 | struct sysinfo i; | 248 | struct sysinfo i; |
| 249 | int idx = 0; | 249 | unsigned int idx = 0; |
| 250 | long available; | 250 | long available; |
| 251 | 251 | ||
| 252 | all_vm_events(events); | 252 | all_vm_events(events); |
| @@ -254,18 +254,22 @@ static void update_balloon_stats(struct virtio_balloon *vb) | |||
| 254 | 254 | ||
| 255 | available = si_mem_available(); | 255 | available = si_mem_available(); |
| 256 | 256 | ||
| 257 | #ifdef CONFIG_VM_EVENT_COUNTERS | ||
| 257 | update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, | 258 | update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_IN, |
| 258 | pages_to_bytes(events[PSWPIN])); | 259 | pages_to_bytes(events[PSWPIN])); |
| 259 | update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, | 260 | update_stat(vb, idx++, VIRTIO_BALLOON_S_SWAP_OUT, |
| 260 | pages_to_bytes(events[PSWPOUT])); | 261 | pages_to_bytes(events[PSWPOUT])); |
| 261 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); | 262 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MAJFLT, events[PGMAJFAULT]); |
| 262 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); | 263 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MINFLT, events[PGFAULT]); |
| 264 | #endif | ||
| 263 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, | 265 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMFREE, |
| 264 | pages_to_bytes(i.freeram)); | 266 | pages_to_bytes(i.freeram)); |
| 265 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, | 267 | update_stat(vb, idx++, VIRTIO_BALLOON_S_MEMTOT, |
| 266 | pages_to_bytes(i.totalram)); | 268 | pages_to_bytes(i.totalram)); |
| 267 | update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL, | 269 | update_stat(vb, idx++, VIRTIO_BALLOON_S_AVAIL, |
| 268 | pages_to_bytes(available)); | 270 | pages_to_bytes(available)); |
| 271 | |||
| 272 | return idx; | ||
| 269 | } | 273 | } |
| 270 | 274 | ||
| 271 | /* | 275 | /* |
| @@ -291,14 +295,14 @@ static void stats_handle_request(struct virtio_balloon *vb) | |||
| 291 | { | 295 | { |
| 292 | struct virtqueue *vq; | 296 | struct virtqueue *vq; |
| 293 | struct scatterlist sg; | 297 | struct scatterlist sg; |
| 294 | unsigned int len; | 298 | unsigned int len, num_stats; |
| 295 | 299 | ||
| 296 | update_balloon_stats(vb); | 300 | num_stats = update_balloon_stats(vb); |
| 297 | 301 | ||
| 298 | vq = vb->stats_vq; | 302 | vq = vb->stats_vq; |
| 299 | if (!virtqueue_get_buf(vq, &len)) | 303 | if (!virtqueue_get_buf(vq, &len)) |
| 300 | return; | 304 | return; |
| 301 | sg_init_one(&sg, vb->stats, sizeof(vb->stats)); | 305 | sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats); |
| 302 | virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); | 306 | virtqueue_add_outbuf(vq, &sg, 1, vb, GFP_KERNEL); |
| 303 | virtqueue_kick(vq); | 307 | virtqueue_kick(vq); |
| 304 | } | 308 | } |
| @@ -423,13 +427,16 @@ static int init_vqs(struct virtio_balloon *vb) | |||
| 423 | vb->deflate_vq = vqs[1]; | 427 | vb->deflate_vq = vqs[1]; |
| 424 | if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { | 428 | if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { |
| 425 | struct scatterlist sg; | 429 | struct scatterlist sg; |
| 430 | unsigned int num_stats; | ||
| 426 | vb->stats_vq = vqs[2]; | 431 | vb->stats_vq = vqs[2]; |
| 427 | 432 | ||
| 428 | /* | 433 | /* |
| 429 | * Prime this virtqueue with one buffer so the hypervisor can | 434 | * Prime this virtqueue with one buffer so the hypervisor can |
| 430 | * use it to signal us later (it can't be broken yet!). | 435 | * use it to signal us later (it can't be broken yet!). |
| 431 | */ | 436 | */ |
| 432 | sg_init_one(&sg, vb->stats, sizeof vb->stats); | 437 | num_stats = update_balloon_stats(vb); |
| 438 | |||
| 439 | sg_init_one(&sg, vb->stats, sizeof(vb->stats[0]) * num_stats); | ||
| 433 | if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) | 440 | if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL) |
| 434 | < 0) | 441 | < 0) |
| 435 | BUG(); | 442 | BUG(); |
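Three coordinated changes above: update_balloon_stats() now reports how many stat slots it filled (fewer when CONFIG_VM_EVENT_COUNTERS is disabled), and both places that queue vb->stats size the scatterlist from that count instead of sizeof(vb->stats), so the host is never handed uninitialized trailing slots; the init path also primes the buffer with real values first. The sizing pattern in isolation (types invented for illustration):

#include <linux/scatterlist.h>
#include <linux/types.h>

struct example_stat {
	u16 tag;
	u64 val;
} __packed;

/* Publish only the 'filled' prefix of the array, not its full size. */
static void example_queue_stats(struct scatterlist *sg,
				struct example_stat *stats,
				unsigned int filled)
{
	sg_init_one(sg, stats, sizeof(stats[0]) * filled);
}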
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index df548a6fb844..590534910dc6 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c | |||
| @@ -147,7 +147,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, | |||
| 147 | { | 147 | { |
| 148 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); | 148 | struct virtio_pci_device *vp_dev = to_vp_device(vdev); |
| 149 | const char *name = dev_name(&vp_dev->vdev.dev); | 149 | const char *name = dev_name(&vp_dev->vdev.dev); |
| 150 | int i, err = -ENOMEM, allocated_vectors, nvectors; | 150 | int i, j, err = -ENOMEM, allocated_vectors, nvectors; |
| 151 | unsigned flags = PCI_IRQ_MSIX; | 151 | unsigned flags = PCI_IRQ_MSIX; |
| 152 | bool shared = false; | 152 | bool shared = false; |
| 153 | u16 msix_vec; | 153 | u16 msix_vec; |
| @@ -212,7 +212,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, | |||
| 212 | if (!vp_dev->msix_vector_map) | 212 | if (!vp_dev->msix_vector_map) |
| 213 | goto out_disable_config_irq; | 213 | goto out_disable_config_irq; |
| 214 | 214 | ||
| 215 | allocated_vectors = 1; /* vector 0 is the config interrupt */ | 215 | allocated_vectors = j = 1; /* vector 0 is the config interrupt */ |
| 216 | for (i = 0; i < nvqs; ++i) { | 216 | for (i = 0; i < nvqs; ++i) { |
| 217 | if (!names[i]) { | 217 | if (!names[i]) { |
| 218 | vqs[i] = NULL; | 218 | vqs[i] = NULL; |
| @@ -236,18 +236,19 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs, | |||
| 236 | continue; | 236 | continue; |
| 237 | } | 237 | } |
| 238 | 238 | ||
| 239 | snprintf(vp_dev->msix_names[i + 1], | 239 | snprintf(vp_dev->msix_names[j], |
| 240 | sizeof(*vp_dev->msix_names), "%s-%s", | 240 | sizeof(*vp_dev->msix_names), "%s-%s", |
| 241 | dev_name(&vp_dev->vdev.dev), names[i]); | 241 | dev_name(&vp_dev->vdev.dev), names[i]); |
| 242 | err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), | 242 | err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec), |
| 243 | vring_interrupt, IRQF_SHARED, | 243 | vring_interrupt, IRQF_SHARED, |
| 244 | vp_dev->msix_names[i + 1], vqs[i]); | 244 | vp_dev->msix_names[j], vqs[i]); |
| 245 | if (err) { | 245 | if (err) { |
| 246 | /* don't free this irq on error */ | 246 | /* don't free this irq on error */ |
| 247 | vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; | 247 | vp_dev->msix_vector_map[i] = VIRTIO_MSI_NO_VECTOR; |
| 248 | goto out_remove_vqs; | 248 | goto out_remove_vqs; |
| 249 | } | 249 | } |
| 250 | vp_dev->msix_vector_map[i] = msix_vec; | 250 | vp_dev->msix_vector_map[i] = msix_vec; |
| 251 | j++; | ||
| 251 | 252 | ||
| 252 | /* | 253 | /* |
| 253 | * Use a different vector for each queue if they are available, | 254 | * Use a different vector for each queue if they are available, |
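The new j counter fixes an indexing mismatch: slots in msix_names[] must advance one per IRQ actually requested (slot 0 being the config interrupt), while i walks all queues, including ones that are skipped or share a vector; indexing the names with i + 1 left holes and could run past the populated entries. The general shape, with hypothetical arrays (example_name_vectors is invented; it omits the shared-vector case for brevity):

#include <linux/kernel.h>

static void example_name_vectors(char msix_names[][256],
				 const char * const *names,
				 unsigned int nvqs, const char *devname)
{
	unsigned int i, slot = 1;	/* slot 0: config interrupt */

	for (i = 0; i < nvqs; i++) {
		if (!names[i])
			continue;	/* skipped queue: no name slot used */
		snprintf(msix_names[slot], sizeof(msix_names[slot]),
			 "%s-%s", devname, names[i]);
		slot++;		/* advance only when a slot is consumed */
	}
}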
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index c77a0751a311..f3bf8f4e2d6c 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
| 37 | #include <linux/slab.h> | 37 | #include <linux/slab.h> |
| 38 | #include <linux/highmem.h> | 38 | #include <linux/highmem.h> |
| 39 | #include <linux/refcount.h> | ||
| 39 | 40 | ||
| 40 | #include <xen/xen.h> | 41 | #include <xen/xen.h> |
| 41 | #include <xen/grant_table.h> | 42 | #include <xen/grant_table.h> |
| @@ -86,7 +87,7 @@ struct grant_map { | |||
| 86 | int index; | 87 | int index; |
| 87 | int count; | 88 | int count; |
| 88 | int flags; | 89 | int flags; |
| 89 | atomic_t users; | 90 | refcount_t users; |
| 90 | struct unmap_notify notify; | 91 | struct unmap_notify notify; |
| 91 | struct ioctl_gntdev_grant_ref *grants; | 92 | struct ioctl_gntdev_grant_ref *grants; |
| 92 | struct gnttab_map_grant_ref *map_ops; | 93 | struct gnttab_map_grant_ref *map_ops; |
| @@ -166,7 +167,7 @@ static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count) | |||
| 166 | 167 | ||
| 167 | add->index = 0; | 168 | add->index = 0; |
| 168 | add->count = count; | 169 | add->count = count; |
| 169 | atomic_set(&add->users, 1); | 170 | refcount_set(&add->users, 1); |
| 170 | 171 | ||
| 171 | return add; | 172 | return add; |
| 172 | 173 | ||
| @@ -212,7 +213,7 @@ static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map) | |||
| 212 | if (!map) | 213 | if (!map) |
| 213 | return; | 214 | return; |
| 214 | 215 | ||
| 215 | if (!atomic_dec_and_test(&map->users)) | 216 | if (!refcount_dec_and_test(&map->users)) |
| 216 | return; | 217 | return; |
| 217 | 218 | ||
| 218 | atomic_sub(map->count, &pages_mapped); | 219 | atomic_sub(map->count, &pages_mapped); |
| @@ -400,7 +401,7 @@ static void gntdev_vma_open(struct vm_area_struct *vma) | |||
| 400 | struct grant_map *map = vma->vm_private_data; | 401 | struct grant_map *map = vma->vm_private_data; |
| 401 | 402 | ||
| 402 | pr_debug("gntdev_vma_open %p\n", vma); | 403 | pr_debug("gntdev_vma_open %p\n", vma); |
| 403 | atomic_inc(&map->users); | 404 | refcount_inc(&map->users); |
| 404 | } | 405 | } |
| 405 | 406 | ||
| 406 | static void gntdev_vma_close(struct vm_area_struct *vma) | 407 | static void gntdev_vma_close(struct vm_area_struct *vma) |
| @@ -1004,7 +1005,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | |||
| 1004 | goto unlock_out; | 1005 | goto unlock_out; |
| 1005 | } | 1006 | } |
| 1006 | 1007 | ||
| 1007 | atomic_inc(&map->users); | 1008 | refcount_inc(&map->users); |
| 1008 | 1009 | ||
| 1009 | vma->vm_ops = &gntdev_vmops; | 1010 | vma->vm_ops = &gntdev_vmops; |
| 1010 | 1011 | ||
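The gntdev conversion swaps atomic_t for refcount_t wherever the counter represents object lifetime. The refcount API has the same inc/dec-and-test shape, but it saturates instead of wrapping and warns on misuse (increment from zero, underflow), turning potential use-after-free bugs into loud diagnostics. The mapping in miniature (example_obj is hypothetical):

#include <linux/refcount.h>
#include <linux/slab.h>

struct example_obj {
	refcount_t users;
};

static struct example_obj *example_alloc(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->users, 1);	/* was atomic_set() */
	return obj;
}

static void example_get(struct example_obj *obj)
{
	refcount_inc(&obj->users);		/* was atomic_inc() */
}

static void example_put(struct example_obj *obj)
{
	if (refcount_dec_and_test(&obj->users))	/* was atomic_dec_and_test() */
		kfree(obj);
}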
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c index 4ce10bcca18b..23e391d3ec01 100644 --- a/drivers/xen/xen-acpi-processor.c +++ b/drivers/xen/xen-acpi-processor.c | |||
| @@ -27,10 +27,10 @@ | |||
| 27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
| 28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
| 29 | #include <linux/types.h> | 29 | #include <linux/types.h> |
| 30 | #include <linux/syscore_ops.h> | ||
| 30 | #include <linux/acpi.h> | 31 | #include <linux/acpi.h> |
| 31 | #include <acpi/processor.h> | 32 | #include <acpi/processor.h> |
| 32 | #include <xen/xen.h> | 33 | #include <xen/xen.h> |
| 33 | #include <xen/xen-ops.h> | ||
| 34 | #include <xen/interface/platform.h> | 34 | #include <xen/interface/platform.h> |
| 35 | #include <asm/xen/hypercall.h> | 35 | #include <asm/xen/hypercall.h> |
| 36 | 36 | ||
| @@ -408,7 +408,7 @@ static int check_acpi_ids(struct acpi_processor *pr_backup) | |||
| 408 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, | 408 | acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, |
| 409 | ACPI_UINT32_MAX, | 409 | ACPI_UINT32_MAX, |
| 410 | read_acpi_id, NULL, NULL, NULL); | 410 | read_acpi_id, NULL, NULL, NULL); |
| 411 | acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL); | 411 | acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, read_acpi_id, NULL, NULL); |
| 412 | 412 | ||
| 413 | upload: | 413 | upload: |
| 414 | if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) { | 414 | if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) { |
| @@ -466,15 +466,33 @@ static int xen_upload_processor_pm_data(void) | |||
| 466 | return rc; | 466 | return rc; |
| 467 | } | 467 | } |
| 468 | 468 | ||
| 469 | static int xen_acpi_processor_resume(struct notifier_block *nb, | 469 | static void xen_acpi_processor_resume_worker(struct work_struct *dummy) |
| 470 | unsigned long action, void *data) | ||
| 471 | { | 470 | { |
| 471 | int rc; | ||
| 472 | |||
| 472 | bitmap_zero(acpi_ids_done, nr_acpi_bits); | 473 | bitmap_zero(acpi_ids_done, nr_acpi_bits); |
| 473 | return xen_upload_processor_pm_data(); | 474 | |
| 475 | rc = xen_upload_processor_pm_data(); | ||
| 476 | if (rc != 0) | ||
| 477 | pr_info("ACPI data upload failed, error = %d\n", rc); | ||
| 478 | } | ||
| 479 | |||
| 480 | static void xen_acpi_processor_resume(void) | ||
| 481 | { | ||
| 482 | static DECLARE_WORK(wq, xen_acpi_processor_resume_worker); | ||
| 483 | |||
| 484 | /* | ||
| 485 | * xen_upload_processor_pm_data() calls non-atomic code. | ||
| 486 | * However, the context for xen_acpi_processor_resume is syscore | ||
| 487 | * with only the boot CPU online and in an atomic context. | ||
| 488 | * | ||
| 490 | * So defer the upload to a safer point. | ||
| 490 | */ | ||
| 491 | schedule_work(&wq); | ||
| 474 | } | 492 | } |
| 475 | 493 | ||
| 476 | struct notifier_block xen_acpi_processor_resume_nb = { | 494 | static struct syscore_ops xap_syscore_ops = { |
| 477 | .notifier_call = xen_acpi_processor_resume, | 495 | .resume = xen_acpi_processor_resume, |
| 478 | }; | 496 | }; |
| 479 | 497 | ||
| 480 | static int __init xen_acpi_processor_init(void) | 498 | static int __init xen_acpi_processor_init(void) |
| @@ -527,7 +545,7 @@ static int __init xen_acpi_processor_init(void) | |||
| 527 | if (rc) | 545 | if (rc) |
| 528 | goto err_unregister; | 546 | goto err_unregister; |
| 529 | 547 | ||
| 530 | xen_resume_notifier_register(&xen_acpi_processor_resume_nb); | 548 | register_syscore_ops(&xap_syscore_ops); |
| 531 | 549 | ||
| 532 | return 0; | 550 | return 0; |
| 533 | err_unregister: | 551 | err_unregister: |
| @@ -544,7 +562,7 @@ static void __exit xen_acpi_processor_exit(void) | |||
| 544 | { | 562 | { |
| 545 | int i; | 563 | int i; |
| 546 | 564 | ||
| 547 | xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb); | 565 | unregister_syscore_ops(&xap_syscore_ops); |
| 548 | kfree(acpi_ids_done); | 566 | kfree(acpi_ids_done); |
| 549 | kfree(acpi_id_present); | 567 | kfree(acpi_id_present); |
| 550 | kfree(acpi_id_cst_present); | 568 | kfree(acpi_id_cst_present); |
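The xen-acpi-processor rework replaces a resume notifier with syscore_ops, and because the syscore resume callback runs in atomic context on the boot CPU, the actual (sleeping) upload is punted to a workqueue. That defer-from-atomic pattern, sketched with a hypothetical payload (example_upload may sleep):

#include <linux/workqueue.h>

void example_upload(void);	/* hypothetical, may sleep */

static void example_resume_worker(struct work_struct *unused)
{
	/* Process context: sleeping calls such as hypercalls,
	 * allocations or mutexes are fine here. */
	example_upload();
}

static DECLARE_WORK(example_resume_work, example_resume_worker);

static void example_syscore_resume(void)
{
	/* Atomic context: only schedule, never do the work inline. */
	schedule_work(&example_resume_work);
}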
diff --git a/fs/afs/callback.c b/fs/afs/callback.c index b29447e03ede..25d404d22cae 100644 --- a/fs/afs/callback.c +++ b/fs/afs/callback.c | |||
| @@ -362,7 +362,7 @@ static void afs_callback_updater(struct work_struct *work) | |||
| 362 | { | 362 | { |
| 363 | struct afs_server *server; | 363 | struct afs_server *server; |
| 364 | struct afs_vnode *vnode, *xvnode; | 364 | struct afs_vnode *vnode, *xvnode; |
| 365 | time_t now; | 365 | time64_t now; |
| 366 | long timeout; | 366 | long timeout; |
| 367 | int ret; | 367 | int ret; |
| 368 | 368 | ||
| @@ -370,7 +370,7 @@ static void afs_callback_updater(struct work_struct *work) | |||
| 370 | 370 | ||
| 371 | _enter(""); | 371 | _enter(""); |
| 372 | 372 | ||
| 373 | now = get_seconds(); | 373 | now = ktime_get_real_seconds(); |
| 374 | 374 | ||
| 375 | /* find the first vnode to update */ | 375 | /* find the first vnode to update */ |
| 376 | spin_lock(&server->cb_lock); | 376 | spin_lock(&server->cb_lock); |
| @@ -424,7 +424,8 @@ static void afs_callback_updater(struct work_struct *work) | |||
| 424 | 424 | ||
| 425 | /* and then reschedule */ | 425 | /* and then reschedule */ |
| 426 | _debug("reschedule"); | 426 | _debug("reschedule"); |
| 427 | vnode->update_at = get_seconds() + afs_vnode_update_timeout; | 427 | vnode->update_at = ktime_get_real_seconds() + |
| 428 | afs_vnode_update_timeout; | ||
| 428 | 429 | ||
| 429 | spin_lock(&server->cb_lock); | 430 | spin_lock(&server->cb_lock); |
| 430 | 431 | ||
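The time_t/get_seconds() to time64_t/ktime_get_real_seconds() substitutions here (and in the fsclient, inode and internal.h hunks below) are y2038 hardening: time_t is 32 bits on 32-bit architectures, while time64_t stays 64-bit everywhere. The usage pattern is a direct swap, roughly:

#include <linux/timekeeping.h>
#include <linux/types.h>

/* Sketch: 2038-safe expiry bookkeeping with time64_t. */
struct example_record {
	time64_t expires_at;
};

static void example_arm(struct example_record *r, unsigned int ttl)
{
	r->expires_at = ktime_get_real_seconds() + ttl;
}

static bool example_expired(const struct example_record *r)
{
	return ktime_get_real_seconds() >= r->expires_at;
}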
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 2edbdcbf6432..3062cceb5c2a 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c | |||
| @@ -187,7 +187,6 @@ static int afs_deliver_cb_callback(struct afs_call *call) | |||
| 187 | struct afs_callback *cb; | 187 | struct afs_callback *cb; |
| 188 | struct afs_server *server; | 188 | struct afs_server *server; |
| 189 | __be32 *bp; | 189 | __be32 *bp; |
| 190 | u32 tmp; | ||
| 191 | int ret, loop; | 190 | int ret, loop; |
| 192 | 191 | ||
| 193 | _enter("{%u}", call->unmarshall); | 192 | _enter("{%u}", call->unmarshall); |
| @@ -249,9 +248,9 @@ static int afs_deliver_cb_callback(struct afs_call *call) | |||
| 249 | if (ret < 0) | 248 | if (ret < 0) |
| 250 | return ret; | 249 | return ret; |
| 251 | 250 | ||
| 252 | tmp = ntohl(call->tmp); | 251 | call->count2 = ntohl(call->tmp); |
| 253 | _debug("CB count: %u", tmp); | 252 | _debug("CB count: %u", call->count2); |
| 254 | if (tmp != call->count && tmp != 0) | 253 | if (call->count2 != call->count && call->count2 != 0) |
| 255 | return -EBADMSG; | 254 | return -EBADMSG; |
| 256 | call->offset = 0; | 255 | call->offset = 0; |
| 257 | call->unmarshall++; | 256 | call->unmarshall++; |
| @@ -259,14 +258,14 @@ static int afs_deliver_cb_callback(struct afs_call *call) | |||
| 259 | case 4: | 258 | case 4: |
| 260 | _debug("extract CB array"); | 259 | _debug("extract CB array"); |
| 261 | ret = afs_extract_data(call, call->buffer, | 260 | ret = afs_extract_data(call, call->buffer, |
| 262 | call->count * 3 * 4, false); | 261 | call->count2 * 3 * 4, false); |
| 263 | if (ret < 0) | 262 | if (ret < 0) |
| 264 | return ret; | 263 | return ret; |
| 265 | 264 | ||
| 266 | _debug("unmarshall CB array"); | 265 | _debug("unmarshall CB array"); |
| 267 | cb = call->request; | 266 | cb = call->request; |
| 268 | bp = call->buffer; | 267 | bp = call->buffer; |
| 269 | for (loop = call->count; loop > 0; loop--, cb++) { | 268 | for (loop = call->count2; loop > 0; loop--, cb++) { |
| 270 | cb->version = ntohl(*bp++); | 269 | cb->version = ntohl(*bp++); |
| 271 | cb->expiry = ntohl(*bp++); | 270 | cb->expiry = ntohl(*bp++); |
| 272 | cb->type = ntohl(*bp++); | 271 | cb->type = ntohl(*bp++); |
diff --git a/fs/afs/file.c b/fs/afs/file.c index ba7b71fba34b..0d5b8508869b 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c | |||
| @@ -30,6 +30,7 @@ static int afs_readpages(struct file *filp, struct address_space *mapping, | |||
| 30 | 30 | ||
| 31 | const struct file_operations afs_file_operations = { | 31 | const struct file_operations afs_file_operations = { |
| 32 | .open = afs_open, | 32 | .open = afs_open, |
| 33 | .flush = afs_flush, | ||
| 33 | .release = afs_release, | 34 | .release = afs_release, |
| 34 | .llseek = generic_file_llseek, | 35 | .llseek = generic_file_llseek, |
| 35 | .read_iter = generic_file_read_iter, | 36 | .read_iter = generic_file_read_iter, |
| @@ -184,10 +185,13 @@ int afs_page_filler(void *data, struct page *page) | |||
| 184 | if (!req) | 185 | if (!req) |
| 185 | goto enomem; | 186 | goto enomem; |
| 186 | 187 | ||
| 188 | /* We request a full page. If the page is a partial one at the | ||
| 189 | * end of the file, the server will return a short read and the | ||
| 190 | * unmarshalling code will clear the unfilled space. | ||
| 191 | */ | ||
| 187 | atomic_set(&req->usage, 1); | 192 | atomic_set(&req->usage, 1); |
| 188 | req->pos = (loff_t)page->index << PAGE_SHIFT; | 193 | req->pos = (loff_t)page->index << PAGE_SHIFT; |
| 189 | req->len = min_t(size_t, i_size_read(inode) - req->pos, | 194 | req->len = PAGE_SIZE; |
| 190 | PAGE_SIZE); | ||
| 191 | req->nr_pages = 1; | 195 | req->nr_pages = 1; |
| 192 | req->pages[0] = page; | 196 | req->pages[0] = page; |
| 193 | get_page(page); | 197 | get_page(page); |
| @@ -208,7 +212,13 @@ int afs_page_filler(void *data, struct page *page) | |||
| 208 | fscache_uncache_page(vnode->cache, page); | 212 | fscache_uncache_page(vnode->cache, page); |
| 209 | #endif | 213 | #endif |
| 210 | BUG_ON(PageFsCache(page)); | 214 | BUG_ON(PageFsCache(page)); |
| 211 | goto error; | 215 | |
| 216 | if (ret == -EINTR || | ||
| 217 | ret == -ENOMEM || | ||
| 218 | ret == -ERESTARTSYS || | ||
| 219 | ret == -EAGAIN) | ||
| 220 | goto error; | ||
| 221 | goto io_error; | ||
| 212 | } | 222 | } |
| 213 | 223 | ||
| 214 | SetPageUptodate(page); | 224 | SetPageUptodate(page); |
| @@ -227,10 +237,12 @@ int afs_page_filler(void *data, struct page *page) | |||
| 227 | _leave(" = 0"); | 237 | _leave(" = 0"); |
| 228 | return 0; | 238 | return 0; |
| 229 | 239 | ||
| 240 | io_error: | ||
| 241 | SetPageError(page); | ||
| 242 | goto error; | ||
| 230 | enomem: | 243 | enomem: |
| 231 | ret = -ENOMEM; | 244 | ret = -ENOMEM; |
| 232 | error: | 245 | error: |
| 233 | SetPageError(page); | ||
| 234 | unlock_page(page); | 246 | unlock_page(page); |
| 235 | _leave(" = %d", ret); | 247 | _leave(" = %d", ret); |
| 236 | return ret; | 248 | return ret; |
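The restructured error path in afs_page_filler() draws a line between transient failures (-EINTR, -ENOMEM, -ERESTARTSYS, -EAGAIN), which now leave the page unmarked so a later read can retry, and everything else, which goes through the new io_error label and SetPageError(). The classification step, as a standalone helper (the name is invented):

#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: should this read failure poison the page? */
static bool example_is_transient(int err)
{
	switch (err) {
	case -EINTR:
	case -ENOMEM:
	case -ERESTARTSYS:
	case -EAGAIN:
		return true;	/* retryable: keep the page clean */
	default:
		return false;	/* hard failure: SetPageError() territory */
	}
}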
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index ac8e766978dc..19f76ae36982 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c | |||
| @@ -17,6 +17,12 @@ | |||
| 17 | #include "afs_fs.h" | 17 | #include "afs_fs.h" |
| 18 | 18 | ||
| 19 | /* | 19 | /* |
| 20 | * We need somewhere to discard into in case the server helpfully returns more | ||
| 21 | * than we asked for in FS.FetchData{,64}. | ||
| 22 | */ | ||
| 23 | static u8 afs_discard_buffer[64]; | ||
| 24 | |||
| 25 | /* | ||
| 20 | * decode an AFSFid block | 26 | * decode an AFSFid block |
| 21 | */ | 27 | */ |
| 22 | static void xdr_decode_AFSFid(const __be32 **_bp, struct afs_fid *fid) | 28 | static void xdr_decode_AFSFid(const __be32 **_bp, struct afs_fid *fid) |
| @@ -105,7 +111,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp, | |||
| 105 | vnode->vfs_inode.i_mode = mode; | 111 | vnode->vfs_inode.i_mode = mode; |
| 106 | } | 112 | } |
| 107 | 113 | ||
| 108 | vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server; | 114 | vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client; |
| 109 | vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime; | 115 | vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime; |
| 110 | vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime; | 116 | vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime; |
| 111 | vnode->vfs_inode.i_version = data_version; | 117 | vnode->vfs_inode.i_version = data_version; |
| @@ -139,7 +145,7 @@ static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode) | |||
| 139 | vnode->cb_version = ntohl(*bp++); | 145 | vnode->cb_version = ntohl(*bp++); |
| 140 | vnode->cb_expiry = ntohl(*bp++); | 146 | vnode->cb_expiry = ntohl(*bp++); |
| 141 | vnode->cb_type = ntohl(*bp++); | 147 | vnode->cb_type = ntohl(*bp++); |
| 142 | vnode->cb_expires = vnode->cb_expiry + get_seconds(); | 148 | vnode->cb_expires = vnode->cb_expiry + ktime_get_real_seconds(); |
| 143 | *_bp = bp; | 149 | *_bp = bp; |
| 144 | } | 150 | } |
| 145 | 151 | ||
| @@ -315,7 +321,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) | |||
| 315 | void *buffer; | 321 | void *buffer; |
| 316 | int ret; | 322 | int ret; |
| 317 | 323 | ||
| 318 | _enter("{%u,%zu/%u;%u/%llu}", | 324 | _enter("{%u,%zu/%u;%llu/%llu}", |
| 319 | call->unmarshall, call->offset, call->count, | 325 | call->unmarshall, call->offset, call->count, |
| 320 | req->remain, req->actual_len); | 326 | req->remain, req->actual_len); |
| 321 | 327 | ||
| @@ -353,12 +359,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) | |||
| 353 | 359 | ||
| 354 | req->actual_len |= ntohl(call->tmp); | 360 | req->actual_len |= ntohl(call->tmp); |
| 355 | _debug("DATA length: %llu", req->actual_len); | 361 | _debug("DATA length: %llu", req->actual_len); |
| 356 | /* Check that the server didn't want to send us extra. We | ||
| 357 | * might want to just discard instead, but that requires | ||
| 358 | * cooperation from AF_RXRPC. | ||
| 359 | */ | ||
| 360 | if (req->actual_len > req->len) | ||
| 361 | return -EBADMSG; | ||
| 362 | 362 | ||
| 363 | req->remain = req->actual_len; | 363 | req->remain = req->actual_len; |
| 364 | call->offset = req->pos & (PAGE_SIZE - 1); | 364 | call->offset = req->pos & (PAGE_SIZE - 1); |
| @@ -368,6 +368,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) | |||
| 368 | call->unmarshall++; | 368 | call->unmarshall++; |
| 369 | 369 | ||
| 370 | begin_page: | 370 | begin_page: |
| 371 | ASSERTCMP(req->index, <, req->nr_pages); | ||
| 371 | if (req->remain > PAGE_SIZE - call->offset) | 372 | if (req->remain > PAGE_SIZE - call->offset) |
| 372 | size = PAGE_SIZE - call->offset; | 373 | size = PAGE_SIZE - call->offset; |
| 373 | else | 374 | else |
| @@ -378,7 +379,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) | |||
| 378 | 379 | ||
| 379 | /* extract the returned data */ | 380 | /* extract the returned data */ |
| 380 | case 3: | 381 | case 3: |
| 381 | _debug("extract data %u/%llu %zu/%u", | 382 | _debug("extract data %llu/%llu %zu/%u", |
| 382 | req->remain, req->actual_len, call->offset, call->count); | 383 | req->remain, req->actual_len, call->offset, call->count); |
| 383 | 384 | ||
| 384 | buffer = kmap(req->pages[req->index]); | 385 | buffer = kmap(req->pages[req->index]); |
| @@ -389,19 +390,40 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) | |||
| 389 | if (call->offset == PAGE_SIZE) { | 390 | if (call->offset == PAGE_SIZE) { |
| 390 | if (req->page_done) | 391 | if (req->page_done) |
| 391 | req->page_done(call, req); | 392 | req->page_done(call, req); |
| 393 | req->index++; | ||
| 392 | if (req->remain > 0) { | 394 | if (req->remain > 0) { |
| 393 | req->index++; | ||
| 394 | call->offset = 0; | 395 | call->offset = 0; |
| 396 | if (req->index >= req->nr_pages) { | ||
| 397 | call->unmarshall = 4; | ||
| 398 | goto begin_discard; | ||
| 399 | } | ||
| 395 | goto begin_page; | 400 | goto begin_page; |
| 396 | } | 401 | } |
| 397 | } | 402 | } |
| 403 | goto no_more_data; | ||
| 404 | |||
| 405 | /* Discard any excess data the server gave us */ | ||
| 406 | begin_discard: | ||
| 407 | case 4: | ||
| 408 | size = min_t(loff_t, sizeof(afs_discard_buffer), req->remain); | ||
| 409 | call->count = size; | ||
| 410 | _debug("extract discard %llu/%llu %zu/%u", | ||
| 411 | req->remain, req->actual_len, call->offset, call->count); | ||
| 412 | |||
| 413 | call->offset = 0; | ||
| 414 | ret = afs_extract_data(call, afs_discard_buffer, call->count, true); | ||
| 415 | req->remain -= call->offset; | ||
| 416 | if (ret < 0) | ||
| 417 | return ret; | ||
| 418 | if (req->remain > 0) | ||
| 419 | goto begin_discard; | ||
| 398 | 420 | ||
| 399 | no_more_data: | 421 | no_more_data: |
| 400 | call->offset = 0; | 422 | call->offset = 0; |
| 401 | call->unmarshall++; | 423 | call->unmarshall = 5; |
| 402 | 424 | ||
| 403 | /* extract the metadata */ | 425 | /* extract the metadata */ |
| 404 | case 4: | 426 | case 5: |
| 405 | ret = afs_extract_data(call, call->buffer, | 427 | ret = afs_extract_data(call, call->buffer, |
| 406 | (21 + 3 + 6) * 4, false); | 428 | (21 + 3 + 6) * 4, false); |
| 407 | if (ret < 0) | 429 | if (ret < 0) |
| @@ -416,16 +438,17 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) | |||
| 416 | call->offset = 0; | 438 | call->offset = 0; |
| 417 | call->unmarshall++; | 439 | call->unmarshall++; |
| 418 | 440 | ||
| 419 | case 5: | 441 | case 6: |
| 420 | break; | 442 | break; |
| 421 | } | 443 | } |
| 422 | 444 | ||
| 423 | if (call->count < PAGE_SIZE) { | 445 | for (; req->index < req->nr_pages; req->index++) { |
| 424 | buffer = kmap(req->pages[req->index]); | 446 | if (call->count < PAGE_SIZE) |
| 425 | memset(buffer + call->count, 0, PAGE_SIZE - call->count); | 447 | zero_user_segment(req->pages[req->index], |
| 426 | kunmap(req->pages[req->index]); | 448 | call->count, PAGE_SIZE); |
| 427 | if (req->page_done) | 449 | if (req->page_done) |
| 428 | req->page_done(call, req); | 450 | req->page_done(call, req); |
| 451 | call->count = 0; | ||
| 429 | } | 452 | } |
| 430 | 453 | ||
| 431 | _leave(" = 0 [done]"); | 454 | _leave(" = 0 [done]"); |
| @@ -711,8 +734,8 @@ int afs_fs_create(struct afs_server *server, | |||
| 711 | memset(bp, 0, padsz); | 734 | memset(bp, 0, padsz); |
| 712 | bp = (void *) bp + padsz; | 735 | bp = (void *) bp + padsz; |
| 713 | } | 736 | } |
| 714 | *bp++ = htonl(AFS_SET_MODE); | 737 | *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME); |
| 715 | *bp++ = 0; /* mtime */ | 738 | *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ |
| 716 | *bp++ = 0; /* owner */ | 739 | *bp++ = 0; /* owner */ |
| 717 | *bp++ = 0; /* group */ | 740 | *bp++ = 0; /* group */ |
| 718 | *bp++ = htonl(mode & S_IALLUGO); /* unix mode */ | 741 | *bp++ = htonl(mode & S_IALLUGO); /* unix mode */ |
| @@ -980,8 +1003,8 @@ int afs_fs_symlink(struct afs_server *server, | |||
| 980 | memset(bp, 0, c_padsz); | 1003 | memset(bp, 0, c_padsz); |
| 981 | bp = (void *) bp + c_padsz; | 1004 | bp = (void *) bp + c_padsz; |
| 982 | } | 1005 | } |
| 983 | *bp++ = htonl(AFS_SET_MODE); | 1006 | *bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME); |
| 984 | *bp++ = 0; /* mtime */ | 1007 | *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ |
| 985 | *bp++ = 0; /* owner */ | 1008 | *bp++ = 0; /* owner */ |
| 986 | *bp++ = 0; /* group */ | 1009 | *bp++ = 0; /* group */ |
| 987 | *bp++ = htonl(S_IRWXUGO); /* unix mode */ | 1010 | *bp++ = htonl(S_IRWXUGO); /* unix mode */ |
| @@ -1180,8 +1203,8 @@ static int afs_fs_store_data64(struct afs_server *server, | |||
| 1180 | *bp++ = htonl(vnode->fid.vnode); | 1203 | *bp++ = htonl(vnode->fid.vnode); |
| 1181 | *bp++ = htonl(vnode->fid.unique); | 1204 | *bp++ = htonl(vnode->fid.unique); |
| 1182 | 1205 | ||
| 1183 | *bp++ = 0; /* mask */ | 1206 | *bp++ = htonl(AFS_SET_MTIME); /* mask */ |
| 1184 | *bp++ = 0; /* mtime */ | 1207 | *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ |
| 1185 | *bp++ = 0; /* owner */ | 1208 | *bp++ = 0; /* owner */ |
| 1186 | *bp++ = 0; /* group */ | 1209 | *bp++ = 0; /* group */ |
| 1187 | *bp++ = 0; /* unix mode */ | 1210 | *bp++ = 0; /* unix mode */ |
| @@ -1213,7 +1236,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb, | |||
| 1213 | _enter(",%x,{%x:%u},,", | 1236 | _enter(",%x,{%x:%u},,", |
| 1214 | key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode); | 1237 | key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode); |
| 1215 | 1238 | ||
| 1216 | size = to - offset; | 1239 | size = (loff_t)to - (loff_t)offset; |
| 1217 | if (first != last) | 1240 | if (first != last) |
| 1218 | size += (loff_t)(last - first) << PAGE_SHIFT; | 1241 | size += (loff_t)(last - first) << PAGE_SHIFT; |
| 1219 | pos = (loff_t)first << PAGE_SHIFT; | 1242 | pos = (loff_t)first << PAGE_SHIFT; |
| @@ -1257,8 +1280,8 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb, | |||
| 1257 | *bp++ = htonl(vnode->fid.vnode); | 1280 | *bp++ = htonl(vnode->fid.vnode); |
| 1258 | *bp++ = htonl(vnode->fid.unique); | 1281 | *bp++ = htonl(vnode->fid.unique); |
| 1259 | 1282 | ||
| 1260 | *bp++ = 0; /* mask */ | 1283 | *bp++ = htonl(AFS_SET_MTIME); /* mask */ |
| 1261 | *bp++ = 0; /* mtime */ | 1284 | *bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */ |
| 1262 | *bp++ = 0; /* owner */ | 1285 | *bp++ = 0; /* owner */ |
| 1263 | *bp++ = 0; /* group */ | 1286 | *bp++ = 0; /* group */ |
| 1264 | *bp++ = 0; /* unix mode */ | 1287 | *bp++ = 0; /* unix mode */ |
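The largest fsclient.c change replaces the old -EBADMSG bail-out with a new unmarshalling state (4) that drains any over-delivery from the server through the static 64-byte afs_discard_buffer, looping until req->remain reaches zero; the final loop then zeroes and completes any pages the in-range data never filled. Draining in bounded chunks looks like this in isolation (struct example_conn and example_conn_read are hypothetical stand-ins for the transport):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

struct example_conn;
ssize_t example_conn_read(struct example_conn *c, void *buf, size_t len);

static u8 example_discard_buffer[64];

/* Sketch: consume 'remain' unwanted bytes through a small scratch
 * buffer, one bounded chunk per pass. */
static int example_drain(struct example_conn *c, loff_t remain)
{
	while (remain > 0) {
		size_t chunk = min_t(loff_t, sizeof(example_discard_buffer),
				     remain);
		ssize_t n = example_conn_read(c, example_discard_buffer, chunk);

		if (n <= 0)
			return n < 0 ? (int)n : -EIO; /* 0 would spin forever */
		remain -= n;
	}
	return 0;
}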
diff --git a/fs/afs/inode.c b/fs/afs/inode.c index 1e4897a048d2..aae55dd15108 100644 --- a/fs/afs/inode.c +++ b/fs/afs/inode.c | |||
| @@ -54,8 +54,21 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key) | |||
| 54 | inode->i_fop = &afs_dir_file_operations; | 54 | inode->i_fop = &afs_dir_file_operations; |
| 55 | break; | 55 | break; |
| 56 | case AFS_FTYPE_SYMLINK: | 56 | case AFS_FTYPE_SYMLINK: |
| 57 | inode->i_mode = S_IFLNK | vnode->status.mode; | 57 | /* Symlinks with a mode of 0644 are actually mountpoints. */ |
| 58 | inode->i_op = &page_symlink_inode_operations; | 58 | if ((vnode->status.mode & 0777) == 0644) { |
| 59 | inode->i_flags |= S_AUTOMOUNT; | ||
| 60 | |||
| 61 | spin_lock(&vnode->lock); | ||
| 62 | set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); | ||
| 63 | spin_unlock(&vnode->lock); | ||
| 64 | |||
| 65 | inode->i_mode = S_IFDIR | 0555; | ||
| 66 | inode->i_op = &afs_mntpt_inode_operations; | ||
| 67 | inode->i_fop = &afs_mntpt_file_operations; | ||
| 68 | } else { | ||
| 69 | inode->i_mode = S_IFLNK | vnode->status.mode; | ||
| 70 | inode->i_op = &page_symlink_inode_operations; | ||
| 71 | } | ||
| 59 | inode_nohighmem(inode); | 72 | inode_nohighmem(inode); |
| 60 | break; | 73 | break; |
| 61 | default: | 74 | default: |
| @@ -70,27 +83,15 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key) | |||
| 70 | 83 | ||
| 71 | set_nlink(inode, vnode->status.nlink); | 84 | set_nlink(inode, vnode->status.nlink); |
| 72 | inode->i_uid = vnode->status.owner; | 85 | inode->i_uid = vnode->status.owner; |
| 73 | inode->i_gid = GLOBAL_ROOT_GID; | 86 | inode->i_gid = vnode->status.group; |
| 74 | inode->i_size = vnode->status.size; | 87 | inode->i_size = vnode->status.size; |
| 75 | inode->i_ctime.tv_sec = vnode->status.mtime_server; | 88 | inode->i_ctime.tv_sec = vnode->status.mtime_client; |
| 76 | inode->i_ctime.tv_nsec = 0; | 89 | inode->i_ctime.tv_nsec = 0; |
| 77 | inode->i_atime = inode->i_mtime = inode->i_ctime; | 90 | inode->i_atime = inode->i_mtime = inode->i_ctime; |
| 78 | inode->i_blocks = 0; | 91 | inode->i_blocks = 0; |
| 79 | inode->i_generation = vnode->fid.unique; | 92 | inode->i_generation = vnode->fid.unique; |
| 80 | inode->i_version = vnode->status.data_version; | 93 | inode->i_version = vnode->status.data_version; |
| 81 | inode->i_mapping->a_ops = &afs_fs_aops; | 94 | inode->i_mapping->a_ops = &afs_fs_aops; |
| 82 | |||
| 83 | /* check to see whether a symbolic link is really a mountpoint */ | ||
| 84 | if (vnode->status.type == AFS_FTYPE_SYMLINK) { | ||
| 85 | afs_mntpt_check_symlink(vnode, key); | ||
| 86 | |||
| 87 | if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) { | ||
| 88 | inode->i_mode = S_IFDIR | vnode->status.mode; | ||
| 89 | inode->i_op = &afs_mntpt_inode_operations; | ||
| 90 | inode->i_fop = &afs_mntpt_file_operations; | ||
| 91 | } | ||
| 92 | } | ||
| 93 | |||
| 94 | return 0; | 95 | return 0; |
| 95 | } | 96 | } |
| 96 | 97 | ||
| @@ -245,12 +246,13 @@ struct inode *afs_iget(struct super_block *sb, struct key *key, | |||
| 245 | vnode->cb_version = 0; | 246 | vnode->cb_version = 0; |
| 246 | vnode->cb_expiry = 0; | 247 | vnode->cb_expiry = 0; |
| 247 | vnode->cb_type = 0; | 248 | vnode->cb_type = 0; |
| 248 | vnode->cb_expires = get_seconds(); | 249 | vnode->cb_expires = ktime_get_real_seconds(); |
| 249 | } else { | 250 | } else { |
| 250 | vnode->cb_version = cb->version; | 251 | vnode->cb_version = cb->version; |
| 251 | vnode->cb_expiry = cb->expiry; | 252 | vnode->cb_expiry = cb->expiry; |
| 252 | vnode->cb_type = cb->type; | 253 | vnode->cb_type = cb->type; |
| 253 | vnode->cb_expires = vnode->cb_expiry + get_seconds(); | 254 | vnode->cb_expires = vnode->cb_expiry + |
| 255 | ktime_get_real_seconds(); | ||
| 254 | } | 256 | } |
| 255 | } | 257 | } |
| 256 | 258 | ||
| @@ -323,7 +325,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key) | |||
| 323 | !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) && | 325 | !test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) && |
| 324 | !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) && | 326 | !test_bit(AFS_VNODE_MODIFIED, &vnode->flags) && |
| 325 | !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) { | 327 | !test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) { |
| 326 | if (vnode->cb_expires < get_seconds() + 10) { | 328 | if (vnode->cb_expires < ktime_get_real_seconds() + 10) { |
| 327 | _debug("callback expired"); | 329 | _debug("callback expired"); |
| 328 | set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); | 330 | set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags); |
| 329 | } else { | 331 | } else { |
| @@ -444,7 +446,7 @@ void afs_evict_inode(struct inode *inode) | |||
| 444 | 446 | ||
| 445 | mutex_lock(&vnode->permits_lock); | 447 | mutex_lock(&vnode->permits_lock); |
| 446 | permits = vnode->permits; | 448 | permits = vnode->permits; |
| 447 | rcu_assign_pointer(vnode->permits, NULL); | 449 | RCU_INIT_POINTER(vnode->permits, NULL); |
| 448 | mutex_unlock(&vnode->permits_lock); | 450 | mutex_unlock(&vnode->permits_lock); |
| 449 | if (permits) | 451 | if (permits) |
| 450 | call_rcu(&permits->rcu, afs_zap_permits); | 452 | call_rcu(&permits->rcu, afs_zap_permits); |
diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 5dfa56903a2d..a6901360fb81 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | 11 | ||
| 12 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
| 13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
| 14 | #include <linux/ktime.h> | ||
| 14 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
| 15 | #include <linux/pagemap.h> | 16 | #include <linux/pagemap.h> |
| 16 | #include <linux/rxrpc.h> | 17 | #include <linux/rxrpc.h> |
| @@ -90,7 +91,10 @@ struct afs_call { | |||
| 90 | unsigned request_size; /* size of request data */ | 91 | unsigned request_size; /* size of request data */ |
| 91 | unsigned reply_max; /* maximum size of reply */ | 92 | unsigned reply_max; /* maximum size of reply */ |
| 92 | unsigned first_offset; /* offset into mapping[first] */ | 93 | unsigned first_offset; /* offset into mapping[first] */ |
| 93 | unsigned last_to; /* amount of mapping[last] */ | 94 | union { |
| 95 | unsigned last_to; /* amount of mapping[last] */ | ||
| 96 | unsigned count2; /* count used in unmarshalling */ | ||
| 97 | }; | ||
| 94 | unsigned char unmarshall; /* unmarshalling phase */ | 98 | unsigned char unmarshall; /* unmarshalling phase */ |
| 95 | bool incoming; /* T if incoming call */ | 99 | bool incoming; /* T if incoming call */ |
| 96 | bool send_pages; /* T if data from mapping should be sent */ | 100 | bool send_pages; /* T if data from mapping should be sent */ |
| @@ -127,12 +131,11 @@ struct afs_call_type { | |||
| 127 | */ | 131 | */ |
| 128 | struct afs_read { | 132 | struct afs_read { |
| 129 | loff_t pos; /* Where to start reading */ | 133 | loff_t pos; /* Where to start reading */ |
| 130 | loff_t len; /* How much to read */ | 134 | loff_t len; /* How much we're asking for */ |
| 131 | loff_t actual_len; /* How much we're actually getting */ | 135 | loff_t actual_len; /* How much we're actually getting */ |
| 136 | loff_t remain; /* Amount remaining */ | ||
| 132 | atomic_t usage; | 137 | atomic_t usage; |
| 133 | unsigned int remain; /* Amount remaining */ | ||
| 134 | unsigned int index; /* Which page we're reading into */ | 138 | unsigned int index; /* Which page we're reading into */ |
| 135 | unsigned int pg_offset; /* Offset in page we're at */ | ||
| 136 | unsigned int nr_pages; | 139 | unsigned int nr_pages; |
| 137 | void (*page_done)(struct afs_call *, struct afs_read *); | 140 | void (*page_done)(struct afs_call *, struct afs_read *); |
| 138 | struct page *pages[]; | 141 | struct page *pages[]; |
| @@ -247,7 +250,7 @@ struct afs_cache_vhash { | |||
| 247 | */ | 250 | */ |
| 248 | struct afs_vlocation { | 251 | struct afs_vlocation { |
| 249 | atomic_t usage; | 252 | atomic_t usage; |
| 250 | time_t time_of_death; /* time at which put reduced usage to 0 */ | 253 | time64_t time_of_death; /* time at which put reduced usage to 0 */ |
| 251 | struct list_head link; /* link in cell volume location list */ | 254 | struct list_head link; /* link in cell volume location list */ |
| 252 | struct list_head grave; /* link in master graveyard list */ | 255 | struct list_head grave; /* link in master graveyard list */ |
| 253 | struct list_head update; /* link in master update list */ | 256 | struct list_head update; /* link in master update list */ |
| @@ -258,7 +261,7 @@ struct afs_vlocation { | |||
| 258 | struct afs_cache_vlocation vldb; /* volume information DB record */ | 261 | struct afs_cache_vlocation vldb; /* volume information DB record */ |
| 259 | struct afs_volume *vols[3]; /* volume access record pointer (index by type) */ | 262 | struct afs_volume *vols[3]; /* volume access record pointer (index by type) */ |
| 260 | wait_queue_head_t waitq; /* status change waitqueue */ | 263 | wait_queue_head_t waitq; /* status change waitqueue */ |
| 261 | time_t update_at; /* time at which record should be updated */ | 264 | time64_t update_at; /* time at which record should be updated */ |
| 262 | spinlock_t lock; /* access lock */ | 265 | spinlock_t lock; /* access lock */ |
| 263 | afs_vlocation_state_t state; /* volume location state */ | 266 | afs_vlocation_state_t state; /* volume location state */ |
| 264 | unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */ | 267 | unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */ |
| @@ -271,7 +274,7 @@ struct afs_vlocation { | |||
| 271 | */ | 274 | */ |
| 272 | struct afs_server { | 275 | struct afs_server { |
| 273 | atomic_t usage; | 276 | atomic_t usage; |
| 274 | time_t time_of_death; /* time at which put reduced usage to 0 */ | 277 | time64_t time_of_death; /* time at which put reduced usage to 0 */ |
| 275 | struct in_addr addr; /* server address */ | 278 | struct in_addr addr; /* server address */ |
| 276 | struct afs_cell *cell; /* cell in which server resides */ | 279 | struct afs_cell *cell; /* cell in which server resides */ |
| 277 | struct list_head link; /* link in cell's server list */ | 280 | struct list_head link; /* link in cell's server list */ |
| @@ -374,8 +377,8 @@ struct afs_vnode { | |||
| 374 | struct rb_node server_rb; /* link in server->fs_vnodes */ | 377 | struct rb_node server_rb; /* link in server->fs_vnodes */ |
| 375 | struct rb_node cb_promise; /* link in server->cb_promises */ | 378 | struct rb_node cb_promise; /* link in server->cb_promises */ |
| 376 | struct work_struct cb_broken_work; /* work to be done on callback break */ | 379 | struct work_struct cb_broken_work; /* work to be done on callback break */ |
| 377 | time_t cb_expires; /* time at which callback expires */ | 380 | time64_t cb_expires; /* time at which callback expires */ |
| 378 | time_t cb_expires_at; /* time used to order cb_promise */ | 381 | time64_t cb_expires_at; /* time used to order cb_promise */ |
| 379 | unsigned cb_version; /* callback version */ | 382 | unsigned cb_version; /* callback version */ |
| 380 | unsigned cb_expiry; /* callback expiry time */ | 383 | unsigned cb_expiry; /* callback expiry time */ |
| 381 | afs_callback_type_t cb_type; /* type of callback */ | 384 | afs_callback_type_t cb_type; /* type of callback */ |
| @@ -557,7 +560,6 @@ extern const struct inode_operations afs_autocell_inode_operations; | |||
| 557 | extern const struct file_operations afs_mntpt_file_operations; | 560 | extern const struct file_operations afs_mntpt_file_operations; |
| 558 | 561 | ||
| 559 | extern struct vfsmount *afs_d_automount(struct path *); | 562 | extern struct vfsmount *afs_d_automount(struct path *); |
| 560 | extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *); | ||
| 561 | extern void afs_mntpt_kill_timer(void); | 563 | extern void afs_mntpt_kill_timer(void); |
| 562 | 564 | ||
| 563 | /* | 565 | /* |
| @@ -718,6 +720,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *); | |||
| 718 | extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *); | 720 | extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *); |
| 719 | extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *); | 721 | extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *); |
| 720 | extern int afs_writeback_all(struct afs_vnode *); | 722 | extern int afs_writeback_all(struct afs_vnode *); |
| 723 | extern int afs_flush(struct file *, fl_owner_t); | ||
| 721 | extern int afs_fsync(struct file *, loff_t, loff_t, int); | 724 | extern int afs_fsync(struct file *, loff_t, loff_t, int); |
| 722 | 725 | ||
| 723 | 726 | ||
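The time_t to time64_t conversions above (paired with the get_seconds() to ktime_get_real_seconds() switches further down in fs/afs/server.c and fs/afs/vlocation.c) keep these second-granularity timestamps valid past January 2038, where a signed 32-bit counter wraps. A minimal standalone illustration of the failure mode (ordinary userspace C, not AFS code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* 2038-01-19T03:14:07Z: the last second representable in a
             * signed 32-bit time_t. */
            int64_t wide = INT32_MAX;
            int64_t next = wide + 1;        /* fine with 64 bits */
            /* Narrowing is implementation-defined; on the usual
             * two's-complement targets it wraps to -2147483648,
             * i.e. back to December 1901. */
            int32_t narrow = (int32_t)next;

            printf("time64_t: %lld\n", (long long)next);
            printf("32-bit time_t: %d\n", narrow);
            return 0;
    }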
diff --git a/fs/afs/misc.c b/fs/afs/misc.c index 91ea1aa0d8b3..100b207efc9e 100644 --- a/fs/afs/misc.c +++ b/fs/afs/misc.c | |||
| @@ -84,6 +84,8 @@ int afs_abort_to_error(u32 abort_code) | |||
| 84 | case RXKADDATALEN: return -EKEYREJECTED; | 84 | case RXKADDATALEN: return -EKEYREJECTED; |
| 85 | case RXKADILLEGALLEVEL: return -EKEYREJECTED; | 85 | case RXKADILLEGALLEVEL: return -EKEYREJECTED; |
| 86 | 86 | ||
| 87 | case RXGEN_OPCODE: return -ENOTSUPP; | ||
| 88 | |||
| 87 | default: return -EREMOTEIO; | 89 | default: return -EREMOTEIO; |
| 88 | } | 90 | } |
| 89 | } | 91 | } |
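The new case above makes afs_abort_to_error() translate RXGEN_OPCODE — the peer's way of saying it does not implement the requested RPC — into -ENOTSUPP rather than letting it fall through to the catch-all -EREMOTEIO; the fs/afs/rxrpc.c hunks below emit the same abort code from the delivery path. A reduced sketch of the mapping shape (illustrative, not the full kernel table):

    /* Unknown abort codes still collapse to -EREMOTEIO ("the server
     * did something unexpected"); RXGEN_OPCODE is now distinguishable
     * so callers can detect an unimplemented operation. */
    static int abort_to_error(u32 abort_code)
    {
            switch (abort_code) {
            case RXGEN_OPCODE:      return -ENOTSUPP;
            default:                return -EREMOTEIO;
            }
    }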
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c index d4fb0afc0097..bd3b65cde282 100644 --- a/fs/afs/mntpt.c +++ b/fs/afs/mntpt.c | |||
| @@ -47,59 +47,6 @@ static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out); | |||
| 47 | static unsigned long afs_mntpt_expiry_timeout = 10 * 60; | 47 | static unsigned long afs_mntpt_expiry_timeout = 10 * 60; |
| 48 | 48 | ||
| 49 | /* | 49 | /* |
| 50 | * check a symbolic link to see whether it actually encodes a mountpoint | ||
| 51 | * - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately | ||
| 52 | */ | ||
| 53 | int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key) | ||
| 54 | { | ||
| 55 | struct page *page; | ||
| 56 | size_t size; | ||
| 57 | char *buf; | ||
| 58 | int ret; | ||
| 59 | |||
| 60 | _enter("{%x:%u,%u}", | ||
| 61 | vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique); | ||
| 62 | |||
| 63 | /* read the contents of the symlink into the pagecache */ | ||
| 64 | page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0, | ||
| 65 | afs_page_filler, key); | ||
| 66 | if (IS_ERR(page)) { | ||
| 67 | ret = PTR_ERR(page); | ||
| 68 | goto out; | ||
| 69 | } | ||
| 70 | |||
| 71 | ret = -EIO; | ||
| 72 | if (PageError(page)) | ||
| 73 | goto out_free; | ||
| 74 | |||
| 75 | buf = kmap(page); | ||
| 76 | |||
| 77 | /* examine the symlink's contents */ | ||
| 78 | size = vnode->status.size; | ||
| 79 | _debug("symlink to %*.*s", (int) size, (int) size, buf); | ||
| 80 | |||
| 81 | if (size > 2 && | ||
| 82 | (buf[0] == '%' || buf[0] == '#') && | ||
| 83 | buf[size - 1] == '.' | ||
| 84 | ) { | ||
| 85 | _debug("symlink is a mountpoint"); | ||
| 86 | spin_lock(&vnode->lock); | ||
| 87 | set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags); | ||
| 88 | vnode->vfs_inode.i_flags |= S_AUTOMOUNT; | ||
| 89 | spin_unlock(&vnode->lock); | ||
| 90 | } | ||
| 91 | |||
| 92 | ret = 0; | ||
| 93 | |||
| 94 | kunmap(page); | ||
| 95 | out_free: | ||
| 96 | put_page(page); | ||
| 97 | out: | ||
| 98 | _leave(" = %d", ret); | ||
| 99 | return ret; | ||
| 100 | } | ||
| 101 | |||
| 102 | /* | ||
| 103 | * no valid lookup procedure on this sort of dir | 50 | * no valid lookup procedure on this sort of dir |
| 104 | */ | 51 | */ |
| 105 | static struct dentry *afs_mntpt_lookup(struct inode *dir, | 52 | static struct dentry *afs_mntpt_lookup(struct inode *dir, |
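The deleted afs_mntpt_check_symlink() classified a symlink as a mountpoint when its body was longer than two bytes, began with '%' or '#', and ended with '.'. The shape of that test, as a standalone function (illustrative, not the kernel helper):

    #include <stdbool.h>
    #include <stddef.h>

    /* An AFS mountpoint symlink looks like "#cell:volume." or
     * "%cell:volume." -- a leading type marker and a trailing dot. */
    static bool is_afs_mountpoint(const char *buf, size_t size)
    {
            return size > 2 &&
                   (buf[0] == '%' || buf[0] == '#') &&
                   buf[size - 1] == '.';
    }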
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 419ef05dcb5e..8f76b13d5549 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c | |||
| @@ -259,67 +259,74 @@ void afs_flat_call_destructor(struct afs_call *call) | |||
| 259 | call->buffer = NULL; | 259 | call->buffer = NULL; |
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | #define AFS_BVEC_MAX 8 | ||
| 263 | |||
| 264 | /* | ||
| 265 | * Load the given bvec with the next few pages. | ||
| 266 | */ | ||
| 267 | static void afs_load_bvec(struct afs_call *call, struct msghdr *msg, | ||
| 268 | struct bio_vec *bv, pgoff_t first, pgoff_t last, | ||
| 269 | unsigned offset) | ||
| 270 | { | ||
| 271 | struct page *pages[AFS_BVEC_MAX]; | ||
| 272 | unsigned int nr, n, i, to, bytes = 0; | ||
| 273 | |||
| 274 | nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX); | ||
| 275 | n = find_get_pages_contig(call->mapping, first, nr, pages); | ||
| 276 | ASSERTCMP(n, ==, nr); | ||
| 277 | |||
| 278 | msg->msg_flags |= MSG_MORE; | ||
| 279 | for (i = 0; i < nr; i++) { | ||
| 280 | to = PAGE_SIZE; | ||
| 281 | if (first + i >= last) { | ||
| 282 | to = call->last_to; | ||
| 283 | msg->msg_flags &= ~MSG_MORE; | ||
| 284 | } | ||
| 285 | bv[i].bv_page = pages[i]; | ||
| 286 | bv[i].bv_len = to - offset; | ||
| 287 | bv[i].bv_offset = offset; | ||
| 288 | bytes += to - offset; | ||
| 289 | offset = 0; | ||
| 290 | } | ||
| 291 | |||
| 292 | iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes); | ||
| 293 | } | ||
| 294 | |||
| 262 | /* | 295 | /* |
| 263 | * attach the data from a bunch of pages on an inode to a call | 296 | * attach the data from a bunch of pages on an inode to a call |
| 264 | */ | 297 | */ |
| 265 | static int afs_send_pages(struct afs_call *call, struct msghdr *msg) | 298 | static int afs_send_pages(struct afs_call *call, struct msghdr *msg) |
| 266 | { | 299 | { |
| 267 | struct page *pages[8]; | 300 | struct bio_vec bv[AFS_BVEC_MAX]; |
| 268 | unsigned count, n, loop, offset, to; | 301 | unsigned int bytes, nr, loop, offset; |
| 269 | pgoff_t first = call->first, last = call->last; | 302 | pgoff_t first = call->first, last = call->last; |
| 270 | int ret; | 303 | int ret; |
| 271 | 304 | ||
| 272 | _enter(""); | ||
| 273 | |||
| 274 | offset = call->first_offset; | 305 | offset = call->first_offset; |
| 275 | call->first_offset = 0; | 306 | call->first_offset = 0; |
| 276 | 307 | ||
| 277 | do { | 308 | do { |
| 278 | _debug("attach %lx-%lx", first, last); | 309 | afs_load_bvec(call, msg, bv, first, last, offset); |
| 279 | 310 | offset = 0; | |
| 280 | count = last - first + 1; | 311 | bytes = msg->msg_iter.count; |
| 281 | if (count > ARRAY_SIZE(pages)) | 312 | nr = msg->msg_iter.nr_segs; |
| 282 | count = ARRAY_SIZE(pages); | 313 | |
| 283 | n = find_get_pages_contig(call->mapping, first, count, pages); | 314 | /* Have to change the state *before* sending the last |
| 284 | ASSERTCMP(n, ==, count); | 315 | * packet as RxRPC might give us the reply before it |
| 285 | 316 | * returns from sending the request. | |
| 286 | loop = 0; | 317 | */ |
| 287 | do { | 318 | if (first + nr - 1 >= last) |
| 288 | struct bio_vec bvec = {.bv_page = pages[loop], | 319 | call->state = AFS_CALL_AWAIT_REPLY; |
| 289 | .bv_offset = offset}; | 320 | ret = rxrpc_kernel_send_data(afs_socket, call->rxcall, |
| 290 | msg->msg_flags = 0; | 321 | msg, bytes); |
| 291 | to = PAGE_SIZE; | 322 | for (loop = 0; loop < nr; loop++) |
| 292 | if (first + loop >= last) | 323 | put_page(bv[loop].bv_page); |
| 293 | to = call->last_to; | ||
| 294 | else | ||
| 295 | msg->msg_flags = MSG_MORE; | ||
| 296 | bvec.bv_len = to - offset; | ||
| 297 | offset = 0; | ||
| 298 | |||
| 299 | _debug("- range %u-%u%s", | ||
| 300 | offset, to, msg->msg_flags ? " [more]" : ""); | ||
| 301 | iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, | ||
| 302 | &bvec, 1, to - offset); | ||
| 303 | |||
| 304 | /* have to change the state *before* sending the last | ||
| 305 | * packet as RxRPC might give us the reply before it | ||
| 306 | * returns from sending the request */ | ||
| 307 | if (first + loop >= last) | ||
| 308 | call->state = AFS_CALL_AWAIT_REPLY; | ||
| 309 | ret = rxrpc_kernel_send_data(afs_socket, call->rxcall, | ||
| 310 | msg, to - offset); | ||
| 311 | if (ret < 0) | ||
| 312 | break; | ||
| 313 | } while (++loop < count); | ||
| 314 | first += count; | ||
| 315 | |||
| 316 | for (loop = 0; loop < count; loop++) | ||
| 317 | put_page(pages[loop]); | ||
| 318 | if (ret < 0) | 324 | if (ret < 0) |
| 319 | break; | 325 | break; |
| 326 | |||
| 327 | first += nr; | ||
| 320 | } while (first <= last); | 328 | } while (first <= last); |
| 321 | 329 | ||
| 322 | _leave(" = %d", ret); | ||
| 323 | return ret; | 330 | return ret; |
| 324 | } | 331 | } |
| 325 | 332 | ||
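The new afs_load_bvec() batches up to AFS_BVEC_MAX contiguous pagecache pages into one bio_vec array, so rxrpc_kernel_send_data() runs once per batch instead of once per page, and MSG_MORE is dropped only on the batch that covers the final page. The gather-then-single-submit shape, as a userspace analog with struct iovec (illustrative; the sizes and names are invented):

    #include <stddef.h>
    #include <sys/uio.h>
    #include <unistd.h>

    #define BVEC_MAX 8
    #define CHUNK    16     /* stand-in for PAGE_SIZE */

    int main(void)
    {
            static char pages[3][CHUNK] = { "aaaaaaaaaaaaaaa",
                                            "bbbbbbbbbbbbbbb",
                                            "ccccccccccccccc" };
            struct iovec iov[BVEC_MAX];
            size_t last_to = 5;     /* bytes used in the final chunk */
            int nr = 3, i;

            /* Load the whole vector in one pass, trimming only the last
             * segment, then issue a single gathered write. */
            for (i = 0; i < nr; i++) {
                    iov[i].iov_base = pages[i];
                    iov[i].iov_len = (i == nr - 1) ? last_to : CHUNK;
            }
            writev(STDOUT_FILENO, iov, nr);
            return 0;
    }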
| @@ -333,6 +340,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, | |||
| 333 | struct rxrpc_call *rxcall; | 340 | struct rxrpc_call *rxcall; |
| 334 | struct msghdr msg; | 341 | struct msghdr msg; |
| 335 | struct kvec iov[1]; | 342 | struct kvec iov[1]; |
| 343 | size_t offset; | ||
| 344 | u32 abort_code; | ||
| 336 | int ret; | 345 | int ret; |
| 337 | 346 | ||
| 338 | _enter("%x,{%d},", addr->s_addr, ntohs(call->port)); | 347 | _enter("%x,{%d},", addr->s_addr, ntohs(call->port)); |
| @@ -381,9 +390,11 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, | |||
| 381 | msg.msg_controllen = 0; | 390 | msg.msg_controllen = 0; |
| 382 | msg.msg_flags = (call->send_pages ? MSG_MORE : 0); | 391 | msg.msg_flags = (call->send_pages ? MSG_MORE : 0); |
| 383 | 392 | ||
| 384 | /* have to change the state *before* sending the last packet as RxRPC | 393 | /* We have to change the state *before* sending the last packet as |
| 385 | * might give us the reply before it returns from sending the | 394 | * rxrpc might give us the reply before it returns from sending the |
| 386 | * request */ | 395 | * request. Further, if the send fails, we may already have been given |
| 396 | * a notification and may have collected it. | ||
| 397 | */ | ||
| 387 | if (!call->send_pages) | 398 | if (!call->send_pages) |
| 388 | call->state = AFS_CALL_AWAIT_REPLY; | 399 | call->state = AFS_CALL_AWAIT_REPLY; |
| 389 | ret = rxrpc_kernel_send_data(afs_socket, rxcall, | 400 | ret = rxrpc_kernel_send_data(afs_socket, rxcall, |
| @@ -405,7 +416,17 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, | |||
| 405 | return afs_wait_for_call_to_complete(call); | 416 | return afs_wait_for_call_to_complete(call); |
| 406 | 417 | ||
| 407 | error_do_abort: | 418 | error_do_abort: |
| 408 | rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD"); | 419 | call->state = AFS_CALL_COMPLETE; |
| 420 | if (ret != -ECONNABORTED) { | ||
| 421 | rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, | ||
| 422 | -ret, "KSD"); | ||
| 423 | } else { | ||
| 424 | abort_code = 0; | ||
| 425 | offset = 0; | ||
| 426 | rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset, | ||
| 427 | false, &abort_code); | ||
| 428 | ret = call->type->abort_to_error(abort_code); | ||
| 429 | } | ||
| 409 | error_kill_call: | 430 | error_kill_call: |
| 410 | afs_put_call(call); | 431 | afs_put_call(call); |
| 411 | _leave(" = %d", ret); | 432 | _leave(" = %d", ret); |
| @@ -452,16 +473,18 @@ static void afs_deliver_to_call(struct afs_call *call) | |||
| 452 | case -EINPROGRESS: | 473 | case -EINPROGRESS: |
| 453 | case -EAGAIN: | 474 | case -EAGAIN: |
| 454 | goto out; | 475 | goto out; |
| 476 | case -ECONNABORTED: | ||
| 477 | goto call_complete; | ||
| 455 | case -ENOTCONN: | 478 | case -ENOTCONN: |
| 456 | abort_code = RX_CALL_DEAD; | 479 | abort_code = RX_CALL_DEAD; |
| 457 | rxrpc_kernel_abort_call(afs_socket, call->rxcall, | 480 | rxrpc_kernel_abort_call(afs_socket, call->rxcall, |
| 458 | abort_code, -ret, "KNC"); | 481 | abort_code, -ret, "KNC"); |
| 459 | goto do_abort; | 482 | goto save_error; |
| 460 | case -ENOTSUPP: | 483 | case -ENOTSUPP: |
| 461 | abort_code = RX_INVALID_OPERATION; | 484 | abort_code = RXGEN_OPCODE; |
| 462 | rxrpc_kernel_abort_call(afs_socket, call->rxcall, | 485 | rxrpc_kernel_abort_call(afs_socket, call->rxcall, |
| 463 | abort_code, -ret, "KIV"); | 486 | abort_code, -ret, "KIV"); |
| 464 | goto do_abort; | 487 | goto save_error; |
| 465 | case -ENODATA: | 488 | case -ENODATA: |
| 466 | case -EBADMSG: | 489 | case -EBADMSG: |
| 467 | case -EMSGSIZE: | 490 | case -EMSGSIZE: |
| @@ -471,7 +494,7 @@ static void afs_deliver_to_call(struct afs_call *call) | |||
| 471 | abort_code = RXGEN_SS_UNMARSHAL; | 494 | abort_code = RXGEN_SS_UNMARSHAL; |
| 472 | rxrpc_kernel_abort_call(afs_socket, call->rxcall, | 495 | rxrpc_kernel_abort_call(afs_socket, call->rxcall, |
| 473 | abort_code, EBADMSG, "KUM"); | 496 | abort_code, EBADMSG, "KUM"); |
| 474 | goto do_abort; | 497 | goto save_error; |
| 475 | } | 498 | } |
| 476 | } | 499 | } |
| 477 | 500 | ||
| @@ -482,8 +505,9 @@ out: | |||
| 482 | _leave(""); | 505 | _leave(""); |
| 483 | return; | 506 | return; |
| 484 | 507 | ||
| 485 | do_abort: | 508 | save_error: |
| 486 | call->error = ret; | 509 | call->error = ret; |
| 510 | call_complete: | ||
| 487 | call->state = AFS_CALL_COMPLETE; | 511 | call->state = AFS_CALL_COMPLETE; |
| 488 | goto done; | 512 | goto done; |
| 489 | } | 513 | } |
| @@ -493,7 +517,6 @@ do_abort: | |||
| 493 | */ | 517 | */ |
| 494 | static int afs_wait_for_call_to_complete(struct afs_call *call) | 518 | static int afs_wait_for_call_to_complete(struct afs_call *call) |
| 495 | { | 519 | { |
| 496 | const char *abort_why; | ||
| 497 | int ret; | 520 | int ret; |
| 498 | 521 | ||
| 499 | DECLARE_WAITQUEUE(myself, current); | 522 | DECLARE_WAITQUEUE(myself, current); |
| @@ -512,13 +535,8 @@ static int afs_wait_for_call_to_complete(struct afs_call *call) | |||
| 512 | continue; | 535 | continue; |
| 513 | } | 536 | } |
| 514 | 537 | ||
| 515 | abort_why = "KWC"; | 538 | if (call->state == AFS_CALL_COMPLETE || |
| 516 | ret = call->error; | 539 | signal_pending(current)) |
| 517 | if (call->state == AFS_CALL_COMPLETE) | ||
| 518 | break; | ||
| 519 | abort_why = "KWI"; | ||
| 520 | ret = -EINTR; | ||
| 521 | if (signal_pending(current)) | ||
| 522 | break; | 540 | break; |
| 523 | schedule(); | 541 | schedule(); |
| 524 | } | 542 | } |
| @@ -526,13 +544,14 @@ static int afs_wait_for_call_to_complete(struct afs_call *call) | |||
| 526 | remove_wait_queue(&call->waitq, &myself); | 544 | remove_wait_queue(&call->waitq, &myself); |
| 527 | __set_current_state(TASK_RUNNING); | 545 | __set_current_state(TASK_RUNNING); |
| 528 | 546 | ||
| 529 | /* kill the call */ | 547 | /* Kill off the call if it's still live. */ |
| 530 | if (call->state < AFS_CALL_COMPLETE) { | 548 | if (call->state < AFS_CALL_COMPLETE) { |
| 531 | _debug("call incomplete"); | 549 | _debug("call interrupted"); |
| 532 | rxrpc_kernel_abort_call(afs_socket, call->rxcall, | 550 | rxrpc_kernel_abort_call(afs_socket, call->rxcall, |
| 533 | RX_CALL_DEAD, -ret, abort_why); | 551 | RX_USER_ABORT, -EINTR, "KWI"); |
| 534 | } | 552 | } |
| 535 | 553 | ||
| 554 | ret = call->error; | ||
| 536 | _debug("call complete"); | 555 | _debug("call complete"); |
| 537 | afs_put_call(call); | 556 | afs_put_call(call); |
| 538 | _leave(" = %d", ret); | 557 | _leave(" = %d", ret); |
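The rewritten afs_wait_for_call_to_complete() above folds the two exit tests into one condition, takes the return value from call->error only after the loop, and aborts with RX_USER_ABORT/-EINTR only if the call is still live — which at that point can only mean the wait was interrupted. A kernel-style sketch of the consolidated loop (the add_wait_queue()/remove_wait_queue() framing is omitted):

    for (;;) {
            set_current_state(TASK_INTERRUPTIBLE);
            /* ... deliver any queued traffic on the call ... */
            if (call->state == AFS_CALL_COMPLETE ||
                signal_pending(current))
                    break;
            schedule();
    }
    __set_current_state(TASK_RUNNING);

    if (call->state < AFS_CALL_COMPLETE)    /* interrupted, not done */
            rxrpc_kernel_abort_call(afs_socket, call->rxcall,
                                    RX_USER_ABORT, -EINTR, "KWI");
    ret = call->error;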
diff --git a/fs/afs/security.c b/fs/afs/security.c index 8d010422dc89..ecb86a670180 100644 --- a/fs/afs/security.c +++ b/fs/afs/security.c | |||
| @@ -114,7 +114,7 @@ void afs_clear_permits(struct afs_vnode *vnode) | |||
| 114 | 114 | ||
| 115 | mutex_lock(&vnode->permits_lock); | 115 | mutex_lock(&vnode->permits_lock); |
| 116 | permits = vnode->permits; | 116 | permits = vnode->permits; |
| 117 | rcu_assign_pointer(vnode->permits, NULL); | 117 | RCU_INIT_POINTER(vnode->permits, NULL); |
| 118 | mutex_unlock(&vnode->permits_lock); | 118 | mutex_unlock(&vnode->permits_lock); |
| 119 | 119 | ||
| 120 | if (permits) | 120 | if (permits) |
| @@ -340,17 +340,22 @@ int afs_permission(struct inode *inode, int mask) | |||
| 340 | } else { | 340 | } else { |
| 341 | if (!(access & AFS_ACE_LOOKUP)) | 341 | if (!(access & AFS_ACE_LOOKUP)) |
| 342 | goto permission_denied; | 342 | goto permission_denied; |
| 343 | if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR)) | ||
| 344 | goto permission_denied; | ||
| 343 | if (mask & (MAY_EXEC | MAY_READ)) { | 345 | if (mask & (MAY_EXEC | MAY_READ)) { |
| 344 | if (!(access & AFS_ACE_READ)) | 346 | if (!(access & AFS_ACE_READ)) |
| 345 | goto permission_denied; | 347 | goto permission_denied; |
| 348 | if (!(inode->i_mode & S_IRUSR)) | ||
| 349 | goto permission_denied; | ||
| 346 | } else if (mask & MAY_WRITE) { | 350 | } else if (mask & MAY_WRITE) { |
| 347 | if (!(access & AFS_ACE_WRITE)) | 351 | if (!(access & AFS_ACE_WRITE)) |
| 348 | goto permission_denied; | 352 | goto permission_denied; |
| 353 | if (!(inode->i_mode & S_IWUSR)) | ||
| 354 | goto permission_denied; | ||
| 349 | } | 355 | } |
| 350 | } | 356 | } |
| 351 | 357 | ||
| 352 | key_put(key); | 358 | key_put(key); |
| 353 | ret = generic_permission(inode, mask); | ||
| 354 | _leave(" = %d", ret); | 359 | _leave(" = %d", ret); |
| 355 | return ret; | 360 | return ret; |
| 356 | 361 | ||
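The RCU_INIT_POINTER() change in afs_clear_permits() is safe because rcu_assign_pointer()'s release semantics only matter when readers must observe the pointee's initialised contents before the pointer itself; storing NULL publishes nothing, so a plain store suffices. The rule in miniature (kernel-style sketch, illustrative):

    /* Publishing a populated object: readers that see the pointer must
     * also see p's fields, so the barriered form is required. */
    p = kmalloc(sizeof(*p), GFP_KERNEL);
    p->count = 1;
    rcu_assign_pointer(vnode->permits, p);

    /* Clearing: NULL carries no payload to order against, so the
     * cheap initialiser form is enough. */
    RCU_INIT_POINTER(vnode->permits, NULL);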
diff --git a/fs/afs/server.c b/fs/afs/server.c index d4066ab7dd55..c001b1f2455f 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c | |||
| @@ -242,7 +242,7 @@ void afs_put_server(struct afs_server *server) | |||
| 242 | spin_lock(&afs_server_graveyard_lock); | 242 | spin_lock(&afs_server_graveyard_lock); |
| 243 | if (atomic_read(&server->usage) == 0) { | 243 | if (atomic_read(&server->usage) == 0) { |
| 244 | list_move_tail(&server->grave, &afs_server_graveyard); | 244 | list_move_tail(&server->grave, &afs_server_graveyard); |
| 245 | server->time_of_death = get_seconds(); | 245 | server->time_of_death = ktime_get_real_seconds(); |
| 246 | queue_delayed_work(afs_wq, &afs_server_reaper, | 246 | queue_delayed_work(afs_wq, &afs_server_reaper, |
| 247 | afs_server_timeout * HZ); | 247 | afs_server_timeout * HZ); |
| 248 | } | 248 | } |
| @@ -277,9 +277,9 @@ static void afs_reap_server(struct work_struct *work) | |||
| 277 | LIST_HEAD(corpses); | 277 | LIST_HEAD(corpses); |
| 278 | struct afs_server *server; | 278 | struct afs_server *server; |
| 279 | unsigned long delay, expiry; | 279 | unsigned long delay, expiry; |
| 280 | time_t now; | 280 | time64_t now; |
| 281 | 281 | ||
| 282 | now = get_seconds(); | 282 | now = ktime_get_real_seconds(); |
| 283 | spin_lock(&afs_server_graveyard_lock); | 283 | spin_lock(&afs_server_graveyard_lock); |
| 284 | 284 | ||
| 285 | while (!list_empty(&afs_server_graveyard)) { | 285 | while (!list_empty(&afs_server_graveyard)) { |
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c index d7d8dd8c0b31..37b7c3b342a6 100644 --- a/fs/afs/vlocation.c +++ b/fs/afs/vlocation.c | |||
| @@ -340,7 +340,8 @@ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl) | |||
| 340 | struct afs_vlocation *xvl; | 340 | struct afs_vlocation *xvl; |
| 341 | 341 | ||
| 342 | /* wait at least 10 minutes before updating... */ | 342 | /* wait at least 10 minutes before updating... */ |
| 343 | vl->update_at = get_seconds() + afs_vlocation_update_timeout; | 343 | vl->update_at = ktime_get_real_seconds() + |
| 344 | afs_vlocation_update_timeout; | ||
| 344 | 345 | ||
| 345 | spin_lock(&afs_vlocation_updates_lock); | 346 | spin_lock(&afs_vlocation_updates_lock); |
| 346 | 347 | ||
| @@ -506,7 +507,7 @@ void afs_put_vlocation(struct afs_vlocation *vl) | |||
| 506 | if (atomic_read(&vl->usage) == 0) { | 507 | if (atomic_read(&vl->usage) == 0) { |
| 507 | _debug("buried"); | 508 | _debug("buried"); |
| 508 | list_move_tail(&vl->grave, &afs_vlocation_graveyard); | 509 | list_move_tail(&vl->grave, &afs_vlocation_graveyard); |
| 509 | vl->time_of_death = get_seconds(); | 510 | vl->time_of_death = ktime_get_real_seconds(); |
| 510 | queue_delayed_work(afs_wq, &afs_vlocation_reap, | 511 | queue_delayed_work(afs_wq, &afs_vlocation_reap, |
| 511 | afs_vlocation_timeout * HZ); | 512 | afs_vlocation_timeout * HZ); |
| 512 | 513 | ||
| @@ -543,11 +544,11 @@ static void afs_vlocation_reaper(struct work_struct *work) | |||
| 543 | LIST_HEAD(corpses); | 544 | LIST_HEAD(corpses); |
| 544 | struct afs_vlocation *vl; | 545 | struct afs_vlocation *vl; |
| 545 | unsigned long delay, expiry; | 546 | unsigned long delay, expiry; |
| 546 | time_t now; | 547 | time64_t now; |
| 547 | 548 | ||
| 548 | _enter(""); | 549 | _enter(""); |
| 549 | 550 | ||
| 550 | now = get_seconds(); | 551 | now = ktime_get_real_seconds(); |
| 551 | spin_lock(&afs_vlocation_graveyard_lock); | 552 | spin_lock(&afs_vlocation_graveyard_lock); |
| 552 | 553 | ||
| 553 | while (!list_empty(&afs_vlocation_graveyard)) { | 554 | while (!list_empty(&afs_vlocation_graveyard)) { |
| @@ -622,13 +623,13 @@ static void afs_vlocation_updater(struct work_struct *work) | |||
| 622 | { | 623 | { |
| 623 | struct afs_cache_vlocation vldb; | 624 | struct afs_cache_vlocation vldb; |
| 624 | struct afs_vlocation *vl, *xvl; | 625 | struct afs_vlocation *vl, *xvl; |
| 625 | time_t now; | 626 | time64_t now; |
| 626 | long timeout; | 627 | long timeout; |
| 627 | int ret; | 628 | int ret; |
| 628 | 629 | ||
| 629 | _enter(""); | 630 | _enter(""); |
| 630 | 631 | ||
| 631 | now = get_seconds(); | 632 | now = ktime_get_real_seconds(); |
| 632 | 633 | ||
| 633 | /* find a record to update */ | 634 | /* find a record to update */ |
| 634 | spin_lock(&afs_vlocation_updates_lock); | 635 | spin_lock(&afs_vlocation_updates_lock); |
| @@ -684,7 +685,8 @@ static void afs_vlocation_updater(struct work_struct *work) | |||
| 684 | 685 | ||
| 685 | /* and then reschedule */ | 686 | /* and then reschedule */ |
| 686 | _debug("reschedule"); | 687 | _debug("reschedule"); |
| 687 | vl->update_at = get_seconds() + afs_vlocation_update_timeout; | 688 | vl->update_at = ktime_get_real_seconds() + |
| 689 | afs_vlocation_update_timeout; | ||
| 688 | 690 | ||
| 689 | spin_lock(&afs_vlocation_updates_lock); | 691 | spin_lock(&afs_vlocation_updates_lock); |
| 690 | 692 | ||
diff --git a/fs/afs/write.c b/fs/afs/write.c index c83c1a0e851f..2d2fccd5044b 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c | |||
| @@ -84,10 +84,9 @@ void afs_put_writeback(struct afs_writeback *wb) | |||
| 84 | * partly or wholly fill a page that's under preparation for writing | 84 | * partly or wholly fill a page that's under preparation for writing |
| 85 | */ | 85 | */ |
| 86 | static int afs_fill_page(struct afs_vnode *vnode, struct key *key, | 86 | static int afs_fill_page(struct afs_vnode *vnode, struct key *key, |
| 87 | loff_t pos, struct page *page) | 87 | loff_t pos, unsigned int len, struct page *page) |
| 88 | { | 88 | { |
| 89 | struct afs_read *req; | 89 | struct afs_read *req; |
| 90 | loff_t i_size; | ||
| 91 | int ret; | 90 | int ret; |
| 92 | 91 | ||
| 93 | _enter(",,%llu", (unsigned long long)pos); | 92 | _enter(",,%llu", (unsigned long long)pos); |
| @@ -99,14 +98,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key, | |||
| 99 | 98 | ||
| 100 | atomic_set(&req->usage, 1); | 99 | atomic_set(&req->usage, 1); |
| 101 | req->pos = pos; | 100 | req->pos = pos; |
| 101 | req->len = len; | ||
| 102 | req->nr_pages = 1; | 102 | req->nr_pages = 1; |
| 103 | req->pages[0] = page; | 103 | req->pages[0] = page; |
| 104 | 104 | get_page(page); | |
| 105 | i_size = i_size_read(&vnode->vfs_inode); | ||
| 106 | if (pos + PAGE_SIZE > i_size) | ||
| 107 | req->len = i_size - pos; | ||
| 108 | else | ||
| 109 | req->len = PAGE_SIZE; | ||
| 110 | 105 | ||
| 111 | ret = afs_vnode_fetch_data(vnode, key, req); | 106 | ret = afs_vnode_fetch_data(vnode, key, req); |
| 112 | afs_put_read(req); | 107 | afs_put_read(req); |
| @@ -159,12 +154,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping, | |||
| 159 | kfree(candidate); | 154 | kfree(candidate); |
| 160 | return -ENOMEM; | 155 | return -ENOMEM; |
| 161 | } | 156 | } |
| 162 | *pagep = page; | ||
| 163 | /* page won't leak in error case: it eventually gets cleaned off LRU */ | ||
| 164 | 157 | ||
| 165 | if (!PageUptodate(page) && len != PAGE_SIZE) { | 158 | if (!PageUptodate(page) && len != PAGE_SIZE) { |
| 166 | ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page); | 159 | ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page); |
| 167 | if (ret < 0) { | 160 | if (ret < 0) { |
| 161 | unlock_page(page); | ||
| 162 | put_page(page); | ||
| 168 | kfree(candidate); | 163 | kfree(candidate); |
| 169 | _leave(" = %d [prep]", ret); | 164 | _leave(" = %d [prep]", ret); |
| 170 | return ret; | 165 | return ret; |
| @@ -172,6 +167,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping, | |||
| 172 | SetPageUptodate(page); | 167 | SetPageUptodate(page); |
| 173 | } | 168 | } |
| 174 | 169 | ||
| 170 | /* page won't leak in error case: it eventually gets cleaned off LRU */ | ||
| 171 | *pagep = page; | ||
| 172 | |||
| 175 | try_again: | 173 | try_again: |
| 176 | spin_lock(&vnode->writeback_lock); | 174 | spin_lock(&vnode->writeback_lock); |
| 177 | 175 | ||
| @@ -233,7 +231,7 @@ flush_conflicting_wb: | |||
| 233 | if (wb->state == AFS_WBACK_PENDING) | 231 | if (wb->state == AFS_WBACK_PENDING) |
| 234 | wb->state = AFS_WBACK_CONFLICTING; | 232 | wb->state = AFS_WBACK_CONFLICTING; |
| 235 | spin_unlock(&vnode->writeback_lock); | 233 | spin_unlock(&vnode->writeback_lock); |
| 236 | if (PageDirty(page)) { | 234 | if (clear_page_dirty_for_io(page)) { |
| 237 | ret = afs_write_back_from_locked_page(wb, page); | 235 | ret = afs_write_back_from_locked_page(wb, page); |
| 238 | if (ret < 0) { | 236 | if (ret < 0) { |
| 239 | afs_put_writeback(candidate); | 237 | afs_put_writeback(candidate); |
| @@ -257,7 +255,9 @@ int afs_write_end(struct file *file, struct address_space *mapping, | |||
| 257 | struct page *page, void *fsdata) | 255 | struct page *page, void *fsdata) |
| 258 | { | 256 | { |
| 259 | struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); | 257 | struct afs_vnode *vnode = AFS_FS_I(file_inode(file)); |
| 258 | struct key *key = file->private_data; | ||
| 260 | loff_t i_size, maybe_i_size; | 259 | loff_t i_size, maybe_i_size; |
| 260 | int ret; | ||
| 261 | 261 | ||
| 262 | _enter("{%x:%u},{%lx}", | 262 | _enter("{%x:%u},{%lx}", |
| 263 | vnode->fid.vid, vnode->fid.vnode, page->index); | 263 | vnode->fid.vid, vnode->fid.vnode, page->index); |
| @@ -273,6 +273,20 @@ int afs_write_end(struct file *file, struct address_space *mapping, | |||
| 273 | spin_unlock(&vnode->writeback_lock); | 273 | spin_unlock(&vnode->writeback_lock); |
| 274 | } | 274 | } |
| 275 | 275 | ||
| 276 | if (!PageUptodate(page)) { | ||
| 277 | if (copied < len) { | ||
| 278 | /* Try and load any missing data from the server. The | ||
| 279 | * unmarshalling routine will take care of clearing any | ||
| 280 | * bits that are beyond the EOF. | ||
| 281 | */ | ||
| 282 | ret = afs_fill_page(vnode, key, pos + copied, | ||
| 283 | len - copied, page); | ||
| 284 | if (ret < 0) | ||
| 285 | return ret; | ||
| 286 | } | ||
| 287 | SetPageUptodate(page); | ||
| 288 | } | ||
| 289 | |||
| 276 | set_page_dirty(page); | 290 | set_page_dirty(page); |
| 277 | if (PageDirty(page)) | 291 | if (PageDirty(page)) |
| 278 | _debug("dirtied"); | 292 | _debug("dirtied"); |
| @@ -307,10 +321,14 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error, | |||
| 307 | ASSERTCMP(pv.nr, ==, count); | 321 | ASSERTCMP(pv.nr, ==, count); |
| 308 | 322 | ||
| 309 | for (loop = 0; loop < count; loop++) { | 323 | for (loop = 0; loop < count; loop++) { |
| 310 | ClearPageUptodate(pv.pages[loop]); | 324 | struct page *page = pv.pages[loop]; |
| 325 | ClearPageUptodate(page); | ||
| 311 | if (error) | 326 | if (error) |
| 312 | SetPageError(pv.pages[loop]); | 327 | SetPageError(page); |
| 313 | end_page_writeback(pv.pages[loop]); | 328 | if (PageWriteback(page)) |
| 329 | end_page_writeback(page); | ||
| 330 | if (page->index >= first) | ||
| 331 | first = page->index + 1; | ||
| 314 | } | 332 | } |
| 315 | 333 | ||
| 316 | __pagevec_release(&pv); | 334 | __pagevec_release(&pv); |
| @@ -335,8 +353,6 @@ static int afs_write_back_from_locked_page(struct afs_writeback *wb, | |||
| 335 | _enter(",%lx", primary_page->index); | 353 | _enter(",%lx", primary_page->index); |
| 336 | 354 | ||
| 337 | count = 1; | 355 | count = 1; |
| 338 | if (!clear_page_dirty_for_io(primary_page)) | ||
| 339 | BUG(); | ||
| 340 | if (test_set_page_writeback(primary_page)) | 356 | if (test_set_page_writeback(primary_page)) |
| 341 | BUG(); | 357 | BUG(); |
| 342 | 358 | ||
| @@ -502,17 +518,17 @@ static int afs_writepages_region(struct address_space *mapping, | |||
| 502 | */ | 518 | */ |
| 503 | lock_page(page); | 519 | lock_page(page); |
| 504 | 520 | ||
| 505 | if (page->mapping != mapping) { | 521 | if (page->mapping != mapping || !PageDirty(page)) { |
| 506 | unlock_page(page); | 522 | unlock_page(page); |
| 507 | put_page(page); | 523 | put_page(page); |
| 508 | continue; | 524 | continue; |
| 509 | } | 525 | } |
| 510 | 526 | ||
| 511 | if (wbc->sync_mode != WB_SYNC_NONE) | 527 | if (PageWriteback(page)) { |
| 512 | wait_on_page_writeback(page); | ||
| 513 | |||
| 514 | if (PageWriteback(page) || !PageDirty(page)) { | ||
| 515 | unlock_page(page); | 528 | unlock_page(page); |
| 529 | if (wbc->sync_mode != WB_SYNC_NONE) | ||
| 530 | wait_on_page_writeback(page); | ||
| 531 | put_page(page); | ||
| 516 | continue; | 532 | continue; |
| 517 | } | 533 | } |
| 518 | 534 | ||
| @@ -523,6 +539,8 @@ static int afs_writepages_region(struct address_space *mapping, | |||
| 523 | wb->state = AFS_WBACK_WRITING; | 539 | wb->state = AFS_WBACK_WRITING; |
| 524 | spin_unlock(&wb->vnode->writeback_lock); | 540 | spin_unlock(&wb->vnode->writeback_lock); |
| 525 | 541 | ||
| 542 | if (!clear_page_dirty_for_io(page)) | ||
| 543 | BUG(); | ||
| 526 | ret = afs_write_back_from_locked_page(wb, page); | 544 | ret = afs_write_back_from_locked_page(wb, page); |
| 527 | unlock_page(page); | 545 | unlock_page(page); |
| 528 | put_page(page); | 546 | put_page(page); |
| @@ -746,6 +764,20 @@ out: | |||
| 746 | } | 764 | } |
| 747 | 765 | ||
| 748 | /* | 766 | /* |
| 767 | * Flush out all outstanding writes on a file opened for writing when it is | ||
| 768 | * closed. | ||
| 769 | */ | ||
| 770 | int afs_flush(struct file *file, fl_owner_t id) | ||
| 771 | { | ||
| 772 | _enter(""); | ||
| 773 | |||
| 774 | if ((file->f_mode & FMODE_WRITE) == 0) | ||
| 775 | return 0; | ||
| 776 | |||
| 777 | return vfs_fsync(file, 0); | ||
| 778 | } | ||
| 779 | |||
| 780 | /* | ||
| 749 | * notification that a previously read-only page is about to become writable | 781 | * notification that a previously read-only page is about to become writable |
| 750 | * - if it returns an error, the caller will deliver a bus error signal | 782 | * - if it returns an error, the caller will deliver a bus error signal |
| 751 | */ | 783 | */ |
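Hooking afs_flush() into ->flush (declared in the fs/afs/internal.h hunk above) means each close(2) of a write-opened descriptor synchronously writes back dirty data via vfs_fsync(), so writeback errors surface as the return value of close() rather than being dropped. The userspace-visible effect (generic demo, not AFS-specific):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("demo.txt", O_WRONLY | O_CREAT, 0644);
            if (fd < 0)
                    return 1;
            write(fd, "x", 1);
            /* With a ->flush hook like the one added above, this close()
             * flushes the dirty page and reports any I/O error instead
             * of silently discarding it. */
            if (close(fd) < 0)
                    perror("close");
            return 0;
    }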
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 28e81922a21c..8df797432740 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
| @@ -1714,7 +1714,8 @@ static int __process_pages_contig(struct address_space *mapping, | |||
| 1714 | * can we find nothing at @index. | 1714 | * can we find nothing at @index. |
| 1715 | */ | 1715 | */ |
| 1716 | ASSERT(page_ops & PAGE_LOCK); | 1716 | ASSERT(page_ops & PAGE_LOCK); |
| 1717 | return ret; | 1717 | err = -EAGAIN; |
| 1718 | goto out; | ||
| 1718 | } | 1719 | } |
| 1719 | 1720 | ||
| 1720 | for (i = 0; i < ret; i++) { | 1721 | for (i = 0; i < ret; i++) { |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index c40060cc481f..231503935652 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
| @@ -6709,6 +6709,20 @@ static noinline int uncompress_inline(struct btrfs_path *path, | |||
| 6709 | max_size = min_t(unsigned long, PAGE_SIZE, max_size); | 6709 | max_size = min_t(unsigned long, PAGE_SIZE, max_size); |
| 6710 | ret = btrfs_decompress(compress_type, tmp, page, | 6710 | ret = btrfs_decompress(compress_type, tmp, page, |
| 6711 | extent_offset, inline_size, max_size); | 6711 | extent_offset, inline_size, max_size); |
| 6712 | |||
| 6713 | /* | ||
| 6714 | * decompression code contains a memset to fill in any space between the end | ||
| 6715 | * of the uncompressed data and the end of max_size in case the decompressed | ||
| 6716 | * data ends up shorter than ram_bytes. That doesn't cover the hole between | ||
| 6717 | * the end of an inline extent and the beginning of the next block, so we | ||
| 6718 | * cover that region here. | ||
| 6719 | */ | ||
| 6720 | |||
| 6721 | if (max_size + pg_offset < PAGE_SIZE) { | ||
| 6722 | char *map = kmap(page); | ||
| 6723 | memset(map + pg_offset + max_size, 0, PAGE_SIZE - max_size - pg_offset); | ||
| 6724 | kunmap(page); | ||
| 6725 | } | ||
| 6712 | kfree(tmp); | 6726 | kfree(tmp); |
| 6713 | return ret; | 6727 | return ret; |
| 6714 | } | 6728 | } |
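The btrfs hunk above zeroes the span between the end of the decompressed inline data (pg_offset + max_size) and the end of the page, which the decompressor's own memset does not reach; without it, stale page contents could be exposed to readers. The arithmetic is simply "clear from end-of-data to end-of-buffer" (standalone illustration with invented sizes):

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SZ 16      /* stand-in for PAGE_SIZE */

    int main(void)
    {
            char page[PAGE_SZ];
            size_t pg_offset = 2, max_size = 9;

            memset(page, '?', sizeof(page));         /* stale contents */
            memset(page + pg_offset, 'D', max_size); /* decompressed data */

            /* The fix: zero the tail beyond the inline extent. */
            if (max_size + pg_offset < PAGE_SZ)
                    memset(page + pg_offset + max_size, 0,
                           PAGE_SZ - max_size - pg_offset);

            for (size_t i = 0; i < sizeof(page); i++)
                    printf("%c", page[i] ? page[i] : '0');
            printf("\n");
            return 0;
    }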
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index 02a7a9286449..6d6eca394d4d 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c | |||
| @@ -327,7 +327,6 @@ EXPORT_SYMBOL(fscrypt_decrypt_page); | |||
| 327 | static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) | 327 | static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) |
| 328 | { | 328 | { |
| 329 | struct dentry *dir; | 329 | struct dentry *dir; |
| 330 | struct fscrypt_info *ci; | ||
| 331 | int dir_has_key, cached_with_key; | 330 | int dir_has_key, cached_with_key; |
| 332 | 331 | ||
| 333 | if (flags & LOOKUP_RCU) | 332 | if (flags & LOOKUP_RCU) |
| @@ -339,18 +338,11 @@ static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags) | |||
| 339 | return 0; | 338 | return 0; |
| 340 | } | 339 | } |
| 341 | 340 | ||
| 342 | ci = d_inode(dir)->i_crypt_info; | ||
| 343 | if (ci && ci->ci_keyring_key && | ||
| 344 | (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | | ||
| 345 | (1 << KEY_FLAG_REVOKED) | | ||
| 346 | (1 << KEY_FLAG_DEAD)))) | ||
| 347 | ci = NULL; | ||
| 348 | |||
| 349 | /* this should eventually be an flag in d_flags */ | 341 | /* this should eventually be an flag in d_flags */ |
| 350 | spin_lock(&dentry->d_lock); | 342 | spin_lock(&dentry->d_lock); |
| 351 | cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY; | 343 | cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY; |
| 352 | spin_unlock(&dentry->d_lock); | 344 | spin_unlock(&dentry->d_lock); |
| 353 | dir_has_key = (ci != NULL); | 345 | dir_has_key = (d_inode(dir)->i_crypt_info != NULL); |
| 354 | dput(dir); | 346 | dput(dir); |
| 355 | 347 | ||
| 356 | /* | 348 | /* |
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 13052b85c393..37b49894c762 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c | |||
| @@ -350,7 +350,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname, | |||
| 350 | fname->disk_name.len = iname->len; | 350 | fname->disk_name.len = iname->len; |
| 351 | return 0; | 351 | return 0; |
| 352 | } | 352 | } |
| 353 | ret = fscrypt_get_crypt_info(dir); | 353 | ret = fscrypt_get_encryption_info(dir); |
| 354 | if (ret && ret != -EOPNOTSUPP) | 354 | if (ret && ret != -EOPNOTSUPP) |
| 355 | return ret; | 355 | return ret; |
| 356 | 356 | ||
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index fdbb8af32eaf..e39696e64494 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h | |||
| @@ -67,7 +67,6 @@ struct fscrypt_info { | |||
| 67 | u8 ci_filename_mode; | 67 | u8 ci_filename_mode; |
| 68 | u8 ci_flags; | 68 | u8 ci_flags; |
| 69 | struct crypto_skcipher *ci_ctfm; | 69 | struct crypto_skcipher *ci_ctfm; |
| 70 | struct key *ci_keyring_key; | ||
| 71 | u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; | 70 | u8 ci_master_key[FS_KEY_DESCRIPTOR_SIZE]; |
| 72 | }; | 71 | }; |
| 73 | 72 | ||
| @@ -101,7 +100,4 @@ extern int fscrypt_do_page_crypto(const struct inode *inode, | |||
| 101 | extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx, | 100 | extern struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx, |
| 102 | gfp_t gfp_flags); | 101 | gfp_t gfp_flags); |
| 103 | 102 | ||
| 104 | /* keyinfo.c */ | ||
| 105 | extern int fscrypt_get_crypt_info(struct inode *); | ||
| 106 | |||
| 107 | #endif /* _FSCRYPT_PRIVATE_H */ | 103 | #endif /* _FSCRYPT_PRIVATE_H */ |
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index d5d896fa5a71..8cdfddce2b34 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c | |||
| @@ -95,6 +95,7 @@ static int validate_user_key(struct fscrypt_info *crypt_info, | |||
| 95 | kfree(description); | 95 | kfree(description); |
| 96 | if (IS_ERR(keyring_key)) | 96 | if (IS_ERR(keyring_key)) |
| 97 | return PTR_ERR(keyring_key); | 97 | return PTR_ERR(keyring_key); |
| 98 | down_read(&keyring_key->sem); | ||
| 98 | 99 | ||
| 99 | if (keyring_key->type != &key_type_logon) { | 100 | if (keyring_key->type != &key_type_logon) { |
| 100 | printk_once(KERN_WARNING | 101 | printk_once(KERN_WARNING |
| @@ -102,11 +103,9 @@ static int validate_user_key(struct fscrypt_info *crypt_info, | |||
| 102 | res = -ENOKEY; | 103 | res = -ENOKEY; |
| 103 | goto out; | 104 | goto out; |
| 104 | } | 105 | } |
| 105 | down_read(&keyring_key->sem); | ||
| 106 | ukp = user_key_payload_locked(keyring_key); | 106 | ukp = user_key_payload_locked(keyring_key); |
| 107 | if (ukp->datalen != sizeof(struct fscrypt_key)) { | 107 | if (ukp->datalen != sizeof(struct fscrypt_key)) { |
| 108 | res = -EINVAL; | 108 | res = -EINVAL; |
| 109 | up_read(&keyring_key->sem); | ||
| 110 | goto out; | 109 | goto out; |
| 111 | } | 110 | } |
| 112 | master_key = (struct fscrypt_key *)ukp->data; | 111 | master_key = (struct fscrypt_key *)ukp->data; |
| @@ -117,17 +116,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info, | |||
| 117 | "%s: key size incorrect: %d\n", | 116 | "%s: key size incorrect: %d\n", |
| 118 | __func__, master_key->size); | 117 | __func__, master_key->size); |
| 119 | res = -ENOKEY; | 118 | res = -ENOKEY; |
| 120 | up_read(&keyring_key->sem); | ||
| 121 | goto out; | 119 | goto out; |
| 122 | } | 120 | } |
| 123 | res = derive_key_aes(ctx->nonce, master_key->raw, raw_key); | 121 | res = derive_key_aes(ctx->nonce, master_key->raw, raw_key); |
| 124 | up_read(&keyring_key->sem); | ||
| 125 | if (res) | ||
| 126 | goto out; | ||
| 127 | |||
| 128 | crypt_info->ci_keyring_key = keyring_key; | ||
| 129 | return 0; | ||
| 130 | out: | 122 | out: |
| 123 | up_read(&keyring_key->sem); | ||
| 131 | key_put(keyring_key); | 124 | key_put(keyring_key); |
| 132 | return res; | 125 | return res; |
| 133 | } | 126 | } |
| @@ -169,12 +162,11 @@ static void put_crypt_info(struct fscrypt_info *ci) | |||
| 169 | if (!ci) | 162 | if (!ci) |
| 170 | return; | 163 | return; |
| 171 | 164 | ||
| 172 | key_put(ci->ci_keyring_key); | ||
| 173 | crypto_free_skcipher(ci->ci_ctfm); | 165 | crypto_free_skcipher(ci->ci_ctfm); |
| 174 | kmem_cache_free(fscrypt_info_cachep, ci); | 166 | kmem_cache_free(fscrypt_info_cachep, ci); |
| 175 | } | 167 | } |
| 176 | 168 | ||
| 177 | int fscrypt_get_crypt_info(struct inode *inode) | 169 | int fscrypt_get_encryption_info(struct inode *inode) |
| 178 | { | 170 | { |
| 179 | struct fscrypt_info *crypt_info; | 171 | struct fscrypt_info *crypt_info; |
| 180 | struct fscrypt_context ctx; | 172 | struct fscrypt_context ctx; |
| @@ -184,21 +176,15 @@ int fscrypt_get_crypt_info(struct inode *inode) | |||
| 184 | u8 *raw_key = NULL; | 176 | u8 *raw_key = NULL; |
| 185 | int res; | 177 | int res; |
| 186 | 178 | ||
| 179 | if (inode->i_crypt_info) | ||
| 180 | return 0; | ||
| 181 | |||
| 187 | res = fscrypt_initialize(inode->i_sb->s_cop->flags); | 182 | res = fscrypt_initialize(inode->i_sb->s_cop->flags); |
| 188 | if (res) | 183 | if (res) |
| 189 | return res; | 184 | return res; |
| 190 | 185 | ||
| 191 | if (!inode->i_sb->s_cop->get_context) | 186 | if (!inode->i_sb->s_cop->get_context) |
| 192 | return -EOPNOTSUPP; | 187 | return -EOPNOTSUPP; |
| 193 | retry: | ||
| 194 | crypt_info = ACCESS_ONCE(inode->i_crypt_info); | ||
| 195 | if (crypt_info) { | ||
| 196 | if (!crypt_info->ci_keyring_key || | ||
| 197 | key_validate(crypt_info->ci_keyring_key) == 0) | ||
| 198 | return 0; | ||
| 199 | fscrypt_put_encryption_info(inode, crypt_info); | ||
| 200 | goto retry; | ||
| 201 | } | ||
| 202 | 188 | ||
| 203 | res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); | 189 | res = inode->i_sb->s_cop->get_context(inode, &ctx, sizeof(ctx)); |
| 204 | if (res < 0) { | 190 | if (res < 0) { |
| @@ -229,7 +215,6 @@ retry: | |||
| 229 | crypt_info->ci_data_mode = ctx.contents_encryption_mode; | 215 | crypt_info->ci_data_mode = ctx.contents_encryption_mode; |
| 230 | crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; | 216 | crypt_info->ci_filename_mode = ctx.filenames_encryption_mode; |
| 231 | crypt_info->ci_ctfm = NULL; | 217 | crypt_info->ci_ctfm = NULL; |
| 232 | crypt_info->ci_keyring_key = NULL; | ||
| 233 | memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, | 218 | memcpy(crypt_info->ci_master_key, ctx.master_key_descriptor, |
| 234 | sizeof(crypt_info->ci_master_key)); | 219 | sizeof(crypt_info->ci_master_key)); |
| 235 | 220 | ||
| @@ -273,14 +258,8 @@ retry: | |||
| 273 | if (res) | 258 | if (res) |
| 274 | goto out; | 259 | goto out; |
| 275 | 260 | ||
| 276 | kzfree(raw_key); | 261 | if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL) |
| 277 | raw_key = NULL; | 262 | crypt_info = NULL; |
| 278 | if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) { | ||
| 279 | put_crypt_info(crypt_info); | ||
| 280 | goto retry; | ||
| 281 | } | ||
| 282 | return 0; | ||
| 283 | |||
| 284 | out: | 263 | out: |
| 285 | if (res == -ENOKEY) | 264 | if (res == -ENOKEY) |
| 286 | res = 0; | 265 | res = 0; |
| @@ -288,6 +267,7 @@ out: | |||
| 288 | kzfree(raw_key); | 267 | kzfree(raw_key); |
| 289 | return res; | 268 | return res; |
| 290 | } | 269 | } |
| 270 | EXPORT_SYMBOL(fscrypt_get_encryption_info); | ||
| 291 | 271 | ||
| 292 | void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci) | 272 | void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci) |
| 293 | { | 273 | { |
| @@ -305,17 +285,3 @@ void fscrypt_put_encryption_info(struct inode *inode, struct fscrypt_info *ci) | |||
| 305 | put_crypt_info(ci); | 285 | put_crypt_info(ci); |
| 306 | } | 286 | } |
| 307 | EXPORT_SYMBOL(fscrypt_put_encryption_info); | 287 | EXPORT_SYMBOL(fscrypt_put_encryption_info); |
| 308 | |||
| 309 | int fscrypt_get_encryption_info(struct inode *inode) | ||
| 310 | { | ||
| 311 | struct fscrypt_info *ci = inode->i_crypt_info; | ||
| 312 | |||
| 313 | if (!ci || | ||
| 314 | (ci->ci_keyring_key && | ||
| 315 | (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) | | ||
| 316 | (1 << KEY_FLAG_REVOKED) | | ||
| 317 | (1 << KEY_FLAG_DEAD))))) | ||
| 318 | return fscrypt_get_crypt_info(inode); | ||
| 319 | return 0; | ||
| 320 | } | ||
| 321 | EXPORT_SYMBOL(fscrypt_get_encryption_info); | ||
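With the keyring revalidation gone, fscrypt_get_encryption_info() above returns immediately when ->i_crypt_info is already populated and otherwise installs the freshly built context with one cmpxchg(); the loser of a racing setup keeps the winner's copy and frees its own. The once-only-install idiom in portable form (userspace analog using a GCC/Clang builtin, not the kernel macro):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct crypt_info { int dummy; };

    static struct crypt_info *i_crypt_info;    /* one slot per "inode" */

    static void setup(void)
    {
            struct crypt_info *ci, *expected = NULL;

            if (i_crypt_info)       /* already initialised */
                    return;

            ci = calloc(1, sizeof(*ci));
            /* Install only if the slot is still empty; if another thread
             * won the race, keep theirs and free ours. */
            if (!__atomic_compare_exchange_n(&i_crypt_info, &expected, ci,
                                             false, __ATOMIC_ACQ_REL,
                                             __ATOMIC_ACQUIRE))
                    free(ci);
    }

    int main(void)
    {
            setup();
            setup();        /* second call is a no-op */
            printf("installed: %p\n", (void *)i_crypt_info);
            return 0;
    }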
diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index 14b76da71269..4908906d54d5 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c | |||
| @@ -33,17 +33,10 @@ static int create_encryption_context_from_policy(struct inode *inode, | |||
| 33 | const struct fscrypt_policy *policy) | 33 | const struct fscrypt_policy *policy) |
| 34 | { | 34 | { |
| 35 | struct fscrypt_context ctx; | 35 | struct fscrypt_context ctx; |
| 36 | int res; | ||
| 37 | 36 | ||
| 38 | if (!inode->i_sb->s_cop->set_context) | 37 | if (!inode->i_sb->s_cop->set_context) |
| 39 | return -EOPNOTSUPP; | 38 | return -EOPNOTSUPP; |
| 40 | 39 | ||
| 41 | if (inode->i_sb->s_cop->prepare_context) { | ||
| 42 | res = inode->i_sb->s_cop->prepare_context(inode); | ||
| 43 | if (res) | ||
| 44 | return res; | ||
| 45 | } | ||
| 46 | |||
| 47 | ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; | 40 | ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1; |
| 48 | memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, | 41 | memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, |
| 49 | FS_KEY_DESCRIPTOR_SIZE); | 42 | FS_KEY_DESCRIPTOR_SIZE); |
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index 30a9f210d1e3..375fb1c05d49 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c | |||
| @@ -1169,10 +1169,9 @@ static int ext4_finish_convert_inline_dir(handle_t *handle, | |||
| 1169 | set_buffer_uptodate(dir_block); | 1169 | set_buffer_uptodate(dir_block); |
| 1170 | err = ext4_handle_dirty_dirent_node(handle, inode, dir_block); | 1170 | err = ext4_handle_dirty_dirent_node(handle, inode, dir_block); |
| 1171 | if (err) | 1171 | if (err) |
| 1172 | goto out; | 1172 | return err; |
| 1173 | set_buffer_verified(dir_block); | 1173 | set_buffer_verified(dir_block); |
| 1174 | out: | 1174 | return ext4_mark_inode_dirty(handle, inode); |
| 1175 | return err; | ||
| 1176 | } | 1175 | } |
| 1177 | 1176 | ||
| 1178 | static int ext4_convert_inline_data_nolock(handle_t *handle, | 1177 | static int ext4_convert_inline_data_nolock(handle_t *handle, |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 7385e6a6b6cb..4247d8d25687 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -5400,7 +5400,7 @@ int ext4_getattr(const struct path *path, struct kstat *stat, | |||
| 5400 | * If there is inline data in the inode, the inode will normally not | 5400 | * If there is inline data in the inode, the inode will normally not |
| 5401 | * have data blocks allocated (it may have an external xattr block). | 5401 | * have data blocks allocated (it may have an external xattr block). |
| 5402 | * Report at least one sector for such files, so tools like tar, rsync, | 5402 | * Report at least one sector for such files, so tools like tar, rsync, |
| 5403 | * others doen't incorrectly think the file is completely sparse. | 5403 | * others don't incorrectly think the file is completely sparse. |
| 5404 | */ | 5404 | */ |
| 5405 | if (unlikely(ext4_has_inline_data(inode))) | 5405 | if (unlikely(ext4_has_inline_data(inode))) |
| 5406 | stat->blocks += (stat->size + 511) >> 9; | 5406 | stat->blocks += (stat->size + 511) >> 9; |
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 578f8c33fb44..c992ef2c2f94 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c | |||
| @@ -511,7 +511,7 @@ mext_check_arguments(struct inode *orig_inode, | |||
| 511 | if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) != | 511 | if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) != |
| 512 | (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) { | 512 | (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) { |
| 513 | ext4_debug("ext4 move extent: orig and donor's start " | 513 | ext4_debug("ext4 move extent: orig and donor's start " |
| 514 | "offset are not alligned [ino:orig %lu, donor %lu]\n", | 514 | "offsets are not aligned [ino:orig %lu, donor %lu]\n", |
| 515 | orig_inode->i_ino, donor_inode->i_ino); | 515 | orig_inode->i_ino, donor_inode->i_ino); |
| 516 | return -EINVAL; | 516 | return -EINVAL; |
| 517 | } | 517 | } |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 2e03a0a88d92..a9448db1cf7e 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
| @@ -1120,17 +1120,16 @@ static int ext4_get_context(struct inode *inode, void *ctx, size_t len) | |||
| 1120 | EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len); | 1120 | EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, ctx, len); |
| 1121 | } | 1121 | } |
| 1122 | 1122 | ||
| 1123 | static int ext4_prepare_context(struct inode *inode) | ||
| 1124 | { | ||
| 1125 | return ext4_convert_inline_data(inode); | ||
| 1126 | } | ||
| 1127 | |||
| 1128 | static int ext4_set_context(struct inode *inode, const void *ctx, size_t len, | 1123 | static int ext4_set_context(struct inode *inode, const void *ctx, size_t len, |
| 1129 | void *fs_data) | 1124 | void *fs_data) |
| 1130 | { | 1125 | { |
| 1131 | handle_t *handle = fs_data; | 1126 | handle_t *handle = fs_data; |
| 1132 | int res, res2, retries = 0; | 1127 | int res, res2, retries = 0; |
| 1133 | 1128 | ||
| 1129 | res = ext4_convert_inline_data(inode); | ||
| 1130 | if (res) | ||
| 1131 | return res; | ||
| 1132 | |||
| 1134 | /* | 1133 | /* |
| 1135 | * If a journal handle was specified, then the encryption context is | 1134 | * If a journal handle was specified, then the encryption context is |
| 1136 | * being set on a new inode via inheritance and is part of a larger | 1135 | * being set on a new inode via inheritance and is part of a larger |
| @@ -1196,7 +1195,6 @@ static unsigned ext4_max_namelen(struct inode *inode) | |||
| 1196 | static const struct fscrypt_operations ext4_cryptops = { | 1195 | static const struct fscrypt_operations ext4_cryptops = { |
| 1197 | .key_prefix = "ext4:", | 1196 | .key_prefix = "ext4:", |
| 1198 | .get_context = ext4_get_context, | 1197 | .get_context = ext4_get_context, |
| 1199 | .prepare_context = ext4_prepare_context, | ||
| 1200 | .set_context = ext4_set_context, | 1198 | .set_context = ext4_set_context, |
| 1201 | .dummy_context = ext4_dummy_context, | 1199 | .dummy_context = ext4_dummy_context, |
| 1202 | .is_encrypted = ext4_encrypted_inode, | 1200 | .is_encrypted = ext4_encrypted_inode, |
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 67636acf7624..996e7900d4c8 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c | |||
| @@ -131,31 +131,26 @@ static __le32 ext4_xattr_block_csum(struct inode *inode, | |||
| 131 | } | 131 | } |
| 132 | 132 | ||
| 133 | static int ext4_xattr_block_csum_verify(struct inode *inode, | 133 | static int ext4_xattr_block_csum_verify(struct inode *inode, |
| 134 | sector_t block_nr, | 134 | struct buffer_head *bh) |
| 135 | struct ext4_xattr_header *hdr) | ||
| 136 | { | 135 | { |
| 137 | if (ext4_has_metadata_csum(inode->i_sb) && | 136 | struct ext4_xattr_header *hdr = BHDR(bh); |
| 138 | (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr))) | 137 | int ret = 1; |
| 139 | return 0; | ||
| 140 | return 1; | ||
| 141 | } | ||
| 142 | |||
| 143 | static void ext4_xattr_block_csum_set(struct inode *inode, | ||
| 144 | sector_t block_nr, | ||
| 145 | struct ext4_xattr_header *hdr) | ||
| 146 | { | ||
| 147 | if (!ext4_has_metadata_csum(inode->i_sb)) | ||
| 148 | return; | ||
| 149 | 138 | ||
| 150 | hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr); | 139 | if (ext4_has_metadata_csum(inode->i_sb)) { |
| 140 | lock_buffer(bh); | ||
| 141 | ret = (hdr->h_checksum == ext4_xattr_block_csum(inode, | ||
| 142 | bh->b_blocknr, hdr)); | ||
| 143 | unlock_buffer(bh); | ||
| 144 | } | ||
| 145 | return ret; | ||
| 151 | } | 146 | } |
| 152 | 147 | ||
| 153 | static inline int ext4_handle_dirty_xattr_block(handle_t *handle, | 148 | static void ext4_xattr_block_csum_set(struct inode *inode, |
| 154 | struct inode *inode, | 149 | struct buffer_head *bh) |
| 155 | struct buffer_head *bh) | ||
| 156 | { | 150 | { |
| 157 | ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh)); | 151 | if (ext4_has_metadata_csum(inode->i_sb)) |
| 158 | return ext4_handle_dirty_metadata(handle, inode, bh); | 152 | BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode, |
| 153 | bh->b_blocknr, BHDR(bh)); | ||
| 159 | } | 154 | } |
| 160 | 155 | ||
| 161 | static inline const struct xattr_handler * | 156 | static inline const struct xattr_handler * |
| @@ -233,7 +228,7 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh) | |||
| 233 | if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || | 228 | if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || |
| 234 | BHDR(bh)->h_blocks != cpu_to_le32(1)) | 229 | BHDR(bh)->h_blocks != cpu_to_le32(1)) |
| 235 | return -EFSCORRUPTED; | 230 | return -EFSCORRUPTED; |
| 236 | if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh))) | 231 | if (!ext4_xattr_block_csum_verify(inode, bh)) |
| 237 | return -EFSBADCRC; | 232 | return -EFSBADCRC; |
| 238 | error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size, | 233 | error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size, |
| 239 | bh->b_data); | 234 | bh->b_data); |
| @@ -618,23 +613,22 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode, | |||
| 618 | } | 613 | } |
| 619 | } | 614 | } |
| 620 | 615 | ||
| 616 | ext4_xattr_block_csum_set(inode, bh); | ||
| 621 | /* | 617 | /* |
| 622 | * Beware of this ugliness: Releasing of xattr block references | 618 | * Beware of this ugliness: Releasing of xattr block references |
| 623 | * from different inodes can race and so we have to protect | 619 | * from different inodes can race and so we have to protect |
| 624 | * from a race where someone else frees the block (and releases | 620 | * from a race where someone else frees the block (and releases |
| 625 | * its journal_head) before we are done dirtying the buffer. In | 621 | * its journal_head) before we are done dirtying the buffer. In |
| 626 | * nojournal mode this race is harmless and we actually cannot | 622 | * nojournal mode this race is harmless and we actually cannot |
| 627 | * call ext4_handle_dirty_xattr_block() with locked buffer as | 623 | * call ext4_handle_dirty_metadata() with locked buffer as |
| 628 | * that function can call sync_dirty_buffer() so for that case | 624 | * that function can call sync_dirty_buffer() so for that case |
| 629 | * we handle the dirtying after unlocking the buffer. | 625 | * we handle the dirtying after unlocking the buffer. |
| 630 | */ | 626 | */ |
| 631 | if (ext4_handle_valid(handle)) | 627 | if (ext4_handle_valid(handle)) |
| 632 | error = ext4_handle_dirty_xattr_block(handle, inode, | 628 | error = ext4_handle_dirty_metadata(handle, inode, bh); |
| 633 | bh); | ||
| 634 | unlock_buffer(bh); | 629 | unlock_buffer(bh); |
| 635 | if (!ext4_handle_valid(handle)) | 630 | if (!ext4_handle_valid(handle)) |
| 636 | error = ext4_handle_dirty_xattr_block(handle, inode, | 631 | error = ext4_handle_dirty_metadata(handle, inode, bh); |
| 637 | bh); | ||
| 638 | if (IS_SYNC(inode)) | 632 | if (IS_SYNC(inode)) |
| 639 | ext4_handle_sync(handle); | 633 | ext4_handle_sync(handle); |
| 640 | dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1)); | 634 | dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1)); |
| @@ -863,13 +857,14 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode, | |||
| 863 | ext4_xattr_cache_insert(ext4_mb_cache, | 857 | ext4_xattr_cache_insert(ext4_mb_cache, |
| 864 | bs->bh); | 858 | bs->bh); |
| 865 | } | 859 | } |
| 860 | ext4_xattr_block_csum_set(inode, bs->bh); | ||
| 866 | unlock_buffer(bs->bh); | 861 | unlock_buffer(bs->bh); |
| 867 | if (error == -EFSCORRUPTED) | 862 | if (error == -EFSCORRUPTED) |
| 868 | goto bad_block; | 863 | goto bad_block; |
| 869 | if (!error) | 864 | if (!error) |
| 870 | error = ext4_handle_dirty_xattr_block(handle, | 865 | error = ext4_handle_dirty_metadata(handle, |
| 871 | inode, | 866 | inode, |
| 872 | bs->bh); | 867 | bs->bh); |
| 873 | if (error) | 868 | if (error) |
| 874 | goto cleanup; | 869 | goto cleanup; |
| 875 | goto inserted; | 870 | goto inserted; |
| @@ -967,10 +962,11 @@ inserted: | |||
| 967 | ce->e_reusable = 0; | 962 | ce->e_reusable = 0; |
| 968 | ea_bdebug(new_bh, "reusing; refcount now=%d", | 963 | ea_bdebug(new_bh, "reusing; refcount now=%d", |
| 969 | ref); | 964 | ref); |
| 965 | ext4_xattr_block_csum_set(inode, new_bh); | ||
| 970 | unlock_buffer(new_bh); | 966 | unlock_buffer(new_bh); |
| 971 | error = ext4_handle_dirty_xattr_block(handle, | 967 | error = ext4_handle_dirty_metadata(handle, |
| 972 | inode, | 968 | inode, |
| 973 | new_bh); | 969 | new_bh); |
| 974 | if (error) | 970 | if (error) |
| 975 | goto cleanup_dquot; | 971 | goto cleanup_dquot; |
| 976 | } | 972 | } |
| @@ -1020,11 +1016,12 @@ getblk_failed: | |||
| 1020 | goto getblk_failed; | 1016 | goto getblk_failed; |
| 1021 | } | 1017 | } |
| 1022 | memcpy(new_bh->b_data, s->base, new_bh->b_size); | 1018 | memcpy(new_bh->b_data, s->base, new_bh->b_size); |
| 1019 | ext4_xattr_block_csum_set(inode, new_bh); | ||
| 1023 | set_buffer_uptodate(new_bh); | 1020 | set_buffer_uptodate(new_bh); |
| 1024 | unlock_buffer(new_bh); | 1021 | unlock_buffer(new_bh); |
| 1025 | ext4_xattr_cache_insert(ext4_mb_cache, new_bh); | 1022 | ext4_xattr_cache_insert(ext4_mb_cache, new_bh); |
| 1026 | error = ext4_handle_dirty_xattr_block(handle, | 1023 | error = ext4_handle_dirty_metadata(handle, inode, |
| 1027 | inode, new_bh); | 1024 | new_bh); |
| 1028 | if (error) | 1025 | if (error) |
| 1029 | goto cleanup; | 1026 | goto cleanup; |
| 1030 | } | 1027 | } |
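The ext4 hunks above fold the old ext4_handle_dirty_xattr_block() helper into ext4_handle_dirty_metadata() plus an explicit ext4_xattr_block_csum_set() call made while the buffer is still locked. A minimal sketch of the journal-vs-nojournal dirtying pattern the first hunk's comment describes (illustrative only; error handling trimmed):

    lock_buffer(bh);
    /* ... modify the xattr block, set its checksum ... */
    if (ext4_handle_valid(handle))
            /* journalled: dirtying while locked is fine */
            error = ext4_handle_dirty_metadata(handle, inode, bh);
    unlock_buffer(bh);
    if (!ext4_handle_valid(handle))
            /* nojournal: may call sync_dirty_buffer(), so only after unlock */
            error = ext4_handle_dirty_metadata(handle, inode, bh);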
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index a77df377e2e8..ee2d0a485fc3 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c | |||
| @@ -196,6 +196,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi) | |||
| 196 | si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS); | 196 | si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS); |
| 197 | si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE; | 197 | si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE; |
| 198 | si->base_mem += NM_I(sbi)->nat_blocks / 8; | 198 | si->base_mem += NM_I(sbi)->nat_blocks / 8; |
| 199 | si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short); | ||
| 199 | 200 | ||
| 200 | get_cache: | 201 | get_cache: |
| 201 | si->cache_mem = 0; | 202 | si->cache_mem = 0; |
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 4650c9b85de7..8d5c62b07b28 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c | |||
| @@ -750,7 +750,7 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, | |||
| 750 | dentry_blk = page_address(page); | 750 | dentry_blk = page_address(page); |
| 751 | bit_pos = dentry - dentry_blk->dentry; | 751 | bit_pos = dentry - dentry_blk->dentry; |
| 752 | for (i = 0; i < slots; i++) | 752 | for (i = 0; i < slots; i++) |
| 753 | clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); | 753 | __clear_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap); |
| 754 | 754 | ||
| 755 | /* Let's check and deallocate this dentry page */ | 755 | /* Let's check and deallocate this dentry page */ |
| 756 | bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, | 756 | bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap, |
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index e849f83d6114..0a6e115562f6 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
| @@ -561,6 +561,8 @@ struct f2fs_nm_info { | |||
| 561 | struct mutex build_lock; /* lock for build free nids */ | 561 | struct mutex build_lock; /* lock for build free nids */ |
| 562 | unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE]; | 562 | unsigned char (*free_nid_bitmap)[NAT_ENTRY_BITMAP_SIZE]; |
| 563 | unsigned char *nat_block_bitmap; | 563 | unsigned char *nat_block_bitmap; |
| 564 | unsigned short *free_nid_count; /* free nid count of NAT block */ | ||
| 565 | spinlock_t free_nid_lock; /* protect updating of nid count */ | ||
| 564 | 566 | ||
| 565 | /* for checkpoint */ | 567 | /* for checkpoint */ |
| 566 | char *nat_bitmap; /* NAT bitmap pointer */ | 568 | char *nat_bitmap; /* NAT bitmap pointer */ |
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 94967171dee8..481aa8dc79f4 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c | |||
| @@ -338,9 +338,6 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, | |||
| 338 | set_nat_flag(e, IS_CHECKPOINTED, false); | 338 | set_nat_flag(e, IS_CHECKPOINTED, false); |
| 339 | __set_nat_cache_dirty(nm_i, e); | 339 | __set_nat_cache_dirty(nm_i, e); |
| 340 | 340 | ||
| 341 | if (enabled_nat_bits(sbi, NULL) && new_blkaddr == NEW_ADDR) | ||
| 342 | clear_bit_le(NAT_BLOCK_OFFSET(ni->nid), nm_i->empty_nat_bits); | ||
| 343 | |||
| 344 | /* update fsync_mark if its inode nat entry is still alive */ | 341 | /* update fsync_mark if its inode nat entry is still alive */ |
| 345 | if (ni->nid != ni->ino) | 342 | if (ni->nid != ni->ino) |
| 346 | e = __lookup_nat_cache(nm_i, ni->ino); | 343 | e = __lookup_nat_cache(nm_i, ni->ino); |
| @@ -1823,7 +1820,8 @@ static void remove_free_nid(struct f2fs_sb_info *sbi, nid_t nid) | |||
| 1823 | kmem_cache_free(free_nid_slab, i); | 1820 | kmem_cache_free(free_nid_slab, i); |
| 1824 | } | 1821 | } |
| 1825 | 1822 | ||
| 1826 | void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set) | 1823 | static void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, |
| 1824 | bool set, bool build, bool locked) | ||
| 1827 | { | 1825 | { |
| 1828 | struct f2fs_nm_info *nm_i = NM_I(sbi); | 1826 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
| 1829 | unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); | 1827 | unsigned int nat_ofs = NAT_BLOCK_OFFSET(nid); |
| @@ -1833,9 +1831,18 @@ void update_free_nid_bitmap(struct f2fs_sb_info *sbi, nid_t nid, bool set) | |||
| 1833 | return; | 1831 | return; |
| 1834 | 1832 | ||
| 1835 | if (set) | 1833 | if (set) |
| 1836 | set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); | 1834 | __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); |
| 1837 | else | 1835 | else |
| 1838 | clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); | 1836 | __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]); |
| 1837 | |||
| 1838 | if (!locked) | ||
| 1839 | spin_lock(&nm_i->free_nid_lock); | ||
| 1840 | if (set) | ||
| 1841 | nm_i->free_nid_count[nat_ofs]++; | ||
| 1842 | else if (!build) | ||
| 1843 | nm_i->free_nid_count[nat_ofs]--; | ||
| 1844 | if (!locked) | ||
| 1845 | spin_unlock(&nm_i->free_nid_lock); | ||
| 1839 | } | 1846 | } |
| 1840 | 1847 | ||
| 1841 | static void scan_nat_page(struct f2fs_sb_info *sbi, | 1848 | static void scan_nat_page(struct f2fs_sb_info *sbi, |
| @@ -1847,7 +1854,10 @@ static void scan_nat_page(struct f2fs_sb_info *sbi, | |||
| 1847 | unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid); | 1854 | unsigned int nat_ofs = NAT_BLOCK_OFFSET(start_nid); |
| 1848 | int i; | 1855 | int i; |
| 1849 | 1856 | ||
| 1850 | set_bit_le(nat_ofs, nm_i->nat_block_bitmap); | 1857 | if (test_bit_le(nat_ofs, nm_i->nat_block_bitmap)) |
| 1858 | return; | ||
| 1859 | |||
| 1860 | __set_bit_le(nat_ofs, nm_i->nat_block_bitmap); | ||
| 1851 | 1861 | ||
| 1852 | i = start_nid % NAT_ENTRY_PER_BLOCK; | 1862 | i = start_nid % NAT_ENTRY_PER_BLOCK; |
| 1853 | 1863 | ||
| @@ -1861,7 +1871,7 @@ static void scan_nat_page(struct f2fs_sb_info *sbi, | |||
| 1861 | f2fs_bug_on(sbi, blk_addr == NEW_ADDR); | 1871 | f2fs_bug_on(sbi, blk_addr == NEW_ADDR); |
| 1862 | if (blk_addr == NULL_ADDR) | 1872 | if (blk_addr == NULL_ADDR) |
| 1863 | freed = add_free_nid(sbi, start_nid, true); | 1873 | freed = add_free_nid(sbi, start_nid, true); |
| 1864 | update_free_nid_bitmap(sbi, start_nid, freed); | 1874 | update_free_nid_bitmap(sbi, start_nid, freed, true, false); |
| 1865 | } | 1875 | } |
| 1866 | } | 1876 | } |
| 1867 | 1877 | ||
| @@ -1877,6 +1887,8 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi) | |||
| 1877 | for (i = 0; i < nm_i->nat_blocks; i++) { | 1887 | for (i = 0; i < nm_i->nat_blocks; i++) { |
| 1878 | if (!test_bit_le(i, nm_i->nat_block_bitmap)) | 1888 | if (!test_bit_le(i, nm_i->nat_block_bitmap)) |
| 1879 | continue; | 1889 | continue; |
| 1890 | if (!nm_i->free_nid_count[i]) | ||
| 1891 | continue; | ||
| 1880 | for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { | 1892 | for (idx = 0; idx < NAT_ENTRY_PER_BLOCK; idx++) { |
| 1881 | nid_t nid; | 1893 | nid_t nid; |
| 1882 | 1894 | ||
| @@ -1907,58 +1919,6 @@ out: | |||
| 1907 | up_read(&nm_i->nat_tree_lock); | 1919 | up_read(&nm_i->nat_tree_lock); |
| 1908 | } | 1920 | } |
| 1909 | 1921 | ||
| 1910 | static int scan_nat_bits(struct f2fs_sb_info *sbi) | ||
| 1911 | { | ||
| 1912 | struct f2fs_nm_info *nm_i = NM_I(sbi); | ||
| 1913 | struct page *page; | ||
| 1914 | unsigned int i = 0; | ||
| 1915 | nid_t nid; | ||
| 1916 | |||
| 1917 | if (!enabled_nat_bits(sbi, NULL)) | ||
| 1918 | return -EAGAIN; | ||
| 1919 | |||
| 1920 | down_read(&nm_i->nat_tree_lock); | ||
| 1921 | check_empty: | ||
| 1922 | i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); | ||
| 1923 | if (i >= nm_i->nat_blocks) { | ||
| 1924 | i = 0; | ||
| 1925 | goto check_partial; | ||
| 1926 | } | ||
| 1927 | |||
| 1928 | for (nid = i * NAT_ENTRY_PER_BLOCK; nid < (i + 1) * NAT_ENTRY_PER_BLOCK; | ||
| 1929 | nid++) { | ||
| 1930 | if (unlikely(nid >= nm_i->max_nid)) | ||
| 1931 | break; | ||
| 1932 | add_free_nid(sbi, nid, true); | ||
| 1933 | } | ||
| 1934 | |||
| 1935 | if (nm_i->nid_cnt[FREE_NID_LIST] >= MAX_FREE_NIDS) | ||
| 1936 | goto out; | ||
| 1937 | i++; | ||
| 1938 | goto check_empty; | ||
| 1939 | |||
| 1940 | check_partial: | ||
| 1941 | i = find_next_zero_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); | ||
| 1942 | if (i >= nm_i->nat_blocks) { | ||
| 1943 | disable_nat_bits(sbi, true); | ||
| 1944 | up_read(&nm_i->nat_tree_lock); | ||
| 1945 | return -EINVAL; | ||
| 1946 | } | ||
| 1947 | |||
| 1948 | nid = i * NAT_ENTRY_PER_BLOCK; | ||
| 1949 | page = get_current_nat_page(sbi, nid); | ||
| 1950 | scan_nat_page(sbi, page, nid); | ||
| 1951 | f2fs_put_page(page, 1); | ||
| 1952 | |||
| 1953 | if (nm_i->nid_cnt[FREE_NID_LIST] < MAX_FREE_NIDS) { | ||
| 1954 | i++; | ||
| 1955 | goto check_partial; | ||
| 1956 | } | ||
| 1957 | out: | ||
| 1958 | up_read(&nm_i->nat_tree_lock); | ||
| 1959 | return 0; | ||
| 1960 | } | ||
| 1961 | |||
| 1962 | static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) | 1922 | static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) |
| 1963 | { | 1923 | { |
| 1964 | struct f2fs_nm_info *nm_i = NM_I(sbi); | 1924 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
| @@ -1980,21 +1940,6 @@ static void __build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount) | |||
| 1980 | 1940 | ||
| 1981 | if (nm_i->nid_cnt[FREE_NID_LIST]) | 1941 | if (nm_i->nid_cnt[FREE_NID_LIST]) |
| 1982 | return; | 1942 | return; |
| 1983 | |||
| 1984 | /* try to find free nids with nat_bits */ | ||
| 1985 | if (!scan_nat_bits(sbi) && nm_i->nid_cnt[FREE_NID_LIST]) | ||
| 1986 | return; | ||
| 1987 | } | ||
| 1988 | |||
| 1989 | /* find next valid candidate */ | ||
| 1990 | if (enabled_nat_bits(sbi, NULL)) { | ||
| 1991 | int idx = find_next_zero_bit_le(nm_i->full_nat_bits, | ||
| 1992 | nm_i->nat_blocks, 0); | ||
| 1993 | |||
| 1994 | if (idx >= nm_i->nat_blocks) | ||
| 1995 | set_sbi_flag(sbi, SBI_NEED_FSCK); | ||
| 1996 | else | ||
| 1997 | nid = idx * NAT_ENTRY_PER_BLOCK; | ||
| 1998 | } | 1943 | } |
| 1999 | 1944 | ||
| 2000 | /* readahead nat pages to be scanned */ | 1945 | /* readahead nat pages to be scanned */ |
| @@ -2081,7 +2026,7 @@ retry: | |||
| 2081 | __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false); | 2026 | __insert_nid_to_list(sbi, i, ALLOC_NID_LIST, false); |
| 2082 | nm_i->available_nids--; | 2027 | nm_i->available_nids--; |
| 2083 | 2028 | ||
| 2084 | update_free_nid_bitmap(sbi, *nid, false); | 2029 | update_free_nid_bitmap(sbi, *nid, false, false, false); |
| 2085 | 2030 | ||
| 2086 | spin_unlock(&nm_i->nid_list_lock); | 2031 | spin_unlock(&nm_i->nid_list_lock); |
| 2087 | return true; | 2032 | return true; |
| @@ -2137,7 +2082,7 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) | |||
| 2137 | 2082 | ||
| 2138 | nm_i->available_nids++; | 2083 | nm_i->available_nids++; |
| 2139 | 2084 | ||
| 2140 | update_free_nid_bitmap(sbi, nid, true); | 2085 | update_free_nid_bitmap(sbi, nid, true, false, false); |
| 2141 | 2086 | ||
| 2142 | spin_unlock(&nm_i->nid_list_lock); | 2087 | spin_unlock(&nm_i->nid_list_lock); |
| 2143 | 2088 | ||
| @@ -2383,7 +2328,7 @@ add_out: | |||
| 2383 | list_add_tail(&nes->set_list, head); | 2328 | list_add_tail(&nes->set_list, head); |
| 2384 | } | 2329 | } |
| 2385 | 2330 | ||
| 2386 | void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, | 2331 | static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, |
| 2387 | struct page *page) | 2332 | struct page *page) |
| 2388 | { | 2333 | { |
| 2389 | struct f2fs_nm_info *nm_i = NM_I(sbi); | 2334 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
| @@ -2402,16 +2347,16 @@ void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid, | |||
| 2402 | valid++; | 2347 | valid++; |
| 2403 | } | 2348 | } |
| 2404 | if (valid == 0) { | 2349 | if (valid == 0) { |
| 2405 | set_bit_le(nat_index, nm_i->empty_nat_bits); | 2350 | __set_bit_le(nat_index, nm_i->empty_nat_bits); |
| 2406 | clear_bit_le(nat_index, nm_i->full_nat_bits); | 2351 | __clear_bit_le(nat_index, nm_i->full_nat_bits); |
| 2407 | return; | 2352 | return; |
| 2408 | } | 2353 | } |
| 2409 | 2354 | ||
| 2410 | clear_bit_le(nat_index, nm_i->empty_nat_bits); | 2355 | __clear_bit_le(nat_index, nm_i->empty_nat_bits); |
| 2411 | if (valid == NAT_ENTRY_PER_BLOCK) | 2356 | if (valid == NAT_ENTRY_PER_BLOCK) |
| 2412 | set_bit_le(nat_index, nm_i->full_nat_bits); | 2357 | __set_bit_le(nat_index, nm_i->full_nat_bits); |
| 2413 | else | 2358 | else |
| 2414 | clear_bit_le(nat_index, nm_i->full_nat_bits); | 2359 | __clear_bit_le(nat_index, nm_i->full_nat_bits); |
| 2415 | } | 2360 | } |
| 2416 | 2361 | ||
| 2417 | static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, | 2362 | static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, |
| @@ -2467,11 +2412,11 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, | |||
| 2467 | add_free_nid(sbi, nid, false); | 2412 | add_free_nid(sbi, nid, false); |
| 2468 | spin_lock(&NM_I(sbi)->nid_list_lock); | 2413 | spin_lock(&NM_I(sbi)->nid_list_lock); |
| 2469 | NM_I(sbi)->available_nids++; | 2414 | NM_I(sbi)->available_nids++; |
| 2470 | update_free_nid_bitmap(sbi, nid, true); | 2415 | update_free_nid_bitmap(sbi, nid, true, false, false); |
| 2471 | spin_unlock(&NM_I(sbi)->nid_list_lock); | 2416 | spin_unlock(&NM_I(sbi)->nid_list_lock); |
| 2472 | } else { | 2417 | } else { |
| 2473 | spin_lock(&NM_I(sbi)->nid_list_lock); | 2418 | spin_lock(&NM_I(sbi)->nid_list_lock); |
| 2474 | update_free_nid_bitmap(sbi, nid, false); | 2419 | update_free_nid_bitmap(sbi, nid, false, false, false); |
| 2475 | spin_unlock(&NM_I(sbi)->nid_list_lock); | 2420 | spin_unlock(&NM_I(sbi)->nid_list_lock); |
| 2476 | } | 2421 | } |
| 2477 | } | 2422 | } |
| @@ -2577,6 +2522,40 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi) | |||
| 2577 | return 0; | 2522 | return 0; |
| 2578 | } | 2523 | } |
| 2579 | 2524 | ||
| 2525 | inline void load_free_nid_bitmap(struct f2fs_sb_info *sbi) | ||
| 2526 | { | ||
| 2527 | struct f2fs_nm_info *nm_i = NM_I(sbi); | ||
| 2528 | unsigned int i = 0; | ||
| 2529 | nid_t nid, last_nid; | ||
| 2530 | |||
| 2531 | if (!enabled_nat_bits(sbi, NULL)) | ||
| 2532 | return; | ||
| 2533 | |||
| 2534 | for (i = 0; i < nm_i->nat_blocks; i++) { | ||
| 2535 | i = find_next_bit_le(nm_i->empty_nat_bits, nm_i->nat_blocks, i); | ||
| 2536 | if (i >= nm_i->nat_blocks) | ||
| 2537 | break; | ||
| 2538 | |||
| 2539 | __set_bit_le(i, nm_i->nat_block_bitmap); | ||
| 2540 | |||
| 2541 | nid = i * NAT_ENTRY_PER_BLOCK; | ||
| 2542 | last_nid = (i + 1) * NAT_ENTRY_PER_BLOCK; | ||
| 2543 | |||
| 2544 | spin_lock(&nm_i->free_nid_lock); | ||
| 2545 | for (; nid < last_nid; nid++) | ||
| 2546 | update_free_nid_bitmap(sbi, nid, true, true, true); | ||
| 2547 | spin_unlock(&nm_i->free_nid_lock); | ||
| 2548 | } | ||
| 2549 | |||
| 2550 | for (i = 0; i < nm_i->nat_blocks; i++) { | ||
| 2551 | i = find_next_bit_le(nm_i->full_nat_bits, nm_i->nat_blocks, i); | ||
| 2552 | if (i >= nm_i->nat_blocks) | ||
| 2553 | break; | ||
| 2554 | |||
| 2555 | __set_bit_le(i, nm_i->nat_block_bitmap); | ||
| 2556 | } | ||
| 2557 | } | ||
| 2558 | |||
| 2580 | static int init_node_manager(struct f2fs_sb_info *sbi) | 2559 | static int init_node_manager(struct f2fs_sb_info *sbi) |
| 2581 | { | 2560 | { |
| 2582 | struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); | 2561 | struct f2fs_super_block *sb_raw = F2FS_RAW_SUPER(sbi); |
| @@ -2638,7 +2617,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi) | |||
| 2638 | return 0; | 2617 | return 0; |
| 2639 | } | 2618 | } |
| 2640 | 2619 | ||
| 2641 | int init_free_nid_cache(struct f2fs_sb_info *sbi) | 2620 | static int init_free_nid_cache(struct f2fs_sb_info *sbi) |
| 2642 | { | 2621 | { |
| 2643 | struct f2fs_nm_info *nm_i = NM_I(sbi); | 2622 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
| 2644 | 2623 | ||
| @@ -2651,6 +2630,14 @@ int init_free_nid_cache(struct f2fs_sb_info *sbi) | |||
| 2651 | GFP_KERNEL); | 2630 | GFP_KERNEL); |
| 2652 | if (!nm_i->nat_block_bitmap) | 2631 | if (!nm_i->nat_block_bitmap) |
| 2653 | return -ENOMEM; | 2632 | return -ENOMEM; |
| 2633 | |||
| 2634 | nm_i->free_nid_count = f2fs_kvzalloc(nm_i->nat_blocks * | ||
| 2635 | sizeof(unsigned short), GFP_KERNEL); | ||
| 2636 | if (!nm_i->free_nid_count) | ||
| 2637 | return -ENOMEM; | ||
| 2638 | |||
| 2639 | spin_lock_init(&nm_i->free_nid_lock); | ||
| 2640 | |||
| 2654 | return 0; | 2641 | return 0; |
| 2655 | } | 2642 | } |
| 2656 | 2643 | ||
| @@ -2670,6 +2657,9 @@ int build_node_manager(struct f2fs_sb_info *sbi) | |||
| 2670 | if (err) | 2657 | if (err) |
| 2671 | return err; | 2658 | return err; |
| 2672 | 2659 | ||
| 2660 | /* load free nid status from nat_bits table */ | ||
| 2661 | load_free_nid_bitmap(sbi); | ||
| 2662 | |||
| 2673 | build_free_nids(sbi, true, true); | 2663 | build_free_nids(sbi, true, true); |
| 2674 | return 0; | 2664 | return 0; |
| 2675 | } | 2665 | } |
| @@ -2730,6 +2720,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) | |||
| 2730 | 2720 | ||
| 2731 | kvfree(nm_i->nat_block_bitmap); | 2721 | kvfree(nm_i->nat_block_bitmap); |
| 2732 | kvfree(nm_i->free_nid_bitmap); | 2722 | kvfree(nm_i->free_nid_bitmap); |
| 2723 | kvfree(nm_i->free_nid_count); | ||
| 2733 | 2724 | ||
| 2734 | kfree(nm_i->nat_bitmap); | 2725 | kfree(nm_i->nat_bitmap); |
| 2735 | kfree(nm_i->nat_bits); | 2726 | kfree(nm_i->nat_bits); |
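Taken together, the f2fs.h and node.c hunks add a per-NAT-block count of free nids so that scan_free_nid_bits() can skip blocks with nothing to offer, and they switch the bitmap updates to the non-atomic __set_bit_le()/__clear_bit_le() variants, which is safe once every update happens under a lock. A condensed sketch of the counting scheme (field names as in the patch, locking simplified):

    spin_lock(&nm_i->free_nid_lock);
    if (set) {
            __set_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
            nm_i->free_nid_count[nat_ofs]++;
    } else {
            __clear_bit_le(nid_ofs, nm_i->free_nid_bitmap[nat_ofs]);
            if (!build)             /* skip decrement during build scans */
                    nm_i->free_nid_count[nat_ofs]--;
    }
    spin_unlock(&nm_i->free_nid_lock);

    /* ... and in the scanner's per-block loop: */
    if (!nm_i->free_nid_count[i])
            continue;               /* no free nid in this NAT block */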
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 4bd7a8b19332..29ef7088c558 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c | |||
| @@ -1163,6 +1163,12 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) | |||
| 1163 | if (f2fs_discard_en(sbi) && | 1163 | if (f2fs_discard_en(sbi) && |
| 1164 | !f2fs_test_and_set_bit(offset, se->discard_map)) | 1164 | !f2fs_test_and_set_bit(offset, se->discard_map)) |
| 1165 | sbi->discard_blks--; | 1165 | sbi->discard_blks--; |
| 1166 | |||
| 1167 | /* don't let SSR overwrite these blocks, to keep the node chain */ | ||
| 1168 | if (se->type == CURSEG_WARM_NODE) { | ||
| 1169 | if (!f2fs_test_and_set_bit(offset, se->ckpt_valid_map)) | ||
| 1170 | se->ckpt_valid_blocks++; | ||
| 1171 | } | ||
| 1166 | } else { | 1172 | } else { |
| 1167 | if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) { | 1173 | if (!f2fs_test_and_clear_bit(offset, se->cur_valid_map)) { |
| 1168 | #ifdef CONFIG_F2FS_CHECK_FS | 1174 | #ifdef CONFIG_F2FS_CHECK_FS |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index a1a359bfcc9c..5adc2fb62b0f 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
| @@ -1125,10 +1125,8 @@ static journal_t *journal_init_common(struct block_device *bdev, | |||
| 1125 | 1125 | ||
| 1126 | /* Set up a default-sized revoke table for the new mount. */ | 1126 | /* Set up a default-sized revoke table for the new mount. */ |
| 1127 | err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH); | 1127 | err = jbd2_journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH); |
| 1128 | if (err) { | 1128 | if (err) |
| 1129 | kfree(journal); | 1129 | goto err_cleanup; |
| 1130 | return NULL; | ||
| 1131 | } | ||
| 1132 | 1130 | ||
| 1133 | spin_lock_init(&journal->j_history_lock); | 1131 | spin_lock_init(&journal->j_history_lock); |
| 1134 | 1132 | ||
| @@ -1145,23 +1143,25 @@ static journal_t *journal_init_common(struct block_device *bdev, | |||
| 1145 | journal->j_wbufsize = n; | 1143 | journal->j_wbufsize = n; |
| 1146 | journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *), | 1144 | journal->j_wbuf = kmalloc_array(n, sizeof(struct buffer_head *), |
| 1147 | GFP_KERNEL); | 1145 | GFP_KERNEL); |
| 1148 | if (!journal->j_wbuf) { | 1146 | if (!journal->j_wbuf) |
| 1149 | kfree(journal); | 1147 | goto err_cleanup; |
| 1150 | return NULL; | ||
| 1151 | } | ||
| 1152 | 1148 | ||
| 1153 | bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize); | 1149 | bh = getblk_unmovable(journal->j_dev, start, journal->j_blocksize); |
| 1154 | if (!bh) { | 1150 | if (!bh) { |
| 1155 | pr_err("%s: Cannot get buffer for journal superblock\n", | 1151 | pr_err("%s: Cannot get buffer for journal superblock\n", |
| 1156 | __func__); | 1152 | __func__); |
| 1157 | kfree(journal->j_wbuf); | 1153 | goto err_cleanup; |
| 1158 | kfree(journal); | ||
| 1159 | return NULL; | ||
| 1160 | } | 1154 | } |
| 1161 | journal->j_sb_buffer = bh; | 1155 | journal->j_sb_buffer = bh; |
| 1162 | journal->j_superblock = (journal_superblock_t *)bh->b_data; | 1156 | journal->j_superblock = (journal_superblock_t *)bh->b_data; |
| 1163 | 1157 | ||
| 1164 | return journal; | 1158 | return journal; |
| 1159 | |||
| 1160 | err_cleanup: | ||
| 1161 | kfree(journal->j_wbuf); | ||
| 1162 | jbd2_journal_destroy_revoke(journal); | ||
| 1163 | kfree(journal); | ||
| 1164 | return NULL; | ||
| 1165 | } | 1165 | } |
| 1166 | 1166 | ||
| 1167 | /* jbd2_journal_init_dev and jbd2_journal_init_inode: | 1167 | /* jbd2_journal_init_dev and jbd2_journal_init_inode: |
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c index cfc38b552118..f9aefcda5854 100644 --- a/fs/jbd2/revoke.c +++ b/fs/jbd2/revoke.c | |||
| @@ -280,6 +280,7 @@ int jbd2_journal_init_revoke(journal_t *journal, int hash_size) | |||
| 280 | 280 | ||
| 281 | fail1: | 281 | fail1: |
| 282 | jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]); | 282 | jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]); |
| 283 | journal->j_revoke_table[0] = NULL; | ||
| 283 | fail0: | 284 | fail0: |
| 284 | return -ENOMEM; | 285 | return -ENOMEM; |
| 285 | } | 286 | } |
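The journal.c and revoke.c hunks work as a pair. journal_init_common() now funnels every failure through a single err_cleanup label, and jbd2_journal_init_revoke() NULLs j_revoke_table[0] after destroying it on its own failure path, so the shared cleanup cannot tear down the same table twice (this assumes, as the pairing suggests, that jbd2_journal_destroy_revoke() skips NULL table pointers). The resulting idiom:

    if (!journal->j_wbuf)
            goto err_cleanup;
    /* ... */
    err_cleanup:
            kfree(journal->j_wbuf);                 /* kfree(NULL) is a no-op */
            jbd2_journal_destroy_revoke(journal);   /* safe on NULL tables */
            kfree(journal);
            return NULL;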
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 8e4dc7ab584c..ac2dfe0c5a9c 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c | |||
| @@ -809,7 +809,8 @@ void kernfs_drain_open_files(struct kernfs_node *kn) | |||
| 809 | if (kn->flags & KERNFS_HAS_MMAP) | 809 | if (kn->flags & KERNFS_HAS_MMAP) |
| 810 | unmap_mapping_range(inode->i_mapping, 0, 0, 1); | 810 | unmap_mapping_range(inode->i_mapping, 0, 0, 1); |
| 811 | 811 | ||
| 812 | kernfs_release_file(kn, of); | 812 | if (kn->flags & KERNFS_HAS_RELEASE) |
| 813 | kernfs_release_file(kn, of); | ||
| 813 | } | 814 | } |
| 814 | 815 | ||
| 815 | mutex_unlock(&kernfs_open_file_mutex); | 816 | mutex_unlock(&kernfs_open_file_mutex); |
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index bb79972dc638..773774531aff 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c | |||
| @@ -232,12 +232,12 @@ static struct svc_serv_ops nfs41_cb_sv_ops = { | |||
| 232 | .svo_module = THIS_MODULE, | 232 | .svo_module = THIS_MODULE, |
| 233 | }; | 233 | }; |
| 234 | 234 | ||
| 235 | struct svc_serv_ops *nfs4_cb_sv_ops[] = { | 235 | static struct svc_serv_ops *nfs4_cb_sv_ops[] = { |
| 236 | [0] = &nfs40_cb_sv_ops, | 236 | [0] = &nfs40_cb_sv_ops, |
| 237 | [1] = &nfs41_cb_sv_ops, | 237 | [1] = &nfs41_cb_sv_ops, |
| 238 | }; | 238 | }; |
| 239 | #else | 239 | #else |
| 240 | struct svc_serv_ops *nfs4_cb_sv_ops[] = { | 240 | static struct svc_serv_ops *nfs4_cb_sv_ops[] = { |
| 241 | [0] = &nfs40_cb_sv_ops, | 241 | [0] = &nfs40_cb_sv_ops, |
| 242 | [1] = NULL, | 242 | [1] = NULL, |
| 243 | }; | 243 | }; |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 91a8d610ba0f..390ada8741bc 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
| @@ -325,10 +325,33 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat | |||
| 325 | return NULL; | 325 | return NULL; |
| 326 | } | 326 | } |
| 327 | 327 | ||
| 328 | static bool nfs_client_init_is_complete(const struct nfs_client *clp) | 328 | /* |
| 329 | * Return true if @clp is done initializing, false if still working on it. | ||
| 330 | * | ||
| 331 | * Use nfs_client_init_status to check if it was successful. | ||
| 332 | */ | ||
| 333 | bool nfs_client_init_is_complete(const struct nfs_client *clp) | ||
| 329 | { | 334 | { |
| 330 | return clp->cl_cons_state <= NFS_CS_READY; | 335 | return clp->cl_cons_state <= NFS_CS_READY; |
| 331 | } | 336 | } |
| 337 | EXPORT_SYMBOL_GPL(nfs_client_init_is_complete); | ||
| 338 | |||
| 339 | /* | ||
| 340 | * Return 0 if @clp was successfully initialized, -errno otherwise. | ||
| 341 | * | ||
| 342 | * This must be called *after* nfs_client_init_is_complete() returns true, | ||
| 343 | * otherwise it will trigger a WARN_ON_ONCE() and return -EINVAL. | ||
| 344 | */ | ||
| 345 | int nfs_client_init_status(const struct nfs_client *clp) | ||
| 346 | { | ||
| 347 | /* called without checking nfs_client_init_is_complete */ | ||
| 348 | if (clp->cl_cons_state > NFS_CS_READY) { | ||
| 349 | WARN_ON_ONCE(1); | ||
| 350 | return -EINVAL; | ||
| 351 | } | ||
| 352 | return clp->cl_cons_state; | ||
| 353 | } | ||
| 354 | EXPORT_SYMBOL_GPL(nfs_client_init_status); | ||
| 332 | 355 | ||
| 333 | int nfs_wait_client_init_complete(const struct nfs_client *clp) | 356 | int nfs_wait_client_init_complete(const struct nfs_client *clp) |
| 334 | { | 357 | { |
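client.c now exposes a two-step protocol to other NFS modules: wait (or poll) until nfs_client_init_is_complete() is true, then read the outcome with nfs_client_init_status(), which must not be called earlier. A hedged usage sketch:

    /* sleep until another thread finishes bringing clp up */
    err = nfs_wait_client_init_complete(clp);
    if (err == 0)
            err = nfs_client_init_status(clp);  /* 0 on success, -errno */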
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c index f956ca20a8a3..d913e818858f 100644 --- a/fs/nfs/filelayout/filelayoutdev.c +++ b/fs/nfs/filelayout/filelayoutdev.c | |||
| @@ -266,6 +266,7 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) | |||
| 266 | struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg); | 266 | struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg); |
| 267 | struct nfs4_pnfs_ds *ret = ds; | 267 | struct nfs4_pnfs_ds *ret = ds; |
| 268 | struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode); | 268 | struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode); |
| 269 | int status; | ||
| 269 | 270 | ||
| 270 | if (ds == NULL) { | 271 | if (ds == NULL) { |
| 271 | printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", | 272 | printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", |
| @@ -277,9 +278,14 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) | |||
| 277 | if (ds->ds_clp) | 278 | if (ds->ds_clp) |
| 278 | goto out_test_devid; | 279 | goto out_test_devid; |
| 279 | 280 | ||
| 280 | nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, | 281 | status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, |
| 281 | dataserver_retrans, 4, | 282 | dataserver_retrans, 4, |
| 282 | s->nfs_client->cl_minorversion); | 283 | s->nfs_client->cl_minorversion); |
| 284 | if (status) { | ||
| 285 | nfs4_mark_deviceid_unavailable(devid); | ||
| 286 | ret = NULL; | ||
| 287 | goto out; | ||
| 288 | } | ||
| 283 | 289 | ||
| 284 | out_test_devid: | 290 | out_test_devid: |
| 285 | if (ret->ds_clp == NULL || | 291 | if (ret->ds_clp == NULL || |
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h index f4f39b0ab09b..98b34c9b0564 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.h +++ b/fs/nfs/flexfilelayout/flexfilelayout.h | |||
| @@ -175,7 +175,19 @@ ff_layout_no_read_on_rw(struct pnfs_layout_segment *lseg) | |||
| 175 | static inline bool | 175 | static inline bool |
| 176 | ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node) | 176 | ff_layout_test_devid_unavailable(struct nfs4_deviceid_node *node) |
| 177 | { | 177 | { |
| 178 | return nfs4_test_deviceid_unavailable(node); | 178 | /* |
| 179 | * Flexfiles should never mark a DS unavailable, but if it does, | ||
| 180 | * print a (ratelimited) warning as this can affect performance. | ||
| 181 | */ | ||
| 182 | if (nfs4_test_deviceid_unavailable(node)) { | ||
| 183 | u32 *p = (u32 *)node->deviceid.data; | ||
| 184 | |||
| 185 | pr_warn_ratelimited("NFS: flexfiles layout referencing an " | ||
| 186 | "unavailable device [%x%x%x%x]\n", | ||
| 187 | p[0], p[1], p[2], p[3]); | ||
| 188 | return true; | ||
| 189 | } | ||
| 190 | return false; | ||
| 179 | } | 191 | } |
| 180 | 192 | ||
| 181 | static inline int | 193 | static inline int |
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c index e5a6f248697b..85fde93dff77 100644 --- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c +++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c | |||
| @@ -384,6 +384,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, | |||
| 384 | struct inode *ino = lseg->pls_layout->plh_inode; | 384 | struct inode *ino = lseg->pls_layout->plh_inode; |
| 385 | struct nfs_server *s = NFS_SERVER(ino); | 385 | struct nfs_server *s = NFS_SERVER(ino); |
| 386 | unsigned int max_payload; | 386 | unsigned int max_payload; |
| 387 | int status; | ||
| 387 | 388 | ||
| 388 | if (!ff_layout_mirror_valid(lseg, mirror, true)) { | 389 | if (!ff_layout_mirror_valid(lseg, mirror, true)) { |
| 389 | pr_err_ratelimited("NFS: %s: No data server for offset index %d\n", | 390 | pr_err_ratelimited("NFS: %s: No data server for offset index %d\n", |
| @@ -404,7 +405,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, | |||
| 404 | /* FIXME: For now we assume the server sent only one version of NFS | 405 | /* FIXME: For now we assume the server sent only one version of NFS |
| 405 | * to use for the DS. | 406 | * to use for the DS. |
| 406 | */ | 407 | */ |
| 407 | nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, | 408 | status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo, |
| 408 | dataserver_retrans, | 409 | dataserver_retrans, |
| 409 | mirror->mirror_ds->ds_versions[0].version, | 410 | mirror->mirror_ds->ds_versions[0].version, |
| 410 | mirror->mirror_ds->ds_versions[0].minor_version); | 411 | mirror->mirror_ds->ds_versions[0].minor_version); |
| @@ -420,11 +421,11 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx, | |||
| 420 | mirror->mirror_ds->ds_versions[0].wsize = max_payload; | 421 | mirror->mirror_ds->ds_versions[0].wsize = max_payload; |
| 421 | goto out; | 422 | goto out; |
| 422 | } | 423 | } |
| 424 | out_fail: | ||
| 423 | ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout), | 425 | ff_layout_track_ds_error(FF_LAYOUT_FROM_HDR(lseg->pls_layout), |
| 424 | mirror, lseg->pls_range.offset, | 426 | mirror, lseg->pls_range.offset, |
| 425 | lseg->pls_range.length, NFS4ERR_NXIO, | 427 | lseg->pls_range.length, NFS4ERR_NXIO, |
| 426 | OP_ILLEGAL, GFP_NOIO); | 428 | OP_ILLEGAL, GFP_NOIO); |
| 427 | out_fail: | ||
| 428 | if (fail_return || !ff_layout_has_available_ds(lseg)) | 429 | if (fail_return || !ff_layout_has_available_ds(lseg)) |
| 429 | pnfs_error_mark_layout_for_return(ino, lseg); | 430 | pnfs_error_mark_layout_for_return(ino, lseg); |
| 430 | ds = NULL; | 431 | ds = NULL; |
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 09ca5095c04e..7b38fedb7e03 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
| @@ -186,6 +186,8 @@ extern struct nfs_server *nfs_clone_server(struct nfs_server *, | |||
| 186 | struct nfs_fh *, | 186 | struct nfs_fh *, |
| 187 | struct nfs_fattr *, | 187 | struct nfs_fattr *, |
| 188 | rpc_authflavor_t); | 188 | rpc_authflavor_t); |
| 189 | extern bool nfs_client_init_is_complete(const struct nfs_client *clp); | ||
| 190 | extern int nfs_client_init_status(const struct nfs_client *clp); | ||
| 189 | extern int nfs_wait_client_init_complete(const struct nfs_client *clp); | 191 | extern int nfs_wait_client_init_complete(const struct nfs_client *clp); |
| 190 | extern void nfs_mark_client_ready(struct nfs_client *clp, int state); | 192 | extern void nfs_mark_client_ready(struct nfs_client *clp, int state); |
| 191 | extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv, | 193 | extern struct nfs_client *nfs4_set_ds_client(struct nfs_server *mds_srv, |
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 5ae9d64ea08b..8346ccbf2d52 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c | |||
| @@ -1023,9 +1023,9 @@ static void nfs4_session_set_rwsize(struct nfs_server *server) | |||
| 1023 | server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead; | 1023 | server_resp_sz = sess->fc_attrs.max_resp_sz - nfs41_maxread_overhead; |
| 1024 | server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead; | 1024 | server_rqst_sz = sess->fc_attrs.max_rqst_sz - nfs41_maxwrite_overhead; |
| 1025 | 1025 | ||
| 1026 | if (server->rsize > server_resp_sz) | 1026 | if (!server->rsize || server->rsize > server_resp_sz) |
| 1027 | server->rsize = server_resp_sz; | 1027 | server->rsize = server_resp_sz; |
| 1028 | if (server->wsize > server_rqst_sz) | 1028 | if (!server->wsize || server->wsize > server_rqst_sz) |
| 1029 | server->wsize = server_rqst_sz; | 1029 | server->wsize = server_rqst_sz; |
| 1030 | #endif /* CONFIG_NFS_V4_1 */ | 1030 | #endif /* CONFIG_NFS_V4_1 */ |
| 1031 | } | 1031 | } |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 1b183686c6d4..c780d98035cc 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -2258,8 +2258,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred, | |||
| 2258 | if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) | 2258 | if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0) |
| 2259 | return 0; | 2259 | return 0; |
| 2260 | 2260 | ||
| 2261 | /* even though OPEN succeeded, access is denied. Close the file */ | ||
| 2262 | nfs4_close_state(state, fmode); | ||
| 2263 | return -EACCES; | 2261 | return -EACCES; |
| 2264 | } | 2262 | } |
| 2265 | 2263 | ||
| @@ -7427,11 +7425,11 @@ static void nfs4_exchange_id_release(void *data) | |||
| 7427 | struct nfs41_exchange_id_data *cdata = | 7425 | struct nfs41_exchange_id_data *cdata = |
| 7428 | (struct nfs41_exchange_id_data *)data; | 7426 | (struct nfs41_exchange_id_data *)data; |
| 7429 | 7427 | ||
| 7430 | nfs_put_client(cdata->args.client); | ||
| 7431 | if (cdata->xprt) { | 7428 | if (cdata->xprt) { |
| 7432 | xprt_put(cdata->xprt); | 7429 | xprt_put(cdata->xprt); |
| 7433 | rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient); | 7430 | rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient); |
| 7434 | } | 7431 | } |
| 7432 | nfs_put_client(cdata->args.client); | ||
| 7435 | kfree(cdata->res.impl_id); | 7433 | kfree(cdata->res.impl_id); |
| 7436 | kfree(cdata->res.server_scope); | 7434 | kfree(cdata->res.server_scope); |
| 7437 | kfree(cdata->res.server_owner); | 7435 | kfree(cdata->res.server_owner); |
| @@ -7538,10 +7536,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred, | |||
| 7538 | task_setup_data.callback_data = calldata; | 7536 | task_setup_data.callback_data = calldata; |
| 7539 | 7537 | ||
| 7540 | task = rpc_run_task(&task_setup_data); | 7538 | task = rpc_run_task(&task_setup_data); |
| 7541 | if (IS_ERR(task)) { | 7539 | if (IS_ERR(task)) |
| 7542 | status = PTR_ERR(task); | 7540 | return PTR_ERR(task); |
| 7543 | goto out_impl_id; | ||
| 7544 | } | ||
| 7545 | 7541 | ||
| 7546 | if (!xprt) { | 7542 | if (!xprt) { |
| 7547 | status = rpc_wait_for_completion_task(task); | 7543 | status = rpc_wait_for_completion_task(task); |
| @@ -7569,6 +7565,7 @@ out_server_owner: | |||
| 7569 | kfree(calldata->res.server_owner); | 7565 | kfree(calldata->res.server_owner); |
| 7570 | out_calldata: | 7566 | out_calldata: |
| 7571 | kfree(calldata); | 7567 | kfree(calldata); |
| 7568 | nfs_put_client(clp); | ||
| 7572 | goto out; | 7569 | goto out; |
| 7573 | } | 7570 | } |
| 7574 | 7571 | ||
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index f0369e362753..80ce289eea05 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
| @@ -3942,7 +3942,7 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, | |||
| 3942 | if (len <= 0) | 3942 | if (len <= 0) |
| 3943 | goto out; | 3943 | goto out; |
| 3944 | dprintk("%s: name=%s\n", __func__, group_name->data); | 3944 | dprintk("%s: name=%s\n", __func__, group_name->data); |
| 3945 | return NFS_ATTR_FATTR_OWNER_NAME; | 3945 | return NFS_ATTR_FATTR_GROUP_NAME; |
| 3946 | } else { | 3946 | } else { |
| 3947 | len = xdr_stream_decode_opaque_inline(xdr, (void **)&p, | 3947 | len = xdr_stream_decode_opaque_inline(xdr, (void **)&p, |
| 3948 | XDR_MAX_NETOBJ); | 3948 | XDR_MAX_NETOBJ); |
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 63f77b49a586..590e1e35781f 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h | |||
| @@ -367,7 +367,7 @@ void nfs4_pnfs_ds_put(struct nfs4_pnfs_ds *ds); | |||
| 367 | struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, | 367 | struct nfs4_pnfs_ds *nfs4_pnfs_ds_add(struct list_head *dsaddrs, |
| 368 | gfp_t gfp_flags); | 368 | gfp_t gfp_flags); |
| 369 | void nfs4_pnfs_v3_ds_connect_unload(void); | 369 | void nfs4_pnfs_v3_ds_connect_unload(void); |
| 370 | void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, | 370 | int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, |
| 371 | struct nfs4_deviceid_node *devid, unsigned int timeo, | 371 | struct nfs4_deviceid_node *devid, unsigned int timeo, |
| 372 | unsigned int retrans, u32 version, u32 minor_version); | 372 | unsigned int retrans, u32 version, u32 minor_version); |
| 373 | struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net, | 373 | struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net, |
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index 9414b492439f..7250b95549ec 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c | |||
| @@ -745,15 +745,17 @@ out: | |||
| 745 | /* | 745 | /* |
| 746 | * Create an rpc connection to the nfs4_pnfs_ds data server. | 746 | * Create an rpc connection to the nfs4_pnfs_ds data server. |
| 747 | * Currently only supports IPv4 and IPv6 addresses. | 747 | * Currently only supports IPv4 and IPv6 addresses. |
| 748 | * If connection fails, make devid unavailable. | 748 | * If connection fails, make devid unavailable and return a -errno. |
| 749 | */ | 749 | */ |
| 750 | void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, | 750 | int nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, |
| 751 | struct nfs4_deviceid_node *devid, unsigned int timeo, | 751 | struct nfs4_deviceid_node *devid, unsigned int timeo, |
| 752 | unsigned int retrans, u32 version, u32 minor_version) | 752 | unsigned int retrans, u32 version, u32 minor_version) |
| 753 | { | 753 | { |
| 754 | if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { | 754 | int err; |
| 755 | int err = 0; | ||
| 756 | 755 | ||
| 756 | again: | ||
| 757 | err = 0; | ||
| 758 | if (test_and_set_bit(NFS4DS_CONNECTING, &ds->ds_state) == 0) { | ||
| 757 | if (version == 3) { | 759 | if (version == 3) { |
| 758 | err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo, | 760 | err = _nfs4_pnfs_v3_ds_connect(mds_srv, ds, timeo, |
| 759 | retrans); | 761 | retrans); |
| @@ -766,12 +768,29 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, | |||
| 766 | err = -EPROTONOSUPPORT; | 768 | err = -EPROTONOSUPPORT; |
| 767 | } | 769 | } |
| 768 | 770 | ||
| 769 | if (err) | ||
| 770 | nfs4_mark_deviceid_unavailable(devid); | ||
| 771 | nfs4_clear_ds_conn_bit(ds); | 771 | nfs4_clear_ds_conn_bit(ds); |
| 772 | } else { | 772 | } else { |
| 773 | nfs4_wait_ds_connect(ds); | 773 | nfs4_wait_ds_connect(ds); |
| 774 | |||
| 775 | /* the connect we waited on neither succeeded nor marked it unavailable */ | ||
| 776 | if (!ds->ds_clp && !nfs4_test_deviceid_unavailable(devid)) | ||
| 777 | goto again; | ||
| 774 | } | 778 | } |
| 779 | |||
| 780 | /* | ||
| 781 | * At this point the ds->ds_clp should be ready, but it might have | ||
| 782 | * hit an error. | ||
| 783 | */ | ||
| 784 | if (!err) { | ||
| 785 | if (!ds->ds_clp || !nfs_client_init_is_complete(ds->ds_clp)) { | ||
| 786 | WARN_ON_ONCE(ds->ds_clp || | ||
| 787 | !nfs4_test_deviceid_unavailable(devid)); | ||
| 788 | return -EINVAL; | ||
| 789 | } | ||
| 790 | err = nfs_client_init_status(ds->ds_clp); | ||
| 791 | } | ||
| 792 | |||
| 793 | return err; | ||
| 775 | } | 794 | } |
| 776 | EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect); | 795 | EXPORT_SYMBOL_GPL(nfs4_pnfs_ds_connect); |
| 777 | 796 | ||
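With nfs4_pnfs_ds_connect() now returning a status, the connect/wait race is also closed: a waiter that finds neither a connected ds_clp nor an unavailable devid simply retries, and every caller gets a definitive answer. The caller-side pattern, as in the filelayout hunk above:

    status = nfs4_pnfs_ds_connect(s, ds, devid, dataserver_timeo,
                                  dataserver_retrans, 4,
                                  s->nfs_client->cl_minorversion);
    if (status) {
            nfs4_mark_deviceid_unavailable(devid);  /* policy moved to caller */
            ret = NULL;
            goto out;
    }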
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index e75b056f46f4..abb2c8a3be42 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
| @@ -1784,7 +1784,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) | |||
| 1784 | (long long)req_offset(req)); | 1784 | (long long)req_offset(req)); |
| 1785 | if (status < 0) { | 1785 | if (status < 0) { |
| 1786 | nfs_context_set_write_error(req->wb_context, status); | 1786 | nfs_context_set_write_error(req->wb_context, status); |
| 1787 | nfs_inode_remove_request(req); | 1787 | if (req->wb_page) |
| 1788 | nfs_inode_remove_request(req); | ||
| 1788 | dprintk_cont(", error = %d\n", status); | 1789 | dprintk_cont(", error = %d\n", status); |
| 1789 | goto next; | 1790 | goto next; |
| 1790 | } | 1791 | } |
| @@ -1793,7 +1794,8 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data) | |||
| 1793 | * returned by the server against all stored verfs. */ | 1794 | * returned by the server against all stored verfs. */ |
| 1794 | if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) { | 1795 | if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) { |
| 1795 | /* We have a match */ | 1796 | /* We have a match */ |
| 1796 | nfs_inode_remove_request(req); | 1797 | if (req->wb_page) |
| 1798 | nfs_inode_remove_request(req); | ||
| 1797 | dprintk_cont(" OK\n"); | 1799 | dprintk_cont(" OK\n"); |
| 1798 | goto next; | 1800 | goto next; |
| 1799 | } | 1801 | } |
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h index d04547fcf274..eb00bc133bca 100644 --- a/fs/xfs/libxfs/xfs_dir2_priv.h +++ b/fs/xfs/libxfs/xfs_dir2_priv.h | |||
| @@ -125,6 +125,8 @@ extern int xfs_dir2_sf_create(struct xfs_da_args *args, xfs_ino_t pino); | |||
| 125 | extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); | 125 | extern int xfs_dir2_sf_lookup(struct xfs_da_args *args); |
| 126 | extern int xfs_dir2_sf_removename(struct xfs_da_args *args); | 126 | extern int xfs_dir2_sf_removename(struct xfs_da_args *args); |
| 127 | extern int xfs_dir2_sf_replace(struct xfs_da_args *args); | 127 | extern int xfs_dir2_sf_replace(struct xfs_da_args *args); |
| 128 | extern int xfs_dir2_sf_verify(struct xfs_mount *mp, struct xfs_dir2_sf_hdr *sfp, | ||
| 129 | int size); | ||
| 128 | 130 | ||
| 129 | /* xfs_dir2_readdir.c */ | 131 | /* xfs_dir2_readdir.c */ |
| 130 | extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx, | 132 | extern int xfs_readdir(struct xfs_inode *dp, struct dir_context *ctx, |
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c index c6809ff41197..96b45cd6c63f 100644 --- a/fs/xfs/libxfs/xfs_dir2_sf.c +++ b/fs/xfs/libxfs/xfs_dir2_sf.c | |||
| @@ -629,6 +629,93 @@ xfs_dir2_sf_check( | |||
| 629 | } | 629 | } |
| 630 | #endif /* DEBUG */ | 630 | #endif /* DEBUG */ |
| 631 | 631 | ||
| 632 | /* Verify the consistency of an inline directory. */ | ||
| 633 | int | ||
| 634 | xfs_dir2_sf_verify( | ||
| 635 | struct xfs_mount *mp, | ||
| 636 | struct xfs_dir2_sf_hdr *sfp, | ||
| 637 | int size) | ||
| 638 | { | ||
| 639 | struct xfs_dir2_sf_entry *sfep; | ||
| 640 | struct xfs_dir2_sf_entry *next_sfep; | ||
| 641 | char *endp; | ||
| 642 | const struct xfs_dir_ops *dops; | ||
| 643 | xfs_ino_t ino; | ||
| 644 | int i; | ||
| 645 | int i8count; | ||
| 646 | int offset; | ||
| 647 | __uint8_t filetype; | ||
| 648 | |||
| 649 | dops = xfs_dir_get_ops(mp, NULL); | ||
| 650 | |||
| 651 | /* | ||
| 652 | * Give up if the directory is way too short. | ||
| 653 | */ | ||
| 654 | XFS_WANT_CORRUPTED_RETURN(mp, size > | ||
| 655 | offsetof(struct xfs_dir2_sf_hdr, parent)); | ||
| 656 | XFS_WANT_CORRUPTED_RETURN(mp, size >= | ||
| 657 | xfs_dir2_sf_hdr_size(sfp->i8count)); | ||
| 658 | |||
| 659 | endp = (char *)sfp + size; | ||
| 660 | |||
| 661 | /* Check .. entry */ | ||
| 662 | ino = dops->sf_get_parent_ino(sfp); | ||
| 663 | i8count = ino > XFS_DIR2_MAX_SHORT_INUM; | ||
| 664 | XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino)); | ||
| 665 | offset = dops->data_first_offset; | ||
| 666 | |||
| 667 | /* Check all reported entries */ | ||
| 668 | sfep = xfs_dir2_sf_firstentry(sfp); | ||
| 669 | for (i = 0; i < sfp->count; i++) { | ||
| 670 | /* | ||
| 671 | * struct xfs_dir2_sf_entry has a variable length. | ||
| 672 | * Check the fixed-offset parts of the structure are | ||
| 673 | * within the data buffer. | ||
| 674 | */ | ||
| 675 | XFS_WANT_CORRUPTED_RETURN(mp, | ||
| 676 | ((char *)sfep + sizeof(*sfep)) < endp); | ||
| 677 | |||
| 678 | /* Don't allow names with known bad length. */ | ||
| 679 | XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen > 0); | ||
| 680 | XFS_WANT_CORRUPTED_RETURN(mp, sfep->namelen < MAXNAMELEN); | ||
| 681 | |||
| 682 | /* | ||
| 683 | * Check that the variable-length part of the structure is | ||
| 684 | * within the data buffer. The next entry starts after the | ||
| 685 | * name component, so nextentry is an acceptable test. | ||
| 686 | */ | ||
| 687 | next_sfep = dops->sf_nextentry(sfp, sfep); | ||
| 688 | XFS_WANT_CORRUPTED_RETURN(mp, endp >= (char *)next_sfep); | ||
| 689 | |||
| 690 | /* Check that the offsets always increase. */ | ||
| 691 | XFS_WANT_CORRUPTED_RETURN(mp, | ||
| 692 | xfs_dir2_sf_get_offset(sfep) >= offset); | ||
| 693 | |||
| 694 | /* Check the inode number. */ | ||
| 695 | ino = dops->sf_get_ino(sfp, sfep); | ||
| 696 | i8count += ino > XFS_DIR2_MAX_SHORT_INUM; | ||
| 697 | XFS_WANT_CORRUPTED_RETURN(mp, !xfs_dir_ino_validate(mp, ino)); | ||
| 698 | |||
| 699 | /* Check the file type. */ | ||
| 700 | filetype = dops->sf_get_ftype(sfep); | ||
| 701 | XFS_WANT_CORRUPTED_RETURN(mp, filetype < XFS_DIR3_FT_MAX); | ||
| 702 | |||
| 703 | offset = xfs_dir2_sf_get_offset(sfep) + | ||
| 704 | dops->data_entsize(sfep->namelen); | ||
| 705 | |||
| 706 | sfep = next_sfep; | ||
| 707 | } | ||
| 708 | XFS_WANT_CORRUPTED_RETURN(mp, i8count == sfp->i8count); | ||
| 709 | XFS_WANT_CORRUPTED_RETURN(mp, (void *)sfep == (void *)endp); | ||
| 710 | |||
| 711 | /* Make sure this whole thing ought to be in local format. */ | ||
| 712 | XFS_WANT_CORRUPTED_RETURN(mp, offset + | ||
| 713 | (sfp->count + 2) * (uint)sizeof(xfs_dir2_leaf_entry_t) + | ||
| 714 | (uint)sizeof(xfs_dir2_block_tail_t) <= mp->m_dir_geo->blksize); | ||
| 715 | |||
| 716 | return 0; | ||
| 717 | } | ||
| 718 | |||
| 632 | /* | 719 | /* |
| 633 | * Create a new (shortform) directory. | 720 | * Create a new (shortform) directory. |
| 634 | */ | 721 | */ |
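xfs_dir2_sf_verify() is built entirely from XFS_WANT_CORRUPTED_RETURN(mp, expr), which reports corruption against mp and bails out of the enclosing function with -EFSCORRUPTED when expr is false. Roughly (a paraphrase of the macro, not its exact definition):

    #define XFS_WANT_CORRUPTED_RETURN(mp, expr)             \
            do {                                            \
                    if (unlikely(!(expr))) {                \
                            /* log an error report for mp */\
                            return -EFSCORRUPTED;           \
                    }                                       \
            } while (0)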
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c index 25c1e078aef6..9653e964eda4 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.c +++ b/fs/xfs/libxfs/xfs_inode_fork.c | |||
| @@ -33,6 +33,8 @@ | |||
| 33 | #include "xfs_trace.h" | 33 | #include "xfs_trace.h" |
| 34 | #include "xfs_attr_sf.h" | 34 | #include "xfs_attr_sf.h" |
| 35 | #include "xfs_da_format.h" | 35 | #include "xfs_da_format.h" |
| 36 | #include "xfs_da_btree.h" | ||
| 37 | #include "xfs_dir2_priv.h" | ||
| 36 | 38 | ||
| 37 | kmem_zone_t *xfs_ifork_zone; | 39 | kmem_zone_t *xfs_ifork_zone; |
| 38 | 40 | ||
| @@ -320,6 +322,7 @@ xfs_iformat_local( | |||
| 320 | int whichfork, | 322 | int whichfork, |
| 321 | int size) | 323 | int size) |
| 322 | { | 324 | { |
| 325 | int error; | ||
| 323 | 326 | ||
| 324 | /* | 327 | /* |
| 325 | * If the size is unreasonable, then something | 328 | * If the size is unreasonable, then something |
| @@ -336,6 +339,14 @@ xfs_iformat_local( | |||
| 336 | return -EFSCORRUPTED; | 339 | return -EFSCORRUPTED; |
| 337 | } | 340 | } |
| 338 | 341 | ||
| 342 | if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) { | ||
| 343 | error = xfs_dir2_sf_verify(ip->i_mount, | ||
| 344 | (struct xfs_dir2_sf_hdr *)XFS_DFORK_DPTR(dip), | ||
| 345 | size); | ||
| 346 | if (error) | ||
| 347 | return error; | ||
| 348 | } | ||
| 349 | |||
| 339 | xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size); | 350 | xfs_init_local_fork(ip, whichfork, XFS_DFORK_PTR(dip, whichfork), size); |
| 340 | return 0; | 351 | return 0; |
| 341 | } | 352 | } |
| @@ -856,7 +867,7 @@ xfs_iextents_copy( | |||
| 856 | * In these cases, the format always takes precedence, because the | 867 | * In these cases, the format always takes precedence, because the |
| 857 | * format indicates the current state of the fork. | 868 | * format indicates the current state of the fork. |
| 858 | */ | 869 | */ |
| 859 | void | 870 | int |
| 860 | xfs_iflush_fork( | 871 | xfs_iflush_fork( |
| 861 | xfs_inode_t *ip, | 872 | xfs_inode_t *ip, |
| 862 | xfs_dinode_t *dip, | 873 | xfs_dinode_t *dip, |
| @@ -866,6 +877,7 @@ xfs_iflush_fork( | |||
| 866 | char *cp; | 877 | char *cp; |
| 867 | xfs_ifork_t *ifp; | 878 | xfs_ifork_t *ifp; |
| 868 | xfs_mount_t *mp; | 879 | xfs_mount_t *mp; |
| 880 | int error; | ||
| 869 | static const short brootflag[2] = | 881 | static const short brootflag[2] = |
| 870 | { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; | 882 | { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT }; |
| 871 | static const short dataflag[2] = | 883 | static const short dataflag[2] = |
| @@ -874,7 +886,7 @@ xfs_iflush_fork( | |||
| 874 | { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; | 886 | { XFS_ILOG_DEXT, XFS_ILOG_AEXT }; |
| 875 | 887 | ||
| 876 | if (!iip) | 888 | if (!iip) |
| 877 | return; | 889 | return 0; |
| 878 | ifp = XFS_IFORK_PTR(ip, whichfork); | 890 | ifp = XFS_IFORK_PTR(ip, whichfork); |
| 879 | /* | 891 | /* |
| 880 | * This can happen if we gave up in iformat in an error path, | 892 | * This can happen if we gave up in iformat in an error path, |
| @@ -882,12 +894,19 @@ xfs_iflush_fork( | |||
| 882 | */ | 894 | */ |
| 883 | if (!ifp) { | 895 | if (!ifp) { |
| 884 | ASSERT(whichfork == XFS_ATTR_FORK); | 896 | ASSERT(whichfork == XFS_ATTR_FORK); |
| 885 | return; | 897 | return 0; |
| 886 | } | 898 | } |
| 887 | cp = XFS_DFORK_PTR(dip, whichfork); | 899 | cp = XFS_DFORK_PTR(dip, whichfork); |
| 888 | mp = ip->i_mount; | 900 | mp = ip->i_mount; |
| 889 | switch (XFS_IFORK_FORMAT(ip, whichfork)) { | 901 | switch (XFS_IFORK_FORMAT(ip, whichfork)) { |
| 890 | case XFS_DINODE_FMT_LOCAL: | 902 | case XFS_DINODE_FMT_LOCAL: |
| 903 | if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) { | ||
| 904 | error = xfs_dir2_sf_verify(mp, | ||
| 905 | (struct xfs_dir2_sf_hdr *)ifp->if_u1.if_data, | ||
| 906 | ifp->if_bytes); | ||
| 907 | if (error) | ||
| 908 | return error; | ||
| 909 | } | ||
| 891 | if ((iip->ili_fields & dataflag[whichfork]) && | 910 | if ((iip->ili_fields & dataflag[whichfork]) && |
| 892 | (ifp->if_bytes > 0)) { | 911 | (ifp->if_bytes > 0)) { |
| 893 | ASSERT(ifp->if_u1.if_data != NULL); | 912 | ASSERT(ifp->if_u1.if_data != NULL); |
| @@ -940,6 +959,7 @@ xfs_iflush_fork( | |||
| 940 | ASSERT(0); | 959 | ASSERT(0); |
| 941 | break; | 960 | break; |
| 942 | } | 961 | } |
| 962 | return 0; | ||
| 943 | } | 963 | } |
| 944 | 964 | ||
| 945 | /* | 965 | /* |
diff --git a/fs/xfs/libxfs/xfs_inode_fork.h b/fs/xfs/libxfs/xfs_inode_fork.h index 7fb8365326d1..132dc59fdde6 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.h +++ b/fs/xfs/libxfs/xfs_inode_fork.h | |||
| @@ -140,7 +140,7 @@ typedef struct xfs_ifork { | |||
| 140 | struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state); | 140 | struct xfs_ifork *xfs_iext_state_to_fork(struct xfs_inode *ip, int state); |
| 141 | 141 | ||
| 142 | int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *); | 142 | int xfs_iformat_fork(struct xfs_inode *, struct xfs_dinode *); |
| 143 | void xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, | 143 | int xfs_iflush_fork(struct xfs_inode *, struct xfs_dinode *, |
| 144 | struct xfs_inode_log_item *, int); | 144 | struct xfs_inode_log_item *, int); |
| 145 | void xfs_idestroy_fork(struct xfs_inode *, int); | 145 | void xfs_idestroy_fork(struct xfs_inode *, int); |
| 146 | void xfs_idata_realloc(struct xfs_inode *, int, int); | 146 | void xfs_idata_realloc(struct xfs_inode *, int, int); |
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c index 003a99b83bd8..ad9396e516f6 100644 --- a/fs/xfs/xfs_dir2_readdir.c +++ b/fs/xfs/xfs_dir2_readdir.c | |||
| @@ -71,22 +71,11 @@ xfs_dir2_sf_getdents( | |||
| 71 | struct xfs_da_geometry *geo = args->geo; | 71 | struct xfs_da_geometry *geo = args->geo; |
| 72 | 72 | ||
| 73 | ASSERT(dp->i_df.if_flags & XFS_IFINLINE); | 73 | ASSERT(dp->i_df.if_flags & XFS_IFINLINE); |
| 74 | /* | ||
| 75 | * Give up if the directory is way too short. | ||
| 76 | */ | ||
| 77 | if (dp->i_d.di_size < offsetof(xfs_dir2_sf_hdr_t, parent)) { | ||
| 78 | ASSERT(XFS_FORCED_SHUTDOWN(dp->i_mount)); | ||
| 79 | return -EIO; | ||
| 80 | } | ||
| 81 | |||
| 82 | ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); | 74 | ASSERT(dp->i_df.if_bytes == dp->i_d.di_size); |
| 83 | ASSERT(dp->i_df.if_u1.if_data != NULL); | 75 | ASSERT(dp->i_df.if_u1.if_data != NULL); |
| 84 | 76 | ||
| 85 | sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; | 77 | sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data; |
| 86 | 78 | ||
| 87 | if (dp->i_d.di_size < xfs_dir2_sf_hdr_size(sfp->i8count)) | ||
| 88 | return -EFSCORRUPTED; | ||
| 89 | |||
| 90 | /* | 79 | /* |
| 91 | * If the block number in the offset is out of range, we're done. | 80 | * If the block number in the offset is out of range, we're done. |
| 92 | */ | 81 | */ |
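Deleting these checks from xfs_dir2_sf_getdents() is safe because they moved into xfs_dir2_sf_verify(), which the xfs_inode_fork.c hunks above run in both directions: xfs_iformat_local() rejects a corrupt inline directory as the inode is read in, and xfs_iflush_fork() (now returning int, so xfs_iflush_int() below can propagate the failure) refuses to write one back out. Both call sites share the same gate:

    if (S_ISDIR(VFS_I(ip)->i_mode) && whichfork == XFS_DATA_FORK) {
            error = xfs_dir2_sf_verify(mp, sfp, size);
            if (error)                  /* -EFSCORRUPTED */
                    return error;
    }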
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 7eaf1ef74e3c..c7fe2c2123ab 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
| @@ -3475,6 +3475,7 @@ xfs_iflush_int( | |||
| 3475 | struct xfs_inode_log_item *iip = ip->i_itemp; | 3475 | struct xfs_inode_log_item *iip = ip->i_itemp; |
| 3476 | struct xfs_dinode *dip; | 3476 | struct xfs_dinode *dip; |
| 3477 | struct xfs_mount *mp = ip->i_mount; | 3477 | struct xfs_mount *mp = ip->i_mount; |
| 3478 | int error; | ||
| 3478 | 3479 | ||
| 3479 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); | 3480 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); |
| 3480 | ASSERT(xfs_isiflocked(ip)); | 3481 | ASSERT(xfs_isiflocked(ip)); |
| @@ -3557,9 +3558,14 @@ xfs_iflush_int( | |||
| 3557 | if (ip->i_d.di_flushiter == DI_MAX_FLUSH) | 3558 | if (ip->i_d.di_flushiter == DI_MAX_FLUSH) |
| 3558 | ip->i_d.di_flushiter = 0; | 3559 | ip->i_d.di_flushiter = 0; |
| 3559 | 3560 | ||
| 3560 | xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); | 3561 | error = xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); |
| 3561 | if (XFS_IFORK_Q(ip)) | 3562 | if (error) |
| 3562 | xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); | 3563 | return error; |
| 3564 | if (XFS_IFORK_Q(ip)) { | ||
| 3565 | error = xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); | ||
| 3566 | if (error) | ||
| 3567 | return error; | ||
| 3568 | } | ||
| 3563 | xfs_inobp_check(mp, bp); | 3569 | xfs_inobp_check(mp, bp); |
| 3564 | 3570 | ||
| 3565 | /* | 3571 | /* |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 673acda012af..9b05886f9773 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
| @@ -287,18 +287,15 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id) | |||
| 287 | } | 287 | } |
| 288 | 288 | ||
| 289 | /* Validate the processor object's proc_id */ | 289 | /* Validate the processor object's proc_id */ |
| 290 | bool acpi_processor_validate_proc_id(int proc_id); | 290 | bool acpi_duplicate_processor_id(int proc_id); |
| 291 | 291 | ||
| 292 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | 292 | #ifdef CONFIG_ACPI_HOTPLUG_CPU |
| 293 | /* Arch dependent functions for cpu hotplug support */ | 293 | /* Arch dependent functions for cpu hotplug support */ |
| 294 | int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, | 294 | int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, |
| 295 | int *pcpu); | 295 | int *pcpu); |
| 296 | int acpi_unmap_cpu(int cpu); | 296 | int acpi_unmap_cpu(int cpu); |
| 297 | int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid); | ||
| 298 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ | 297 | #endif /* CONFIG_ACPI_HOTPLUG_CPU */ |
| 299 | 298 | ||
| 300 | void acpi_set_processor_mapping(void); | ||
| 301 | |||
| 302 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC | 299 | #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC |
| 303 | int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); | 300 | int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr); |
| 304 | #endif | 301 | #endif |
diff --git a/include/linux/ccp.h b/include/linux/ccp.h index c71dd8fa5764..c41b8d99dd0e 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h | |||
| @@ -556,7 +556,7 @@ enum ccp_engine { | |||
| 556 | * struct ccp_cmd - CCP operation request | 556 | * struct ccp_cmd - CCP operation request |
| 557 | * @entry: list element (ccp driver use only) | 557 | * @entry: list element (ccp driver use only) |
| 558 | * @work: work element used for callbacks (ccp driver use only) | 558 | * @work: work element used for callbacks (ccp driver use only) |
| 559 | * @ccp: CCP device to be run on (ccp driver use only) | 559 | * @ccp: CCP device to be run on |
| 560 | * @ret: operation return code (ccp driver use only) | 560 | * @ret: operation return code (ccp driver use only) |
| 561 | * @flags: cmd processing flags | 561 | * @flags: cmd processing flags |
| 562 | * @engine: CCP operation to perform | 562 | * @engine: CCP operation to perform |
diff --git a/include/linux/device.h b/include/linux/device.h index 30c4570e928d..9ef518af5515 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
| @@ -1140,7 +1140,6 @@ static inline bool device_supports_offline(struct device *dev) | |||
| 1140 | extern void lock_device_hotplug(void); | 1140 | extern void lock_device_hotplug(void); |
| 1141 | extern void unlock_device_hotplug(void); | 1141 | extern void unlock_device_hotplug(void); |
| 1142 | extern int lock_device_hotplug_sysfs(void); | 1142 | extern int lock_device_hotplug_sysfs(void); |
| 1143 | void assert_held_device_hotplug(void); | ||
| 1144 | extern int device_offline(struct device *dev); | 1143 | extern int device_offline(struct device *dev); |
| 1145 | extern int device_online(struct device *dev); | 1144 | extern int device_online(struct device *dev); |
| 1146 | extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); | 1145 | extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode); |
diff --git a/include/linux/errqueue.h b/include/linux/errqueue.h index 9ca23fcfb5d7..6fdfc884fdeb 100644 --- a/include/linux/errqueue.h +++ b/include/linux/errqueue.h | |||
| @@ -20,6 +20,8 @@ struct sock_exterr_skb { | |||
| 20 | struct sock_extended_err ee; | 20 | struct sock_extended_err ee; |
| 21 | u16 addr_offset; | 21 | u16 addr_offset; |
| 22 | __be16 port; | 22 | __be16 port; |
| 23 | u8 opt_stats:1, | ||
| 24 | unused:7; | ||
| 23 | }; | 25 | }; |
| 24 | 26 | ||
| 25 | #endif | 27 | #endif |
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h index 547f81592ba1..10c1abfbac6c 100644 --- a/include/linux/fscrypt_common.h +++ b/include/linux/fscrypt_common.h | |||
| @@ -87,7 +87,6 @@ struct fscrypt_operations { | |||
| 87 | unsigned int flags; | 87 | unsigned int flags; |
| 88 | const char *key_prefix; | 88 | const char *key_prefix; |
| 89 | int (*get_context)(struct inode *, void *, size_t); | 89 | int (*get_context)(struct inode *, void *, size_t); |
| 90 | int (*prepare_context)(struct inode *); | ||
| 91 | int (*set_context)(struct inode *, const void *, size_t, void *); | 90 | int (*set_context)(struct inode *, const void *, size_t, void *); |
| 92 | int (*dummy_context)(struct inode *); | 91 | int (*dummy_context)(struct inode *); |
| 93 | bool (*is_encrypted)(struct inode *); | 92 | bool (*is_encrypted)(struct inode *); |
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 2484b2fcc6eb..933d93656605 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h | |||
| @@ -143,15 +143,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, | |||
| 143 | struct fwnode_handle *child, | 143 | struct fwnode_handle *child, |
| 144 | enum gpiod_flags flags, | 144 | enum gpiod_flags flags, |
| 145 | const char *label); | 145 | const char *label); |
| 146 | /* FIXME: delete this helper when users are switched over */ | ||
| 147 | static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, | ||
| 148 | const char *con_id, struct fwnode_handle *child) | ||
| 149 | { | ||
| 150 | return devm_fwnode_get_index_gpiod_from_child(dev, con_id, | ||
| 151 | 0, child, | ||
| 152 | GPIOD_ASIS, | ||
| 153 | "?"); | ||
| 154 | } | ||
| 155 | 146 | ||
| 156 | #else /* CONFIG_GPIOLIB */ | 147 | #else /* CONFIG_GPIOLIB */ |
| 157 | 148 | ||
| @@ -444,13 +435,6 @@ struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev, | |||
| 444 | return ERR_PTR(-ENOSYS); | 435 | return ERR_PTR(-ENOSYS); |
| 445 | } | 436 | } |
| 446 | 437 | ||
| 447 | /* FIXME: delete this when all users are switched over */ | ||
| 448 | static inline struct gpio_desc *devm_get_gpiod_from_child(struct device *dev, | ||
| 449 | const char *con_id, struct fwnode_handle *child) | ||
| 450 | { | ||
| 451 | return ERR_PTR(-ENOSYS); | ||
| 452 | } | ||
| 453 | |||
| 454 | #endif /* CONFIG_GPIOLIB */ | 438 | #endif /* CONFIG_GPIOLIB */ |
| 455 | 439 | ||
| 456 | static inline | 440 | static inline |
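Migration note: the removed devm_get_gpiod_from_child() wrapper is gone, so callers move to the underlying helper and spell out the index, flags, and label themselves. A minimal sketch of the equivalent call, using the signature shown above (the "reset" connection ID and label are placeholders):

	struct gpio_desc *desc;

	/* old: desc = devm_get_gpiod_from_child(dev, "reset", child); */
	desc = devm_fwnode_get_index_gpiod_from_child(dev, "reset", 0, child,
						      GPIOD_ASIS, "reset");
	if (IS_ERR(desc))
		return PTR_ERR(desc);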
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 78d59dba563e..88b673749121 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h | |||
| @@ -88,6 +88,7 @@ enum hwmon_temp_attributes { | |||
| 88 | #define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst) | 88 | #define HWMON_T_CRIT_HYST BIT(hwmon_temp_crit_hyst) |
| 89 | #define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency) | 89 | #define HWMON_T_EMERGENCY BIT(hwmon_temp_emergency) |
| 90 | #define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst) | 90 | #define HWMON_T_EMERGENCY_HYST BIT(hwmon_temp_emergency_hyst) |
| 91 | #define HWMON_T_ALARM BIT(hwmon_temp_alarm) | ||
| 91 | #define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm) | 92 | #define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm) |
| 92 | #define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm) | 93 | #define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm) |
| 93 | #define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm) | 94 | #define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm) |
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 62bbf3c1aa4a..970771a5f739 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
| @@ -845,6 +845,13 @@ struct vmbus_channel { | |||
| 845 | * link up channels based on their CPU affinity. | 845 | * link up channels based on their CPU affinity. |
| 846 | */ | 846 | */ |
| 847 | struct list_head percpu_list; | 847 | struct list_head percpu_list; |
| 848 | |||
| 849 | /* | ||
| 850 | * Defer freeing the channel until all CPUs have | ||
| 851 | * gone through a grace period. | ||
| 852 | */ | ||
| 853 | struct rcu_head rcu; | ||
| 854 | |||
| 848 | /* | 855 | /* |
| 849 | * For performance critical channels (storage, networking | 856 | * For performance critical channels (storage, networking |
| 850 | * etc,), Hyper-V has a mechanism to enhance the throughput | 857 | * etc,), Hyper-V has a mechanism to enhance the throughput |
| @@ -1430,9 +1437,6 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, | |||
| 1430 | const int *srv_version, int srv_vercnt, | 1437 | const int *srv_version, int srv_vercnt, |
| 1431 | int *nego_fw_version, int *nego_srv_version); | 1438 | int *nego_fw_version, int *nego_srv_version); |
| 1432 | 1439 | ||
| 1433 | void hv_event_tasklet_disable(struct vmbus_channel *channel); | ||
| 1434 | void hv_event_tasklet_enable(struct vmbus_channel *channel); | ||
| 1435 | |||
| 1436 | void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); | 1440 | void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); |
| 1437 | 1441 | ||
| 1438 | void vmbus_setevent(struct vmbus_channel *channel); | 1442 | void vmbus_setevent(struct vmbus_channel *channel); |
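The new rcu_head lets the channel be freed only after an RCU grace period, so CPUs still walking the per-CPU channel lists never touch a freed object. A minimal sketch of the call_rcu() pattern this field enables (the callback name is an assumption, not the actual vmbus code):

	static void vmbus_channel_free_rcu(struct rcu_head *head)
	{
		struct vmbus_channel *channel =
			container_of(head, struct vmbus_channel, rcu);

		kfree(channel);	/* runs after all current RCU readers finish */
	}

	/* instead of an immediate kfree(channel): */
	call_rcu(&channel->rcu, vmbus_channel_free_rcu);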
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h index 23ca41515527..fa7931933067 100644 --- a/include/linux/iio/sw_device.h +++ b/include/linux/iio/sw_device.h | |||
| @@ -62,7 +62,7 @@ void iio_swd_group_init_type_name(struct iio_sw_device *d, | |||
| 62 | const char *name, | 62 | const char *name, |
| 63 | struct config_item_type *type) | 63 | struct config_item_type *type) |
| 64 | { | 64 | { |
| 65 | #ifdef CONFIG_CONFIGFS_FS | 65 | #if IS_ENABLED(CONFIG_CONFIGFS_FS) |
| 66 | config_group_init_type_name(&d->group, name, type); | 66 | config_group_init_type_name(&d->group, name, type); |
| 67 | #endif | 67 | #endif |
| 68 | } | 68 | } |
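The switch matters because a plain #ifdef only sees built-in configs: with CONFIG_CONFIGFS_FS=m the preprocessor defines CONFIG_CONFIGFS_FS_MODULE instead, so the #ifdef branch silently drops out. IS_ENABLED() covers both cases:

	/*
	 * CONFIG_FOO=y  ->  CONFIG_FOO is defined
	 * CONFIG_FOO=m  ->  CONFIG_FOO_MODULE is defined instead
	 *
	 * #ifdef CONFIG_FOO           true only for =y
	 * #if IS_ENABLED(CONFIG_FOO)  true for =y and =m
	 */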
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 6a6de187ddc0..2e4de0deee53 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
| @@ -125,9 +125,16 @@ enum iommu_attr { | |||
| 125 | }; | 125 | }; |
| 126 | 126 | ||
| 127 | /* These are the possible reserved region types */ | 127 | /* These are the possible reserved region types */ |
| 128 | #define IOMMU_RESV_DIRECT (1 << 0) | 128 | enum iommu_resv_type { |
| 129 | #define IOMMU_RESV_RESERVED (1 << 1) | 129 | /* Memory regions which must be mapped 1:1 at all times */ |
| 130 | #define IOMMU_RESV_MSI (1 << 2) | 130 | IOMMU_RESV_DIRECT, |
| 131 | /* Arbitrary "never map this or give it to a device" address ranges */ | ||
| 132 | IOMMU_RESV_RESERVED, | ||
| 133 | /* Hardware MSI region (untranslated) */ | ||
| 134 | IOMMU_RESV_MSI, | ||
| 135 | /* Software-managed MSI translation window */ | ||
| 136 | IOMMU_RESV_SW_MSI, | ||
| 137 | }; | ||
| 131 | 138 | ||
| 132 | /** | 139 | /** |
| 133 | * struct iommu_resv_region - descriptor for a reserved memory region | 140 | * struct iommu_resv_region - descriptor for a reserved memory region |
| @@ -142,7 +149,7 @@ struct iommu_resv_region { | |||
| 142 | phys_addr_t start; | 149 | phys_addr_t start; |
| 143 | size_t length; | 150 | size_t length; |
| 144 | int prot; | 151 | int prot; |
| 145 | int type; | 152 | enum iommu_resv_type type; |
| 146 | }; | 153 | }; |
| 147 | 154 | ||
| 148 | #ifdef CONFIG_IOMMU_API | 155 | #ifdef CONFIG_IOMMU_API |
| @@ -288,7 +295,8 @@ extern void iommu_get_resv_regions(struct device *dev, struct list_head *list); | |||
| 288 | extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); | 295 | extern void iommu_put_resv_regions(struct device *dev, struct list_head *list); |
| 289 | extern int iommu_request_dm_for_dev(struct device *dev); | 296 | extern int iommu_request_dm_for_dev(struct device *dev); |
| 290 | extern struct iommu_resv_region * | 297 | extern struct iommu_resv_region * |
| 291 | iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, int type); | 298 | iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, |
| 299 | enum iommu_resv_type type); | ||
| 292 | extern int iommu_get_group_resv_regions(struct iommu_group *group, | 300 | extern int iommu_get_group_resv_regions(struct iommu_group *group, |
| 293 | struct list_head *head); | 301 | struct list_head *head); |
| 294 | 302 | ||
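With the region types turned into an enum, callers pass a symbolic type instead of a raw bit. A hedged sketch of a driver registering a software-managed MSI window from its get_resv_regions() hook (the base address and size are placeholders; the region's list member is assumed per the full iommu.h definition):

	struct iommu_resv_region *region;

	region = iommu_alloc_resv_region(0x08000000, SZ_1M,
					 IOMMU_READ | IOMMU_WRITE,
					 IOMMU_RESV_SW_MSI);
	if (!region)
		return;
	list_add_tail(&region->list, head);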
diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 1c823bef4c15..5734480c9590 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | struct kmem_cache; | 6 | struct kmem_cache; |
| 7 | struct page; | 7 | struct page; |
| 8 | struct vm_struct; | 8 | struct vm_struct; |
| 9 | struct task_struct; | ||
| 9 | 10 | ||
| 10 | #ifdef CONFIG_KASAN | 11 | #ifdef CONFIG_KASAN |
| 11 | 12 | ||
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 2c14ad9809da..d0250744507a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -162,8 +162,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, | |||
| 162 | int len, void *val); | 162 | int len, void *val); |
| 163 | int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, | 163 | int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, |
| 164 | int len, struct kvm_io_device *dev); | 164 | int len, struct kvm_io_device *dev); |
| 165 | int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, | 165 | void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
| 166 | struct kvm_io_device *dev); | 166 | struct kvm_io_device *dev); |
| 167 | struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, | 167 | struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
| 168 | gpa_t addr); | 168 | gpa_t addr); |
| 169 | 169 | ||
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 7e66e4f62858..1beb1ec2fbdf 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
| @@ -476,6 +476,7 @@ enum { | |||
| 476 | enum { | 476 | enum { |
| 477 | MLX4_INTERFACE_STATE_UP = 1 << 0, | 477 | MLX4_INTERFACE_STATE_UP = 1 << 0, |
| 478 | MLX4_INTERFACE_STATE_DELETION = 1 << 1, | 478 | MLX4_INTERFACE_STATE_DELETION = 1 << 1, |
| 479 | MLX4_INTERFACE_STATE_NOWAIT = 1 << 2, | ||
| 479 | }; | 480 | }; |
| 480 | 481 | ||
| 481 | #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ | 482 | #define MSTR_SM_CHANGE_MASK (MLX4_EQ_PORT_INFO_MSTR_SM_SL_CHANGE_MASK | \ |
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h index 35d0fd7a4948..fd0de00c0d77 100644 --- a/include/linux/omap-gpmc.h +++ b/include/linux/omap-gpmc.h | |||
| @@ -76,22 +76,12 @@ struct gpmc_timings; | |||
| 76 | struct omap_nand_platform_data; | 76 | struct omap_nand_platform_data; |
| 77 | struct omap_onenand_platform_data; | 77 | struct omap_onenand_platform_data; |
| 78 | 78 | ||
| 79 | #if IS_ENABLED(CONFIG_MTD_NAND_OMAP2) | ||
| 80 | extern int gpmc_nand_init(struct omap_nand_platform_data *d, | ||
| 81 | struct gpmc_timings *gpmc_t); | ||
| 82 | #else | ||
| 83 | static inline int gpmc_nand_init(struct omap_nand_platform_data *d, | ||
| 84 | struct gpmc_timings *gpmc_t) | ||
| 85 | { | ||
| 86 | return 0; | ||
| 87 | } | ||
| 88 | #endif | ||
| 89 | |||
| 90 | #if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) | 79 | #if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) |
| 91 | extern void gpmc_onenand_init(struct omap_onenand_platform_data *d); | 80 | extern int gpmc_onenand_init(struct omap_onenand_platform_data *d); |
| 92 | #else | 81 | #else |
| 93 | #define board_onenand_data NULL | 82 | #define board_onenand_data NULL |
| 94 | static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d) | 83 | static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d) |
| 95 | { | 84 | { |
| 85 | return 0; | ||
| 96 | } | 86 | } |
| 97 | #endif | 87 | #endif |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index b6e75c9d4791..24a635887f28 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
| @@ -165,6 +165,13 @@ struct hw_perf_event { | |||
| 165 | struct list_head bp_list; | 165 | struct list_head bp_list; |
| 166 | }; | 166 | }; |
| 167 | #endif | 167 | #endif |
| 168 | struct { /* amd_iommu */ | ||
| 169 | u8 iommu_bank; | ||
| 170 | u8 iommu_cntr; | ||
| 171 | u16 padding; | ||
| 172 | u64 conf; | ||
| 173 | u64 conf1; | ||
| 174 | }; | ||
| 168 | }; | 175 | }; |
| 169 | /* | 176 | /* |
| 170 | * If the event is a per task event, this will point to the task in | 177 | * If the event is a per task event, this will point to the task in |
diff --git a/include/linux/reset.h b/include/linux/reset.h index 86b4ed75359e..96fb139bdd08 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h | |||
| @@ -31,31 +31,26 @@ static inline int device_reset_optional(struct device *dev) | |||
| 31 | 31 | ||
| 32 | static inline int reset_control_reset(struct reset_control *rstc) | 32 | static inline int reset_control_reset(struct reset_control *rstc) |
| 33 | { | 33 | { |
| 34 | WARN_ON(1); | ||
| 35 | return 0; | 34 | return 0; |
| 36 | } | 35 | } |
| 37 | 36 | ||
| 38 | static inline int reset_control_assert(struct reset_control *rstc) | 37 | static inline int reset_control_assert(struct reset_control *rstc) |
| 39 | { | 38 | { |
| 40 | WARN_ON(1); | ||
| 41 | return 0; | 39 | return 0; |
| 42 | } | 40 | } |
| 43 | 41 | ||
| 44 | static inline int reset_control_deassert(struct reset_control *rstc) | 42 | static inline int reset_control_deassert(struct reset_control *rstc) |
| 45 | { | 43 | { |
| 46 | WARN_ON(1); | ||
| 47 | return 0; | 44 | return 0; |
| 48 | } | 45 | } |
| 49 | 46 | ||
| 50 | static inline int reset_control_status(struct reset_control *rstc) | 47 | static inline int reset_control_status(struct reset_control *rstc) |
| 51 | { | 48 | { |
| 52 | WARN_ON(1); | ||
| 53 | return 0; | 49 | return 0; |
| 54 | } | 50 | } |
| 55 | 51 | ||
| 56 | static inline void reset_control_put(struct reset_control *rstc) | 52 | static inline void reset_control_put(struct reset_control *rstc) |
| 57 | { | 53 | { |
| 58 | WARN_ON(1); | ||
| 59 | } | 54 | } |
| 60 | 55 | ||
| 61 | static inline int __must_check device_reset(struct device *dev) | 56 | static inline int __must_check device_reset(struct device *dev) |
| @@ -74,14 +69,14 @@ static inline struct reset_control *__of_reset_control_get( | |||
| 74 | const char *id, int index, bool shared, | 69 | const char *id, int index, bool shared, |
| 75 | bool optional) | 70 | bool optional) |
| 76 | { | 71 | { |
| 77 | return ERR_PTR(-ENOTSUPP); | 72 | return optional ? NULL : ERR_PTR(-ENOTSUPP); |
| 78 | } | 73 | } |
| 79 | 74 | ||
| 80 | static inline struct reset_control *__devm_reset_control_get( | 75 | static inline struct reset_control *__devm_reset_control_get( |
| 81 | struct device *dev, const char *id, | 76 | struct device *dev, const char *id, |
| 82 | int index, bool shared, bool optional) | 77 | int index, bool shared, bool optional) |
| 83 | { | 78 | { |
| 84 | return ERR_PTR(-ENOTSUPP); | 79 | return optional ? NULL : ERR_PTR(-ENOTSUPP); |
| 85 | } | 80 | } |
| 86 | 81 | ||
| 87 | #endif /* CONFIG_RESET_CONTROLLER */ | 82 | #endif /* CONFIG_RESET_CONTROLLER */ |
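Dropping the WARN_ON()s makes the !CONFIG_RESET_CONTROLLER stubs behave the way the real core treats optional resets: the consumer gets NULL, and every reset_control_*() call on it is a quiet no-op. A sketch of the consumer pattern this enables, assuming the driver's reset line really is optional:

	struct reset_control *rstc;

	rstc = devm_reset_control_get_optional(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);	/* real error, e.g. -EPROBE_DEFER */

	/* rstc may be NULL here; both calls are then no-ops */
	reset_control_assert(rstc);
	reset_control_deassert(rstc);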
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 1d0043dc34e4..de2a722fe3cf 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h | |||
| @@ -50,4 +50,10 @@ | |||
| 50 | /* device can't handle Link Power Management */ | 50 | /* device can't handle Link Power Management */ |
| 51 | #define USB_QUIRK_NO_LPM BIT(10) | 51 | #define USB_QUIRK_NO_LPM BIT(10) |
| 52 | 52 | ||
| 53 | /* | ||
| 54 | * Device reports its bInterval as linear frames instead of the | ||
| 55 | * USB 2.0 calculation. | ||
| 56 | */ | ||
| 57 | #define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11) | ||
| 58 | |||
| 53 | #endif /* __LINUX_USB_QUIRKS_H */ | 59 | #endif /* __LINUX_USB_QUIRKS_H */ |
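For high-speed periodic endpoints the USB 2.0 spec encodes bInterval as an exponent, while devices needing this quirk report a plain frame count. A sketch of the two interpretations (variable names are illustrative):

	/* spec-compliant high-speed encoding: 2^(bInterval-1) microframes */
	period_uframes = 1 << (endpoint->desc.bInterval - 1);

	/* USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL: bInterval is already a
	 * linear count of 1 ms frames */
	period_frames = endpoint->desc.bInterval;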
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index 9638bfeb0d1f..584f9a647ad4 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h | |||
| @@ -48,6 +48,8 @@ struct virtio_vsock_pkt { | |||
| 48 | struct virtio_vsock_hdr hdr; | 48 | struct virtio_vsock_hdr hdr; |
| 49 | struct work_struct work; | 49 | struct work_struct work; |
| 50 | struct list_head list; | 50 | struct list_head list; |
| 51 | /* socket refcnt not held, only use for cancellation */ | ||
| 52 | struct vsock_sock *vsk; | ||
| 51 | void *buf; | 53 | void *buf; |
| 52 | u32 len; | 54 | u32 len; |
| 53 | u32 off; | 55 | u32 off; |
| @@ -56,6 +58,7 @@ struct virtio_vsock_pkt { | |||
| 56 | 58 | ||
| 57 | struct virtio_vsock_pkt_info { | 59 | struct virtio_vsock_pkt_info { |
| 58 | u32 remote_cid, remote_port; | 60 | u32 remote_cid, remote_port; |
| 61 | struct vsock_sock *vsk; | ||
| 59 | struct msghdr *msg; | 62 | struct msghdr *msg; |
| 60 | u32 pkt_len; | 63 | u32 pkt_len; |
| 61 | u16 type; | 64 | u16 type; |
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h index f2758964ce6f..f32ed9ac181a 100644 --- a/include/net/af_vsock.h +++ b/include/net/af_vsock.h | |||
| @@ -100,6 +100,9 @@ struct vsock_transport { | |||
| 100 | void (*destruct)(struct vsock_sock *); | 100 | void (*destruct)(struct vsock_sock *); |
| 101 | void (*release)(struct vsock_sock *); | 101 | void (*release)(struct vsock_sock *); |
| 102 | 102 | ||
| 103 | /* Cancel all pending packets sent on vsock. */ | ||
| 104 | int (*cancel_pkt)(struct vsock_sock *vsk); | ||
| 105 | |||
| 103 | /* Connections. */ | 106 | /* Connections. */ |
| 104 | int (*connect)(struct vsock_sock *); | 107 | int (*connect)(struct vsock_sock *); |
| 105 | 108 | ||
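A transport opts into cancellation by implementing the new op; together with the vsk backpointer added to virtio_vsock_pkt above, it can match and drop queued packets belonging to one socket. A hypothetical wiring (names are placeholders, not an in-tree transport):

	static int my_transport_cancel_pkt(struct vsock_sock *vsk)
	{
		/* walk the transport's send queue and free every packet
		 * whose pkt->vsk == vsk; return 0 on success */
		return 0;
	}

	static struct vsock_transport my_transport = {
		/* ...other mandatory ops... */
		.cancel_pkt = my_transport_cancel_pkt,
	};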
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index f540f9ad2af4..19605878da47 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h | |||
| @@ -244,7 +244,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct, | |||
| 244 | u32 seq); | 244 | u32 seq); |
| 245 | 245 | ||
| 246 | /* Fake conntrack entry for untracked connections */ | 246 | /* Fake conntrack entry for untracked connections */ |
| 247 | DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked); | 247 | DECLARE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked); |
| 248 | static inline struct nf_conn *nf_ct_untracked_get(void) | 248 | static inline struct nf_conn *nf_ct_untracked_get(void) |
| 249 | { | 249 | { |
| 250 | return raw_cpu_ptr(&nf_conntrack_untracked); | 250 | return raw_cpu_ptr(&nf_conntrack_untracked); |
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 2aa8a9d80fbe..0136028652bd 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
| @@ -103,6 +103,35 @@ struct nft_regs { | |||
| 103 | }; | 103 | }; |
| 104 | }; | 104 | }; |
| 105 | 105 | ||
| 106 | /* Store/load a u16 or u8 integer to/from the u32 data register. | ||
| 107 | * | ||
| 108 | * Note: when using concatenations, register allocation happens at the | ||
| 109 | * 32-bit level, so the store helpers pad the rest of the register | ||
| 110 | * with zeroes to avoid garbage values. | ||
| 111 | */ | ||
| 112 | |||
| 113 | static inline void nft_reg_store16(u32 *dreg, u16 val) | ||
| 114 | { | ||
| 115 | *dreg = 0; | ||
| 116 | *(u16 *)dreg = val; | ||
| 117 | } | ||
| 118 | |||
| 119 | static inline void nft_reg_store8(u32 *dreg, u8 val) | ||
| 120 | { | ||
| 121 | *dreg = 0; | ||
| 122 | *(u8 *)dreg = val; | ||
| 123 | } | ||
| 124 | |||
| 125 | static inline u16 nft_reg_load16(u32 *sreg) | ||
| 126 | { | ||
| 127 | return *(u16 *)sreg; | ||
| 128 | } | ||
| 129 | |||
| 130 | static inline u8 nft_reg_load8(u32 *sreg) | ||
| 131 | { | ||
| 132 | return *(u8 *)sreg; | ||
| 133 | } | ||
| 134 | |||
| 106 | static inline void nft_data_copy(u32 *dst, const struct nft_data *src, | 135 | static inline void nft_data_copy(u32 *dst, const struct nft_data *src, |
| 107 | unsigned int len) | 136 | unsigned int len) |
| 108 | { | 137 | { |
| @@ -203,7 +232,6 @@ struct nft_set_elem { | |||
| 203 | struct nft_set; | 232 | struct nft_set; |
| 204 | struct nft_set_iter { | 233 | struct nft_set_iter { |
| 205 | u8 genmask; | 234 | u8 genmask; |
| 206 | bool flush; | ||
| 207 | unsigned int count; | 235 | unsigned int count; |
| 208 | unsigned int skip; | 236 | unsigned int skip; |
| 209 | int err; | 237 | int err; |
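A quick illustration of why the store helpers zero the register first: with concatenations the full 32-bit register becomes part of a lookup key, so stale upper bytes would corrupt it. Usage sketch (values are illustrative):

	u32 reg;
	u16 port = 4739;
	u16 back;

	nft_reg_store16(&reg, port);	/* zeroes all 32 bits, then stores 16 */
	back = nft_reg_load16(&reg);	/* back == port, upper bits stay 0 */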
diff --git a/include/net/netfilter/nf_tables_ipv6.h b/include/net/netfilter/nf_tables_ipv6.h index d150b5066201..97983d1c05e4 100644 --- a/include/net/netfilter/nf_tables_ipv6.h +++ b/include/net/netfilter/nf_tables_ipv6.h | |||
| @@ -9,12 +9,13 @@ nft_set_pktinfo_ipv6(struct nft_pktinfo *pkt, | |||
| 9 | struct sk_buff *skb, | 9 | struct sk_buff *skb, |
| 10 | const struct nf_hook_state *state) | 10 | const struct nf_hook_state *state) |
| 11 | { | 11 | { |
| 12 | unsigned int flags = IP6_FH_F_AUTH; | ||
| 12 | int protohdr, thoff = 0; | 13 | int protohdr, thoff = 0; |
| 13 | unsigned short frag_off; | 14 | unsigned short frag_off; |
| 14 | 15 | ||
| 15 | nft_set_pktinfo(pkt, skb, state); | 16 | nft_set_pktinfo(pkt, skb, state); |
| 16 | 17 | ||
| 17 | protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); | 18 | protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); |
| 18 | if (protohdr < 0) { | 19 | if (protohdr < 0) { |
| 19 | nft_set_pktinfo_proto_unspec(pkt, skb); | 20 | nft_set_pktinfo_proto_unspec(pkt, skb); |
| 20 | return; | 21 | return; |
| @@ -32,6 +33,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, | |||
| 32 | const struct nf_hook_state *state) | 33 | const struct nf_hook_state *state) |
| 33 | { | 34 | { |
| 34 | #if IS_ENABLED(CONFIG_IPV6) | 35 | #if IS_ENABLED(CONFIG_IPV6) |
| 36 | unsigned int flags = IP6_FH_F_AUTH; | ||
| 35 | struct ipv6hdr *ip6h, _ip6h; | 37 | struct ipv6hdr *ip6h, _ip6h; |
| 36 | unsigned int thoff = 0; | 38 | unsigned int thoff = 0; |
| 37 | unsigned short frag_off; | 39 | unsigned short frag_off; |
| @@ -50,7 +52,7 @@ __nft_set_pktinfo_ipv6_validate(struct nft_pktinfo *pkt, | |||
| 50 | if (pkt_len + sizeof(*ip6h) > skb->len) | 52 | if (pkt_len + sizeof(*ip6h) > skb->len) |
| 51 | return -1; | 53 | return -1; |
| 52 | 54 | ||
| 53 | protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, NULL); | 55 | protohdr = ipv6_find_hdr(pkt->skb, &thoff, -1, &frag_off, &flags); |
| 54 | if (protohdr < 0) | 56 | if (protohdr < 0) |
| 55 | return -1; | 57 | return -1; |
| 56 | 58 | ||
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 07a0b128625a..592decebac75 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
| @@ -83,6 +83,7 @@ struct sctp_bind_addr; | |||
| 83 | struct sctp_ulpq; | 83 | struct sctp_ulpq; |
| 84 | struct sctp_ep_common; | 84 | struct sctp_ep_common; |
| 85 | struct crypto_shash; | 85 | struct crypto_shash; |
| 86 | struct sctp_stream; | ||
| 86 | 87 | ||
| 87 | 88 | ||
| 88 | #include <net/sctp/tsnmap.h> | 89 | #include <net/sctp/tsnmap.h> |
| @@ -753,6 +754,8 @@ struct sctp_transport { | |||
| 753 | /* Is the Path MTU update pending on this transport */ | 754 | /* Is the Path MTU update pending on this transport */ |
| 754 | pmtu_pending:1, | 755 | pmtu_pending:1, |
| 755 | 756 | ||
| 757 | dst_pending_confirm:1, /* need to confirm neighbour */ | ||
| 758 | |||
| 756 | /* Has this transport moved the ctsn since we last sacked */ | 759 | /* Has this transport moved the ctsn since we last sacked */ |
| 757 | sack_generation:1; | 760 | sack_generation:1; |
| 758 | u32 dst_cookie; | 761 | u32 dst_cookie; |
| @@ -806,8 +809,6 @@ struct sctp_transport { | |||
| 806 | 809 | ||
| 807 | __u32 burst_limited; /* Holds old cwnd when max.burst is applied */ | 810 | __u32 burst_limited; /* Holds old cwnd when max.burst is applied */ |
| 808 | 811 | ||
| 809 | __u32 dst_pending_confirm; /* need to confirm neighbour */ | ||
| 810 | |||
| 811 | /* Destination */ | 812 | /* Destination */ |
| 812 | struct dst_entry *dst; | 813 | struct dst_entry *dst; |
| 813 | /* Source address. */ | 814 | /* Source address. */ |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 0f1813c13687..99e4423eb2b8 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
| @@ -1863,6 +1863,9 @@ struct ib_port_immutable { | |||
| 1863 | }; | 1863 | }; |
| 1864 | 1864 | ||
| 1865 | struct ib_device { | 1865 | struct ib_device { |
| 1866 | /* Do not access @dma_device directly from ULP nor from HW drivers. */ | ||
| 1867 | struct device *dma_device; | ||
| 1868 | |||
| 1866 | char name[IB_DEVICE_NAME_MAX]; | 1869 | char name[IB_DEVICE_NAME_MAX]; |
| 1867 | 1870 | ||
| 1868 | struct list_head event_handler_list; | 1871 | struct list_head event_handler_list; |
| @@ -3007,7 +3010,7 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt) | |||
| 3007 | */ | 3010 | */ |
| 3008 | static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) | 3011 | static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr) |
| 3009 | { | 3012 | { |
| 3010 | return dma_mapping_error(&dev->dev, dma_addr); | 3013 | return dma_mapping_error(dev->dma_device, dma_addr); |
| 3011 | } | 3014 | } |
| 3012 | 3015 | ||
| 3013 | /** | 3016 | /** |
| @@ -3021,7 +3024,7 @@ static inline u64 ib_dma_map_single(struct ib_device *dev, | |||
| 3021 | void *cpu_addr, size_t size, | 3024 | void *cpu_addr, size_t size, |
| 3022 | enum dma_data_direction direction) | 3025 | enum dma_data_direction direction) |
| 3023 | { | 3026 | { |
| 3024 | return dma_map_single(&dev->dev, cpu_addr, size, direction); | 3027 | return dma_map_single(dev->dma_device, cpu_addr, size, direction); |
| 3025 | } | 3028 | } |
| 3026 | 3029 | ||
| 3027 | /** | 3030 | /** |
| @@ -3035,7 +3038,7 @@ static inline void ib_dma_unmap_single(struct ib_device *dev, | |||
| 3035 | u64 addr, size_t size, | 3038 | u64 addr, size_t size, |
| 3036 | enum dma_data_direction direction) | 3039 | enum dma_data_direction direction) |
| 3037 | { | 3040 | { |
| 3038 | dma_unmap_single(&dev->dev, addr, size, direction); | 3041 | dma_unmap_single(dev->dma_device, addr, size, direction); |
| 3039 | } | 3042 | } |
| 3040 | 3043 | ||
| 3041 | /** | 3044 | /** |
| @@ -3052,7 +3055,7 @@ static inline u64 ib_dma_map_page(struct ib_device *dev, | |||
| 3052 | size_t size, | 3055 | size_t size, |
| 3053 | enum dma_data_direction direction) | 3056 | enum dma_data_direction direction) |
| 3054 | { | 3057 | { |
| 3055 | return dma_map_page(&dev->dev, page, offset, size, direction); | 3058 | return dma_map_page(dev->dma_device, page, offset, size, direction); |
| 3056 | } | 3059 | } |
| 3057 | 3060 | ||
| 3058 | /** | 3061 | /** |
| @@ -3066,7 +3069,7 @@ static inline void ib_dma_unmap_page(struct ib_device *dev, | |||
| 3066 | u64 addr, size_t size, | 3069 | u64 addr, size_t size, |
| 3067 | enum dma_data_direction direction) | 3070 | enum dma_data_direction direction) |
| 3068 | { | 3071 | { |
| 3069 | dma_unmap_page(&dev->dev, addr, size, direction); | 3072 | dma_unmap_page(dev->dma_device, addr, size, direction); |
| 3070 | } | 3073 | } |
| 3071 | 3074 | ||
| 3072 | /** | 3075 | /** |
| @@ -3080,7 +3083,7 @@ static inline int ib_dma_map_sg(struct ib_device *dev, | |||
| 3080 | struct scatterlist *sg, int nents, | 3083 | struct scatterlist *sg, int nents, |
| 3081 | enum dma_data_direction direction) | 3084 | enum dma_data_direction direction) |
| 3082 | { | 3085 | { |
| 3083 | return dma_map_sg(&dev->dev, sg, nents, direction); | 3086 | return dma_map_sg(dev->dma_device, sg, nents, direction); |
| 3084 | } | 3087 | } |
| 3085 | 3088 | ||
| 3086 | /** | 3089 | /** |
| @@ -3094,7 +3097,7 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev, | |||
| 3094 | struct scatterlist *sg, int nents, | 3097 | struct scatterlist *sg, int nents, |
| 3095 | enum dma_data_direction direction) | 3098 | enum dma_data_direction direction) |
| 3096 | { | 3099 | { |
| 3097 | dma_unmap_sg(&dev->dev, sg, nents, direction); | 3100 | dma_unmap_sg(dev->dma_device, sg, nents, direction); |
| 3098 | } | 3101 | } |
| 3099 | 3102 | ||
| 3100 | static inline int ib_dma_map_sg_attrs(struct ib_device *dev, | 3103 | static inline int ib_dma_map_sg_attrs(struct ib_device *dev, |
| @@ -3102,7 +3105,8 @@ static inline int ib_dma_map_sg_attrs(struct ib_device *dev, | |||
| 3102 | enum dma_data_direction direction, | 3105 | enum dma_data_direction direction, |
| 3103 | unsigned long dma_attrs) | 3106 | unsigned long dma_attrs) |
| 3104 | { | 3107 | { |
| 3105 | return dma_map_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); | 3108 | return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, |
| 3109 | dma_attrs); | ||
| 3106 | } | 3110 | } |
| 3107 | 3111 | ||
| 3108 | static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, | 3112 | static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, |
| @@ -3110,7 +3114,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, | |||
| 3110 | enum dma_data_direction direction, | 3114 | enum dma_data_direction direction, |
| 3111 | unsigned long dma_attrs) | 3115 | unsigned long dma_attrs) |
| 3112 | { | 3116 | { |
| 3113 | dma_unmap_sg_attrs(&dev->dev, sg, nents, direction, dma_attrs); | 3117 | dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs); |
| 3114 | } | 3118 | } |
| 3115 | /** | 3119 | /** |
| 3116 | * ib_sg_dma_address - Return the DMA address from a scatter/gather entry | 3120 | * ib_sg_dma_address - Return the DMA address from a scatter/gather entry |
| @@ -3152,7 +3156,7 @@ static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev, | |||
| 3152 | size_t size, | 3156 | size_t size, |
| 3153 | enum dma_data_direction dir) | 3157 | enum dma_data_direction dir) |
| 3154 | { | 3158 | { |
| 3155 | dma_sync_single_for_cpu(&dev->dev, addr, size, dir); | 3159 | dma_sync_single_for_cpu(dev->dma_device, addr, size, dir); |
| 3156 | } | 3160 | } |
| 3157 | 3161 | ||
| 3158 | /** | 3162 | /** |
| @@ -3167,7 +3171,7 @@ static inline void ib_dma_sync_single_for_device(struct ib_device *dev, | |||
| 3167 | size_t size, | 3171 | size_t size, |
| 3168 | enum dma_data_direction dir) | 3172 | enum dma_data_direction dir) |
| 3169 | { | 3173 | { |
| 3170 | dma_sync_single_for_device(&dev->dev, addr, size, dir); | 3174 | dma_sync_single_for_device(dev->dma_device, addr, size, dir); |
| 3171 | } | 3175 | } |
| 3172 | 3176 | ||
| 3173 | /** | 3177 | /** |
| @@ -3182,7 +3186,7 @@ static inline void *ib_dma_alloc_coherent(struct ib_device *dev, | |||
| 3182 | dma_addr_t *dma_handle, | 3186 | dma_addr_t *dma_handle, |
| 3183 | gfp_t flag) | 3187 | gfp_t flag) |
| 3184 | { | 3188 | { |
| 3185 | return dma_alloc_coherent(&dev->dev, size, dma_handle, flag); | 3189 | return dma_alloc_coherent(dev->dma_device, size, dma_handle, flag); |
| 3186 | } | 3190 | } |
| 3187 | 3191 | ||
| 3188 | /** | 3192 | /** |
| @@ -3196,7 +3200,7 @@ static inline void ib_dma_free_coherent(struct ib_device *dev, | |||
| 3196 | size_t size, void *cpu_addr, | 3200 | size_t size, void *cpu_addr, |
| 3197 | dma_addr_t dma_handle) | 3201 | dma_addr_t dma_handle) |
| 3198 | { | 3202 | { |
| 3199 | dma_free_coherent(&dev->dev, size, cpu_addr, dma_handle); | 3203 | dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle); |
| 3200 | } | 3204 | } |
| 3201 | 3205 | ||
| 3202 | /** | 3206 | /** |
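From a ULP's point of view nothing changes: the ib_dma_* wrappers keep their signatures and simply route through dev->dma_device again. A minimal mapping sketch using only the wrappers above (buf and len are placeholders):

	u64 dma_addr;

	dma_addr = ib_dma_map_single(ibdev, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, dma_addr))
		return -ENOMEM;

	/* ...post the work request... */

	ib_dma_unmap_single(ibdev, dma_addr, len, DMA_TO_DEVICE);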
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index b54b98dc2d4a..1b0f447ce850 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h | |||
| @@ -4,7 +4,12 @@ | |||
| 4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
| 5 | #include <target/target_core_base.h> | 5 | #include <target/target_core_base.h> |
| 6 | 6 | ||
| 7 | #define TRANSPORT_FLAG_PASSTHROUGH 1 | 7 | #define TRANSPORT_FLAG_PASSTHROUGH 0x1 |
| 8 | /* | ||
| 9 | * ALUA commands, state checks and setup operations are handled by the | ||
| 10 | * backend module. | ||
| 11 | */ | ||
| 12 | #define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2 | ||
| 8 | 13 | ||
| 9 | struct request_queue; | 14 | struct request_queue; |
| 10 | struct scatterlist; | 15 | struct scatterlist; |
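Core code can now leave ALUA handling to the backend by testing the new flag; a hedged sketch of the check (assuming the usual se_device layout, where dev->transport->transport_flags holds these bits):

	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA)
		return 0;	/* backend performs its own ALUA state checks */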
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 37c274e61acc..4b784b6e21c0 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
| @@ -299,7 +299,7 @@ struct t10_alua_tg_pt_gp { | |||
| 299 | struct list_head tg_pt_gp_lun_list; | 299 | struct list_head tg_pt_gp_lun_list; |
| 300 | struct se_lun *tg_pt_gp_alua_lun; | 300 | struct se_lun *tg_pt_gp_alua_lun; |
| 301 | struct se_node_acl *tg_pt_gp_alua_nacl; | 301 | struct se_node_acl *tg_pt_gp_alua_nacl; |
| 302 | struct delayed_work tg_pt_gp_transition_work; | 302 | struct work_struct tg_pt_gp_transition_work; |
| 303 | struct completion *tg_pt_gp_transition_complete; | 303 | struct completion *tg_pt_gp_transition_complete; |
| 304 | }; | 304 | }; |
| 305 | 305 | ||
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 9b1462e38b82..a076cf1a3a23 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h | |||
| @@ -730,9 +730,11 @@ __SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect) | |||
| 730 | __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc) | 730 | __SYSCALL(__NR_pkey_alloc, sys_pkey_alloc) |
| 731 | #define __NR_pkey_free 290 | 731 | #define __NR_pkey_free 290 |
| 732 | __SYSCALL(__NR_pkey_free, sys_pkey_free) | 732 | __SYSCALL(__NR_pkey_free, sys_pkey_free) |
| 733 | #define __NR_statx 291 | ||
| 734 | __SYSCALL(__NR_statx, sys_statx) | ||
| 733 | 735 | ||
| 734 | #undef __NR_syscalls | 736 | #undef __NR_syscalls |
| 735 | #define __NR_syscalls 291 | 737 | #define __NR_syscalls 292 |
| 736 | 738 | ||
| 737 | /* | 739 | /* |
| 738 | * All syscalls below here should go away really, | 740 | * All syscalls below here should go away really, |
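Userspace can reach the new syscall directly until libc grows a wrapper; a hedged sketch (requires kernel headers new enough to define __NR_statx and struct statx):

	#include <linux/stat.h>		/* struct statx, STATX_BASIC_STATS */
	#include <sys/syscall.h>
	#include <fcntl.h>		/* AT_FDCWD */
	#include <unistd.h>
	#include <stdio.h>

	int main(void)
	{
		struct statx stx;

		if (syscall(__NR_statx, AT_FDCWD, "/etc/hostname", 0,
			    STATX_BASIC_STATS, &stx) < 0) {
			perror("statx");
			return 1;
		}
		printf("size=%llu\n", (unsigned long long)stx.stx_size);
		return 0;
	}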
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h index 407cb55df6ac..7fb97863c945 100644 --- a/include/uapi/drm/omap_drm.h +++ b/include/uapi/drm/omap_drm.h | |||
| @@ -33,8 +33,8 @@ extern "C" { | |||
| 33 | #define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */ | 33 | #define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */ |
| 34 | 34 | ||
| 35 | struct drm_omap_param { | 35 | struct drm_omap_param { |
| 36 | uint64_t param; /* in */ | 36 | __u64 param; /* in */ |
| 37 | uint64_t value; /* in (set_param), out (get_param) */ | 37 | __u64 value; /* in (set_param), out (get_param) */ |
| 38 | }; | 38 | }; |
| 39 | 39 | ||
| 40 | #define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */ | 40 | #define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */ |
| @@ -53,18 +53,18 @@ struct drm_omap_param { | |||
| 53 | #define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32) | 53 | #define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32) |
| 54 | 54 | ||
| 55 | union omap_gem_size { | 55 | union omap_gem_size { |
| 56 | uint32_t bytes; /* (for non-tiled formats) */ | 56 | __u32 bytes; /* (for non-tiled formats) */ |
| 57 | struct { | 57 | struct { |
| 58 | uint16_t width; | 58 | __u16 width; |
| 59 | uint16_t height; | 59 | __u16 height; |
| 60 | } tiled; /* (for tiled formats) */ | 60 | } tiled; /* (for tiled formats) */ |
| 61 | }; | 61 | }; |
| 62 | 62 | ||
| 63 | struct drm_omap_gem_new { | 63 | struct drm_omap_gem_new { |
| 64 | union omap_gem_size size; /* in */ | 64 | union omap_gem_size size; /* in */ |
| 65 | uint32_t flags; /* in */ | 65 | __u32 flags; /* in */ |
| 66 | uint32_t handle; /* out */ | 66 | __u32 handle; /* out */ |
| 67 | uint32_t __pad; | 67 | __u32 __pad; |
| 68 | }; | 68 | }; |
| 69 | 69 | ||
| 70 | /* mask of operations: */ | 70 | /* mask of operations: */ |
| @@ -74,33 +74,33 @@ enum omap_gem_op { | |||
| 74 | }; | 74 | }; |
| 75 | 75 | ||
| 76 | struct drm_omap_gem_cpu_prep { | 76 | struct drm_omap_gem_cpu_prep { |
| 77 | uint32_t handle; /* buffer handle (in) */ | 77 | __u32 handle; /* buffer handle (in) */ |
| 78 | uint32_t op; /* mask of omap_gem_op (in) */ | 78 | __u32 op; /* mask of omap_gem_op (in) */ |
| 79 | }; | 79 | }; |
| 80 | 80 | ||
| 81 | struct drm_omap_gem_cpu_fini { | 81 | struct drm_omap_gem_cpu_fini { |
| 82 | uint32_t handle; /* buffer handle (in) */ | 82 | __u32 handle; /* buffer handle (in) */ |
| 83 | uint32_t op; /* mask of omap_gem_op (in) */ | 83 | __u32 op; /* mask of omap_gem_op (in) */ |
| 84 | /* TODO maybe here we pass down info about what regions are touched | 84 | /* TODO maybe here we pass down info about what regions are touched |
| 85 | * by sw so we can be clever about cache ops? For now a placeholder, | 85 | * by sw so we can be clever about cache ops? For now a placeholder, |
| 86 | * set to zero and we just do full buffer flush.. | 86 | * set to zero and we just do full buffer flush.. |
| 87 | */ | 87 | */ |
| 88 | uint32_t nregions; | 88 | __u32 nregions; |
| 89 | uint32_t __pad; | 89 | __u32 __pad; |
| 90 | }; | 90 | }; |
| 91 | 91 | ||
| 92 | struct drm_omap_gem_info { | 92 | struct drm_omap_gem_info { |
| 93 | uint32_t handle; /* buffer handle (in) */ | 93 | __u32 handle; /* buffer handle (in) */ |
| 94 | uint32_t pad; | 94 | __u32 pad; |
| 95 | uint64_t offset; /* mmap offset (out) */ | 95 | __u64 offset; /* mmap offset (out) */ |
| 96 | /* note: in case of tiled buffers, the user virtual size can be | 96 | /* note: in case of tiled buffers, the user virtual size can be |
| 97 | * different from the physical size (ie. how many pages are needed | 97 | * different from the physical size (ie. how many pages are needed |
| 98 | * to back the object) which is returned in DRM_IOCTL_GEM_OPEN.. | 98 | * to back the object) which is returned in DRM_IOCTL_GEM_OPEN.. |
| 99 | * This size here is the one that should be used if you want to | 99 | * This size here is the one that should be used if you want to |
| 100 | * mmap() the buffer: | 100 | * mmap() the buffer: |
| 101 | */ | 101 | */ |
| 102 | uint32_t size; /* virtual size for mmap'ing (out) */ | 102 | __u32 size; /* virtual size for mmap'ing (out) */ |
| 103 | uint32_t __pad; | 103 | __u32 __pad; |
| 104 | }; | 104 | }; |
| 105 | 105 | ||
| 106 | #define DRM_OMAP_GET_PARAM 0x00 | 106 | #define DRM_OMAP_GET_PARAM 0x00 |
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index db4c253f8011..dcfc3a5a9cb1 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h | |||
| @@ -713,33 +713,6 @@ enum btrfs_err_code { | |||
| 713 | BTRFS_ERROR_DEV_ONLY_WRITABLE, | 713 | BTRFS_ERROR_DEV_ONLY_WRITABLE, |
| 714 | BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS | 714 | BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS |
| 715 | }; | 715 | }; |
| 716 | /* An error code to error string mapping for the kernel | ||
| 717 | * error codes | ||
| 718 | */ | ||
| 719 | static inline char *btrfs_err_str(enum btrfs_err_code err_code) | ||
| 720 | { | ||
| 721 | switch (err_code) { | ||
| 722 | case BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET: | ||
| 723 | return "unable to go below two devices on raid1"; | ||
| 724 | case BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET: | ||
| 725 | return "unable to go below four devices on raid10"; | ||
| 726 | case BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET: | ||
| 727 | return "unable to go below two devices on raid5"; | ||
| 728 | case BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET: | ||
| 729 | return "unable to go below three devices on raid6"; | ||
| 730 | case BTRFS_ERROR_DEV_TGT_REPLACE: | ||
| 731 | return "unable to remove the dev_replace target dev"; | ||
| 732 | case BTRFS_ERROR_DEV_MISSING_NOT_FOUND: | ||
| 733 | return "no missing devices found to remove"; | ||
| 734 | case BTRFS_ERROR_DEV_ONLY_WRITABLE: | ||
| 735 | return "unable to remove the only writeable device"; | ||
| 736 | case BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS: | ||
| 737 | return "add/delete/balance/replace/resize operation "\ | ||
| 738 | "in progress"; | ||
| 739 | default: | ||
| 740 | return NULL; | ||
| 741 | } | ||
| 742 | } | ||
| 743 | 716 | ||
| 744 | #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ | 717 | #define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ |
| 745 | struct btrfs_ioctl_vol_args) | 718 | struct btrfs_ioctl_vol_args) |
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h index da7cd62bace7..0b3d30837a9f 100644 --- a/include/uapi/rdma/mlx5-abi.h +++ b/include/uapi/rdma/mlx5-abi.h | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #define MLX5_ABI_USER_H | 34 | #define MLX5_ABI_USER_H |
| 35 | 35 | ||
| 36 | #include <linux/types.h> | 36 | #include <linux/types.h> |
| 37 | #include <linux/if_ether.h> /* For ETH_ALEN. */ | ||
| 37 | 38 | ||
| 38 | enum { | 39 | enum { |
| 39 | MLX5_QP_FLAG_SIGNATURE = 1 << 0, | 40 | MLX5_QP_FLAG_SIGNATURE = 1 << 0, |
| @@ -66,7 +67,7 @@ struct mlx5_ib_alloc_ucontext_req { | |||
| 66 | }; | 67 | }; |
| 67 | 68 | ||
| 68 | enum mlx5_lib_caps { | 69 | enum mlx5_lib_caps { |
| 69 | MLX5_LIB_CAP_4K_UAR = (u64)1 << 0, | 70 | MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0, |
| 70 | }; | 71 | }; |
| 71 | 72 | ||
| 72 | struct mlx5_ib_alloc_ucontext_req_v2 { | 73 | struct mlx5_ib_alloc_ucontext_req_v2 { |
diff --git a/include/video/exynos5433_decon.h b/include/video/exynos5433_decon.h index ef8e2a8ad0af..6b083d327e98 100644 --- a/include/video/exynos5433_decon.h +++ b/include/video/exynos5433_decon.h | |||
| @@ -46,6 +46,7 @@ | |||
| 46 | #define DECON_FRAMEFIFO_STATUS 0x0524 | 46 | #define DECON_FRAMEFIFO_STATUS 0x0524 |
| 47 | #define DECON_CMU 0x1404 | 47 | #define DECON_CMU 0x1404 |
| 48 | #define DECON_UPDATE 0x1410 | 48 | #define DECON_UPDATE 0x1410 |
| 49 | #define DECON_CRFMID 0x1414 | ||
| 49 | #define DECON_UPDATE_SCHEME 0x1438 | 50 | #define DECON_UPDATE_SCHEME 0x1438 |
| 50 | #define DECON_VIDCON1 0x2000 | 51 | #define DECON_VIDCON1 0x2000 |
| 51 | #define DECON_VIDCON2 0x2004 | 52 | #define DECON_VIDCON2 0x2004 |
| @@ -126,6 +127,10 @@ | |||
| 126 | 127 | ||
| 127 | /* VIDINTCON0 */ | 128 | /* VIDINTCON0 */ |
| 128 | #define VIDINTCON0_FRAMEDONE (1 << 17) | 129 | #define VIDINTCON0_FRAMEDONE (1 << 17) |
| 130 | #define VIDINTCON0_FRAMESEL_BP (0 << 15) | ||
| 131 | #define VIDINTCON0_FRAMESEL_VS (1 << 15) | ||
| 132 | #define VIDINTCON0_FRAMESEL_AC (2 << 15) | ||
| 133 | #define VIDINTCON0_FRAMESEL_FP (3 << 15) | ||
| 129 | #define VIDINTCON0_INTFRMEN (1 << 12) | 134 | #define VIDINTCON0_INTFRMEN (1 << 12) |
| 130 | #define VIDINTCON0_INTEN (1 << 0) | 135 | #define VIDINTCON0_INTEN (1 << 0) |
| 131 | 136 | ||
| @@ -142,6 +147,13 @@ | |||
| 142 | #define STANDALONE_UPDATE_F (1 << 0) | 147 | #define STANDALONE_UPDATE_F (1 << 0) |
| 143 | 148 | ||
| 144 | /* DECON_VIDCON1 */ | 149 | /* DECON_VIDCON1 */ |
| 150 | #define VIDCON1_LINECNT_MASK (0x0fff << 16) | ||
| 151 | #define VIDCON1_I80_ACTIVE (1 << 15) | ||
| 152 | #define VIDCON1_VSTATUS_MASK (0x3 << 13) | ||
| 153 | #define VIDCON1_VSTATUS_VS (0 << 13) | ||
| 154 | #define VIDCON1_VSTATUS_BP (1 << 13) | ||
| 155 | #define VIDCON1_VSTATUS_AC (2 << 13) | ||
| 156 | #define VIDCON1_VSTATUS_FP (3 << 13) | ||
| 145 | #define VIDCON1_VCLK_MASK (0x3 << 9) | 157 | #define VIDCON1_VCLK_MASK (0x3 << 9) |
| 146 | #define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9) | 158 | #define VIDCON1_VCLK_RUN_VDEN_DISABLE (0x3 << 9) |
| 147 | #define VIDCON1_VCLK_HOLD (0x0 << 9) | 159 | #define VIDCON1_VCLK_HOLD (0x0 << 9) |
diff --git a/kernel/audit.c b/kernel/audit.c index e794544f5e63..2f4964cfde0b 100644 --- a/kernel/audit.c +++ b/kernel/audit.c | |||
| @@ -54,6 +54,10 @@ | |||
| 54 | #include <linux/kthread.h> | 54 | #include <linux/kthread.h> |
| 55 | #include <linux/kernel.h> | 55 | #include <linux/kernel.h> |
| 56 | #include <linux/syscalls.h> | 56 | #include <linux/syscalls.h> |
| 57 | #include <linux/spinlock.h> | ||
| 58 | #include <linux/rcupdate.h> | ||
| 59 | #include <linux/mutex.h> | ||
| 60 | #include <linux/gfp.h> | ||
| 57 | 61 | ||
| 58 | #include <linux/audit.h> | 62 | #include <linux/audit.h> |
| 59 | 63 | ||
| @@ -90,13 +94,34 @@ static u32 audit_default; | |||
| 90 | /* If auditing cannot proceed, audit_failure selects what happens. */ | 94 | /* If auditing cannot proceed, audit_failure selects what happens. */ |
| 91 | static u32 audit_failure = AUDIT_FAIL_PRINTK; | 95 | static u32 audit_failure = AUDIT_FAIL_PRINTK; |
| 92 | 96 | ||
| 93 | /* | 97 | /* private audit network namespace index */ |
| 94 | * If audit records are to be written to the netlink socket, audit_pid | 98 | static unsigned int audit_net_id; |
| 95 | * contains the pid of the auditd process and audit_nlk_portid contains | 99 | |
| 96 | * the portid to use to send netlink messages to that process. | 100 | /** |
| 101 | * struct audit_net - audit private network namespace data | ||
| 102 | * @sk: communication socket | ||
| 103 | */ | ||
| 104 | struct audit_net { | ||
| 105 | struct sock *sk; | ||
| 106 | }; | ||
| 107 | |||
| 108 | /** | ||
| 109 | * struct auditd_connection - kernel/auditd connection state | ||
| 110 | * @pid: auditd PID | ||
| 111 | * @portid: netlink portid | ||
| 112 | * @net: the associated network namespace | ||
| 113 | * @lock: spinlock to protect write access | ||
| 114 | * | ||
| 115 | * Description: | ||
| 116 | * This struct is RCU protected; you must either hold the RCU lock for reading | ||
| 117 | * or the included spinlock for writing. | ||
| 97 | */ | 118 | */ |
| 98 | int audit_pid; | 119 | static struct auditd_connection { |
| 99 | static __u32 audit_nlk_portid; | 120 | int pid; |
| 121 | u32 portid; | ||
| 122 | struct net *net; | ||
| 123 | spinlock_t lock; | ||
| 124 | } auditd_conn; | ||
| 100 | 125 | ||
| 101 | /* If audit_rate_limit is non-zero, limit the rate of sending audit records | 126 | /* If audit_rate_limit is non-zero, limit the rate of sending audit records |
| 102 | * to that number per second. This prevents DoS attacks, but results in | 127 | * to that number per second. This prevents DoS attacks, but results in |
| @@ -123,10 +148,6 @@ u32 audit_sig_sid = 0; | |||
| 123 | */ | 148 | */ |
| 124 | static atomic_t audit_lost = ATOMIC_INIT(0); | 149 | static atomic_t audit_lost = ATOMIC_INIT(0); |
| 125 | 150 | ||
| 126 | /* The netlink socket. */ | ||
| 127 | static struct sock *audit_sock; | ||
| 128 | static unsigned int audit_net_id; | ||
| 129 | |||
| 130 | /* Hash for inode-based rules */ | 151 | /* Hash for inode-based rules */ |
| 131 | struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; | 152 | struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; |
| 132 | 153 | ||
| @@ -139,6 +160,7 @@ static LIST_HEAD(audit_freelist); | |||
| 139 | 160 | ||
| 140 | /* queue msgs to send via kauditd_task */ | 161 | /* queue msgs to send via kauditd_task */ |
| 141 | static struct sk_buff_head audit_queue; | 162 | static struct sk_buff_head audit_queue; |
| 163 | static void kauditd_hold_skb(struct sk_buff *skb); | ||
| 142 | /* queue msgs due to temporary unicast send problems */ | 164 | /* queue msgs due to temporary unicast send problems */ |
| 143 | static struct sk_buff_head audit_retry_queue; | 165 | static struct sk_buff_head audit_retry_queue; |
| 144 | /* queue msgs waiting for new auditd connection */ | 166 | /* queue msgs waiting for new auditd connection */ |
| @@ -192,6 +214,43 @@ struct audit_reply { | |||
| 192 | struct sk_buff *skb; | 214 | struct sk_buff *skb; |
| 193 | }; | 215 | }; |
| 194 | 216 | ||
| 217 | /** | ||
| 218 | * auditd_test_task - Check to see if a given task is an audit daemon | ||
| 219 | * @task: the task to check | ||
| 220 | * | ||
| 221 | * Description: | ||
| 222 | * Return 1 if the task is a registered audit daemon, 0 otherwise. | ||
| 223 | */ | ||
| 224 | int auditd_test_task(const struct task_struct *task) | ||
| 225 | { | ||
| 226 | int rc; | ||
| 227 | |||
| 228 | rcu_read_lock(); | ||
| 229 | rc = (auditd_conn.pid && task->tgid == auditd_conn.pid ? 1 : 0); | ||
| 230 | rcu_read_unlock(); | ||
| 231 | |||
| 232 | return rc; | ||
| 233 | } | ||
| 234 | |||
| 235 | /** | ||
| 236 | * audit_get_sk - Return the audit socket for the given network namespace | ||
| 237 | * @net: the destination network namespace | ||
| 238 | * | ||
| 239 | * Description: | ||
| 240 | * Returns the sock pointer if valid, NULL otherwise. The caller must ensure | ||
| 241 | * that a reference is held for the network namespace while the sock is in use. | ||
| 242 | */ | ||
| 243 | static struct sock *audit_get_sk(const struct net *net) | ||
| 244 | { | ||
| 245 | struct audit_net *aunet; | ||
| 246 | |||
| 247 | if (!net) | ||
| 248 | return NULL; | ||
| 249 | |||
| 250 | aunet = net_generic(net, audit_net_id); | ||
| 251 | return aunet->sk; | ||
| 252 | } | ||
| 253 | |||
| 195 | static void audit_set_portid(struct audit_buffer *ab, __u32 portid) | 254 | static void audit_set_portid(struct audit_buffer *ab, __u32 portid) |
| 196 | { | 255 | { |
| 197 | if (ab) { | 256 | if (ab) { |
| @@ -210,9 +269,7 @@ void audit_panic(const char *message) | |||
| 210 | pr_err("%s\n", message); | 269 | pr_err("%s\n", message); |
| 211 | break; | 270 | break; |
| 212 | case AUDIT_FAIL_PANIC: | 271 | case AUDIT_FAIL_PANIC: |
| 213 | /* test audit_pid since printk is always losey, why bother? */ | 272 | panic("audit: %s\n", message); |
| 214 | if (audit_pid) | ||
| 215 | panic("audit: %s\n", message); | ||
| 216 | break; | 273 | break; |
| 217 | } | 274 | } |
| 218 | } | 275 | } |
| @@ -370,21 +427,87 @@ static int audit_set_failure(u32 state) | |||
| 370 | return audit_do_config_change("audit_failure", &audit_failure, state); | 427 | return audit_do_config_change("audit_failure", &audit_failure, state); |
| 371 | } | 428 | } |
| 372 | 429 | ||
| 373 | /* | 430 | /** |
| 374 | * For one reason or another this nlh isn't getting delivered to the userspace | 431 | * auditd_set - Set/Reset the auditd connection state |
| 375 | * audit daemon, just send it to printk. | 432 | * @pid: auditd PID |
| 433 | * @portid: auditd netlink portid | ||
| 434 | * @net: auditd network namespace pointer | ||
| 435 | * | ||
| 436 | * Description: | ||
| 437 | * This function will obtain and drop network namespace references as | ||
| 438 | * necessary. | ||
| 439 | */ | ||
| 440 | static void auditd_set(int pid, u32 portid, struct net *net) | ||
| 441 | { | ||
| 442 | unsigned long flags; | ||
| 443 | |||
| 444 | spin_lock_irqsave(&auditd_conn.lock, flags); | ||
| 445 | auditd_conn.pid = pid; | ||
| 446 | auditd_conn.portid = portid; | ||
| 447 | if (auditd_conn.net) | ||
| 448 | put_net(auditd_conn.net); | ||
| 449 | if (net) | ||
| 450 | auditd_conn.net = get_net(net); | ||
| 451 | else | ||
| 452 | auditd_conn.net = NULL; | ||
| 453 | spin_unlock_irqrestore(&auditd_conn.lock, flags); | ||
| 454 | } | ||
| 455 | |||
| 456 | /** | ||
| 457 | * auditd_reset - Disconnect the auditd connection | ||
| 458 | * | ||
| 459 | * Description: | ||
| 460 | * Break the auditd/kauditd connection and move all the queued records into the | ||
| 461 | * hold queue in case auditd reconnects. | ||
| 462 | */ | ||
| 463 | static void auditd_reset(void) | ||
| 464 | { | ||
| 465 | struct sk_buff *skb; | ||
| 466 | |||
| 467 | /* if it isn't already broken, break the connection */ | ||
| 468 | rcu_read_lock(); | ||
| 469 | if (auditd_conn.pid) | ||
| 470 | auditd_set(0, 0, NULL); | ||
| 471 | rcu_read_unlock(); | ||
| 472 | |||
| 473 | /* flush all of the main and retry queues to the hold queue */ | ||
| 474 | while ((skb = skb_dequeue(&audit_retry_queue))) | ||
| 475 | kauditd_hold_skb(skb); | ||
| 476 | while ((skb = skb_dequeue(&audit_queue))) | ||
| 477 | kauditd_hold_skb(skb); | ||
| 478 | } | ||
| 479 | |||
| 480 | /** | ||
| 481 | * kauditd_print_skb - Print the audit record to the ring buffer | ||
| 482 | * @skb: audit record | ||
| 483 | * | ||
| 484 | * Whatever the reason, this packet may not make it to the auditd connection, | ||
| 485 | * so write it via printk to keep the information from being completely lost. | ||
| 376 | */ | 486 | */ |
| 377 | static void kauditd_printk_skb(struct sk_buff *skb) | 487 | static void kauditd_printk_skb(struct sk_buff *skb) |
| 378 | { | 488 | { |
| 379 | struct nlmsghdr *nlh = nlmsg_hdr(skb); | 489 | struct nlmsghdr *nlh = nlmsg_hdr(skb); |
| 380 | char *data = nlmsg_data(nlh); | 490 | char *data = nlmsg_data(nlh); |
| 381 | 491 | ||
| 382 | if (nlh->nlmsg_type != AUDIT_EOE) { | 492 | if (nlh->nlmsg_type != AUDIT_EOE && printk_ratelimit()) |
| 383 | if (printk_ratelimit()) | 493 | pr_notice("type=%d %s\n", nlh->nlmsg_type, data); |
| 384 | pr_notice("type=%d %s\n", nlh->nlmsg_type, data); | 494 | } |
| 385 | else | 495 | |
| 386 | audit_log_lost("printk limit exceeded"); | 496 | /** |
| 387 | } | 497 | * kauditd_rehold_skb - Handle an audit record send failure in the hold queue |
| 498 | * @skb: audit record | ||
| 499 | * | ||
| 500 | * Description: | ||
| 501 | * This should only be used by the kauditd_thread when it fails to flush the | ||
| 502 | * hold queue. | ||
| 503 | */ | ||
| 504 | static void kauditd_rehold_skb(struct sk_buff *skb) | ||
| 505 | { | ||
| 506 | /* put the record back in the queue at the same place */ | ||
| 507 | skb_queue_head(&audit_hold_queue, skb); | ||
| 508 | |||
| 509 | /* fail the auditd connection */ | ||
| 510 | auditd_reset(); | ||
| 388 | } | 511 | } |
| 389 | 512 | ||
| 390 | /** | 513 | /** |
| @@ -421,6 +544,9 @@ static void kauditd_hold_skb(struct sk_buff *skb) | |||
| 421 | /* we have no other options - drop the message */ | 544 | /* we have no other options - drop the message */ |
| 422 | audit_log_lost("kauditd hold queue overflow"); | 545 | audit_log_lost("kauditd hold queue overflow"); |
| 423 | kfree_skb(skb); | 546 | kfree_skb(skb); |
| 547 | |||
| 548 | /* fail the auditd connection */ | ||
| 549 | auditd_reset(); | ||
| 424 | } | 550 | } |
| 425 | 551 | ||
| 426 | /** | 552 | /** |
| @@ -441,51 +567,122 @@ static void kauditd_retry_skb(struct sk_buff *skb) | |||
| 441 | } | 567 | } |
| 442 | 568 | ||
| 443 | /** | 569 | /** |
| 444 | * auditd_reset - Disconnect the auditd connection | 570 | * auditd_send_unicast_skb - Send a record via unicast to auditd |
| 571 | * @skb: audit record | ||
| 445 | * | 572 | * |
| 446 | * Description: | 573 | * Description: |
| 447 | * Break the auditd/kauditd connection and move all the records in the retry | 574 | * Send a skb to the audit daemon, returns positive/zero values on success and |
| 448 | * queue into the hold queue in case auditd reconnects. The audit_cmd_mutex | 575 | * negative values on failure; in all cases the skb will be consumed by this |
| 449 | * must be held when calling this function. | 576 | * function. If the send results in -ECONNREFUSED the connection with auditd |
| 577 | * will be reset. This function may sleep so callers should not hold any locks | ||
| 578 | * where this would cause a problem. | ||
| 450 | */ | 579 | */ |
| 451 | static void auditd_reset(void) | 580 | static int auditd_send_unicast_skb(struct sk_buff *skb) |
| 452 | { | 581 | { |
| 453 | struct sk_buff *skb; | 582 | int rc; |
| 454 | 583 | u32 portid; | |
| 455 | /* break the connection */ | 584 | struct net *net; |
| 456 | if (audit_sock) { | 585 | struct sock *sk; |
| 457 | sock_put(audit_sock); | 586 | |
| 458 | audit_sock = NULL; | 587 | /* NOTE: we can't call netlink_unicast while in the RCU section so |
| 588 | * take a reference to the network namespace and grab local | ||
| 589 | * copies of the namespace, the sock, and the portid; the | ||
| 590 | * namespace and sock aren't going to go away while we hold a | ||
| 591 | * reference, and if the portid does become invalid after the RCU | ||
| 592 | * section, netlink_unicast() should safely return an error */ | ||
| 593 | |||
| 594 | rcu_read_lock(); | ||
| 595 | if (!auditd_conn.pid) { | ||
| 596 | rcu_read_unlock(); | ||
| 597 | rc = -ECONNREFUSED; | ||
| 598 | goto err; | ||
| 459 | } | 599 | } |
| 460 | audit_pid = 0; | 600 | net = auditd_conn.net; |
| 461 | audit_nlk_portid = 0; | 601 | get_net(net); |
| 602 | sk = audit_get_sk(net); | ||
| 603 | portid = auditd_conn.portid; | ||
| 604 | rcu_read_unlock(); | ||
| 462 | 605 | ||
| 463 | /* flush all of the retry queue to the hold queue */ | 606 | rc = netlink_unicast(sk, skb, portid, 0); |
| 464 | while ((skb = skb_dequeue(&audit_retry_queue))) | 607 | put_net(net); |
| 465 | kauditd_hold_skb(skb); | 608 | if (rc < 0) |
| 609 | goto err; | ||
| 610 | |||
| 611 | return rc; | ||
| 612 | |||
| 613 | err: | ||
| 614 | if (rc == -ECONNREFUSED) | ||
| 615 | auditd_reset(); | ||
| 616 | return rc; | ||
| 466 | } | 617 | } |
| 467 | 618 | ||
| 468 | /** | 619 | /** |
| 469 | * kauditd_send_unicast_skb - Send a record via unicast to auditd | 620 | * kauditd_send_queue - Helper for kauditd_thread to flush skb queues |
| 470 | * @skb: audit record | 621 | * @sk: the sending sock |
| 622 | * @portid: the netlink destination | ||
| 623 | * @queue: the skb queue to process | ||
| 624 | * @retry_limit: limit on number of netlink unicast failures | ||
| 625 | * @skb_hook: per-skb hook for additional processing | ||
| 626 | * @err_hook: hook called if the skb fails the netlink unicast send | ||
| 627 | * | ||
| 628 | * Description: | ||
| 629 | * Run through the given queue and attempt to send the audit records to auditd; | ||
| 630 | * returns zero on success, negative values on failure. It is up to the caller | ||
| 631 | * to ensure that the @sk is valid for the duration of this function. | ||
| 632 | * | ||
| 471 | */ | 633 | */ |
| 472 | static int kauditd_send_unicast_skb(struct sk_buff *skb) | 634 | static int kauditd_send_queue(struct sock *sk, u32 portid, |
| 635 | struct sk_buff_head *queue, | ||
| 636 | unsigned int retry_limit, | ||
| 637 | void (*skb_hook)(struct sk_buff *skb), | ||
| 638 | void (*err_hook)(struct sk_buff *skb)) | ||
| 473 | { | 639 | { |
| 474 | int rc; | 640 | int rc = 0; |
| 641 | struct sk_buff *skb; | ||
| 642 | static unsigned int failed = 0; | ||
| 475 | 643 | ||
| 476 | /* if we know nothing is connected, don't even try the netlink call */ | 644 | /* NOTE: kauditd_thread takes care of all our locking, we just use |
| 477 | if (!audit_pid) | 645 | * the netlink info passed to us (e.g. sk and portid) */ |
| 478 | return -ECONNREFUSED; | 646 | |
| 647 | while ((skb = skb_dequeue(queue))) { | ||
| 648 | /* call the skb_hook for each skb we touch */ | ||
| 649 | if (skb_hook) | ||
| 650 | (*skb_hook)(skb); | ||
| 651 | |||
| 652 | /* can we send to anyone via unicast? */ | ||
| 653 | if (!sk) { | ||
| 654 | if (err_hook) | ||
| 655 | (*err_hook)(skb); | ||
| 656 | continue; | ||
| 657 | } | ||
| 479 | 658 | ||
| 480 | /* get an extra skb reference in case we fail to send */ | 659 | /* grab an extra skb reference in case of error */ |
| 481 | skb_get(skb); | 660 | skb_get(skb); |
| 482 | rc = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0); | 661 | rc = netlink_unicast(sk, skb, portid, 0); |
| 483 | if (rc >= 0) { | 662 | if (rc < 0) { |
| 484 | consume_skb(skb); | 663 | /* fatal failure for our queue flush attempt? */ |
| 485 | rc = 0; | 664 | if (++failed >= retry_limit || |
| 665 | rc == -ECONNREFUSED || rc == -EPERM) { | ||
| 666 | /* yes - error processing for the queue */ | ||
| 667 | sk = NULL; | ||
| 668 | if (err_hook) | ||
| 669 | (*err_hook)(skb); | ||
| 670 | if (!skb_hook) | ||
| 671 | goto out; | ||
| 672 | /* keep processing with the skb_hook */ | ||
| 673 | continue; | ||
| 674 | } else | ||
| 675 | /* no - requeue to preserve ordering */ | ||
| 676 | skb_queue_head(queue, skb); | ||
| 677 | } else { | ||
| 678 | /* it worked - drop the extra reference and continue */ | ||
| 679 | consume_skb(skb); | ||
| 680 | failed = 0; | ||
| 681 | } | ||
| 486 | } | 682 | } |
| 487 | 683 | ||
| 488 | return rc; | 684 | out: |
| 685 | return (rc >= 0 ? 0 : rc); | ||
| 489 | } | 686 | } |
| 490 | 687 | ||
| 491 | /* | 688 | /* |
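
One subtlety in kauditd_send_queue() above is the skb_get()/consume_skb() pairing: netlink_unicast() consumes a reference whether or not the send succeeds, so the extra reference taken before the call is what keeps the record alive for requeueing. A minimal sketch of the refcount flow (the comments are explanatory, not from the source):

    skb_get(skb);                             /* refcount 1 -> 2 */
    rc = netlink_unicast(sk, skb, portid, 0); /* consumes one ref */
    if (rc < 0)
            skb_queue_head(queue, skb);       /* our ref keeps skb valid */
    else
            consume_skb(skb);                 /* sent: drop the extra ref */

On the failure path the surviving reference is handed to err_hook instead, which is why the hooks never need to take a reference of their own.
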
| @@ -493,16 +690,19 @@ static int kauditd_send_unicast_skb(struct sk_buff *skb) | |||
| 493 | * @skb: audit record | 690 | * @skb: audit record |
| 494 | * | 691 | * |
| 495 | * Description: | 692 | * Description: |
| 496 | * This function doesn't consume an skb as might be expected since it has to | 693 | * Write a multicast message to anyone listening in the initial network |
| 497 | * copy it anyways. | 694 | * namespace. This function doesn't consume an skb as might be expected since |
| 695 | * it has to copy it anyway. | ||
| 498 | */ | 696 | */ |
| 499 | static void kauditd_send_multicast_skb(struct sk_buff *skb) | 697 | static void kauditd_send_multicast_skb(struct sk_buff *skb) |
| 500 | { | 698 | { |
| 501 | struct sk_buff *copy; | 699 | struct sk_buff *copy; |
| 502 | struct audit_net *aunet = net_generic(&init_net, audit_net_id); | 700 | struct sock *sock = audit_get_sk(&init_net); |
| 503 | struct sock *sock = aunet->nlsk; | ||
| 504 | struct nlmsghdr *nlh; | 701 | struct nlmsghdr *nlh; |
| 505 | 702 | ||
| 703 | /* NOTE: we are not taking an additional reference for init_net since | ||
| 704 | * we don't have to worry about it going away */ | ||
| 705 | |||
| 506 | if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG)) | 706 | if (!netlink_has_listeners(sock, AUDIT_NLGRP_READLOG)) |
| 507 | return; | 707 | return; |
| 508 | 708 | ||
| @@ -526,149 +726,75 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb) | |||
| 526 | } | 726 | } |
| 527 | 727 | ||
| 528 | /** | 728 | /** |
| 529 | * kauditd_wake_condition - Return true when it is time to wake kauditd_thread | 729 | * kauditd_thread - Worker thread to send audit records to userspace |
| 530 | * | 730 | * @dummy: unused |
| 531 | * Description: | ||
| 532 | * This function is for use by the wait_event_freezable() call in | ||
| 533 | * kauditd_thread(). | ||
| 534 | */ | 731 | */ |
| 535 | static int kauditd_wake_condition(void) | ||
| 536 | { | ||
| 537 | static int pid_last = 0; | ||
| 538 | int rc; | ||
| 539 | int pid = audit_pid; | ||
| 540 | |||
| 541 | /* wake on new messages or a change in the connected auditd */ | ||
| 542 | rc = skb_queue_len(&audit_queue) || (pid && pid != pid_last); | ||
| 543 | if (rc) | ||
| 544 | pid_last = pid; | ||
| 545 | |||
| 546 | return rc; | ||
| 547 | } | ||
| 548 | |||
| 549 | static int kauditd_thread(void *dummy) | 732 | static int kauditd_thread(void *dummy) |
| 550 | { | 733 | { |
| 551 | int rc; | 734 | int rc; |
| 552 | int auditd = 0; | 735 | u32 portid = 0; |
| 553 | int reschedule = 0; | 736 | struct net *net = NULL; |
| 554 | struct sk_buff *skb; | 737 | struct sock *sk = NULL; |
| 555 | struct nlmsghdr *nlh; | ||
| 556 | 738 | ||
| 557 | #define UNICAST_RETRIES 5 | 739 | #define UNICAST_RETRIES 5 |
| 558 | #define AUDITD_BAD(x,y) \ | ||
| 559 | ((x) == -ECONNREFUSED || (x) == -EPERM || ++(y) >= UNICAST_RETRIES) | ||
| 560 | |||
| 561 | /* NOTE: we do invalidate the auditd connection flag on any sending | ||
| 562 | * errors, but we only "restore" the connection flag at specific places | ||
| 563 | * in the loop in order to help ensure proper ordering of audit | ||
| 564 | * records */ | ||
| 565 | 740 | ||
| 566 | set_freezable(); | 741 | set_freezable(); |
| 567 | while (!kthread_should_stop()) { | 742 | while (!kthread_should_stop()) { |
| 568 | /* NOTE: possible area for future improvement is to look at | 743 | /* NOTE: see the lock comments in auditd_send_unicast_skb() */ |
| 569 | * the hold and retry queues, since only this thread | 744 | rcu_read_lock(); |
| 570 | * has access to these queues we might be able to do | 745 | if (!auditd_conn.pid) { |
| 571 | * our own queuing and skip some/all of the locking */ | 746 | rcu_read_unlock(); |
| 572 | 747 | goto main_queue; | |
| 573 | /* NOTE: it might be a fun experiment to split the hold and | 748 | } |
| 574 | * retry queue handling to another thread, but the | 749 | net = auditd_conn.net; |
| 575 | * synchronization issues and other overhead might kill | 750 | get_net(net); |
| 576 | * any performance gains */ | 751 | sk = audit_get_sk(net); |
| 752 | portid = auditd_conn.portid; | ||
| 753 | rcu_read_unlock(); | ||
| 577 | 754 | ||
| 578 | /* attempt to flush the hold queue */ | 755 | /* attempt to flush the hold queue */ |
| 579 | while (auditd && (skb = skb_dequeue(&audit_hold_queue))) { | 756 | rc = kauditd_send_queue(sk, portid, |
| 580 | rc = kauditd_send_unicast_skb(skb); | 757 | &audit_hold_queue, UNICAST_RETRIES, |
| 581 | if (rc) { | 758 | NULL, kauditd_rehold_skb); |
| 582 | /* requeue to the same spot */ | 759 | if (rc < 0) { |
| 583 | skb_queue_head(&audit_hold_queue, skb); | 760 | sk = NULL; |
| 584 | 761 | goto main_queue; | |
| 585 | auditd = 0; | ||
| 586 | if (AUDITD_BAD(rc, reschedule)) { | ||
| 587 | mutex_lock(&audit_cmd_mutex); | ||
| 588 | auditd_reset(); | ||
| 589 | mutex_unlock(&audit_cmd_mutex); | ||
| 590 | reschedule = 0; | ||
| 591 | } | ||
| 592 | } else | ||
| 593 | /* we were able to send successfully */ | ||
| 594 | reschedule = 0; | ||
| 595 | } | 762 | } |
| 596 | 763 | ||
| 597 | /* attempt to flush the retry queue */ | 764 | /* attempt to flush the retry queue */ |
| 598 | while (auditd && (skb = skb_dequeue(&audit_retry_queue))) { | 765 | rc = kauditd_send_queue(sk, portid, |
| 599 | rc = kauditd_send_unicast_skb(skb); | 766 | &audit_retry_queue, UNICAST_RETRIES, |
| 600 | if (rc) { | 767 | NULL, kauditd_hold_skb); |
| 601 | auditd = 0; | 768 | if (rc < 0) { |
| 602 | if (AUDITD_BAD(rc, reschedule)) { | 769 | sk = NULL; |
| 603 | kauditd_hold_skb(skb); | 770 | goto main_queue; |
| 604 | mutex_lock(&audit_cmd_mutex); | ||
| 605 | auditd_reset(); | ||
| 606 | mutex_unlock(&audit_cmd_mutex); | ||
| 607 | reschedule = 0; | ||
| 608 | } else | ||
| 609 | /* temporary problem (we hope), queue | ||
| 610 | * to the same spot and retry */ | ||
| 611 | skb_queue_head(&audit_retry_queue, skb); | ||
| 612 | } else | ||
| 613 | /* we were able to send successfully */ | ||
| 614 | reschedule = 0; | ||
| 615 | } | 771 | } |
| 616 | 772 | ||
| 617 | /* standard queue processing, try to be as quick as possible */ | 773 | main_queue: |
| 618 | quick_loop: | 774 | /* process the main queue - do the multicast send and attempt |
| 619 | skb = skb_dequeue(&audit_queue); | 775 | * unicast, dump failed record sends to the retry queue; if |
| 620 | if (skb) { | 776 | * sk == NULL due to previous failures we will just do the |
| 621 | /* setup the netlink header, see the comments in | 777 | * multicast send and move the record to the retry queue */ |
| 622 | * kauditd_send_multicast_skb() for length quirks */ | 778 | kauditd_send_queue(sk, portid, &audit_queue, 1, |
| 623 | nlh = nlmsg_hdr(skb); | 779 | kauditd_send_multicast_skb, |
| 624 | nlh->nlmsg_len = skb->len - NLMSG_HDRLEN; | 780 | kauditd_retry_skb); |
| 625 | 781 | ||
| 626 | /* attempt to send to any multicast listeners */ | 782 | /* drop our netns reference, no auditd sends past this line */ |
| 627 | kauditd_send_multicast_skb(skb); | 783 | if (net) { |
| 628 | 784 | put_net(net); | |
| 629 | /* attempt to send to auditd, queue on failure */ | 785 | net = NULL; |
| 630 | if (auditd) { | ||
| 631 | rc = kauditd_send_unicast_skb(skb); | ||
| 632 | if (rc) { | ||
| 633 | auditd = 0; | ||
| 634 | if (AUDITD_BAD(rc, reschedule)) { | ||
| 635 | mutex_lock(&audit_cmd_mutex); | ||
| 636 | auditd_reset(); | ||
| 637 | mutex_unlock(&audit_cmd_mutex); | ||
| 638 | reschedule = 0; | ||
| 639 | } | ||
| 640 | |||
| 641 | /* move to the retry queue */ | ||
| 642 | kauditd_retry_skb(skb); | ||
| 643 | } else | ||
| 644 | /* everything is working so go fast! */ | ||
| 645 | goto quick_loop; | ||
| 646 | } else if (reschedule) | ||
| 647 | /* we are currently having problems, move to | ||
| 648 | * the retry queue */ | ||
| 649 | kauditd_retry_skb(skb); | ||
| 650 | else | ||
| 651 | /* dump the message via printk and hold it */ | ||
| 652 | kauditd_hold_skb(skb); | ||
| 653 | } else { | ||
| 654 | /* we have flushed the backlog so wake everyone */ | ||
| 655 | wake_up(&audit_backlog_wait); | ||
| 656 | |||
| 657 | /* if everything is okay with auditd (if present), go | ||
| 658 | * to sleep until there is something new in the queue | ||
| 659 | * or we have a change in the connected auditd; | ||
| 660 | * otherwise simply reschedule to give things a chance | ||
| 661 | * to recover */ | ||
| 662 | if (reschedule) { | ||
| 663 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 664 | schedule(); | ||
| 665 | } else | ||
| 666 | wait_event_freezable(kauditd_wait, | ||
| 667 | kauditd_wake_condition()); | ||
| 668 | |||
| 669 | /* update the auditd connection status */ | ||
| 670 | auditd = (audit_pid ? 1 : 0); | ||
| 671 | } | 786 | } |
| 787 | sk = NULL; | ||
| 788 | |||
| 789 | /* we have processed all the queues so wake everyone */ | ||
| 790 | wake_up(&audit_backlog_wait); | ||
| 791 | |||
| 792 | /* NOTE: we want to wake up if there is anything on the queue, | ||
| 793 | * regardless of whether an auditd is connected, as we need to | ||
| 794 | * do the multicast send and rotate records from the | ||
| 795 | * main queue to the retry/hold queues */ | ||
| 796 | wait_event_freezable(kauditd_wait, | ||
| 797 | (skb_queue_len(&audit_queue) ? 1 : 0)); | ||
| 672 | } | 798 | } |
| 673 | 799 | ||
| 674 | return 0; | 800 | return 0; |
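
Taken together, the rewritten kauditd_thread() above reduces to a fixed per-iteration sequence; condensed here as an orientation sketch derived from the hunk, not additional code:

    while (!kthread_should_stop()) {
            /* 1: snapshot auditd_conn under rcu_read_lock(), pin net */
            /* 2: flush hold queue  (err_hook kauditd_rehold_skb: requeue at head) */
            /* 3: flush retry queue (err_hook kauditd_hold_skb: demote to hold) */
            /* 4: drain main queue  (skb_hook multicast send, err_hook kauditd_retry_skb) */
            /* 5: put_net(), wake audit_backlog_wait, sleep until audit_queue fills */
    }

A failure in step 2 or 3 clears sk, so step 4 still performs the multicast send and rotates records toward the retry/hold queues instead of losing them.
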
| @@ -678,17 +804,16 @@ int audit_send_list(void *_dest) | |||
| 678 | { | 804 | { |
| 679 | struct audit_netlink_list *dest = _dest; | 805 | struct audit_netlink_list *dest = _dest; |
| 680 | struct sk_buff *skb; | 806 | struct sk_buff *skb; |
| 681 | struct net *net = dest->net; | 807 | struct sock *sk = audit_get_sk(dest->net); |
| 682 | struct audit_net *aunet = net_generic(net, audit_net_id); | ||
| 683 | 808 | ||
| 684 | /* wait for parent to finish and send an ACK */ | 809 | /* wait for parent to finish and send an ACK */ |
| 685 | mutex_lock(&audit_cmd_mutex); | 810 | mutex_lock(&audit_cmd_mutex); |
| 686 | mutex_unlock(&audit_cmd_mutex); | 811 | mutex_unlock(&audit_cmd_mutex); |
| 687 | 812 | ||
| 688 | while ((skb = __skb_dequeue(&dest->q)) != NULL) | 813 | while ((skb = __skb_dequeue(&dest->q)) != NULL) |
| 689 | netlink_unicast(aunet->nlsk, skb, dest->portid, 0); | 814 | netlink_unicast(sk, skb, dest->portid, 0); |
| 690 | 815 | ||
| 691 | put_net(net); | 816 | put_net(dest->net); |
| 692 | kfree(dest); | 817 | kfree(dest); |
| 693 | 818 | ||
| 694 | return 0; | 819 | return 0; |
| @@ -722,16 +847,15 @@ out_kfree_skb: | |||
| 722 | static int audit_send_reply_thread(void *arg) | 847 | static int audit_send_reply_thread(void *arg) |
| 723 | { | 848 | { |
| 724 | struct audit_reply *reply = (struct audit_reply *)arg; | 849 | struct audit_reply *reply = (struct audit_reply *)arg; |
| 725 | struct net *net = reply->net; | 850 | struct sock *sk = audit_get_sk(reply->net); |
| 726 | struct audit_net *aunet = net_generic(net, audit_net_id); | ||
| 727 | 851 | ||
| 728 | mutex_lock(&audit_cmd_mutex); | 852 | mutex_lock(&audit_cmd_mutex); |
| 729 | mutex_unlock(&audit_cmd_mutex); | 853 | mutex_unlock(&audit_cmd_mutex); |
| 730 | 854 | ||
| 731 | /* Ignore failure. It'll only happen if the sender goes away, | 855 | /* Ignore failure. It'll only happen if the sender goes away, |
| 732 | because our timeout is set to infinite. */ | 856 | because our timeout is set to infinite. */ |
| 733 | netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); | 857 | netlink_unicast(sk, reply->skb, reply->portid, 0); |
| 734 | put_net(net); | 858 | put_net(reply->net); |
| 735 | kfree(reply); | 859 | kfree(reply); |
| 736 | return 0; | 860 | return 0; |
| 737 | } | 861 | } |
| @@ -949,12 +1073,12 @@ static int audit_set_feature(struct sk_buff *skb) | |||
| 949 | 1073 | ||
| 950 | static int audit_replace(pid_t pid) | 1074 | static int audit_replace(pid_t pid) |
| 951 | { | 1075 | { |
| 952 | struct sk_buff *skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0, | 1076 | struct sk_buff *skb; |
| 953 | &pid, sizeof(pid)); | ||
| 954 | 1077 | ||
| 1078 | skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0, &pid, sizeof(pid)); | ||
| 955 | if (!skb) | 1079 | if (!skb) |
| 956 | return -ENOMEM; | 1080 | return -ENOMEM; |
| 957 | return netlink_unicast(audit_sock, skb, audit_nlk_portid, 0); | 1081 | return auditd_send_unicast_skb(skb); |
| 958 | } | 1082 | } |
| 959 | 1083 | ||
| 960 | static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | 1084 | static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) |
| @@ -981,7 +1105,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 981 | memset(&s, 0, sizeof(s)); | 1105 | memset(&s, 0, sizeof(s)); |
| 982 | s.enabled = audit_enabled; | 1106 | s.enabled = audit_enabled; |
| 983 | s.failure = audit_failure; | 1107 | s.failure = audit_failure; |
| 984 | s.pid = audit_pid; | 1108 | rcu_read_lock(); |
| 1109 | s.pid = auditd_conn.pid; | ||
| 1110 | rcu_read_unlock(); | ||
| 985 | s.rate_limit = audit_rate_limit; | 1111 | s.rate_limit = audit_rate_limit; |
| 986 | s.backlog_limit = audit_backlog_limit; | 1112 | s.backlog_limit = audit_backlog_limit; |
| 987 | s.lost = atomic_read(&audit_lost); | 1113 | s.lost = atomic_read(&audit_lost); |
| @@ -1014,30 +1140,44 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 1014 | * from the initial pid namespace, but something | 1140 | * from the initial pid namespace, but something |
| 1015 | * to keep in mind if this changes */ | 1141 | * to keep in mind if this changes */ |
| 1016 | int new_pid = s.pid; | 1142 | int new_pid = s.pid; |
| 1143 | pid_t auditd_pid; | ||
| 1017 | pid_t requesting_pid = task_tgid_vnr(current); | 1144 | pid_t requesting_pid = task_tgid_vnr(current); |
| 1018 | 1145 | ||
| 1019 | if ((!new_pid) && (requesting_pid != audit_pid)) { | 1146 | /* test the auditd connection */ |
| 1020 | audit_log_config_change("audit_pid", new_pid, audit_pid, 0); | 1147 | audit_replace(requesting_pid); |
| 1148 | |||
| 1149 | rcu_read_lock(); | ||
| 1150 | auditd_pid = auditd_conn.pid; | ||
| 1151 | /* only the current auditd can unregister itself */ | ||
| 1152 | if ((!new_pid) && (requesting_pid != auditd_pid)) { | ||
| 1153 | rcu_read_unlock(); | ||
| 1154 | audit_log_config_change("audit_pid", new_pid, | ||
| 1155 | auditd_pid, 0); | ||
| 1021 | return -EACCES; | 1156 | return -EACCES; |
| 1022 | } | 1157 | } |
| 1023 | if (audit_pid && new_pid && | 1158 | /* replacing a healthy auditd is not allowed */ |
| 1024 | audit_replace(requesting_pid) != -ECONNREFUSED) { | 1159 | if (auditd_pid && new_pid) { |
| 1025 | audit_log_config_change("audit_pid", new_pid, audit_pid, 0); | 1160 | rcu_read_unlock(); |
| 1161 | audit_log_config_change("audit_pid", new_pid, | ||
| 1162 | auditd_pid, 0); | ||
| 1026 | return -EEXIST; | 1163 | return -EEXIST; |
| 1027 | } | 1164 | } |
| 1165 | rcu_read_unlock(); | ||
| 1166 | |||
| 1028 | if (audit_enabled != AUDIT_OFF) | 1167 | if (audit_enabled != AUDIT_OFF) |
| 1029 | audit_log_config_change("audit_pid", new_pid, audit_pid, 1); | 1168 | audit_log_config_change("audit_pid", new_pid, |
| 1169 | auditd_pid, 1); | ||
| 1170 | |||
| 1030 | if (new_pid) { | 1171 | if (new_pid) { |
| 1031 | if (audit_sock) | 1172 | /* register a new auditd connection */ |
| 1032 | sock_put(audit_sock); | 1173 | auditd_set(new_pid, |
| 1033 | audit_pid = new_pid; | 1174 | NETLINK_CB(skb).portid, |
| 1034 | audit_nlk_portid = NETLINK_CB(skb).portid; | 1175 | sock_net(NETLINK_CB(skb).sk)); |
| 1035 | sock_hold(skb->sk); | 1176 | /* try to process any backlog */ |
| 1036 | audit_sock = skb->sk; | 1177 | wake_up_interruptible(&kauditd_wait); |
| 1037 | } else { | 1178 | } else |
| 1179 | /* unregister the auditd connection */ | ||
| 1038 | auditd_reset(); | 1180 | auditd_reset(); |
| 1039 | } | ||
| 1040 | wake_up_interruptible(&kauditd_wait); | ||
| 1041 | } | 1181 | } |
| 1042 | if (s.mask & AUDIT_STATUS_RATE_LIMIT) { | 1182 | if (s.mask & AUDIT_STATUS_RATE_LIMIT) { |
| 1043 | err = audit_set_rate_limit(s.rate_limit); | 1183 | err = audit_set_rate_limit(s.rate_limit); |
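
The AUDIT_SET pid handling above now follows a test-then-decide sequence; summarized as a sketch of the control flow in the hunk (a reading of the code, no new semantics):

    /* audit_replace(requesting_pid): ping the current auditd with an
     * AUDIT_REPLACE packet; if it is dead, auditd_send_unicast_skb()
     * sees -ECONNREFUSED and calls auditd_reset() for us */
    /* new_pid == 0 && requester != auditd -> -EACCES
     *   (only the current auditd may unregister itself) */
    /* auditd still registered && new_pid  -> -EEXIST
     *   (replacing a healthy auditd is not allowed) */
    /* otherwise: auditd_set(new_pid, portid, net) and wake kauditd,
     * or auditd_reset() on unregister */

The rcu_read_lock()/rcu_read_unlock() pairs around the checks only stabilize the auditd_conn reads; the takeover race itself is resolved by the -EEXIST test.
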
| @@ -1090,7 +1230,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 1090 | if (err) | 1230 | if (err) |
| 1091 | break; | 1231 | break; |
| 1092 | } | 1232 | } |
| 1093 | mutex_unlock(&audit_cmd_mutex); | ||
| 1094 | audit_log_common_recv_msg(&ab, msg_type); | 1233 | audit_log_common_recv_msg(&ab, msg_type); |
| 1095 | if (msg_type != AUDIT_USER_TTY) | 1234 | if (msg_type != AUDIT_USER_TTY) |
| 1096 | audit_log_format(ab, " msg='%.*s'", | 1235 | audit_log_format(ab, " msg='%.*s'", |
| @@ -1108,7 +1247,6 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
| 1108 | } | 1247 | } |
| 1109 | audit_set_portid(ab, NETLINK_CB(skb).portid); | 1248 | audit_set_portid(ab, NETLINK_CB(skb).portid); |
| 1110 | audit_log_end(ab); | 1249 | audit_log_end(ab); |
| 1111 | mutex_lock(&audit_cmd_mutex); | ||
| 1112 | } | 1250 | } |
| 1113 | break; | 1251 | break; |
| 1114 | case AUDIT_ADD_RULE: | 1252 | case AUDIT_ADD_RULE: |
| @@ -1298,26 +1436,26 @@ static int __net_init audit_net_init(struct net *net) | |||
| 1298 | 1436 | ||
| 1299 | struct audit_net *aunet = net_generic(net, audit_net_id); | 1437 | struct audit_net *aunet = net_generic(net, audit_net_id); |
| 1300 | 1438 | ||
| 1301 | aunet->nlsk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg); | 1439 | aunet->sk = netlink_kernel_create(net, NETLINK_AUDIT, &cfg); |
| 1302 | if (aunet->nlsk == NULL) { | 1440 | if (aunet->sk == NULL) { |
| 1303 | audit_panic("cannot initialize netlink socket in namespace"); | 1441 | audit_panic("cannot initialize netlink socket in namespace"); |
| 1304 | return -ENOMEM; | 1442 | return -ENOMEM; |
| 1305 | } | 1443 | } |
| 1306 | aunet->nlsk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; | 1444 | aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT; |
| 1445 | |||
| 1307 | return 0; | 1446 | return 0; |
| 1308 | } | 1447 | } |
| 1309 | 1448 | ||
| 1310 | static void __net_exit audit_net_exit(struct net *net) | 1449 | static void __net_exit audit_net_exit(struct net *net) |
| 1311 | { | 1450 | { |
| 1312 | struct audit_net *aunet = net_generic(net, audit_net_id); | 1451 | struct audit_net *aunet = net_generic(net, audit_net_id); |
| 1313 | struct sock *sock = aunet->nlsk; | 1452 | |
| 1314 | mutex_lock(&audit_cmd_mutex); | 1453 | rcu_read_lock(); |
| 1315 | if (sock == audit_sock) | 1454 | if (net == auditd_conn.net) |
| 1316 | auditd_reset(); | 1455 | auditd_reset(); |
| 1317 | mutex_unlock(&audit_cmd_mutex); | 1456 | rcu_read_unlock(); |
| 1318 | 1457 | ||
| 1319 | netlink_kernel_release(sock); | 1458 | netlink_kernel_release(aunet->sk); |
| 1320 | aunet->nlsk = NULL; | ||
| 1321 | } | 1459 | } |
| 1322 | 1460 | ||
| 1323 | static struct pernet_operations audit_net_ops __net_initdata = { | 1461 | static struct pernet_operations audit_net_ops __net_initdata = { |
| @@ -1335,20 +1473,24 @@ static int __init audit_init(void) | |||
| 1335 | if (audit_initialized == AUDIT_DISABLED) | 1473 | if (audit_initialized == AUDIT_DISABLED) |
| 1336 | return 0; | 1474 | return 0; |
| 1337 | 1475 | ||
| 1338 | pr_info("initializing netlink subsys (%s)\n", | 1476 | memset(&auditd_conn, 0, sizeof(auditd_conn)); |
| 1339 | audit_default ? "enabled" : "disabled"); | 1477 | spin_lock_init(&auditd_conn.lock); |
| 1340 | register_pernet_subsys(&audit_net_ops); | ||
| 1341 | 1478 | ||
| 1342 | skb_queue_head_init(&audit_queue); | 1479 | skb_queue_head_init(&audit_queue); |
| 1343 | skb_queue_head_init(&audit_retry_queue); | 1480 | skb_queue_head_init(&audit_retry_queue); |
| 1344 | skb_queue_head_init(&audit_hold_queue); | 1481 | skb_queue_head_init(&audit_hold_queue); |
| 1345 | audit_initialized = AUDIT_INITIALIZED; | ||
| 1346 | audit_enabled = audit_default; | ||
| 1347 | audit_ever_enabled |= !!audit_default; | ||
| 1348 | 1482 | ||
| 1349 | for (i = 0; i < AUDIT_INODE_BUCKETS; i++) | 1483 | for (i = 0; i < AUDIT_INODE_BUCKETS; i++) |
| 1350 | INIT_LIST_HEAD(&audit_inode_hash[i]); | 1484 | INIT_LIST_HEAD(&audit_inode_hash[i]); |
| 1351 | 1485 | ||
| 1486 | pr_info("initializing netlink subsys (%s)\n", | ||
| 1487 | audit_default ? "enabled" : "disabled"); | ||
| 1488 | register_pernet_subsys(&audit_net_ops); | ||
| 1489 | |||
| 1490 | audit_initialized = AUDIT_INITIALIZED; | ||
| 1491 | audit_enabled = audit_default; | ||
| 1492 | audit_ever_enabled |= !!audit_default; | ||
| 1493 | |||
| 1352 | kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd"); | 1494 | kauditd_task = kthread_run(kauditd_thread, NULL, "kauditd"); |
| 1353 | if (IS_ERR(kauditd_task)) { | 1495 | if (IS_ERR(kauditd_task)) { |
| 1354 | int err = PTR_ERR(kauditd_task); | 1496 | int err = PTR_ERR(kauditd_task); |
| @@ -1519,20 +1661,16 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, | |||
| 1519 | if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE))) | 1661 | if (unlikely(!audit_filter(type, AUDIT_FILTER_TYPE))) |
| 1520 | return NULL; | 1662 | return NULL; |
| 1521 | 1663 | ||
| 1522 | /* don't ever fail/sleep on these two conditions: | 1664 | /* NOTE: don't ever fail/sleep on these two conditions: |
| 1523 | * 1. auditd generated record - since we need auditd to drain the | 1665 | * 1. auditd generated record - since we need auditd to drain the |
| 1524 | * queue; also, when we are checking for auditd, compare PIDs using | 1666 | * queue; also, when we are checking for auditd, compare PIDs using |
| 1525 | * task_tgid_vnr() since auditd_pid is set in audit_receive_msg() | 1667 | * task_tgid_vnr() since auditd_pid is set in audit_receive_msg() |
| 1526 | * using a PID anchored in the caller's namespace | 1668 | * using a PID anchored in the caller's namespace |
| 1527 | * 2. audit command message - record types 1000 through 1099 inclusive | 1669 | * 2. generator holding the audit_cmd_mutex - we don't want to block |
| 1528 | * are command messages/records used to manage the kernel subsystem | 1670 | * while holding the mutex */ |
| 1529 | * and the audit userspace, blocking on these messages could cause | 1671 | if (!(auditd_test_task(current) || |
| 1530 | * problems under load so don't do it (note: not all of these | 1672 | (current == __mutex_owner(&audit_cmd_mutex)))) { |
| 1531 | * command types are valid as record types, but it is quicker to | 1673 | long stime = audit_backlog_wait_time; |
| 1532 | * just check two ints than a series of ints in a if/switch stmt) */ | ||
| 1533 | if (!((audit_pid && audit_pid == task_tgid_vnr(current)) || | ||
| 1534 | (type >= 1000 && type <= 1099))) { | ||
| 1535 | long sleep_time = audit_backlog_wait_time; | ||
| 1536 | 1674 | ||
| 1537 | while (audit_backlog_limit && | 1675 | while (audit_backlog_limit && |
| 1538 | (skb_queue_len(&audit_queue) > audit_backlog_limit)) { | 1676 | (skb_queue_len(&audit_queue) > audit_backlog_limit)) { |
| @@ -1541,14 +1679,13 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, | |||
| 1541 | 1679 | ||
| 1542 | /* sleep if we are allowed and we haven't exhausted our | 1680 | /* sleep if we are allowed and we haven't exhausted our |
| 1543 | * backlog wait limit */ | 1681 | * backlog wait limit */ |
| 1544 | if ((gfp_mask & __GFP_DIRECT_RECLAIM) && | 1682 | if (gfpflags_allow_blocking(gfp_mask) && (stime > 0)) { |
| 1545 | (sleep_time > 0)) { | ||
| 1546 | DECLARE_WAITQUEUE(wait, current); | 1683 | DECLARE_WAITQUEUE(wait, current); |
| 1547 | 1684 | ||
| 1548 | add_wait_queue_exclusive(&audit_backlog_wait, | 1685 | add_wait_queue_exclusive(&audit_backlog_wait, |
| 1549 | &wait); | 1686 | &wait); |
| 1550 | set_current_state(TASK_UNINTERRUPTIBLE); | 1687 | set_current_state(TASK_UNINTERRUPTIBLE); |
| 1551 | sleep_time = schedule_timeout(sleep_time); | 1688 | stime = schedule_timeout(stime); |
| 1552 | remove_wait_queue(&audit_backlog_wait, &wait); | 1689 | remove_wait_queue(&audit_backlog_wait, &wait); |
| 1553 | } else { | 1690 | } else { |
| 1554 | if (audit_rate_check() && printk_ratelimit()) | 1691 | if (audit_rate_check() && printk_ratelimit()) |
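
The __mutex_owner() test introduced above is also what allows audit_receive_msg() (earlier in this patch) to stop dropping audit_cmd_mutex around its logging calls: the mutex holder is exempt from backlog throttling, so it can no longer deadlock waiting for kauditd to drain the queue. A condensed sketch of the exemption:

    /* only ordinary record generators are throttled; auditd itself
     * and the audit_cmd_mutex holder must never block here */
    if (!(auditd_test_task(current) ||
          current == __mutex_owner(&audit_cmd_mutex))) {
            /* wait on audit_backlog_wait, bounded by stime */
    }
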
| @@ -2127,15 +2264,27 @@ out: | |||
| 2127 | */ | 2264 | */ |
| 2128 | void audit_log_end(struct audit_buffer *ab) | 2265 | void audit_log_end(struct audit_buffer *ab) |
| 2129 | { | 2266 | { |
| 2267 | struct sk_buff *skb; | ||
| 2268 | struct nlmsghdr *nlh; | ||
| 2269 | |||
| 2130 | if (!ab) | 2270 | if (!ab) |
| 2131 | return; | 2271 | return; |
| 2132 | if (!audit_rate_check()) { | 2272 | |
| 2133 | audit_log_lost("rate limit exceeded"); | 2273 | if (audit_rate_check()) { |
| 2134 | } else { | 2274 | skb = ab->skb; |
| 2135 | skb_queue_tail(&audit_queue, ab->skb); | ||
| 2136 | wake_up_interruptible(&kauditd_wait); | ||
| 2137 | ab->skb = NULL; | 2275 | ab->skb = NULL; |
| 2138 | } | 2276 | |
| 2277 | /* setup the netlink header, see the comments in | ||
| 2278 | * kauditd_send_multicast_skb() for length quirks */ | ||
| 2279 | nlh = nlmsg_hdr(skb); | ||
| 2280 | nlh->nlmsg_len = skb->len - NLMSG_HDRLEN; | ||
| 2281 | |||
| 2282 | /* queue the netlink packet and poke the kauditd thread */ | ||
| 2283 | skb_queue_tail(&audit_queue, skb); | ||
| 2284 | wake_up_interruptible(&kauditd_wait); | ||
| 2285 | } else | ||
| 2286 | audit_log_lost("rate limit exceeded"); | ||
| 2287 | |||
| 2139 | audit_buffer_free(ab); | 2288 | audit_buffer_free(ab); |
| 2140 | } | 2289 | } |
| 2141 | 2290 | ||
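
kernel/audit.h below switches callers from the raw audit_pid comparison to auditd_test_task(), whose definition lands in kernel/audit.c outside the hunks shown. A plausible implementation, consistent with the RCU usage elsewhere in this patch (a sketch, not quoted from the source):

    int auditd_test_task(const struct task_struct *task)
    {
            int rc;

            rcu_read_lock();
            rc = (auditd_conn.pid && task->tgid == auditd_conn.pid) ? 1 : 0;
            rcu_read_unlock();

            return rc;
    }
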
diff --git a/kernel/audit.h b/kernel/audit.h index ca579880303a..0f1cf6d1878a 100644 --- a/kernel/audit.h +++ b/kernel/audit.h | |||
| @@ -218,7 +218,7 @@ extern void audit_log_name(struct audit_context *context, | |||
| 218 | struct audit_names *n, const struct path *path, | 218 | struct audit_names *n, const struct path *path, |
| 219 | int record_num, int *call_panic); | 219 | int record_num, int *call_panic); |
| 220 | 220 | ||
| 221 | extern int audit_pid; | 221 | extern int auditd_test_task(const struct task_struct *task); |
| 222 | 222 | ||
| 223 | #define AUDIT_INODE_BUCKETS 32 | 223 | #define AUDIT_INODE_BUCKETS 32 |
| 224 | extern struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; | 224 | extern struct list_head audit_inode_hash[AUDIT_INODE_BUCKETS]; |
| @@ -250,10 +250,6 @@ struct audit_netlink_list { | |||
| 250 | 250 | ||
| 251 | int audit_send_list(void *); | 251 | int audit_send_list(void *); |
| 252 | 252 | ||
| 253 | struct audit_net { | ||
| 254 | struct sock *nlsk; | ||
| 255 | }; | ||
| 256 | |||
| 257 | extern int selinux_audit_rule_update(void); | 253 | extern int selinux_audit_rule_update(void); |
| 258 | 254 | ||
| 259 | extern struct mutex audit_filter_mutex; | 255 | extern struct mutex audit_filter_mutex; |
| @@ -340,8 +336,7 @@ extern int audit_filter(int msgtype, unsigned int listtype); | |||
| 340 | extern int __audit_signal_info(int sig, struct task_struct *t); | 336 | extern int __audit_signal_info(int sig, struct task_struct *t); |
| 341 | static inline int audit_signal_info(int sig, struct task_struct *t) | 337 | static inline int audit_signal_info(int sig, struct task_struct *t) |
| 342 | { | 338 | { |
| 343 | if (unlikely((audit_pid && t->tgid == audit_pid) || | 339 | if (auditd_test_task(t) || (audit_signals && !audit_dummy_context())) |
| 344 | (audit_signals && !audit_dummy_context()))) | ||
| 345 | return __audit_signal_info(sig, t); | 340 | return __audit_signal_info(sig, t); |
| 346 | return 0; | 341 | return 0; |
| 347 | } | 342 | } |
diff --git a/kernel/auditsc.c b/kernel/auditsc.c index d6a8de5f8fa3..e59ffc7fc522 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c | |||
| @@ -762,7 +762,7 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk, | |||
| 762 | struct audit_entry *e; | 762 | struct audit_entry *e; |
| 763 | enum audit_state state; | 763 | enum audit_state state; |
| 764 | 764 | ||
| 765 | if (audit_pid && tsk->tgid == audit_pid) | 765 | if (auditd_test_task(tsk)) |
| 766 | return AUDIT_DISABLED; | 766 | return AUDIT_DISABLED; |
| 767 | 767 | ||
| 768 | rcu_read_lock(); | 768 | rcu_read_lock(); |
| @@ -816,7 +816,7 @@ void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx) | |||
| 816 | { | 816 | { |
| 817 | struct audit_names *n; | 817 | struct audit_names *n; |
| 818 | 818 | ||
| 819 | if (audit_pid && tsk->tgid == audit_pid) | 819 | if (auditd_test_task(tsk)) |
| 820 | return; | 820 | return; |
| 821 | 821 | ||
| 822 | rcu_read_lock(); | 822 | rcu_read_lock(); |
| @@ -2256,7 +2256,7 @@ int __audit_signal_info(int sig, struct task_struct *t) | |||
| 2256 | struct audit_context *ctx = tsk->audit_context; | 2256 | struct audit_context *ctx = tsk->audit_context; |
| 2257 | kuid_t uid = current_uid(), t_uid = task_uid(t); | 2257 | kuid_t uid = current_uid(), t_uid = task_uid(t); |
| 2258 | 2258 | ||
| 2259 | if (audit_pid && t->tgid == audit_pid) { | 2259 | if (auditd_test_task(t)) { |
| 2260 | if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { | 2260 | if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) { |
| 2261 | audit_sig_pid = task_tgid_nr(tsk); | 2261 | audit_sig_pid = task_tgid_nr(tsk); |
| 2262 | if (uid_valid(tsk->loginuid)) | 2262 | if (uid_valid(tsk->loginuid)) |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index afe5bab376c9..361a69dfe543 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
| @@ -30,18 +30,12 @@ struct bpf_htab { | |||
| 30 | struct pcpu_freelist freelist; | 30 | struct pcpu_freelist freelist; |
| 31 | struct bpf_lru lru; | 31 | struct bpf_lru lru; |
| 32 | }; | 32 | }; |
| 33 | void __percpu *extra_elems; | 33 | struct htab_elem *__percpu *extra_elems; |
| 34 | atomic_t count; /* number of elements in this hashtable */ | 34 | atomic_t count; /* number of elements in this hashtable */ |
| 35 | u32 n_buckets; /* number of hash buckets */ | 35 | u32 n_buckets; /* number of hash buckets */ |
| 36 | u32 elem_size; /* size of each element in bytes */ | 36 | u32 elem_size; /* size of each element in bytes */ |
| 37 | }; | 37 | }; |
| 38 | 38 | ||
| 39 | enum extra_elem_state { | ||
| 40 | HTAB_NOT_AN_EXTRA_ELEM = 0, | ||
| 41 | HTAB_EXTRA_ELEM_FREE, | ||
| 42 | HTAB_EXTRA_ELEM_USED | ||
| 43 | }; | ||
| 44 | |||
| 45 | /* each htab element is struct htab_elem + key + value */ | 39 | /* each htab element is struct htab_elem + key + value */ |
| 46 | struct htab_elem { | 40 | struct htab_elem { |
| 47 | union { | 41 | union { |
| @@ -56,7 +50,6 @@ struct htab_elem { | |||
| 56 | }; | 50 | }; |
| 57 | union { | 51 | union { |
| 58 | struct rcu_head rcu; | 52 | struct rcu_head rcu; |
| 59 | enum extra_elem_state state; | ||
| 60 | struct bpf_lru_node lru_node; | 53 | struct bpf_lru_node lru_node; |
| 61 | }; | 54 | }; |
| 62 | u32 hash; | 55 | u32 hash; |
| @@ -77,6 +70,11 @@ static bool htab_is_percpu(const struct bpf_htab *htab) | |||
| 77 | htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; | 70 | htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH; |
| 78 | } | 71 | } |
| 79 | 72 | ||
| 73 | static bool htab_is_prealloc(const struct bpf_htab *htab) | ||
| 74 | { | ||
| 75 | return !(htab->map.map_flags & BPF_F_NO_PREALLOC); | ||
| 76 | } | ||
| 77 | |||
| 80 | static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size, | 78 | static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size, |
| 81 | void __percpu *pptr) | 79 | void __percpu *pptr) |
| 82 | { | 80 | { |
| @@ -128,17 +126,20 @@ static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key, | |||
| 128 | 126 | ||
| 129 | static int prealloc_init(struct bpf_htab *htab) | 127 | static int prealloc_init(struct bpf_htab *htab) |
| 130 | { | 128 | { |
| 129 | u32 num_entries = htab->map.max_entries; | ||
| 131 | int err = -ENOMEM, i; | 130 | int err = -ENOMEM, i; |
| 132 | 131 | ||
| 133 | htab->elems = bpf_map_area_alloc(htab->elem_size * | 132 | if (!htab_is_percpu(htab) && !htab_is_lru(htab)) |
| 134 | htab->map.max_entries); | 133 | num_entries += num_possible_cpus(); |
| 134 | |||
| 135 | htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries); | ||
| 135 | if (!htab->elems) | 136 | if (!htab->elems) |
| 136 | return -ENOMEM; | 137 | return -ENOMEM; |
| 137 | 138 | ||
| 138 | if (!htab_is_percpu(htab)) | 139 | if (!htab_is_percpu(htab)) |
| 139 | goto skip_percpu_elems; | 140 | goto skip_percpu_elems; |
| 140 | 141 | ||
| 141 | for (i = 0; i < htab->map.max_entries; i++) { | 142 | for (i = 0; i < num_entries; i++) { |
| 142 | u32 size = round_up(htab->map.value_size, 8); | 143 | u32 size = round_up(htab->map.value_size, 8); |
| 143 | void __percpu *pptr; | 144 | void __percpu *pptr; |
| 144 | 145 | ||
| @@ -166,11 +167,11 @@ skip_percpu_elems: | |||
| 166 | if (htab_is_lru(htab)) | 167 | if (htab_is_lru(htab)) |
| 167 | bpf_lru_populate(&htab->lru, htab->elems, | 168 | bpf_lru_populate(&htab->lru, htab->elems, |
| 168 | offsetof(struct htab_elem, lru_node), | 169 | offsetof(struct htab_elem, lru_node), |
| 169 | htab->elem_size, htab->map.max_entries); | 170 | htab->elem_size, num_entries); |
| 170 | else | 171 | else |
| 171 | pcpu_freelist_populate(&htab->freelist, | 172 | pcpu_freelist_populate(&htab->freelist, |
| 172 | htab->elems + offsetof(struct htab_elem, fnode), | 173 | htab->elems + offsetof(struct htab_elem, fnode), |
| 173 | htab->elem_size, htab->map.max_entries); | 174 | htab->elem_size, num_entries); |
| 174 | 175 | ||
| 175 | return 0; | 176 | return 0; |
| 176 | 177 | ||
| @@ -191,16 +192,22 @@ static void prealloc_destroy(struct bpf_htab *htab) | |||
| 191 | 192 | ||
| 192 | static int alloc_extra_elems(struct bpf_htab *htab) | 193 | static int alloc_extra_elems(struct bpf_htab *htab) |
| 193 | { | 194 | { |
| 194 | void __percpu *pptr; | 195 | struct htab_elem *__percpu *pptr, *l_new; |
| 196 | struct pcpu_freelist_node *l; | ||
| 195 | int cpu; | 197 | int cpu; |
| 196 | 198 | ||
| 197 | pptr = __alloc_percpu_gfp(htab->elem_size, 8, GFP_USER | __GFP_NOWARN); | 199 | pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8, |
| 200 | GFP_USER | __GFP_NOWARN); | ||
| 198 | if (!pptr) | 201 | if (!pptr) |
| 199 | return -ENOMEM; | 202 | return -ENOMEM; |
| 200 | 203 | ||
| 201 | for_each_possible_cpu(cpu) { | 204 | for_each_possible_cpu(cpu) { |
| 202 | ((struct htab_elem *)per_cpu_ptr(pptr, cpu))->state = | 205 | l = pcpu_freelist_pop(&htab->freelist); |
| 203 | HTAB_EXTRA_ELEM_FREE; | 206 | /* pop will succeed, since prealloc_init() |
| 207 | * preallocated extra num_possible_cpus elements | ||
| 208 | */ | ||
| 209 | l_new = container_of(l, struct htab_elem, fnode); | ||
| 210 | *per_cpu_ptr(pptr, cpu) = l_new; | ||
| 204 | } | 211 | } |
| 205 | htab->extra_elems = pptr; | 212 | htab->extra_elems = pptr; |
| 206 | return 0; | 213 | return 0; |
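
The type change above, from void __percpu * to struct htab_elem *__percpu *, means each CPU now holds a pointer to a spare preallocated element instead of inline element storage. The pcpu_freelist_pop() in the loop cannot fail because prealloc_init() (previous hunk) grew the pool to max_entries + num_possible_cpus() for this map flavor. Reading a CPU's spare on the update path then looks like this sketch (usage implied by the hunks; the full swap is in the alloc_htab_elem() changes below):

    struct htab_elem **pl_new, *spare;

    pl_new = this_cpu_ptr(htab->extra_elems);   /* this CPU's slot */
    spare = *pl_new;                            /* a ready-to-use element */
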
| @@ -342,25 +349,25 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) | |||
| 342 | raw_spin_lock_init(&htab->buckets[i].lock); | 349 | raw_spin_lock_init(&htab->buckets[i].lock); |
| 343 | } | 350 | } |
| 344 | 351 | ||
| 345 | if (!percpu && !lru) { | ||
| 346 | /* lru itself can remove the least used element, so | ||
| 347 | * there is no need for an extra elem during map_update. | ||
| 348 | */ | ||
| 349 | err = alloc_extra_elems(htab); | ||
| 350 | if (err) | ||
| 351 | goto free_buckets; | ||
| 352 | } | ||
| 353 | |||
| 354 | if (prealloc) { | 352 | if (prealloc) { |
| 355 | err = prealloc_init(htab); | 353 | err = prealloc_init(htab); |
| 356 | if (err) | 354 | if (err) |
| 357 | goto free_extra_elems; | 355 | goto free_buckets; |
| 356 | |||
| 357 | if (!percpu && !lru) { | ||
| 358 | /* lru itself can remove the least used element, so | ||
| 359 | * there is no need for an extra elem during map_update. | ||
| 360 | */ | ||
| 361 | err = alloc_extra_elems(htab); | ||
| 362 | if (err) | ||
| 363 | goto free_prealloc; | ||
| 364 | } | ||
| 358 | } | 365 | } |
| 359 | 366 | ||
| 360 | return &htab->map; | 367 | return &htab->map; |
| 361 | 368 | ||
| 362 | free_extra_elems: | 369 | free_prealloc: |
| 363 | free_percpu(htab->extra_elems); | 370 | prealloc_destroy(htab); |
| 364 | free_buckets: | 371 | free_buckets: |
| 365 | bpf_map_area_free(htab->buckets); | 372 | bpf_map_area_free(htab->buckets); |
| 366 | free_htab: | 373 | free_htab: |
| @@ -575,12 +582,7 @@ static void htab_elem_free_rcu(struct rcu_head *head) | |||
| 575 | 582 | ||
| 576 | static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) | 583 | static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) |
| 577 | { | 584 | { |
| 578 | if (l->state == HTAB_EXTRA_ELEM_USED) { | 585 | if (htab_is_prealloc(htab)) { |
| 579 | l->state = HTAB_EXTRA_ELEM_FREE; | ||
| 580 | return; | ||
| 581 | } | ||
| 582 | |||
| 583 | if (!(htab->map.map_flags & BPF_F_NO_PREALLOC)) { | ||
| 584 | pcpu_freelist_push(&htab->freelist, &l->fnode); | 586 | pcpu_freelist_push(&htab->freelist, &l->fnode); |
| 585 | } else { | 587 | } else { |
| 586 | atomic_dec(&htab->count); | 588 | atomic_dec(&htab->count); |
| @@ -610,47 +612,43 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr, | |||
| 610 | static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, | 612 | static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, |
| 611 | void *value, u32 key_size, u32 hash, | 613 | void *value, u32 key_size, u32 hash, |
| 612 | bool percpu, bool onallcpus, | 614 | bool percpu, bool onallcpus, |
| 613 | bool old_elem_exists) | 615 | struct htab_elem *old_elem) |
| 614 | { | 616 | { |
| 615 | u32 size = htab->map.value_size; | 617 | u32 size = htab->map.value_size; |
| 616 | bool prealloc = !(htab->map.map_flags & BPF_F_NO_PREALLOC); | 618 | bool prealloc = htab_is_prealloc(htab); |
| 617 | struct htab_elem *l_new; | 619 | struct htab_elem *l_new, **pl_new; |
| 618 | void __percpu *pptr; | 620 | void __percpu *pptr; |
| 619 | int err = 0; | ||
| 620 | 621 | ||
| 621 | if (prealloc) { | 622 | if (prealloc) { |
| 622 | struct pcpu_freelist_node *l; | 623 | if (old_elem) { |
| 624 | /* if we're updating the existing element, | ||
| 625 | * use per-cpu extra elems to avoid freelist_pop/push | ||
| 626 | */ | ||
| 627 | pl_new = this_cpu_ptr(htab->extra_elems); | ||
| 628 | l_new = *pl_new; | ||
| 629 | *pl_new = old_elem; | ||
| 630 | } else { | ||
| 631 | struct pcpu_freelist_node *l; | ||
| 623 | 632 | ||
| 624 | l = pcpu_freelist_pop(&htab->freelist); | 633 | l = pcpu_freelist_pop(&htab->freelist); |
| 625 | if (!l) | 634 | if (!l) |
| 626 | err = -E2BIG; | 635 | return ERR_PTR(-E2BIG); |
| 627 | else | ||
| 628 | l_new = container_of(l, struct htab_elem, fnode); | 636 | l_new = container_of(l, struct htab_elem, fnode); |
| 629 | } else { | ||
| 630 | if (atomic_inc_return(&htab->count) > htab->map.max_entries) { | ||
| 631 | atomic_dec(&htab->count); | ||
| 632 | err = -E2BIG; | ||
| 633 | } else { | ||
| 634 | l_new = kmalloc(htab->elem_size, | ||
| 635 | GFP_ATOMIC | __GFP_NOWARN); | ||
| 636 | if (!l_new) | ||
| 637 | return ERR_PTR(-ENOMEM); | ||
| 638 | } | 637 | } |
| 639 | } | ||
| 640 | |||
| 641 | if (err) { | ||
| 642 | if (!old_elem_exists) | ||
| 643 | return ERR_PTR(err); | ||
| 644 | |||
| 645 | /* if we're updating the existing element and the hash table | ||
| 646 | * is full, use per-cpu extra elems | ||
| 647 | */ | ||
| 648 | l_new = this_cpu_ptr(htab->extra_elems); | ||
| 649 | if (l_new->state != HTAB_EXTRA_ELEM_FREE) | ||
| 650 | return ERR_PTR(-E2BIG); | ||
| 651 | l_new->state = HTAB_EXTRA_ELEM_USED; | ||
| 652 | } else { | 638 | } else { |
| 653 | l_new->state = HTAB_NOT_AN_EXTRA_ELEM; | 639 | if (atomic_inc_return(&htab->count) > htab->map.max_entries) |
| 640 | if (!old_elem) { | ||
| 641 | /* when map is full and update() is replacing | ||
| 642 | * old element, it's ok to allocate, since | ||
| 643 | * old element will be freed immediately. | ||
| 644 | * Otherwise return an error | ||
| 645 | */ | ||
| 646 | atomic_dec(&htab->count); | ||
| 647 | return ERR_PTR(-E2BIG); | ||
| 648 | } | ||
| 649 | l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN); | ||
| 650 | if (!l_new) | ||
| 651 | return ERR_PTR(-ENOMEM); | ||
| 654 | } | 652 | } |
| 655 | 653 | ||
| 656 | memcpy(l_new->key, key, key_size); | 654 | memcpy(l_new->key, key, key_size); |
| @@ -731,7 +729,7 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, | |||
| 731 | goto err; | 729 | goto err; |
| 732 | 730 | ||
| 733 | l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, | 731 | l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false, |
| 734 | !!l_old); | 732 | l_old); |
| 735 | if (IS_ERR(l_new)) { | 733 | if (IS_ERR(l_new)) { |
| 736 | /* all pre-allocated elements are in use or memory exhausted */ | 734 | /* all pre-allocated elements are in use or memory exhausted */ |
| 737 | ret = PTR_ERR(l_new); | 735 | ret = PTR_ERR(l_new); |
| @@ -744,7 +742,8 @@ static int htab_map_update_elem(struct bpf_map *map, void *key, void *value, | |||
| 744 | hlist_nulls_add_head_rcu(&l_new->hash_node, head); | 742 | hlist_nulls_add_head_rcu(&l_new->hash_node, head); |
| 745 | if (l_old) { | 743 | if (l_old) { |
| 746 | hlist_nulls_del_rcu(&l_old->hash_node); | 744 | hlist_nulls_del_rcu(&l_old->hash_node); |
| 747 | free_htab_elem(htab, l_old); | 745 | if (!htab_is_prealloc(htab)) |
| 746 | free_htab_elem(htab, l_old); | ||
| 748 | } | 747 | } |
| 749 | ret = 0; | 748 | ret = 0; |
| 750 | err: | 749 | err: |
| @@ -856,7 +855,7 @@ static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key, | |||
| 856 | value, onallcpus); | 855 | value, onallcpus); |
| 857 | } else { | 856 | } else { |
| 858 | l_new = alloc_htab_elem(htab, key, value, key_size, | 857 | l_new = alloc_htab_elem(htab, key, value, key_size, |
| 859 | hash, true, onallcpus, false); | 858 | hash, true, onallcpus, NULL); |
| 860 | if (IS_ERR(l_new)) { | 859 | if (IS_ERR(l_new)) { |
| 861 | ret = PTR_ERR(l_new); | 860 | ret = PTR_ERR(l_new); |
| 862 | goto err; | 861 | goto err; |
| @@ -1024,8 +1023,7 @@ static void delete_all_elements(struct bpf_htab *htab) | |||
| 1024 | 1023 | ||
| 1025 | hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { | 1024 | hlist_nulls_for_each_entry_safe(l, n, head, hash_node) { |
| 1026 | hlist_nulls_del_rcu(&l->hash_node); | 1025 | hlist_nulls_del_rcu(&l->hash_node); |
| 1027 | if (l->state != HTAB_EXTRA_ELEM_USED) | 1026 | htab_elem_free(htab, l); |
| 1028 | htab_elem_free(htab, l); | ||
| 1029 | } | 1027 | } |
| 1030 | } | 1028 | } |
| 1031 | } | 1029 | } |
| @@ -1045,7 +1043,7 @@ static void htab_map_free(struct bpf_map *map) | |||
| 1045 | * not have executed. Wait for them. | 1043 | * not have executed. Wait for them. |
| 1046 | */ | 1044 | */ |
| 1047 | rcu_barrier(); | 1045 | rcu_barrier(); |
| 1048 | if (htab->map.map_flags & BPF_F_NO_PREALLOC) | 1046 | if (!htab_is_prealloc(htab)) |
| 1049 | delete_all_elements(htab); | 1047 | delete_all_elements(htab); |
| 1050 | else | 1048 | else |
| 1051 | prealloc_destroy(htab); | 1049 | prealloc_destroy(htab); |
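
With the per-element state enum gone, ownership in the preallocated case is tracked purely by position: an element is either linked in a bucket, sitting in the freelist, or parked as some CPU's spare. That is why free_htab_elem() can unconditionally push to the freelist for prealloc maps, and why htab_map_update_elem() now skips free_htab_elem() for the replaced element. The update-path swap from alloc_htab_elem() above, isolated as a sketch:

    /* updating an existing key on a preallocated (non-LRU, non-percpu)
     * map: no freelist traffic and no kfree on this path */
    pl_new = this_cpu_ptr(htab->extra_elems);
    l_new = *pl_new;        /* this CPU's spare holds the new value */
    *pl_new = old_elem;     /* replaced element becomes the next spare */
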
diff --git a/kernel/cpu.c b/kernel/cpu.c index f7c063239fa5..37b223e4fc05 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
| @@ -1335,26 +1335,21 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name, | |||
| 1335 | struct cpuhp_step *sp; | 1335 | struct cpuhp_step *sp; |
| 1336 | int ret = 0; | 1336 | int ret = 0; |
| 1337 | 1337 | ||
| 1338 | mutex_lock(&cpuhp_state_mutex); | ||
| 1339 | |||
| 1340 | if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) { | 1338 | if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) { |
| 1341 | ret = cpuhp_reserve_state(state); | 1339 | ret = cpuhp_reserve_state(state); |
| 1342 | if (ret < 0) | 1340 | if (ret < 0) |
| 1343 | goto out; | 1341 | return ret; |
| 1344 | state = ret; | 1342 | state = ret; |
| 1345 | } | 1343 | } |
| 1346 | sp = cpuhp_get_step(state); | 1344 | sp = cpuhp_get_step(state); |
| 1347 | if (name && sp->name) { | 1345 | if (name && sp->name) |
| 1348 | ret = -EBUSY; | 1346 | return -EBUSY; |
| 1349 | goto out; | 1347 | |
| 1350 | } | ||
| 1351 | sp->startup.single = startup; | 1348 | sp->startup.single = startup; |
| 1352 | sp->teardown.single = teardown; | 1349 | sp->teardown.single = teardown; |
| 1353 | sp->name = name; | 1350 | sp->name = name; |
| 1354 | sp->multi_instance = multi_instance; | 1351 | sp->multi_instance = multi_instance; |
| 1355 | INIT_HLIST_HEAD(&sp->list); | 1352 | INIT_HLIST_HEAD(&sp->list); |
| 1356 | out: | ||
| 1357 | mutex_unlock(&cpuhp_state_mutex); | ||
| 1358 | return ret; | 1353 | return ret; |
| 1359 | } | 1354 | } |
| 1360 | 1355 | ||
| @@ -1428,6 +1423,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, | |||
| 1428 | return -EINVAL; | 1423 | return -EINVAL; |
| 1429 | 1424 | ||
| 1430 | get_online_cpus(); | 1425 | get_online_cpus(); |
| 1426 | mutex_lock(&cpuhp_state_mutex); | ||
| 1431 | 1427 | ||
| 1432 | if (!invoke || !sp->startup.multi) | 1428 | if (!invoke || !sp->startup.multi) |
| 1433 | goto add_node; | 1429 | goto add_node; |
| @@ -1447,16 +1443,14 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node, | |||
| 1447 | if (ret) { | 1443 | if (ret) { |
| 1448 | if (sp->teardown.multi) | 1444 | if (sp->teardown.multi) |
| 1449 | cpuhp_rollback_install(cpu, state, node); | 1445 | cpuhp_rollback_install(cpu, state, node); |
| 1450 | goto err; | 1446 | goto unlock; |
| 1451 | } | 1447 | } |
| 1452 | } | 1448 | } |
| 1453 | add_node: | 1449 | add_node: |
| 1454 | ret = 0; | 1450 | ret = 0; |
| 1455 | mutex_lock(&cpuhp_state_mutex); | ||
| 1456 | hlist_add_head(node, &sp->list); | 1451 | hlist_add_head(node, &sp->list); |
| 1452 | unlock: | ||
| 1457 | mutex_unlock(&cpuhp_state_mutex); | 1453 | mutex_unlock(&cpuhp_state_mutex); |
| 1458 | |||
| 1459 | err: | ||
| 1460 | put_online_cpus(); | 1454 | put_online_cpus(); |
| 1461 | return ret; | 1455 | return ret; |
| 1462 | } | 1456 | } |
| @@ -1491,6 +1485,7 @@ int __cpuhp_setup_state(enum cpuhp_state state, | |||
| 1491 | return -EINVAL; | 1485 | return -EINVAL; |
| 1492 | 1486 | ||
| 1493 | get_online_cpus(); | 1487 | get_online_cpus(); |
| 1488 | mutex_lock(&cpuhp_state_mutex); | ||
| 1494 | 1489 | ||
| 1495 | ret = cpuhp_store_callbacks(state, name, startup, teardown, | 1490 | ret = cpuhp_store_callbacks(state, name, startup, teardown, |
| 1496 | multi_instance); | 1491 | multi_instance); |
| @@ -1524,6 +1519,7 @@ int __cpuhp_setup_state(enum cpuhp_state state, | |||
| 1524 | } | 1519 | } |
| 1525 | } | 1520 | } |
| 1526 | out: | 1521 | out: |
| 1522 | mutex_unlock(&cpuhp_state_mutex); | ||
| 1527 | put_online_cpus(); | 1523 | put_online_cpus(); |
| 1528 | /* | 1524 | /* |
| 1529 | * If the requested state is CPUHP_AP_ONLINE_DYN, return the | 1525 | * If the requested state is CPUHP_AP_ONLINE_DYN, return the |
| @@ -1547,6 +1543,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state, | |||
| 1547 | return -EINVAL; | 1543 | return -EINVAL; |
| 1548 | 1544 | ||
| 1549 | get_online_cpus(); | 1545 | get_online_cpus(); |
| 1546 | mutex_lock(&cpuhp_state_mutex); | ||
| 1547 | |||
| 1550 | if (!invoke || !cpuhp_get_teardown_cb(state)) | 1548 | if (!invoke || !cpuhp_get_teardown_cb(state)) |
| 1551 | goto remove; | 1549 | goto remove; |
| 1552 | /* | 1550 | /* |
| @@ -1563,7 +1561,6 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state, | |||
| 1563 | } | 1561 | } |
| 1564 | 1562 | ||
| 1565 | remove: | 1563 | remove: |
| 1566 | mutex_lock(&cpuhp_state_mutex); | ||
| 1567 | hlist_del(node); | 1564 | hlist_del(node); |
| 1568 | mutex_unlock(&cpuhp_state_mutex); | 1565 | mutex_unlock(&cpuhp_state_mutex); |
| 1569 | put_online_cpus(); | 1566 | put_online_cpus(); |
| @@ -1571,6 +1568,7 @@ remove: | |||
| 1571 | return 0; | 1568 | return 0; |
| 1572 | } | 1569 | } |
| 1573 | EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance); | 1570 | EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance); |
| 1571 | |||
| 1574 | /** | 1572 | /** |
| 1575 | * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state | 1573 | * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state |
| 1576 | * @state: The state to remove | 1574 | * @state: The state to remove |
| @@ -1589,6 +1587,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) | |||
| 1589 | 1587 | ||
| 1590 | get_online_cpus(); | 1588 | get_online_cpus(); |
| 1591 | 1589 | ||
| 1590 | mutex_lock(&cpuhp_state_mutex); | ||
| 1592 | if (sp->multi_instance) { | 1591 | if (sp->multi_instance) { |
| 1593 | WARN(!hlist_empty(&sp->list), | 1592 | WARN(!hlist_empty(&sp->list), |
| 1594 | "Error: Removing state %d which has instances left.\n", | 1593 | "Error: Removing state %d which has instances left.\n", |
| @@ -1613,6 +1612,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke) | |||
| 1613 | } | 1612 | } |
| 1614 | remove: | 1613 | remove: |
| 1615 | cpuhp_store_callbacks(state, NULL, NULL, NULL, false); | 1614 | cpuhp_store_callbacks(state, NULL, NULL, NULL, false); |
| 1615 | mutex_unlock(&cpuhp_state_mutex); | ||
| 1616 | put_online_cpus(); | 1616 | put_online_cpus(); |
| 1617 | } | 1617 | } |
| 1618 | EXPORT_SYMBOL(__cpuhp_remove_state); | 1618 | EXPORT_SYMBOL(__cpuhp_remove_state); |
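
Net effect of the kernel/cpu.c changes above: cpuhp_state_mutex is no longer taken deep inside cpuhp_store_callbacks(), but by every external entry point, so the whole reserve/store/invoke sequence runs under one critical section. The resulting lock nesting, sketched:

    get_online_cpus();                      /* outer: block hotplug */
    mutex_lock(&cpuhp_state_mutex);         /* inner: protect the state table */
    ret = cpuhp_store_callbacks(state, name, startup, teardown,
                                multi_instance); /* mutex already held */
    /* ... invoke startup callbacks on each online CPU ... */
    mutex_unlock(&cpuhp_state_mutex);
    put_online_cpus();
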
diff --git a/kernel/futex.c b/kernel/futex.c index 229a744b1781..45858ec73941 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
| @@ -2815,7 +2815,6 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, | |||
| 2815 | { | 2815 | { |
| 2816 | struct hrtimer_sleeper timeout, *to = NULL; | 2816 | struct hrtimer_sleeper timeout, *to = NULL; |
| 2817 | struct rt_mutex_waiter rt_waiter; | 2817 | struct rt_mutex_waiter rt_waiter; |
| 2818 | struct rt_mutex *pi_mutex = NULL; | ||
| 2819 | struct futex_hash_bucket *hb; | 2818 | struct futex_hash_bucket *hb; |
| 2820 | union futex_key key2 = FUTEX_KEY_INIT; | 2819 | union futex_key key2 = FUTEX_KEY_INIT; |
| 2821 | struct futex_q q = futex_q_init; | 2820 | struct futex_q q = futex_q_init; |
| @@ -2899,6 +2898,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, | |||
| 2899 | if (q.pi_state && (q.pi_state->owner != current)) { | 2898 | if (q.pi_state && (q.pi_state->owner != current)) { |
| 2900 | spin_lock(q.lock_ptr); | 2899 | spin_lock(q.lock_ptr); |
| 2901 | ret = fixup_pi_state_owner(uaddr2, &q, current); | 2900 | ret = fixup_pi_state_owner(uaddr2, &q, current); |
| 2901 | if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) | ||
| 2902 | rt_mutex_unlock(&q.pi_state->pi_mutex); | ||
| 2902 | /* | 2903 | /* |
| 2903 | * Drop the reference to the pi state which | 2904 | * Drop the reference to the pi state which |
| 2904 | * the requeue_pi() code acquired for us. | 2905 | * the requeue_pi() code acquired for us. |
| @@ -2907,6 +2908,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, | |||
| 2907 | spin_unlock(q.lock_ptr); | 2908 | spin_unlock(q.lock_ptr); |
| 2908 | } | 2909 | } |
| 2909 | } else { | 2910 | } else { |
| 2911 | struct rt_mutex *pi_mutex; | ||
| 2912 | |||
| 2910 | /* | 2913 | /* |
| 2911 | * We have been woken up by futex_unlock_pi(), a timeout, or a | 2914 | * We have been woken up by futex_unlock_pi(), a timeout, or a |
| 2912 | * signal. futex_unlock_pi() will not destroy the lock_ptr nor | 2915 | * signal. futex_unlock_pi() will not destroy the lock_ptr nor |
| @@ -2930,18 +2933,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, | |||
| 2930 | if (res) | 2933 | if (res) |
| 2931 | ret = (res < 0) ? res : 0; | 2934 | ret = (res < 0) ? res : 0; |
| 2932 | 2935 | ||
| 2936 | /* | ||
| 2937 | * If fixup_pi_state_owner() faulted and was unable to handle | ||
| 2938 | * the fault, unlock the rt_mutex and return the fault to | ||
| 2939 | * userspace. | ||
| 2940 | */ | ||
| 2941 | if (ret && rt_mutex_owner(pi_mutex) == current) | ||
| 2942 | rt_mutex_unlock(pi_mutex); | ||
| 2943 | |||
| 2933 | /* Unqueue and drop the lock. */ | 2944 | /* Unqueue and drop the lock. */ |
| 2934 | unqueue_me_pi(&q); | 2945 | unqueue_me_pi(&q); |
| 2935 | } | 2946 | } |
| 2936 | 2947 | ||
| 2937 | /* | 2948 | if (ret == -EINTR) { |
| 2938 | * If fixup_pi_state_owner() faulted and was unable to handle the | ||
| 2939 | * fault, unlock the rt_mutex and return the fault to userspace. | ||
| 2940 | */ | ||
| 2941 | if (ret == -EFAULT) { | ||
| 2942 | if (pi_mutex && rt_mutex_owner(pi_mutex) == current) | ||
| 2943 | rt_mutex_unlock(pi_mutex); | ||
| 2944 | } else if (ret == -EINTR) { | ||
| 2945 | /* | 2949 | /* |
| 2946 | * We've already been requeued, but cannot restart by calling | 2950 | * We've already been requeued, but cannot restart by calling |
| 2947 | * futex_lock_pi() directly. We could restart this syscall, but | 2951 | * futex_lock_pi() directly. We could restart this syscall, but |
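The requeue-PI error handling above boils down to one rule: after a failed ownership fixup, release the pi_mutex only if the current task actually owns it, right where the failure is detected. A minimal userspace sketch of that rule, with ownership tracked by hand since pthreads has no rt_mutex_owner(); all names here are illustrative, not the kernel API:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pi_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_t owner;
static int owned;

static int fixup_pi_state_owner(void)
{
	return -14;			/* pretend the fixup faulted (-EFAULT) */
}

int main(void)
{
	int ret;

	pthread_mutex_lock(&pi_mutex);
	owner = pthread_self();
	owned = 1;

	ret = fixup_pi_state_owner();
	/* Mirror "if (ret && rt_mutex_owner(pi_mutex) == current)": only the
	 * owner may unlock, and only on the error path. */
	if (ret && owned && pthread_equal(owner, pthread_self())) {
		pthread_mutex_unlock(&pi_mutex);
		owned = 0;
	}
	printf("ret=%d, lock %s\n", ret, owned ? "still held" : "released");
	return 0;
}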
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c index 7bc24d477805..c65f7989f850 100644 --- a/kernel/locking/rwsem-spinlock.c +++ b/kernel/locking/rwsem-spinlock.c | |||
| @@ -213,10 +213,9 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state) | |||
| 213 | */ | 213 | */ |
| 214 | if (sem->count == 0) | 214 | if (sem->count == 0) |
| 215 | break; | 215 | break; |
| 216 | if (signal_pending_state(state, current)) { | 216 | if (signal_pending_state(state, current)) |
| 217 | ret = -EINTR; | 217 | goto out_nolock; |
| 218 | goto out; | 218 | |
| 219 | } | ||
| 220 | set_current_state(state); | 219 | set_current_state(state); |
| 221 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | 220 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 222 | schedule(); | 221 | schedule(); |
| @@ -224,12 +223,19 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state) | |||
| 224 | } | 223 | } |
| 225 | /* got the lock */ | 224 | /* got the lock */ |
| 226 | sem->count = -1; | 225 | sem->count = -1; |
| 227 | out: | ||
| 228 | list_del(&waiter.list); | 226 | list_del(&waiter.list); |
| 229 | 227 | ||
| 230 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | 228 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); |
| 231 | 229 | ||
| 232 | return ret; | 230 | return ret; |
| 231 | |||
| 232 | out_nolock: | ||
| 233 | list_del(&waiter.list); | ||
| 234 | if (!list_empty(&sem->wait_list)) | ||
| 235 | __rwsem_do_wake(sem, 1); | ||
| 236 | raw_spin_unlock_irqrestore(&sem->wait_lock, flags); | ||
| 237 | |||
| 238 | return -EINTR; | ||
| 233 | } | 239 | } |
| 234 | 240 | ||
| 235 | void __sched __down_write(struct rw_semaphore *sem) | 241 | void __sched __down_write(struct rw_semaphore *sem) |
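The new out_nolock path encodes a subtle invariant: a writer that bails out on a signal may be the only thing standing between the semaphore and the waiters queued behind it, so it must pass the wakeup on before returning -EINTR. A toy illustration of just the bookkeeping (list handling only, no real blocking; names are made up):

#include <stdio.h>

struct waiter {
	struct waiter *next;
	const char *name;
};

static struct waiter *wait_list;

static void wake_next(void)
{
	if (wait_list)			/* stands in for __rwsem_do_wake(sem, 1) */
		printf("waking %s\n", wait_list->name);
}

int main(void)
{
	struct waiter reader = { NULL, "reader" };
	struct waiter writer = { &reader, "writer" };

	wait_list = &writer;		/* writer queued first, reader behind it */

	/* Writer catches a signal: unlink itself, then hand the wakeup to
	 * whoever is next, instead of silently returning -EINTR. */
	wait_list = writer.next;
	wake_next();			/* prints: waking reader */
	return 0;
}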
diff --git a/kernel/memremap.c b/kernel/memremap.c index 06123234f118..07e85e5229da 100644 --- a/kernel/memremap.c +++ b/kernel/memremap.c | |||
| @@ -247,11 +247,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data) | |||
| 247 | align_start = res->start & ~(SECTION_SIZE - 1); | 247 | align_start = res->start & ~(SECTION_SIZE - 1); |
| 248 | align_size = ALIGN(resource_size(res), SECTION_SIZE); | 248 | align_size = ALIGN(resource_size(res), SECTION_SIZE); |
| 249 | 249 | ||
| 250 | lock_device_hotplug(); | ||
| 251 | mem_hotplug_begin(); | 250 | mem_hotplug_begin(); |
| 252 | arch_remove_memory(align_start, align_size); | 251 | arch_remove_memory(align_start, align_size); |
| 253 | mem_hotplug_done(); | 252 | mem_hotplug_done(); |
| 254 | unlock_device_hotplug(); | ||
| 255 | 253 | ||
| 256 | untrack_pfn(NULL, PHYS_PFN(align_start), align_size); | 254 | untrack_pfn(NULL, PHYS_PFN(align_start), align_size); |
| 257 | pgmap_radix_release(res); | 255 | pgmap_radix_release(res); |
| @@ -364,11 +362,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res, | |||
| 364 | if (error) | 362 | if (error) |
| 365 | goto err_pfn_remap; | 363 | goto err_pfn_remap; |
| 366 | 364 | ||
| 367 | lock_device_hotplug(); | ||
| 368 | mem_hotplug_begin(); | 365 | mem_hotplug_begin(); |
| 369 | error = arch_add_memory(nid, align_start, align_size, true); | 366 | error = arch_add_memory(nid, align_start, align_size, true); |
| 370 | mem_hotplug_done(); | 367 | mem_hotplug_done(); |
| 371 | unlock_device_hotplug(); | ||
| 372 | if (error) | 368 | if (error) |
| 373 | goto err_add_memory; | 369 | goto err_add_memory; |
| 374 | 370 | ||
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index cd7cd489f739..54c577578da6 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c | |||
| @@ -584,20 +584,14 @@ static int sugov_start(struct cpufreq_policy *policy) | |||
| 584 | for_each_cpu(cpu, policy->cpus) { | 584 | for_each_cpu(cpu, policy->cpus) { |
| 585 | struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); | 585 | struct sugov_cpu *sg_cpu = &per_cpu(sugov_cpu, cpu); |
| 586 | 586 | ||
| 587 | memset(sg_cpu, 0, sizeof(*sg_cpu)); | ||
| 587 | sg_cpu->sg_policy = sg_policy; | 588 | sg_cpu->sg_policy = sg_policy; |
| 588 | if (policy_is_shared(policy)) { | 589 | sg_cpu->flags = SCHED_CPUFREQ_RT; |
| 589 | sg_cpu->util = 0; | 590 | sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; |
| 590 | sg_cpu->max = 0; | 591 | cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, |
| 591 | sg_cpu->flags = SCHED_CPUFREQ_RT; | 592 | policy_is_shared(policy) ? |
| 592 | sg_cpu->last_update = 0; | 593 | sugov_update_shared : |
| 593 | sg_cpu->iowait_boost = 0; | 594 | sugov_update_single); |
| 594 | sg_cpu->iowait_boost_max = policy->cpuinfo.max_freq; | ||
| 595 | cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, | ||
| 596 | sugov_update_shared); | ||
| 597 | } else { | ||
| 598 | cpufreq_add_update_util_hook(cpu, &sg_cpu->update_util, | ||
| 599 | sugov_update_single); | ||
| 600 | } | ||
| 601 | } | 595 | } |
| 602 | return 0; | 596 | return 0; |
| 603 | } | 597 | } |
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 99b2c33a9fbc..a2ce59015642 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
| @@ -445,13 +445,13 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se, | |||
| 445 | * | 445 | * |
| 446 | * This function returns true if: | 446 | * This function returns true if: |
| 447 | * | 447 | * |
| 448 | * runtime / (deadline - t) > dl_runtime / dl_period , | 448 | * runtime / (deadline - t) > dl_runtime / dl_deadline , |
| 449 | * | 449 | * |
| 450 | * IOW we can't recycle current parameters. | 450 | * IOW we can't recycle current parameters. |
| 451 | * | 451 | * |
| 452 | * Notice that the bandwidth check is done against the period. For | 452 | * Notice that the bandwidth check is done against the deadline. For |
| 453 | * task with deadline equal to period this is the same as using | 453 | * task with deadline equal to period this is the same as using |
| 454 | * dl_deadline instead of dl_period in the equation above. | 454 | * dl_period instead of dl_deadline in the equation above. |
| 455 | */ | 455 | */ |
| 456 | static bool dl_entity_overflow(struct sched_dl_entity *dl_se, | 456 | static bool dl_entity_overflow(struct sched_dl_entity *dl_se, |
| 457 | struct sched_dl_entity *pi_se, u64 t) | 457 | struct sched_dl_entity *pi_se, u64 t) |
| @@ -476,7 +476,7 @@ static bool dl_entity_overflow(struct sched_dl_entity *dl_se, | |||
| 476 | * of anything below microseconds resolution is actually fiction | 476 | * of anything below microseconds resolution is actually fiction |
| 477 | * (but still we want to give the user that illusion >;). | 477 | * (but still we want to give the user that illusion >;). |
| 478 | */ | 478 | */ |
| 479 | left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE); | 479 | left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE); |
| 480 | right = ((dl_se->deadline - t) >> DL_SCALE) * | 480 | right = ((dl_se->deadline - t) >> DL_SCALE) * |
| 481 | (pi_se->dl_runtime >> DL_SCALE); | 481 | (pi_se->dl_runtime >> DL_SCALE); |
| 482 | 482 | ||
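To see the rewritten check in action, here is a userspace re-run of the cross-multiplied inequality with made-up numbers (5 ms of runtime left, 20 ms to the deadline, admitted dl_runtime/dl_deadline = 5 ms/10 ms); the kernel additionally shifts both factors by DL_SCALE to keep the products inside 64 bits:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* all values in nanoseconds, chosen for the example */
	uint64_t runtime     = 5000000;		/* remaining runtime */
	uint64_t time_left   = 20000000;	/* deadline - t */
	uint64_t dl_runtime  = 5000000;
	uint64_t dl_deadline = 10000000;

	/* runtime / time_left > dl_runtime / dl_deadline, without dividing */
	uint64_t left  = dl_deadline * runtime;
	uint64_t right = time_left * dl_runtime;

	/* prints 0: a 25% instantaneous demand fits in the 50% admitted
	 * density, so the current parameters can be reused */
	printf("overflow = %d\n", left > right);
	return 0;
}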
| @@ -505,10 +505,15 @@ static void update_dl_entity(struct sched_dl_entity *dl_se, | |||
| 505 | } | 505 | } |
| 506 | } | 506 | } |
| 507 | 507 | ||
| 508 | static inline u64 dl_next_period(struct sched_dl_entity *dl_se) | ||
| 509 | { | ||
| 510 | return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period; | ||
| 511 | } | ||
| 512 | |||
| 508 | /* | 513 | /* |
| 509 | * If the entity depleted all its runtime, and if we want it to sleep | 514 | * If the entity depleted all its runtime, and if we want it to sleep |
| 510 | * while waiting for some new execution time to become available, we | 515 | * while waiting for some new execution time to become available, we |
| 511 | * set the bandwidth enforcement timer to the replenishment instant | 516 | * set the bandwidth replenishment timer to the replenishment instant |
| 512 | * and try to activate it. | 517 | * and try to activate it. |
| 513 | * | 518 | * |
| 514 | * Notice that it is important for the caller to know if the timer | 519 | * Notice that it is important for the caller to know if the timer |
| @@ -530,7 +535,7 @@ static int start_dl_timer(struct task_struct *p) | |||
| 530 | * that it is actually coming from rq->clock and not from | 535 | * that it is actually coming from rq->clock and not from |
| 531 | * hrtimer's time base reading. | 536 | * hrtimer's time base reading. |
| 532 | */ | 537 | */ |
| 533 | act = ns_to_ktime(dl_se->deadline); | 538 | act = ns_to_ktime(dl_next_period(dl_se)); |
| 534 | now = hrtimer_cb_get_time(timer); | 539 | now = hrtimer_cb_get_time(timer); |
| 535 | delta = ktime_to_ns(now) - rq_clock(rq); | 540 | delta = ktime_to_ns(now) - rq_clock(rq); |
| 536 | act = ktime_add_ns(act, delta); | 541 | act = ktime_add_ns(act, delta); |
| @@ -638,6 +643,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) | |||
| 638 | lockdep_unpin_lock(&rq->lock, rf.cookie); | 643 | lockdep_unpin_lock(&rq->lock, rf.cookie); |
| 639 | rq = dl_task_offline_migration(rq, p); | 644 | rq = dl_task_offline_migration(rq, p); |
| 640 | rf.cookie = lockdep_pin_lock(&rq->lock); | 645 | rf.cookie = lockdep_pin_lock(&rq->lock); |
| 646 | update_rq_clock(rq); | ||
| 641 | 647 | ||
| 642 | /* | 648 | /* |
| 643 | * Now that the task has been migrated to the new RQ and we | 649 | * Now that the task has been migrated to the new RQ and we |
| @@ -689,6 +695,37 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) | |||
| 689 | timer->function = dl_task_timer; | 695 | timer->function = dl_task_timer; |
| 690 | } | 696 | } |
| 691 | 697 | ||
| 698 | /* | ||
| 699 | * During the activation, CBS checks if it can reuse the current task's | ||
| 700 | * runtime and period. If the deadline of the task is in the past, CBS | ||
| 701 | * cannot use the runtime, and so it replenishes the task. This rule | ||
| 702 | * works fine for implicit deadline tasks (deadline == period), and the | ||
| 703 | * CBS was designed for implicit deadline tasks. However, a task with | ||
| 704 | * constrained deadline (deadline < period) might be awakened after the | ||
| 705 | * deadline, but before the next period. In this case, replenishing the | ||
| 706 | * task would allow it to run for runtime / deadline. As in this case | ||
| 707 | * deadline < period, CBS enables a task to run for more than the | ||
| 708 | * runtime / period. In a very loaded system, this can cause a domino | ||
| 709 | * effect, making other tasks miss their deadlines. | ||
| 710 | * | ||
| 711 | * To avoid this problem, in the activation of a constrained deadline | ||
| 712 | * task after the deadline but before the next period, throttle the | ||
| 713 | * task and set the replenishing timer to the beginning of the next period, | ||
| 714 | * unless it is boosted. | ||
| 715 | */ | ||
| 716 | static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se) | ||
| 717 | { | ||
| 718 | struct task_struct *p = dl_task_of(dl_se); | ||
| 719 | struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se)); | ||
| 720 | |||
| 721 | if (dl_time_before(dl_se->deadline, rq_clock(rq)) && | ||
| 722 | dl_time_before(rq_clock(rq), dl_next_period(dl_se))) { | ||
| 723 | if (unlikely(dl_se->dl_boosted || !start_dl_timer(p))) | ||
| 724 | return; | ||
| 725 | dl_se->dl_throttled = 1; | ||
| 726 | } | ||
| 727 | } | ||
| 728 | |||
| 692 | static | 729 | static |
| 693 | int dl_runtime_exceeded(struct sched_dl_entity *dl_se) | 730 | int dl_runtime_exceeded(struct sched_dl_entity *dl_se) |
| 694 | { | 731 | { |
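Plugging toy numbers into the window test above makes the throttle concrete. Take a constrained task with dl_runtime = 5 ms, dl_deadline = 10 ms, dl_period = 100 ms that wakes at t = 12 ms, past its absolute deadline (10 ms) but before the next period: replenishing it there would hand it 5/10 = 50% of the CPU instead of the admitted 5/100 = 5%. A sketch with those illustrative values:

#include <stdio.h>

int main(void)
{
	/* milliseconds, illustrative values */
	unsigned long dl_deadline = 10, dl_period = 100;
	unsigned long abs_deadline = 10;	/* dl_se->deadline */
	unsigned long now = 12;			/* rq_clock(rq) */

	/* dl_next_period(): deadline - dl_deadline + dl_period */
	unsigned long next_period = abs_deadline - dl_deadline + dl_period;

	if (abs_deadline < now && now < next_period)
		printf("throttle, replenish at t=%lums\n", next_period);
	return 0;
}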
| @@ -922,6 +959,11 @@ static void dequeue_dl_entity(struct sched_dl_entity *dl_se) | |||
| 922 | __dequeue_dl_entity(dl_se); | 959 | __dequeue_dl_entity(dl_se); |
| 923 | } | 960 | } |
| 924 | 961 | ||
| 962 | static inline bool dl_is_constrained(struct sched_dl_entity *dl_se) | ||
| 963 | { | ||
| 964 | return dl_se->dl_deadline < dl_se->dl_period; | ||
| 965 | } | ||
| 966 | |||
| 925 | static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) | 967 | static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) |
| 926 | { | 968 | { |
| 927 | struct task_struct *pi_task = rt_mutex_get_top_task(p); | 969 | struct task_struct *pi_task = rt_mutex_get_top_task(p); |
| @@ -948,6 +990,15 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) | |||
| 948 | } | 990 | } |
| 949 | 991 | ||
| 950 | /* | 992 | /* |
| 993 | * Check if a constrained deadline task was activated | ||
| 994 | * after the deadline but before the next period. | ||
| 995 | * If that is the case, the task will be throttled and | ||
| 996 | * the replenishment timer will be set to the next period. | ||
| 997 | */ | ||
| 998 | if (!p->dl.dl_throttled && dl_is_constrained(&p->dl)) | ||
| 999 | dl_check_constrained_dl(&p->dl); | ||
| 1000 | |||
| 1001 | /* | ||
| 951 | * If p is throttled, we do nothing. In fact, if it exhausted | 1002 | * If p is throttled, we do nothing. In fact, if it exhausted |
| 952 | * its budget it needs a replenishment and, since it now is on | 1003 | * its budget it needs a replenishment and, since it now is on |
| 953 | * its rq, the bandwidth timer callback (which clearly has not | 1004 | * its rq, the bandwidth timer callback (which clearly has not |
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index 7296b7308eca..f15fb2bdbc0d 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c | |||
| @@ -169,7 +169,7 @@ static inline int calc_load_write_idx(void) | |||
| 169 | * If the folding window started, make sure we start writing in the | 169 | * If the folding window started, make sure we start writing in the |
| 170 | * next idle-delta. | 170 | * next idle-delta. |
| 171 | */ | 171 | */ |
| 172 | if (!time_before(jiffies, calc_load_update)) | 172 | if (!time_before(jiffies, READ_ONCE(calc_load_update))) |
| 173 | idx++; | 173 | idx++; |
| 174 | 174 | ||
| 175 | return idx & 1; | 175 | return idx & 1; |
| @@ -202,8 +202,9 @@ void calc_load_exit_idle(void) | |||
| 202 | struct rq *this_rq = this_rq(); | 202 | struct rq *this_rq = this_rq(); |
| 203 | 203 | ||
| 204 | /* | 204 | /* |
| 205 | * If we're still before the sample window, we're done. | 205 | * If we're still before the pending sample window, we're done. |
| 206 | */ | 206 | */ |
| 207 | this_rq->calc_load_update = READ_ONCE(calc_load_update); | ||
| 207 | if (time_before(jiffies, this_rq->calc_load_update)) | 208 | if (time_before(jiffies, this_rq->calc_load_update)) |
| 208 | return; | 209 | return; |
| 209 | 210 | ||
| @@ -212,7 +213,6 @@ void calc_load_exit_idle(void) | |||
| 212 | * accounted through the nohz accounting, so skip the entire deal and | 213 | * accounted through the nohz accounting, so skip the entire deal and |
| 213 | * sync up for the next window. | 214 | * sync up for the next window. |
| 214 | */ | 215 | */ |
| 215 | this_rq->calc_load_update = calc_load_update; | ||
| 216 | if (time_before(jiffies, this_rq->calc_load_update + 10)) | 216 | if (time_before(jiffies, this_rq->calc_load_update + 10)) |
| 217 | this_rq->calc_load_update += LOAD_FREQ; | 217 | this_rq->calc_load_update += LOAD_FREQ; |
| 218 | } | 218 | } |
| @@ -308,13 +308,15 @@ calc_load_n(unsigned long load, unsigned long exp, | |||
| 308 | */ | 308 | */ |
| 309 | static void calc_global_nohz(void) | 309 | static void calc_global_nohz(void) |
| 310 | { | 310 | { |
| 311 | unsigned long sample_window; | ||
| 311 | long delta, active, n; | 312 | long delta, active, n; |
| 312 | 313 | ||
| 313 | if (!time_before(jiffies, calc_load_update + 10)) { | 314 | sample_window = READ_ONCE(calc_load_update); |
| 315 | if (!time_before(jiffies, sample_window + 10)) { | ||
| 314 | /* | 316 | /* |
| 315 | * Catch-up, fold however many we are behind still | 317 | * Catch-up, fold however many we are behind still |
| 316 | */ | 318 | */ |
| 317 | delta = jiffies - calc_load_update - 10; | 319 | delta = jiffies - sample_window - 10; |
| 318 | n = 1 + (delta / LOAD_FREQ); | 320 | n = 1 + (delta / LOAD_FREQ); |
| 319 | 321 | ||
| 320 | active = atomic_long_read(&calc_load_tasks); | 322 | active = atomic_long_read(&calc_load_tasks); |
| @@ -324,7 +326,7 @@ static void calc_global_nohz(void) | |||
| 324 | avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); | 326 | avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); |
| 325 | avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); | 327 | avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); |
| 326 | 328 | ||
| 327 | calc_load_update += n * LOAD_FREQ; | 329 | WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ); |
| 328 | } | 330 | } |
| 329 | 331 | ||
| 330 | /* | 332 | /* |
| @@ -352,9 +354,11 @@ static inline void calc_global_nohz(void) { } | |||
| 352 | */ | 354 | */ |
| 353 | void calc_global_load(unsigned long ticks) | 355 | void calc_global_load(unsigned long ticks) |
| 354 | { | 356 | { |
| 357 | unsigned long sample_window; | ||
| 355 | long active, delta; | 358 | long active, delta; |
| 356 | 359 | ||
| 357 | if (time_before(jiffies, calc_load_update + 10)) | 360 | sample_window = READ_ONCE(calc_load_update); |
| 361 | if (time_before(jiffies, sample_window + 10)) | ||
| 358 | return; | 362 | return; |
| 359 | 363 | ||
| 360 | /* | 364 | /* |
| @@ -371,7 +375,7 @@ void calc_global_load(unsigned long ticks) | |||
| 371 | avenrun[1] = calc_load(avenrun[1], EXP_5, active); | 375 | avenrun[1] = calc_load(avenrun[1], EXP_5, active); |
| 372 | avenrun[2] = calc_load(avenrun[2], EXP_15, active); | 376 | avenrun[2] = calc_load(avenrun[2], EXP_15, active); |
| 373 | 377 | ||
| 374 | calc_load_update += LOAD_FREQ; | 378 | WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ); |
| 375 | 379 | ||
| 376 | /* | 380 | /* |
| 377 | * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk. | 381 | * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk. |
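A userspace rendering of the snapshot discipline the patch introduces (assumed names, C11 atomics standing in for READ_ONCE/WRITE_ONCE): read the shared window once, do every comparison and the final advance against that local copy, so a concurrent updater can never make the read-check-write sequence disagree with itself:

#include <stdatomic.h>
#include <stdio.h>

#define LOAD_FREQ 5

static _Atomic unsigned long calc_load_update;
static unsigned long jiffies = 100;

static void calc_global_load(void)
{
	unsigned long sample_window =
	    atomic_load_explicit(&calc_load_update, memory_order_relaxed);

	if (jiffies < sample_window + 10)	/* time_before() stand-in */
		return;

	/* ... fold calc_load_tasks into avenrun[] here ... */

	atomic_store_explicit(&calc_load_update, sample_window + LOAD_FREQ,
			      memory_order_relaxed);
}

int main(void)
{
	calc_global_load();
	printf("window advanced to %lu\n",
	       atomic_load_explicit(&calc_load_update, memory_order_relaxed));
	return 0;
}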
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 295479b792ec..6fa7208bcd56 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
| @@ -125,9 +125,12 @@ void put_online_mems(void) | |||
| 125 | 125 | ||
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | /* Serializes write accesses to mem_hotplug.active_writer. */ | ||
| 129 | static DEFINE_MUTEX(memory_add_remove_lock); | ||
| 130 | |||
| 128 | void mem_hotplug_begin(void) | 131 | void mem_hotplug_begin(void) |
| 129 | { | 132 | { |
| 130 | assert_held_device_hotplug(); | 133 | mutex_lock(&memory_add_remove_lock); |
| 131 | 134 | ||
| 132 | mem_hotplug.active_writer = current; | 135 | mem_hotplug.active_writer = current; |
| 133 | 136 | ||
| @@ -147,6 +150,7 @@ void mem_hotplug_done(void) | |||
| 147 | mem_hotplug.active_writer = NULL; | 150 | mem_hotplug.active_writer = NULL; |
| 148 | mutex_unlock(&mem_hotplug.lock); | 151 | mutex_unlock(&mem_hotplug.lock); |
| 149 | memhp_lock_release(); | 152 | memhp_lock_release(); |
| 153 | mutex_unlock(&memory_add_remove_lock); | ||
| 150 | } | 154 | } |
| 151 | 155 | ||
| 152 | /* add this memory to iomem resource */ | 156 | /* add this memory to iomem resource */ |
diff --git a/mm/swap_slots.c b/mm/swap_slots.c index 9b5bc86f96ad..b1ccb58ad397 100644 --- a/mm/swap_slots.c +++ b/mm/swap_slots.c | |||
| @@ -267,8 +267,6 @@ int free_swap_slot(swp_entry_t entry) | |||
| 267 | { | 267 | { |
| 268 | struct swap_slots_cache *cache; | 268 | struct swap_slots_cache *cache; |
| 269 | 269 | ||
| 270 | BUG_ON(!swap_slot_cache_initialized); | ||
| 271 | |||
| 272 | cache = &get_cpu_var(swp_slots); | 270 | cache = &get_cpu_var(swp_slots); |
| 273 | if (use_swap_slot_cache && cache->slots_ret) { | 271 | if (use_swap_slot_cache && cache->slots_ret) { |
| 274 | spin_lock_irq(&cache->free_lock); | 272 | spin_lock_irq(&cache->free_lock); |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 0dd80222b20b..0b057628a7ba 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
| @@ -1683,7 +1683,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, | |||
| 1683 | 1683 | ||
| 1684 | if (fatal_signal_pending(current)) { | 1684 | if (fatal_signal_pending(current)) { |
| 1685 | area->nr_pages = i; | 1685 | area->nr_pages = i; |
| 1686 | goto fail; | 1686 | goto fail_no_warn; |
| 1687 | } | 1687 | } |
| 1688 | 1688 | ||
| 1689 | if (node == NUMA_NO_NODE) | 1689 | if (node == NUMA_NO_NODE) |
| @@ -1709,6 +1709,7 @@ fail: | |||
| 1709 | warn_alloc(gfp_mask, NULL, | 1709 | warn_alloc(gfp_mask, NULL, |
| 1710 | "vmalloc: allocation failure, allocated %ld of %ld bytes", | 1710 | "vmalloc: allocation failure, allocated %ld of %ld bytes", |
| 1711 | (area->nr_pages*PAGE_SIZE), area->size); | 1711 | (area->nr_pages*PAGE_SIZE), area->size); |
| 1712 | fail_no_warn: | ||
| 1712 | vfree(area->addr); | 1713 | vfree(area->addr); |
| 1713 | return NULL; | 1714 | return NULL; |
| 1714 | } | 1715 | } |
diff --git a/mm/z3fold.c b/mm/z3fold.c index 8970a2fd3b1a..f9492bccfd79 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c | |||
| @@ -667,6 +667,7 @@ next: | |||
| 667 | z3fold_page_unlock(zhdr); | 667 | z3fold_page_unlock(zhdr); |
| 668 | spin_lock(&pool->lock); | 668 | spin_lock(&pool->lock); |
| 669 | if (kref_put(&zhdr->refcount, release_z3fold_page)) { | 669 | if (kref_put(&zhdr->refcount, release_z3fold_page)) { |
| 670 | spin_unlock(&pool->lock); | ||
| 670 | atomic64_dec(&pool->pages_nr); | 671 | atomic64_dec(&pool->pages_nr); |
| 671 | return 0; | 672 | return 0; |
| 672 | } | 673 | } |
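The one-line z3fold fix is the classic early-return-with-lock-held bug: every exit from a critical section needs its own unlock. A runnable miniature, with a pthread mutex in place of pool->lock and a plain counter in place of the kref:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

static int kref_put(int *refcount)
{
	return --*refcount == 0;	/* 1 if we dropped the last reference */
}

static int reclaim_page(int *refcount)
{
	pthread_mutex_lock(&pool_lock);
	if (kref_put(refcount)) {
		pthread_mutex_unlock(&pool_lock);	/* the added line */
		return 0;		/* page released, done */
	}
	/* ... keep scanning the pool under the lock ... */
	pthread_mutex_unlock(&pool_lock);
	return -1;
}

int main(void)
{
	int ref = 1;
	printf("reclaim_page -> %d\n", reclaim_page(&ref));
	return 0;
}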
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 7c3d994e90d8..71343d0fec94 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c | |||
| @@ -2477,6 +2477,16 @@ static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface) | |||
| 2477 | batadv_iv_ogm_schedule(hard_iface); | 2477 | batadv_iv_ogm_schedule(hard_iface); |
| 2478 | } | 2478 | } |
| 2479 | 2479 | ||
| 2480 | /** | ||
| 2481 | * batadv_iv_init_sel_class - initialize GW selection class | ||
| 2482 | * @bat_priv: the bat priv with all the soft interface information | ||
| 2483 | */ | ||
| 2484 | static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv) | ||
| 2485 | { | ||
| 2486 | /* set default TQ difference threshold to 20 */ | ||
| 2487 | atomic_set(&bat_priv->gw.sel_class, 20); | ||
| 2488 | } | ||
| 2489 | |||
| 2480 | static struct batadv_gw_node * | 2490 | static struct batadv_gw_node * |
| 2481 | batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv) | 2491 | batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv) |
| 2482 | { | 2492 | { |
| @@ -2823,6 +2833,7 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = { | |||
| 2823 | .del_if = batadv_iv_ogm_orig_del_if, | 2833 | .del_if = batadv_iv_ogm_orig_del_if, |
| 2824 | }, | 2834 | }, |
| 2825 | .gw = { | 2835 | .gw = { |
| 2836 | .init_sel_class = batadv_iv_init_sel_class, | ||
| 2826 | .get_best_gw_node = batadv_iv_gw_get_best_gw_node, | 2837 | .get_best_gw_node = batadv_iv_gw_get_best_gw_node, |
| 2827 | .is_eligible = batadv_iv_gw_is_eligible, | 2838 | .is_eligible = batadv_iv_gw_is_eligible, |
| 2828 | #ifdef CONFIG_BATMAN_ADV_DEBUGFS | 2839 | #ifdef CONFIG_BATMAN_ADV_DEBUGFS |
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index 0acd081dd286..a36c8e7291d6 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c | |||
| @@ -668,6 +668,16 @@ err_ifinfo1: | |||
| 668 | return ret; | 668 | return ret; |
| 669 | } | 669 | } |
| 670 | 670 | ||
| 671 | /** | ||
| 672 | * batadv_v_init_sel_class - initialize GW selection class | ||
| 673 | * @bat_priv: the bat priv with all the soft interface information | ||
| 674 | */ | ||
| 675 | static void batadv_v_init_sel_class(struct batadv_priv *bat_priv) | ||
| 676 | { | ||
| 677 | /* set default throughput difference threshold to 5Mbps */ | ||
| 678 | atomic_set(&bat_priv->gw.sel_class, 50); | ||
| 679 | } | ||
| 680 | |||
| 671 | static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv, | 681 | static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv, |
| 672 | char *buff, size_t count) | 682 | char *buff, size_t count) |
| 673 | { | 683 | { |
| @@ -1052,6 +1062,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = { | |||
| 1052 | .dump = batadv_v_orig_dump, | 1062 | .dump = batadv_v_orig_dump, |
| 1053 | }, | 1063 | }, |
| 1054 | .gw = { | 1064 | .gw = { |
| 1065 | .init_sel_class = batadv_v_init_sel_class, | ||
| 1055 | .store_sel_class = batadv_v_store_sel_class, | 1066 | .store_sel_class = batadv_v_store_sel_class, |
| 1056 | .show_sel_class = batadv_v_show_sel_class, | 1067 | .show_sel_class = batadv_v_show_sel_class, |
| 1057 | .get_best_gw_node = batadv_v_gw_get_best_gw_node, | 1068 | .get_best_gw_node = batadv_v_gw_get_best_gw_node, |
| @@ -1092,9 +1103,6 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv) | |||
| 1092 | if (ret < 0) | 1103 | if (ret < 0) |
| 1093 | return ret; | 1104 | return ret; |
| 1094 | 1105 | ||
| 1095 | /* set default throughput difference threshold to 5Mbps */ | ||
| 1096 | atomic_set(&bat_priv->gw.sel_class, 50); | ||
| 1097 | |||
| 1098 | return 0; | 1106 | return 0; |
| 1099 | } | 1107 | } |
| 1100 | 1108 | ||
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index 11a23fd6e1a0..8f964beaac28 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c | |||
| @@ -404,7 +404,7 @@ out: | |||
| 404 | * batadv_frag_create - create a fragment from skb | 404 | * batadv_frag_create - create a fragment from skb |
| 405 | * @skb: skb to create fragment from | 405 | * @skb: skb to create fragment from |
| 406 | * @frag_head: header to use in new fragment | 406 | * @frag_head: header to use in new fragment |
| 407 | * @mtu: size of new fragment | 407 | * @fragment_size: size of new fragment |
| 408 | * | 408 | * |
| 409 | * Split the passed skb into two fragments: A new one with size matching the | 409 | * Split the passed skb into two fragments: A new one with size matching the |
| 410 | * passed mtu and the old one with the rest. The new skb contains data from the | 410 | * passed mtu and the old one with the rest. The new skb contains data from the |
| @@ -414,11 +414,11 @@ out: | |||
| 414 | */ | 414 | */ |
| 415 | static struct sk_buff *batadv_frag_create(struct sk_buff *skb, | 415 | static struct sk_buff *batadv_frag_create(struct sk_buff *skb, |
| 416 | struct batadv_frag_packet *frag_head, | 416 | struct batadv_frag_packet *frag_head, |
| 417 | unsigned int mtu) | 417 | unsigned int fragment_size) |
| 418 | { | 418 | { |
| 419 | struct sk_buff *skb_fragment; | 419 | struct sk_buff *skb_fragment; |
| 420 | unsigned int header_size = sizeof(*frag_head); | 420 | unsigned int header_size = sizeof(*frag_head); |
| 421 | unsigned int fragment_size = mtu - header_size; | 421 | unsigned int mtu = fragment_size + header_size; |
| 422 | 422 | ||
| 423 | skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN); | 423 | skb_fragment = netdev_alloc_skb(NULL, mtu + ETH_HLEN); |
| 424 | if (!skb_fragment) | 424 | if (!skb_fragment) |
| @@ -456,7 +456,7 @@ int batadv_frag_send_packet(struct sk_buff *skb, | |||
| 456 | struct sk_buff *skb_fragment; | 456 | struct sk_buff *skb_fragment; |
| 457 | unsigned int mtu = neigh_node->if_incoming->net_dev->mtu; | 457 | unsigned int mtu = neigh_node->if_incoming->net_dev->mtu; |
| 458 | unsigned int header_size = sizeof(frag_header); | 458 | unsigned int header_size = sizeof(frag_header); |
| 459 | unsigned int max_fragment_size, max_packet_size; | 459 | unsigned int max_fragment_size, num_fragments; |
| 460 | int ret; | 460 | int ret; |
| 461 | 461 | ||
| 462 | /* To avoid merge and refragmentation at next-hops we never send | 462 | /* To avoid merge and refragmentation at next-hops we never send |
| @@ -464,10 +464,15 @@ int batadv_frag_send_packet(struct sk_buff *skb, | |||
| 464 | */ | 464 | */ |
| 465 | mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE); | 465 | mtu = min_t(unsigned int, mtu, BATADV_FRAG_MAX_FRAG_SIZE); |
| 466 | max_fragment_size = mtu - header_size; | 466 | max_fragment_size = mtu - header_size; |
| 467 | max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS; | 467 | |
| 468 | if (skb->len == 0 || max_fragment_size == 0) | ||
| 469 | return -EINVAL; | ||
| 470 | |||
| 471 | num_fragments = (skb->len - 1) / max_fragment_size + 1; | ||
| 472 | max_fragment_size = (skb->len - 1) / num_fragments + 1; | ||
| 468 | 473 | ||
| 469 | /* Don't even try to fragment if we need more than 16 fragments */ | 474 | /* Don't even try to fragment if we need more than 16 fragments */ |
| 470 | if (skb->len > max_packet_size) { | 475 | if (num_fragments > BATADV_FRAG_MAX_FRAGMENTS) { |
| 471 | ret = -EAGAIN; | 476 | ret = -EAGAIN; |
| 472 | goto free_skb; | 477 | goto free_skb; |
| 473 | } | 478 | } |
| @@ -507,7 +512,8 @@ int batadv_frag_send_packet(struct sk_buff *skb, | |||
| 507 | goto put_primary_if; | 512 | goto put_primary_if; |
| 508 | } | 513 | } |
| 509 | 514 | ||
| 510 | skb_fragment = batadv_frag_create(skb, &frag_header, mtu); | 515 | skb_fragment = batadv_frag_create(skb, &frag_header, |
| 516 | max_fragment_size); | ||
| 511 | if (!skb_fragment) { | 517 | if (!skb_fragment) { |
| 512 | ret = -ENOMEM; | 518 | ret = -ENOMEM; |
| 513 | goto put_primary_if; | 519 | goto put_primary_if; |
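The arithmetic above deserves numbers. With an MTU of 1400 bytes and a 20-byte fragment header (sizes invented for the example), a 2000-byte packet used to be split greedily into 1380 + 620; the two ceiling divisions now spread it into two roughly equal fragments:

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1400, header_size = 20;	/* illustrative sizes */
	unsigned int skb_len = 2000;
	unsigned int max_fragment_size = mtu - header_size;	/* 1380 */

	/* how many fragments are needed at all (ceiling division) */
	unsigned int num_fragments = (skb_len - 1) / max_fragment_size + 1;

	/* then the smallest even per-fragment payload that still fits */
	max_fragment_size = (skb_len - 1) / num_fragments + 1;

	printf("%u fragments, <= %u bytes each\n",
	       num_fragments, max_fragment_size);	/* 2 fragments, 1000 */
	return 0;
}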
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c index 5db2e43e3775..33940c5c74a8 100644 --- a/net/batman-adv/gateway_common.c +++ b/net/batman-adv/gateway_common.c | |||
| @@ -253,6 +253,11 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, | |||
| 253 | */ | 253 | */ |
| 254 | void batadv_gw_init(struct batadv_priv *bat_priv) | 254 | void batadv_gw_init(struct batadv_priv *bat_priv) |
| 255 | { | 255 | { |
| 256 | if (bat_priv->algo_ops->gw.init_sel_class) | ||
| 257 | bat_priv->algo_ops->gw.init_sel_class(bat_priv); | ||
| 258 | else | ||
| 259 | atomic_set(&bat_priv->gw.sel_class, 1); | ||
| 260 | |||
| 256 | batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1, | 261 | batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1, |
| 257 | NULL, BATADV_TVLV_GW, 1, | 262 | NULL, BATADV_TVLV_GW, 1, |
| 258 | BATADV_TVLV_HANDLER_OGM_CIFNOTFND); | 263 | BATADV_TVLV_HANDLER_OGM_CIFNOTFND); |
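batadv_gw_init() now follows the usual optional-callback shape: use the algorithm's initializer when it provides one, otherwise fall back to a neutral default. A compact sketch of the pattern, with the structures simplified and the values taken from the patch:

#include <stdio.h>

struct gw_ops {
	void (*init_sel_class)(int *sel_class);	/* optional */
};

static void iv_init_sel_class(int *sel_class)
{
	*sel_class = 20;	/* B.A.T.M.A.N. IV: TQ difference threshold */
}

static void gw_init(const struct gw_ops *ops, int *sel_class)
{
	if (ops->init_sel_class)
		ops->init_sel_class(sel_class);
	else
		*sel_class = 1;	/* generic fallback, as in the patch */
}

int main(void)
{
	struct gw_ops iv = { .init_sel_class = iv_init_sel_class };
	struct gw_ops other = { 0 };
	int a, b;

	gw_init(&iv, &a);
	gw_init(&other, &b);
	printf("iv=%d fallback=%d\n", a, b);	/* iv=20 fallback=1 */
	return 0;
}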
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 5d099b2e6cfc..d042c99af028 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
| @@ -819,7 +819,6 @@ static int batadv_softif_init_late(struct net_device *dev) | |||
| 819 | atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0); | 819 | atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0); |
| 820 | #endif | 820 | #endif |
| 821 | atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF); | 821 | atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF); |
| 822 | atomic_set(&bat_priv->gw.sel_class, 20); | ||
| 823 | atomic_set(&bat_priv->gw.bandwidth_down, 100); | 822 | atomic_set(&bat_priv->gw.bandwidth_down, 100); |
| 824 | atomic_set(&bat_priv->gw.bandwidth_up, 20); | 823 | atomic_set(&bat_priv->gw.bandwidth_up, 20); |
| 825 | atomic_set(&bat_priv->orig_interval, 1000); | 824 | atomic_set(&bat_priv->orig_interval, 1000); |
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 66b25e410a41..246f21b4973b 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h | |||
| @@ -1489,6 +1489,7 @@ struct batadv_algo_orig_ops { | |||
| 1489 | 1489 | ||
| 1490 | /** | 1490 | /** |
| 1491 | * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific) | 1491 | * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific) |
| 1492 | * @init_sel_class: initialize GW selection class (optional) | ||
| 1492 | * @store_sel_class: parses and stores a new GW selection class (optional) | 1493 | * @store_sel_class: parses and stores a new GW selection class (optional) |
| 1493 | * @show_sel_class: prints the current GW selection class (optional) | 1494 | * @show_sel_class: prints the current GW selection class (optional) |
| 1494 | * @get_best_gw_node: select the best GW from the list of available nodes | 1495 | * @get_best_gw_node: select the best GW from the list of available nodes |
| @@ -1499,6 +1500,7 @@ struct batadv_algo_orig_ops { | |||
| 1499 | * @dump: dump gateways to a netlink socket (optional) | 1500 | * @dump: dump gateways to a netlink socket (optional) |
| 1500 | */ | 1501 | */ |
| 1501 | struct batadv_algo_gw_ops { | 1502 | struct batadv_algo_gw_ops { |
| 1503 | void (*init_sel_class)(struct batadv_priv *bat_priv); | ||
| 1502 | ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff, | 1504 | ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff, |
| 1503 | size_t count); | 1505 | size_t count); |
| 1504 | ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff); | 1506 | ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff); |
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 4f598dc2d916..6e08b7199dd7 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
| @@ -106,7 +106,7 @@ static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br, | |||
| 106 | struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; | 106 | struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)]; |
| 107 | struct net_bridge_fdb_entry *fdb; | 107 | struct net_bridge_fdb_entry *fdb; |
| 108 | 108 | ||
| 109 | WARN_ON_ONCE(!br_hash_lock_held(br)); | 109 | lockdep_assert_held_once(&br->hash_lock); |
| 110 | 110 | ||
| 111 | rcu_read_lock(); | 111 | rcu_read_lock(); |
| 112 | fdb = fdb_find_rcu(head, addr, vid); | 112 | fdb = fdb_find_rcu(head, addr, vid); |
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index fa87fbd62bb7..1f1e62095464 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c | |||
| @@ -706,18 +706,20 @@ static unsigned int nf_bridge_mtu_reduction(const struct sk_buff *skb) | |||
| 706 | 706 | ||
| 707 | static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) | 707 | static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff *skb) |
| 708 | { | 708 | { |
| 709 | struct nf_bridge_info *nf_bridge; | 709 | struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); |
| 710 | unsigned int mtu_reserved; | 710 | unsigned int mtu, mtu_reserved; |
| 711 | 711 | ||
| 712 | mtu_reserved = nf_bridge_mtu_reduction(skb); | 712 | mtu_reserved = nf_bridge_mtu_reduction(skb); |
| 713 | mtu = skb->dev->mtu; | ||
| 714 | |||
| 715 | if (nf_bridge->frag_max_size && nf_bridge->frag_max_size < mtu) | ||
| 716 | mtu = nf_bridge->frag_max_size; | ||
| 713 | 717 | ||
| 714 | if (skb_is_gso(skb) || skb->len + mtu_reserved <= skb->dev->mtu) { | 718 | if (skb_is_gso(skb) || skb->len + mtu_reserved <= mtu) { |
| 715 | nf_bridge_info_free(skb); | 719 | nf_bridge_info_free(skb); |
| 716 | return br_dev_queue_push_xmit(net, sk, skb); | 720 | return br_dev_queue_push_xmit(net, sk, skb); |
| 717 | } | 721 | } |
| 718 | 722 | ||
| 719 | nf_bridge = nf_bridge_info_get(skb); | ||
| 720 | |||
| 721 | /* This is wrong! We should preserve the original fragment | 723 | /* This is wrong! We should preserve the original fragment |
| 722 | * boundaries by preserving frag_list rather than refragmenting. | 724 | * boundaries by preserving frag_list rather than refragmenting. |
| 723 | */ | 725 | */ |
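With numbers, the new bridge check reads: the device MTU is 1500, but the packet was reassembled from 1400-byte fragments, so frag_max_size (1400) becomes the effective ceiling and a 1450-byte skb is refragmented rather than pushed out whole. All sizes here are invented for the demo:

#include <stdio.h>

int main(void)
{
	unsigned int dev_mtu = 1500, frag_max_size = 1400;	/* example */
	unsigned int skb_len = 1450, mtu_reserved = 0;
	unsigned int mtu = dev_mtu;

	if (frag_max_size && frag_max_size < mtu)
		mtu = frag_max_size;	/* honor original fragment size */

	if (skb_len + mtu_reserved <= mtu)
		printf("xmit as-is\n");
	else
		printf("refragment at %u bytes\n", mtu);	/* 1400 */
	return 0;
}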
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 2288fca7756c..61368186edea 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
| @@ -531,15 +531,6 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p, | |||
| 531 | int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, | 531 | int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p, |
| 532 | const unsigned char *addr, u16 vid); | 532 | const unsigned char *addr, u16 vid); |
| 533 | 533 | ||
| 534 | static inline bool br_hash_lock_held(struct net_bridge *br) | ||
| 535 | { | ||
| 536 | #ifdef CONFIG_LOCKDEP | ||
| 537 | return lockdep_is_held(&br->hash_lock); | ||
| 538 | #else | ||
| 539 | return true; | ||
| 540 | #endif | ||
| 541 | } | ||
| 542 | |||
| 543 | /* br_forward.c */ | 534 | /* br_forward.c */ |
| 544 | enum br_pkt_type { | 535 | enum br_pkt_type { |
| 545 | BR_PKT_UNICAST, | 536 | BR_PKT_UNICAST, |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 38dcf1eb427d..f76bb3332613 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <linux/kthread.h> | 7 | #include <linux/kthread.h> |
| 8 | #include <linux/net.h> | 8 | #include <linux/net.h> |
| 9 | #include <linux/nsproxy.h> | 9 | #include <linux/nsproxy.h> |
| 10 | #include <linux/sched/mm.h> | ||
| 10 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
| 11 | #include <linux/socket.h> | 12 | #include <linux/socket.h> |
| 12 | #include <linux/string.h> | 13 | #include <linux/string.h> |
| @@ -469,11 +470,16 @@ static int ceph_tcp_connect(struct ceph_connection *con) | |||
| 469 | { | 470 | { |
| 470 | struct sockaddr_storage *paddr = &con->peer_addr.in_addr; | 471 | struct sockaddr_storage *paddr = &con->peer_addr.in_addr; |
| 471 | struct socket *sock; | 472 | struct socket *sock; |
| 473 | unsigned int noio_flag; | ||
| 472 | int ret; | 474 | int ret; |
| 473 | 475 | ||
| 474 | BUG_ON(con->sock); | 476 | BUG_ON(con->sock); |
| 477 | |||
| 478 | /* sock_create_kern() allocates with GFP_KERNEL */ | ||
| 479 | noio_flag = memalloc_noio_save(); | ||
| 475 | ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family, | 480 | ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family, |
| 476 | SOCK_STREAM, IPPROTO_TCP, &sock); | 481 | SOCK_STREAM, IPPROTO_TCP, &sock); |
| 482 | memalloc_noio_restore(noio_flag); | ||
| 477 | if (ret) | 483 | if (ret) |
| 478 | return ret; | 484 | return ret; |
| 479 | sock->sk->sk_allocation = GFP_NOFS; | 485 | sock->sk->sk_allocation = GFP_NOFS; |
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c index 6ae56037bb13..029a61ac6cdd 100644 --- a/net/core/netclassid_cgroup.c +++ b/net/core/netclassid_cgroup.c | |||
| @@ -71,27 +71,17 @@ static int update_classid_sock(const void *v, struct file *file, unsigned n) | |||
| 71 | return 0; | 71 | return 0; |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | static void update_classid(struct cgroup_subsys_state *css, void *v) | 74 | static void cgrp_attach(struct cgroup_taskset *tset) |
| 75 | { | 75 | { |
| 76 | struct css_task_iter it; | 76 | struct cgroup_subsys_state *css; |
| 77 | struct task_struct *p; | 77 | struct task_struct *p; |
| 78 | 78 | ||
| 79 | css_task_iter_start(css, &it); | 79 | cgroup_taskset_for_each(p, css, tset) { |
| 80 | while ((p = css_task_iter_next(&it))) { | ||
| 81 | task_lock(p); | 80 | task_lock(p); |
| 82 | iterate_fd(p->files, 0, update_classid_sock, v); | 81 | iterate_fd(p->files, 0, update_classid_sock, |
| 82 | (void *)(unsigned long)css_cls_state(css)->classid); | ||
| 83 | task_unlock(p); | 83 | task_unlock(p); |
| 84 | } | 84 | } |
| 85 | css_task_iter_end(&it); | ||
| 86 | } | ||
| 87 | |||
| 88 | static void cgrp_attach(struct cgroup_taskset *tset) | ||
| 89 | { | ||
| 90 | struct cgroup_subsys_state *css; | ||
| 91 | |||
| 92 | cgroup_taskset_first(tset, &css); | ||
| 93 | update_classid(css, | ||
| 94 | (void *)(unsigned long)css_cls_state(css)->classid); | ||
| 95 | } | 85 | } |
| 96 | 86 | ||
| 97 | static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) | 87 | static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft) |
| @@ -103,12 +93,22 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft, | |||
| 103 | u64 value) | 93 | u64 value) |
| 104 | { | 94 | { |
| 105 | struct cgroup_cls_state *cs = css_cls_state(css); | 95 | struct cgroup_cls_state *cs = css_cls_state(css); |
| 96 | struct css_task_iter it; | ||
| 97 | struct task_struct *p; | ||
| 106 | 98 | ||
| 107 | cgroup_sk_alloc_disable(); | 99 | cgroup_sk_alloc_disable(); |
| 108 | 100 | ||
| 109 | cs->classid = (u32)value; | 101 | cs->classid = (u32)value; |
| 110 | 102 | ||
| 111 | update_classid(css, (void *)(unsigned long)cs->classid); | 103 | css_task_iter_start(css, &it); |
| 104 | while ((p = css_task_iter_next(&it))) { | ||
| 105 | task_lock(p); | ||
| 106 | iterate_fd(p->files, 0, update_classid_sock, | ||
| 107 | (void *)(unsigned long)cs->classid); | ||
| 108 | task_unlock(p); | ||
| 109 | } | ||
| 110 | css_task_iter_end(&it); | ||
| 111 | |||
| 112 | return 0; | 112 | return 0; |
| 113 | } | 113 | } |
| 114 | 114 | ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index cd4ba8c6b609..9f781092fda9 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -3694,6 +3694,15 @@ static void sock_rmem_free(struct sk_buff *skb) | |||
| 3694 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); | 3694 | atomic_sub(skb->truesize, &sk->sk_rmem_alloc); |
| 3695 | } | 3695 | } |
| 3696 | 3696 | ||
| 3697 | static void skb_set_err_queue(struct sk_buff *skb) | ||
| 3698 | { | ||
| 3699 | /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING. | ||
| 3700 | * So, it is safe to (mis)use it to mark skbs on the error queue. | ||
| 3701 | */ | ||
| 3702 | skb->pkt_type = PACKET_OUTGOING; | ||
| 3703 | BUILD_BUG_ON(PACKET_OUTGOING == 0); | ||
| 3704 | } | ||
| 3705 | |||
| 3697 | /* | 3706 | /* |
| 3698 | * Note: We dont mem charge error packets (no sk_forward_alloc changes) | 3707 | * Note: We dont mem charge error packets (no sk_forward_alloc changes) |
| 3699 | */ | 3708 | */ |
| @@ -3707,6 +3716,7 @@ int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) | |||
| 3707 | skb->sk = sk; | 3716 | skb->sk = sk; |
| 3708 | skb->destructor = sock_rmem_free; | 3717 | skb->destructor = sock_rmem_free; |
| 3709 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); | 3718 | atomic_add(skb->truesize, &sk->sk_rmem_alloc); |
| 3719 | skb_set_err_queue(skb); | ||
| 3710 | 3720 | ||
| 3711 | /* before exiting rcu section, make sure dst is refcounted */ | 3721 | /* before exiting rcu section, make sure dst is refcounted */ |
| 3712 | skb_dst_force(skb); | 3722 | skb_dst_force(skb); |
| @@ -3783,16 +3793,20 @@ EXPORT_SYMBOL(skb_clone_sk); | |||
| 3783 | 3793 | ||
| 3784 | static void __skb_complete_tx_timestamp(struct sk_buff *skb, | 3794 | static void __skb_complete_tx_timestamp(struct sk_buff *skb, |
| 3785 | struct sock *sk, | 3795 | struct sock *sk, |
| 3786 | int tstype) | 3796 | int tstype, |
| 3797 | bool opt_stats) | ||
| 3787 | { | 3798 | { |
| 3788 | struct sock_exterr_skb *serr; | 3799 | struct sock_exterr_skb *serr; |
| 3789 | int err; | 3800 | int err; |
| 3790 | 3801 | ||
| 3802 | BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); | ||
| 3803 | |||
| 3791 | serr = SKB_EXT_ERR(skb); | 3804 | serr = SKB_EXT_ERR(skb); |
| 3792 | memset(serr, 0, sizeof(*serr)); | 3805 | memset(serr, 0, sizeof(*serr)); |
| 3793 | serr->ee.ee_errno = ENOMSG; | 3806 | serr->ee.ee_errno = ENOMSG; |
| 3794 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; | 3807 | serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; |
| 3795 | serr->ee.ee_info = tstype; | 3808 | serr->ee.ee_info = tstype; |
| 3809 | serr->opt_stats = opt_stats; | ||
| 3796 | if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { | 3810 | if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) { |
| 3797 | serr->ee.ee_data = skb_shinfo(skb)->tskey; | 3811 | serr->ee.ee_data = skb_shinfo(skb)->tskey; |
| 3798 | if (sk->sk_protocol == IPPROTO_TCP && | 3812 | if (sk->sk_protocol == IPPROTO_TCP && |
| @@ -3833,7 +3847,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb, | |||
| 3833 | */ | 3847 | */ |
| 3834 | if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { | 3848 | if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) { |
| 3835 | *skb_hwtstamps(skb) = *hwtstamps; | 3849 | *skb_hwtstamps(skb) = *hwtstamps; |
| 3836 | __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); | 3850 | __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); |
| 3837 | sock_put(sk); | 3851 | sock_put(sk); |
| 3838 | } | 3852 | } |
| 3839 | } | 3853 | } |
| @@ -3844,7 +3858,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, | |||
| 3844 | struct sock *sk, int tstype) | 3858 | struct sock *sk, int tstype) |
| 3845 | { | 3859 | { |
| 3846 | struct sk_buff *skb; | 3860 | struct sk_buff *skb; |
| 3847 | bool tsonly; | 3861 | bool tsonly, opt_stats = false; |
| 3848 | 3862 | ||
| 3849 | if (!sk) | 3863 | if (!sk) |
| 3850 | return; | 3864 | return; |
| @@ -3857,9 +3871,10 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, | |||
| 3857 | #ifdef CONFIG_INET | 3871 | #ifdef CONFIG_INET |
| 3858 | if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && | 3872 | if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) && |
| 3859 | sk->sk_protocol == IPPROTO_TCP && | 3873 | sk->sk_protocol == IPPROTO_TCP && |
| 3860 | sk->sk_type == SOCK_STREAM) | 3874 | sk->sk_type == SOCK_STREAM) { |
| 3861 | skb = tcp_get_timestamping_opt_stats(sk); | 3875 | skb = tcp_get_timestamping_opt_stats(sk); |
| 3862 | else | 3876 | opt_stats = true; |
| 3877 | } else | ||
| 3863 | #endif | 3878 | #endif |
| 3864 | skb = alloc_skb(0, GFP_ATOMIC); | 3879 | skb = alloc_skb(0, GFP_ATOMIC); |
| 3865 | } else { | 3880 | } else { |
| @@ -3878,7 +3893,7 @@ void __skb_tstamp_tx(struct sk_buff *orig_skb, | |||
| 3878 | else | 3893 | else |
| 3879 | skb->tstamp = ktime_get_real(); | 3894 | skb->tstamp = ktime_get_real(); |
| 3880 | 3895 | ||
| 3881 | __skb_complete_tx_timestamp(skb, sk, tstype); | 3896 | __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); |
| 3882 | } | 3897 | } |
| 3883 | EXPORT_SYMBOL_GPL(__skb_tstamp_tx); | 3898 | EXPORT_SYMBOL_GPL(__skb_tstamp_tx); |
| 3884 | 3899 | ||
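Two of the additions above are compile-time guards: PACKET_OUTGOING must stay nonzero for the pkt_type trick to distinguish error-queue skbs, and the error metadata must keep fitting into skb->cb. A userspace analogue with C11 static_assert; only PACKET_OUTGOING == 4 and the 48-byte cb[] match the kernel, the other struct size is made up for the demo:

#include <assert.h>

#define PACKET_OUTGOING 4		/* value from <linux/if_packet.h> */

struct fake_skb { char cb[48]; };	/* skb->cb is 48 bytes */
struct sock_exterr_skb { char payload[40]; };	/* size invented here */

static_assert(PACKET_OUTGOING != 0,
	      "0 must keep meaning 'not on the error queue'");
static_assert(sizeof(struct sock_exterr_skb) <= sizeof(struct fake_skb),
	      "error metadata must fit into skb->cb");

int main(void) { return 0; }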
diff --git a/net/core/sock.c b/net/core/sock.c index a96d5f7a5734..2c4f574168fb 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
| @@ -1442,6 +1442,11 @@ static void __sk_destruct(struct rcu_head *head) | |||
| 1442 | pr_debug("%s: optmem leakage (%d bytes) detected\n", | 1442 | pr_debug("%s: optmem leakage (%d bytes) detected\n", |
| 1443 | __func__, atomic_read(&sk->sk_omem_alloc)); | 1443 | __func__, atomic_read(&sk->sk_omem_alloc)); |
| 1444 | 1444 | ||
| 1445 | if (sk->sk_frag.page) { | ||
| 1446 | put_page(sk->sk_frag.page); | ||
| 1447 | sk->sk_frag.page = NULL; | ||
| 1448 | } | ||
| 1449 | |||
| 1445 | if (sk->sk_peer_cred) | 1450 | if (sk->sk_peer_cred) |
| 1446 | put_cred(sk->sk_peer_cred); | 1451 | put_cred(sk->sk_peer_cred); |
| 1447 | put_pid(sk->sk_peer_pid); | 1452 | put_pid(sk->sk_peer_pid); |
| @@ -1539,6 +1544,12 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
| 1539 | is_charged = sk_filter_charge(newsk, filter); | 1544 | is_charged = sk_filter_charge(newsk, filter); |
| 1540 | 1545 | ||
| 1541 | if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { | 1546 | if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) { |
| 1547 | /* We need to make sure that we don't uncharge the new | ||
| 1548 | * socket if we couldn't charge it in the first place | ||
| 1549 | * as otherwise we uncharge the parent's filter. | ||
| 1550 | */ | ||
| 1551 | if (!is_charged) | ||
| 1552 | RCU_INIT_POINTER(newsk->sk_filter, NULL); | ||
| 1542 | sk_free_unlock_clone(newsk); | 1553 | sk_free_unlock_clone(newsk); |
| 1543 | newsk = NULL; | 1554 | newsk = NULL; |
| 1544 | goto out; | 1555 | goto out; |
| @@ -2787,11 +2798,6 @@ void sk_common_release(struct sock *sk) | |||
| 2787 | 2798 | ||
| 2788 | sk_refcnt_debug_release(sk); | 2799 | sk_refcnt_debug_release(sk); |
| 2789 | 2800 | ||
| 2790 | if (sk->sk_frag.page) { | ||
| 2791 | put_page(sk->sk_frag.page); | ||
| 2792 | sk->sk_frag.page = NULL; | ||
| 2793 | } | ||
| 2794 | |||
| 2795 | sock_put(sk); | 2801 | sock_put(sk); |
| 2796 | } | 2802 | } |
| 2797 | EXPORT_SYMBOL(sk_common_release); | 2803 | EXPORT_SYMBOL(sk_common_release); |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 42bfd08109dd..8f2133ffc2ff 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
| @@ -1083,7 +1083,8 @@ static void nl_fib_input(struct sk_buff *skb) | |||
| 1083 | 1083 | ||
| 1084 | net = sock_net(skb->sk); | 1084 | net = sock_net(skb->sk); |
| 1085 | nlh = nlmsg_hdr(skb); | 1085 | nlh = nlmsg_hdr(skb); |
| 1086 | if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len || | 1086 | if (skb->len < nlmsg_total_size(sizeof(*frn)) || |
| 1087 | skb->len < nlh->nlmsg_len || | ||
| 1087 | nlmsg_len(nlh) < sizeof(*frn)) | 1088 | nlmsg_len(nlh) < sizeof(*frn)) |
| 1088 | return; | 1089 | return; |
| 1089 | 1090 | ||
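The tightened length test can be re-derived by hand: the old check only demanded a bare 16-byte netlink header, while nlmsg_total_size() demands header plus a full, aligned payload. A toy recomputation, where the payload size is a stand-in rather than the real sizeof(struct fib_result_nl):

#include <stdio.h>

#define NLMSG_ALIGNTO	4u
#define NLMSG_ALIGN(len) (((len) + NLMSG_ALIGNTO - 1) & ~(NLMSG_ALIGNTO - 1))
#define NLMSG_HDRLEN	((unsigned int)NLMSG_ALIGN(16u)) /* sizeof(struct nlmsghdr) */

int main(void)
{
	unsigned int payload = 20;	/* stand-in payload size */
	unsigned int total = NLMSG_ALIGN(NLMSG_HDRLEN + payload);

	printf("minimum valid skb->len: %u (was just %u)\n",
	       total, NLMSG_HDRLEN);	/* 36 vs 16 */
	return 0;
}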
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index bbe7f72db9c1..b3cdeec85f1f 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
| @@ -198,6 +198,7 @@ static void ip_expire(unsigned long arg) | |||
| 198 | qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); | 198 | qp = container_of((struct inet_frag_queue *) arg, struct ipq, q); |
| 199 | net = container_of(qp->q.net, struct net, ipv4.frags); | 199 | net = container_of(qp->q.net, struct net, ipv4.frags); |
| 200 | 200 | ||
| 201 | rcu_read_lock(); | ||
| 201 | spin_lock(&qp->q.lock); | 202 | spin_lock(&qp->q.lock); |
| 202 | 203 | ||
| 203 | if (qp->q.flags & INET_FRAG_COMPLETE) | 204 | if (qp->q.flags & INET_FRAG_COMPLETE) |
| @@ -207,7 +208,7 @@ static void ip_expire(unsigned long arg) | |||
| 207 | __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); | 208 | __IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS); |
| 208 | 209 | ||
| 209 | if (!inet_frag_evicting(&qp->q)) { | 210 | if (!inet_frag_evicting(&qp->q)) { |
| 210 | struct sk_buff *head = qp->q.fragments; | 211 | struct sk_buff *clone, *head = qp->q.fragments; |
| 211 | const struct iphdr *iph; | 212 | const struct iphdr *iph; |
| 212 | int err; | 213 | int err; |
| 213 | 214 | ||
| @@ -216,32 +217,40 @@ static void ip_expire(unsigned long arg) | |||
| 216 | if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) | 217 | if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments) |
| 217 | goto out; | 218 | goto out; |
| 218 | 219 | ||
| 219 | rcu_read_lock(); | ||
| 220 | head->dev = dev_get_by_index_rcu(net, qp->iif); | 220 | head->dev = dev_get_by_index_rcu(net, qp->iif); |
| 221 | if (!head->dev) | 221 | if (!head->dev) |
| 222 | goto out_rcu_unlock; | 222 | goto out; |
| 223 | |||
| 223 | 224 | ||
| 224 | /* skb has no dst, perform route lookup again */ | 225 | /* skb has no dst, perform route lookup again */ |
| 225 | iph = ip_hdr(head); | 226 | iph = ip_hdr(head); |
| 226 | err = ip_route_input_noref(head, iph->daddr, iph->saddr, | 227 | err = ip_route_input_noref(head, iph->daddr, iph->saddr, |
| 227 | iph->tos, head->dev); | 228 | iph->tos, head->dev); |
| 228 | if (err) | 229 | if (err) |
| 229 | goto out_rcu_unlock; | 230 | goto out; |
| 230 | 231 | ||
| 231 | /* Only an end host needs to send an ICMP | 232 | /* Only an end host needs to send an ICMP |
| 232 | * "Fragment Reassembly Timeout" message, per RFC792. | 233 | * "Fragment Reassembly Timeout" message, per RFC792. |
| 233 | */ | 234 | */ |
| 234 | if (frag_expire_skip_icmp(qp->user) && | 235 | if (frag_expire_skip_icmp(qp->user) && |
| 235 | (skb_rtable(head)->rt_type != RTN_LOCAL)) | 236 | (skb_rtable(head)->rt_type != RTN_LOCAL)) |
| 236 | goto out_rcu_unlock; | 237 | goto out; |
| 238 | |||
| 239 | clone = skb_clone(head, GFP_ATOMIC); | ||
| 237 | 240 | ||
| 238 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ | 241 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ |
| 239 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); | 242 | if (clone) { |
| 240 | out_rcu_unlock: | 243 | spin_unlock(&qp->q.lock); |
| 241 | rcu_read_unlock(); | 244 | icmp_send(clone, ICMP_TIME_EXCEEDED, |
| 245 | ICMP_EXC_FRAGTIME, 0); | ||
| 246 | consume_skb(clone); | ||
| 247 | goto out_rcu_unlock; | ||
| 248 | } | ||
| 242 | } | 249 | } |
| 243 | out: | 250 | out: |
| 244 | spin_unlock(&qp->q.lock); | 251 | spin_unlock(&qp->q.lock); |
| 252 | out_rcu_unlock: | ||
| 253 | rcu_read_unlock(); | ||
| 245 | ipq_put(qp); | 254 | ipq_put(qp); |
| 246 | } | 255 | } |
| 247 | 256 | ||
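The reshuffled ip_expire() exemplifies the clone-then-unlock pattern: copy what you need while the lock is held, drop the lock, then do the slow work on the private copy. A userspace sketch, with strdup() standing in for skb_clone() and a printf standing in for icmp_send():

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static char queue_head[] = "frag-head";

int main(void)
{
	char *clone;

	pthread_mutex_lock(&q_lock);
	clone = strdup(queue_head);	/* skb_clone(head, GFP_ATOMIC) */
	pthread_mutex_unlock(&q_lock);	/* never send while holding q.lock */

	if (clone) {
		/* slow path, safe without the lock */
		printf("icmp_send(%s, TIME_EXCEEDED)\n", clone);
		free(clone);		/* consume_skb(clone) */
	}
	return 0;
}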
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index bc1486f2c064..2e14ed11a35c 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | |||
| @@ -165,6 +165,10 @@ static unsigned int ipv4_conntrack_local(void *priv, | |||
| 165 | if (skb->len < sizeof(struct iphdr) || | 165 | if (skb->len < sizeof(struct iphdr) || |
| 166 | ip_hdrlen(skb) < sizeof(struct iphdr)) | 166 | ip_hdrlen(skb) < sizeof(struct iphdr)) |
| 167 | return NF_ACCEPT; | 167 | return NF_ACCEPT; |
| 168 | |||
| 169 | if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */ | ||
| 170 | return NF_ACCEPT; | ||
| 171 | |||
| 168 | return nf_conntrack_in(state->net, PF_INET, state->hook, skb); | 172 | return nf_conntrack_in(state->net, PF_INET, state->hook, skb); |
| 169 | } | 173 | } |
| 170 | 174 | ||
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c index f8aad03d674b..6f5e8d01b876 100644 --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c | |||
| @@ -255,11 +255,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb, | |||
| 255 | /* maniptype == SRC for postrouting. */ | 255 | /* maniptype == SRC for postrouting. */ |
| 256 | enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook); | 256 | enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook); |
| 257 | 257 | ||
| 258 | /* We never see fragments: conntrack defrags on pre-routing | ||
| 259 | * and local-out, and nf_nat_out protects post-routing. | ||
| 260 | */ | ||
| 261 | NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb))); | ||
| 262 | |||
| 263 | ct = nf_ct_get(skb, &ctinfo); | 258 | ct = nf_ct_get(skb, &ctinfo); |
| 264 | /* Can't track? It's not due to stress, or conntrack would | 259 | /* Can't track? It's not due to stress, or conntrack would |
| 265 | * have dropped it. Hence it's the user's responsibility to | 260 | * have dropped it. Hence it's the user's responsibility to |
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c index a0ea8aad1bf1..f18677277119 100644 --- a/net/ipv4/netfilter/nft_masq_ipv4.c +++ b/net/ipv4/netfilter/nft_masq_ipv4.c | |||
| @@ -26,10 +26,10 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr, | |||
| 26 | memset(&range, 0, sizeof(range)); | 26 | memset(&range, 0, sizeof(range)); |
| 27 | range.flags = priv->flags; | 27 | range.flags = priv->flags; |
| 28 | if (priv->sreg_proto_min) { | 28 | if (priv->sreg_proto_min) { |
| 29 | range.min_proto.all = | 29 | range.min_proto.all = (__force __be16)nft_reg_load16( |
| 30 | *(__be16 *)®s->data[priv->sreg_proto_min]; | 30 | ®s->data[priv->sreg_proto_min]); |
| 31 | range.max_proto.all = | 31 | range.max_proto.all = (__force __be16)nft_reg_load16( |
| 32 | *(__be16 *)®s->data[priv->sreg_proto_max]; | 32 | ®s->data[priv->sreg_proto_max]); |
| 33 | } | 33 | } |
| 34 | regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt), | 34 | regs->verdict.code = nf_nat_masquerade_ipv4(pkt->skb, nft_hook(pkt), |
| 35 | &range, nft_out(pkt)); | 35 | &range, nft_out(pkt)); |
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c index 1650ed23c15d..5120be1d3118 100644 --- a/net/ipv4/netfilter/nft_redir_ipv4.c +++ b/net/ipv4/netfilter/nft_redir_ipv4.c | |||
| @@ -26,10 +26,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr, | |||
| 26 | 26 | ||
| 27 | memset(&mr, 0, sizeof(mr)); | 27 | memset(&mr, 0, sizeof(mr)); |
| 28 | if (priv->sreg_proto_min) { | 28 | if (priv->sreg_proto_min) { |
| 29 | mr.range[0].min.all = | 29 | mr.range[0].min.all = (__force __be16)nft_reg_load16( |
| 30 | *(__be16 *)®s->data[priv->sreg_proto_min]; | 30 | ®s->data[priv->sreg_proto_min]); |
| 31 | mr.range[0].max.all = | 31 | mr.range[0].max.all = (__force __be16)nft_reg_load16( |
| 32 | *(__be16 *)®s->data[priv->sreg_proto_max]; | 32 | ®s->data[priv->sreg_proto_max]); |
| 33 | mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; | 33 | mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; |
| 34 | } | 34 | } |
| 35 | 35 | ||
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index cf4555581282..1e319a525d51 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
| @@ -2770,7 +2770,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) | |||
| 2770 | { | 2770 | { |
| 2771 | const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ | 2771 | const struct tcp_sock *tp = tcp_sk(sk); /* iff sk_type == SOCK_STREAM */ |
| 2772 | const struct inet_connection_sock *icsk = inet_csk(sk); | 2772 | const struct inet_connection_sock *icsk = inet_csk(sk); |
| 2773 | u32 now = tcp_time_stamp, intv; | 2773 | u32 now, intv; |
| 2774 | u64 rate64; | 2774 | u64 rate64; |
| 2775 | bool slow; | 2775 | bool slow; |
| 2776 | u32 rate; | 2776 | u32 rate; |
| @@ -2839,6 +2839,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info) | |||
| 2839 | info->tcpi_retrans = tp->retrans_out; | 2839 | info->tcpi_retrans = tp->retrans_out; |
| 2840 | info->tcpi_fackets = tp->fackets_out; | 2840 | info->tcpi_fackets = tp->fackets_out; |
| 2841 | 2841 | ||
| 2842 | now = tcp_time_stamp; | ||
| 2842 | info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); | 2843 | info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); |
| 2843 | info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); | 2844 | info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); |
| 2844 | info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); | 2845 | info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 39c393cc0fd3..c43119726a62 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -5541,6 +5541,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) | |||
| 5541 | struct inet_connection_sock *icsk = inet_csk(sk); | 5541 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 5542 | 5542 | ||
| 5543 | tcp_set_state(sk, TCP_ESTABLISHED); | 5543 | tcp_set_state(sk, TCP_ESTABLISHED); |
| 5544 | icsk->icsk_ack.lrcvtime = tcp_time_stamp; | ||
| 5544 | 5545 | ||
| 5545 | if (skb) { | 5546 | if (skb) { |
| 5546 | icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); | 5547 | icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); |
| @@ -5759,7 +5760,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 5759 | * to stand against the temptation 8) --ANK | 5760 | * to stand against the temptation 8) --ANK |
| 5760 | */ | 5761 | */ |
| 5761 | inet_csk_schedule_ack(sk); | 5762 | inet_csk_schedule_ack(sk); |
| 5762 | icsk->icsk_ack.lrcvtime = tcp_time_stamp; | ||
| 5763 | tcp_enter_quickack_mode(sk); | 5763 | tcp_enter_quickack_mode(sk); |
| 5764 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, | 5764 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, |
| 5765 | TCP_DELACK_MAX, TCP_RTO_MAX); | 5765 | TCP_DELACK_MAX, TCP_RTO_MAX); |
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 7e16243cdb58..65c0f3d13eca 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
| @@ -460,6 +460,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, | |||
| 460 | newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); | 460 | newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); |
| 461 | minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U); | 461 | minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U); |
| 462 | newicsk->icsk_rto = TCP_TIMEOUT_INIT; | 462 | newicsk->icsk_rto = TCP_TIMEOUT_INIT; |
| 463 | newicsk->icsk_ack.lrcvtime = tcp_time_stamp; | ||
| 463 | 464 | ||
| 464 | newtp->packets_out = 0; | 465 | newtp->packets_out = 0; |
| 465 | newtp->retrans_out = 0; | 466 | newtp->retrans_out = 0; |
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c index 6c5b5b1830a7..4146536e9c15 100644 --- a/net/ipv6/netfilter/nft_masq_ipv6.c +++ b/net/ipv6/netfilter/nft_masq_ipv6.c | |||
| @@ -27,10 +27,10 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr, | |||
| 27 | memset(&range, 0, sizeof(range)); | 27 | memset(&range, 0, sizeof(range)); |
| 28 | range.flags = priv->flags; | 28 | range.flags = priv->flags; |
| 29 | if (priv->sreg_proto_min) { | 29 | if (priv->sreg_proto_min) { |
| 30 | range.min_proto.all = | 30 | range.min_proto.all = (__force __be16)nft_reg_load16( |
| 31 | *(__be16 *)®s->data[priv->sreg_proto_min]; | 31 | ®s->data[priv->sreg_proto_min]); |
| 32 | range.max_proto.all = | 32 | range.max_proto.all = (__force __be16)nft_reg_load16( |
| 33 | *(__be16 *)®s->data[priv->sreg_proto_max]; | 33 | ®s->data[priv->sreg_proto_max]); |
| 34 | } | 34 | } |
| 35 | regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range, | 35 | regs->verdict.code = nf_nat_masquerade_ipv6(pkt->skb, &range, |
| 36 | nft_out(pkt)); | 36 | nft_out(pkt)); |
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c index f5ac080fc084..a27e424f690d 100644 --- a/net/ipv6/netfilter/nft_redir_ipv6.c +++ b/net/ipv6/netfilter/nft_redir_ipv6.c | |||
| @@ -26,10 +26,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr, | |||
| 26 | 26 | ||
| 27 | memset(&range, 0, sizeof(range)); | 27 | memset(&range, 0, sizeof(range)); |
| 28 | if (priv->sreg_proto_min) { | 28 | if (priv->sreg_proto_min) { |
| 29 | range.min_proto.all = | 29 | range.min_proto.all = (__force __be16)nft_reg_load16( |
| 30 | *(__be16 *)®s->data[priv->sreg_proto_min], | 30 | ®s->data[priv->sreg_proto_min]); |
| 31 | range.max_proto.all = | 31 | range.max_proto.all = (__force __be16)nft_reg_load16( |
| 32 | *(__be16 *)®s->data[priv->sreg_proto_max], | 32 | ®s->data[priv->sreg_proto_max]); |
| 33 | range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; | 33 | range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; |
| 34 | } | 34 | } |
| 35 | 35 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 35c58b669ebd..9db1418993f2 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -3423,6 +3423,8 @@ static int rt6_fill_node(struct net *net, | |||
| 3423 | } | 3423 | } |
| 3424 | else if (rt->rt6i_flags & RTF_LOCAL) | 3424 | else if (rt->rt6i_flags & RTF_LOCAL) |
| 3425 | rtm->rtm_type = RTN_LOCAL; | 3425 | rtm->rtm_type = RTN_LOCAL; |
| 3426 | else if (rt->rt6i_flags & RTF_ANYCAST) | ||
| 3427 | rtm->rtm_type = RTN_ANYCAST; | ||
| 3426 | else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) | 3428 | else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) |
| 3427 | rtm->rtm_type = RTN_LOCAL; | 3429 | rtm->rtm_type = RTN_LOCAL; |
| 3428 | else | 3430 | else |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 4e4c401e3bc6..e28082f0a307 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
| @@ -1035,6 +1035,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
| 1035 | ipc6.hlimit = -1; | 1035 | ipc6.hlimit = -1; |
| 1036 | ipc6.tclass = -1; | 1036 | ipc6.tclass = -1; |
| 1037 | ipc6.dontfrag = -1; | 1037 | ipc6.dontfrag = -1; |
| 1038 | sockc.tsflags = sk->sk_tsflags; | ||
| 1038 | 1039 | ||
| 1039 | /* destination address check */ | 1040 | /* destination address check */ |
| 1040 | if (sin6) { | 1041 | if (sin6) { |
| @@ -1159,7 +1160,6 @@ do_udp_sendmsg: | |||
| 1159 | 1160 | ||
| 1160 | fl6.flowi6_mark = sk->sk_mark; | 1161 | fl6.flowi6_mark = sk->sk_mark; |
| 1161 | fl6.flowi6_uid = sk->sk_uid; | 1162 | fl6.flowi6_uid = sk->sk_uid; |
| 1162 | sockc.tsflags = sk->sk_tsflags; | ||
| 1163 | 1163 | ||
| 1164 | if (msg->msg_controllen) { | 1164 | if (msg->msg_controllen) { |
| 1165 | opt = &opt_space; | 1165 | opt = &opt_space; |
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 33211f9a2656..6414079aa729 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c | |||
| @@ -1269,6 +1269,8 @@ static void mpls_ifdown(struct net_device *dev, int event) | |||
| 1269 | { | 1269 | { |
| 1270 | struct mpls_route __rcu **platform_label; | 1270 | struct mpls_route __rcu **platform_label; |
| 1271 | struct net *net = dev_net(dev); | 1271 | struct net *net = dev_net(dev); |
| 1272 | unsigned int nh_flags = RTNH_F_DEAD | RTNH_F_LINKDOWN; | ||
| 1273 | unsigned int alive; | ||
| 1272 | unsigned index; | 1274 | unsigned index; |
| 1273 | 1275 | ||
| 1274 | platform_label = rtnl_dereference(net->mpls.platform_label); | 1276 | platform_label = rtnl_dereference(net->mpls.platform_label); |
| @@ -1278,9 +1280,11 @@ static void mpls_ifdown(struct net_device *dev, int event) | |||
| 1278 | if (!rt) | 1280 | if (!rt) |
| 1279 | continue; | 1281 | continue; |
| 1280 | 1282 | ||
| 1283 | alive = 0; | ||
| 1281 | change_nexthops(rt) { | 1284 | change_nexthops(rt) { |
| 1282 | if (rtnl_dereference(nh->nh_dev) != dev) | 1285 | if (rtnl_dereference(nh->nh_dev) != dev) |
| 1283 | continue; | 1286 | goto next; |
| 1287 | |||
| 1284 | switch (event) { | 1288 | switch (event) { |
| 1285 | case NETDEV_DOWN: | 1289 | case NETDEV_DOWN: |
| 1286 | case NETDEV_UNREGISTER: | 1290 | case NETDEV_UNREGISTER: |
| @@ -1288,13 +1292,16 @@ static void mpls_ifdown(struct net_device *dev, int event) | |||
| 1288 | /* fall through */ | 1292 | /* fall through */ |
| 1289 | case NETDEV_CHANGE: | 1293 | case NETDEV_CHANGE: |
| 1290 | nh->nh_flags |= RTNH_F_LINKDOWN; | 1294 | nh->nh_flags |= RTNH_F_LINKDOWN; |
| 1291 | if (event != NETDEV_UNREGISTER) | ||
| 1292 | ACCESS_ONCE(rt->rt_nhn_alive) = rt->rt_nhn_alive - 1; | ||
| 1293 | break; | 1295 | break; |
| 1294 | } | 1296 | } |
| 1295 | if (event == NETDEV_UNREGISTER) | 1297 | if (event == NETDEV_UNREGISTER) |
| 1296 | RCU_INIT_POINTER(nh->nh_dev, NULL); | 1298 | RCU_INIT_POINTER(nh->nh_dev, NULL); |
| 1299 | next: | ||
| 1300 | if (!(nh->nh_flags & nh_flags)) | ||
| 1301 | alive++; | ||
| 1297 | } endfor_nexthops(rt); | 1302 | } endfor_nexthops(rt); |
| 1303 | |||
| 1304 | WRITE_ONCE(rt->rt_nhn_alive, alive); | ||
| 1298 | } | 1305 | } |
| 1299 | } | 1306 | } |
| 1300 | 1307 | ||
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 071b97fcbefb..ffb78e5f7b70 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
| @@ -181,7 +181,11 @@ EXPORT_SYMBOL_GPL(nf_conntrack_htable_size); | |||
| 181 | unsigned int nf_conntrack_max __read_mostly; | 181 | unsigned int nf_conntrack_max __read_mostly; |
| 182 | seqcount_t nf_conntrack_generation __read_mostly; | 182 | seqcount_t nf_conntrack_generation __read_mostly; |
| 183 | 183 | ||
| 184 | DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked); | 184 | /* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used |
| 185 | * for the nfctinfo. We cheat by (ab)using the PER CPU cache line | ||
| 186 | * alignment to enforce this. | ||
| 187 | */ | ||
| 188 | DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked); | ||
| 185 | EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); | 189 | EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked); |
| 186 | 190 | ||
| 187 | static unsigned int nf_conntrack_hash_rnd __read_mostly; | 191 | static unsigned int nf_conntrack_hash_rnd __read_mostly; |
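The alignment comment in this hunk matters because conntrack packs the connection pointer and the 3-bit ctinfo into a single word; that only works if the low three bits of every nf_conn pointer are guaranteed to be zero. A hypothetical sketch of that encoding (nfct_pack/nfct_unpack are illustrative names, not kernel API):

	/* Hypothetical sketch: with 8-byte alignment the low 3 bits of a
	 * struct nf_conn pointer are always zero, so one word can carry
	 * both the pointer and the 3-bit ctinfo.
	 */
	struct nf_conn;

	static inline unsigned long nfct_pack(struct nf_conn *ct,
					      unsigned int ctinfo)
	{
		return (unsigned long)ct | (ctinfo & 7UL);
	}

	static inline struct nf_conn *nfct_unpack(unsigned long nfct,
						  unsigned int *ctinfo)
	{
		*ctinfo = nfct & 7UL;
		return (struct nf_conn *)(nfct & ~7UL);
	}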
diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c index 31d358691af0..804e8a0ab36e 100644 --- a/net/netfilter/nf_nat_proto_sctp.c +++ b/net/netfilter/nf_nat_proto_sctp.c | |||
| @@ -33,8 +33,16 @@ sctp_manip_pkt(struct sk_buff *skb, | |||
| 33 | enum nf_nat_manip_type maniptype) | 33 | enum nf_nat_manip_type maniptype) |
| 34 | { | 34 | { |
| 35 | sctp_sctphdr_t *hdr; | 35 | sctp_sctphdr_t *hdr; |
| 36 | int hdrsize = 8; | ||
| 36 | 37 | ||
| 37 | if (!skb_make_writable(skb, hdroff + sizeof(*hdr))) | 38 | /* This could be an inner header returned in icmp packet; in such |
| 39 | * cases we cannot update the checksum field since it is outside | ||
| 40 | * of the 8 bytes of transport layer headers we are guaranteed. | ||
| 41 | */ | ||
| 42 | if (skb->len >= hdroff + sizeof(*hdr)) | ||
| 43 | hdrsize = sizeof(*hdr); | ||
| 44 | |||
| 45 | if (!skb_make_writable(skb, hdroff + hdrsize)) | ||
| 38 | return false; | 46 | return false; |
| 39 | 47 | ||
| 40 | hdr = (struct sctphdr *)(skb->data + hdroff); | 48 | hdr = (struct sctphdr *)(skb->data + hdroff); |
| @@ -47,6 +55,9 @@ sctp_manip_pkt(struct sk_buff *skb, | |||
| 47 | hdr->dest = tuple->dst.u.sctp.port; | 55 | hdr->dest = tuple->dst.u.sctp.port; |
| 48 | } | 56 | } |
| 49 | 57 | ||
| 58 | if (hdrsize < sizeof(*hdr)) | ||
| 59 | return true; | ||
| 60 | |||
| 50 | if (skb->ip_summed != CHECKSUM_PARTIAL) { | 61 | if (skb->ip_summed != CHECKSUM_PARTIAL) { |
| 51 | hdr->checksum = sctp_compute_cksum(skb, hdroff); | 62 | hdr->checksum = sctp_compute_cksum(skb, hdroff); |
| 52 | skb->ip_summed = CHECKSUM_NONE; | 63 | skb->ip_summed = CHECKSUM_NONE; |
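The hdrsize logic above exists because an ICMP error quotes only the offending IP header plus the first 8 bytes of the transport header (RFC 792), while a full struct sctphdr is 12 bytes (source, dest, vtag, checksum), so the checksum field may simply not be present. A small standalone sketch of that decision, with the hypothetical helper name and hard-coded sizes purely for clarity:

	#include <stdbool.h>
	#include <stddef.h>

	/* Hypothetical helper: is the embedded SCTP header complete
	 * enough to recompute its checksum? Only the first 8 bytes of
	 * the transport header are guaranteed in an ICMP quote.
	 */
	static bool sctp_checksum_present(size_t skb_len, size_t hdroff)
	{
		const size_t full_hdr = 12;	/* src, dst, vtag, checksum */

		return skb_len >= hdroff + full_hdr;
	}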
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 5e0ccfd5bb37..434c739dfeca 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -3145,7 +3145,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | |||
| 3145 | iter.count = 0; | 3145 | iter.count = 0; |
| 3146 | iter.err = 0; | 3146 | iter.err = 0; |
| 3147 | iter.fn = nf_tables_bind_check_setelem; | 3147 | iter.fn = nf_tables_bind_check_setelem; |
| 3148 | iter.flush = false; | ||
| 3149 | 3148 | ||
| 3150 | set->ops->walk(ctx, set, &iter); | 3149 | set->ops->walk(ctx, set, &iter); |
| 3151 | if (iter.err < 0) | 3150 | if (iter.err < 0) |
| @@ -3399,7 +3398,6 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 3399 | args.iter.count = 0; | 3398 | args.iter.count = 0; |
| 3400 | args.iter.err = 0; | 3399 | args.iter.err = 0; |
| 3401 | args.iter.fn = nf_tables_dump_setelem; | 3400 | args.iter.fn = nf_tables_dump_setelem; |
| 3402 | args.iter.flush = false; | ||
| 3403 | set->ops->walk(&ctx, set, &args.iter); | 3401 | set->ops->walk(&ctx, set, &args.iter); |
| 3404 | 3402 | ||
| 3405 | nla_nest_end(skb, nest); | 3403 | nla_nest_end(skb, nest); |
| @@ -3963,7 +3961,6 @@ static int nf_tables_delsetelem(struct net *net, struct sock *nlsk, | |||
| 3963 | struct nft_set_iter iter = { | 3961 | struct nft_set_iter iter = { |
| 3964 | .genmask = genmask, | 3962 | .genmask = genmask, |
| 3965 | .fn = nft_flush_set, | 3963 | .fn = nft_flush_set, |
| 3966 | .flush = true, | ||
| 3967 | }; | 3964 | }; |
| 3968 | set->ops->walk(&ctx, set, &iter); | 3965 | set->ops->walk(&ctx, set, &iter); |
| 3969 | 3966 | ||
| @@ -5114,7 +5111,6 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx, | |||
| 5114 | iter.count = 0; | 5111 | iter.count = 0; |
| 5115 | iter.err = 0; | 5112 | iter.err = 0; |
| 5116 | iter.fn = nf_tables_loop_check_setelem; | 5113 | iter.fn = nf_tables_loop_check_setelem; |
| 5117 | iter.flush = false; | ||
| 5118 | 5114 | ||
| 5119 | set->ops->walk(ctx, set, &iter); | 5115 | set->ops->walk(ctx, set, &iter); |
| 5120 | if (iter.err < 0) | 5116 | if (iter.err < 0) |
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index bf548a7a71ec..0264258c46fe 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c | |||
| @@ -83,7 +83,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr, | |||
| 83 | 83 | ||
| 84 | switch (priv->key) { | 84 | switch (priv->key) { |
| 85 | case NFT_CT_DIRECTION: | 85 | case NFT_CT_DIRECTION: |
| 86 | *dest = CTINFO2DIR(ctinfo); | 86 | nft_reg_store8(dest, CTINFO2DIR(ctinfo)); |
| 87 | return; | 87 | return; |
| 88 | case NFT_CT_STATUS: | 88 | case NFT_CT_STATUS: |
| 89 | *dest = ct->status; | 89 | *dest = ct->status; |
| @@ -151,20 +151,22 @@ static void nft_ct_get_eval(const struct nft_expr *expr, | |||
| 151 | return; | 151 | return; |
| 152 | } | 152 | } |
| 153 | case NFT_CT_L3PROTOCOL: | 153 | case NFT_CT_L3PROTOCOL: |
| 154 | *dest = nf_ct_l3num(ct); | 154 | nft_reg_store8(dest, nf_ct_l3num(ct)); |
| 155 | return; | 155 | return; |
| 156 | case NFT_CT_PROTOCOL: | 156 | case NFT_CT_PROTOCOL: |
| 157 | *dest = nf_ct_protonum(ct); | 157 | nft_reg_store8(dest, nf_ct_protonum(ct)); |
| 158 | return; | 158 | return; |
| 159 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 159 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
| 160 | case NFT_CT_ZONE: { | 160 | case NFT_CT_ZONE: { |
| 161 | const struct nf_conntrack_zone *zone = nf_ct_zone(ct); | 161 | const struct nf_conntrack_zone *zone = nf_ct_zone(ct); |
| 162 | u16 zoneid; | ||
| 162 | 163 | ||
| 163 | if (priv->dir < IP_CT_DIR_MAX) | 164 | if (priv->dir < IP_CT_DIR_MAX) |
| 164 | *dest = nf_ct_zone_id(zone, priv->dir); | 165 | zoneid = nf_ct_zone_id(zone, priv->dir); |
| 165 | else | 166 | else |
| 166 | *dest = zone->id; | 167 | zoneid = zone->id; |
| 167 | 168 | ||
| 169 | nft_reg_store16(dest, zoneid); | ||
| 168 | return; | 170 | return; |
| 169 | } | 171 | } |
| 170 | #endif | 172 | #endif |
| @@ -183,10 +185,10 @@ static void nft_ct_get_eval(const struct nft_expr *expr, | |||
| 183 | nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16); | 185 | nf_ct_l3num(ct) == NFPROTO_IPV4 ? 4 : 16); |
| 184 | return; | 186 | return; |
| 185 | case NFT_CT_PROTO_SRC: | 187 | case NFT_CT_PROTO_SRC: |
| 186 | *dest = (__force __u16)tuple->src.u.all; | 188 | nft_reg_store16(dest, (__force u16)tuple->src.u.all); |
| 187 | return; | 189 | return; |
| 188 | case NFT_CT_PROTO_DST: | 190 | case NFT_CT_PROTO_DST: |
| 189 | *dest = (__force __u16)tuple->dst.u.all; | 191 | nft_reg_store16(dest, (__force u16)tuple->dst.u.all); |
| 190 | return; | 192 | return; |
| 191 | default: | 193 | default: |
| 192 | break; | 194 | break; |
| @@ -205,7 +207,7 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr, | |||
| 205 | const struct nft_ct *priv = nft_expr_priv(expr); | 207 | const struct nft_ct *priv = nft_expr_priv(expr); |
| 206 | struct sk_buff *skb = pkt->skb; | 208 | struct sk_buff *skb = pkt->skb; |
| 207 | enum ip_conntrack_info ctinfo; | 209 | enum ip_conntrack_info ctinfo; |
| 208 | u16 value = regs->data[priv->sreg]; | 210 | u16 value = nft_reg_load16(®s->data[priv->sreg]); |
| 209 | struct nf_conn *ct; | 211 | struct nf_conn *ct; |
| 210 | 212 | ||
| 211 | ct = nf_ct_get(skb, &ctinfo); | 213 | ct = nf_ct_get(skb, &ctinfo); |
| @@ -542,7 +544,8 @@ static int nft_ct_set_init(const struct nft_ctx *ctx, | |||
| 542 | case IP_CT_DIR_REPLY: | 544 | case IP_CT_DIR_REPLY: |
| 543 | break; | 545 | break; |
| 544 | default: | 546 | default: |
| 545 | return -EINVAL; | 547 | err = -EINVAL; |
| 548 | goto err1; | ||
| 546 | } | 549 | } |
| 547 | } | 550 | } |
| 548 | 551 | ||
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index e1f5ca9b423b..7b60e01f38ff 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c | |||
| @@ -45,16 +45,15 @@ void nft_meta_get_eval(const struct nft_expr *expr, | |||
| 45 | *dest = skb->len; | 45 | *dest = skb->len; |
| 46 | break; | 46 | break; |
| 47 | case NFT_META_PROTOCOL: | 47 | case NFT_META_PROTOCOL: |
| 48 | *dest = 0; | 48 | nft_reg_store16(dest, (__force u16)skb->protocol); |
| 49 | *(__be16 *)dest = skb->protocol; | ||
| 50 | break; | 49 | break; |
| 51 | case NFT_META_NFPROTO: | 50 | case NFT_META_NFPROTO: |
| 52 | *dest = nft_pf(pkt); | 51 | nft_reg_store8(dest, nft_pf(pkt)); |
| 53 | break; | 52 | break; |
| 54 | case NFT_META_L4PROTO: | 53 | case NFT_META_L4PROTO: |
| 55 | if (!pkt->tprot_set) | 54 | if (!pkt->tprot_set) |
| 56 | goto err; | 55 | goto err; |
| 57 | *dest = pkt->tprot; | 56 | nft_reg_store8(dest, pkt->tprot); |
| 58 | break; | 57 | break; |
| 59 | case NFT_META_PRIORITY: | 58 | case NFT_META_PRIORITY: |
| 60 | *dest = skb->priority; | 59 | *dest = skb->priority; |
| @@ -85,14 +84,12 @@ void nft_meta_get_eval(const struct nft_expr *expr, | |||
| 85 | case NFT_META_IIFTYPE: | 84 | case NFT_META_IIFTYPE: |
| 86 | if (in == NULL) | 85 | if (in == NULL) |
| 87 | goto err; | 86 | goto err; |
| 88 | *dest = 0; | 87 | nft_reg_store16(dest, in->type); |
| 89 | *(u16 *)dest = in->type; | ||
| 90 | break; | 88 | break; |
| 91 | case NFT_META_OIFTYPE: | 89 | case NFT_META_OIFTYPE: |
| 92 | if (out == NULL) | 90 | if (out == NULL) |
| 93 | goto err; | 91 | goto err; |
| 94 | *dest = 0; | 92 | nft_reg_store16(dest, out->type); |
| 95 | *(u16 *)dest = out->type; | ||
| 96 | break; | 93 | break; |
| 97 | case NFT_META_SKUID: | 94 | case NFT_META_SKUID: |
| 98 | sk = skb_to_full_sk(skb); | 95 | sk = skb_to_full_sk(skb); |
| @@ -142,19 +139,19 @@ void nft_meta_get_eval(const struct nft_expr *expr, | |||
| 142 | #endif | 139 | #endif |
| 143 | case NFT_META_PKTTYPE: | 140 | case NFT_META_PKTTYPE: |
| 144 | if (skb->pkt_type != PACKET_LOOPBACK) { | 141 | if (skb->pkt_type != PACKET_LOOPBACK) { |
| 145 | *dest = skb->pkt_type; | 142 | nft_reg_store8(dest, skb->pkt_type); |
| 146 | break; | 143 | break; |
| 147 | } | 144 | } |
| 148 | 145 | ||
| 149 | switch (nft_pf(pkt)) { | 146 | switch (nft_pf(pkt)) { |
| 150 | case NFPROTO_IPV4: | 147 | case NFPROTO_IPV4: |
| 151 | if (ipv4_is_multicast(ip_hdr(skb)->daddr)) | 148 | if (ipv4_is_multicast(ip_hdr(skb)->daddr)) |
| 152 | *dest = PACKET_MULTICAST; | 149 | nft_reg_store8(dest, PACKET_MULTICAST); |
| 153 | else | 150 | else |
| 154 | *dest = PACKET_BROADCAST; | 151 | nft_reg_store8(dest, PACKET_BROADCAST); |
| 155 | break; | 152 | break; |
| 156 | case NFPROTO_IPV6: | 153 | case NFPROTO_IPV6: |
| 157 | *dest = PACKET_MULTICAST; | 154 | nft_reg_store8(dest, PACKET_MULTICAST); |
| 158 | break; | 155 | break; |
| 159 | case NFPROTO_NETDEV: | 156 | case NFPROTO_NETDEV: |
| 160 | switch (skb->protocol) { | 157 | switch (skb->protocol) { |
| @@ -168,14 +165,14 @@ void nft_meta_get_eval(const struct nft_expr *expr, | |||
| 168 | goto err; | 165 | goto err; |
| 169 | 166 | ||
| 170 | if (ipv4_is_multicast(iph->daddr)) | 167 | if (ipv4_is_multicast(iph->daddr)) |
| 171 | *dest = PACKET_MULTICAST; | 168 | nft_reg_store8(dest, PACKET_MULTICAST); |
| 172 | else | 169 | else |
| 173 | *dest = PACKET_BROADCAST; | 170 | nft_reg_store8(dest, PACKET_BROADCAST); |
| 174 | 171 | ||
| 175 | break; | 172 | break; |
| 176 | } | 173 | } |
| 177 | case htons(ETH_P_IPV6): | 174 | case htons(ETH_P_IPV6): |
| 178 | *dest = PACKET_MULTICAST; | 175 | nft_reg_store8(dest, PACKET_MULTICAST); |
| 179 | break; | 176 | break; |
| 180 | default: | 177 | default: |
| 181 | WARN_ON_ONCE(1); | 178 | WARN_ON_ONCE(1); |
| @@ -230,7 +227,9 @@ void nft_meta_set_eval(const struct nft_expr *expr, | |||
| 230 | { | 227 | { |
| 231 | const struct nft_meta *meta = nft_expr_priv(expr); | 228 | const struct nft_meta *meta = nft_expr_priv(expr); |
| 232 | struct sk_buff *skb = pkt->skb; | 229 | struct sk_buff *skb = pkt->skb; |
| 233 | u32 value = regs->data[meta->sreg]; | 230 | u32 *sreg = ®s->data[meta->sreg]; |
| 231 | u32 value = *sreg; | ||
| 232 | u8 pkt_type; | ||
| 234 | 233 | ||
| 235 | switch (meta->key) { | 234 | switch (meta->key) { |
| 236 | case NFT_META_MARK: | 235 | case NFT_META_MARK: |
| @@ -240,9 +239,12 @@ void nft_meta_set_eval(const struct nft_expr *expr, | |||
| 240 | skb->priority = value; | 239 | skb->priority = value; |
| 241 | break; | 240 | break; |
| 242 | case NFT_META_PKTTYPE: | 241 | case NFT_META_PKTTYPE: |
| 243 | if (skb->pkt_type != value && | 242 | pkt_type = nft_reg_load8(sreg); |
| 244 | skb_pkt_type_ok(value) && skb_pkt_type_ok(skb->pkt_type)) | 243 | |
| 245 | skb->pkt_type = value; | 244 | if (skb->pkt_type != pkt_type && |
| 245 | skb_pkt_type_ok(pkt_type) && | ||
| 246 | skb_pkt_type_ok(skb->pkt_type)) | ||
| 247 | skb->pkt_type = pkt_type; | ||
| 246 | break; | 248 | break; |
| 247 | case NFT_META_NFTRACE: | 249 | case NFT_META_NFTRACE: |
| 248 | skb->nf_trace = !!value; | 250 | skb->nf_trace = !!value; |
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 19a7bf3236f9..439e0bd152a0 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c | |||
| @@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr, | |||
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | if (priv->sreg_proto_min) { | 67 | if (priv->sreg_proto_min) { |
| 68 | range.min_proto.all = | 68 | range.min_proto.all = (__force __be16)nft_reg_load16( |
| 69 | *(__be16 *)®s->data[priv->sreg_proto_min]; | 69 | ®s->data[priv->sreg_proto_min]); |
| 70 | range.max_proto.all = | 70 | range.max_proto.all = (__force __be16)nft_reg_load16( |
| 71 | *(__be16 *)®s->data[priv->sreg_proto_max]; | 71 | ®s->data[priv->sreg_proto_max]); |
| 72 | range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; | 72 | range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; |
| 73 | } | 73 | } |
| 74 | 74 | ||
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c index 152d226552c1..8ebbc2940f4c 100644 --- a/net/netfilter/nft_set_bitmap.c +++ b/net/netfilter/nft_set_bitmap.c | |||
| @@ -15,6 +15,11 @@ | |||
| 15 | #include <linux/netfilter/nf_tables.h> | 15 | #include <linux/netfilter/nf_tables.h> |
| 16 | #include <net/netfilter/nf_tables.h> | 16 | #include <net/netfilter/nf_tables.h> |
| 17 | 17 | ||
| 18 | struct nft_bitmap_elem { | ||
| 19 | struct list_head head; | ||
| 20 | struct nft_set_ext ext; | ||
| 21 | }; | ||
| 22 | |||
| 18 | /* This bitmap uses two bits to represent one element. These two bits determine | 23 | /* This bitmap uses two bits to represent one element. These two bits determine |
| 19 | * the element state in the current and the future generation. | 24 | * the element state in the current and the future generation. |
| 20 | * | 25 | * |
| @@ -41,13 +46,22 @@ | |||
| 41 | * restore its previous state. | 46 | * restore its previous state. |
| 42 | */ | 47 | */ |
| 43 | struct nft_bitmap { | 48 | struct nft_bitmap { |
| 44 | u16 bitmap_size; | 49 | struct list_head list; |
| 45 | u8 bitmap[]; | 50 | u16 bitmap_size; |
| 51 | u8 bitmap[]; | ||
| 46 | }; | 52 | }; |
| 47 | 53 | ||
| 48 | static inline void nft_bitmap_location(u32 key, u32 *idx, u32 *off) | 54 | static inline void nft_bitmap_location(const struct nft_set *set, |
| 55 | const void *key, | ||
| 56 | u32 *idx, u32 *off) | ||
| 49 | { | 57 | { |
| 50 | u32 k = (key << 1); | 58 | u32 k; |
| 59 | |||
| 60 | if (set->klen == 2) | ||
| 61 | k = *(u16 *)key; | ||
| 62 | else | ||
| 63 | k = *(u8 *)key; | ||
| 64 | k <<= 1; | ||
| 51 | 65 | ||
| 52 | *idx = k / BITS_PER_BYTE; | 66 | *idx = k / BITS_PER_BYTE; |
| 53 | *off = k % BITS_PER_BYTE; | 67 | *off = k % BITS_PER_BYTE; |
| @@ -69,26 +83,48 @@ static bool nft_bitmap_lookup(const struct net *net, const struct nft_set *set, | |||
| 69 | u8 genmask = nft_genmask_cur(net); | 83 | u8 genmask = nft_genmask_cur(net); |
| 70 | u32 idx, off; | 84 | u32 idx, off; |
| 71 | 85 | ||
| 72 | nft_bitmap_location(*key, &idx, &off); | 86 | nft_bitmap_location(set, key, &idx, &off); |
| 73 | 87 | ||
| 74 | return nft_bitmap_active(priv->bitmap, idx, off, genmask); | 88 | return nft_bitmap_active(priv->bitmap, idx, off, genmask); |
| 75 | } | 89 | } |
| 76 | 90 | ||
| 91 | static struct nft_bitmap_elem * | ||
| 92 | nft_bitmap_elem_find(const struct nft_set *set, struct nft_bitmap_elem *this, | ||
| 93 | u8 genmask) | ||
| 94 | { | ||
| 95 | const struct nft_bitmap *priv = nft_set_priv(set); | ||
| 96 | struct nft_bitmap_elem *be; | ||
| 97 | |||
| 98 | list_for_each_entry_rcu(be, &priv->list, head) { | ||
| 99 | if (memcmp(nft_set_ext_key(&be->ext), | ||
| 100 | nft_set_ext_key(&this->ext), set->klen) || | ||
| 101 | !nft_set_elem_active(&be->ext, genmask)) | ||
| 102 | continue; | ||
| 103 | |||
| 104 | return be; | ||
| 105 | } | ||
| 106 | return NULL; | ||
| 107 | } | ||
| 108 | |||
| 77 | static int nft_bitmap_insert(const struct net *net, const struct nft_set *set, | 109 | static int nft_bitmap_insert(const struct net *net, const struct nft_set *set, |
| 78 | const struct nft_set_elem *elem, | 110 | const struct nft_set_elem *elem, |
| 79 | struct nft_set_ext **_ext) | 111 | struct nft_set_ext **ext) |
| 80 | { | 112 | { |
| 81 | struct nft_bitmap *priv = nft_set_priv(set); | 113 | struct nft_bitmap *priv = nft_set_priv(set); |
| 82 | struct nft_set_ext *ext = elem->priv; | 114 | struct nft_bitmap_elem *new = elem->priv, *be; |
| 83 | u8 genmask = nft_genmask_next(net); | 115 | u8 genmask = nft_genmask_next(net); |
| 84 | u32 idx, off; | 116 | u32 idx, off; |
| 85 | 117 | ||
| 86 | nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); | 118 | be = nft_bitmap_elem_find(set, new, genmask); |
| 87 | if (nft_bitmap_active(priv->bitmap, idx, off, genmask)) | 119 | if (be) { |
| 120 | *ext = &be->ext; | ||
| 88 | return -EEXIST; | 121 | return -EEXIST; |
| 122 | } | ||
| 89 | 123 | ||
| 124 | nft_bitmap_location(set, nft_set_ext_key(&new->ext), &idx, &off); | ||
| 90 | /* Enter 01 state. */ | 125 | /* Enter 01 state. */ |
| 91 | priv->bitmap[idx] |= (genmask << off); | 126 | priv->bitmap[idx] |= (genmask << off); |
| 127 | list_add_tail_rcu(&new->head, &priv->list); | ||
| 92 | 128 | ||
| 93 | return 0; | 129 | return 0; |
| 94 | } | 130 | } |
| @@ -98,13 +134,14 @@ static void nft_bitmap_remove(const struct net *net, | |||
| 98 | const struct nft_set_elem *elem) | 134 | const struct nft_set_elem *elem) |
| 99 | { | 135 | { |
| 100 | struct nft_bitmap *priv = nft_set_priv(set); | 136 | struct nft_bitmap *priv = nft_set_priv(set); |
| 101 | struct nft_set_ext *ext = elem->priv; | 137 | struct nft_bitmap_elem *be = elem->priv; |
| 102 | u8 genmask = nft_genmask_next(net); | 138 | u8 genmask = nft_genmask_next(net); |
| 103 | u32 idx, off; | 139 | u32 idx, off; |
| 104 | 140 | ||
| 105 | nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); | 141 | nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off); |
| 106 | /* Enter 00 state. */ | 142 | /* Enter 00 state. */ |
| 107 | priv->bitmap[idx] &= ~(genmask << off); | 143 | priv->bitmap[idx] &= ~(genmask << off); |
| 144 | list_del_rcu(&be->head); | ||
| 108 | } | 145 | } |
| 109 | 146 | ||
| 110 | static void nft_bitmap_activate(const struct net *net, | 147 | static void nft_bitmap_activate(const struct net *net, |
| @@ -112,74 +149,52 @@ static void nft_bitmap_activate(const struct net *net, | |||
| 112 | const struct nft_set_elem *elem) | 149 | const struct nft_set_elem *elem) |
| 113 | { | 150 | { |
| 114 | struct nft_bitmap *priv = nft_set_priv(set); | 151 | struct nft_bitmap *priv = nft_set_priv(set); |
| 115 | struct nft_set_ext *ext = elem->priv; | 152 | struct nft_bitmap_elem *be = elem->priv; |
| 116 | u8 genmask = nft_genmask_next(net); | 153 | u8 genmask = nft_genmask_next(net); |
| 117 | u32 idx, off; | 154 | u32 idx, off; |
| 118 | 155 | ||
| 119 | nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); | 156 | nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off); |
| 120 | /* Enter 11 state. */ | 157 | /* Enter 11 state. */ |
| 121 | priv->bitmap[idx] |= (genmask << off); | 158 | priv->bitmap[idx] |= (genmask << off); |
| 159 | nft_set_elem_change_active(net, set, &be->ext); | ||
| 122 | } | 160 | } |
| 123 | 161 | ||
| 124 | static bool nft_bitmap_flush(const struct net *net, | 162 | static bool nft_bitmap_flush(const struct net *net, |
| 125 | const struct nft_set *set, void *ext) | 163 | const struct nft_set *set, void *_be) |
| 126 | { | 164 | { |
| 127 | struct nft_bitmap *priv = nft_set_priv(set); | 165 | struct nft_bitmap *priv = nft_set_priv(set); |
| 128 | u8 genmask = nft_genmask_next(net); | 166 | u8 genmask = nft_genmask_next(net); |
| 167 | struct nft_bitmap_elem *be = _be; | ||
| 129 | u32 idx, off; | 168 | u32 idx, off; |
| 130 | 169 | ||
| 131 | nft_bitmap_location(nft_set_ext_key(ext)->data[0], &idx, &off); | 170 | nft_bitmap_location(set, nft_set_ext_key(&be->ext), &idx, &off); |
| 132 | /* Enter 10 state, similar to deactivation. */ | 171 | /* Enter 10 state, similar to deactivation. */ |
| 133 | priv->bitmap[idx] &= ~(genmask << off); | 172 | priv->bitmap[idx] &= ~(genmask << off); |
| 173 | nft_set_elem_change_active(net, set, &be->ext); | ||
| 134 | 174 | ||
| 135 | return true; | 175 | return true; |
| 136 | } | 176 | } |
| 137 | 177 | ||
| 138 | static struct nft_set_ext *nft_bitmap_ext_alloc(const struct nft_set *set, | ||
| 139 | const struct nft_set_elem *elem) | ||
| 140 | { | ||
| 141 | struct nft_set_ext_tmpl tmpl; | ||
| 142 | struct nft_set_ext *ext; | ||
| 143 | |||
| 144 | nft_set_ext_prepare(&tmpl); | ||
| 145 | nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); | ||
| 146 | |||
| 147 | ext = kzalloc(tmpl.len, GFP_KERNEL); | ||
| 148 | if (!ext) | ||
| 149 | return NULL; | ||
| 150 | |||
| 151 | nft_set_ext_init(ext, &tmpl); | ||
| 152 | memcpy(nft_set_ext_key(ext), elem->key.val.data, set->klen); | ||
| 153 | |||
| 154 | return ext; | ||
| 155 | } | ||
| 156 | |||
| 157 | static void *nft_bitmap_deactivate(const struct net *net, | 178 | static void *nft_bitmap_deactivate(const struct net *net, |
| 158 | const struct nft_set *set, | 179 | const struct nft_set *set, |
| 159 | const struct nft_set_elem *elem) | 180 | const struct nft_set_elem *elem) |
| 160 | { | 181 | { |
| 161 | struct nft_bitmap *priv = nft_set_priv(set); | 182 | struct nft_bitmap *priv = nft_set_priv(set); |
| 183 | struct nft_bitmap_elem *this = elem->priv, *be; | ||
| 162 | u8 genmask = nft_genmask_next(net); | 184 | u8 genmask = nft_genmask_next(net); |
| 163 | struct nft_set_ext *ext; | 185 | u32 idx, off; |
| 164 | u32 idx, off, key = 0; | ||
| 165 | |||
| 166 | memcpy(&key, elem->key.val.data, set->klen); | ||
| 167 | nft_bitmap_location(key, &idx, &off); | ||
| 168 | 186 | ||
| 169 | if (!nft_bitmap_active(priv->bitmap, idx, off, genmask)) | 187 | nft_bitmap_location(set, elem->key.val.data, &idx, &off); |
| 170 | return NULL; | ||
| 171 | 188 | ||
| 172 | /* We have no real set extension since this is a bitmap, allocate this | 189 | be = nft_bitmap_elem_find(set, this, genmask); |
| 173 | * dummy object that is released from the commit/abort path. | 190 | if (!be) |
| 174 | */ | ||
| 175 | ext = nft_bitmap_ext_alloc(set, elem); | ||
| 176 | if (!ext) | ||
| 177 | return NULL; | 191 | return NULL; |
| 178 | 192 | ||
| 179 | /* Enter 10 state. */ | 193 | /* Enter 10 state. */ |
| 180 | priv->bitmap[idx] &= ~(genmask << off); | 194 | priv->bitmap[idx] &= ~(genmask << off); |
| 195 | nft_set_elem_change_active(net, set, &be->ext); | ||
| 181 | 196 | ||
| 182 | return ext; | 197 | return be; |
| 183 | } | 198 | } |
| 184 | 199 | ||
| 185 | static void nft_bitmap_walk(const struct nft_ctx *ctx, | 200 | static void nft_bitmap_walk(const struct nft_ctx *ctx, |
| @@ -187,47 +202,23 @@ static void nft_bitmap_walk(const struct nft_ctx *ctx, | |||
| 187 | struct nft_set_iter *iter) | 202 | struct nft_set_iter *iter) |
| 188 | { | 203 | { |
| 189 | const struct nft_bitmap *priv = nft_set_priv(set); | 204 | const struct nft_bitmap *priv = nft_set_priv(set); |
| 190 | struct nft_set_ext_tmpl tmpl; | 205 | struct nft_bitmap_elem *be; |
| 191 | struct nft_set_elem elem; | 206 | struct nft_set_elem elem; |
| 192 | struct nft_set_ext *ext; | 207 | |
| 193 | int idx, off; | 208 | list_for_each_entry_rcu(be, &priv->list, head) { |
| 194 | u16 key; | 209 | if (iter->count < iter->skip) |
| 195 | 210 | goto cont; | |
| 196 | nft_set_ext_prepare(&tmpl); | 211 | if (!nft_set_elem_active(&be->ext, iter->genmask)) |
| 197 | nft_set_ext_add_length(&tmpl, NFT_SET_EXT_KEY, set->klen); | 212 | goto cont; |
| 198 | 213 | ||
| 199 | for (idx = 0; idx < priv->bitmap_size; idx++) { | 214 | elem.priv = be; |
| 200 | for (off = 0; off < BITS_PER_BYTE; off += 2) { | 215 | |
| 201 | if (iter->count < iter->skip) | 216 | iter->err = iter->fn(ctx, set, iter, &elem); |
| 202 | goto cont; | 217 | |
| 203 | 218 | if (iter->err < 0) | |
| 204 | if (!nft_bitmap_active(priv->bitmap, idx, off, | 219 | return; |
| 205 | iter->genmask)) | ||
| 206 | goto cont; | ||
| 207 | |||
| 208 | ext = kzalloc(tmpl.len, GFP_KERNEL); | ||
| 209 | if (!ext) { | ||
| 210 | iter->err = -ENOMEM; | ||
| 211 | return; | ||
| 212 | } | ||
| 213 | nft_set_ext_init(ext, &tmpl); | ||
| 214 | key = ((idx * BITS_PER_BYTE) + off) >> 1; | ||
| 215 | memcpy(nft_set_ext_key(ext), &key, set->klen); | ||
| 216 | |||
| 217 | elem.priv = ext; | ||
| 218 | iter->err = iter->fn(ctx, set, iter, &elem); | ||
| 219 | |||
| 220 | /* On set flush, this dummy extension object is released | ||
| 221 | * from the commit/abort path. | ||
| 222 | */ | ||
| 223 | if (!iter->flush) | ||
| 224 | kfree(ext); | ||
| 225 | |||
| 226 | if (iter->err < 0) | ||
| 227 | return; | ||
| 228 | cont: | 220 | cont: |
| 229 | iter->count++; | 221 | iter->count++; |
| 230 | } | ||
| 231 | } | 222 | } |
| 232 | } | 223 | } |
| 233 | 224 | ||
| @@ -258,6 +249,7 @@ static int nft_bitmap_init(const struct nft_set *set, | |||
| 258 | { | 249 | { |
| 259 | struct nft_bitmap *priv = nft_set_priv(set); | 250 | struct nft_bitmap *priv = nft_set_priv(set); |
| 260 | 251 | ||
| 252 | INIT_LIST_HEAD(&priv->list); | ||
| 261 | priv->bitmap_size = nft_bitmap_size(set->klen); | 253 | priv->bitmap_size = nft_bitmap_size(set->klen); |
| 262 | 254 | ||
| 263 | return 0; | 255 | return 0; |
| @@ -283,6 +275,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features, | |||
| 283 | 275 | ||
| 284 | static struct nft_set_ops nft_bitmap_ops __read_mostly = { | 276 | static struct nft_set_ops nft_bitmap_ops __read_mostly = { |
| 285 | .privsize = nft_bitmap_privsize, | 277 | .privsize = nft_bitmap_privsize, |
| 278 | .elemsize = offsetof(struct nft_bitmap_elem, ext), | ||
| 286 | .estimate = nft_bitmap_estimate, | 279 | .estimate = nft_bitmap_estimate, |
| 287 | .init = nft_bitmap_init, | 280 | .init = nft_bitmap_init, |
| 288 | .destroy = nft_bitmap_destroy, | 281 | .destroy = nft_bitmap_destroy, |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 7b73c7c161a9..596eaff66649 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -96,6 +96,44 @@ EXPORT_SYMBOL_GPL(nl_table); | |||
| 96 | 96 | ||
| 97 | static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); | 97 | static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); |
| 98 | 98 | ||
| 99 | static struct lock_class_key nlk_cb_mutex_keys[MAX_LINKS]; | ||
| 100 | |||
| 101 | static const char *const nlk_cb_mutex_key_strings[MAX_LINKS + 1] = { | ||
| 102 | "nlk_cb_mutex-ROUTE", | ||
| 103 | "nlk_cb_mutex-1", | ||
| 104 | "nlk_cb_mutex-USERSOCK", | ||
| 105 | "nlk_cb_mutex-FIREWALL", | ||
| 106 | "nlk_cb_mutex-SOCK_DIAG", | ||
| 107 | "nlk_cb_mutex-NFLOG", | ||
| 108 | "nlk_cb_mutex-XFRM", | ||
| 109 | "nlk_cb_mutex-SELINUX", | ||
| 110 | "nlk_cb_mutex-ISCSI", | ||
| 111 | "nlk_cb_mutex-AUDIT", | ||
| 112 | "nlk_cb_mutex-FIB_LOOKUP", | ||
| 113 | "nlk_cb_mutex-CONNECTOR", | ||
| 114 | "nlk_cb_mutex-NETFILTER", | ||
| 115 | "nlk_cb_mutex-IP6_FW", | ||
| 116 | "nlk_cb_mutex-DNRTMSG", | ||
| 117 | "nlk_cb_mutex-KOBJECT_UEVENT", | ||
| 118 | "nlk_cb_mutex-GENERIC", | ||
| 119 | "nlk_cb_mutex-17", | ||
| 120 | "nlk_cb_mutex-SCSITRANSPORT", | ||
| 121 | "nlk_cb_mutex-ECRYPTFS", | ||
| 122 | "nlk_cb_mutex-RDMA", | ||
| 123 | "nlk_cb_mutex-CRYPTO", | ||
| 124 | "nlk_cb_mutex-SMC", | ||
| 125 | "nlk_cb_mutex-23", | ||
| 126 | "nlk_cb_mutex-24", | ||
| 127 | "nlk_cb_mutex-25", | ||
| 128 | "nlk_cb_mutex-26", | ||
| 129 | "nlk_cb_mutex-27", | ||
| 130 | "nlk_cb_mutex-28", | ||
| 131 | "nlk_cb_mutex-29", | ||
| 132 | "nlk_cb_mutex-30", | ||
| 133 | "nlk_cb_mutex-31", | ||
| 134 | "nlk_cb_mutex-MAX_LINKS" | ||
| 135 | }; | ||
| 136 | |||
| 99 | static int netlink_dump(struct sock *sk); | 137 | static int netlink_dump(struct sock *sk); |
| 100 | static void netlink_skb_destructor(struct sk_buff *skb); | 138 | static void netlink_skb_destructor(struct sk_buff *skb); |
| 101 | 139 | ||
| @@ -585,6 +623,9 @@ static int __netlink_create(struct net *net, struct socket *sock, | |||
| 585 | } else { | 623 | } else { |
| 586 | nlk->cb_mutex = &nlk->cb_def_mutex; | 624 | nlk->cb_mutex = &nlk->cb_def_mutex; |
| 587 | mutex_init(nlk->cb_mutex); | 625 | mutex_init(nlk->cb_mutex); |
| 626 | lockdep_set_class_and_name(nlk->cb_mutex, | ||
| 627 | nlk_cb_mutex_keys + protocol, | ||
| 628 | nlk_cb_mutex_key_strings[protocol]); | ||
| 588 | } | 629 | } |
| 589 | init_waitqueue_head(&nlk->wait); | 630 | init_waitqueue_head(&nlk->wait); |
| 590 | 631 | ||
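Giving every netlink protocol its own lock class, as above, keeps lockdep from collapsing all cb_mutex instances into a single class and reporting false deadlocks when dump mutexes of different families nest. A sketch of the general pattern (lockdep_set_class_and_name and MAX_LINKS are real kernel symbols; the surrounding names are illustrative):

	/* One lockdep key per protocol; mutexes initialised from the
	 * same mutex_init() call site otherwise share a single class.
	 */
	static struct lock_class_key demo_keys[MAX_LINKS];

	static void demo_init_cb_mutex(struct mutex *m, int protocol,
				       const char *const names[])
	{
		mutex_init(m);
		lockdep_set_class_and_name(m, &demo_keys[protocol],
					   names[protocol]);
	}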
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index fb6e10fdb217..92e0981f7404 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
| @@ -783,8 +783,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 783 | 783 | ||
| 784 | if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid, | 784 | if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid, |
| 785 | cb->nlh->nlmsg_seq, NLM_F_MULTI, | 785 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
| 786 | skb, CTRL_CMD_NEWFAMILY) < 0) | 786 | skb, CTRL_CMD_NEWFAMILY) < 0) { |
| 787 | n--; | ||
| 787 | break; | 788 | break; |
| 789 | } | ||
| 788 | } | 790 | } |
| 789 | 791 | ||
| 790 | cb->args[0] = n; | 792 | cb->args[0] = n; |
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 6f5fa50f716d..1105a838bab8 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c | |||
| @@ -604,7 +604,7 @@ static int ip_tun_from_nlattr(const struct nlattr *attr, | |||
| 604 | ipv4 = true; | 604 | ipv4 = true; |
| 605 | break; | 605 | break; |
| 606 | case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: | 606 | case OVS_TUNNEL_KEY_ATTR_IPV6_SRC: |
| 607 | SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst, | 607 | SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src, |
| 608 | nla_get_in6_addr(a), is_mask); | 608 | nla_get_in6_addr(a), is_mask); |
| 609 | ipv6 = true; | 609 | ipv6 = true; |
| 610 | break; | 610 | break; |
| @@ -665,6 +665,8 @@ static int ip_tun_from_nlattr(const struct nlattr *attr, | |||
| 665 | tun_flags |= TUNNEL_VXLAN_OPT; | 665 | tun_flags |= TUNNEL_VXLAN_OPT; |
| 666 | opts_type = type; | 666 | opts_type = type; |
| 667 | break; | 667 | break; |
| 668 | case OVS_TUNNEL_KEY_ATTR_PAD: | ||
| 669 | break; | ||
| 668 | default: | 670 | default: |
| 669 | OVS_NLERR(log, "Unknown IP tunnel attribute %d", | 671 | OVS_NLERR(log, "Unknown IP tunnel attribute %d", |
| 670 | type); | 672 | type); |
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c index 3f9d8d7ec632..b099b64366f3 100644 --- a/net/rxrpc/conn_event.c +++ b/net/rxrpc/conn_event.c | |||
| @@ -275,6 +275,10 @@ static int rxrpc_process_event(struct rxrpc_connection *conn, | |||
| 275 | rxrpc_conn_retransmit_call(conn, skb); | 275 | rxrpc_conn_retransmit_call(conn, skb); |
| 276 | return 0; | 276 | return 0; |
| 277 | 277 | ||
| 278 | case RXRPC_PACKET_TYPE_BUSY: | ||
| 279 | /* Just ignore BUSY packets for now. */ | ||
| 280 | return 0; | ||
| 281 | |||
| 278 | case RXRPC_PACKET_TYPE_ABORT: | 282 | case RXRPC_PACKET_TYPE_ABORT: |
| 279 | if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), | 283 | if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), |
| 280 | &wtmp, sizeof(wtmp)) < 0) | 284 | &wtmp, sizeof(wtmp)) < 0) |
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c index 802ac7c2e5e8..5334e309f17f 100644 --- a/net/sched/sch_dsmark.c +++ b/net/sched/sch_dsmark.c | |||
| @@ -201,9 +201,13 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 201 | pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p); | 201 | pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p); |
| 202 | 202 | ||
| 203 | if (p->set_tc_index) { | 203 | if (p->set_tc_index) { |
| 204 | int wlen = skb_network_offset(skb); | ||
| 205 | |||
| 204 | switch (tc_skb_protocol(skb)) { | 206 | switch (tc_skb_protocol(skb)) { |
| 205 | case htons(ETH_P_IP): | 207 | case htons(ETH_P_IP): |
| 206 | if (skb_cow_head(skb, sizeof(struct iphdr))) | 208 | wlen += sizeof(struct iphdr); |
| 209 | if (!pskb_may_pull(skb, wlen) || | ||
| 210 | skb_try_make_writable(skb, wlen)) | ||
| 207 | goto drop; | 211 | goto drop; |
| 208 | 212 | ||
| 209 | skb->tc_index = ipv4_get_dsfield(ip_hdr(skb)) | 213 | skb->tc_index = ipv4_get_dsfield(ip_hdr(skb)) |
| @@ -211,7 +215,9 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, | |||
| 211 | break; | 215 | break; |
| 212 | 216 | ||
| 213 | case htons(ETH_P_IPV6): | 217 | case htons(ETH_P_IPV6): |
| 214 | if (skb_cow_head(skb, sizeof(struct ipv6hdr))) | 218 | wlen += sizeof(struct ipv6hdr); |
| 219 | if (!pskb_may_pull(skb, wlen) || | ||
| 220 | skb_try_make_writable(skb, wlen)) | ||
| 215 | goto drop; | 221 | goto drop; |
| 216 | 222 | ||
| 217 | skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb)) | 223 | skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb)) |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 2a6835b4562b..0439a1a68367 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
| @@ -71,9 +71,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
| 71 | { | 71 | { |
| 72 | struct net *net = sock_net(sk); | 72 | struct net *net = sock_net(sk); |
| 73 | struct sctp_sock *sp; | 73 | struct sctp_sock *sp; |
| 74 | int i; | ||
| 75 | sctp_paramhdr_t *p; | 74 | sctp_paramhdr_t *p; |
| 76 | int err; | 75 | int i; |
| 77 | 76 | ||
| 78 | /* Retrieve the SCTP per socket area. */ | 77 | /* Retrieve the SCTP per socket area. */ |
| 79 | sp = sctp_sk((struct sock *)sk); | 78 | sp = sctp_sk((struct sock *)sk); |
| @@ -264,8 +263,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a | |||
| 264 | 263 | ||
| 265 | /* AUTH related initializations */ | 264 | /* AUTH related initializations */ |
| 266 | INIT_LIST_HEAD(&asoc->endpoint_shared_keys); | 265 | INIT_LIST_HEAD(&asoc->endpoint_shared_keys); |
| 267 | err = sctp_auth_asoc_copy_shkeys(ep, asoc, gfp); | 266 | if (sctp_auth_asoc_copy_shkeys(ep, asoc, gfp)) |
| 268 | if (err) | ||
| 269 | goto fail_init; | 267 | goto fail_init; |
| 270 | 268 | ||
| 271 | asoc->active_key_id = ep->active_key_id; | 269 | asoc->active_key_id = ep->active_key_id; |
diff --git a/net/sctp/output.c b/net/sctp/output.c index 71ce6b945dcb..1224421036b3 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
| @@ -546,7 +546,6 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) | |||
| 546 | struct sctp_association *asoc = tp->asoc; | 546 | struct sctp_association *asoc = tp->asoc; |
| 547 | struct sctp_chunk *chunk, *tmp; | 547 | struct sctp_chunk *chunk, *tmp; |
| 548 | int pkt_count, gso = 0; | 548 | int pkt_count, gso = 0; |
| 549 | int confirm; | ||
| 550 | struct dst_entry *dst; | 549 | struct dst_entry *dst; |
| 551 | struct sk_buff *head; | 550 | struct sk_buff *head; |
| 552 | struct sctphdr *sh; | 551 | struct sctphdr *sh; |
| @@ -625,13 +624,13 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) | |||
| 625 | asoc->peer.last_sent_to = tp; | 624 | asoc->peer.last_sent_to = tp; |
| 626 | } | 625 | } |
| 627 | head->ignore_df = packet->ipfragok; | 626 | head->ignore_df = packet->ipfragok; |
| 628 | confirm = tp->dst_pending_confirm; | 627 | if (tp->dst_pending_confirm) |
| 629 | if (confirm) | ||
| 630 | skb_set_dst_pending_confirm(head, 1); | 628 | skb_set_dst_pending_confirm(head, 1); |
| 631 | /* neighbour should be confirmed on successful transmission or | 629 | /* neighbour should be confirmed on successful transmission or |
| 632 | * positive error | 630 | * positive error |
| 633 | */ | 631 | */ |
| 634 | if (tp->af_specific->sctp_xmit(head, tp) >= 0 && confirm) | 632 | if (tp->af_specific->sctp_xmit(head, tp) >= 0 && |
| 633 | tp->dst_pending_confirm) | ||
| 635 | tp->dst_pending_confirm = 0; | 634 | tp->dst_pending_confirm = 0; |
| 636 | 635 | ||
| 637 | out: | 636 | out: |
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index db352e5d61f8..025ccff67072 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c | |||
| @@ -382,17 +382,18 @@ static int sctp_prsctp_prune_sent(struct sctp_association *asoc, | |||
| 382 | } | 382 | } |
| 383 | 383 | ||
| 384 | static int sctp_prsctp_prune_unsent(struct sctp_association *asoc, | 384 | static int sctp_prsctp_prune_unsent(struct sctp_association *asoc, |
| 385 | struct sctp_sndrcvinfo *sinfo, | 385 | struct sctp_sndrcvinfo *sinfo, int msg_len) |
| 386 | struct list_head *queue, int msg_len) | ||
| 387 | { | 386 | { |
| 387 | struct sctp_outq *q = &asoc->outqueue; | ||
| 388 | struct sctp_chunk *chk, *temp; | 388 | struct sctp_chunk *chk, *temp; |
| 389 | 389 | ||
| 390 | list_for_each_entry_safe(chk, temp, queue, list) { | 390 | list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) { |
| 391 | if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || | 391 | if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || |
| 392 | chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) | 392 | chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) |
| 393 | continue; | 393 | continue; |
| 394 | 394 | ||
| 395 | list_del_init(&chk->list); | 395 | list_del_init(&chk->list); |
| 396 | q->out_qlen -= chk->skb->len; | ||
| 396 | asoc->sent_cnt_removable--; | 397 | asoc->sent_cnt_removable--; |
| 397 | asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; | 398 | asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; |
| 398 | 399 | ||
| @@ -431,9 +432,7 @@ void sctp_prsctp_prune(struct sctp_association *asoc, | |||
| 431 | return; | 432 | return; |
| 432 | } | 433 | } |
| 433 | 434 | ||
| 434 | sctp_prsctp_prune_unsent(asoc, sinfo, | 435 | sctp_prsctp_prune_unsent(asoc, sinfo, msg_len); |
| 435 | &asoc->outqueue.out_chunk_list, | ||
| 436 | msg_len); | ||
| 437 | } | 436 | } |
| 438 | 437 | ||
| 439 | /* Mark all the eligible packets on a transport for retransmission. */ | 438 | /* Mark all the eligible packets on a transport for retransmission. */ |
diff --git a/net/socket.c b/net/socket.c index e034fe4164be..985ef06792d6 100644 --- a/net/socket.c +++ b/net/socket.c | |||
| @@ -652,6 +652,16 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg, | |||
| 652 | } | 652 | } |
| 653 | EXPORT_SYMBOL(kernel_sendmsg); | 653 | EXPORT_SYMBOL(kernel_sendmsg); |
| 654 | 654 | ||
| 655 | static bool skb_is_err_queue(const struct sk_buff *skb) | ||
| 656 | { | ||
| 657 | /* pkt_type of skbs enqueued on the error queue are set to | ||
| 658 | * PACKET_OUTGOING in skb_set_err_queue(). This is only safe to do | ||
| 659 | * in recvmsg, since skbs received on a local socket will never | ||
| 660 | * have a pkt_type of PACKET_OUTGOING. | ||
| 661 | */ | ||
| 662 | return skb->pkt_type == PACKET_OUTGOING; | ||
| 663 | } | ||
| 664 | |||
| 655 | /* | 665 | /* |
| 656 | * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) | 666 | * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP) |
| 657 | */ | 667 | */ |
| @@ -695,7 +705,8 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, | |||
| 695 | put_cmsg(msg, SOL_SOCKET, | 705 | put_cmsg(msg, SOL_SOCKET, |
| 696 | SCM_TIMESTAMPING, sizeof(tss), &tss); | 706 | SCM_TIMESTAMPING, sizeof(tss), &tss); |
| 697 | 707 | ||
| 698 | if (skb->len && (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS)) | 708 | if (skb_is_err_queue(skb) && skb->len && |
| 709 | SKB_EXT_ERR(skb)->opt_stats) | ||
| 699 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS, | 710 | put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPING_OPT_STATS, |
| 700 | skb->len, skb->data); | 711 | skb->len, skb->data); |
| 701 | } | 712 | } |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 81cd31acf690..3b332b395045 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
| @@ -503,7 +503,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | |||
| 503 | struct ib_cq *sendcq, *recvcq; | 503 | struct ib_cq *sendcq, *recvcq; |
| 504 | int rc; | 504 | int rc; |
| 505 | 505 | ||
| 506 | max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES); | 506 | max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge, |
| 507 | RPCRDMA_MAX_SEND_SGES); | ||
| 507 | if (max_sge < RPCRDMA_MIN_SEND_SGES) { | 508 | if (max_sge < RPCRDMA_MIN_SEND_SGES) { |
| 508 | pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge); | 509 | pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge); |
| 509 | return -ENOMEM; | 510 | return -ENOMEM; |
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 9d94e65d0894..271cd66e4b3b 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c | |||
| @@ -141,6 +141,11 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower, | |||
| 141 | static void tipc_subscrp_timeout(unsigned long data) | 141 | static void tipc_subscrp_timeout(unsigned long data) |
| 142 | { | 142 | { |
| 143 | struct tipc_subscription *sub = (struct tipc_subscription *)data; | 143 | struct tipc_subscription *sub = (struct tipc_subscription *)data; |
| 144 | struct tipc_subscriber *subscriber = sub->subscriber; | ||
| 145 | |||
| 146 | spin_lock_bh(&subscriber->lock); | ||
| 147 | tipc_nametbl_unsubscribe(sub); | ||
| 148 | spin_unlock_bh(&subscriber->lock); | ||
| 144 | 149 | ||
| 145 | /* Notify subscriber of timeout */ | 150 | /* Notify subscriber of timeout */ |
| 146 | tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, | 151 | tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper, |
| @@ -173,7 +178,6 @@ static void tipc_subscrp_kref_release(struct kref *kref) | |||
| 173 | struct tipc_subscriber *subscriber = sub->subscriber; | 178 | struct tipc_subscriber *subscriber = sub->subscriber; |
| 174 | 179 | ||
| 175 | spin_lock_bh(&subscriber->lock); | 180 | spin_lock_bh(&subscriber->lock); |
| 176 | tipc_nametbl_unsubscribe(sub); | ||
| 177 | list_del(&sub->subscrp_list); | 181 | list_del(&sub->subscrp_list); |
| 178 | atomic_dec(&tn->subscription_count); | 182 | atomic_dec(&tn->subscription_count); |
| 179 | spin_unlock_bh(&subscriber->lock); | 183 | spin_unlock_bh(&subscriber->lock); |
| @@ -205,6 +209,7 @@ static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber, | |||
| 205 | if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) | 209 | if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) |
| 206 | continue; | 210 | continue; |
| 207 | 211 | ||
| 212 | tipc_nametbl_unsubscribe(sub); | ||
| 208 | tipc_subscrp_get(sub); | 213 | tipc_subscrp_get(sub); |
| 209 | spin_unlock_bh(&subscriber->lock); | 214 | spin_unlock_bh(&subscriber->lock); |
| 210 | tipc_subscrp_delete(sub); | 215 | tipc_subscrp_delete(sub); |
diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 6a0d48525fcf..c36757e72844 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c | |||
| @@ -146,6 +146,7 @@ void unix_notinflight(struct user_struct *user, struct file *fp) | |||
| 146 | if (s) { | 146 | if (s) { |
| 147 | struct unix_sock *u = unix_sk(s); | 147 | struct unix_sock *u = unix_sk(s); |
| 148 | 148 | ||
| 149 | BUG_ON(!atomic_long_read(&u->inflight)); | ||
| 149 | BUG_ON(list_empty(&u->link)); | 150 | BUG_ON(list_empty(&u->link)); |
| 150 | 151 | ||
| 151 | if (atomic_long_dec_and_test(&u->inflight)) | 152 | if (atomic_long_dec_and_test(&u->inflight)) |
| @@ -341,6 +342,14 @@ void unix_gc(void) | |||
| 341 | } | 342 | } |
| 342 | list_del(&cursor); | 343 | list_del(&cursor); |
| 343 | 344 | ||
| 345 | /* Now gc_candidates contains only garbage. Restore original | ||
| 346 | * inflight counters for these as well, and remove the skbuffs | ||
| 347 | * which are creating the cycle(s). | ||
| 348 | */ | ||
| 349 | skb_queue_head_init(&hitlist); | ||
| 350 | list_for_each_entry(u, &gc_candidates, link) | ||
| 351 | scan_children(&u->sk, inc_inflight, &hitlist); | ||
| 352 | |||
| 344 | /* not_cycle_list contains those sockets which do not make up a | 353 | /* not_cycle_list contains those sockets which do not make up a |
| 345 | * cycle. Restore these to the inflight list. | 354 | * cycle. Restore these to the inflight list. |
| 346 | */ | 355 | */ |
| @@ -350,14 +359,6 @@ void unix_gc(void) | |||
| 350 | list_move_tail(&u->link, &gc_inflight_list); | 359 | list_move_tail(&u->link, &gc_inflight_list); |
| 351 | } | 360 | } |
| 352 | 361 | ||
| 353 | /* Now gc_candidates contains only garbage. Restore original | ||
| 354 | * inflight counters for these as well, and remove the skbuffs | ||
| 355 | * which are creating the cycle(s). | ||
| 356 | */ | ||
| 357 | skb_queue_head_init(&hitlist); | ||
| 358 | list_for_each_entry(u, &gc_candidates, link) | ||
| 359 | scan_children(&u->sk, inc_inflight, &hitlist); | ||
| 360 | |||
| 361 | spin_unlock(&unix_gc_lock); | 362 | spin_unlock(&unix_gc_lock); |
| 362 | 363 | ||
| 363 | /* Here we are. Hitlist is filled. Die. */ | 364 | /* Here we are. Hitlist is filled. Die. */ |
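
In the unix_gc() hunks the hitlist pass is moved, not rewritten: the loop that re-increments the inflight counters of true garbage (and collects the cycle-forming skbs) now runs before the not_cycle_list sockets are returned to gc_inflight_list. One reading: every counter the GC disturbed is made consistent before any socket becomes globally visible again, and the new BUG_ON(!atomic_long_read(&u->inflight)) in unix_notinflight() turns any remaining underflow into a loud failure. The ordering, as a sketch with declarations elided:

    /* 1. Restore counters and build the hitlist while candidates are isolated. */
    skb_queue_head_init(&hitlist);
    list_for_each_entry(u, &gc_candidates, link)
            scan_children(&u->sk, inc_inflight, &hitlist);

    /* 2. Only now hand the non-garbage sockets back to the inflight list. */
    while (!list_empty(&not_cycle_list)) {
            u = list_entry(not_cycle_list.next, struct unix_sock, link);
            __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
            list_move_tail(&u->link, &gc_inflight_list);
    }
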
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 9f770f33c100..6f7f6757ceef 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c | |||
| @@ -1102,10 +1102,19 @@ static const struct proto_ops vsock_dgram_ops = { | |||
| 1102 | .sendpage = sock_no_sendpage, | 1102 | .sendpage = sock_no_sendpage, |
| 1103 | }; | 1103 | }; |
| 1104 | 1104 | ||
| 1105 | static int vsock_transport_cancel_pkt(struct vsock_sock *vsk) | ||
| 1106 | { | ||
| 1107 | if (!transport->cancel_pkt) | ||
| 1108 | return -EOPNOTSUPP; | ||
| 1109 | |||
| 1110 | return transport->cancel_pkt(vsk); | ||
| 1111 | } | ||
| 1112 | |||
| 1105 | static void vsock_connect_timeout(struct work_struct *work) | 1113 | static void vsock_connect_timeout(struct work_struct *work) |
| 1106 | { | 1114 | { |
| 1107 | struct sock *sk; | 1115 | struct sock *sk; |
| 1108 | struct vsock_sock *vsk; | 1116 | struct vsock_sock *vsk; |
| 1117 | int cancel = 0; | ||
| 1109 | 1118 | ||
| 1110 | vsk = container_of(work, struct vsock_sock, dwork.work); | 1119 | vsk = container_of(work, struct vsock_sock, dwork.work); |
| 1111 | sk = sk_vsock(vsk); | 1120 | sk = sk_vsock(vsk); |
| @@ -1116,8 +1125,11 @@ static void vsock_connect_timeout(struct work_struct *work) | |||
| 1116 | sk->sk_state = SS_UNCONNECTED; | 1125 | sk->sk_state = SS_UNCONNECTED; |
| 1117 | sk->sk_err = ETIMEDOUT; | 1126 | sk->sk_err = ETIMEDOUT; |
| 1118 | sk->sk_error_report(sk); | 1127 | sk->sk_error_report(sk); |
| 1128 | cancel = 1; | ||
| 1119 | } | 1129 | } |
| 1120 | release_sock(sk); | 1130 | release_sock(sk); |
| 1131 | if (cancel) | ||
| 1132 | vsock_transport_cancel_pkt(vsk); | ||
| 1121 | 1133 | ||
| 1122 | sock_put(sk); | 1134 | sock_put(sk); |
| 1123 | } | 1135 | } |
| @@ -1224,11 +1236,13 @@ static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr, | |||
| 1224 | err = sock_intr_errno(timeout); | 1236 | err = sock_intr_errno(timeout); |
| 1225 | sk->sk_state = SS_UNCONNECTED; | 1237 | sk->sk_state = SS_UNCONNECTED; |
| 1226 | sock->state = SS_UNCONNECTED; | 1238 | sock->state = SS_UNCONNECTED; |
| 1239 | vsock_transport_cancel_pkt(vsk); | ||
| 1227 | goto out_wait; | 1240 | goto out_wait; |
| 1228 | } else if (timeout == 0) { | 1241 | } else if (timeout == 0) { |
| 1229 | err = -ETIMEDOUT; | 1242 | err = -ETIMEDOUT; |
| 1230 | sk->sk_state = SS_UNCONNECTED; | 1243 | sk->sk_state = SS_UNCONNECTED; |
| 1231 | sock->state = SS_UNCONNECTED; | 1244 | sock->state = SS_UNCONNECTED; |
| 1245 | vsock_transport_cancel_pkt(vsk); | ||
| 1232 | goto out_wait; | 1246 | goto out_wait; |
| 1233 | } | 1247 | } |
| 1234 | 1248 | ||
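
vsock_transport_cancel_pkt() is a thin wrapper around a new, optional transport hook: when a connect attempt is interrupted by a signal or times out, any packets the transport still has queued for that socket can be purged, and transports without the hook simply report -EOPNOTSUPP. A sketch of the ops-table contract, with the struct contents abridged:

    struct vsock_transport {
            /* ... */
            int (*cancel_pkt)(struct vsock_sock *vsk);  /* may be NULL */
    };
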
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 9d24c0e958b1..68675a151f22 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c | |||
| @@ -213,6 +213,47 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt) | |||
| 213 | return len; | 213 | return len; |
| 214 | } | 214 | } |
| 215 | 215 | ||
| 216 | static int | ||
| 217 | virtio_transport_cancel_pkt(struct vsock_sock *vsk) | ||
| 218 | { | ||
| 219 | struct virtio_vsock *vsock; | ||
| 220 | struct virtio_vsock_pkt *pkt, *n; | ||
| 221 | int cnt = 0; | ||
| 222 | LIST_HEAD(freeme); | ||
| 223 | |||
| 224 | vsock = virtio_vsock_get(); | ||
| 225 | if (!vsock) { | ||
| 226 | return -ENODEV; | ||
| 227 | } | ||
| 228 | |||
| 229 | spin_lock_bh(&vsock->send_pkt_list_lock); | ||
| 230 | list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) { | ||
| 231 | if (pkt->vsk != vsk) | ||
| 232 | continue; | ||
| 233 | list_move(&pkt->list, &freeme); | ||
| 234 | } | ||
| 235 | spin_unlock_bh(&vsock->send_pkt_list_lock); | ||
| 236 | |||
| 237 | list_for_each_entry_safe(pkt, n, &freeme, list) { | ||
| 238 | if (pkt->reply) | ||
| 239 | cnt++; | ||
| 240 | list_del(&pkt->list); | ||
| 241 | virtio_transport_free_pkt(pkt); | ||
| 242 | } | ||
| 243 | |||
| 244 | if (cnt) { | ||
| 245 | struct virtqueue *rx_vq = vsock->vqs[VSOCK_VQ_RX]; | ||
| 246 | int new_cnt; | ||
| 247 | |||
| 248 | new_cnt = atomic_sub_return(cnt, &vsock->queued_replies); | ||
| 249 | if (new_cnt + cnt >= virtqueue_get_vring_size(rx_vq) && | ||
| 250 | new_cnt < virtqueue_get_vring_size(rx_vq)) | ||
| 251 | queue_work(virtio_vsock_workqueue, &vsock->rx_work); | ||
| 252 | } | ||
| 253 | |||
| 254 | return 0; | ||
| 255 | } | ||
| 256 | |||
| 216 | static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) | 257 | static void virtio_vsock_rx_fill(struct virtio_vsock *vsock) |
| 217 | { | 258 | { |
| 218 | int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE; | 259 | int buf_len = VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE; |
| @@ -462,6 +503,7 @@ static struct virtio_transport virtio_transport = { | |||
| 462 | .release = virtio_transport_release, | 503 | .release = virtio_transport_release, |
| 463 | .connect = virtio_transport_connect, | 504 | .connect = virtio_transport_connect, |
| 464 | .shutdown = virtio_transport_shutdown, | 505 | .shutdown = virtio_transport_shutdown, |
| 506 | .cancel_pkt = virtio_transport_cancel_pkt, | ||
| 465 | 507 | ||
| 466 | .dgram_bind = virtio_transport_dgram_bind, | 508 | .dgram_bind = virtio_transport_dgram_bind, |
| 467 | .dgram_dequeue = virtio_transport_dgram_dequeue, | 509 | .dgram_dequeue = virtio_transport_dgram_dequeue, |
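
The queued_replies arithmetic in virtio_transport_cancel_pkt() is the subtle part: rx processing throttles itself once the number of queued reply packets reaches the rx vring size, so cancelling replies may have to kick it awake again. A worked example, with the vring size assumed purely for illustration:

    /* Assume virtqueue_get_vring_size(rx_vq) == 128 and that
     * queued_replies == 130 before the cancel, i.e. rx work is stalled.
     * The cancel frees cnt == 5 reply packets:
     *
     *   new_cnt = 130 - 5 = 125
     *   new_cnt + cnt = 130 >= 128   (we were at or above the limit)
     *   new_cnt       = 125 <  128   (we are below it now)
     *
     * Both conditions hold only when the limit is crossed downward,
     * which is exactly when rx_work needs to be requeued.
     */
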
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index 8d592a45b597..af087b44ceea 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c | |||
| @@ -58,6 +58,7 @@ virtio_transport_alloc_pkt(struct virtio_vsock_pkt_info *info, | |||
| 58 | pkt->len = len; | 58 | pkt->len = len; |
| 59 | pkt->hdr.len = cpu_to_le32(len); | 59 | pkt->hdr.len = cpu_to_le32(len); |
| 60 | pkt->reply = info->reply; | 60 | pkt->reply = info->reply; |
| 61 | pkt->vsk = info->vsk; | ||
| 61 | 62 | ||
| 62 | if (info->msg && len > 0) { | 63 | if (info->msg && len > 0) { |
| 63 | pkt->buf = kmalloc(len, GFP_KERNEL); | 64 | pkt->buf = kmalloc(len, GFP_KERNEL); |
| @@ -180,6 +181,7 @@ static int virtio_transport_send_credit_update(struct vsock_sock *vsk, | |||
| 180 | struct virtio_vsock_pkt_info info = { | 181 | struct virtio_vsock_pkt_info info = { |
| 181 | .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE, | 182 | .op = VIRTIO_VSOCK_OP_CREDIT_UPDATE, |
| 182 | .type = type, | 183 | .type = type, |
| 184 | .vsk = vsk, | ||
| 183 | }; | 185 | }; |
| 184 | 186 | ||
| 185 | return virtio_transport_send_pkt_info(vsk, &info); | 187 | return virtio_transport_send_pkt_info(vsk, &info); |
| @@ -519,6 +521,7 @@ int virtio_transport_connect(struct vsock_sock *vsk) | |||
| 519 | struct virtio_vsock_pkt_info info = { | 521 | struct virtio_vsock_pkt_info info = { |
| 520 | .op = VIRTIO_VSOCK_OP_REQUEST, | 522 | .op = VIRTIO_VSOCK_OP_REQUEST, |
| 521 | .type = VIRTIO_VSOCK_TYPE_STREAM, | 523 | .type = VIRTIO_VSOCK_TYPE_STREAM, |
| 524 | .vsk = vsk, | ||
| 522 | }; | 525 | }; |
| 523 | 526 | ||
| 524 | return virtio_transport_send_pkt_info(vsk, &info); | 527 | return virtio_transport_send_pkt_info(vsk, &info); |
| @@ -534,6 +537,7 @@ int virtio_transport_shutdown(struct vsock_sock *vsk, int mode) | |||
| 534 | VIRTIO_VSOCK_SHUTDOWN_RCV : 0) | | 537 | VIRTIO_VSOCK_SHUTDOWN_RCV : 0) | |
| 535 | (mode & SEND_SHUTDOWN ? | 538 | (mode & SEND_SHUTDOWN ? |
| 536 | VIRTIO_VSOCK_SHUTDOWN_SEND : 0), | 539 | VIRTIO_VSOCK_SHUTDOWN_SEND : 0), |
| 540 | .vsk = vsk, | ||
| 537 | }; | 541 | }; |
| 538 | 542 | ||
| 539 | return virtio_transport_send_pkt_info(vsk, &info); | 543 | return virtio_transport_send_pkt_info(vsk, &info); |
| @@ -560,6 +564,7 @@ virtio_transport_stream_enqueue(struct vsock_sock *vsk, | |||
| 560 | .type = VIRTIO_VSOCK_TYPE_STREAM, | 564 | .type = VIRTIO_VSOCK_TYPE_STREAM, |
| 561 | .msg = msg, | 565 | .msg = msg, |
| 562 | .pkt_len = len, | 566 | .pkt_len = len, |
| 567 | .vsk = vsk, | ||
| 563 | }; | 568 | }; |
| 564 | 569 | ||
| 565 | return virtio_transport_send_pkt_info(vsk, &info); | 570 | return virtio_transport_send_pkt_info(vsk, &info); |
| @@ -581,6 +586,7 @@ static int virtio_transport_reset(struct vsock_sock *vsk, | |||
| 581 | .op = VIRTIO_VSOCK_OP_RST, | 586 | .op = VIRTIO_VSOCK_OP_RST, |
| 582 | .type = VIRTIO_VSOCK_TYPE_STREAM, | 587 | .type = VIRTIO_VSOCK_TYPE_STREAM, |
| 583 | .reply = !!pkt, | 588 | .reply = !!pkt, |
| 589 | .vsk = vsk, | ||
| 584 | }; | 590 | }; |
| 585 | 591 | ||
| 586 | /* Send RST only if the original pkt is not a RST pkt */ | 592 | /* Send RST only if the original pkt is not a RST pkt */ |
| @@ -826,6 +832,7 @@ virtio_transport_send_response(struct vsock_sock *vsk, | |||
| 826 | .remote_cid = le64_to_cpu(pkt->hdr.src_cid), | 832 | .remote_cid = le64_to_cpu(pkt->hdr.src_cid), |
| 827 | .remote_port = le32_to_cpu(pkt->hdr.src_port), | 833 | .remote_port = le32_to_cpu(pkt->hdr.src_port), |
| 828 | .reply = true, | 834 | .reply = true, |
| 835 | .vsk = vsk, | ||
| 829 | }; | 836 | }; |
| 830 | 837 | ||
| 831 | return virtio_transport_send_pkt_info(vsk, &info); | 838 | return virtio_transport_send_pkt_info(vsk, &info); |
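
The repeated `.vsk = vsk` additions in virtio_transport_common.c are the other half of cancellation: every packet now records its owning socket at allocation time (pkt->vsk = info->vsk), which is what the `pkt->vsk != vsk` test in virtio_transport_cancel_pkt() matches against. The added bookkeeping, sketched with the struct contents abridged:

    struct virtio_vsock_pkt {
            /* ... */
            struct vsock_sock *vsk;   /* owner, copied from info->vsk */
    };
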
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d7f8be4e321a..2312dc2ffdb9 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
| @@ -545,22 +545,18 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, | |||
| 545 | { | 545 | { |
| 546 | int err; | 546 | int err; |
| 547 | 547 | ||
| 548 | rtnl_lock(); | ||
| 549 | |||
| 550 | if (!cb->args[0]) { | 548 | if (!cb->args[0]) { |
| 551 | err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, | 549 | err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, |
| 552 | genl_family_attrbuf(&nl80211_fam), | 550 | genl_family_attrbuf(&nl80211_fam), |
| 553 | nl80211_fam.maxattr, nl80211_policy); | 551 | nl80211_fam.maxattr, nl80211_policy); |
| 554 | if (err) | 552 | if (err) |
| 555 | goto out_unlock; | 553 | return err; |
| 556 | 554 | ||
| 557 | *wdev = __cfg80211_wdev_from_attrs( | 555 | *wdev = __cfg80211_wdev_from_attrs( |
| 558 | sock_net(skb->sk), | 556 | sock_net(skb->sk), |
| 559 | genl_family_attrbuf(&nl80211_fam)); | 557 | genl_family_attrbuf(&nl80211_fam)); |
| 560 | if (IS_ERR(*wdev)) { | 558 | if (IS_ERR(*wdev)) |
| 561 | err = PTR_ERR(*wdev); | 559 | return PTR_ERR(*wdev); |
| 562 | goto out_unlock; | ||
| 563 | } | ||
| 564 | *rdev = wiphy_to_rdev((*wdev)->wiphy); | 560 | *rdev = wiphy_to_rdev((*wdev)->wiphy); |
| 565 | /* 0 is the first index - add 1 to parse only once */ | 561 | /* 0 is the first index - add 1 to parse only once */ |
| 566 | cb->args[0] = (*rdev)->wiphy_idx + 1; | 562 | cb->args[0] = (*rdev)->wiphy_idx + 1; |
| @@ -570,10 +566,8 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, | |||
| 570 | struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); | 566 | struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); |
| 571 | struct wireless_dev *tmp; | 567 | struct wireless_dev *tmp; |
| 572 | 568 | ||
| 573 | if (!wiphy) { | 569 | if (!wiphy) |
| 574 | err = -ENODEV; | 570 | return -ENODEV; |
| 575 | goto out_unlock; | ||
| 576 | } | ||
| 577 | *rdev = wiphy_to_rdev(wiphy); | 571 | *rdev = wiphy_to_rdev(wiphy); |
| 578 | *wdev = NULL; | 572 | *wdev = NULL; |
| 579 | 573 | ||
| @@ -584,21 +578,11 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, | |||
| 584 | } | 578 | } |
| 585 | } | 579 | } |
| 586 | 580 | ||
| 587 | if (!*wdev) { | 581 | if (!*wdev) |
| 588 | err = -ENODEV; | 582 | return -ENODEV; |
| 589 | goto out_unlock; | ||
| 590 | } | ||
| 591 | } | 583 | } |
| 592 | 584 | ||
| 593 | return 0; | 585 | return 0; |
| 594 | out_unlock: | ||
| 595 | rtnl_unlock(); | ||
| 596 | return err; | ||
| 597 | } | ||
| 598 | |||
| 599 | static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev) | ||
| 600 | { | ||
| 601 | rtnl_unlock(); | ||
| 602 | } | 586 | } |
| 603 | 587 | ||
| 604 | /* IE validation */ | 588 | /* IE validation */ |
| @@ -2608,17 +2592,17 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback * | |||
| 2608 | int filter_wiphy = -1; | 2592 | int filter_wiphy = -1; |
| 2609 | struct cfg80211_registered_device *rdev; | 2593 | struct cfg80211_registered_device *rdev; |
| 2610 | struct wireless_dev *wdev; | 2594 | struct wireless_dev *wdev; |
| 2595 | int ret; | ||
| 2611 | 2596 | ||
| 2612 | rtnl_lock(); | 2597 | rtnl_lock(); |
| 2613 | if (!cb->args[2]) { | 2598 | if (!cb->args[2]) { |
| 2614 | struct nl80211_dump_wiphy_state state = { | 2599 | struct nl80211_dump_wiphy_state state = { |
| 2615 | .filter_wiphy = -1, | 2600 | .filter_wiphy = -1, |
| 2616 | }; | 2601 | }; |
| 2617 | int ret; | ||
| 2618 | 2602 | ||
| 2619 | ret = nl80211_dump_wiphy_parse(skb, cb, &state); | 2603 | ret = nl80211_dump_wiphy_parse(skb, cb, &state); |
| 2620 | if (ret) | 2604 | if (ret) |
| 2621 | return ret; | 2605 | goto out_unlock; |
| 2622 | 2606 | ||
| 2623 | filter_wiphy = state.filter_wiphy; | 2607 | filter_wiphy = state.filter_wiphy; |
| 2624 | 2608 | ||
| @@ -2663,12 +2647,14 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback * | |||
| 2663 | wp_idx++; | 2647 | wp_idx++; |
| 2664 | } | 2648 | } |
| 2665 | out: | 2649 | out: |
| 2666 | rtnl_unlock(); | ||
| 2667 | |||
| 2668 | cb->args[0] = wp_idx; | 2650 | cb->args[0] = wp_idx; |
| 2669 | cb->args[1] = if_idx; | 2651 | cb->args[1] = if_idx; |
| 2670 | 2652 | ||
| 2671 | return skb->len; | 2653 | ret = skb->len; |
| 2654 | out_unlock: | ||
| 2655 | rtnl_unlock(); | ||
| 2656 | |||
| 2657 | return ret; | ||
| 2672 | } | 2658 | } |
| 2673 | 2659 | ||
| 2674 | static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) | 2660 | static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) |
| @@ -4452,9 +4438,10 @@ static int nl80211_dump_station(struct sk_buff *skb, | |||
| 4452 | int sta_idx = cb->args[2]; | 4438 | int sta_idx = cb->args[2]; |
| 4453 | int err; | 4439 | int err; |
| 4454 | 4440 | ||
| 4441 | rtnl_lock(); | ||
| 4455 | err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); | 4442 | err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); |
| 4456 | if (err) | 4443 | if (err) |
| 4457 | return err; | 4444 | goto out_err; |
| 4458 | 4445 | ||
| 4459 | if (!wdev->netdev) { | 4446 | if (!wdev->netdev) { |
| 4460 | err = -EINVAL; | 4447 | err = -EINVAL; |
| @@ -4489,7 +4476,7 @@ static int nl80211_dump_station(struct sk_buff *skb, | |||
| 4489 | cb->args[2] = sta_idx; | 4476 | cb->args[2] = sta_idx; |
| 4490 | err = skb->len; | 4477 | err = skb->len; |
| 4491 | out_err: | 4478 | out_err: |
| 4492 | nl80211_finish_wdev_dump(rdev); | 4479 | rtnl_unlock(); |
| 4493 | 4480 | ||
| 4494 | return err; | 4481 | return err; |
| 4495 | } | 4482 | } |
| @@ -5275,9 +5262,10 @@ static int nl80211_dump_mpath(struct sk_buff *skb, | |||
| 5275 | int path_idx = cb->args[2]; | 5262 | int path_idx = cb->args[2]; |
| 5276 | int err; | 5263 | int err; |
| 5277 | 5264 | ||
| 5265 | rtnl_lock(); | ||
| 5278 | err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); | 5266 | err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); |
| 5279 | if (err) | 5267 | if (err) |
| 5280 | return err; | 5268 | goto out_err; |
| 5281 | 5269 | ||
| 5282 | if (!rdev->ops->dump_mpath) { | 5270 | if (!rdev->ops->dump_mpath) { |
| 5283 | err = -EOPNOTSUPP; | 5271 | err = -EOPNOTSUPP; |
| @@ -5310,7 +5298,7 @@ static int nl80211_dump_mpath(struct sk_buff *skb, | |||
| 5310 | cb->args[2] = path_idx; | 5298 | cb->args[2] = path_idx; |
| 5311 | err = skb->len; | 5299 | err = skb->len; |
| 5312 | out_err: | 5300 | out_err: |
| 5313 | nl80211_finish_wdev_dump(rdev); | 5301 | rtnl_unlock(); |
| 5314 | return err; | 5302 | return err; |
| 5315 | } | 5303 | } |
| 5316 | 5304 | ||
| @@ -5470,9 +5458,10 @@ static int nl80211_dump_mpp(struct sk_buff *skb, | |||
| 5470 | int path_idx = cb->args[2]; | 5458 | int path_idx = cb->args[2]; |
| 5471 | int err; | 5459 | int err; |
| 5472 | 5460 | ||
| 5461 | rtnl_lock(); | ||
| 5473 | err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); | 5462 | err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); |
| 5474 | if (err) | 5463 | if (err) |
| 5475 | return err; | 5464 | goto out_err; |
| 5476 | 5465 | ||
| 5477 | if (!rdev->ops->dump_mpp) { | 5466 | if (!rdev->ops->dump_mpp) { |
| 5478 | err = -EOPNOTSUPP; | 5467 | err = -EOPNOTSUPP; |
| @@ -5505,7 +5494,7 @@ static int nl80211_dump_mpp(struct sk_buff *skb, | |||
| 5505 | cb->args[2] = path_idx; | 5494 | cb->args[2] = path_idx; |
| 5506 | err = skb->len; | 5495 | err = skb->len; |
| 5507 | out_err: | 5496 | out_err: |
| 5508 | nl80211_finish_wdev_dump(rdev); | 5497 | rtnl_unlock(); |
| 5509 | return err; | 5498 | return err; |
| 5510 | } | 5499 | } |
| 5511 | 5500 | ||
| @@ -7674,9 +7663,12 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 7674 | int start = cb->args[2], idx = 0; | 7663 | int start = cb->args[2], idx = 0; |
| 7675 | int err; | 7664 | int err; |
| 7676 | 7665 | ||
| 7666 | rtnl_lock(); | ||
| 7677 | err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); | 7667 | err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); |
| 7678 | if (err) | 7668 | if (err) { |
| 7669 | rtnl_unlock(); | ||
| 7679 | return err; | 7670 | return err; |
| 7671 | } | ||
| 7680 | 7672 | ||
| 7681 | wdev_lock(wdev); | 7673 | wdev_lock(wdev); |
| 7682 | spin_lock_bh(&rdev->bss_lock); | 7674 | spin_lock_bh(&rdev->bss_lock); |
| @@ -7699,7 +7691,7 @@ static int nl80211_dump_scan(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 7699 | wdev_unlock(wdev); | 7691 | wdev_unlock(wdev); |
| 7700 | 7692 | ||
| 7701 | cb->args[2] = idx; | 7693 | cb->args[2] = idx; |
| 7702 | nl80211_finish_wdev_dump(rdev); | 7694 | rtnl_unlock(); |
| 7703 | 7695 | ||
| 7704 | return skb->len; | 7696 | return skb->len; |
| 7705 | } | 7697 | } |
| @@ -7784,9 +7776,10 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 7784 | int res; | 7776 | int res; |
| 7785 | bool radio_stats; | 7777 | bool radio_stats; |
| 7786 | 7778 | ||
| 7779 | rtnl_lock(); | ||
| 7787 | res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); | 7780 | res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev); |
| 7788 | if (res) | 7781 | if (res) |
| 7789 | return res; | 7782 | goto out_err; |
| 7790 | 7783 | ||
| 7791 | /* prepare_wdev_dump parsed the attributes */ | 7784 | /* prepare_wdev_dump parsed the attributes */ |
| 7792 | radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS]; | 7785 | radio_stats = attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS]; |
| @@ -7827,7 +7820,7 @@ static int nl80211_dump_survey(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 7827 | cb->args[2] = survey_idx; | 7820 | cb->args[2] = survey_idx; |
| 7828 | res = skb->len; | 7821 | res = skb->len; |
| 7829 | out_err: | 7822 | out_err: |
| 7830 | nl80211_finish_wdev_dump(rdev); | 7823 | rtnl_unlock(); |
| 7831 | return res; | 7824 | return res; |
| 7832 | } | 7825 | } |
| 7833 | 7826 | ||
| @@ -11508,17 +11501,13 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, | |||
| 11508 | void *data = NULL; | 11501 | void *data = NULL; |
| 11509 | unsigned int data_len = 0; | 11502 | unsigned int data_len = 0; |
| 11510 | 11503 | ||
| 11511 | rtnl_lock(); | ||
| 11512 | |||
| 11513 | if (cb->args[0]) { | 11504 | if (cb->args[0]) { |
| 11514 | /* subtract the 1 again here */ | 11505 | /* subtract the 1 again here */ |
| 11515 | struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); | 11506 | struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); |
| 11516 | struct wireless_dev *tmp; | 11507 | struct wireless_dev *tmp; |
| 11517 | 11508 | ||
| 11518 | if (!wiphy) { | 11509 | if (!wiphy) |
| 11519 | err = -ENODEV; | 11510 | return -ENODEV; |
| 11520 | goto out_unlock; | ||
| 11521 | } | ||
| 11522 | *rdev = wiphy_to_rdev(wiphy); | 11511 | *rdev = wiphy_to_rdev(wiphy); |
| 11523 | *wdev = NULL; | 11512 | *wdev = NULL; |
| 11524 | 11513 | ||
| @@ -11538,23 +11527,19 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, | |||
| 11538 | err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, | 11527 | err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, |
| 11539 | attrbuf, nl80211_fam.maxattr, nl80211_policy); | 11528 | attrbuf, nl80211_fam.maxattr, nl80211_policy); |
| 11540 | if (err) | 11529 | if (err) |
| 11541 | goto out_unlock; | 11530 | return err; |
| 11542 | 11531 | ||
| 11543 | if (!attrbuf[NL80211_ATTR_VENDOR_ID] || | 11532 | if (!attrbuf[NL80211_ATTR_VENDOR_ID] || |
| 11544 | !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) { | 11533 | !attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) |
| 11545 | err = -EINVAL; | 11534 | return -EINVAL; |
| 11546 | goto out_unlock; | ||
| 11547 | } | ||
| 11548 | 11535 | ||
| 11549 | *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf); | 11536 | *wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk), attrbuf); |
| 11550 | if (IS_ERR(*wdev)) | 11537 | if (IS_ERR(*wdev)) |
| 11551 | *wdev = NULL; | 11538 | *wdev = NULL; |
| 11552 | 11539 | ||
| 11553 | *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf); | 11540 | *rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk), attrbuf); |
| 11554 | if (IS_ERR(*rdev)) { | 11541 | if (IS_ERR(*rdev)) |
| 11555 | err = PTR_ERR(*rdev); | 11542 | return PTR_ERR(*rdev); |
| 11556 | goto out_unlock; | ||
| 11557 | } | ||
| 11558 | 11543 | ||
| 11559 | vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]); | 11544 | vid = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_ID]); |
| 11560 | subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]); | 11545 | subcmd = nla_get_u32(attrbuf[NL80211_ATTR_VENDOR_SUBCMD]); |
| @@ -11567,19 +11552,15 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, | |||
| 11567 | if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) | 11552 | if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd) |
| 11568 | continue; | 11553 | continue; |
| 11569 | 11554 | ||
| 11570 | if (!vcmd->dumpit) { | 11555 | if (!vcmd->dumpit) |
| 11571 | err = -EOPNOTSUPP; | 11556 | return -EOPNOTSUPP; |
| 11572 | goto out_unlock; | ||
| 11573 | } | ||
| 11574 | 11557 | ||
| 11575 | vcmd_idx = i; | 11558 | vcmd_idx = i; |
| 11576 | break; | 11559 | break; |
| 11577 | } | 11560 | } |
| 11578 | 11561 | ||
| 11579 | if (vcmd_idx < 0) { | 11562 | if (vcmd_idx < 0) |
| 11580 | err = -EOPNOTSUPP; | 11563 | return -EOPNOTSUPP; |
| 11581 | goto out_unlock; | ||
| 11582 | } | ||
| 11583 | 11564 | ||
| 11584 | if (attrbuf[NL80211_ATTR_VENDOR_DATA]) { | 11565 | if (attrbuf[NL80211_ATTR_VENDOR_DATA]) { |
| 11585 | data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]); | 11566 | data = nla_data(attrbuf[NL80211_ATTR_VENDOR_DATA]); |
| @@ -11596,9 +11577,6 @@ static int nl80211_prepare_vendor_dump(struct sk_buff *skb, | |||
| 11596 | 11577 | ||
| 11597 | /* keep rtnl locked in successful case */ | 11578 | /* keep rtnl locked in successful case */ |
| 11598 | return 0; | 11579 | return 0; |
| 11599 | out_unlock: | ||
| 11600 | rtnl_unlock(); | ||
| 11601 | return err; | ||
| 11602 | } | 11580 | } |
| 11603 | 11581 | ||
| 11604 | static int nl80211_vendor_cmd_dump(struct sk_buff *skb, | 11582 | static int nl80211_vendor_cmd_dump(struct sk_buff *skb, |
| @@ -11613,9 +11591,10 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb, | |||
| 11613 | int err; | 11591 | int err; |
| 11614 | struct nlattr *vendor_data; | 11592 | struct nlattr *vendor_data; |
| 11615 | 11593 | ||
| 11594 | rtnl_lock(); | ||
| 11616 | err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev); | 11595 | err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev); |
| 11617 | if (err) | 11596 | if (err) |
| 11618 | return err; | 11597 | goto out; |
| 11619 | 11598 | ||
| 11620 | vcmd_idx = cb->args[2]; | 11599 | vcmd_idx = cb->args[2]; |
| 11621 | data = (void *)cb->args[3]; | 11600 | data = (void *)cb->args[3]; |
| @@ -11624,15 +11603,21 @@ static int nl80211_vendor_cmd_dump(struct sk_buff *skb, | |||
| 11624 | 11603 | ||
| 11625 | if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV | | 11604 | if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV | |
| 11626 | WIPHY_VENDOR_CMD_NEED_NETDEV)) { | 11605 | WIPHY_VENDOR_CMD_NEED_NETDEV)) { |
| 11627 | if (!wdev) | 11606 | if (!wdev) { |
| 11628 | return -EINVAL; | 11607 | err = -EINVAL; |
| 11608 | goto out; | ||
| 11609 | } | ||
| 11629 | if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV && | 11610 | if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV && |
| 11630 | !wdev->netdev) | 11611 | !wdev->netdev) { |
| 11631 | return -EINVAL; | 11612 | err = -EINVAL; |
| 11613 | goto out; | ||
| 11614 | } | ||
| 11632 | 11615 | ||
| 11633 | if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) { | 11616 | if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) { |
| 11634 | if (!wdev_running(wdev)) | 11617 | if (!wdev_running(wdev)) { |
| 11635 | return -ENETDOWN; | 11618 | err = -ENETDOWN; |
| 11619 | goto out; | ||
| 11620 | } | ||
| 11636 | } | 11621 | } |
| 11637 | } | 11622 | } |
| 11638 | 11623 | ||
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c index 4c935202ce23..f3b1d7f50b81 100644 --- a/sound/core/seq/seq_clientmgr.c +++ b/sound/core/seq/seq_clientmgr.c | |||
| @@ -1832,6 +1832,7 @@ static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client, | |||
| 1832 | info->output_pool != client->pool->size)) { | 1832 | info->output_pool != client->pool->size)) { |
| 1833 | if (snd_seq_write_pool_allocated(client)) { | 1833 | if (snd_seq_write_pool_allocated(client)) { |
| 1834 | /* remove all existing cells */ | 1834 | /* remove all existing cells */ |
| 1835 | snd_seq_pool_mark_closing(client->pool); | ||
| 1835 | snd_seq_queue_client_leave_cells(client->number); | 1836 | snd_seq_queue_client_leave_cells(client->number); |
| 1836 | snd_seq_pool_done(client->pool); | 1837 | snd_seq_pool_done(client->pool); |
| 1837 | } | 1838 | } |
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c index 448efd4e980e..33980d1c8037 100644 --- a/sound/core/seq/seq_fifo.c +++ b/sound/core/seq/seq_fifo.c | |||
| @@ -72,6 +72,9 @@ void snd_seq_fifo_delete(struct snd_seq_fifo **fifo) | |||
| 72 | return; | 72 | return; |
| 73 | *fifo = NULL; | 73 | *fifo = NULL; |
| 74 | 74 | ||
| 75 | if (f->pool) | ||
| 76 | snd_seq_pool_mark_closing(f->pool); | ||
| 77 | |||
| 75 | snd_seq_fifo_clear(f); | 78 | snd_seq_fifo_clear(f); |
| 76 | 79 | ||
| 77 | /* wake up clients if any */ | 80 | /* wake up clients if any */ |
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c index 1a1acf3ddda4..d4c61ec9be13 100644 --- a/sound/core/seq/seq_memory.c +++ b/sound/core/seq/seq_memory.c | |||
| @@ -415,6 +415,18 @@ int snd_seq_pool_init(struct snd_seq_pool *pool) | |||
| 415 | return 0; | 415 | return 0; |
| 416 | } | 416 | } |
| 417 | 417 | ||
| 418 | /* refuse further insertions into the pool */ | ||
| 419 | void snd_seq_pool_mark_closing(struct snd_seq_pool *pool) | ||
| 420 | { | ||
| 421 | unsigned long flags; | ||
| 422 | |||
| 423 | if (snd_BUG_ON(!pool)) | ||
| 424 | return; | ||
| 425 | spin_lock_irqsave(&pool->lock, flags); | ||
| 426 | pool->closing = 1; | ||
| 427 | spin_unlock_irqrestore(&pool->lock, flags); | ||
| 428 | } | ||
| 429 | |||
| 418 | /* remove events */ | 430 | /* remove events */ |
| 419 | int snd_seq_pool_done(struct snd_seq_pool *pool) | 431 | int snd_seq_pool_done(struct snd_seq_pool *pool) |
| 420 | { | 432 | { |
| @@ -425,10 +437,6 @@ int snd_seq_pool_done(struct snd_seq_pool *pool) | |||
| 425 | return -EINVAL; | 437 | return -EINVAL; |
| 426 | 438 | ||
| 427 | /* wait for closing all threads */ | 439 | /* wait for closing all threads */ |
| 428 | spin_lock_irqsave(&pool->lock, flags); | ||
| 429 | pool->closing = 1; | ||
| 430 | spin_unlock_irqrestore(&pool->lock, flags); | ||
| 431 | |||
| 432 | if (waitqueue_active(&pool->output_sleep)) | 440 | if (waitqueue_active(&pool->output_sleep)) |
| 433 | wake_up(&pool->output_sleep); | 441 | wake_up(&pool->output_sleep); |
| 434 | 442 | ||
| @@ -485,6 +493,7 @@ int snd_seq_pool_delete(struct snd_seq_pool **ppool) | |||
| 485 | *ppool = NULL; | 493 | *ppool = NULL; |
| 486 | if (pool == NULL) | 494 | if (pool == NULL) |
| 487 | return 0; | 495 | return 0; |
| 496 | snd_seq_pool_mark_closing(pool); | ||
| 488 | snd_seq_pool_done(pool); | 497 | snd_seq_pool_done(pool); |
| 489 | kfree(pool); | 498 | kfree(pool); |
| 490 | return 0; | 499 | return 0; |
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h index 4a2ec779b8a7..32f959c17786 100644 --- a/sound/core/seq/seq_memory.h +++ b/sound/core/seq/seq_memory.h | |||
| @@ -84,6 +84,7 @@ static inline int snd_seq_total_cells(struct snd_seq_pool *pool) | |||
| 84 | int snd_seq_pool_init(struct snd_seq_pool *pool); | 84 | int snd_seq_pool_init(struct snd_seq_pool *pool); |
| 85 | 85 | ||
| 86 | /* done pool - free events */ | 86 | /* done pool - free events */ |
| 87 | void snd_seq_pool_mark_closing(struct snd_seq_pool *pool); | ||
| 87 | int snd_seq_pool_done(struct snd_seq_pool *pool); | 88 | int snd_seq_pool_done(struct snd_seq_pool *pool); |
| 88 | 89 | ||
| 89 | /* create pool */ | 90 | /* create pool */ |
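
snd_seq_pool_mark_closing() factors the pool->closing handshake out of snd_seq_pool_done(), so each teardown path can refuse new cell insertions before it starts draining. The ordering the call sites establish on the client-pool reset path, roughly:

    snd_seq_pool_mark_closing(client->pool);          /* 1. no new cells accepted */
    snd_seq_queue_client_leave_cells(client->number); /* 2. pull cells back from queues */
    snd_seq_pool_done(client->pool);                  /* 3. wake sleepers, free pages */
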
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c index ab4cdab5cfa5..79edd88d5cd0 100644 --- a/sound/pci/ctxfi/cthw20k1.c +++ b/sound/pci/ctxfi/cthw20k1.c | |||
| @@ -1905,7 +1905,7 @@ static int hw_card_start(struct hw *hw) | |||
| 1905 | return err; | 1905 | return err; |
| 1906 | 1906 | ||
| 1907 | /* Set DMA transfer mask */ | 1907 | /* Set DMA transfer mask */ |
| 1908 | if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { | 1908 | if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) { |
| 1909 | dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); | 1909 | dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits)); |
| 1910 | } else { | 1910 | } else { |
| 1911 | dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); | 1911 | dma_set_mask(&pci->dev, DMA_BIT_MASK(32)); |
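
The cthw20k1 change is a polarity fix: dma_set_mask() returns 0 on success and a negative errno on failure, so the old `if (dma_set_mask(...))` entered the dma_bits branch precisely when setting the mask had failed. The corrected shape, with the 32-bit fallback reconstructed from context:

    if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
            /* mask accepted: mirror it for coherent allocations */
            dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
    } else {
            /* mask rejected: fall back to 32-bit DMA */
            dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
            dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(32));
    }
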
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index c15c51bea26d..69266b8ea2ad 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
| @@ -261,6 +261,7 @@ enum { | |||
| 261 | CXT_FIXUP_HP_530, | 261 | CXT_FIXUP_HP_530, |
| 262 | CXT_FIXUP_CAP_MIX_AMP_5047, | 262 | CXT_FIXUP_CAP_MIX_AMP_5047, |
| 263 | CXT_FIXUP_MUTE_LED_EAPD, | 263 | CXT_FIXUP_MUTE_LED_EAPD, |
| 264 | CXT_FIXUP_HP_DOCK, | ||
| 264 | CXT_FIXUP_HP_SPECTRE, | 265 | CXT_FIXUP_HP_SPECTRE, |
| 265 | CXT_FIXUP_HP_GATE_MIC, | 266 | CXT_FIXUP_HP_GATE_MIC, |
| 266 | }; | 267 | }; |
| @@ -778,6 +779,14 @@ static const struct hda_fixup cxt_fixups[] = { | |||
| 778 | .type = HDA_FIXUP_FUNC, | 779 | .type = HDA_FIXUP_FUNC, |
| 779 | .v.func = cxt_fixup_mute_led_eapd, | 780 | .v.func = cxt_fixup_mute_led_eapd, |
| 780 | }, | 781 | }, |
| 782 | [CXT_FIXUP_HP_DOCK] = { | ||
| 783 | .type = HDA_FIXUP_PINS, | ||
| 784 | .v.pins = (const struct hda_pintbl[]) { | ||
| 785 | { 0x16, 0x21011020 }, /* line-out */ | ||
| 786 | { 0x18, 0x2181103f }, /* line-in */ | ||
| 787 | { } | ||
| 788 | } | ||
| 789 | }, | ||
| 781 | [CXT_FIXUP_HP_SPECTRE] = { | 790 | [CXT_FIXUP_HP_SPECTRE] = { |
| 782 | .type = HDA_FIXUP_PINS, | 791 | .type = HDA_FIXUP_PINS, |
| 783 | .v.pins = (const struct hda_pintbl[]) { | 792 | .v.pins = (const struct hda_pintbl[]) { |
| @@ -839,6 +848,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { | |||
| 839 | SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), | 848 | SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), |
| 840 | SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), | 849 | SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), |
| 841 | SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC), | 850 | SND_PCI_QUIRK(0x1025, 0x054f, "Acer Aspire 4830T", CXT_FIXUP_ASPIRE_DMIC), |
| 851 | SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK), | ||
| 842 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), | 852 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), |
| 843 | SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), | 853 | SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), |
| 844 | SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), | 854 | SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), |
| @@ -871,6 +881,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { | |||
| 871 | { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" }, | 881 | { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" }, |
| 872 | { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, | 882 | { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, |
| 873 | { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" }, | 883 | { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" }, |
| 884 | { .id = CXT_FIXUP_HP_DOCK, .name = "hp-dock" }, | ||
| 874 | {} | 885 | {} |
| 875 | }; | 886 | }; |
| 876 | 887 | ||
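
Because CXT_FIXUP_HP_DOCK also gets a name in cxt5066_fixup_models, the dock pin overrides are not limited to machines matching the 0x103c:0x8079 SSID: they can be requested explicitly through the standard HDA model option. A usage note (the file name is just an example):

    # /etc/modprobe.d/hda-dock.conf
    options snd-hda-intel model=hp-dock
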
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 4e112221d825..7f989898cbd9 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -4847,6 +4847,7 @@ enum { | |||
| 4847 | ALC286_FIXUP_HP_GPIO_LED, | 4847 | ALC286_FIXUP_HP_GPIO_LED, |
| 4848 | ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, | 4848 | ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY, |
| 4849 | ALC280_FIXUP_HP_DOCK_PINS, | 4849 | ALC280_FIXUP_HP_DOCK_PINS, |
| 4850 | ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, | ||
| 4850 | ALC280_FIXUP_HP_9480M, | 4851 | ALC280_FIXUP_HP_9480M, |
| 4851 | ALC288_FIXUP_DELL_HEADSET_MODE, | 4852 | ALC288_FIXUP_DELL_HEADSET_MODE, |
| 4852 | ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, | 4853 | ALC288_FIXUP_DELL1_MIC_NO_PRESENCE, |
| @@ -5388,6 +5389,16 @@ static const struct hda_fixup alc269_fixups[] = { | |||
| 5388 | .chained = true, | 5389 | .chained = true, |
| 5389 | .chain_id = ALC280_FIXUP_HP_GPIO4 | 5390 | .chain_id = ALC280_FIXUP_HP_GPIO4 |
| 5390 | }, | 5391 | }, |
| 5392 | [ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED] = { | ||
| 5393 | .type = HDA_FIXUP_PINS, | ||
| 5394 | .v.pins = (const struct hda_pintbl[]) { | ||
| 5395 | { 0x1b, 0x21011020 }, /* line-out */ | ||
| 5396 | { 0x18, 0x2181103f }, /* line-in */ | ||
| 5397 | { }, | ||
| 5398 | }, | ||
| 5399 | .chained = true, | ||
| 5400 | .chain_id = ALC269_FIXUP_HP_GPIO_MIC1_LED | ||
| 5401 | }, | ||
| 5391 | [ALC280_FIXUP_HP_9480M] = { | 5402 | [ALC280_FIXUP_HP_9480M] = { |
| 5392 | .type = HDA_FIXUP_FUNC, | 5403 | .type = HDA_FIXUP_FUNC, |
| 5393 | .v.func = alc280_fixup_hp_9480m, | 5404 | .v.func = alc280_fixup_hp_9480m, |
| @@ -5647,7 +5658,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 5647 | SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), | 5658 | SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), |
| 5648 | SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), | 5659 | SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), |
| 5649 | SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), | 5660 | SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), |
| 5650 | SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), | 5661 | SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED), |
| 5651 | SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), | 5662 | SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
| 5652 | SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), | 5663 | SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
| 5653 | SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), | 5664 | SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
| @@ -5816,6 +5827,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { | |||
| 5816 | {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"}, | 5827 | {.id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC, .name = "headset-mode-no-hp-mic"}, |
| 5817 | {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"}, | 5828 | {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"}, |
| 5818 | {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, | 5829 | {.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"}, |
| 5830 | {.id = ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED, .name = "hp-dock-gpio-mic1-led"}, | ||
| 5819 | {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, | 5831 | {.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"}, |
| 5820 | {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"}, | 5832 | {.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"}, |
| 5821 | {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"}, | 5833 | {.id = ALC283_FIXUP_CHROME_BOOK, .name = "alc283-dac-wcaps"}, |
| @@ -6090,6 +6102,8 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
| 6090 | ALC295_STANDARD_PINS, | 6102 | ALC295_STANDARD_PINS, |
| 6091 | {0x17, 0x21014040}, | 6103 | {0x17, 0x21014040}, |
| 6092 | {0x18, 0x21a19050}), | 6104 | {0x18, 0x21a19050}), |
| 6105 | SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, | ||
| 6106 | ALC295_STANDARD_PINS), | ||
| 6093 | SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, | 6107 | SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, |
| 6094 | ALC298_STANDARD_PINS, | 6108 | ALC298_STANDARD_PINS, |
| 6095 | {0x17, 0x90170110}), | 6109 | {0x17, 0x90170110}), |
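
ALC269_FIXUP_HP_DOCK_GPIO_MIC1_LED illustrates the chaining idiom used throughout these fixup tables: the entry itself is applied first, then `.chained = true` forwards to `.chain_id`. Conceptually:

    /* Chained fixup resolution (conceptual outline):
     *   1. the HDA_FIXUP_PINS entry applies the dock pin overrides
     *      (0x1b -> line-out 0x21011020, 0x18 -> line-in 0x2181103f);
     *   2. control passes to ALC269_FIXUP_HP_GPIO_MIC1_LED, so the
     *      existing GPIO mute-LED behaviour is preserved on top.
     */
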
diff --git a/sound/x86/Kconfig b/sound/x86/Kconfig index 84c8f8fc597c..8adf4d1bd46e 100644 --- a/sound/x86/Kconfig +++ b/sound/x86/Kconfig | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | menuconfig SND_X86 | 1 | menuconfig SND_X86 |
| 2 | tristate "X86 sound devices" | 2 | bool "X86 sound devices" |
| 3 | depends on X86 | 3 | depends on X86 |
| 4 | default y | ||
| 4 | ---help--- | 5 | ---help--- |
| 5 | X86 sound devices that don't fall under SoC or PCI categories | 6 | X86 sound devices that don't fall under SoC or PCI categories |
| 6 | 7 | ||
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 67531f47781b..6a1ad58cb66f 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile | |||
| @@ -1,22 +1,23 @@ | |||
| 1 | LIBDIR := ../../../lib | 1 | LIBDIR := ../../../lib |
| 2 | BPFOBJ := $(LIBDIR)/bpf/bpf.o | 2 | BPFDIR := $(LIBDIR)/bpf |
| 3 | 3 | ||
| 4 | CFLAGS += -Wall -O2 -lcap -I../../../include/uapi -I$(LIBDIR) $(BPFOBJ) | 4 | CFLAGS += -Wall -O2 -I../../../include/uapi -I$(LIBDIR) |
| 5 | LDLIBS += -lcap | ||
| 5 | 6 | ||
| 6 | TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map | 7 | TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map |
| 7 | 8 | ||
| 8 | TEST_PROGS := test_kmod.sh | 9 | TEST_PROGS := test_kmod.sh |
| 9 | 10 | ||
| 10 | all: $(TEST_GEN_PROGS) | 11 | include ../lib.mk |
| 12 | |||
| 13 | BPFOBJ := $(OUTPUT)/bpf.o | ||
| 14 | |||
| 15 | $(TEST_GEN_PROGS): $(BPFOBJ) | ||
| 11 | 16 | ||
| 12 | .PHONY: all clean force | 17 | .PHONY: force |
| 13 | 18 | ||
| 14 | # force a rebuild of BPFOBJ when its dependencies are updated | 19 | # force a rebuild of BPFOBJ when its dependencies are updated |
| 15 | force: | 20 | force: |
| 16 | 21 | ||
| 17 | $(BPFOBJ): force | 22 | $(BPFOBJ): force |
| 18 | $(MAKE) -C $(dir $(BPFOBJ)) | 23 | $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ |
| 19 | |||
| 20 | $(test_objs): $(BPFOBJ) | ||
| 21 | |||
| 22 | include ../lib.mk | ||
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index cada17ac00b8..a0aa2009b0e0 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c | |||
| @@ -80,8 +80,9 @@ static void test_hashmap(int task, void *data) | |||
| 80 | assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0); | 80 | assert(bpf_map_update_elem(fd, &key, &value, BPF_EXIST) == 0); |
| 81 | key = 2; | 81 | key = 2; |
| 82 | assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); | 82 | assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); |
| 83 | key = 1; | 83 | key = 3; |
| 84 | assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); | 84 | assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 && |
| 85 | errno == E2BIG); | ||
| 85 | 86 | ||
| 86 | /* Check that key = 0 doesn't exist. */ | 87 | /* Check that key = 0 doesn't exist. */ |
| 87 | key = 0; | 88 | key = 0; |
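
The reworked assertion leans on the map geometry set up earlier in test_hashmap(): the map is created with max_entries = 2 and keys 1 and 2 already occupy both slots, so a BPF_NOEXIST insert of a third key must be refused with E2BIG rather than silently overwrite anything. In miniature:

    /* max_entries == 2; both slots already hold keys 1 and 2 */
    key = 3;
    assert(bpf_map_update_elem(fd, &key, &value, BPF_NOEXIST) == -1 &&
           errno == E2BIG);   /* table full: a new key must be rejected */
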
| @@ -110,6 +111,24 @@ static void test_hashmap(int task, void *data) | |||
| 110 | close(fd); | 111 | close(fd); |
| 111 | } | 112 | } |
| 112 | 113 | ||
| 114 | static void test_hashmap_sizes(int task, void *data) | ||
| 115 | { | ||
| 116 | int fd, i, j; | ||
| 117 | |||
| 118 | for (i = 1; i <= 512; i <<= 1) | ||
| 119 | for (j = 1; j <= 1 << 18; j <<= 1) { | ||
| 120 | fd = bpf_create_map(BPF_MAP_TYPE_HASH, i, j, | ||
| 121 | 2, map_flags); | ||
| 122 | if (fd < 0) { | ||
| 123 | printf("Failed to create hashmap key=%d value=%d '%s'\n", | ||
| 124 | i, j, strerror(errno)); | ||
| 125 | exit(1); | ||
| 126 | } | ||
| 127 | close(fd); | ||
| 128 | usleep(10); /* give kernel time to destroy */ | ||
| 129 | } | ||
| 130 | } | ||
| 131 | |||
| 113 | static void test_hashmap_percpu(int task, void *data) | 132 | static void test_hashmap_percpu(int task, void *data) |
| 114 | { | 133 | { |
| 115 | unsigned int nr_cpus = bpf_num_possible_cpus(); | 134 | unsigned int nr_cpus = bpf_num_possible_cpus(); |
| @@ -317,7 +336,10 @@ static void test_arraymap_percpu(int task, void *data) | |||
| 317 | static void test_arraymap_percpu_many_keys(void) | 336 | static void test_arraymap_percpu_many_keys(void) |
| 318 | { | 337 | { |
| 319 | unsigned int nr_cpus = bpf_num_possible_cpus(); | 338 | unsigned int nr_cpus = bpf_num_possible_cpus(); |
| 320 | unsigned int nr_keys = 20000; | 339 | /* nr_keys is kept small; otherwise the test stresses the percpu |
| 340 | * allocator more than anything else. | ||
| 341 | */ | ||
| 342 | unsigned int nr_keys = 2000; | ||
| 321 | long values[nr_cpus]; | 343 | long values[nr_cpus]; |
| 322 | int key, fd, i; | 344 | int key, fd, i; |
| 323 | 345 | ||
| @@ -419,6 +441,7 @@ static void test_map_stress(void) | |||
| 419 | { | 441 | { |
| 420 | run_parallel(100, test_hashmap, NULL); | 442 | run_parallel(100, test_hashmap, NULL); |
| 421 | run_parallel(100, test_hashmap_percpu, NULL); | 443 | run_parallel(100, test_hashmap_percpu, NULL); |
| 444 | run_parallel(100, test_hashmap_sizes, NULL); | ||
| 422 | 445 | ||
| 423 | run_parallel(100, test_arraymap, NULL); | 446 | run_parallel(100, test_arraymap, NULL); |
| 424 | run_parallel(100, test_arraymap_percpu, NULL); | 447 | run_parallel(100, test_arraymap_percpu, NULL); |
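
test_hashmap_sizes() sweeps the map create path by geometry rather than by content, and the usleep(10) between iterations gives the kernel time to finish destroying the previous map. The coverage arithmetic:

    /* keys:   1 << 0 .. 1 << 9        -> 10 sizes
     * values: 1 << 0 .. 1 << 18 bytes -> 19 sizes
     * => 10 * 19 = 190 create/close cycles per task,
     *    times 100 parallel tasks via run_parallel().
     */
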
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index a29786dd9522..4d28a9ddbee0 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c | |||
| @@ -870,7 +870,8 @@ kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx, | |||
| 870 | continue; | 870 | continue; |
| 871 | 871 | ||
| 872 | kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); | 872 | kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev); |
| 873 | kvm->buses[bus_idx]->ioeventfd_count--; | 873 | if (kvm->buses[bus_idx]) |
| 874 | kvm->buses[bus_idx]->ioeventfd_count--; | ||
| 874 | ioeventfd_release(p); | 875 | ioeventfd_release(p); |
| 875 | ret = 0; | 876 | ret = 0; |
| 876 | break; | 877 | break; |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index a17d78759727..88257b311cb5 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -727,8 +727,11 @@ static void kvm_destroy_vm(struct kvm *kvm) | |||
| 727 | list_del(&kvm->vm_list); | 727 | list_del(&kvm->vm_list); |
| 728 | spin_unlock(&kvm_lock); | 728 | spin_unlock(&kvm_lock); |
| 729 | kvm_free_irq_routing(kvm); | 729 | kvm_free_irq_routing(kvm); |
| 730 | for (i = 0; i < KVM_NR_BUSES; i++) | 730 | for (i = 0; i < KVM_NR_BUSES; i++) { |
| 731 | kvm_io_bus_destroy(kvm->buses[i]); | 731 | if (kvm->buses[i]) |
| 732 | kvm_io_bus_destroy(kvm->buses[i]); | ||
| 733 | kvm->buses[i] = NULL; | ||
| 734 | } | ||
| 732 | kvm_coalesced_mmio_free(kvm); | 735 | kvm_coalesced_mmio_free(kvm); |
| 733 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) | 736 | #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER) |
| 734 | mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); | 737 | mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); |
| @@ -1062,7 +1065,7 @@ int __kvm_set_memory_region(struct kvm *kvm, | |||
| 1062 | * changes) is disallowed above, so any other attribute changes getting | 1065 | * changes) is disallowed above, so any other attribute changes getting |
| 1063 | * here can be skipped. | 1066 | * here can be skipped. |
| 1064 | */ | 1067 | */ |
| 1065 | if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) { | 1068 | if (as_id == 0 && (change == KVM_MR_CREATE || change == KVM_MR_MOVE)) { |
| 1066 | r = kvm_iommu_map_pages(kvm, &new); | 1069 | r = kvm_iommu_map_pages(kvm, &new); |
| 1067 | return r; | 1070 | return r; |
| 1068 | } | 1071 | } |
| @@ -3474,6 +3477,8 @@ int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, | |||
| 3474 | }; | 3477 | }; |
| 3475 | 3478 | ||
| 3476 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); | 3479 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
| 3480 | if (!bus) | ||
| 3481 | return -ENOMEM; | ||
| 3477 | r = __kvm_io_bus_write(vcpu, bus, &range, val); | 3482 | r = __kvm_io_bus_write(vcpu, bus, &range, val); |
| 3478 | return r < 0 ? r : 0; | 3483 | return r < 0 ? r : 0; |
| 3479 | } | 3484 | } |
| @@ -3491,6 +3496,8 @@ int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, | |||
| 3491 | }; | 3496 | }; |
| 3492 | 3497 | ||
| 3493 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); | 3498 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
| 3499 | if (!bus) | ||
| 3500 | return -ENOMEM; | ||
| 3494 | 3501 | ||
| 3495 | /* First try the device referenced by cookie. */ | 3502 | /* First try the device referenced by cookie. */ |
| 3496 | if ((cookie >= 0) && (cookie < bus->dev_count) && | 3503 | if ((cookie >= 0) && (cookie < bus->dev_count) && |
| @@ -3541,6 +3548,8 @@ int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, | |||
| 3541 | }; | 3548 | }; |
| 3542 | 3549 | ||
| 3543 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); | 3550 | bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); |
| 3551 | if (!bus) | ||
| 3552 | return -ENOMEM; | ||
| 3544 | r = __kvm_io_bus_read(vcpu, bus, &range, val); | 3553 | r = __kvm_io_bus_read(vcpu, bus, &range, val); |
| 3545 | return r < 0 ? r : 0; | 3554 | return r < 0 ? r : 0; |
| 3546 | } | 3555 | } |
| @@ -3553,6 +3562,9 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, | |||
| 3553 | struct kvm_io_bus *new_bus, *bus; | 3562 | struct kvm_io_bus *new_bus, *bus; |
| 3554 | 3563 | ||
| 3555 | bus = kvm->buses[bus_idx]; | 3564 | bus = kvm->buses[bus_idx]; |
| 3565 | if (!bus) | ||
| 3566 | return -ENOMEM; | ||
| 3567 | |||
| 3556 | /* exclude ioeventfd which is limited by maximum fd */ | 3568 | /* exclude ioeventfd which is limited by maximum fd */ |
| 3557 | if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) | 3569 | if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1) |
| 3558 | return -ENOSPC; | 3570 | return -ENOSPC; |
| @@ -3572,37 +3584,41 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, | |||
| 3572 | } | 3584 | } |
| 3573 | 3585 | ||
| 3574 | /* Caller must hold slots_lock. */ | 3586 | /* Caller must hold slots_lock. */ |
| 3575 | int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, | 3587 | void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
| 3576 | struct kvm_io_device *dev) | 3588 | struct kvm_io_device *dev) |
| 3577 | { | 3589 | { |
| 3578 | int i, r; | 3590 | int i; |
| 3579 | struct kvm_io_bus *new_bus, *bus; | 3591 | struct kvm_io_bus *new_bus, *bus; |
| 3580 | 3592 | ||
| 3581 | bus = kvm->buses[bus_idx]; | 3593 | bus = kvm->buses[bus_idx]; |
| 3582 | r = -ENOENT; | 3594 | if (!bus) |
| 3595 | return; | ||
| 3596 | |||
| 3583 | for (i = 0; i < bus->dev_count; i++) | 3597 | for (i = 0; i < bus->dev_count; i++) |
| 3584 | if (bus->range[i].dev == dev) { | 3598 | if (bus->range[i].dev == dev) { |
| 3585 | r = 0; | ||
| 3586 | break; | 3599 | break; |
| 3587 | } | 3600 | } |
| 3588 | 3601 | ||
| 3589 | if (r) | 3602 | if (i == bus->dev_count) |
| 3590 | return r; | 3603 | return; |
| 3591 | 3604 | ||
| 3592 | new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * | 3605 | new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) * |
| 3593 | sizeof(struct kvm_io_range)), GFP_KERNEL); | 3606 | sizeof(struct kvm_io_range)), GFP_KERNEL); |
| 3594 | if (!new_bus) | 3607 | if (!new_bus) { |
| 3595 | return -ENOMEM; | 3608 | pr_err("kvm: failed to shrink bus, removing it completely\n"); |
| 3609 | goto broken; | ||
| 3610 | } | ||
| 3596 | 3611 | ||
| 3597 | memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); | 3612 | memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range)); |
| 3598 | new_bus->dev_count--; | 3613 | new_bus->dev_count--; |
| 3599 | memcpy(new_bus->range + i, bus->range + i + 1, | 3614 | memcpy(new_bus->range + i, bus->range + i + 1, |
| 3600 | (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); | 3615 | (new_bus->dev_count - i) * sizeof(struct kvm_io_range)); |
| 3601 | 3616 | ||
| 3617 | broken: | ||
| 3602 | rcu_assign_pointer(kvm->buses[bus_idx], new_bus); | 3618 | rcu_assign_pointer(kvm->buses[bus_idx], new_bus); |
| 3603 | synchronize_srcu_expedited(&kvm->srcu); | 3619 | synchronize_srcu_expedited(&kvm->srcu); |
| 3604 | kfree(bus); | 3620 | kfree(bus); |
| 3605 | return r; | 3621 | return; |
| 3606 | } | 3622 | } |
| 3607 | 3623 | ||
| 3608 | struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, | 3624 | struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, |
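
The common thread in the kvm_main.c hunks is that kvm->buses[] may now legitimately hold NULL: kvm_destroy_vm() clears each slot after destroying it, and kvm_io_bus_unregister_dev() publishes NULL through the `broken:` path when it cannot allocate the shrunken copy. Every SRCU reader therefore revalidates the pointer before use:

    bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
    if (!bus)
            return -ENOMEM;   /* the bus was torn down underneath us */
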
| @@ -3615,6 +3631,8 @@ struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, | |||
| 3615 | srcu_idx = srcu_read_lock(&kvm->srcu); | 3631 | srcu_idx = srcu_read_lock(&kvm->srcu); |
| 3616 | 3632 | ||
| 3617 | bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); | 3633 | bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); |
| 3634 | if (!bus) | ||
| 3635 | goto out_unlock; | ||
| 3618 | 3636 | ||
| 3619 | dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); | 3637 | dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); |
| 3620 | if (dev_idx < 0) | 3638 | if (dev_idx < 0) |
