diff options
197 files changed, 1786 insertions, 741 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 81d1d5a74728..19f4423e70d9 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt | |||
| @@ -4713,6 +4713,8 @@ | |||
| 4713 | prevent spurious wakeup); | 4713 | prevent spurious wakeup); |
| 4714 | n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a | 4714 | n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a |
| 4715 | pause after every control message); | 4715 | pause after every control message); |
| 4716 | o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra | ||
| 4717 | delay after resetting its port); | ||
| 4716 | Example: quirks=0781:5580:bk,0a5c:5834:gij | 4718 | Example: quirks=0781:5580:bk,0a5c:5834:gij |
| 4717 | 4719 | ||
| 4718 | usbhid.mousepoll= | 4720 | usbhid.mousepoll= |
diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst index 164bf71149fd..30187d49dc2c 100644 --- a/Documentation/admin-guide/security-bugs.rst +++ b/Documentation/admin-guide/security-bugs.rst | |||
| @@ -32,16 +32,17 @@ Disclosure and embargoed information | |||
| 32 | The security list is not a disclosure channel. For that, see Coordination | 32 | The security list is not a disclosure channel. For that, see Coordination |
| 33 | below. | 33 | below. |
| 34 | 34 | ||
| 35 | Once a robust fix has been developed, our preference is to release the | 35 | Once a robust fix has been developed, the release process starts. Fixes |
| 36 | fix in a timely fashion, treating it no differently than any of the other | 36 | for publicly known bugs are released immediately. |
| 37 | thousands of changes and fixes the Linux kernel project releases every | 37 | |
| 38 | month. | 38 | Although our preference is to release fixes for publicly undisclosed bugs |
| 39 | 39 | as soon as they become available, this may be postponed at the request of | |
| 40 | However, at the request of the reporter, we will postpone releasing the | 40 | the reporter or an affected party for up to 7 calendar days from the start |
| 41 | fix for up to 5 business days after the date of the report or after the | 41 | of the release process, with an exceptional extension to 14 calendar days |
| 42 | embargo has lifted; whichever comes first. The only exception to that | 42 | if it is agreed that the criticality of the bug requires more time. The |
| 43 | rule is if the bug is publicly known, in which case the preference is to | 43 | only valid reason for deferring the publication of a fix is to accommodate |
| 44 | release the fix as soon as it's available. | 44 | the logistics of QA and large scale rollouts which require release |
| 45 | coordination. | ||
| 45 | 46 | ||
| 46 | Whilst embargoed information may be shared with trusted individuals in | 47 | Whilst embargoed information may be shared with trusted individuals in |
| 47 | order to develop a fix, such information will not be published alongside | 48 | order to develop a fix, such information will not be published alongside |
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt index 3ceeb8de1196..35694c0c376b 100644 --- a/Documentation/devicetree/bindings/net/dsa/dsa.txt +++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt | |||
| @@ -7,7 +7,7 @@ limitations. | |||
| 7 | Current Binding | 7 | Current Binding |
| 8 | --------------- | 8 | --------------- |
| 9 | 9 | ||
| 10 | Switches are true Linux devices and can be probes by any means. Once | 10 | Switches are true Linux devices and can be probed by any means. Once |
| 11 | probed, they register to the DSA framework, passing a node | 11 | probed, they register to the DSA framework, passing a node |
| 12 | pointer. This node is expected to fulfil the following binding, and | 12 | pointer. This node is expected to fulfil the following binding, and |
| 13 | may contain additional properties as required by the device it is | 13 | may contain additional properties as required by the device it is |
diff --git a/Documentation/media/uapi/v4l/dev-meta.rst b/Documentation/media/uapi/v4l/dev-meta.rst index f7ac8d0d3af1..b65dc078abeb 100644 --- a/Documentation/media/uapi/v4l/dev-meta.rst +++ b/Documentation/media/uapi/v4l/dev-meta.rst | |||
| @@ -40,7 +40,7 @@ To use the :ref:`format` ioctls applications set the ``type`` field of the | |||
| 40 | the desired operation. Both drivers and applications must set the remainder of | 40 | the desired operation. Both drivers and applications must set the remainder of |
| 41 | the :c:type:`v4l2_format` structure to 0. | 41 | the :c:type:`v4l2_format` structure to 0. |
| 42 | 42 | ||
| 43 | .. _v4l2-meta-format: | 43 | .. c:type:: v4l2_meta_format |
| 44 | 44 | ||
| 45 | .. tabularcolumns:: |p{1.4cm}|p{2.2cm}|p{13.9cm}| | 45 | .. tabularcolumns:: |p{1.4cm}|p{2.2cm}|p{13.9cm}| |
| 46 | 46 | ||
diff --git a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst index 3ead350e099f..9ea494a8faca 100644 --- a/Documentation/media/uapi/v4l/vidioc-g-fmt.rst +++ b/Documentation/media/uapi/v4l/vidioc-g-fmt.rst | |||
| @@ -133,6 +133,11 @@ The format as returned by :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` must be identical | |||
| 133 | - Definition of a data format, see :ref:`pixfmt`, used by SDR | 133 | - Definition of a data format, see :ref:`pixfmt`, used by SDR |
| 134 | capture and output devices. | 134 | capture and output devices. |
| 135 | * - | 135 | * - |
| 136 | - struct :c:type:`v4l2_meta_format` | ||
| 137 | - ``meta`` | ||
| 138 | - Definition of a metadata format, see :ref:`meta-formats`, used by | ||
| 139 | metadata capture devices. | ||
| 140 | * - | ||
| 136 | - __u8 | 141 | - __u8 |
| 137 | - ``raw_data``\ [200] | 142 | - ``raw_data``\ [200] |
| 138 | - Place holder for future extensions. | 143 | - Place holder for future extensions. |
diff --git a/MAINTAINERS b/MAINTAINERS index 68528f176875..81319971ca9a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -180,6 +180,7 @@ F: drivers/net/hamradio/6pack.c | |||
| 180 | 180 | ||
| 181 | 8169 10/100/1000 GIGABIT ETHERNET DRIVER | 181 | 8169 10/100/1000 GIGABIT ETHERNET DRIVER |
| 182 | M: Realtek linux nic maintainers <nic_swsd@realtek.com> | 182 | M: Realtek linux nic maintainers <nic_swsd@realtek.com> |
| 183 | M: Heiner Kallweit <hkallweit1@gmail.com> | ||
| 183 | L: netdev@vger.kernel.org | 184 | L: netdev@vger.kernel.org |
| 184 | S: Maintained | 185 | S: Maintained |
| 185 | F: drivers/net/ethernet/realtek/r8169.c | 186 | F: drivers/net/ethernet/realtek/r8169.c |
| @@ -5534,6 +5535,7 @@ F: net/bridge/ | |||
| 5534 | ETHERNET PHY LIBRARY | 5535 | ETHERNET PHY LIBRARY |
| 5535 | M: Andrew Lunn <andrew@lunn.ch> | 5536 | M: Andrew Lunn <andrew@lunn.ch> |
| 5536 | M: Florian Fainelli <f.fainelli@gmail.com> | 5537 | M: Florian Fainelli <f.fainelli@gmail.com> |
| 5538 | M: Heiner Kallweit <hkallweit1@gmail.com> | ||
| 5537 | L: netdev@vger.kernel.org | 5539 | L: netdev@vger.kernel.org |
| 5538 | S: Maintained | 5540 | S: Maintained |
| 5539 | F: Documentation/ABI/testing/sysfs-bus-mdio | 5541 | F: Documentation/ABI/testing/sysfs-bus-mdio |
| @@ -6305,6 +6307,7 @@ F: tools/testing/selftests/gpio/ | |||
| 6305 | 6307 | ||
| 6306 | GPIO SUBSYSTEM | 6308 | GPIO SUBSYSTEM |
| 6307 | M: Linus Walleij <linus.walleij@linaro.org> | 6309 | M: Linus Walleij <linus.walleij@linaro.org> |
| 6310 | M: Bartosz Golaszewski <bgolaszewski@baylibre.com> | ||
| 6308 | L: linux-gpio@vger.kernel.org | 6311 | L: linux-gpio@vger.kernel.org |
| 6309 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git | 6312 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git |
| 6310 | S: Maintained | 6313 | S: Maintained |
| @@ -7442,6 +7445,20 @@ S: Maintained | |||
| 7442 | F: Documentation/fb/intelfb.txt | 7445 | F: Documentation/fb/intelfb.txt |
| 7443 | F: drivers/video/fbdev/intelfb/ | 7446 | F: drivers/video/fbdev/intelfb/ |
| 7444 | 7447 | ||
| 7448 | INTEL GPIO DRIVERS | ||
| 7449 | M: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | ||
| 7450 | L: linux-gpio@vger.kernel.org | ||
| 7451 | S: Maintained | ||
| 7452 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git | ||
| 7453 | F: drivers/gpio/gpio-ich.c | ||
| 7454 | F: drivers/gpio/gpio-intel-mid.c | ||
| 7455 | F: drivers/gpio/gpio-lynxpoint.c | ||
| 7456 | F: drivers/gpio/gpio-merrifield.c | ||
| 7457 | F: drivers/gpio/gpio-ml-ioh.c | ||
| 7458 | F: drivers/gpio/gpio-pch.c | ||
| 7459 | F: drivers/gpio/gpio-sch.c | ||
| 7460 | F: drivers/gpio/gpio-sodaville.c | ||
| 7461 | |||
| 7445 | INTEL GVT-g DRIVERS (Intel GPU Virtualization) | 7462 | INTEL GVT-g DRIVERS (Intel GPU Virtualization) |
| 7446 | M: Zhenyu Wang <zhenyuw@linux.intel.com> | 7463 | M: Zhenyu Wang <zhenyuw@linux.intel.com> |
| 7447 | M: Zhi Wang <zhi.a.wang@intel.com> | 7464 | M: Zhi Wang <zhi.a.wang@intel.com> |
| @@ -7452,12 +7469,6 @@ T: git https://github.com/intel/gvt-linux.git | |||
| 7452 | S: Supported | 7469 | S: Supported |
| 7453 | F: drivers/gpu/drm/i915/gvt/ | 7470 | F: drivers/gpu/drm/i915/gvt/ |
| 7454 | 7471 | ||
| 7455 | INTEL PMIC GPIO DRIVER | ||
| 7456 | R: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | ||
| 7457 | S: Maintained | ||
| 7458 | F: drivers/gpio/gpio-*cove.c | ||
| 7459 | F: drivers/gpio/gpio-msic.c | ||
| 7460 | |||
| 7461 | INTEL HID EVENT DRIVER | 7472 | INTEL HID EVENT DRIVER |
| 7462 | M: Alex Hung <alex.hung@canonical.com> | 7473 | M: Alex Hung <alex.hung@canonical.com> |
| 7463 | L: platform-driver-x86@vger.kernel.org | 7474 | L: platform-driver-x86@vger.kernel.org |
| @@ -7545,12 +7556,6 @@ W: https://01.org/linux-acpi | |||
| 7545 | S: Supported | 7556 | S: Supported |
| 7546 | F: drivers/platform/x86/intel_menlow.c | 7557 | F: drivers/platform/x86/intel_menlow.c |
| 7547 | 7558 | ||
| 7548 | INTEL MERRIFIELD GPIO DRIVER | ||
| 7549 | M: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | ||
| 7550 | L: linux-gpio@vger.kernel.org | ||
| 7551 | S: Maintained | ||
| 7552 | F: drivers/gpio/gpio-merrifield.c | ||
| 7553 | |||
| 7554 | INTEL MIC DRIVERS (mic) | 7559 | INTEL MIC DRIVERS (mic) |
| 7555 | M: Sudeep Dutt <sudeep.dutt@intel.com> | 7560 | M: Sudeep Dutt <sudeep.dutt@intel.com> |
| 7556 | M: Ashutosh Dixit <ashutosh.dixit@intel.com> | 7561 | M: Ashutosh Dixit <ashutosh.dixit@intel.com> |
| @@ -7583,6 +7588,13 @@ F: drivers/platform/x86/intel_punit_ipc.c | |||
| 7583 | F: arch/x86/include/asm/intel_pmc_ipc.h | 7588 | F: arch/x86/include/asm/intel_pmc_ipc.h |
| 7584 | F: arch/x86/include/asm/intel_punit_ipc.h | 7589 | F: arch/x86/include/asm/intel_punit_ipc.h |
| 7585 | 7590 | ||
| 7591 | INTEL PMIC GPIO DRIVERS | ||
| 7592 | M: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | ||
| 7593 | S: Maintained | ||
| 7594 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git | ||
| 7595 | F: drivers/gpio/gpio-*cove.c | ||
| 7596 | F: drivers/gpio/gpio-msic.c | ||
| 7597 | |||
| 7586 | INTEL MULTIFUNCTION PMIC DEVICE DRIVERS | 7598 | INTEL MULTIFUNCTION PMIC DEVICE DRIVERS |
| 7587 | R: Andy Shevchenko <andriy.shevchenko@linux.intel.com> | 7599 | R: Andy Shevchenko <andriy.shevchenko@linux.intel.com> |
| 7588 | S: Maintained | 7600 | S: Maintained |
| @@ -14086,6 +14098,7 @@ F: Documentation/devicetree/bindings/iio/proximity/vl53l0x.txt | |||
| 14086 | 14098 | ||
| 14087 | STABLE BRANCH | 14099 | STABLE BRANCH |
| 14088 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 14100 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
| 14101 | M: Sasha Levin <sashal@kernel.org> | ||
| 14089 | L: stable@vger.kernel.org | 14102 | L: stable@vger.kernel.org |
| 14090 | S: Supported | 14103 | S: Supported |
| 14091 | F: Documentation/process/stable-kernel-rules.rst | 14104 | F: Documentation/process/stable-kernel-rules.rst |
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 0c909c4a932f..842fb9572661 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h | |||
| @@ -468,7 +468,7 @@ | |||
| 468 | SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \ | 468 | SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \ |
| 469 | SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) | 469 | SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) |
| 470 | 470 | ||
| 471 | #if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff | 471 | #if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL |
| 472 | #error "Inconsistent SCTLR_EL2 set/clear bits" | 472 | #error "Inconsistent SCTLR_EL2 set/clear bits" |
| 473 | #endif | 473 | #endif |
| 474 | 474 | ||
| @@ -509,7 +509,7 @@ | |||
| 509 | SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\ | 509 | SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\ |
| 510 | SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0) | 510 | SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0) |
| 511 | 511 | ||
| 512 | #if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff | 512 | #if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL |
| 513 | #error "Inconsistent SCTLR_EL1 set/clear bits" | 513 | #error "Inconsistent SCTLR_EL1 set/clear bits" |
| 514 | #endif | 514 | #endif |
| 515 | 515 | ||
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index af50064dea51..aec5ecb85737 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
| @@ -1333,7 +1333,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { | |||
| 1333 | .cpu_enable = cpu_enable_hw_dbm, | 1333 | .cpu_enable = cpu_enable_hw_dbm, |
| 1334 | }, | 1334 | }, |
| 1335 | #endif | 1335 | #endif |
| 1336 | #ifdef CONFIG_ARM64_SSBD | ||
| 1337 | { | 1336 | { |
| 1338 | .desc = "CRC32 instructions", | 1337 | .desc = "CRC32 instructions", |
| 1339 | .capability = ARM64_HAS_CRC32, | 1338 | .capability = ARM64_HAS_CRC32, |
| @@ -1343,6 +1342,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { | |||
| 1343 | .field_pos = ID_AA64ISAR0_CRC32_SHIFT, | 1342 | .field_pos = ID_AA64ISAR0_CRC32_SHIFT, |
| 1344 | .min_field_value = 1, | 1343 | .min_field_value = 1, |
| 1345 | }, | 1344 | }, |
| 1345 | #ifdef CONFIG_ARM64_SSBD | ||
| 1346 | { | 1346 | { |
| 1347 | .desc = "Speculative Store Bypassing Safe (SSBS)", | 1347 | .desc = "Speculative Store Bypassing Safe (SSBS)", |
| 1348 | .capability = ARM64_SSBS, | 1348 | .capability = ARM64_SSBS, |
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index 490b12af103c..c52d0efacd14 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig | |||
| @@ -140,6 +140,7 @@ CONFIG_RTC_CLASS=y | |||
| 140 | CONFIG_RTC_DRV_DS1307=y | 140 | CONFIG_RTC_DRV_DS1307=y |
| 141 | CONFIG_STAGING=y | 141 | CONFIG_STAGING=y |
| 142 | CONFIG_OCTEON_ETHERNET=y | 142 | CONFIG_OCTEON_ETHERNET=y |
| 143 | CONFIG_OCTEON_USB=y | ||
| 143 | # CONFIG_IOMMU_SUPPORT is not set | 144 | # CONFIG_IOMMU_SUPPORT is not set |
| 144 | CONFIG_RAS=y | 145 | CONFIG_RAS=y |
| 145 | CONFIG_EXT4_FS=y | 146 | CONFIG_EXT4_FS=y |
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index ea09ed6a80a9..8c6c48ed786a 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c | |||
| @@ -794,6 +794,7 @@ static void __init arch_mem_init(char **cmdline_p) | |||
| 794 | 794 | ||
| 795 | /* call board setup routine */ | 795 | /* call board setup routine */ |
| 796 | plat_mem_setup(); | 796 | plat_mem_setup(); |
| 797 | memblock_set_bottom_up(true); | ||
| 797 | 798 | ||
| 798 | /* | 799 | /* |
| 799 | * Make sure all kernel memory is in the maps. The "UP" and | 800 | * Make sure all kernel memory is in the maps. The "UP" and |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 0f852e1b5891..15e103c6d799 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
| @@ -2260,10 +2260,8 @@ void __init trap_init(void) | |||
| 2260 | unsigned long size = 0x200 + VECTORSPACING*64; | 2260 | unsigned long size = 0x200 + VECTORSPACING*64; |
| 2261 | phys_addr_t ebase_pa; | 2261 | phys_addr_t ebase_pa; |
| 2262 | 2262 | ||
| 2263 | memblock_set_bottom_up(true); | ||
| 2264 | ebase = (unsigned long) | 2263 | ebase = (unsigned long) |
| 2265 | memblock_alloc_from(size, 1 << fls(size), 0); | 2264 | memblock_alloc_from(size, 1 << fls(size), 0); |
| 2266 | memblock_set_bottom_up(false); | ||
| 2267 | 2265 | ||
| 2268 | /* | 2266 | /* |
| 2269 | * Try to ensure ebase resides in KSeg0 if possible. | 2267 | * Try to ensure ebase resides in KSeg0 if possible. |
| @@ -2307,6 +2305,7 @@ void __init trap_init(void) | |||
| 2307 | if (board_ebase_setup) | 2305 | if (board_ebase_setup) |
| 2308 | board_ebase_setup(); | 2306 | board_ebase_setup(); |
| 2309 | per_cpu_trap_init(true); | 2307 | per_cpu_trap_init(true); |
| 2308 | memblock_set_bottom_up(false); | ||
| 2310 | 2309 | ||
| 2311 | /* | 2310 | /* |
| 2312 | * Copy the generic exception handlers to their final destination. | 2311 | * Copy the generic exception handlers to their final destination. |
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c index 622761878cd1..60bf0a1cb757 100644 --- a/arch/mips/loongson64/loongson-3/numa.c +++ b/arch/mips/loongson64/loongson-3/numa.c | |||
| @@ -231,6 +231,8 @@ static __init void prom_meminit(void) | |||
| 231 | cpumask_clear(&__node_data[(node)]->cpumask); | 231 | cpumask_clear(&__node_data[(node)]->cpumask); |
| 232 | } | 232 | } |
| 233 | } | 233 | } |
| 234 | max_low_pfn = PHYS_PFN(memblock_end_of_DRAM()); | ||
| 235 | |||
| 234 | for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) { | 236 | for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) { |
| 235 | node = cpu / loongson_sysconf.cores_per_node; | 237 | node = cpu / loongson_sysconf.cores_per_node; |
| 236 | if (node >= num_online_nodes()) | 238 | if (node >= num_online_nodes()) |
| @@ -248,19 +250,9 @@ static __init void prom_meminit(void) | |||
| 248 | 250 | ||
| 249 | void __init paging_init(void) | 251 | void __init paging_init(void) |
| 250 | { | 252 | { |
| 251 | unsigned node; | ||
| 252 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; | 253 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; |
| 253 | 254 | ||
| 254 | pagetable_init(); | 255 | pagetable_init(); |
| 255 | |||
| 256 | for_each_online_node(node) { | ||
| 257 | unsigned long start_pfn, end_pfn; | ||
| 258 | |||
| 259 | get_pfn_range_for_nid(node, &start_pfn, &end_pfn); | ||
| 260 | |||
| 261 | if (end_pfn > max_low_pfn) | ||
| 262 | max_low_pfn = end_pfn; | ||
| 263 | } | ||
| 264 | #ifdef CONFIG_ZONE_DMA32 | 256 | #ifdef CONFIG_ZONE_DMA32 |
| 265 | zones_size[ZONE_DMA32] = MAX_DMA32_PFN; | 257 | zones_size[ZONE_DMA32] = MAX_DMA32_PFN; |
| 266 | #endif | 258 | #endif |
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index d8b8444d6795..813d13f92957 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c | |||
| @@ -435,6 +435,7 @@ void __init prom_meminit(void) | |||
| 435 | 435 | ||
| 436 | mlreset(); | 436 | mlreset(); |
| 437 | szmem(); | 437 | szmem(); |
| 438 | max_low_pfn = PHYS_PFN(memblock_end_of_DRAM()); | ||
| 438 | 439 | ||
| 439 | for (node = 0; node < MAX_COMPACT_NODES; node++) { | 440 | for (node = 0; node < MAX_COMPACT_NODES; node++) { |
| 440 | if (node_online(node)) { | 441 | if (node_online(node)) { |
| @@ -455,18 +456,8 @@ extern void setup_zero_pages(void); | |||
| 455 | void __init paging_init(void) | 456 | void __init paging_init(void) |
| 456 | { | 457 | { |
| 457 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; | 458 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; |
| 458 | unsigned node; | ||
| 459 | 459 | ||
| 460 | pagetable_init(); | 460 | pagetable_init(); |
| 461 | |||
| 462 | for_each_online_node(node) { | ||
| 463 | unsigned long start_pfn, end_pfn; | ||
| 464 | |||
| 465 | get_pfn_range_for_nid(node, &start_pfn, &end_pfn); | ||
| 466 | |||
| 467 | if (end_pfn > max_low_pfn) | ||
| 468 | max_low_pfn = end_pfn; | ||
| 469 | } | ||
| 470 | zones_size[ZONE_NORMAL] = max_low_pfn; | 461 | zones_size[ZONE_NORMAL] = max_low_pfn; |
| 471 | free_area_init_nodes(zones_size); | 462 | free_area_init_nodes(zones_size); |
| 472 | } | 463 | } |
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index 4af153a182b0..4b594f2e4f7e 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile | |||
| @@ -71,6 +71,10 @@ KBUILD_CFLAGS += $(call cc-option,-mstrict-align) | |||
| 71 | # arch specific predefines for sparse | 71 | # arch specific predefines for sparse |
| 72 | CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS) | 72 | CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS) |
| 73 | 73 | ||
| 74 | # Default target when executing plain make | ||
| 75 | boot := arch/riscv/boot | ||
| 76 | KBUILD_IMAGE := $(boot)/Image.gz | ||
| 77 | |||
| 74 | head-y := arch/riscv/kernel/head.o | 78 | head-y := arch/riscv/kernel/head.o |
| 75 | 79 | ||
| 76 | core-y += arch/riscv/kernel/ arch/riscv/mm/ | 80 | core-y += arch/riscv/kernel/ arch/riscv/mm/ |
| @@ -81,4 +85,13 @@ PHONY += vdso_install | |||
| 81 | vdso_install: | 85 | vdso_install: |
| 82 | $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ | 86 | $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ |
| 83 | 87 | ||
| 84 | all: vmlinux | 88 | all: Image.gz |
| 89 | |||
| 90 | Image: vmlinux | ||
| 91 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | ||
| 92 | |||
| 93 | Image.%: Image | ||
| 94 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | ||
| 95 | |||
| 96 | zinstall install: | ||
| 97 | $(Q)$(MAKE) $(build)=$(boot) $@ | ||
diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore new file mode 100644 index 000000000000..8dab0bb6ae66 --- /dev/null +++ b/arch/riscv/boot/.gitignore | |||
| @@ -0,0 +1,2 @@ | |||
| 1 | Image | ||
| 2 | Image.gz | ||
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile new file mode 100644 index 000000000000..0990a9fdbe5d --- /dev/null +++ b/arch/riscv/boot/Makefile | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | # | ||
| 2 | # arch/riscv/boot/Makefile | ||
| 3 | # | ||
| 4 | # This file is included by the global makefile so that you can add your own | ||
| 5 | # architecture-specific flags and dependencies. | ||
| 6 | # | ||
| 7 | # This file is subject to the terms and conditions of the GNU General Public | ||
| 8 | # License. See the file "COPYING" in the main directory of this archive | ||
| 9 | # for more details. | ||
| 10 | # | ||
| 11 | # Copyright (C) 2018, Anup Patel. | ||
| 12 | # Author: Anup Patel <anup@brainfault.org> | ||
| 13 | # | ||
| 14 | # Based on the ia64 and arm64 boot/Makefile. | ||
| 15 | # | ||
| 16 | |||
| 17 | OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S | ||
| 18 | |||
| 19 | targets := Image | ||
| 20 | |||
| 21 | $(obj)/Image: vmlinux FORCE | ||
| 22 | $(call if_changed,objcopy) | ||
| 23 | |||
| 24 | $(obj)/Image.gz: $(obj)/Image FORCE | ||
| 25 | $(call if_changed,gzip) | ||
| 26 | |||
| 27 | install: | ||
| 28 | $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ | ||
| 29 | $(obj)/Image System.map "$(INSTALL_PATH)" | ||
| 30 | |||
| 31 | zinstall: | ||
| 32 | $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ | ||
| 33 | $(obj)/Image.gz System.map "$(INSTALL_PATH)" | ||
diff --git a/arch/riscv/boot/install.sh b/arch/riscv/boot/install.sh new file mode 100644 index 000000000000..18c39159c0ff --- /dev/null +++ b/arch/riscv/boot/install.sh | |||
| @@ -0,0 +1,60 @@ | |||
| 1 | #!/bin/sh | ||
| 2 | # | ||
| 3 | # arch/riscv/boot/install.sh | ||
| 4 | # | ||
| 5 | # This file is subject to the terms and conditions of the GNU General Public | ||
| 6 | # License. See the file "COPYING" in the main directory of this archive | ||
| 7 | # for more details. | ||
| 8 | # | ||
| 9 | # Copyright (C) 1995 by Linus Torvalds | ||
| 10 | # | ||
| 11 | # Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin | ||
| 12 | # Adapted from code in arch/i386/boot/install.sh by Russell King | ||
| 13 | # | ||
| 14 | # "make install" script for the RISC-V Linux port | ||
| 15 | # | ||
| 16 | # Arguments: | ||
| 17 | # $1 - kernel version | ||
| 18 | # $2 - kernel image file | ||
| 19 | # $3 - kernel map file | ||
| 20 | # $4 - default install path (blank if root directory) | ||
| 21 | # | ||
| 22 | |||
| 23 | verify () { | ||
| 24 | if [ ! -f "$1" ]; then | ||
| 25 | echo "" 1>&2 | ||
| 26 | echo " *** Missing file: $1" 1>&2 | ||
| 27 | echo ' *** You need to run "make" before "make install".' 1>&2 | ||
| 28 | echo "" 1>&2 | ||
| 29 | exit 1 | ||
| 30 | fi | ||
| 31 | } | ||
| 32 | |||
| 33 | # Make sure the files actually exist | ||
| 34 | verify "$2" | ||
| 35 | verify "$3" | ||
| 36 | |||
| 37 | # User may have a custom install script | ||
| 38 | if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi | ||
| 39 | if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi | ||
| 40 | |||
| 41 | if [ "$(basename $2)" = "Image.gz" ]; then | ||
| 42 | # Compressed install | ||
| 43 | echo "Installing compressed kernel" | ||
| 44 | base=vmlinuz | ||
| 45 | else | ||
| 46 | # Normal install | ||
| 47 | echo "Installing normal kernel" | ||
| 48 | base=vmlinux | ||
| 49 | fi | ||
| 50 | |||
| 51 | if [ -f $4/$base-$1 ]; then | ||
| 52 | mv $4/$base-$1 $4/$base-$1.old | ||
| 53 | fi | ||
| 54 | cat $2 > $4/$base-$1 | ||
| 55 | |||
| 56 | # Install system map file | ||
| 57 | if [ -f $4/System.map-$1 ]; then | ||
| 58 | mv $4/System.map-$1 $4/System.map-$1.old | ||
| 59 | fi | ||
| 60 | cp $3 $4/System.map-$1 | ||
diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h index 349df33808c4..cd2af4b013e3 100644 --- a/arch/riscv/include/asm/module.h +++ b/arch/riscv/include/asm/module.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | 8 | ||
| 9 | #define MODULE_ARCH_VERMAGIC "riscv" | 9 | #define MODULE_ARCH_VERMAGIC "riscv" |
| 10 | 10 | ||
| 11 | struct module; | ||
| 11 | u64 module_emit_got_entry(struct module *mod, u64 val); | 12 | u64 module_emit_got_entry(struct module *mod, u64 val); |
| 12 | u64 module_emit_plt_entry(struct module *mod, u64 val); | 13 | u64 module_emit_plt_entry(struct module *mod, u64 val); |
| 13 | 14 | ||
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 473cfc84e412..8c3e3e3c8be1 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h | |||
| @@ -400,13 +400,13 @@ extern unsigned long __must_check __asm_copy_from_user(void *to, | |||
| 400 | static inline unsigned long | 400 | static inline unsigned long |
| 401 | raw_copy_from_user(void *to, const void __user *from, unsigned long n) | 401 | raw_copy_from_user(void *to, const void __user *from, unsigned long n) |
| 402 | { | 402 | { |
| 403 | return __asm_copy_to_user(to, from, n); | 403 | return __asm_copy_from_user(to, from, n); |
| 404 | } | 404 | } |
| 405 | 405 | ||
| 406 | static inline unsigned long | 406 | static inline unsigned long |
| 407 | raw_copy_to_user(void __user *to, const void *from, unsigned long n) | 407 | raw_copy_to_user(void __user *to, const void *from, unsigned long n) |
| 408 | { | 408 | { |
| 409 | return __asm_copy_from_user(to, from, n); | 409 | return __asm_copy_to_user(to, from, n); |
| 410 | } | 410 | } |
| 411 | 411 | ||
| 412 | extern long strncpy_from_user(char *dest, const char __user *src, long count); | 412 | extern long strncpy_from_user(char *dest, const char __user *src, long count); |
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h index eff7aa9aa163..fef96f117b4d 100644 --- a/arch/riscv/include/asm/unistd.h +++ b/arch/riscv/include/asm/unistd.h | |||
| @@ -13,10 +13,9 @@ | |||
| 13 | 13 | ||
| 14 | /* | 14 | /* |
| 15 | * There is explicitly no include guard here because this file is expected to | 15 | * There is explicitly no include guard here because this file is expected to |
| 16 | * be included multiple times. See uapi/asm/syscalls.h for more info. | 16 | * be included multiple times. |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #define __ARCH_WANT_NEW_STAT | ||
| 20 | #define __ARCH_WANT_SYS_CLONE | 19 | #define __ARCH_WANT_SYS_CLONE |
| 20 | |||
| 21 | #include <uapi/asm/unistd.h> | 21 | #include <uapi/asm/unistd.h> |
| 22 | #include <uapi/asm/syscalls.h> | ||
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/unistd.h index 206dc4b0f6ea..1f3bd3ebbb0d 100644 --- a/arch/riscv/include/uapi/asm/syscalls.h +++ b/arch/riscv/include/uapi/asm/unistd.h | |||
| @@ -1,13 +1,25 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ |
| 2 | /* | 2 | /* |
| 3 | * Copyright (C) 2017-2018 SiFive | 3 | * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com> |
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License version 2 as | ||
| 7 | * published by the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 4 | */ | 16 | */ |
| 5 | 17 | ||
| 6 | /* | 18 | #ifdef __LP64__ |
| 7 | * There is explicitly no include guard here because this file is expected to | 19 | #define __ARCH_WANT_NEW_STAT |
| 8 | * be included multiple times in order to define the syscall macros via | 20 | #endif /* __LP64__ */ |
| 9 | * __SYSCALL. | 21 | |
| 10 | */ | 22 | #include <asm-generic/unistd.h> |
| 11 | 23 | ||
| 12 | /* | 24 | /* |
| 13 | * Allows the instruction cache to be flushed from userspace. Despite RISC-V | 25 | * Allows the instruction cache to be flushed from userspace. Despite RISC-V |
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index 3a5a2ee31547..b4a7d4427fbb 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c | |||
| @@ -64,7 +64,7 @@ int riscv_of_processor_hartid(struct device_node *node) | |||
| 64 | 64 | ||
| 65 | static void print_isa(struct seq_file *f, const char *orig_isa) | 65 | static void print_isa(struct seq_file *f, const char *orig_isa) |
| 66 | { | 66 | { |
| 67 | static const char *ext = "mafdc"; | 67 | static const char *ext = "mafdcsu"; |
| 68 | const char *isa = orig_isa; | 68 | const char *isa = orig_isa; |
| 69 | const char *e; | 69 | const char *e; |
| 70 | 70 | ||
| @@ -88,11 +88,14 @@ static void print_isa(struct seq_file *f, const char *orig_isa) | |||
| 88 | /* | 88 | /* |
| 89 | * Check the rest of the ISA string for valid extensions, printing those | 89 | * Check the rest of the ISA string for valid extensions, printing those |
| 90 | * we find. RISC-V ISA strings define an order, so we only print the | 90 | * we find. RISC-V ISA strings define an order, so we only print the |
| 91 | * extension bits when they're in order. | 91 | * extension bits when they're in order. Hide the supervisor (S) |
| 92 | * extension from userspace as it's not accessible from there. | ||
| 92 | */ | 93 | */ |
| 93 | for (e = ext; *e != '\0'; ++e) { | 94 | for (e = ext; *e != '\0'; ++e) { |
| 94 | if (isa[0] == e[0]) { | 95 | if (isa[0] == e[0]) { |
| 95 | seq_write(f, isa, 1); | 96 | if (isa[0] != 's') |
| 97 | seq_write(f, isa, 1); | ||
| 98 | |||
| 96 | isa++; | 99 | isa++; |
| 97 | } | 100 | } |
| 98 | } | 101 | } |
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 711190d473d4..fe884cd69abd 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S | |||
| @@ -44,6 +44,16 @@ ENTRY(_start) | |||
| 44 | amoadd.w a3, a2, (a3) | 44 | amoadd.w a3, a2, (a3) |
| 45 | bnez a3, .Lsecondary_start | 45 | bnez a3, .Lsecondary_start |
| 46 | 46 | ||
| 47 | /* Clear BSS for flat non-ELF images */ | ||
| 48 | la a3, __bss_start | ||
| 49 | la a4, __bss_stop | ||
| 50 | ble a4, a3, clear_bss_done | ||
| 51 | clear_bss: | ||
| 52 | REG_S zero, (a3) | ||
| 53 | add a3, a3, RISCV_SZPTR | ||
| 54 | blt a3, a4, clear_bss | ||
| 55 | clear_bss_done: | ||
| 56 | |||
| 47 | /* Save hart ID and DTB physical address */ | 57 | /* Save hart ID and DTB physical address */ |
| 48 | mv s0, a0 | 58 | mv s0, a0 |
| 49 | mv s1, a1 | 59 | mv s1, a1 |
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index ece84991609c..65df1dfdc303 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S | |||
| @@ -74,7 +74,7 @@ SECTIONS | |||
| 74 | *(.sbss*) | 74 | *(.sbss*) |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | BSS_SECTION(0, 0, 0) | 77 | BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) |
| 78 | 78 | ||
| 79 | EXCEPTION_TABLE(0x10) | 79 | EXCEPTION_TABLE(0x10) |
| 80 | NOTES | 80 | NOTES |
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c index eaa60c94205a..1f32caa87686 100644 --- a/drivers/acpi/acpi_platform.c +++ b/drivers/acpi/acpi_platform.c | |||
| @@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = { | |||
| 30 | {"PNP0200", 0}, /* AT DMA Controller */ | 30 | {"PNP0200", 0}, /* AT DMA Controller */ |
| 31 | {"ACPI0009", 0}, /* IOxAPIC */ | 31 | {"ACPI0009", 0}, /* IOxAPIC */ |
| 32 | {"ACPI000A", 0}, /* IOAPIC */ | 32 | {"ACPI000A", 0}, /* IOAPIC */ |
| 33 | {"SMB0001", 0}, /* ACPI SMBUS virtual device */ | ||
| 33 | {"", 0}, | 34 | {"", 0}, |
| 34 | }; | 35 | }; |
| 35 | 36 | ||
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 3f0e2a14895a..22b53bf26817 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c | |||
| @@ -201,19 +201,28 @@ static const struct of_device_id ti_cpufreq_of_match[] = { | |||
| 201 | {}, | 201 | {}, |
| 202 | }; | 202 | }; |
| 203 | 203 | ||
| 204 | static const struct of_device_id *ti_cpufreq_match_node(void) | ||
| 205 | { | ||
| 206 | struct device_node *np; | ||
| 207 | const struct of_device_id *match; | ||
| 208 | |||
| 209 | np = of_find_node_by_path("/"); | ||
| 210 | match = of_match_node(ti_cpufreq_of_match, np); | ||
| 211 | of_node_put(np); | ||
| 212 | |||
| 213 | return match; | ||
| 214 | } | ||
| 215 | |||
| 204 | static int ti_cpufreq_probe(struct platform_device *pdev) | 216 | static int ti_cpufreq_probe(struct platform_device *pdev) |
| 205 | { | 217 | { |
| 206 | u32 version[VERSION_COUNT]; | 218 | u32 version[VERSION_COUNT]; |
| 207 | struct device_node *np; | ||
| 208 | const struct of_device_id *match; | 219 | const struct of_device_id *match; |
| 209 | struct opp_table *ti_opp_table; | 220 | struct opp_table *ti_opp_table; |
| 210 | struct ti_cpufreq_data *opp_data; | 221 | struct ti_cpufreq_data *opp_data; |
| 211 | const char * const reg_names[] = {"vdd", "vbb"}; | 222 | const char * const reg_names[] = {"vdd", "vbb"}; |
| 212 | int ret; | 223 | int ret; |
| 213 | 224 | ||
| 214 | np = of_find_node_by_path("/"); | 225 | match = dev_get_platdata(&pdev->dev); |
| 215 | match = of_match_node(ti_cpufreq_of_match, np); | ||
| 216 | of_node_put(np); | ||
| 217 | if (!match) | 226 | if (!match) |
| 218 | return -ENODEV; | 227 | return -ENODEV; |
| 219 | 228 | ||
| @@ -290,7 +299,14 @@ fail_put_node: | |||
| 290 | 299 | ||
| 291 | static int ti_cpufreq_init(void) | 300 | static int ti_cpufreq_init(void) |
| 292 | { | 301 | { |
| 293 | platform_device_register_simple("ti-cpufreq", -1, NULL, 0); | 302 | const struct of_device_id *match; |
| 303 | |||
| 304 | /* Check to ensure we are on a compatible platform */ | ||
| 305 | match = ti_cpufreq_match_node(); | ||
| 306 | if (match) | ||
| 307 | platform_device_register_data(NULL, "ti-cpufreq", -1, match, | ||
| 308 | sizeof(*match)); | ||
| 309 | |||
| 294 | return 0; | 310 | return 0; |
| 295 | } | 311 | } |
| 296 | module_init(ti_cpufreq_init); | 312 | module_init(ti_cpufreq_init); |
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index 5b44ef226904..fc359ca4503d 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c | |||
| @@ -184,6 +184,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head, | |||
| 184 | exp_info.ops = &udmabuf_ops; | 184 | exp_info.ops = &udmabuf_ops; |
| 185 | exp_info.size = ubuf->pagecount << PAGE_SHIFT; | 185 | exp_info.size = ubuf->pagecount << PAGE_SHIFT; |
| 186 | exp_info.priv = ubuf; | 186 | exp_info.priv = ubuf; |
| 187 | exp_info.flags = O_RDWR; | ||
| 187 | 188 | ||
| 188 | buf = dma_buf_export(&exp_info); | 189 | buf = dma_buf_export(&exp_info); |
| 189 | if (IS_ERR(buf)) { | 190 | if (IS_ERR(buf)) { |
diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c index b01ba4438501..31e891f00175 100644 --- a/drivers/gnss/serial.c +++ b/drivers/gnss/serial.c | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/of.h> | 13 | #include <linux/of.h> |
| 14 | #include <linux/pm.h> | 14 | #include <linux/pm.h> |
| 15 | #include <linux/pm_runtime.h> | 15 | #include <linux/pm_runtime.h> |
| 16 | #include <linux/sched.h> | ||
| 16 | #include <linux/serdev.h> | 17 | #include <linux/serdev.h> |
| 17 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
| 18 | 19 | ||
| @@ -63,7 +64,7 @@ static int gnss_serial_write_raw(struct gnss_device *gdev, | |||
| 63 | int ret; | 64 | int ret; |
| 64 | 65 | ||
| 65 | /* write is only buffered synchronously */ | 66 | /* write is only buffered synchronously */ |
| 66 | ret = serdev_device_write(serdev, buf, count, 0); | 67 | ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT); |
| 67 | if (ret < 0) | 68 | if (ret < 0) |
| 68 | return ret; | 69 | return ret; |
| 69 | 70 | ||
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c index 79cb98950013..71d014edd167 100644 --- a/drivers/gnss/sirf.c +++ b/drivers/gnss/sirf.c | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/pm.h> | 16 | #include <linux/pm.h> |
| 17 | #include <linux/pm_runtime.h> | 17 | #include <linux/pm_runtime.h> |
| 18 | #include <linux/regulator/consumer.h> | 18 | #include <linux/regulator/consumer.h> |
| 19 | #include <linux/sched.h> | ||
| 19 | #include <linux/serdev.h> | 20 | #include <linux/serdev.h> |
| 20 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
| 21 | #include <linux/wait.h> | 22 | #include <linux/wait.h> |
| @@ -83,7 +84,7 @@ static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf, | |||
| 83 | int ret; | 84 | int ret; |
| 84 | 85 | ||
| 85 | /* write is only buffered synchronously */ | 86 | /* write is only buffered synchronously */ |
| 86 | ret = serdev_device_write(serdev, buf, count, 0); | 87 | ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT); |
| 87 | if (ret < 0) | 88 | if (ret < 0) |
| 88 | return ret; | 89 | return ret; |
| 89 | 90 | ||
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 8269cffc2967..6a50f9f59c90 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c | |||
| @@ -35,8 +35,8 @@ | |||
| 35 | #define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__) | 35 | #define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__) |
| 36 | 36 | ||
| 37 | enum { | 37 | enum { |
| 38 | GPIO_MOCKUP_DIR_OUT = 0, | 38 | GPIO_MOCKUP_DIR_IN = 0, |
| 39 | GPIO_MOCKUP_DIR_IN = 1, | 39 | GPIO_MOCKUP_DIR_OUT = 1, |
| 40 | }; | 40 | }; |
| 41 | 41 | ||
| 42 | /* | 42 | /* |
| @@ -131,7 +131,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset) | |||
| 131 | { | 131 | { |
| 132 | struct gpio_mockup_chip *chip = gpiochip_get_data(gc); | 132 | struct gpio_mockup_chip *chip = gpiochip_get_data(gc); |
| 133 | 133 | ||
| 134 | return chip->lines[offset].dir; | 134 | return !chip->lines[offset].dir; |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset) | 137 | static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset) |
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index bfe4c5c9f41c..e9600b556f39 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c | |||
| @@ -268,8 +268,8 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | |||
| 268 | 268 | ||
| 269 | if (pxa_gpio_has_pinctrl()) { | 269 | if (pxa_gpio_has_pinctrl()) { |
| 270 | ret = pinctrl_gpio_direction_input(chip->base + offset); | 270 | ret = pinctrl_gpio_direction_input(chip->base + offset); |
| 271 | if (!ret) | 271 | if (ret) |
| 272 | return 0; | 272 | return ret; |
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | spin_lock_irqsave(&gpio_lock, flags); | 275 | spin_lock_irqsave(&gpio_lock, flags); |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 230e41562462..a2cbb474901c 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -1295,7 +1295,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, | |||
| 1295 | gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); | 1295 | gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); |
| 1296 | if (!gdev->descs) { | 1296 | if (!gdev->descs) { |
| 1297 | status = -ENOMEM; | 1297 | status = -ENOMEM; |
| 1298 | goto err_free_gdev; | 1298 | goto err_free_ida; |
| 1299 | } | 1299 | } |
| 1300 | 1300 | ||
| 1301 | if (chip->ngpio == 0) { | 1301 | if (chip->ngpio == 0) { |
| @@ -1427,8 +1427,9 @@ err_free_label: | |||
| 1427 | kfree_const(gdev->label); | 1427 | kfree_const(gdev->label); |
| 1428 | err_free_descs: | 1428 | err_free_descs: |
| 1429 | kfree(gdev->descs); | 1429 | kfree(gdev->descs); |
| 1430 | err_free_gdev: | 1430 | err_free_ida: |
| 1431 | ida_simple_remove(&gpio_ida, gdev->id); | 1431 | ida_simple_remove(&gpio_ida, gdev->id); |
| 1432 | err_free_gdev: | ||
| 1432 | /* failures here can mean systems won't boot... */ | 1433 | /* failures here can mean systems won't boot... */ |
| 1433 | pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__, | 1434 | pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__, |
| 1434 | gdev->base, gdev->base + gdev->ngpio - 1, | 1435 | gdev->base, gdev->base + gdev->ngpio - 1, |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index c31a8849e9f8..1580ec60b89f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | |||
| @@ -501,8 +501,11 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle) | |||
| 501 | { | 501 | { |
| 502 | struct amdgpu_device *adev = (struct amdgpu_device *)kgd; | 502 | struct amdgpu_device *adev = (struct amdgpu_device *)kgd; |
| 503 | 503 | ||
| 504 | amdgpu_dpm_switch_power_profile(adev, | 504 | if (adev->powerplay.pp_funcs && |
| 505 | PP_SMC_POWER_PROFILE_COMPUTE, !idle); | 505 | adev->powerplay.pp_funcs->switch_power_profile) |
| 506 | amdgpu_dpm_switch_power_profile(adev, | ||
| 507 | PP_SMC_POWER_PROFILE_COMPUTE, | ||
| 508 | !idle); | ||
| 506 | } | 509 | } |
| 507 | 510 | ||
| 508 | bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) | 511 | bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 6748cd7fc129..686a26de50f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | |||
| @@ -626,6 +626,13 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev) | |||
| 626 | "dither", | 626 | "dither", |
| 627 | amdgpu_dither_enum_list, sz); | 627 | amdgpu_dither_enum_list, sz); |
| 628 | 628 | ||
| 629 | if (amdgpu_device_has_dc_support(adev)) { | ||
| 630 | adev->mode_info.max_bpc_property = | ||
| 631 | drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16); | ||
| 632 | if (!adev->mode_info.max_bpc_property) | ||
| 633 | return -ENOMEM; | ||
| 634 | } | ||
| 635 | |||
| 629 | return 0; | 636 | return 0; |
| 630 | } | 637 | } |
| 631 | 638 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index b9e9e8b02fb7..d1b4d9b6aae0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | |||
| @@ -339,6 +339,8 @@ struct amdgpu_mode_info { | |||
| 339 | struct drm_property *audio_property; | 339 | struct drm_property *audio_property; |
| 340 | /* FMT dithering */ | 340 | /* FMT dithering */ |
| 341 | struct drm_property *dither_property; | 341 | struct drm_property *dither_property; |
| 342 | /* maximum number of bits per channel for monitor color */ | ||
| 343 | struct drm_property *max_bpc_property; | ||
| 342 | /* hardcoded DFP edid from BIOS */ | 344 | /* hardcoded DFP edid from BIOS */ |
| 343 | struct edid *bios_hardcoded_edid; | 345 | struct edid *bios_hardcoded_edid; |
| 344 | int bios_hardcoded_edid_size; | 346 | int bios_hardcoded_edid_size; |
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index e1c2b4e9c7b2..73ad02aea2b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | |||
| @@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/tahiti_mc.bin"); | |||
| 46 | MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin"); | 46 | MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin"); |
| 47 | MODULE_FIRMWARE("amdgpu/verde_mc.bin"); | 47 | MODULE_FIRMWARE("amdgpu/verde_mc.bin"); |
| 48 | MODULE_FIRMWARE("amdgpu/oland_mc.bin"); | 48 | MODULE_FIRMWARE("amdgpu/oland_mc.bin"); |
| 49 | MODULE_FIRMWARE("amdgpu/hainan_mc.bin"); | ||
| 49 | MODULE_FIRMWARE("amdgpu/si58_mc.bin"); | 50 | MODULE_FIRMWARE("amdgpu/si58_mc.bin"); |
| 50 | 51 | ||
| 51 | #define MC_SEQ_MISC0__MT__MASK 0xf0000000 | 52 | #define MC_SEQ_MISC0__MT__MASK 0xf0000000 |
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index bf5e6a413dee..4cc0dcb1a187 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c | |||
| @@ -65,6 +65,13 @@ | |||
| 65 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba | 65 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba |
| 66 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0 | 66 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0 |
| 67 | 67 | ||
| 68 | /* for Vega20 register name change */ | ||
| 69 | #define mmHDP_MEM_POWER_CTRL 0x00d4 | ||
| 70 | #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK 0x00000001L | ||
| 71 | #define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK 0x00000002L | ||
| 72 | #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L | ||
| 73 | #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L | ||
| 74 | #define mmHDP_MEM_POWER_CTRL_BASE_IDX 0 | ||
| 68 | /* | 75 | /* |
| 69 | * Indirect registers accessor | 76 | * Indirect registers accessor |
| 70 | */ | 77 | */ |
| @@ -870,15 +877,33 @@ static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable | |||
| 870 | { | 877 | { |
| 871 | uint32_t def, data; | 878 | uint32_t def, data; |
| 872 | 879 | ||
| 873 | def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); | 880 | if (adev->asic_type == CHIP_VEGA20) { |
| 881 | def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL)); | ||
| 874 | 882 | ||
| 875 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) | 883 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) |
| 876 | data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; | 884 | data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | |
| 877 | else | 885 | HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | |
| 878 | data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; | 886 | HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | |
| 887 | HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK; | ||
| 888 | else | ||
| 889 | data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK | | ||
| 890 | HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK | | ||
| 891 | HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK | | ||
| 892 | HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK); | ||
| 879 | 893 | ||
| 880 | if (def != data) | 894 | if (def != data) |
| 881 | WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data); | 895 | WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data); |
| 896 | } else { | ||
| 897 | def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); | ||
| 898 | |||
| 899 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) | ||
| 900 | data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK; | ||
| 901 | else | ||
| 902 | data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK; | ||
| 903 | |||
| 904 | if (def != data) | ||
| 905 | WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data); | ||
| 906 | } | ||
| 882 | } | 907 | } |
| 883 | 908 | ||
| 884 | static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) | 909 | static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c1262f62cd9f..ca925200fe09 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
| @@ -2358,8 +2358,15 @@ static void update_stream_scaling_settings(const struct drm_display_mode *mode, | |||
| 2358 | static enum dc_color_depth | 2358 | static enum dc_color_depth |
| 2359 | convert_color_depth_from_display_info(const struct drm_connector *connector) | 2359 | convert_color_depth_from_display_info(const struct drm_connector *connector) |
| 2360 | { | 2360 | { |
| 2361 | struct dm_connector_state *dm_conn_state = | ||
| 2362 | to_dm_connector_state(connector->state); | ||
| 2361 | uint32_t bpc = connector->display_info.bpc; | 2363 | uint32_t bpc = connector->display_info.bpc; |
| 2362 | 2364 | ||
| 2365 | /* TODO: Remove this when there's support for max_bpc in drm */ | ||
| 2366 | if (dm_conn_state && bpc > dm_conn_state->max_bpc) | ||
| 2367 | /* Round down to nearest even number. */ | ||
| 2368 | bpc = dm_conn_state->max_bpc - (dm_conn_state->max_bpc & 1); | ||
| 2369 | |||
| 2363 | switch (bpc) { | 2370 | switch (bpc) { |
| 2364 | case 0: | 2371 | case 0: |
| 2365 | /* | 2372 | /* |
| @@ -2943,6 +2950,9 @@ int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, | |||
| 2943 | } else if (property == adev->mode_info.underscan_property) { | 2950 | } else if (property == adev->mode_info.underscan_property) { |
| 2944 | dm_new_state->underscan_enable = val; | 2951 | dm_new_state->underscan_enable = val; |
| 2945 | ret = 0; | 2952 | ret = 0; |
| 2953 | } else if (property == adev->mode_info.max_bpc_property) { | ||
| 2954 | dm_new_state->max_bpc = val; | ||
| 2955 | ret = 0; | ||
| 2946 | } | 2956 | } |
| 2947 | 2957 | ||
| 2948 | return ret; | 2958 | return ret; |
| @@ -2985,6 +2995,9 @@ int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, | |||
| 2985 | } else if (property == adev->mode_info.underscan_property) { | 2995 | } else if (property == adev->mode_info.underscan_property) { |
| 2986 | *val = dm_state->underscan_enable; | 2996 | *val = dm_state->underscan_enable; |
| 2987 | ret = 0; | 2997 | ret = 0; |
| 2998 | } else if (property == adev->mode_info.max_bpc_property) { | ||
| 2999 | *val = dm_state->max_bpc; | ||
| 3000 | ret = 0; | ||
| 2988 | } | 3001 | } |
| 2989 | return ret; | 3002 | return ret; |
| 2990 | } | 3003 | } |
| @@ -3795,6 +3808,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, | |||
| 3795 | drm_object_attach_property(&aconnector->base.base, | 3808 | drm_object_attach_property(&aconnector->base.base, |
| 3796 | adev->mode_info.underscan_vborder_property, | 3809 | adev->mode_info.underscan_vborder_property, |
| 3797 | 0); | 3810 | 0); |
| 3811 | drm_object_attach_property(&aconnector->base.base, | ||
| 3812 | adev->mode_info.max_bpc_property, | ||
| 3813 | 0); | ||
| 3798 | 3814 | ||
| 3799 | } | 3815 | } |
| 3800 | 3816 | ||
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 924a38a1fc44..6e069d777ab2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | |||
| @@ -204,6 +204,7 @@ struct dm_connector_state { | |||
| 204 | enum amdgpu_rmx_type scaling; | 204 | enum amdgpu_rmx_type scaling; |
| 205 | uint8_t underscan_vborder; | 205 | uint8_t underscan_vborder; |
| 206 | uint8_t underscan_hborder; | 206 | uint8_t underscan_hborder; |
| 207 | uint8_t max_bpc; | ||
| 207 | bool underscan_enable; | 208 | bool underscan_enable; |
| 208 | bool freesync_enable; | 209 | bool freesync_enable; |
| 209 | bool freesync_capable; | 210 | bool freesync_capable; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index ed35ec0341e6..88f6b35ea6fe 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
| @@ -4525,12 +4525,12 @@ static int smu7_get_sclk_od(struct pp_hwmgr *hwmgr) | |||
| 4525 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); | 4525 | struct smu7_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); |
| 4526 | struct smu7_single_dpm_table *golden_sclk_table = | 4526 | struct smu7_single_dpm_table *golden_sclk_table = |
| 4527 | &(data->golden_dpm_table.sclk_table); | 4527 | &(data->golden_dpm_table.sclk_table); |
| 4528 | int value; | 4528 | int value = sclk_table->dpm_levels[sclk_table->count - 1].value; |
| 4529 | int golden_value = golden_sclk_table->dpm_levels | ||
| 4530 | [golden_sclk_table->count - 1].value; | ||
| 4529 | 4531 | ||
| 4530 | value = (sclk_table->dpm_levels[sclk_table->count - 1].value - | 4532 | value -= golden_value; |
| 4531 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * | 4533 | value = DIV_ROUND_UP(value * 100, golden_value); |
| 4532 | 100 / | ||
| 4533 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; | ||
| 4534 | 4534 | ||
| 4535 | return value; | 4535 | return value; |
| 4536 | } | 4536 | } |
| @@ -4567,12 +4567,12 @@ static int smu7_get_mclk_od(struct pp_hwmgr *hwmgr) | |||
| 4567 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); | 4567 | struct smu7_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); |
| 4568 | struct smu7_single_dpm_table *golden_mclk_table = | 4568 | struct smu7_single_dpm_table *golden_mclk_table = |
| 4569 | &(data->golden_dpm_table.mclk_table); | 4569 | &(data->golden_dpm_table.mclk_table); |
| 4570 | int value; | 4570 | int value = mclk_table->dpm_levels[mclk_table->count - 1].value; |
| 4571 | int golden_value = golden_mclk_table->dpm_levels | ||
| 4572 | [golden_mclk_table->count - 1].value; | ||
| 4571 | 4573 | ||
| 4572 | value = (mclk_table->dpm_levels[mclk_table->count - 1].value - | 4574 | value -= golden_value; |
| 4573 | golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * | 4575 | value = DIV_ROUND_UP(value * 100, golden_value); |
| 4574 | 100 / | ||
| 4575 | golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; | ||
| 4576 | 4576 | ||
| 4577 | return value; | 4577 | return value; |
| 4578 | } | 4578 | } |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index 8c4db86bb4b7..e2bc6e0c229f 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | |||
| @@ -4522,15 +4522,13 @@ static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr) | |||
| 4522 | struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); | 4522 | struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); |
| 4523 | struct vega10_single_dpm_table *golden_sclk_table = | 4523 | struct vega10_single_dpm_table *golden_sclk_table = |
| 4524 | &(data->golden_dpm_table.gfx_table); | 4524 | &(data->golden_dpm_table.gfx_table); |
| 4525 | int value; | 4525 | int value = sclk_table->dpm_levels[sclk_table->count - 1].value; |
| 4526 | 4526 | int golden_value = golden_sclk_table->dpm_levels | |
| 4527 | value = (sclk_table->dpm_levels[sclk_table->count - 1].value - | ||
| 4528 | golden_sclk_table->dpm_levels | ||
| 4529 | [golden_sclk_table->count - 1].value) * | ||
| 4530 | 100 / | ||
| 4531 | golden_sclk_table->dpm_levels | ||
| 4532 | [golden_sclk_table->count - 1].value; | 4527 | [golden_sclk_table->count - 1].value; |
| 4533 | 4528 | ||
| 4529 | value -= golden_value; | ||
| 4530 | value = DIV_ROUND_UP(value * 100, golden_value); | ||
| 4531 | |||
| 4534 | return value; | 4532 | return value; |
| 4535 | } | 4533 | } |
| 4536 | 4534 | ||
| @@ -4575,16 +4573,13 @@ static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr) | |||
| 4575 | struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); | 4573 | struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); |
| 4576 | struct vega10_single_dpm_table *golden_mclk_table = | 4574 | struct vega10_single_dpm_table *golden_mclk_table = |
| 4577 | &(data->golden_dpm_table.mem_table); | 4575 | &(data->golden_dpm_table.mem_table); |
| 4578 | int value; | 4576 | int value = mclk_table->dpm_levels[mclk_table->count - 1].value; |
| 4579 | 4577 | int golden_value = golden_mclk_table->dpm_levels | |
| 4580 | value = (mclk_table->dpm_levels | ||
| 4581 | [mclk_table->count - 1].value - | ||
| 4582 | golden_mclk_table->dpm_levels | ||
| 4583 | [golden_mclk_table->count - 1].value) * | ||
| 4584 | 100 / | ||
| 4585 | golden_mclk_table->dpm_levels | ||
| 4586 | [golden_mclk_table->count - 1].value; | 4578 | [golden_mclk_table->count - 1].value; |
| 4587 | 4579 | ||
| 4580 | value -= golden_value; | ||
| 4581 | value = DIV_ROUND_UP(value * 100, golden_value); | ||
| 4582 | |||
| 4588 | return value; | 4583 | return value; |
| 4589 | } | 4584 | } |
| 4590 | 4585 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c index 74bc37308dc0..54364444ecd1 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | |||
| @@ -2243,12 +2243,12 @@ static int vega12_get_sclk_od(struct pp_hwmgr *hwmgr) | |||
| 2243 | struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); | 2243 | struct vega12_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table); |
| 2244 | struct vega12_single_dpm_table *golden_sclk_table = | 2244 | struct vega12_single_dpm_table *golden_sclk_table = |
| 2245 | &(data->golden_dpm_table.gfx_table); | 2245 | &(data->golden_dpm_table.gfx_table); |
| 2246 | int value; | 2246 | int value = sclk_table->dpm_levels[sclk_table->count - 1].value; |
| 2247 | int golden_value = golden_sclk_table->dpm_levels | ||
| 2248 | [golden_sclk_table->count - 1].value; | ||
| 2247 | 2249 | ||
| 2248 | value = (sclk_table->dpm_levels[sclk_table->count - 1].value - | 2250 | value -= golden_value; |
| 2249 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * | 2251 | value = DIV_ROUND_UP(value * 100, golden_value); |
| 2250 | 100 / | ||
| 2251 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; | ||
| 2252 | 2252 | ||
| 2253 | return value; | 2253 | return value; |
| 2254 | } | 2254 | } |
| @@ -2264,16 +2264,13 @@ static int vega12_get_mclk_od(struct pp_hwmgr *hwmgr) | |||
| 2264 | struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); | 2264 | struct vega12_single_dpm_table *mclk_table = &(data->dpm_table.mem_table); |
| 2265 | struct vega12_single_dpm_table *golden_mclk_table = | 2265 | struct vega12_single_dpm_table *golden_mclk_table = |
| 2266 | &(data->golden_dpm_table.mem_table); | 2266 | &(data->golden_dpm_table.mem_table); |
| 2267 | int value; | 2267 | int value = mclk_table->dpm_levels[mclk_table->count - 1].value; |
| 2268 | 2268 | int golden_value = golden_mclk_table->dpm_levels | |
| 2269 | value = (mclk_table->dpm_levels | ||
| 2270 | [mclk_table->count - 1].value - | ||
| 2271 | golden_mclk_table->dpm_levels | ||
| 2272 | [golden_mclk_table->count - 1].value) * | ||
| 2273 | 100 / | ||
| 2274 | golden_mclk_table->dpm_levels | ||
| 2275 | [golden_mclk_table->count - 1].value; | 2269 | [golden_mclk_table->count - 1].value; |
| 2276 | 2270 | ||
| 2271 | value -= golden_value; | ||
| 2272 | value = DIV_ROUND_UP(value * 100, golden_value); | ||
| 2273 | |||
| 2277 | return value; | 2274 | return value; |
| 2278 | } | 2275 | } |
| 2279 | 2276 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index 99861f32b1f9..b4eadd47f3a4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | |||
| @@ -75,7 +75,17 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr) | |||
| 75 | data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; | 75 | data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; |
| 76 | data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; | 76 | data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT; |
| 77 | 77 | ||
| 78 | data->registry_data.disallowed_features = 0x0; | 78 | /* |
| 79 | * Disable the following features for now: | ||
| 80 | * GFXCLK DS | ||
| 81 | * SOCLK DS | ||
| 82 | * LCLK DS | ||
| 83 | * DCEFCLK DS | ||
| 84 | * FCLK DS | ||
| 85 | * MP1CLK DS | ||
| 86 | * MP0CLK DS | ||
| 87 | */ | ||
| 88 | data->registry_data.disallowed_features = 0xE0041C00; | ||
| 79 | data->registry_data.od_state_in_dc_support = 0; | 89 | data->registry_data.od_state_in_dc_support = 0; |
| 80 | data->registry_data.thermal_support = 1; | 90 | data->registry_data.thermal_support = 1; |
| 81 | data->registry_data.skip_baco_hardware = 0; | 91 | data->registry_data.skip_baco_hardware = 0; |
| @@ -1313,12 +1323,13 @@ static int vega20_get_sclk_od( | |||
| 1313 | &(data->dpm_table.gfx_table); | 1323 | &(data->dpm_table.gfx_table); |
| 1314 | struct vega20_single_dpm_table *golden_sclk_table = | 1324 | struct vega20_single_dpm_table *golden_sclk_table = |
| 1315 | &(data->golden_dpm_table.gfx_table); | 1325 | &(data->golden_dpm_table.gfx_table); |
| 1316 | int value; | 1326 | int value = sclk_table->dpm_levels[sclk_table->count - 1].value; |
| 1327 | int golden_value = golden_sclk_table->dpm_levels | ||
| 1328 | [golden_sclk_table->count - 1].value; | ||
| 1317 | 1329 | ||
| 1318 | /* od percentage */ | 1330 | /* od percentage */ |
| 1319 | value = DIV_ROUND_UP((sclk_table->dpm_levels[sclk_table->count - 1].value - | 1331 | value -= golden_value; |
| 1320 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * 100, | 1332 | value = DIV_ROUND_UP(value * 100, golden_value); |
| 1321 | golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value); | ||
| 1322 | 1333 | ||
| 1323 | return value; | 1334 | return value; |
| 1324 | } | 1335 | } |
| @@ -1358,12 +1369,13 @@ static int vega20_get_mclk_od( | |||
| 1358 | &(data->dpm_table.mem_table); | 1369 | &(data->dpm_table.mem_table); |
| 1359 | struct vega20_single_dpm_table *golden_mclk_table = | 1370 | struct vega20_single_dpm_table *golden_mclk_table = |
| 1360 | &(data->golden_dpm_table.mem_table); | 1371 | &(data->golden_dpm_table.mem_table); |
| 1361 | int value; | 1372 | int value = mclk_table->dpm_levels[mclk_table->count - 1].value; |
| 1373 | int golden_value = golden_mclk_table->dpm_levels | ||
| 1374 | [golden_mclk_table->count - 1].value; | ||
| 1362 | 1375 | ||
| 1363 | /* od percentage */ | 1376 | /* od percentage */ |
| 1364 | value = DIV_ROUND_UP((mclk_table->dpm_levels[mclk_table->count - 1].value - | 1377 | value -= golden_value; |
| 1365 | golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * 100, | 1378 | value = DIV_ROUND_UP(value * 100, golden_value); |
| 1366 | golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value); | ||
| 1367 | 1379 | ||
| 1368 | return value; | 1380 | return value; |
| 1369 | } | 1381 | } |
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 69dab82a3771..bf589c53b908 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c | |||
| @@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = { | |||
| 60 | 60 | ||
| 61 | MODULE_DEVICE_TABLE(pci, pciidlist); | 61 | MODULE_DEVICE_TABLE(pci, pciidlist); |
| 62 | 62 | ||
| 63 | static void ast_kick_out_firmware_fb(struct pci_dev *pdev) | ||
| 64 | { | ||
| 65 | struct apertures_struct *ap; | ||
| 66 | bool primary = false; | ||
| 67 | |||
| 68 | ap = alloc_apertures(1); | ||
| 69 | if (!ap) | ||
| 70 | return; | ||
| 71 | |||
| 72 | ap->ranges[0].base = pci_resource_start(pdev, 0); | ||
| 73 | ap->ranges[0].size = pci_resource_len(pdev, 0); | ||
| 74 | |||
| 75 | #ifdef CONFIG_X86 | ||
| 76 | primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; | ||
| 77 | #endif | ||
| 78 | drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary); | ||
| 79 | kfree(ap); | ||
| 80 | } | ||
| 81 | |||
| 63 | static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 82 | static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
| 64 | { | 83 | { |
| 84 | ast_kick_out_firmware_fb(pdev); | ||
| 85 | |||
| 65 | return drm_get_pci_dev(pdev, ent, &driver); | 86 | return drm_get_pci_dev(pdev, ent, &driver); |
| 66 | } | 87 | } |
| 67 | 88 | ||
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 5e77d456d9bb..7c6ac3cadb6b 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c | |||
| @@ -568,6 +568,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc, | |||
| 568 | } | 568 | } |
| 569 | ast_bo_unreserve(bo); | 569 | ast_bo_unreserve(bo); |
| 570 | 570 | ||
| 571 | ast_set_offset_reg(crtc); | ||
| 571 | ast_set_start_address_crt1(crtc, (u32)gpu_addr); | 572 | ast_set_start_address_crt1(crtc, (u32)gpu_addr); |
| 572 | 573 | ||
| 573 | return 0; | 574 | return 0; |
| @@ -1254,7 +1255,7 @@ static int ast_cursor_move(struct drm_crtc *crtc, | |||
| 1254 | ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07)); | 1255 | ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07)); |
| 1255 | 1256 | ||
| 1256 | /* dummy write to fire HWC */ | 1257 | /* dummy write to fire HWC */ |
| 1257 | ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00); | 1258 | ast_show_cursor(crtc); |
| 1258 | 1259 | ||
| 1259 | return 0; | 1260 | return 0; |
| 1260 | } | 1261 | } |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index a502f3e519fd..dd852a25d375 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -219,6 +219,9 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) | |||
| 219 | mutex_lock(&fb_helper->lock); | 219 | mutex_lock(&fb_helper->lock); |
| 220 | drm_connector_list_iter_begin(dev, &conn_iter); | 220 | drm_connector_list_iter_begin(dev, &conn_iter); |
| 221 | drm_for_each_connector_iter(connector, &conn_iter) { | 221 | drm_for_each_connector_iter(connector, &conn_iter) { |
| 222 | if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK) | ||
| 223 | continue; | ||
| 224 | |||
| 222 | ret = __drm_fb_helper_add_one_connector(fb_helper, connector); | 225 | ret = __drm_fb_helper_add_one_connector(fb_helper, connector); |
| 223 | if (ret) | 226 | if (ret) |
| 224 | goto fail; | 227 | goto fail; |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 1aaccbe7e1de..d4fac09095f8 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -1268,7 +1268,7 @@ relocate_entry(struct i915_vma *vma, | |||
| 1268 | else if (gen >= 4) | 1268 | else if (gen >= 4) |
| 1269 | len = 4; | 1269 | len = 4; |
| 1270 | else | 1270 | else |
| 1271 | len = 3; | 1271 | len = 6; |
| 1272 | 1272 | ||
| 1273 | batch = reloc_gpu(eb, vma, len); | 1273 | batch = reloc_gpu(eb, vma, len); |
| 1274 | if (IS_ERR(batch)) | 1274 | if (IS_ERR(batch)) |
| @@ -1309,6 +1309,11 @@ relocate_entry(struct i915_vma *vma, | |||
| 1309 | *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; | 1309 | *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; |
| 1310 | *batch++ = addr; | 1310 | *batch++ = addr; |
| 1311 | *batch++ = target_offset; | 1311 | *batch++ = target_offset; |
| 1312 | |||
| 1313 | /* And again for good measure (blb/pnv) */ | ||
| 1314 | *batch++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL; | ||
| 1315 | *batch++ = addr; | ||
| 1316 | *batch++ = target_offset; | ||
| 1312 | } | 1317 | } |
| 1313 | 1318 | ||
| 1314 | goto out; | 1319 | goto out; |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 47c302543799..07999fe09ad2 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
| @@ -3413,6 +3413,11 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) | |||
| 3413 | ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; | 3413 | ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL; |
| 3414 | if (ggtt->vm.clear_range != nop_clear_range) | 3414 | if (ggtt->vm.clear_range != nop_clear_range) |
| 3415 | ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; | 3415 | ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL; |
| 3416 | |||
| 3417 | /* Prevent recursively calling stop_machine() and deadlocks. */ | ||
| 3418 | dev_info(dev_priv->drm.dev, | ||
| 3419 | "Disabling error capture for VT-d workaround\n"); | ||
| 3420 | i915_disable_error_state(dev_priv, -ENODEV); | ||
| 3416 | } | 3421 | } |
| 3417 | 3422 | ||
| 3418 | ggtt->invalidate = gen6_ggtt_invalidate; | 3423 | ggtt->invalidate = gen6_ggtt_invalidate; |
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 8762d17b6659..3eb33e000d6f 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
| @@ -648,6 +648,9 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, | |||
| 648 | return 0; | 648 | return 0; |
| 649 | } | 649 | } |
| 650 | 650 | ||
| 651 | if (IS_ERR(error)) | ||
| 652 | return PTR_ERR(error); | ||
| 653 | |||
| 651 | if (*error->error_msg) | 654 | if (*error->error_msg) |
| 652 | err_printf(m, "%s\n", error->error_msg); | 655 | err_printf(m, "%s\n", error->error_msg); |
| 653 | err_printf(m, "Kernel: " UTS_RELEASE "\n"); | 656 | err_printf(m, "Kernel: " UTS_RELEASE "\n"); |
| @@ -1859,6 +1862,7 @@ void i915_capture_error_state(struct drm_i915_private *i915, | |||
| 1859 | error = i915_capture_gpu_state(i915); | 1862 | error = i915_capture_gpu_state(i915); |
| 1860 | if (!error) { | 1863 | if (!error) { |
| 1861 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); | 1864 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); |
| 1865 | i915_disable_error_state(i915, -ENOMEM); | ||
| 1862 | return; | 1866 | return; |
| 1863 | } | 1867 | } |
| 1864 | 1868 | ||
| @@ -1914,5 +1918,14 @@ void i915_reset_error_state(struct drm_i915_private *i915) | |||
| 1914 | i915->gpu_error.first_error = NULL; | 1918 | i915->gpu_error.first_error = NULL; |
| 1915 | spin_unlock_irq(&i915->gpu_error.lock); | 1919 | spin_unlock_irq(&i915->gpu_error.lock); |
| 1916 | 1920 | ||
| 1917 | i915_gpu_state_put(error); | 1921 | if (!IS_ERR(error)) |
| 1922 | i915_gpu_state_put(error); | ||
| 1923 | } | ||
| 1924 | |||
| 1925 | void i915_disable_error_state(struct drm_i915_private *i915, int err) | ||
| 1926 | { | ||
| 1927 | spin_lock_irq(&i915->gpu_error.lock); | ||
| 1928 | if (!i915->gpu_error.first_error) | ||
| 1929 | i915->gpu_error.first_error = ERR_PTR(err); | ||
| 1930 | spin_unlock_irq(&i915->gpu_error.lock); | ||
| 1918 | } | 1931 | } |
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index 8710fb18ed74..3ec89a504de5 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ b/drivers/gpu/drm/i915/i915_gpu_error.h | |||
| @@ -343,6 +343,7 @@ static inline void i915_gpu_state_put(struct i915_gpu_state *gpu) | |||
| 343 | 343 | ||
| 344 | struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); | 344 | struct i915_gpu_state *i915_first_error_state(struct drm_i915_private *i915); |
| 345 | void i915_reset_error_state(struct drm_i915_private *i915); | 345 | void i915_reset_error_state(struct drm_i915_private *i915); |
| 346 | void i915_disable_error_state(struct drm_i915_private *i915, int err); | ||
| 346 | 347 | ||
| 347 | #else | 348 | #else |
| 348 | 349 | ||
| @@ -355,13 +356,18 @@ static inline void i915_capture_error_state(struct drm_i915_private *dev_priv, | |||
| 355 | static inline struct i915_gpu_state * | 356 | static inline struct i915_gpu_state * |
| 356 | i915_first_error_state(struct drm_i915_private *i915) | 357 | i915_first_error_state(struct drm_i915_private *i915) |
| 357 | { | 358 | { |
| 358 | return NULL; | 359 | return ERR_PTR(-ENODEV); |
| 359 | } | 360 | } |
| 360 | 361 | ||
| 361 | static inline void i915_reset_error_state(struct drm_i915_private *i915) | 362 | static inline void i915_reset_error_state(struct drm_i915_private *i915) |
| 362 | { | 363 | { |
| 363 | } | 364 | } |
| 364 | 365 | ||
| 366 | static inline void i915_disable_error_state(struct drm_i915_private *i915, | ||
| 367 | int err) | ||
| 368 | { | ||
| 369 | } | ||
| 370 | |||
| 365 | #endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */ | 371 | #endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */ |
| 366 | 372 | ||
| 367 | #endif /* _I915_GPU_ERROR_H_ */ | 373 | #endif /* _I915_GPU_ERROR_H_ */ |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index a54843fdeb2f..c9878dd1f7cd 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -2890,6 +2890,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, | |||
| 2890 | return; | 2890 | return; |
| 2891 | 2891 | ||
| 2892 | valid_fb: | 2892 | valid_fb: |
| 2893 | intel_state->base.rotation = plane_config->rotation; | ||
| 2893 | intel_fill_fb_ggtt_view(&intel_state->view, fb, | 2894 | intel_fill_fb_ggtt_view(&intel_state->view, fb, |
| 2894 | intel_state->base.rotation); | 2895 | intel_state->base.rotation); |
| 2895 | intel_state->color_plane[0].stride = | 2896 | intel_state->color_plane[0].stride = |
| @@ -7882,8 +7883,15 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, | |||
| 7882 | plane_config->tiling = I915_TILING_X; | 7883 | plane_config->tiling = I915_TILING_X; |
| 7883 | fb->modifier = I915_FORMAT_MOD_X_TILED; | 7884 | fb->modifier = I915_FORMAT_MOD_X_TILED; |
| 7884 | } | 7885 | } |
| 7886 | |||
| 7887 | if (val & DISPPLANE_ROTATE_180) | ||
| 7888 | plane_config->rotation = DRM_MODE_ROTATE_180; | ||
| 7885 | } | 7889 | } |
| 7886 | 7890 | ||
| 7891 | if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && | ||
| 7892 | val & DISPPLANE_MIRROR) | ||
| 7893 | plane_config->rotation |= DRM_MODE_REFLECT_X; | ||
| 7894 | |||
| 7887 | pixel_format = val & DISPPLANE_PIXFORMAT_MASK; | 7895 | pixel_format = val & DISPPLANE_PIXFORMAT_MASK; |
| 7888 | fourcc = i9xx_format_to_fourcc(pixel_format); | 7896 | fourcc = i9xx_format_to_fourcc(pixel_format); |
| 7889 | fb->format = drm_format_info(fourcc); | 7897 | fb->format = drm_format_info(fourcc); |
| @@ -8952,6 +8960,29 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, | |||
| 8952 | goto error; | 8960 | goto error; |
| 8953 | } | 8961 | } |
| 8954 | 8962 | ||
| 8963 | /* | ||
| 8964 | * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr | ||
| 8965 | * while i915 HW rotation is clockwise, thats why this swapping. | ||
| 8966 | */ | ||
| 8967 | switch (val & PLANE_CTL_ROTATE_MASK) { | ||
| 8968 | case PLANE_CTL_ROTATE_0: | ||
| 8969 | plane_config->rotation = DRM_MODE_ROTATE_0; | ||
| 8970 | break; | ||
| 8971 | case PLANE_CTL_ROTATE_90: | ||
| 8972 | plane_config->rotation = DRM_MODE_ROTATE_270; | ||
| 8973 | break; | ||
| 8974 | case PLANE_CTL_ROTATE_180: | ||
| 8975 | plane_config->rotation = DRM_MODE_ROTATE_180; | ||
| 8976 | break; | ||
| 8977 | case PLANE_CTL_ROTATE_270: | ||
| 8978 | plane_config->rotation = DRM_MODE_ROTATE_90; | ||
| 8979 | break; | ||
| 8980 | } | ||
| 8981 | |||
| 8982 | if (INTEL_GEN(dev_priv) >= 10 && | ||
| 8983 | val & PLANE_CTL_FLIP_HORIZONTAL) | ||
| 8984 | plane_config->rotation |= DRM_MODE_REFLECT_X; | ||
| 8985 | |||
| 8955 | base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000; | 8986 | base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000; |
| 8956 | plane_config->base = base; | 8987 | plane_config->base = base; |
| 8957 | 8988 | ||
| @@ -15267,6 +15298,14 @@ retry: | |||
| 15267 | ret = drm_atomic_add_affected_planes(state, crtc); | 15298 | ret = drm_atomic_add_affected_planes(state, crtc); |
| 15268 | if (ret) | 15299 | if (ret) |
| 15269 | goto out; | 15300 | goto out; |
| 15301 | |||
| 15302 | /* | ||
| 15303 | * FIXME hack to force a LUT update to avoid the | ||
| 15304 | * plane update forcing the pipe gamma on without | ||
| 15305 | * having a proper LUT loaded. Remove once we | ||
| 15306 | * have readout for pipe gamma enable. | ||
| 15307 | */ | ||
| 15308 | crtc_state->color_mgmt_changed = true; | ||
| 15270 | } | 15309 | } |
| 15271 | } | 15310 | } |
| 15272 | 15311 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 8b298e5f012d..db6fa1d0cbda 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -547,6 +547,7 @@ struct intel_initial_plane_config { | |||
| 547 | unsigned int tiling; | 547 | unsigned int tiling; |
| 548 | int size; | 548 | int size; |
| 549 | u32 base; | 549 | u32 base; |
| 550 | u8 rotation; | ||
| 550 | }; | 551 | }; |
| 551 | 552 | ||
| 552 | #define SKL_MIN_SRC_W 8 | 553 | #define SKL_MIN_SRC_W 8 |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 245f0022bcfd..3fe358db1276 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
| @@ -2493,6 +2493,9 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, | |||
| 2493 | uint32_t method1, method2; | 2493 | uint32_t method1, method2; |
| 2494 | int cpp; | 2494 | int cpp; |
| 2495 | 2495 | ||
| 2496 | if (mem_value == 0) | ||
| 2497 | return U32_MAX; | ||
| 2498 | |||
| 2496 | if (!intel_wm_plane_visible(cstate, pstate)) | 2499 | if (!intel_wm_plane_visible(cstate, pstate)) |
| 2497 | return 0; | 2500 | return 0; |
| 2498 | 2501 | ||
| @@ -2522,6 +2525,9 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, | |||
| 2522 | uint32_t method1, method2; | 2525 | uint32_t method1, method2; |
| 2523 | int cpp; | 2526 | int cpp; |
| 2524 | 2527 | ||
| 2528 | if (mem_value == 0) | ||
| 2529 | return U32_MAX; | ||
| 2530 | |||
| 2525 | if (!intel_wm_plane_visible(cstate, pstate)) | 2531 | if (!intel_wm_plane_visible(cstate, pstate)) |
| 2526 | return 0; | 2532 | return 0; |
| 2527 | 2533 | ||
| @@ -2545,6 +2551,9 @@ static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, | |||
| 2545 | { | 2551 | { |
| 2546 | int cpp; | 2552 | int cpp; |
| 2547 | 2553 | ||
| 2554 | if (mem_value == 0) | ||
| 2555 | return U32_MAX; | ||
| 2556 | |||
| 2548 | if (!intel_wm_plane_visible(cstate, pstate)) | 2557 | if (!intel_wm_plane_visible(cstate, pstate)) |
| 2549 | return 0; | 2558 | return 0; |
| 2550 | 2559 | ||
| @@ -3008,6 +3017,34 @@ static void snb_wm_latency_quirk(struct drm_i915_private *dev_priv) | |||
| 3008 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); | 3017 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); |
| 3009 | } | 3018 | } |
| 3010 | 3019 | ||
| 3020 | static void snb_wm_lp3_irq_quirk(struct drm_i915_private *dev_priv) | ||
| 3021 | { | ||
| 3022 | /* | ||
| 3023 | * On some SNB machines (Thinkpad X220 Tablet at least) | ||
| 3024 | * LP3 usage can cause vblank interrupts to be lost. | ||
| 3025 | * The DEIIR bit will go high but it looks like the CPU | ||
| 3026 | * never gets interrupted. | ||
| 3027 | * | ||
| 3028 | * It's not clear whether other interrupt source could | ||
| 3029 | * be affected or if this is somehow limited to vblank | ||
| 3030 | * interrupts only. To play it safe we disable LP3 | ||
| 3031 | * watermarks entirely. | ||
| 3032 | */ | ||
| 3033 | if (dev_priv->wm.pri_latency[3] == 0 && | ||
| 3034 | dev_priv->wm.spr_latency[3] == 0 && | ||
| 3035 | dev_priv->wm.cur_latency[3] == 0) | ||
| 3036 | return; | ||
| 3037 | |||
| 3038 | dev_priv->wm.pri_latency[3] = 0; | ||
| 3039 | dev_priv->wm.spr_latency[3] = 0; | ||
| 3040 | dev_priv->wm.cur_latency[3] = 0; | ||
| 3041 | |||
| 3042 | DRM_DEBUG_KMS("LP3 watermarks disabled due to potential for lost interrupts\n"); | ||
| 3043 | intel_print_wm_latency(dev_priv, "Primary", dev_priv->wm.pri_latency); | ||
| 3044 | intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); | ||
| 3045 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); | ||
| 3046 | } | ||
| 3047 | |||
| 3011 | static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) | 3048 | static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) |
| 3012 | { | 3049 | { |
| 3013 | intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); | 3050 | intel_read_wm_latency(dev_priv, dev_priv->wm.pri_latency); |
| @@ -3024,8 +3061,10 @@ static void ilk_setup_wm_latency(struct drm_i915_private *dev_priv) | |||
| 3024 | intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); | 3061 | intel_print_wm_latency(dev_priv, "Sprite", dev_priv->wm.spr_latency); |
| 3025 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); | 3062 | intel_print_wm_latency(dev_priv, "Cursor", dev_priv->wm.cur_latency); |
| 3026 | 3063 | ||
| 3027 | if (IS_GEN6(dev_priv)) | 3064 | if (IS_GEN6(dev_priv)) { |
| 3028 | snb_wm_latency_quirk(dev_priv); | 3065 | snb_wm_latency_quirk(dev_priv); |
| 3066 | snb_wm_lp3_irq_quirk(dev_priv); | ||
| 3067 | } | ||
| 3029 | } | 3068 | } |
| 3030 | 3069 | ||
| 3031 | static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) | 3070 | static void skl_setup_wm_latency(struct drm_i915_private *dev_priv) |
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 127468785f74..1f94b9affe4b 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c | |||
| @@ -214,6 +214,12 @@ static int vc4_atomic_commit(struct drm_device *dev, | |||
| 214 | return 0; | 214 | return 0; |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | /* We know for sure we don't want an async update here. Set | ||
| 218 | * state->legacy_cursor_update to false to prevent | ||
| 219 | * drm_atomic_helper_setup_commit() from auto-completing | ||
| 220 | * commit->flip_done. | ||
| 221 | */ | ||
| 222 | state->legacy_cursor_update = false; | ||
| 217 | ret = drm_atomic_helper_setup_commit(state, nonblock); | 223 | ret = drm_atomic_helper_setup_commit(state, nonblock); |
| 218 | if (ret) | 224 | if (ret) |
| 219 | return ret; | 225 | return ret; |
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 9dc3fcbd290b..c6635f23918a 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c | |||
| @@ -807,7 +807,7 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb) | |||
| 807 | static void vc4_plane_atomic_async_update(struct drm_plane *plane, | 807 | static void vc4_plane_atomic_async_update(struct drm_plane *plane, |
| 808 | struct drm_plane_state *state) | 808 | struct drm_plane_state *state) |
| 809 | { | 809 | { |
| 810 | struct vc4_plane_state *vc4_state = to_vc4_plane_state(plane->state); | 810 | struct vc4_plane_state *vc4_state, *new_vc4_state; |
| 811 | 811 | ||
| 812 | if (plane->state->fb != state->fb) { | 812 | if (plane->state->fb != state->fb) { |
| 813 | vc4_plane_async_set_fb(plane, state->fb); | 813 | vc4_plane_async_set_fb(plane, state->fb); |
| @@ -828,7 +828,18 @@ static void vc4_plane_atomic_async_update(struct drm_plane *plane, | |||
| 828 | plane->state->src_y = state->src_y; | 828 | plane->state->src_y = state->src_y; |
| 829 | 829 | ||
| 830 | /* Update the display list based on the new crtc_x/y. */ | 830 | /* Update the display list based on the new crtc_x/y. */ |
| 831 | vc4_plane_atomic_check(plane, plane->state); | 831 | vc4_plane_atomic_check(plane, state); |
| 832 | |||
| 833 | new_vc4_state = to_vc4_plane_state(state); | ||
| 834 | vc4_state = to_vc4_plane_state(plane->state); | ||
| 835 | |||
| 836 | /* Update the current vc4_state pos0, pos2 and ptr0 dlist entries. */ | ||
| 837 | vc4_state->dlist[vc4_state->pos0_offset] = | ||
| 838 | new_vc4_state->dlist[vc4_state->pos0_offset]; | ||
| 839 | vc4_state->dlist[vc4_state->pos2_offset] = | ||
| 840 | new_vc4_state->dlist[vc4_state->pos2_offset]; | ||
| 841 | vc4_state->dlist[vc4_state->ptr0_offset] = | ||
| 842 | new_vc4_state->dlist[vc4_state->ptr0_offset]; | ||
| 832 | 843 | ||
| 833 | /* Note that we can't just call vc4_plane_write_dlist() | 844 | /* Note that we can't just call vc4_plane_write_dlist() |
| 834 | * because that would smash the context data that the HVS is | 845 | * because that would smash the context data that the HVS is |
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index a7513a8a8e37..d6106e1a0d4a 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c | |||
| @@ -353,6 +353,9 @@ static void process_ib_ipinfo(void *in_msg, void *out_msg, int op) | |||
| 353 | 353 | ||
| 354 | out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled; | 354 | out->body.kvp_ip_val.dhcp_enabled = in->kvp_ip_val.dhcp_enabled; |
| 355 | 355 | ||
| 356 | /* fallthrough */ | ||
| 357 | |||
| 358 | case KVP_OP_GET_IP_INFO: | ||
| 356 | utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id, | 359 | utf16s_to_utf8s((wchar_t *)in->kvp_ip_val.adapter_id, |
| 357 | MAX_ADAPTER_ID_SIZE, | 360 | MAX_ADAPTER_ID_SIZE, |
| 358 | UTF16_LITTLE_ENDIAN, | 361 | UTF16_LITTLE_ENDIAN, |
| @@ -405,7 +408,11 @@ kvp_send_key(struct work_struct *dummy) | |||
| 405 | process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO); | 408 | process_ib_ipinfo(in_msg, message, KVP_OP_SET_IP_INFO); |
| 406 | break; | 409 | break; |
| 407 | case KVP_OP_GET_IP_INFO: | 410 | case KVP_OP_GET_IP_INFO: |
| 408 | /* We only need to pass on message->kvp_hdr.operation. */ | 411 | /* |
| 412 | * We only need to pass on the info of operation, adapter_id | ||
| 413 | * and addr_family to the userland kvp daemon. | ||
| 414 | */ | ||
| 415 | process_ib_ipinfo(in_msg, message, KVP_OP_GET_IP_INFO); | ||
| 409 | break; | 416 | break; |
| 410 | case KVP_OP_SET: | 417 | case KVP_OP_SET: |
| 411 | switch (in_msg->body.kvp_set.data.value_type) { | 418 | switch (in_msg->body.kvp_set.data.value_type) { |
| @@ -446,9 +453,9 @@ kvp_send_key(struct work_struct *dummy) | |||
| 446 | 453 | ||
| 447 | } | 454 | } |
| 448 | 455 | ||
| 449 | break; | 456 | /* |
| 450 | 457 | * The key is always a string - utf16 encoding. | |
| 451 | case KVP_OP_GET: | 458 | */ |
| 452 | message->body.kvp_set.data.key_size = | 459 | message->body.kvp_set.data.key_size = |
| 453 | utf16s_to_utf8s( | 460 | utf16s_to_utf8s( |
| 454 | (wchar_t *)in_msg->body.kvp_set.data.key, | 461 | (wchar_t *)in_msg->body.kvp_set.data.key, |
| @@ -456,6 +463,17 @@ kvp_send_key(struct work_struct *dummy) | |||
| 456 | UTF16_LITTLE_ENDIAN, | 463 | UTF16_LITTLE_ENDIAN, |
| 457 | message->body.kvp_set.data.key, | 464 | message->body.kvp_set.data.key, |
| 458 | HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1; | 465 | HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1; |
| 466 | |||
| 467 | break; | ||
| 468 | |||
| 469 | case KVP_OP_GET: | ||
| 470 | message->body.kvp_get.data.key_size = | ||
| 471 | utf16s_to_utf8s( | ||
| 472 | (wchar_t *)in_msg->body.kvp_get.data.key, | ||
| 473 | in_msg->body.kvp_get.data.key_size, | ||
| 474 | UTF16_LITTLE_ENDIAN, | ||
| 475 | message->body.kvp_get.data.key, | ||
| 476 | HV_KVP_EXCHANGE_MAX_KEY_SIZE - 1) + 1; | ||
| 459 | break; | 477 | break; |
| 460 | 478 | ||
| 461 | case KVP_OP_DELETE: | 479 | case KVP_OP_DELETE: |
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index bb2cd29e1658..d8f7000a466a 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c | |||
| @@ -797,7 +797,8 @@ static int iommu_init_ga_log(struct amd_iommu *iommu) | |||
| 797 | entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; | 797 | entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512; |
| 798 | memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, | 798 | memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET, |
| 799 | &entry, sizeof(entry)); | 799 | &entry, sizeof(entry)); |
| 800 | entry = (iommu_virt_to_phys(iommu->ga_log) & 0xFFFFFFFFFFFFFULL) & ~7ULL; | 800 | entry = (iommu_virt_to_phys(iommu->ga_log_tail) & |
| 801 | (BIT_ULL(52)-1)) & ~7ULL; | ||
| 801 | memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, | 802 | memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET, |
| 802 | &entry, sizeof(entry)); | 803 | &entry, sizeof(entry)); |
| 803 | writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); | 804 | writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET); |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index f3ccf025108b..41a4b8808802 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
| @@ -3075,7 +3075,7 @@ static int copy_context_table(struct intel_iommu *iommu, | |||
| 3075 | } | 3075 | } |
| 3076 | 3076 | ||
| 3077 | if (old_ce) | 3077 | if (old_ce) |
| 3078 | iounmap(old_ce); | 3078 | memunmap(old_ce); |
| 3079 | 3079 | ||
| 3080 | ret = 0; | 3080 | ret = 0; |
| 3081 | if (devfn < 0x80) | 3081 | if (devfn < 0x80) |
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index db301efe126d..887150907526 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c | |||
| @@ -595,7 +595,7 @@ static irqreturn_t prq_event_thread(int irq, void *d) | |||
| 595 | pr_err("%s: Page request without PASID: %08llx %08llx\n", | 595 | pr_err("%s: Page request without PASID: %08llx %08llx\n", |
| 596 | iommu->name, ((unsigned long long *)req)[0], | 596 | iommu->name, ((unsigned long long *)req)[0], |
| 597 | ((unsigned long long *)req)[1]); | 597 | ((unsigned long long *)req)[1]); |
| 598 | goto bad_req; | 598 | goto no_pasid; |
| 599 | } | 599 | } |
| 600 | 600 | ||
| 601 | if (!svm || svm->pasid != req->pasid) { | 601 | if (!svm || svm->pasid != req->pasid) { |
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index b98a03189580..ddf3a492e1d5 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c | |||
| @@ -498,6 +498,9 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) | |||
| 498 | 498 | ||
| 499 | static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) | 499 | static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) |
| 500 | { | 500 | { |
| 501 | if (!domain->mmu) | ||
| 502 | return; | ||
| 503 | |||
| 501 | /* | 504 | /* |
| 502 | * Disable the context. Flush the TLB as required when modifying the | 505 | * Disable the context. Flush the TLB as required when modifying the |
| 503 | * context registers. | 506 | * context registers. |
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index 31d1f4ab915e..65a933a21e68 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c | |||
| @@ -807,7 +807,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg, | |||
| 807 | } | 807 | } |
| 808 | 808 | ||
| 809 | if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) { | 809 | if (adap->transmit_queue_sz >= CEC_MAX_MSG_TX_QUEUE_SZ) { |
| 810 | dprintk(1, "%s: transmit queue full\n", __func__); | 810 | dprintk(2, "%s: transmit queue full\n", __func__); |
| 811 | return -EBUSY; | 811 | return -EBUSY; |
| 812 | } | 812 | } |
| 813 | 813 | ||
| @@ -1180,6 +1180,8 @@ static int cec_config_log_addr(struct cec_adapter *adap, | |||
| 1180 | { | 1180 | { |
| 1181 | struct cec_log_addrs *las = &adap->log_addrs; | 1181 | struct cec_log_addrs *las = &adap->log_addrs; |
| 1182 | struct cec_msg msg = { }; | 1182 | struct cec_msg msg = { }; |
| 1183 | const unsigned int max_retries = 2; | ||
| 1184 | unsigned int i; | ||
| 1183 | int err; | 1185 | int err; |
| 1184 | 1186 | ||
| 1185 | if (cec_has_log_addr(adap, log_addr)) | 1187 | if (cec_has_log_addr(adap, log_addr)) |
| @@ -1188,19 +1190,44 @@ static int cec_config_log_addr(struct cec_adapter *adap, | |||
| 1188 | /* Send poll message */ | 1190 | /* Send poll message */ |
| 1189 | msg.len = 1; | 1191 | msg.len = 1; |
| 1190 | msg.msg[0] = (log_addr << 4) | log_addr; | 1192 | msg.msg[0] = (log_addr << 4) | log_addr; |
| 1191 | err = cec_transmit_msg_fh(adap, &msg, NULL, true); | ||
| 1192 | 1193 | ||
| 1193 | /* | 1194 | for (i = 0; i < max_retries; i++) { |
| 1194 | * While trying to poll the physical address was reset | 1195 | err = cec_transmit_msg_fh(adap, &msg, NULL, true); |
| 1195 | * and the adapter was unconfigured, so bail out. | ||
| 1196 | */ | ||
| 1197 | if (!adap->is_configuring) | ||
| 1198 | return -EINTR; | ||
| 1199 | 1196 | ||
| 1200 | if (err) | 1197 | /* |
| 1201 | return err; | 1198 | * While trying to poll the physical address was reset |
| 1199 | * and the adapter was unconfigured, so bail out. | ||
| 1200 | */ | ||
| 1201 | if (!adap->is_configuring) | ||
| 1202 | return -EINTR; | ||
| 1203 | |||
| 1204 | if (err) | ||
| 1205 | return err; | ||
| 1202 | 1206 | ||
| 1203 | if (msg.tx_status & CEC_TX_STATUS_OK) | 1207 | /* |
| 1208 | * The message was aborted due to a disconnect or | ||
| 1209 | * unconfigure, just bail out. | ||
| 1210 | */ | ||
| 1211 | if (msg.tx_status & CEC_TX_STATUS_ABORTED) | ||
| 1212 | return -EINTR; | ||
| 1213 | if (msg.tx_status & CEC_TX_STATUS_OK) | ||
| 1214 | return 0; | ||
| 1215 | if (msg.tx_status & CEC_TX_STATUS_NACK) | ||
| 1216 | break; | ||
| 1217 | /* | ||
| 1218 | * Retry up to max_retries times if the message was neither | ||
| 1219 | * OKed or NACKed. This can happen due to e.g. a Lost | ||
| 1220 | * Arbitration condition. | ||
| 1221 | */ | ||
| 1222 | } | ||
| 1223 | |||
| 1224 | /* | ||
| 1225 | * If we are unable to get an OK or a NACK after max_retries attempts | ||
| 1226 | * (and note that each attempt already consists of four polls), then | ||
| 1227 | * then we assume that something is really weird and that it is not a | ||
| 1228 | * good idea to try and claim this logical address. | ||
| 1229 | */ | ||
| 1230 | if (i == max_retries) | ||
| 1204 | return 0; | 1231 | return 0; |
| 1205 | 1232 | ||
| 1206 | /* | 1233 | /* |
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c index ca5d92942820..41d470d9ca94 100644 --- a/drivers/media/i2c/tc358743.c +++ b/drivers/media/i2c/tc358743.c | |||
| @@ -1918,7 +1918,6 @@ static int tc358743_probe_of(struct tc358743_state *state) | |||
| 1918 | ret = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep), &endpoint); | 1918 | ret = v4l2_fwnode_endpoint_alloc_parse(of_fwnode_handle(ep), &endpoint); |
| 1919 | if (ret) { | 1919 | if (ret) { |
| 1920 | dev_err(dev, "failed to parse endpoint\n"); | 1920 | dev_err(dev, "failed to parse endpoint\n"); |
| 1921 | ret = ret; | ||
| 1922 | goto put_node; | 1921 | goto put_node; |
| 1923 | } | 1922 | } |
| 1924 | 1923 | ||
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index 452eb9b42140..447baaebca44 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c | |||
| @@ -1844,14 +1844,12 @@ fail_mutex_destroy: | |||
| 1844 | static void cio2_pci_remove(struct pci_dev *pci_dev) | 1844 | static void cio2_pci_remove(struct pci_dev *pci_dev) |
| 1845 | { | 1845 | { |
| 1846 | struct cio2_device *cio2 = pci_get_drvdata(pci_dev); | 1846 | struct cio2_device *cio2 = pci_get_drvdata(pci_dev); |
| 1847 | unsigned int i; | ||
| 1848 | 1847 | ||
| 1848 | media_device_unregister(&cio2->media_dev); | ||
| 1849 | cio2_notifier_exit(cio2); | 1849 | cio2_notifier_exit(cio2); |
| 1850 | cio2_queues_exit(cio2); | ||
| 1850 | cio2_fbpt_exit_dummy(cio2); | 1851 | cio2_fbpt_exit_dummy(cio2); |
| 1851 | for (i = 0; i < CIO2_QUEUES; i++) | ||
| 1852 | cio2_queue_exit(cio2, &cio2->queue[i]); | ||
| 1853 | v4l2_device_unregister(&cio2->v4l2_dev); | 1852 | v4l2_device_unregister(&cio2->v4l2_dev); |
| 1854 | media_device_unregister(&cio2->media_dev); | ||
| 1855 | media_device_cleanup(&cio2->media_dev); | 1853 | media_device_cleanup(&cio2->media_dev); |
| 1856 | mutex_destroy(&cio2->lock); | 1854 | mutex_destroy(&cio2->lock); |
| 1857 | } | 1855 | } |
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c index 77fb7987b42f..13f2828d880d 100644 --- a/drivers/media/platform/omap3isp/isp.c +++ b/drivers/media/platform/omap3isp/isp.c | |||
| @@ -1587,6 +1587,8 @@ static void isp_pm_complete(struct device *dev) | |||
| 1587 | 1587 | ||
| 1588 | static void isp_unregister_entities(struct isp_device *isp) | 1588 | static void isp_unregister_entities(struct isp_device *isp) |
| 1589 | { | 1589 | { |
| 1590 | media_device_unregister(&isp->media_dev); | ||
| 1591 | |||
| 1590 | omap3isp_csi2_unregister_entities(&isp->isp_csi2a); | 1592 | omap3isp_csi2_unregister_entities(&isp->isp_csi2a); |
| 1591 | omap3isp_ccp2_unregister_entities(&isp->isp_ccp2); | 1593 | omap3isp_ccp2_unregister_entities(&isp->isp_ccp2); |
| 1592 | omap3isp_ccdc_unregister_entities(&isp->isp_ccdc); | 1594 | omap3isp_ccdc_unregister_entities(&isp->isp_ccdc); |
| @@ -1597,7 +1599,6 @@ static void isp_unregister_entities(struct isp_device *isp) | |||
| 1597 | omap3isp_stat_unregister_entities(&isp->isp_hist); | 1599 | omap3isp_stat_unregister_entities(&isp->isp_hist); |
| 1598 | 1600 | ||
| 1599 | v4l2_device_unregister(&isp->v4l2_dev); | 1601 | v4l2_device_unregister(&isp->v4l2_dev); |
| 1600 | media_device_unregister(&isp->media_dev); | ||
| 1601 | media_device_cleanup(&isp->media_dev); | 1602 | media_device_cleanup(&isp->media_dev); |
| 1602 | } | 1603 | } |
| 1603 | 1604 | ||
diff --git a/drivers/media/platform/vicodec/vicodec-core.c b/drivers/media/platform/vicodec/vicodec-core.c index 1eb9132bfc85..b292cff26c86 100644 --- a/drivers/media/platform/vicodec/vicodec-core.c +++ b/drivers/media/platform/vicodec/vicodec-core.c | |||
| @@ -42,7 +42,7 @@ MODULE_PARM_DESC(debug, " activates debug info"); | |||
| 42 | #define MAX_WIDTH 4096U | 42 | #define MAX_WIDTH 4096U |
| 43 | #define MIN_WIDTH 640U | 43 | #define MIN_WIDTH 640U |
| 44 | #define MAX_HEIGHT 2160U | 44 | #define MAX_HEIGHT 2160U |
| 45 | #define MIN_HEIGHT 480U | 45 | #define MIN_HEIGHT 360U |
| 46 | 46 | ||
| 47 | #define dprintk(dev, fmt, arg...) \ | 47 | #define dprintk(dev, fmt, arg...) \ |
| 48 | v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg) | 48 | v4l2_dbg(1, debug, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg) |
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c index af150a0395df..d82db738f174 100644 --- a/drivers/media/platform/vim2m.c +++ b/drivers/media/platform/vim2m.c | |||
| @@ -1009,7 +1009,7 @@ static const struct v4l2_m2m_ops m2m_ops = { | |||
| 1009 | 1009 | ||
| 1010 | static const struct media_device_ops m2m_media_ops = { | 1010 | static const struct media_device_ops m2m_media_ops = { |
| 1011 | .req_validate = vb2_request_validate, | 1011 | .req_validate = vb2_request_validate, |
| 1012 | .req_queue = vb2_m2m_request_queue, | 1012 | .req_queue = v4l2_m2m_request_queue, |
| 1013 | }; | 1013 | }; |
| 1014 | 1014 | ||
| 1015 | static int vim2m_probe(struct platform_device *pdev) | 1015 | static int vim2m_probe(struct platform_device *pdev) |
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c index 6e37950292cd..5f2b033a7a42 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls.c +++ b/drivers/media/v4l2-core/v4l2-ctrls.c | |||
| @@ -1664,6 +1664,11 @@ static int std_validate(const struct v4l2_ctrl *ctrl, u32 idx, | |||
| 1664 | p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME) | 1664 | p_mpeg2_slice_params->forward_ref_index >= VIDEO_MAX_FRAME) |
| 1665 | return -EINVAL; | 1665 | return -EINVAL; |
| 1666 | 1666 | ||
| 1667 | if (p_mpeg2_slice_params->pad || | ||
| 1668 | p_mpeg2_slice_params->picture.pad || | ||
| 1669 | p_mpeg2_slice_params->sequence.pad) | ||
| 1670 | return -EINVAL; | ||
| 1671 | |||
| 1667 | return 0; | 1672 | return 0; |
| 1668 | 1673 | ||
| 1669 | case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION: | 1674 | case V4L2_CTRL_TYPE_MPEG2_QUANTIZATION: |
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c index a3ef1f50a4b3..481e3c65cf97 100644 --- a/drivers/media/v4l2-core/v4l2-event.c +++ b/drivers/media/v4l2-core/v4l2-event.c | |||
| @@ -193,6 +193,22 @@ int v4l2_event_pending(struct v4l2_fh *fh) | |||
| 193 | } | 193 | } |
| 194 | EXPORT_SYMBOL_GPL(v4l2_event_pending); | 194 | EXPORT_SYMBOL_GPL(v4l2_event_pending); |
| 195 | 195 | ||
| 196 | static void __v4l2_event_unsubscribe(struct v4l2_subscribed_event *sev) | ||
| 197 | { | ||
| 198 | struct v4l2_fh *fh = sev->fh; | ||
| 199 | unsigned int i; | ||
| 200 | |||
| 201 | lockdep_assert_held(&fh->subscribe_lock); | ||
| 202 | assert_spin_locked(&fh->vdev->fh_lock); | ||
| 203 | |||
| 204 | /* Remove any pending events for this subscription */ | ||
| 205 | for (i = 0; i < sev->in_use; i++) { | ||
| 206 | list_del(&sev->events[sev_pos(sev, i)].list); | ||
| 207 | fh->navailable--; | ||
| 208 | } | ||
| 209 | list_del(&sev->list); | ||
| 210 | } | ||
| 211 | |||
| 196 | int v4l2_event_subscribe(struct v4l2_fh *fh, | 212 | int v4l2_event_subscribe(struct v4l2_fh *fh, |
| 197 | const struct v4l2_event_subscription *sub, unsigned elems, | 213 | const struct v4l2_event_subscription *sub, unsigned elems, |
| 198 | const struct v4l2_subscribed_event_ops *ops) | 214 | const struct v4l2_subscribed_event_ops *ops) |
| @@ -224,27 +240,23 @@ int v4l2_event_subscribe(struct v4l2_fh *fh, | |||
| 224 | 240 | ||
| 225 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); | 241 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); |
| 226 | found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); | 242 | found_ev = v4l2_event_subscribed(fh, sub->type, sub->id); |
| 243 | if (!found_ev) | ||
| 244 | list_add(&sev->list, &fh->subscribed); | ||
| 227 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); | 245 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); |
| 228 | 246 | ||
| 229 | if (found_ev) { | 247 | if (found_ev) { |
| 230 | /* Already listening */ | 248 | /* Already listening */ |
| 231 | kvfree(sev); | 249 | kvfree(sev); |
| 232 | goto out_unlock; | 250 | } else if (sev->ops && sev->ops->add) { |
| 233 | } | ||
| 234 | |||
| 235 | if (sev->ops && sev->ops->add) { | ||
| 236 | ret = sev->ops->add(sev, elems); | 251 | ret = sev->ops->add(sev, elems); |
| 237 | if (ret) { | 252 | if (ret) { |
| 253 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); | ||
| 254 | __v4l2_event_unsubscribe(sev); | ||
| 255 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); | ||
| 238 | kvfree(sev); | 256 | kvfree(sev); |
| 239 | goto out_unlock; | ||
| 240 | } | 257 | } |
| 241 | } | 258 | } |
| 242 | 259 | ||
| 243 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); | ||
| 244 | list_add(&sev->list, &fh->subscribed); | ||
| 245 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); | ||
| 246 | |||
| 247 | out_unlock: | ||
| 248 | mutex_unlock(&fh->subscribe_lock); | 260 | mutex_unlock(&fh->subscribe_lock); |
| 249 | 261 | ||
| 250 | return ret; | 262 | return ret; |
| @@ -279,7 +291,6 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh, | |||
| 279 | { | 291 | { |
| 280 | struct v4l2_subscribed_event *sev; | 292 | struct v4l2_subscribed_event *sev; |
| 281 | unsigned long flags; | 293 | unsigned long flags; |
| 282 | int i; | ||
| 283 | 294 | ||
| 284 | if (sub->type == V4L2_EVENT_ALL) { | 295 | if (sub->type == V4L2_EVENT_ALL) { |
| 285 | v4l2_event_unsubscribe_all(fh); | 296 | v4l2_event_unsubscribe_all(fh); |
| @@ -291,14 +302,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh, | |||
| 291 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); | 302 | spin_lock_irqsave(&fh->vdev->fh_lock, flags); |
| 292 | 303 | ||
| 293 | sev = v4l2_event_subscribed(fh, sub->type, sub->id); | 304 | sev = v4l2_event_subscribed(fh, sub->type, sub->id); |
| 294 | if (sev != NULL) { | 305 | if (sev != NULL) |
| 295 | /* Remove any pending events for this subscription */ | 306 | __v4l2_event_unsubscribe(sev); |
| 296 | for (i = 0; i < sev->in_use; i++) { | ||
| 297 | list_del(&sev->events[sev_pos(sev, i)].list); | ||
| 298 | fh->navailable--; | ||
| 299 | } | ||
| 300 | list_del(&sev->list); | ||
| 301 | } | ||
| 302 | 307 | ||
| 303 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); | 308 | spin_unlock_irqrestore(&fh->vdev->fh_lock, flags); |
| 304 | 309 | ||
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c index d7806db222d8..1ed2465972ac 100644 --- a/drivers/media/v4l2-core/v4l2-mem2mem.c +++ b/drivers/media/v4l2-core/v4l2-mem2mem.c | |||
| @@ -953,7 +953,7 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, | |||
| 953 | } | 953 | } |
| 954 | EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); | 954 | EXPORT_SYMBOL_GPL(v4l2_m2m_buf_queue); |
| 955 | 955 | ||
| 956 | void vb2_m2m_request_queue(struct media_request *req) | 956 | void v4l2_m2m_request_queue(struct media_request *req) |
| 957 | { | 957 | { |
| 958 | struct media_request_object *obj, *obj_safe; | 958 | struct media_request_object *obj, *obj_safe; |
| 959 | struct v4l2_m2m_ctx *m2m_ctx = NULL; | 959 | struct v4l2_m2m_ctx *m2m_ctx = NULL; |
| @@ -997,7 +997,7 @@ void vb2_m2m_request_queue(struct media_request *req) | |||
| 997 | if (m2m_ctx) | 997 | if (m2m_ctx) |
| 998 | v4l2_m2m_try_schedule(m2m_ctx); | 998 | v4l2_m2m_try_schedule(m2m_ctx); |
| 999 | } | 999 | } |
| 1000 | EXPORT_SYMBOL_GPL(vb2_m2m_request_queue); | 1000 | EXPORT_SYMBOL_GPL(v4l2_m2m_request_queue); |
| 1001 | 1001 | ||
| 1002 | /* Videobuf2 ioctl helpers */ | 1002 | /* Videobuf2 ioctl helpers */ |
| 1003 | 1003 | ||
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index b2a0340f277e..d8e3cc2dc747 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c | |||
| @@ -132,7 +132,7 @@ static const struct of_device_id atmel_ssc_dt_ids[] = { | |||
| 132 | MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids); | 132 | MODULE_DEVICE_TABLE(of, atmel_ssc_dt_ids); |
| 133 | #endif | 133 | #endif |
| 134 | 134 | ||
| 135 | static inline const struct atmel_ssc_platform_data * __init | 135 | static inline const struct atmel_ssc_platform_data * |
| 136 | atmel_ssc_get_driver_data(struct platform_device *pdev) | 136 | atmel_ssc_get_driver_data(struct platform_device *pdev) |
| 137 | { | 137 | { |
| 138 | if (pdev->dev.of_node) { | 138 | if (pdev->dev.of_node) { |
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c index 313da3150262..1540a7785e14 100644 --- a/drivers/misc/sgi-gru/grukdump.c +++ b/drivers/misc/sgi-gru/grukdump.c | |||
| @@ -27,6 +27,9 @@ | |||
| 27 | #include <linux/delay.h> | 27 | #include <linux/delay.h> |
| 28 | #include <linux/bitops.h> | 28 | #include <linux/bitops.h> |
| 29 | #include <asm/uv/uv_hub.h> | 29 | #include <asm/uv/uv_hub.h> |
| 30 | |||
| 31 | #include <linux/nospec.h> | ||
| 32 | |||
| 30 | #include "gru.h" | 33 | #include "gru.h" |
| 31 | #include "grutables.h" | 34 | #include "grutables.h" |
| 32 | #include "gruhandles.h" | 35 | #include "gruhandles.h" |
| @@ -196,6 +199,7 @@ int gru_dump_chiplet_request(unsigned long arg) | |||
| 196 | /* Currently, only dump by gid is implemented */ | 199 | /* Currently, only dump by gid is implemented */ |
| 197 | if (req.gid >= gru_max_gids) | 200 | if (req.gid >= gru_max_gids) |
| 198 | return -EINVAL; | 201 | return -EINVAL; |
| 202 | req.gid = array_index_nospec(req.gid, gru_max_gids); | ||
| 199 | 203 | ||
| 200 | gru = GID_TO_GRU(req.gid); | 204 | gru = GID_TO_GRU(req.gid); |
| 201 | ubuf = req.buf; | 205 | ubuf = req.buf; |
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 7bfd366d970d..c4115bae5db1 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | * - JMicron (hardware and technical support) | 12 | * - JMicron (hardware and technical support) |
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #include <linux/bitfield.h> | ||
| 15 | #include <linux/string.h> | 16 | #include <linux/string.h> |
| 16 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
| 17 | #include <linux/highmem.h> | 18 | #include <linux/highmem.h> |
| @@ -462,6 +463,9 @@ struct intel_host { | |||
| 462 | u32 dsm_fns; | 463 | u32 dsm_fns; |
| 463 | int drv_strength; | 464 | int drv_strength; |
| 464 | bool d3_retune; | 465 | bool d3_retune; |
| 466 | bool rpm_retune_ok; | ||
| 467 | u32 glk_rx_ctrl1; | ||
| 468 | u32 glk_tun_val; | ||
| 465 | }; | 469 | }; |
| 466 | 470 | ||
| 467 | static const guid_t intel_dsm_guid = | 471 | static const guid_t intel_dsm_guid = |
| @@ -791,6 +795,77 @@ cleanup: | |||
| 791 | return ret; | 795 | return ret; |
| 792 | } | 796 | } |
| 793 | 797 | ||
| 798 | #ifdef CONFIG_PM | ||
| 799 | #define GLK_RX_CTRL1 0x834 | ||
| 800 | #define GLK_TUN_VAL 0x840 | ||
| 801 | #define GLK_PATH_PLL GENMASK(13, 8) | ||
| 802 | #define GLK_DLY GENMASK(6, 0) | ||
| 803 | /* Workaround firmware failing to restore the tuning value */ | ||
| 804 | static void glk_rpm_retune_wa(struct sdhci_pci_chip *chip, bool susp) | ||
| 805 | { | ||
| 806 | struct sdhci_pci_slot *slot = chip->slots[0]; | ||
| 807 | struct intel_host *intel_host = sdhci_pci_priv(slot); | ||
| 808 | struct sdhci_host *host = slot->host; | ||
| 809 | u32 glk_rx_ctrl1; | ||
| 810 | u32 glk_tun_val; | ||
| 811 | u32 dly; | ||
| 812 | |||
| 813 | if (intel_host->rpm_retune_ok || !mmc_can_retune(host->mmc)) | ||
| 814 | return; | ||
| 815 | |||
| 816 | glk_rx_ctrl1 = sdhci_readl(host, GLK_RX_CTRL1); | ||
| 817 | glk_tun_val = sdhci_readl(host, GLK_TUN_VAL); | ||
| 818 | |||
| 819 | if (susp) { | ||
| 820 | intel_host->glk_rx_ctrl1 = glk_rx_ctrl1; | ||
| 821 | intel_host->glk_tun_val = glk_tun_val; | ||
| 822 | return; | ||
| 823 | } | ||
| 824 | |||
| 825 | if (!intel_host->glk_tun_val) | ||
| 826 | return; | ||
| 827 | |||
| 828 | if (glk_rx_ctrl1 != intel_host->glk_rx_ctrl1) { | ||
| 829 | intel_host->rpm_retune_ok = true; | ||
| 830 | return; | ||
| 831 | } | ||
| 832 | |||
| 833 | dly = FIELD_PREP(GLK_DLY, FIELD_GET(GLK_PATH_PLL, glk_rx_ctrl1) + | ||
| 834 | (intel_host->glk_tun_val << 1)); | ||
| 835 | if (dly == FIELD_GET(GLK_DLY, glk_rx_ctrl1)) | ||
| 836 | return; | ||
| 837 | |||
| 838 | glk_rx_ctrl1 = (glk_rx_ctrl1 & ~GLK_DLY) | dly; | ||
| 839 | sdhci_writel(host, glk_rx_ctrl1, GLK_RX_CTRL1); | ||
| 840 | |||
| 841 | intel_host->rpm_retune_ok = true; | ||
| 842 | chip->rpm_retune = true; | ||
| 843 | mmc_retune_needed(host->mmc); | ||
| 844 | pr_info("%s: Requiring re-tune after rpm resume", mmc_hostname(host->mmc)); | ||
| 845 | } | ||
| 846 | |||
| 847 | static void glk_rpm_retune_chk(struct sdhci_pci_chip *chip, bool susp) | ||
| 848 | { | ||
| 849 | if (chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && | ||
| 850 | !chip->rpm_retune) | ||
| 851 | glk_rpm_retune_wa(chip, susp); | ||
| 852 | } | ||
| 853 | |||
| 854 | static int glk_runtime_suspend(struct sdhci_pci_chip *chip) | ||
| 855 | { | ||
| 856 | glk_rpm_retune_chk(chip, true); | ||
| 857 | |||
| 858 | return sdhci_cqhci_runtime_suspend(chip); | ||
| 859 | } | ||
| 860 | |||
| 861 | static int glk_runtime_resume(struct sdhci_pci_chip *chip) | ||
| 862 | { | ||
| 863 | glk_rpm_retune_chk(chip, false); | ||
| 864 | |||
| 865 | return sdhci_cqhci_runtime_resume(chip); | ||
| 866 | } | ||
| 867 | #endif | ||
| 868 | |||
| 794 | #ifdef CONFIG_ACPI | 869 | #ifdef CONFIG_ACPI |
| 795 | static int ni_set_max_freq(struct sdhci_pci_slot *slot) | 870 | static int ni_set_max_freq(struct sdhci_pci_slot *slot) |
| 796 | { | 871 | { |
| @@ -879,8 +954,8 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = { | |||
| 879 | .resume = sdhci_cqhci_resume, | 954 | .resume = sdhci_cqhci_resume, |
| 880 | #endif | 955 | #endif |
| 881 | #ifdef CONFIG_PM | 956 | #ifdef CONFIG_PM |
| 882 | .runtime_suspend = sdhci_cqhci_runtime_suspend, | 957 | .runtime_suspend = glk_runtime_suspend, |
| 883 | .runtime_resume = sdhci_cqhci_runtime_resume, | 958 | .runtime_resume = glk_runtime_resume, |
| 884 | #endif | 959 | #endif |
| 885 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, | 960 | .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, |
| 886 | .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | | 961 | .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | |
| @@ -1762,8 +1837,13 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( | |||
| 1762 | device_init_wakeup(&pdev->dev, true); | 1837 | device_init_wakeup(&pdev->dev, true); |
| 1763 | 1838 | ||
| 1764 | if (slot->cd_idx >= 0) { | 1839 | if (slot->cd_idx >= 0) { |
| 1765 | ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx, | 1840 | ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx, |
| 1766 | slot->cd_override_level, 0, NULL); | 1841 | slot->cd_override_level, 0, NULL); |
| 1842 | if (ret && ret != -EPROBE_DEFER) | ||
| 1843 | ret = mmc_gpiod_request_cd(host->mmc, NULL, | ||
| 1844 | slot->cd_idx, | ||
| 1845 | slot->cd_override_level, | ||
| 1846 | 0, NULL); | ||
| 1767 | if (ret == -EPROBE_DEFER) | 1847 | if (ret == -EPROBE_DEFER) |
| 1768 | goto remove; | 1848 | goto remove; |
| 1769 | 1849 | ||
diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index fb33f6be7c4f..ad720494e8f7 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c | |||
| @@ -2032,8 +2032,7 @@ atmel_hsmc_nand_controller_legacy_init(struct atmel_hsmc_nand_controller *nc) | |||
| 2032 | int ret; | 2032 | int ret; |
| 2033 | 2033 | ||
| 2034 | nand_np = dev->of_node; | 2034 | nand_np = dev->of_node; |
| 2035 | nfc_np = of_find_compatible_node(dev->of_node, NULL, | 2035 | nfc_np = of_get_compatible_child(dev->of_node, "atmel,sama5d3-nfc"); |
| 2036 | "atmel,sama5d3-nfc"); | ||
| 2037 | if (!nfc_np) { | 2036 | if (!nfc_np) { |
| 2038 | dev_err(dev, "Could not find device node for sama5d3-nfc\n"); | 2037 | dev_err(dev, "Could not find device node for sama5d3-nfc\n"); |
| 2039 | return -ENODEV; | 2038 | return -ENODEV; |
| @@ -2447,15 +2446,19 @@ static int atmel_nand_controller_probe(struct platform_device *pdev) | |||
| 2447 | } | 2446 | } |
| 2448 | 2447 | ||
| 2449 | if (caps->legacy_of_bindings) { | 2448 | if (caps->legacy_of_bindings) { |
| 2449 | struct device_node *nfc_node; | ||
| 2450 | u32 ale_offs = 21; | 2450 | u32 ale_offs = 21; |
| 2451 | 2451 | ||
| 2452 | /* | 2452 | /* |
| 2453 | * If we are parsing legacy DT props and the DT contains a | 2453 | * If we are parsing legacy DT props and the DT contains a |
| 2454 | * valid NFC node, forward the request to the sama5 logic. | 2454 | * valid NFC node, forward the request to the sama5 logic. |
| 2455 | */ | 2455 | */ |
| 2456 | if (of_find_compatible_node(pdev->dev.of_node, NULL, | 2456 | nfc_node = of_get_compatible_child(pdev->dev.of_node, |
| 2457 | "atmel,sama5d3-nfc")) | 2457 | "atmel,sama5d3-nfc"); |
| 2458 | if (nfc_node) { | ||
| 2458 | caps = &atmel_sama5_nand_caps; | 2459 | caps = &atmel_sama5_nand_caps; |
| 2460 | of_node_put(nfc_node); | ||
| 2461 | } | ||
| 2459 | 2462 | ||
| 2460 | /* | 2463 | /* |
| 2461 | * Even if the compatible says we are dealing with an | 2464 | * Even if the compatible says we are dealing with an |
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index ef75dfa62a4f..699d3cf49c6d 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c | |||
| @@ -150,15 +150,15 @@ | |||
| 150 | #define NAND_VERSION_MINOR_SHIFT 16 | 150 | #define NAND_VERSION_MINOR_SHIFT 16 |
| 151 | 151 | ||
| 152 | /* NAND OP_CMDs */ | 152 | /* NAND OP_CMDs */ |
| 153 | #define PAGE_READ 0x2 | 153 | #define OP_PAGE_READ 0x2 |
| 154 | #define PAGE_READ_WITH_ECC 0x3 | 154 | #define OP_PAGE_READ_WITH_ECC 0x3 |
| 155 | #define PAGE_READ_WITH_ECC_SPARE 0x4 | 155 | #define OP_PAGE_READ_WITH_ECC_SPARE 0x4 |
| 156 | #define PROGRAM_PAGE 0x6 | 156 | #define OP_PROGRAM_PAGE 0x6 |
| 157 | #define PAGE_PROGRAM_WITH_ECC 0x7 | 157 | #define OP_PAGE_PROGRAM_WITH_ECC 0x7 |
| 158 | #define PROGRAM_PAGE_SPARE 0x9 | 158 | #define OP_PROGRAM_PAGE_SPARE 0x9 |
| 159 | #define BLOCK_ERASE 0xa | 159 | #define OP_BLOCK_ERASE 0xa |
| 160 | #define FETCH_ID 0xb | 160 | #define OP_FETCH_ID 0xb |
| 161 | #define RESET_DEVICE 0xd | 161 | #define OP_RESET_DEVICE 0xd |
| 162 | 162 | ||
| 163 | /* Default Value for NAND_DEV_CMD_VLD */ | 163 | /* Default Value for NAND_DEV_CMD_VLD */ |
| 164 | #define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \ | 164 | #define NAND_DEV_CMD_VLD_VAL (READ_START_VLD | WRITE_START_VLD | \ |
| @@ -692,11 +692,11 @@ static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read) | |||
| 692 | 692 | ||
| 693 | if (read) { | 693 | if (read) { |
| 694 | if (host->use_ecc) | 694 | if (host->use_ecc) |
| 695 | cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE; | 695 | cmd = OP_PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE; |
| 696 | else | 696 | else |
| 697 | cmd = PAGE_READ | PAGE_ACC | LAST_PAGE; | 697 | cmd = OP_PAGE_READ | PAGE_ACC | LAST_PAGE; |
| 698 | } else { | 698 | } else { |
| 699 | cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE; | 699 | cmd = OP_PROGRAM_PAGE | PAGE_ACC | LAST_PAGE; |
| 700 | } | 700 | } |
| 701 | 701 | ||
| 702 | if (host->use_ecc) { | 702 | if (host->use_ecc) { |
| @@ -1170,7 +1170,7 @@ static int nandc_param(struct qcom_nand_host *host) | |||
| 1170 | * in use. we configure the controller to perform a raw read of 512 | 1170 | * in use. we configure the controller to perform a raw read of 512 |
| 1171 | * bytes to read onfi params | 1171 | * bytes to read onfi params |
| 1172 | */ | 1172 | */ |
| 1173 | nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE); | 1173 | nandc_set_reg(nandc, NAND_FLASH_CMD, OP_PAGE_READ | PAGE_ACC | LAST_PAGE); |
| 1174 | nandc_set_reg(nandc, NAND_ADDR0, 0); | 1174 | nandc_set_reg(nandc, NAND_ADDR0, 0); |
| 1175 | nandc_set_reg(nandc, NAND_ADDR1, 0); | 1175 | nandc_set_reg(nandc, NAND_ADDR1, 0); |
| 1176 | nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE | 1176 | nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE |
| @@ -1224,7 +1224,7 @@ static int erase_block(struct qcom_nand_host *host, int page_addr) | |||
| 1224 | struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); | 1224 | struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); |
| 1225 | 1225 | ||
| 1226 | nandc_set_reg(nandc, NAND_FLASH_CMD, | 1226 | nandc_set_reg(nandc, NAND_FLASH_CMD, |
| 1227 | BLOCK_ERASE | PAGE_ACC | LAST_PAGE); | 1227 | OP_BLOCK_ERASE | PAGE_ACC | LAST_PAGE); |
| 1228 | nandc_set_reg(nandc, NAND_ADDR0, page_addr); | 1228 | nandc_set_reg(nandc, NAND_ADDR0, page_addr); |
| 1229 | nandc_set_reg(nandc, NAND_ADDR1, 0); | 1229 | nandc_set_reg(nandc, NAND_ADDR1, 0); |
| 1230 | nandc_set_reg(nandc, NAND_DEV0_CFG0, | 1230 | nandc_set_reg(nandc, NAND_DEV0_CFG0, |
| @@ -1255,7 +1255,7 @@ static int read_id(struct qcom_nand_host *host, int column) | |||
| 1255 | if (column == -1) | 1255 | if (column == -1) |
| 1256 | return 0; | 1256 | return 0; |
| 1257 | 1257 | ||
| 1258 | nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID); | 1258 | nandc_set_reg(nandc, NAND_FLASH_CMD, OP_FETCH_ID); |
| 1259 | nandc_set_reg(nandc, NAND_ADDR0, column); | 1259 | nandc_set_reg(nandc, NAND_ADDR0, column); |
| 1260 | nandc_set_reg(nandc, NAND_ADDR1, 0); | 1260 | nandc_set_reg(nandc, NAND_ADDR1, 0); |
| 1261 | nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, | 1261 | nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, |
| @@ -1276,7 +1276,7 @@ static int reset(struct qcom_nand_host *host) | |||
| 1276 | struct nand_chip *chip = &host->chip; | 1276 | struct nand_chip *chip = &host->chip; |
| 1277 | struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); | 1277 | struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip); |
| 1278 | 1278 | ||
| 1279 | nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE); | 1279 | nandc_set_reg(nandc, NAND_FLASH_CMD, OP_RESET_DEVICE); |
| 1280 | nandc_set_reg(nandc, NAND_EXEC_CMD, 1); | 1280 | nandc_set_reg(nandc, NAND_EXEC_CMD, 1); |
| 1281 | 1281 | ||
| 1282 | write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); | 1282 | write_reg_dma(nandc, NAND_FLASH_CMD, 1, NAND_BAM_NEXT_SGL); |
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index d846428ef038..04cedd3a2bf6 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c | |||
| @@ -644,9 +644,23 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr, | |||
| 644 | ndelay(cqspi->wr_delay); | 644 | ndelay(cqspi->wr_delay); |
| 645 | 645 | ||
| 646 | while (remaining > 0) { | 646 | while (remaining > 0) { |
| 647 | size_t write_words, mod_bytes; | ||
| 648 | |||
| 647 | write_bytes = remaining > page_size ? page_size : remaining; | 649 | write_bytes = remaining > page_size ? page_size : remaining; |
| 648 | iowrite32_rep(cqspi->ahb_base, txbuf, | 650 | write_words = write_bytes / 4; |
| 649 | DIV_ROUND_UP(write_bytes, 4)); | 651 | mod_bytes = write_bytes % 4; |
| 652 | /* Write 4 bytes at a time then single bytes. */ | ||
| 653 | if (write_words) { | ||
| 654 | iowrite32_rep(cqspi->ahb_base, txbuf, write_words); | ||
| 655 | txbuf += (write_words * 4); | ||
| 656 | } | ||
| 657 | if (mod_bytes) { | ||
| 658 | unsigned int temp = 0xFFFFFFFF; | ||
| 659 | |||
| 660 | memcpy(&temp, txbuf, mod_bytes); | ||
| 661 | iowrite32(temp, cqspi->ahb_base); | ||
| 662 | txbuf += mod_bytes; | ||
| 663 | } | ||
| 650 | 664 | ||
| 651 | if (!wait_for_completion_timeout(&cqspi->transfer_complete, | 665 | if (!wait_for_completion_timeout(&cqspi->transfer_complete, |
| 652 | msecs_to_jiffies(CQSPI_TIMEOUT_MS))) { | 666 | msecs_to_jiffies(CQSPI_TIMEOUT_MS))) { |
| @@ -655,7 +669,6 @@ static int cqspi_indirect_write_execute(struct spi_nor *nor, loff_t to_addr, | |||
| 655 | goto failwr; | 669 | goto failwr; |
| 656 | } | 670 | } |
| 657 | 671 | ||
| 658 | txbuf += write_bytes; | ||
| 659 | remaining -= write_bytes; | 672 | remaining -= write_bytes; |
| 660 | 673 | ||
| 661 | if (remaining > 0) | 674 | if (remaining > 0) |
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 3e54e31889c7..93c9bc8931fc 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c | |||
| @@ -2156,7 +2156,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, | |||
| 2156 | * @nor: pointer to a 'struct spi_nor' | 2156 | * @nor: pointer to a 'struct spi_nor' |
| 2157 | * @addr: offset in the serial flash memory | 2157 | * @addr: offset in the serial flash memory |
| 2158 | * @len: number of bytes to read | 2158 | * @len: number of bytes to read |
| 2159 | * @buf: buffer where the data is copied into | 2159 | * @buf: buffer where the data is copied into (dma-safe memory) |
| 2160 | * | 2160 | * |
| 2161 | * Return: 0 on success, -errno otherwise. | 2161 | * Return: 0 on success, -errno otherwise. |
| 2162 | */ | 2162 | */ |
| @@ -2522,6 +2522,34 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r) | |||
| 2522 | } | 2522 | } |
| 2523 | 2523 | ||
| 2524 | /** | 2524 | /** |
| 2525 | * spi_nor_sort_erase_mask() - sort erase mask | ||
| 2526 | * @map: the erase map of the SPI NOR | ||
| 2527 | * @erase_mask: the erase type mask to be sorted | ||
| 2528 | * | ||
| 2529 | * Replicate the sort done for the map's erase types in BFPT: sort the erase | ||
| 2530 | * mask in ascending order with the smallest erase type size starting from | ||
| 2531 | * BIT(0) in the sorted erase mask. | ||
| 2532 | * | ||
| 2533 | * Return: sorted erase mask. | ||
| 2534 | */ | ||
| 2535 | static u8 spi_nor_sort_erase_mask(struct spi_nor_erase_map *map, u8 erase_mask) | ||
| 2536 | { | ||
| 2537 | struct spi_nor_erase_type *erase_type = map->erase_type; | ||
| 2538 | int i; | ||
| 2539 | u8 sorted_erase_mask = 0; | ||
| 2540 | |||
| 2541 | if (!erase_mask) | ||
| 2542 | return 0; | ||
| 2543 | |||
| 2544 | /* Replicate the sort done for the map's erase types. */ | ||
| 2545 | for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) | ||
| 2546 | if (erase_type[i].size && erase_mask & BIT(erase_type[i].idx)) | ||
| 2547 | sorted_erase_mask |= BIT(i); | ||
| 2548 | |||
| 2549 | return sorted_erase_mask; | ||
| 2550 | } | ||
| 2551 | |||
| 2552 | /** | ||
| 2525 | * spi_nor_regions_sort_erase_types() - sort erase types in each region | 2553 | * spi_nor_regions_sort_erase_types() - sort erase types in each region |
| 2526 | * @map: the erase map of the SPI NOR | 2554 | * @map: the erase map of the SPI NOR |
| 2527 | * | 2555 | * |
| @@ -2536,19 +2564,13 @@ static int spi_nor_map_cmp_erase_type(const void *l, const void *r) | |||
| 2536 | static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map) | 2564 | static void spi_nor_regions_sort_erase_types(struct spi_nor_erase_map *map) |
| 2537 | { | 2565 | { |
| 2538 | struct spi_nor_erase_region *region = map->regions; | 2566 | struct spi_nor_erase_region *region = map->regions; |
| 2539 | struct spi_nor_erase_type *erase_type = map->erase_type; | ||
| 2540 | int i; | ||
| 2541 | u8 region_erase_mask, sorted_erase_mask; | 2567 | u8 region_erase_mask, sorted_erase_mask; |
| 2542 | 2568 | ||
| 2543 | while (region) { | 2569 | while (region) { |
| 2544 | region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK; | 2570 | region_erase_mask = region->offset & SNOR_ERASE_TYPE_MASK; |
| 2545 | 2571 | ||
| 2546 | /* Replicate the sort done for the map's erase types. */ | 2572 | sorted_erase_mask = spi_nor_sort_erase_mask(map, |
| 2547 | sorted_erase_mask = 0; | 2573 | region_erase_mask); |
| 2548 | for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) | ||
| 2549 | if (erase_type[i].size && | ||
| 2550 | region_erase_mask & BIT(erase_type[i].idx)) | ||
| 2551 | sorted_erase_mask |= BIT(i); | ||
| 2552 | 2574 | ||
| 2553 | /* Overwrite erase mask. */ | 2575 | /* Overwrite erase mask. */ |
| 2554 | region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) | | 2576 | region->offset = (region->offset & ~SNOR_ERASE_TYPE_MASK) | |
| @@ -2855,52 +2877,84 @@ static u8 spi_nor_smpt_read_dummy(const struct spi_nor *nor, const u32 settings) | |||
| 2855 | * spi_nor_get_map_in_use() - get the configuration map in use | 2877 | * spi_nor_get_map_in_use() - get the configuration map in use |
| 2856 | * @nor: pointer to a 'struct spi_nor' | 2878 | * @nor: pointer to a 'struct spi_nor' |
| 2857 | * @smpt: pointer to the sector map parameter table | 2879 | * @smpt: pointer to the sector map parameter table |
| 2880 | * @smpt_len: sector map parameter table length | ||
| 2881 | * | ||
| 2882 | * Return: pointer to the map in use, ERR_PTR(-errno) otherwise. | ||
| 2858 | */ | 2883 | */ |
| 2859 | static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt) | 2884 | static const u32 *spi_nor_get_map_in_use(struct spi_nor *nor, const u32 *smpt, |
| 2885 | u8 smpt_len) | ||
| 2860 | { | 2886 | { |
| 2861 | const u32 *ret = NULL; | 2887 | const u32 *ret; |
| 2862 | u32 i, addr; | 2888 | u8 *buf; |
| 2889 | u32 addr; | ||
| 2863 | int err; | 2890 | int err; |
| 2891 | u8 i; | ||
| 2864 | u8 addr_width, read_opcode, read_dummy; | 2892 | u8 addr_width, read_opcode, read_dummy; |
| 2865 | u8 read_data_mask, data_byte, map_id; | 2893 | u8 read_data_mask, map_id; |
| 2894 | |||
| 2895 | /* Use a kmalloc'ed bounce buffer to guarantee it is DMA-able. */ | ||
| 2896 | buf = kmalloc(sizeof(*buf), GFP_KERNEL); | ||
| 2897 | if (!buf) | ||
| 2898 | return ERR_PTR(-ENOMEM); | ||
| 2866 | 2899 | ||
| 2867 | addr_width = nor->addr_width; | 2900 | addr_width = nor->addr_width; |
| 2868 | read_dummy = nor->read_dummy; | 2901 | read_dummy = nor->read_dummy; |
| 2869 | read_opcode = nor->read_opcode; | 2902 | read_opcode = nor->read_opcode; |
| 2870 | 2903 | ||
| 2871 | map_id = 0; | 2904 | map_id = 0; |
| 2872 | i = 0; | ||
| 2873 | /* Determine if there are any optional Detection Command Descriptors */ | 2905 | /* Determine if there are any optional Detection Command Descriptors */ |
| 2874 | while (!(smpt[i] & SMPT_DESC_TYPE_MAP)) { | 2906 | for (i = 0; i < smpt_len; i += 2) { |
| 2907 | if (smpt[i] & SMPT_DESC_TYPE_MAP) | ||
| 2908 | break; | ||
| 2909 | |||
| 2875 | read_data_mask = SMPT_CMD_READ_DATA(smpt[i]); | 2910 | read_data_mask = SMPT_CMD_READ_DATA(smpt[i]); |
| 2876 | nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]); | 2911 | nor->addr_width = spi_nor_smpt_addr_width(nor, smpt[i]); |
| 2877 | nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]); | 2912 | nor->read_dummy = spi_nor_smpt_read_dummy(nor, smpt[i]); |
| 2878 | nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]); | 2913 | nor->read_opcode = SMPT_CMD_OPCODE(smpt[i]); |
| 2879 | addr = smpt[i + 1]; | 2914 | addr = smpt[i + 1]; |
| 2880 | 2915 | ||
| 2881 | err = spi_nor_read_raw(nor, addr, 1, &data_byte); | 2916 | err = spi_nor_read_raw(nor, addr, 1, buf); |
| 2882 | if (err) | 2917 | if (err) { |
| 2918 | ret = ERR_PTR(err); | ||
| 2883 | goto out; | 2919 | goto out; |
| 2920 | } | ||
| 2884 | 2921 | ||
| 2885 | /* | 2922 | /* |
| 2886 | * Build an index value that is used to select the Sector Map | 2923 | * Build an index value that is used to select the Sector Map |
| 2887 | * Configuration that is currently in use. | 2924 | * Configuration that is currently in use. |
| 2888 | */ | 2925 | */ |
| 2889 | map_id = map_id << 1 | !!(data_byte & read_data_mask); | 2926 | map_id = map_id << 1 | !!(*buf & read_data_mask); |
| 2890 | i = i + 2; | ||
| 2891 | } | 2927 | } |
| 2892 | 2928 | ||
| 2893 | /* Find the matching configuration map */ | 2929 | /* |
| 2894 | while (SMPT_MAP_ID(smpt[i]) != map_id) { | 2930 | * If command descriptors are provided, they always precede map |
| 2931 | * descriptors in the table. There is no need to start the iteration | ||
| 2932 | * over smpt array all over again. | ||
| 2933 | * | ||
| 2934 | * Find the matching configuration map. | ||
| 2935 | */ | ||
| 2936 | ret = ERR_PTR(-EINVAL); | ||
| 2937 | while (i < smpt_len) { | ||
| 2938 | if (SMPT_MAP_ID(smpt[i]) == map_id) { | ||
| 2939 | ret = smpt + i; | ||
| 2940 | break; | ||
| 2941 | } | ||
| 2942 | |||
| 2943 | /* | ||
| 2944 | * If there are no more configuration map descriptors and no | ||
| 2945 | * configuration ID matched the configuration identifier, the | ||
| 2946 | * sector address map is unknown. | ||
| 2947 | */ | ||
| 2895 | if (smpt[i] & SMPT_DESC_END) | 2948 | if (smpt[i] & SMPT_DESC_END) |
| 2896 | goto out; | 2949 | break; |
| 2950 | |||
| 2897 | /* increment the table index to the next map */ | 2951 | /* increment the table index to the next map */ |
| 2898 | i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1; | 2952 | i += SMPT_MAP_REGION_COUNT(smpt[i]) + 1; |
| 2899 | } | 2953 | } |
| 2900 | 2954 | ||
| 2901 | ret = smpt + i; | ||
| 2902 | /* fall through */ | 2955 | /* fall through */ |
| 2903 | out: | 2956 | out: |
| 2957 | kfree(buf); | ||
| 2904 | nor->addr_width = addr_width; | 2958 | nor->addr_width = addr_width; |
| 2905 | nor->read_dummy = read_dummy; | 2959 | nor->read_dummy = read_dummy; |
| 2906 | nor->read_opcode = read_opcode; | 2960 | nor->read_opcode = read_opcode; |
| @@ -2946,7 +3000,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor, | |||
| 2946 | u64 offset; | 3000 | u64 offset; |
| 2947 | u32 region_count; | 3001 | u32 region_count; |
| 2948 | int i, j; | 3002 | int i, j; |
| 2949 | u8 erase_type; | 3003 | u8 erase_type, uniform_erase_type; |
| 2950 | 3004 | ||
| 2951 | region_count = SMPT_MAP_REGION_COUNT(*smpt); | 3005 | region_count = SMPT_MAP_REGION_COUNT(*smpt); |
| 2952 | /* | 3006 | /* |
| @@ -2959,7 +3013,7 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor, | |||
| 2959 | return -ENOMEM; | 3013 | return -ENOMEM; |
| 2960 | map->regions = region; | 3014 | map->regions = region; |
| 2961 | 3015 | ||
| 2962 | map->uniform_erase_type = 0xff; | 3016 | uniform_erase_type = 0xff; |
| 2963 | offset = 0; | 3017 | offset = 0; |
| 2964 | /* Populate regions. */ | 3018 | /* Populate regions. */ |
| 2965 | for (i = 0; i < region_count; i++) { | 3019 | for (i = 0; i < region_count; i++) { |
| @@ -2974,12 +3028,15 @@ static int spi_nor_init_non_uniform_erase_map(struct spi_nor *nor, | |||
| 2974 | * Save the erase types that are supported in all regions and | 3028 | * Save the erase types that are supported in all regions and |
| 2975 | * can erase the entire flash memory. | 3029 | * can erase the entire flash memory. |
| 2976 | */ | 3030 | */ |
| 2977 | map->uniform_erase_type &= erase_type; | 3031 | uniform_erase_type &= erase_type; |
| 2978 | 3032 | ||
| 2979 | offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) + | 3033 | offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) + |
| 2980 | region[i].size; | 3034 | region[i].size; |
| 2981 | } | 3035 | } |
| 2982 | 3036 | ||
| 3037 | map->uniform_erase_type = spi_nor_sort_erase_mask(map, | ||
| 3038 | uniform_erase_type); | ||
| 3039 | |||
| 2983 | spi_nor_region_mark_end(®ion[i - 1]); | 3040 | spi_nor_region_mark_end(®ion[i - 1]); |
| 2984 | 3041 | ||
| 2985 | return 0; | 3042 | return 0; |
| @@ -3020,9 +3077,9 @@ static int spi_nor_parse_smpt(struct spi_nor *nor, | |||
| 3020 | for (i = 0; i < smpt_header->length; i++) | 3077 | for (i = 0; i < smpt_header->length; i++) |
| 3021 | smpt[i] = le32_to_cpu(smpt[i]); | 3078 | smpt[i] = le32_to_cpu(smpt[i]); |
| 3022 | 3079 | ||
| 3023 | sector_map = spi_nor_get_map_in_use(nor, smpt); | 3080 | sector_map = spi_nor_get_map_in_use(nor, smpt, smpt_header->length); |
| 3024 | if (!sector_map) { | 3081 | if (IS_ERR(sector_map)) { |
| 3025 | ret = -EINVAL; | 3082 | ret = PTR_ERR(sector_map); |
| 3026 | goto out; | 3083 | goto out; |
| 3027 | } | 3084 | } |
| 3028 | 3085 | ||
| @@ -3125,7 +3182,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor, | |||
| 3125 | if (err) | 3182 | if (err) |
| 3126 | goto exit; | 3183 | goto exit; |
| 3127 | 3184 | ||
| 3128 | /* Parse other parameter headers. */ | 3185 | /* Parse optional parameter tables. */ |
| 3129 | for (i = 0; i < header.nph; i++) { | 3186 | for (i = 0; i < header.nph; i++) { |
| 3130 | param_header = ¶m_headers[i]; | 3187 | param_header = ¶m_headers[i]; |
| 3131 | 3188 | ||
| @@ -3138,8 +3195,17 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor, | |||
| 3138 | break; | 3195 | break; |
| 3139 | } | 3196 | } |
| 3140 | 3197 | ||
| 3141 | if (err) | 3198 | if (err) { |
| 3142 | goto exit; | 3199 | dev_warn(dev, "Failed to parse optional parameter table: %04x\n", |
| 3200 | SFDP_PARAM_HEADER_ID(param_header)); | ||
| 3201 | /* | ||
| 3202 | * Let's not drop all information we extracted so far | ||
| 3203 | * if optional table parsers fail. In case of failing, | ||
| 3204 | * each optional parser is responsible to roll back to | ||
| 3205 | * the previously known spi_nor data. | ||
| 3206 | */ | ||
| 3207 | err = 0; | ||
| 3208 | } | ||
| 3143 | } | 3209 | } |
| 3144 | 3210 | ||
| 3145 | exit: | 3211 | exit: |
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 18956e7604a3..a70bb1bb90e7 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c | |||
| @@ -1848,6 +1848,8 @@ static void ena_down(struct ena_adapter *adapter) | |||
| 1848 | rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); | 1848 | rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); |
| 1849 | if (rc) | 1849 | if (rc) |
| 1850 | dev_err(&adapter->pdev->dev, "Device reset failed\n"); | 1850 | dev_err(&adapter->pdev->dev, "Device reset failed\n"); |
| 1851 | /* stop submitting admin commands on a device that was reset */ | ||
| 1852 | ena_com_set_admin_running_state(adapter->ena_dev, false); | ||
| 1851 | } | 1853 | } |
| 1852 | 1854 | ||
| 1853 | ena_destroy_all_io_queues(adapter); | 1855 | ena_destroy_all_io_queues(adapter); |
| @@ -1914,6 +1916,9 @@ static int ena_close(struct net_device *netdev) | |||
| 1914 | 1916 | ||
| 1915 | netif_dbg(adapter, ifdown, netdev, "%s\n", __func__); | 1917 | netif_dbg(adapter, ifdown, netdev, "%s\n", __func__); |
| 1916 | 1918 | ||
| 1919 | if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) | ||
| 1920 | return 0; | ||
| 1921 | |||
| 1917 | if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) | 1922 | if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags)) |
| 1918 | ena_down(adapter); | 1923 | ena_down(adapter); |
| 1919 | 1924 | ||
| @@ -2613,9 +2618,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful) | |||
| 2613 | ena_down(adapter); | 2618 | ena_down(adapter); |
| 2614 | 2619 | ||
| 2615 | /* Stop the device from sending AENQ events (in case reset flag is set | 2620 | /* Stop the device from sending AENQ events (in case reset flag is set |
| 2616 | * and device is up, ena_close already reset the device | 2621 | * and device is up, ena_down() already reset the device. |
| 2617 | * In case the reset flag is set and the device is up, ena_down() | ||
| 2618 | * already perform the reset, so it can be skipped. | ||
| 2619 | */ | 2622 | */ |
| 2620 | if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) | 2623 | if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up)) |
| 2621 | ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); | 2624 | ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason); |
| @@ -2694,8 +2697,8 @@ err_device_destroy: | |||
| 2694 | ena_com_abort_admin_commands(ena_dev); | 2697 | ena_com_abort_admin_commands(ena_dev); |
| 2695 | ena_com_wait_for_abort_completion(ena_dev); | 2698 | ena_com_wait_for_abort_completion(ena_dev); |
| 2696 | ena_com_admin_destroy(ena_dev); | 2699 | ena_com_admin_destroy(ena_dev); |
| 2697 | ena_com_mmio_reg_read_request_destroy(ena_dev); | ||
| 2698 | ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); | 2700 | ena_com_dev_reset(ena_dev, ENA_REGS_RESET_DRIVER_INVALID_STATE); |
| 2701 | ena_com_mmio_reg_read_request_destroy(ena_dev); | ||
| 2699 | err: | 2702 | err: |
| 2700 | clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); | 2703 | clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
| 2701 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); | 2704 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); |
| @@ -3452,6 +3455,8 @@ err_rss: | |||
| 3452 | ena_com_rss_destroy(ena_dev); | 3455 | ena_com_rss_destroy(ena_dev); |
| 3453 | err_free_msix: | 3456 | err_free_msix: |
| 3454 | ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR); | 3457 | ena_com_dev_reset(ena_dev, ENA_REGS_RESET_INIT_ERR); |
| 3458 | /* stop submitting admin commands on a device that was reset */ | ||
| 3459 | ena_com_set_admin_running_state(ena_dev, false); | ||
| 3455 | ena_free_mgmnt_irq(adapter); | 3460 | ena_free_mgmnt_irq(adapter); |
| 3456 | ena_disable_msix(adapter); | 3461 | ena_disable_msix(adapter); |
| 3457 | err_worker_destroy: | 3462 | err_worker_destroy: |
| @@ -3498,18 +3503,12 @@ static void ena_remove(struct pci_dev *pdev) | |||
| 3498 | 3503 | ||
| 3499 | cancel_work_sync(&adapter->reset_task); | 3504 | cancel_work_sync(&adapter->reset_task); |
| 3500 | 3505 | ||
| 3501 | unregister_netdev(netdev); | ||
| 3502 | |||
| 3503 | /* If the device is running then we want to make sure the device will be | ||
| 3504 | * reset to make sure no more events will be issued by the device. | ||
| 3505 | */ | ||
| 3506 | if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags)) | ||
| 3507 | set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags); | ||
| 3508 | |||
| 3509 | rtnl_lock(); | 3506 | rtnl_lock(); |
| 3510 | ena_destroy_device(adapter, true); | 3507 | ena_destroy_device(adapter, true); |
| 3511 | rtnl_unlock(); | 3508 | rtnl_unlock(); |
| 3512 | 3509 | ||
| 3510 | unregister_netdev(netdev); | ||
| 3511 | |||
| 3513 | free_netdev(netdev); | 3512 | free_netdev(netdev); |
| 3514 | 3513 | ||
| 3515 | ena_com_rss_destroy(ena_dev); | 3514 | ena_com_rss_destroy(ena_dev); |
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index 521873642339..dc8b6173d8d8 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h | |||
| @@ -45,7 +45,7 @@ | |||
| 45 | 45 | ||
| 46 | #define DRV_MODULE_VER_MAJOR 2 | 46 | #define DRV_MODULE_VER_MAJOR 2 |
| 47 | #define DRV_MODULE_VER_MINOR 0 | 47 | #define DRV_MODULE_VER_MINOR 0 |
| 48 | #define DRV_MODULE_VER_SUBMINOR 1 | 48 | #define DRV_MODULE_VER_SUBMINOR 2 |
| 49 | 49 | ||
| 50 | #define DRV_MODULE_NAME "ena" | 50 | #define DRV_MODULE_NAME "ena" |
| 51 | #ifndef DRV_MODULE_VERSION | 51 | #ifndef DRV_MODULE_VERSION |
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index b4fc0ed5bce8..9d4899826823 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c | |||
| @@ -1419,7 +1419,7 @@ static int sparc_lance_probe_one(struct platform_device *op, | |||
| 1419 | 1419 | ||
| 1420 | prop = of_get_property(nd, "tpe-link-test?", NULL); | 1420 | prop = of_get_property(nd, "tpe-link-test?", NULL); |
| 1421 | if (!prop) | 1421 | if (!prop) |
| 1422 | goto no_link_test; | 1422 | goto node_put; |
| 1423 | 1423 | ||
| 1424 | if (strcmp(prop, "true")) { | 1424 | if (strcmp(prop, "true")) { |
| 1425 | printk(KERN_NOTICE "SunLance: warning: overriding option " | 1425 | printk(KERN_NOTICE "SunLance: warning: overriding option " |
| @@ -1428,6 +1428,8 @@ static int sparc_lance_probe_one(struct platform_device *op, | |||
| 1428 | "to ecd@skynet.be\n"); | 1428 | "to ecd@skynet.be\n"); |
| 1429 | auxio_set_lte(AUXIO_LTE_ON); | 1429 | auxio_set_lte(AUXIO_LTE_ON); |
| 1430 | } | 1430 | } |
| 1431 | node_put: | ||
| 1432 | of_node_put(nd); | ||
| 1431 | no_link_test: | 1433 | no_link_test: |
| 1432 | lp->auto_select = 1; | 1434 | lp->auto_select = 1; |
| 1433 | lp->tpe = 0; | 1435 | lp->tpe = 0; |
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index dc155c692c40..3b1397af81f7 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
| @@ -12434,6 +12434,7 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e | |||
| 12434 | { | 12434 | { |
| 12435 | struct tg3 *tp = netdev_priv(dev); | 12435 | struct tg3 *tp = netdev_priv(dev); |
| 12436 | int i, irq_sync = 0, err = 0; | 12436 | int i, irq_sync = 0, err = 0; |
| 12437 | bool reset_phy = false; | ||
| 12437 | 12438 | ||
| 12438 | if ((ering->rx_pending > tp->rx_std_ring_mask) || | 12439 | if ((ering->rx_pending > tp->rx_std_ring_mask) || |
| 12439 | (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || | 12440 | (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) || |
| @@ -12465,7 +12466,13 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e | |||
| 12465 | 12466 | ||
| 12466 | if (netif_running(dev)) { | 12467 | if (netif_running(dev)) { |
| 12467 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 12468 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
| 12468 | err = tg3_restart_hw(tp, false); | 12469 | /* Reset PHY to avoid PHY lock up */ |
| 12470 | if (tg3_asic_rev(tp) == ASIC_REV_5717 || | ||
| 12471 | tg3_asic_rev(tp) == ASIC_REV_5719 || | ||
| 12472 | tg3_asic_rev(tp) == ASIC_REV_5720) | ||
| 12473 | reset_phy = true; | ||
| 12474 | |||
| 12475 | err = tg3_restart_hw(tp, reset_phy); | ||
| 12469 | if (!err) | 12476 | if (!err) |
| 12470 | tg3_netif_start(tp); | 12477 | tg3_netif_start(tp); |
| 12471 | } | 12478 | } |
| @@ -12499,6 +12506,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
| 12499 | { | 12506 | { |
| 12500 | struct tg3 *tp = netdev_priv(dev); | 12507 | struct tg3 *tp = netdev_priv(dev); |
| 12501 | int err = 0; | 12508 | int err = 0; |
| 12509 | bool reset_phy = false; | ||
| 12502 | 12510 | ||
| 12503 | if (tp->link_config.autoneg == AUTONEG_ENABLE) | 12511 | if (tp->link_config.autoneg == AUTONEG_ENABLE) |
| 12504 | tg3_warn_mgmt_link_flap(tp); | 12512 | tg3_warn_mgmt_link_flap(tp); |
| @@ -12568,7 +12576,13 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam | |||
| 12568 | 12576 | ||
| 12569 | if (netif_running(dev)) { | 12577 | if (netif_running(dev)) { |
| 12570 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); | 12578 | tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); |
| 12571 | err = tg3_restart_hw(tp, false); | 12579 | /* Reset PHY to avoid PHY lock up */ |
| 12580 | if (tg3_asic_rev(tp) == ASIC_REV_5717 || | ||
| 12581 | tg3_asic_rev(tp) == ASIC_REV_5719 || | ||
| 12582 | tg3_asic_rev(tp) == ASIC_REV_5720) | ||
| 12583 | reset_phy = true; | ||
| 12584 | |||
| 12585 | err = tg3_restart_hw(tp, reset_phy); | ||
| 12572 | if (!err) | 12586 | if (!err) |
| 12573 | tg3_netif_start(tp); | 12587 | tg3_netif_start(tp); |
| 12574 | } | 12588 | } |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 768f584f8392..88f8a8fa93cd 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
| @@ -1784,6 +1784,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) | |||
| 1784 | bool if_up = netif_running(nic->netdev); | 1784 | bool if_up = netif_running(nic->netdev); |
| 1785 | struct bpf_prog *old_prog; | 1785 | struct bpf_prog *old_prog; |
| 1786 | bool bpf_attached = false; | 1786 | bool bpf_attached = false; |
| 1787 | int ret = 0; | ||
| 1787 | 1788 | ||
| 1788 | /* For now just support only the usual MTU sized frames */ | 1789 | /* For now just support only the usual MTU sized frames */ |
| 1789 | if (prog && (dev->mtu > 1500)) { | 1790 | if (prog && (dev->mtu > 1500)) { |
| @@ -1817,8 +1818,12 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) | |||
| 1817 | if (nic->xdp_prog) { | 1818 | if (nic->xdp_prog) { |
| 1818 | /* Attach BPF program */ | 1819 | /* Attach BPF program */ |
| 1819 | nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1); | 1820 | nic->xdp_prog = bpf_prog_add(nic->xdp_prog, nic->rx_queues - 1); |
| 1820 | if (!IS_ERR(nic->xdp_prog)) | 1821 | if (!IS_ERR(nic->xdp_prog)) { |
| 1821 | bpf_attached = true; | 1822 | bpf_attached = true; |
| 1823 | } else { | ||
| 1824 | ret = PTR_ERR(nic->xdp_prog); | ||
| 1825 | nic->xdp_prog = NULL; | ||
| 1826 | } | ||
| 1822 | } | 1827 | } |
| 1823 | 1828 | ||
| 1824 | /* Calculate Tx queues needed for XDP and network stack */ | 1829 | /* Calculate Tx queues needed for XDP and network stack */ |
| @@ -1830,7 +1835,7 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog) | |||
| 1830 | netif_trans_update(nic->netdev); | 1835 | netif_trans_update(nic->netdev); |
| 1831 | } | 1836 | } |
| 1832 | 1837 | ||
| 1833 | return 0; | 1838 | return ret; |
| 1834 | } | 1839 | } |
| 1835 | 1840 | ||
| 1836 | static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) | 1841 | static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp) |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index 187a249ff2d1..fcaf18fa3904 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c | |||
| @@ -585,10 +585,12 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) | |||
| 585 | if (!sq->dmem.base) | 585 | if (!sq->dmem.base) |
| 586 | return; | 586 | return; |
| 587 | 587 | ||
| 588 | if (sq->tso_hdrs) | 588 | if (sq->tso_hdrs) { |
| 589 | dma_free_coherent(&nic->pdev->dev, | 589 | dma_free_coherent(&nic->pdev->dev, |
| 590 | sq->dmem.q_len * TSO_HEADER_SIZE, | 590 | sq->dmem.q_len * TSO_HEADER_SIZE, |
| 591 | sq->tso_hdrs, sq->tso_hdrs_phys); | 591 | sq->tso_hdrs, sq->tso_hdrs_phys); |
| 592 | sq->tso_hdrs = NULL; | ||
| 593 | } | ||
| 592 | 594 | ||
| 593 | /* Free pending skbs in the queue */ | 595 | /* Free pending skbs in the queue */ |
| 594 | smp_rmb(); | 596 | smp_rmb(); |
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index ceec467f590d..949103db8a8a 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c | |||
| @@ -660,7 +660,7 @@ static void gmac_clean_txq(struct net_device *netdev, struct gmac_txq *txq, | |||
| 660 | 660 | ||
| 661 | u64_stats_update_begin(&port->tx_stats_syncp); | 661 | u64_stats_update_begin(&port->tx_stats_syncp); |
| 662 | port->tx_frag_stats[nfrags]++; | 662 | port->tx_frag_stats[nfrags]++; |
| 663 | u64_stats_update_end(&port->ir_stats_syncp); | 663 | u64_stats_update_end(&port->tx_stats_syncp); |
| 664 | } | 664 | } |
| 665 | } | 665 | } |
| 666 | 666 | ||
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 570caeb8ee9e..084f24daf2b5 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c | |||
| @@ -872,11 +872,10 @@ static irqreturn_t ftmac100_interrupt(int irq, void *dev_id) | |||
| 872 | struct net_device *netdev = dev_id; | 872 | struct net_device *netdev = dev_id; |
| 873 | struct ftmac100 *priv = netdev_priv(netdev); | 873 | struct ftmac100 *priv = netdev_priv(netdev); |
| 874 | 874 | ||
| 875 | if (likely(netif_running(netdev))) { | 875 | /* Disable interrupts for polling */ |
| 876 | /* Disable interrupts for polling */ | 876 | ftmac100_disable_all_int(priv); |
| 877 | ftmac100_disable_all_int(priv); | 877 | if (likely(netif_running(netdev))) |
| 878 | napi_schedule(&priv->napi); | 878 | napi_schedule(&priv->napi); |
| 879 | } | ||
| 880 | 879 | ||
| 881 | return IRQ_HANDLED; | 880 | return IRQ_HANDLED; |
| 882 | } | 881 | } |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index c9d5d0a7fbf1..c0203a0d5e3b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
| @@ -485,8 +485,8 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter) | |||
| 485 | 485 | ||
| 486 | for (j = 0; j < rx_pool->size; j++) { | 486 | for (j = 0; j < rx_pool->size; j++) { |
| 487 | if (rx_pool->rx_buff[j].skb) { | 487 | if (rx_pool->rx_buff[j].skb) { |
| 488 | dev_kfree_skb_any(rx_pool->rx_buff[i].skb); | 488 | dev_kfree_skb_any(rx_pool->rx_buff[j].skb); |
| 489 | rx_pool->rx_buff[i].skb = NULL; | 489 | rx_pool->rx_buff[j].skb = NULL; |
| 490 | } | 490 | } |
| 491 | } | 491 | } |
| 492 | 492 | ||
| @@ -1103,20 +1103,15 @@ static int ibmvnic_open(struct net_device *netdev) | |||
| 1103 | return 0; | 1103 | return 0; |
| 1104 | } | 1104 | } |
| 1105 | 1105 | ||
| 1106 | mutex_lock(&adapter->reset_lock); | ||
| 1107 | |||
| 1108 | if (adapter->state != VNIC_CLOSED) { | 1106 | if (adapter->state != VNIC_CLOSED) { |
| 1109 | rc = ibmvnic_login(netdev); | 1107 | rc = ibmvnic_login(netdev); |
| 1110 | if (rc) { | 1108 | if (rc) |
| 1111 | mutex_unlock(&adapter->reset_lock); | ||
| 1112 | return rc; | 1109 | return rc; |
| 1113 | } | ||
| 1114 | 1110 | ||
| 1115 | rc = init_resources(adapter); | 1111 | rc = init_resources(adapter); |
| 1116 | if (rc) { | 1112 | if (rc) { |
| 1117 | netdev_err(netdev, "failed to initialize resources\n"); | 1113 | netdev_err(netdev, "failed to initialize resources\n"); |
| 1118 | release_resources(adapter); | 1114 | release_resources(adapter); |
| 1119 | mutex_unlock(&adapter->reset_lock); | ||
| 1120 | return rc; | 1115 | return rc; |
| 1121 | } | 1116 | } |
| 1122 | } | 1117 | } |
| @@ -1124,8 +1119,6 @@ static int ibmvnic_open(struct net_device *netdev) | |||
| 1124 | rc = __ibmvnic_open(netdev); | 1119 | rc = __ibmvnic_open(netdev); |
| 1125 | netif_carrier_on(netdev); | 1120 | netif_carrier_on(netdev); |
| 1126 | 1121 | ||
| 1127 | mutex_unlock(&adapter->reset_lock); | ||
| 1128 | |||
| 1129 | return rc; | 1122 | return rc; |
| 1130 | } | 1123 | } |
| 1131 | 1124 | ||
| @@ -1269,10 +1262,8 @@ static int ibmvnic_close(struct net_device *netdev) | |||
| 1269 | return 0; | 1262 | return 0; |
| 1270 | } | 1263 | } |
| 1271 | 1264 | ||
| 1272 | mutex_lock(&adapter->reset_lock); | ||
| 1273 | rc = __ibmvnic_close(netdev); | 1265 | rc = __ibmvnic_close(netdev); |
| 1274 | ibmvnic_cleanup(netdev); | 1266 | ibmvnic_cleanup(netdev); |
| 1275 | mutex_unlock(&adapter->reset_lock); | ||
| 1276 | 1267 | ||
| 1277 | return rc; | 1268 | return rc; |
| 1278 | } | 1269 | } |
| @@ -1746,6 +1737,7 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
| 1746 | struct ibmvnic_rwi *rwi, u32 reset_state) | 1737 | struct ibmvnic_rwi *rwi, u32 reset_state) |
| 1747 | { | 1738 | { |
| 1748 | u64 old_num_rx_queues, old_num_tx_queues; | 1739 | u64 old_num_rx_queues, old_num_tx_queues; |
| 1740 | u64 old_num_rx_slots, old_num_tx_slots; | ||
| 1749 | struct net_device *netdev = adapter->netdev; | 1741 | struct net_device *netdev = adapter->netdev; |
| 1750 | int i, rc; | 1742 | int i, rc; |
| 1751 | 1743 | ||
| @@ -1757,6 +1749,8 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
| 1757 | 1749 | ||
| 1758 | old_num_rx_queues = adapter->req_rx_queues; | 1750 | old_num_rx_queues = adapter->req_rx_queues; |
| 1759 | old_num_tx_queues = adapter->req_tx_queues; | 1751 | old_num_tx_queues = adapter->req_tx_queues; |
| 1752 | old_num_rx_slots = adapter->req_rx_add_entries_per_subcrq; | ||
| 1753 | old_num_tx_slots = adapter->req_tx_entries_per_subcrq; | ||
| 1760 | 1754 | ||
| 1761 | ibmvnic_cleanup(netdev); | 1755 | ibmvnic_cleanup(netdev); |
| 1762 | 1756 | ||
| @@ -1819,21 +1813,20 @@ static int do_reset(struct ibmvnic_adapter *adapter, | |||
| 1819 | if (rc) | 1813 | if (rc) |
| 1820 | return rc; | 1814 | return rc; |
| 1821 | } else if (adapter->req_rx_queues != old_num_rx_queues || | 1815 | } else if (adapter->req_rx_queues != old_num_rx_queues || |
| 1822 | adapter->req_tx_queues != old_num_tx_queues) { | 1816 | adapter->req_tx_queues != old_num_tx_queues || |
| 1823 | adapter->map_id = 1; | 1817 | adapter->req_rx_add_entries_per_subcrq != |
| 1818 | old_num_rx_slots || | ||
| 1819 | adapter->req_tx_entries_per_subcrq != | ||
| 1820 | old_num_tx_slots) { | ||
| 1824 | release_rx_pools(adapter); | 1821 | release_rx_pools(adapter); |
| 1825 | release_tx_pools(adapter); | 1822 | release_tx_pools(adapter); |
| 1826 | rc = init_rx_pools(netdev); | ||
| 1827 | if (rc) | ||
| 1828 | return rc; | ||
| 1829 | rc = init_tx_pools(netdev); | ||
| 1830 | if (rc) | ||
| 1831 | return rc; | ||
| 1832 | |||
| 1833 | release_napi(adapter); | 1823 | release_napi(adapter); |
| 1834 | rc = init_napi(adapter); | 1824 | release_vpd_data(adapter); |
| 1825 | |||
| 1826 | rc = init_resources(adapter); | ||
| 1835 | if (rc) | 1827 | if (rc) |
| 1836 | return rc; | 1828 | return rc; |
| 1829 | |||
| 1837 | } else { | 1830 | } else { |
| 1838 | rc = reset_tx_pools(adapter); | 1831 | rc = reset_tx_pools(adapter); |
| 1839 | if (rc) | 1832 | if (rc) |
| @@ -1917,17 +1910,8 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter, | |||
| 1917 | adapter->state = VNIC_PROBED; | 1910 | adapter->state = VNIC_PROBED; |
| 1918 | return 0; | 1911 | return 0; |
| 1919 | } | 1912 | } |
| 1920 | /* netif_set_real_num_xx_queues needs to take rtnl lock here | 1913 | |
| 1921 | * unless wait_for_reset is set, in which case the rtnl lock | 1914 | rc = init_resources(adapter); |
| 1922 | * has already been taken before initializing the reset | ||
| 1923 | */ | ||
| 1924 | if (!adapter->wait_for_reset) { | ||
| 1925 | rtnl_lock(); | ||
| 1926 | rc = init_resources(adapter); | ||
| 1927 | rtnl_unlock(); | ||
| 1928 | } else { | ||
| 1929 | rc = init_resources(adapter); | ||
| 1930 | } | ||
| 1931 | if (rc) | 1915 | if (rc) |
| 1932 | return rc; | 1916 | return rc; |
| 1933 | 1917 | ||
| @@ -1986,13 +1970,21 @@ static void __ibmvnic_reset(struct work_struct *work) | |||
| 1986 | struct ibmvnic_rwi *rwi; | 1970 | struct ibmvnic_rwi *rwi; |
| 1987 | struct ibmvnic_adapter *adapter; | 1971 | struct ibmvnic_adapter *adapter; |
| 1988 | struct net_device *netdev; | 1972 | struct net_device *netdev; |
| 1973 | bool we_lock_rtnl = false; | ||
| 1989 | u32 reset_state; | 1974 | u32 reset_state; |
| 1990 | int rc = 0; | 1975 | int rc = 0; |
| 1991 | 1976 | ||
| 1992 | adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); | 1977 | adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset); |
| 1993 | netdev = adapter->netdev; | 1978 | netdev = adapter->netdev; |
| 1994 | 1979 | ||
| 1995 | mutex_lock(&adapter->reset_lock); | 1980 | /* netif_set_real_num_xx_queues needs to take rtnl lock here |
| 1981 | * unless wait_for_reset is set, in which case the rtnl lock | ||
| 1982 | * has already been taken before initializing the reset | ||
| 1983 | */ | ||
| 1984 | if (!adapter->wait_for_reset) { | ||
| 1985 | rtnl_lock(); | ||
| 1986 | we_lock_rtnl = true; | ||
| 1987 | } | ||
| 1996 | reset_state = adapter->state; | 1988 | reset_state = adapter->state; |
| 1997 | 1989 | ||
| 1998 | rwi = get_next_rwi(adapter); | 1990 | rwi = get_next_rwi(adapter); |
| @@ -2020,12 +2012,11 @@ static void __ibmvnic_reset(struct work_struct *work) | |||
| 2020 | if (rc) { | 2012 | if (rc) { |
| 2021 | netdev_dbg(adapter->netdev, "Reset failed\n"); | 2013 | netdev_dbg(adapter->netdev, "Reset failed\n"); |
| 2022 | free_all_rwi(adapter); | 2014 | free_all_rwi(adapter); |
| 2023 | mutex_unlock(&adapter->reset_lock); | ||
| 2024 | return; | ||
| 2025 | } | 2015 | } |
| 2026 | 2016 | ||
| 2027 | adapter->resetting = false; | 2017 | adapter->resetting = false; |
| 2028 | mutex_unlock(&adapter->reset_lock); | 2018 | if (we_lock_rtnl) |
| 2019 | rtnl_unlock(); | ||
| 2029 | } | 2020 | } |
| 2030 | 2021 | ||
| 2031 | static int ibmvnic_reset(struct ibmvnic_adapter *adapter, | 2022 | static int ibmvnic_reset(struct ibmvnic_adapter *adapter, |
| @@ -4768,7 +4759,6 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
| 4768 | 4759 | ||
| 4769 | INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); | 4760 | INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); |
| 4770 | INIT_LIST_HEAD(&adapter->rwi_list); | 4761 | INIT_LIST_HEAD(&adapter->rwi_list); |
| 4771 | mutex_init(&adapter->reset_lock); | ||
| 4772 | mutex_init(&adapter->rwi_lock); | 4762 | mutex_init(&adapter->rwi_lock); |
| 4773 | adapter->resetting = false; | 4763 | adapter->resetting = false; |
| 4774 | 4764 | ||
| @@ -4840,8 +4830,8 @@ static int ibmvnic_remove(struct vio_dev *dev) | |||
| 4840 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); | 4830 | struct ibmvnic_adapter *adapter = netdev_priv(netdev); |
| 4841 | 4831 | ||
| 4842 | adapter->state = VNIC_REMOVING; | 4832 | adapter->state = VNIC_REMOVING; |
| 4843 | unregister_netdev(netdev); | 4833 | rtnl_lock(); |
| 4844 | mutex_lock(&adapter->reset_lock); | 4834 | unregister_netdevice(netdev); |
| 4845 | 4835 | ||
| 4846 | release_resources(adapter); | 4836 | release_resources(adapter); |
| 4847 | release_sub_crqs(adapter, 1); | 4837 | release_sub_crqs(adapter, 1); |
| @@ -4852,7 +4842,7 @@ static int ibmvnic_remove(struct vio_dev *dev) | |||
| 4852 | 4842 | ||
| 4853 | adapter->state = VNIC_REMOVED; | 4843 | adapter->state = VNIC_REMOVED; |
| 4854 | 4844 | ||
| 4855 | mutex_unlock(&adapter->reset_lock); | 4845 | rtnl_unlock(); |
| 4856 | device_remove_file(&dev->dev, &dev_attr_failover); | 4846 | device_remove_file(&dev->dev, &dev_attr_failover); |
| 4857 | free_netdev(netdev); | 4847 | free_netdev(netdev); |
| 4858 | dev_set_drvdata(&dev->dev, NULL); | 4848 | dev_set_drvdata(&dev->dev, NULL); |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index 18103b811d4d..99c4f8d331ce 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h | |||
| @@ -1075,7 +1075,7 @@ struct ibmvnic_adapter { | |||
| 1075 | struct tasklet_struct tasklet; | 1075 | struct tasklet_struct tasklet; |
| 1076 | enum vnic_state state; | 1076 | enum vnic_state state; |
| 1077 | enum ibmvnic_reset_reason reset_reason; | 1077 | enum ibmvnic_reset_reason reset_reason; |
| 1078 | struct mutex reset_lock, rwi_lock; | 1078 | struct mutex rwi_lock; |
| 1079 | struct list_head rwi_list; | 1079 | struct list_head rwi_list; |
| 1080 | struct work_struct ibmvnic_reset; | 1080 | struct work_struct ibmvnic_reset; |
| 1081 | bool resetting; | 1081 | bool resetting; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index d7fbd5b6ac95..118324802926 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
| @@ -569,6 +569,7 @@ struct mlx5e_rq { | |||
| 569 | 569 | ||
| 570 | unsigned long state; | 570 | unsigned long state; |
| 571 | int ix; | 571 | int ix; |
| 572 | unsigned int hw_mtu; | ||
| 572 | 573 | ||
| 573 | struct net_dim dim; /* Dynamic Interrupt Moderation */ | 574 | struct net_dim dim; /* Dynamic Interrupt Moderation */ |
| 574 | 575 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 023dc4bccd28..4a37713023be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c | |||
| @@ -88,10 +88,8 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed) | |||
| 88 | 88 | ||
| 89 | eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); | 89 | eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper); |
| 90 | *speed = mlx5e_port_ptys2speed(eth_proto_oper); | 90 | *speed = mlx5e_port_ptys2speed(eth_proto_oper); |
| 91 | if (!(*speed)) { | 91 | if (!(*speed)) |
| 92 | mlx5_core_warn(mdev, "cannot get port speed\n"); | ||
| 93 | err = -EINVAL; | 92 | err = -EINVAL; |
| 94 | } | ||
| 95 | 93 | ||
| 96 | return err; | 94 | return err; |
| 97 | } | 95 | } |
| @@ -258,7 +256,7 @@ static int mlx5e_fec_admin_field(u32 *pplm, | |||
| 258 | case 40000: | 256 | case 40000: |
| 259 | if (!write) | 257 | if (!write) |
| 260 | *fec_policy = MLX5_GET(pplm_reg, pplm, | 258 | *fec_policy = MLX5_GET(pplm_reg, pplm, |
| 261 | fec_override_cap_10g_40g); | 259 | fec_override_admin_10g_40g); |
| 262 | else | 260 | else |
| 263 | MLX5_SET(pplm_reg, pplm, | 261 | MLX5_SET(pplm_reg, pplm, |
| 264 | fec_override_admin_10g_40g, *fec_policy); | 262 | fec_override_admin_10g_40g, *fec_policy); |
| @@ -310,7 +308,7 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, | |||
| 310 | case 10000: | 308 | case 10000: |
| 311 | case 40000: | 309 | case 40000: |
| 312 | *fec_cap = MLX5_GET(pplm_reg, pplm, | 310 | *fec_cap = MLX5_GET(pplm_reg, pplm, |
| 313 | fec_override_admin_10g_40g); | 311 | fec_override_cap_10g_40g); |
| 314 | break; | 312 | break; |
| 315 | case 25000: | 313 | case 25000: |
| 316 | *fec_cap = MLX5_GET(pplm_reg, pplm, | 314 | *fec_cap = MLX5_GET(pplm_reg, pplm, |
| @@ -394,12 +392,12 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active, | |||
| 394 | 392 | ||
| 395 | int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy) | 393 | int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy) |
| 396 | { | 394 | { |
| 395 | u8 fec_policy_nofec = BIT(MLX5E_FEC_NOFEC); | ||
| 397 | bool fec_mode_not_supp_in_speed = false; | 396 | bool fec_mode_not_supp_in_speed = false; |
| 398 | u8 no_fec_policy = BIT(MLX5E_FEC_NOFEC); | ||
| 399 | u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {}; | 397 | u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {}; |
| 400 | u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {}; | 398 | u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {}; |
| 401 | int sz = MLX5_ST_SZ_BYTES(pplm_reg); | 399 | int sz = MLX5_ST_SZ_BYTES(pplm_reg); |
| 402 | u32 current_fec_speed; | 400 | u8 fec_policy_auto = 0; |
| 403 | u8 fec_caps = 0; | 401 | u8 fec_caps = 0; |
| 404 | int err; | 402 | int err; |
| 405 | int i; | 403 | int i; |
| @@ -415,23 +413,19 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy) | |||
| 415 | if (err) | 413 | if (err) |
| 416 | return err; | 414 | return err; |
| 417 | 415 | ||
| 418 | err = mlx5e_port_linkspeed(dev, ¤t_fec_speed); | 416 | MLX5_SET(pplm_reg, out, local_port, 1); |
| 419 | if (err) | ||
| 420 | return err; | ||
| 421 | 417 | ||
| 422 | memset(in, 0, sz); | 418 | for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS; i++) { |
| 423 | MLX5_SET(pplm_reg, in, local_port, 1); | ||
| 424 | for (i = 0; i < MLX5E_FEC_SUPPORTED_SPEEDS && !!fec_policy; i++) { | ||
| 425 | mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]); | 419 | mlx5e_get_fec_cap_field(out, &fec_caps, fec_supported_speeds[i]); |
| 426 | /* policy supported for link speed */ | 420 | /* policy supported for link speed, or policy is auto */ |
| 427 | if (!!(fec_caps & fec_policy)) { | 421 | if (fec_caps & fec_policy || fec_policy == fec_policy_auto) { |
| 428 | mlx5e_fec_admin_field(in, &fec_policy, 1, | 422 | mlx5e_fec_admin_field(out, &fec_policy, 1, |
| 429 | fec_supported_speeds[i]); | 423 | fec_supported_speeds[i]); |
| 430 | } else { | 424 | } else { |
| 431 | if (fec_supported_speeds[i] == current_fec_speed) | 425 | /* turn off FEC if supported. Else, leave it the same */ |
| 432 | return -EOPNOTSUPP; | 426 | if (fec_caps & fec_policy_nofec) |
| 433 | mlx5e_fec_admin_field(in, &no_fec_policy, 1, | 427 | mlx5e_fec_admin_field(out, &fec_policy_nofec, 1, |
| 434 | fec_supported_speeds[i]); | 428 | fec_supported_speeds[i]); |
| 435 | fec_mode_not_supp_in_speed = true; | 429 | fec_mode_not_supp_in_speed = true; |
| 436 | } | 430 | } |
| 437 | } | 431 | } |
| @@ -441,5 +435,5 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u8 fec_policy) | |||
| 441 | "FEC policy 0x%x is not supported for some speeds", | 435 | "FEC policy 0x%x is not supported for some speeds", |
| 442 | fec_policy); | 436 | fec_policy); |
| 443 | 437 | ||
| 444 | return mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 1); | 438 | return mlx5_core_access_reg(dev, out, sz, out, sz, MLX5_REG_PPLM, 0, 1); |
| 445 | } | 439 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c index c047da8752da..eac245a93f91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | |||
| @@ -130,8 +130,10 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) | |||
| 130 | int err; | 130 | int err; |
| 131 | 131 | ||
| 132 | err = mlx5e_port_linkspeed(priv->mdev, &speed); | 132 | err = mlx5e_port_linkspeed(priv->mdev, &speed); |
| 133 | if (err) | 133 | if (err) { |
| 134 | mlx5_core_warn(priv->mdev, "cannot get port speed\n"); | ||
| 134 | return 0; | 135 | return 0; |
| 136 | } | ||
| 135 | 137 | ||
| 136 | xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; | 138 | xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; |
| 137 | 139 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3e770abfd802..25c1c4f96841 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
| @@ -843,8 +843,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, | |||
| 843 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, | 843 | ethtool_link_ksettings_add_link_mode(link_ksettings, supported, |
| 844 | Autoneg); | 844 | Autoneg); |
| 845 | 845 | ||
| 846 | err = get_fec_supported_advertised(mdev, link_ksettings); | 846 | if (get_fec_supported_advertised(mdev, link_ksettings)) |
| 847 | if (err) | ||
| 848 | netdev_dbg(netdev, "%s: FEC caps query failed: %d\n", | 847 | netdev_dbg(netdev, "%s: FEC caps query failed: %d\n", |
| 849 | __func__, err); | 848 | __func__, err); |
| 850 | 849 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 1243edbedc9e..871313d6b34d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
| @@ -502,6 +502,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, | |||
| 502 | rq->channel = c; | 502 | rq->channel = c; |
| 503 | rq->ix = c->ix; | 503 | rq->ix = c->ix; |
| 504 | rq->mdev = mdev; | 504 | rq->mdev = mdev; |
| 505 | rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); | ||
| 505 | rq->stats = &c->priv->channel_stats[c->ix].rq; | 506 | rq->stats = &c->priv->channel_stats[c->ix].rq; |
| 506 | 507 | ||
| 507 | rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; | 508 | rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL; |
| @@ -1623,13 +1624,15 @@ static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev, | |||
| 1623 | int err; | 1624 | int err; |
| 1624 | u32 i; | 1625 | u32 i; |
| 1625 | 1626 | ||
| 1627 | err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); | ||
| 1628 | if (err) | ||
| 1629 | return err; | ||
| 1630 | |||
| 1626 | err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, | 1631 | err = mlx5_cqwq_create(mdev, ¶m->wq, param->cqc, &cq->wq, |
| 1627 | &cq->wq_ctrl); | 1632 | &cq->wq_ctrl); |
| 1628 | if (err) | 1633 | if (err) |
| 1629 | return err; | 1634 | return err; |
| 1630 | 1635 | ||
| 1631 | mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn); | ||
| 1632 | |||
| 1633 | mcq->cqe_sz = 64; | 1636 | mcq->cqe_sz = 64; |
| 1634 | mcq->set_ci_db = cq->wq_ctrl.db.db; | 1637 | mcq->set_ci_db = cq->wq_ctrl.db.db; |
| 1635 | mcq->arm_db = cq->wq_ctrl.db.db + 1; | 1638 | mcq->arm_db = cq->wq_ctrl.db.db + 1; |
| @@ -1687,6 +1690,10 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) | |||
| 1687 | int eqn; | 1690 | int eqn; |
| 1688 | int err; | 1691 | int err; |
| 1689 | 1692 | ||
| 1693 | err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); | ||
| 1694 | if (err) | ||
| 1695 | return err; | ||
| 1696 | |||
| 1690 | inlen = MLX5_ST_SZ_BYTES(create_cq_in) + | 1697 | inlen = MLX5_ST_SZ_BYTES(create_cq_in) + |
| 1691 | sizeof(u64) * cq->wq_ctrl.buf.npages; | 1698 | sizeof(u64) * cq->wq_ctrl.buf.npages; |
| 1692 | in = kvzalloc(inlen, GFP_KERNEL); | 1699 | in = kvzalloc(inlen, GFP_KERNEL); |
| @@ -1700,8 +1707,6 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) | |||
| 1700 | mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, | 1707 | mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, |
| 1701 | (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); | 1708 | (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); |
| 1702 | 1709 | ||
| 1703 | mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used); | ||
| 1704 | |||
| 1705 | MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); | 1710 | MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); |
| 1706 | MLX5_SET(cqc, cqc, c_eqn, eqn); | 1711 | MLX5_SET(cqc, cqc, c_eqn, eqn); |
| 1707 | MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); | 1712 | MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); |
| @@ -1921,6 +1926,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
| 1921 | int err; | 1926 | int err; |
| 1922 | int eqn; | 1927 | int eqn; |
| 1923 | 1928 | ||
| 1929 | err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq); | ||
| 1930 | if (err) | ||
| 1931 | return err; | ||
| 1932 | |||
| 1924 | c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); | 1933 | c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu)); |
| 1925 | if (!c) | 1934 | if (!c) |
| 1926 | return -ENOMEM; | 1935 | return -ENOMEM; |
| @@ -1937,7 +1946,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
| 1937 | c->xdp = !!params->xdp_prog; | 1946 | c->xdp = !!params->xdp_prog; |
| 1938 | c->stats = &priv->channel_stats[ix].ch; | 1947 | c->stats = &priv->channel_stats[ix].ch; |
| 1939 | 1948 | ||
| 1940 | mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq); | ||
| 1941 | c->irq_desc = irq_to_desc(irq); | 1949 | c->irq_desc = irq_to_desc(irq); |
| 1942 | 1950 | ||
| 1943 | netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); | 1951 | netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64); |
| @@ -3574,6 +3582,7 @@ static int set_feature_cvlan_filter(struct net_device *netdev, bool enable) | |||
| 3574 | return 0; | 3582 | return 0; |
| 3575 | } | 3583 | } |
| 3576 | 3584 | ||
| 3585 | #ifdef CONFIG_MLX5_ESWITCH | ||
| 3577 | static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) | 3586 | static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) |
| 3578 | { | 3587 | { |
| 3579 | struct mlx5e_priv *priv = netdev_priv(netdev); | 3588 | struct mlx5e_priv *priv = netdev_priv(netdev); |
| @@ -3586,6 +3595,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) | |||
| 3586 | 3595 | ||
| 3587 | return 0; | 3596 | return 0; |
| 3588 | } | 3597 | } |
| 3598 | #endif | ||
| 3589 | 3599 | ||
| 3590 | static int set_feature_rx_all(struct net_device *netdev, bool enable) | 3600 | static int set_feature_rx_all(struct net_device *netdev, bool enable) |
| 3591 | { | 3601 | { |
| @@ -3684,7 +3694,9 @@ static int mlx5e_set_features(struct net_device *netdev, | |||
| 3684 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); | 3694 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); |
| 3685 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, | 3695 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, |
| 3686 | set_feature_cvlan_filter); | 3696 | set_feature_cvlan_filter); |
| 3697 | #ifdef CONFIG_MLX5_ESWITCH | ||
| 3687 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters); | 3698 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters); |
| 3699 | #endif | ||
| 3688 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); | 3700 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); |
| 3689 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); | 3701 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); |
| 3690 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); | 3702 | err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); |
| @@ -3755,10 +3767,11 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, | |||
| 3755 | } | 3767 | } |
| 3756 | 3768 | ||
| 3757 | if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { | 3769 | if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { |
| 3770 | bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params); | ||
| 3758 | u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); | 3771 | u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); |
| 3759 | u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); | 3772 | u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); |
| 3760 | 3773 | ||
| 3761 | reset = reset && (ppw_old != ppw_new); | 3774 | reset = reset && (is_linear || (ppw_old != ppw_new)); |
| 3762 | } | 3775 | } |
| 3763 | 3776 | ||
| 3764 | if (!reset) { | 3777 | if (!reset) { |
| @@ -4678,7 +4691,9 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) | |||
| 4678 | FT_CAP(modify_root) && | 4691 | FT_CAP(modify_root) && |
| 4679 | FT_CAP(identified_miss_table_mode) && | 4692 | FT_CAP(identified_miss_table_mode) && |
| 4680 | FT_CAP(flow_table_modify)) { | 4693 | FT_CAP(flow_table_modify)) { |
| 4694 | #ifdef CONFIG_MLX5_ESWITCH | ||
| 4681 | netdev->hw_features |= NETIF_F_HW_TC; | 4695 | netdev->hw_features |= NETIF_F_HW_TC; |
| 4696 | #endif | ||
| 4682 | #ifdef CONFIG_MLX5_EN_ARFS | 4697 | #ifdef CONFIG_MLX5_EN_ARFS |
| 4683 | netdev->hw_features |= NETIF_F_NTUPLE; | 4698 | netdev->hw_features |= NETIF_F_NTUPLE; |
| 4684 | #endif | 4699 | #endif |
| @@ -5004,11 +5019,21 @@ err_free_netdev: | |||
| 5004 | int mlx5e_attach_netdev(struct mlx5e_priv *priv) | 5019 | int mlx5e_attach_netdev(struct mlx5e_priv *priv) |
| 5005 | { | 5020 | { |
| 5006 | const struct mlx5e_profile *profile; | 5021 | const struct mlx5e_profile *profile; |
| 5022 | int max_nch; | ||
| 5007 | int err; | 5023 | int err; |
| 5008 | 5024 | ||
| 5009 | profile = priv->profile; | 5025 | profile = priv->profile; |
| 5010 | clear_bit(MLX5E_STATE_DESTROYING, &priv->state); | 5026 | clear_bit(MLX5E_STATE_DESTROYING, &priv->state); |
| 5011 | 5027 | ||
| 5028 | /* max number of channels may have changed */ | ||
| 5029 | max_nch = mlx5e_get_max_num_channels(priv->mdev); | ||
| 5030 | if (priv->channels.params.num_channels > max_nch) { | ||
| 5031 | mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch); | ||
| 5032 | priv->channels.params.num_channels = max_nch; | ||
| 5033 | mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt, | ||
| 5034 | MLX5E_INDIR_RQT_SIZE, max_nch); | ||
| 5035 | } | ||
| 5036 | |||
| 5012 | err = profile->init_tx(priv); | 5037 | err = profile->init_tx(priv); |
| 5013 | if (err) | 5038 | if (err) |
| 5014 | goto out; | 5039 | goto out; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 79638dcbae78..16985ca3248d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
| @@ -1104,6 +1104,12 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, | |||
| 1104 | u32 frag_size; | 1104 | u32 frag_size; |
| 1105 | bool consumed; | 1105 | bool consumed; |
| 1106 | 1106 | ||
| 1107 | /* Check packet size. Note LRO doesn't use linear SKB */ | ||
| 1108 | if (unlikely(cqe_bcnt > rq->hw_mtu)) { | ||
| 1109 | rq->stats->oversize_pkts_sw_drop++; | ||
| 1110 | return NULL; | ||
| 1111 | } | ||
| 1112 | |||
| 1107 | va = page_address(di->page) + head_offset; | 1113 | va = page_address(di->page) + head_offset; |
| 1108 | data = va + rx_headroom; | 1114 | data = va + rx_headroom; |
| 1109 | frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); | 1115 | frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c index 35ded91203f5..4382ef85488c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | |||
| @@ -98,18 +98,17 @@ static int mlx5e_test_link_speed(struct mlx5e_priv *priv) | |||
| 98 | return 1; | 98 | return 1; |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | #ifdef CONFIG_INET | ||
| 102 | /* loopback test */ | ||
| 103 | #define MLX5E_TEST_PKT_SIZE (MLX5E_RX_MAX_HEAD - NET_IP_ALIGN) | ||
| 104 | static const char mlx5e_test_text[ETH_GSTRING_LEN] = "MLX5E SELF TEST"; | ||
| 105 | #define MLX5E_TEST_MAGIC 0x5AEED15C001ULL | ||
| 106 | |||
| 107 | struct mlx5ehdr { | 101 | struct mlx5ehdr { |
| 108 | __be32 version; | 102 | __be32 version; |
| 109 | __be64 magic; | 103 | __be64 magic; |
| 110 | char text[ETH_GSTRING_LEN]; | ||
| 111 | }; | 104 | }; |
| 112 | 105 | ||
| 106 | #ifdef CONFIG_INET | ||
| 107 | /* loopback test */ | ||
| 108 | #define MLX5E_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) +\ | ||
| 109 | sizeof(struct udphdr) + sizeof(struct mlx5ehdr)) | ||
| 110 | #define MLX5E_TEST_MAGIC 0x5AEED15C001ULL | ||
| 111 | |||
| 113 | static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) | 112 | static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) |
| 114 | { | 113 | { |
| 115 | struct sk_buff *skb = NULL; | 114 | struct sk_buff *skb = NULL; |
| @@ -117,10 +116,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) | |||
| 117 | struct ethhdr *ethh; | 116 | struct ethhdr *ethh; |
| 118 | struct udphdr *udph; | 117 | struct udphdr *udph; |
| 119 | struct iphdr *iph; | 118 | struct iphdr *iph; |
| 120 | int datalen, iplen; | 119 | int iplen; |
| 121 | |||
| 122 | datalen = MLX5E_TEST_PKT_SIZE - | ||
| 123 | (sizeof(*ethh) + sizeof(*iph) + sizeof(*udph)); | ||
| 124 | 120 | ||
| 125 | skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE); | 121 | skb = netdev_alloc_skb(priv->netdev, MLX5E_TEST_PKT_SIZE); |
| 126 | if (!skb) { | 122 | if (!skb) { |
| @@ -149,7 +145,7 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) | |||
| 149 | /* Fill UDP header */ | 145 | /* Fill UDP header */ |
| 150 | udph->source = htons(9); | 146 | udph->source = htons(9); |
| 151 | udph->dest = htons(9); /* Discard Protocol */ | 147 | udph->dest = htons(9); /* Discard Protocol */ |
| 152 | udph->len = htons(datalen + sizeof(struct udphdr)); | 148 | udph->len = htons(sizeof(struct mlx5ehdr) + sizeof(struct udphdr)); |
| 153 | udph->check = 0; | 149 | udph->check = 0; |
| 154 | 150 | ||
| 155 | /* Fill IP header */ | 151 | /* Fill IP header */ |
| @@ -157,7 +153,8 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) | |||
| 157 | iph->ttl = 32; | 153 | iph->ttl = 32; |
| 158 | iph->version = 4; | 154 | iph->version = 4; |
| 159 | iph->protocol = IPPROTO_UDP; | 155 | iph->protocol = IPPROTO_UDP; |
| 160 | iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + datalen; | 156 | iplen = sizeof(struct iphdr) + sizeof(struct udphdr) + |
| 157 | sizeof(struct mlx5ehdr); | ||
| 161 | iph->tot_len = htons(iplen); | 158 | iph->tot_len = htons(iplen); |
| 162 | iph->frag_off = 0; | 159 | iph->frag_off = 0; |
| 163 | iph->saddr = 0; | 160 | iph->saddr = 0; |
| @@ -170,9 +167,6 @@ static struct sk_buff *mlx5e_test_get_udp_skb(struct mlx5e_priv *priv) | |||
| 170 | mlxh = skb_put(skb, sizeof(*mlxh)); | 167 | mlxh = skb_put(skb, sizeof(*mlxh)); |
| 171 | mlxh->version = 0; | 168 | mlxh->version = 0; |
| 172 | mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC); | 169 | mlxh->magic = cpu_to_be64(MLX5E_TEST_MAGIC); |
| 173 | strlcpy(mlxh->text, mlx5e_test_text, sizeof(mlxh->text)); | ||
| 174 | datalen -= sizeof(*mlxh); | ||
| 175 | skb_put_zero(skb, datalen); | ||
| 176 | 170 | ||
| 177 | skb->csum = 0; | 171 | skb->csum = 0; |
| 178 | skb->ip_summed = CHECKSUM_PARTIAL; | 172 | skb->ip_summed = CHECKSUM_PARTIAL; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 1e55b9c27ffc..3e99d0728b2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | |||
| @@ -83,6 +83,7 @@ static const struct counter_desc sw_stats_desc[] = { | |||
| 83 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, | 83 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, |
| 84 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) }, | 84 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) }, |
| 85 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) }, | 85 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) }, |
| 86 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) }, | ||
| 86 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, | 87 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) }, |
| 87 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, | 88 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) }, |
| 88 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, | 89 | { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) }, |
| @@ -161,6 +162,7 @@ void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv) | |||
| 161 | s->rx_wqe_err += rq_stats->wqe_err; | 162 | s->rx_wqe_err += rq_stats->wqe_err; |
| 162 | s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; | 163 | s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; |
| 163 | s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; | 164 | s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; |
| 165 | s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop; | ||
| 164 | s->rx_buff_alloc_err += rq_stats->buff_alloc_err; | 166 | s->rx_buff_alloc_err += rq_stats->buff_alloc_err; |
| 165 | s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; | 167 | s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks; |
| 166 | s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; | 168 | s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts; |
| @@ -1189,6 +1191,7 @@ static const struct counter_desc rq_stats_desc[] = { | |||
| 1189 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, | 1191 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, |
| 1190 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, | 1192 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) }, |
| 1191 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) }, | 1193 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) }, |
| 1194 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) }, | ||
| 1192 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, | 1195 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) }, |
| 1193 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, | 1196 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) }, |
| 1194 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, | 1197 | { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) }, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 77f74ce11280..3f8e870ef4c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | |||
| @@ -96,6 +96,7 @@ struct mlx5e_sw_stats { | |||
| 96 | u64 rx_wqe_err; | 96 | u64 rx_wqe_err; |
| 97 | u64 rx_mpwqe_filler_cqes; | 97 | u64 rx_mpwqe_filler_cqes; |
| 98 | u64 rx_mpwqe_filler_strides; | 98 | u64 rx_mpwqe_filler_strides; |
| 99 | u64 rx_oversize_pkts_sw_drop; | ||
| 99 | u64 rx_buff_alloc_err; | 100 | u64 rx_buff_alloc_err; |
| 100 | u64 rx_cqe_compress_blks; | 101 | u64 rx_cqe_compress_blks; |
| 101 | u64 rx_cqe_compress_pkts; | 102 | u64 rx_cqe_compress_pkts; |
| @@ -193,6 +194,7 @@ struct mlx5e_rq_stats { | |||
| 193 | u64 wqe_err; | 194 | u64 wqe_err; |
| 194 | u64 mpwqe_filler_cqes; | 195 | u64 mpwqe_filler_cqes; |
| 195 | u64 mpwqe_filler_strides; | 196 | u64 mpwqe_filler_strides; |
| 197 | u64 oversize_pkts_sw_drop; | ||
| 196 | u64 buff_alloc_err; | 198 | u64 buff_alloc_err; |
| 197 | u64 cqe_compress_blks; | 199 | u64 cqe_compress_blks; |
| 198 | u64 cqe_compress_pkts; | 200 | u64 cqe_compress_pkts; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 608025ca5c04..fca6f4132c91 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
| @@ -1447,31 +1447,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, | |||
| 1447 | inner_headers); | 1447 | inner_headers); |
| 1448 | } | 1448 | } |
| 1449 | 1449 | ||
| 1450 | if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { | 1450 | if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { |
| 1451 | struct flow_dissector_key_eth_addrs *key = | 1451 | struct flow_dissector_key_basic *key = |
| 1452 | skb_flow_dissector_target(f->dissector, | 1452 | skb_flow_dissector_target(f->dissector, |
| 1453 | FLOW_DISSECTOR_KEY_ETH_ADDRS, | 1453 | FLOW_DISSECTOR_KEY_BASIC, |
| 1454 | f->key); | 1454 | f->key); |
| 1455 | struct flow_dissector_key_eth_addrs *mask = | 1455 | struct flow_dissector_key_basic *mask = |
| 1456 | skb_flow_dissector_target(f->dissector, | 1456 | skb_flow_dissector_target(f->dissector, |
| 1457 | FLOW_DISSECTOR_KEY_ETH_ADDRS, | 1457 | FLOW_DISSECTOR_KEY_BASIC, |
| 1458 | f->mask); | 1458 | f->mask); |
| 1459 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, | ||
| 1460 | ntohs(mask->n_proto)); | ||
| 1461 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, | ||
| 1462 | ntohs(key->n_proto)); | ||
| 1459 | 1463 | ||
| 1460 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, | 1464 | if (mask->n_proto) |
| 1461 | dmac_47_16), | ||
| 1462 | mask->dst); | ||
| 1463 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, | ||
| 1464 | dmac_47_16), | ||
| 1465 | key->dst); | ||
| 1466 | |||
| 1467 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, | ||
| 1468 | smac_47_16), | ||
| 1469 | mask->src); | ||
| 1470 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, | ||
| 1471 | smac_47_16), | ||
| 1472 | key->src); | ||
| 1473 | |||
| 1474 | if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst)) | ||
| 1475 | *match_level = MLX5_MATCH_L2; | 1465 | *match_level = MLX5_MATCH_L2; |
| 1476 | } | 1466 | } |
| 1477 | 1467 | ||
| @@ -1505,9 +1495,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, | |||
| 1505 | 1495 | ||
| 1506 | *match_level = MLX5_MATCH_L2; | 1496 | *match_level = MLX5_MATCH_L2; |
| 1507 | } | 1497 | } |
| 1508 | } else { | 1498 | } else if (*match_level != MLX5_MATCH_NONE) { |
| 1509 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1); | 1499 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1); |
| 1510 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); | 1500 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); |
| 1501 | *match_level = MLX5_MATCH_L2; | ||
| 1511 | } | 1502 | } |
| 1512 | 1503 | ||
| 1513 | if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) { | 1504 | if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) { |
| @@ -1545,21 +1536,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, | |||
| 1545 | } | 1536 | } |
| 1546 | } | 1537 | } |
| 1547 | 1538 | ||
| 1548 | if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) { | 1539 | if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { |
| 1549 | struct flow_dissector_key_basic *key = | 1540 | struct flow_dissector_key_eth_addrs *key = |
| 1550 | skb_flow_dissector_target(f->dissector, | 1541 | skb_flow_dissector_target(f->dissector, |
| 1551 | FLOW_DISSECTOR_KEY_BASIC, | 1542 | FLOW_DISSECTOR_KEY_ETH_ADDRS, |
| 1552 | f->key); | 1543 | f->key); |
| 1553 | struct flow_dissector_key_basic *mask = | 1544 | struct flow_dissector_key_eth_addrs *mask = |
| 1554 | skb_flow_dissector_target(f->dissector, | 1545 | skb_flow_dissector_target(f->dissector, |
| 1555 | FLOW_DISSECTOR_KEY_BASIC, | 1546 | FLOW_DISSECTOR_KEY_ETH_ADDRS, |
| 1556 | f->mask); | 1547 | f->mask); |
| 1557 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype, | ||
| 1558 | ntohs(mask->n_proto)); | ||
| 1559 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, | ||
| 1560 | ntohs(key->n_proto)); | ||
| 1561 | 1548 | ||
| 1562 | if (mask->n_proto) | 1549 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, |
| 1550 | dmac_47_16), | ||
| 1551 | mask->dst); | ||
| 1552 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, | ||
| 1553 | dmac_47_16), | ||
| 1554 | key->dst); | ||
| 1555 | |||
| 1556 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c, | ||
| 1557 | smac_47_16), | ||
| 1558 | mask->src); | ||
| 1559 | ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, | ||
| 1560 | smac_47_16), | ||
| 1561 | key->src); | ||
| 1562 | |||
| 1563 | if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst)) | ||
| 1563 | *match_level = MLX5_MATCH_L2; | 1564 | *match_level = MLX5_MATCH_L2; |
| 1564 | } | 1565 | } |
| 1565 | 1566 | ||
| @@ -1586,10 +1587,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, | |||
| 1586 | 1587 | ||
| 1587 | /* the HW doesn't need L3 inline to match on frag=no */ | 1588 | /* the HW doesn't need L3 inline to match on frag=no */ |
| 1588 | if (!(key->flags & FLOW_DIS_IS_FRAGMENT)) | 1589 | if (!(key->flags & FLOW_DIS_IS_FRAGMENT)) |
| 1589 | *match_level = MLX5_INLINE_MODE_L2; | 1590 | *match_level = MLX5_MATCH_L2; |
| 1590 | /* *** L2 attributes parsing up to here *** */ | 1591 | /* *** L2 attributes parsing up to here *** */ |
| 1591 | else | 1592 | else |
| 1592 | *match_level = MLX5_INLINE_MODE_IP; | 1593 | *match_level = MLX5_MATCH_L3; |
| 1593 | } | 1594 | } |
| 1594 | } | 1595 | } |
| 1595 | 1596 | ||
| @@ -2979,7 +2980,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
| 2979 | if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) | 2980 | if (!actions_match_supported(priv, exts, parse_attr, flow, extack)) |
| 2980 | return -EOPNOTSUPP; | 2981 | return -EOPNOTSUPP; |
| 2981 | 2982 | ||
| 2982 | if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { | 2983 | if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { |
| 2983 | NL_SET_ERR_MSG_MOD(extack, | 2984 | NL_SET_ERR_MSG_MOD(extack, |
| 2984 | "current firmware doesn't support split rule for port mirroring"); | 2985 | "current firmware doesn't support split rule for port mirroring"); |
| 2985 | netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); | 2986 | netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 515e3d6de051..5a22c5874f3b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | |||
| @@ -83,8 +83,14 @@ struct mlx5_fpga_ipsec_rule { | |||
| 83 | }; | 83 | }; |
| 84 | 84 | ||
| 85 | static const struct rhashtable_params rhash_sa = { | 85 | static const struct rhashtable_params rhash_sa = { |
| 86 | .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), | 86 | /* Keep out "cmd" field from the key as it's |
| 87 | .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa), | 87 | * value is not constant during the lifetime |
| 88 | * of the key object. | ||
| 89 | */ | ||
| 90 | .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - | ||
| 91 | FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), | ||
| 92 | .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) + | ||
| 93 | FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), | ||
| 88 | .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), | 94 | .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), |
| 89 | .automatic_shrinking = true, | 95 | .automatic_shrinking = true, |
| 90 | .min_size = 1, | 96 | .min_size = 1, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index b59953daf8b4..11dabd62e2c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | |||
| @@ -560,9 +560,9 @@ static int mlx5i_close(struct net_device *netdev) | |||
| 560 | 560 | ||
| 561 | netif_carrier_off(epriv->netdev); | 561 | netif_carrier_off(epriv->netdev); |
| 562 | mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); | 562 | mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn); |
| 563 | mlx5i_uninit_underlay_qp(epriv); | ||
| 564 | mlx5e_deactivate_priv_channels(epriv); | 563 | mlx5e_deactivate_priv_channels(epriv); |
| 565 | mlx5e_close_channels(&epriv->channels); | 564 | mlx5e_close_channels(&epriv->channels); |
| 565 | mlx5i_uninit_underlay_qp(epriv); | ||
| 566 | unlock: | 566 | unlock: |
| 567 | mutex_unlock(&epriv->state_lock); | 567 | mutex_unlock(&epriv->state_lock); |
| 568 | return 0; | 568 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index cff141077558..88a8576ca9ce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
| @@ -485,8 +485,16 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, | |||
| 485 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; | 485 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; |
| 486 | 486 | ||
| 487 | /* Can't have multiple flags set here */ | 487 | /* Can't have multiple flags set here */ |
| 488 | if (bitmap_weight((unsigned long *)&pq_flags, sizeof(pq_flags)) > 1) | 488 | if (bitmap_weight((unsigned long *)&pq_flags, |
| 489 | sizeof(pq_flags) * BITS_PER_BYTE) > 1) { | ||
| 490 | DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags); | ||
| 489 | goto err; | 491 | goto err; |
| 492 | } | ||
| 493 | |||
| 494 | if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { | ||
| 495 | DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags); | ||
| 496 | goto err; | ||
| 497 | } | ||
| 490 | 498 | ||
| 491 | switch (pq_flags) { | 499 | switch (pq_flags) { |
| 492 | case PQ_FLAGS_RLS: | 500 | case PQ_FLAGS_RLS: |
| @@ -510,8 +518,7 @@ static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, | |||
| 510 | } | 518 | } |
| 511 | 519 | ||
| 512 | err: | 520 | err: |
| 513 | DP_ERR(p_hwfn, "BAD pq flags %d\n", pq_flags); | 521 | return &qm_info->start_pq; |
| 514 | return NULL; | ||
| 515 | } | 522 | } |
| 516 | 523 | ||
| 517 | /* save pq index in qm info */ | 524 | /* save pq index in qm info */ |
| @@ -535,20 +542,32 @@ u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc) | |||
| 535 | { | 542 | { |
| 536 | u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); | 543 | u8 max_tc = qed_init_qm_get_num_tcs(p_hwfn); |
| 537 | 544 | ||
| 545 | if (max_tc == 0) { | ||
| 546 | DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n", | ||
| 547 | PQ_FLAGS_MCOS); | ||
| 548 | return p_hwfn->qm_info.start_pq; | ||
| 549 | } | ||
| 550 | |||
| 538 | if (tc > max_tc) | 551 | if (tc > max_tc) |
| 539 | DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); | 552 | DP_ERR(p_hwfn, "tc %d must be smaller than %d\n", tc, max_tc); |
| 540 | 553 | ||
| 541 | return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + tc; | 554 | return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_MCOS) + (tc % max_tc); |
| 542 | } | 555 | } |
| 543 | 556 | ||
| 544 | u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) | 557 | u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf) |
| 545 | { | 558 | { |
| 546 | u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn); | 559 | u16 max_vf = qed_init_qm_get_num_vfs(p_hwfn); |
| 547 | 560 | ||
| 561 | if (max_vf == 0) { | ||
| 562 | DP_ERR(p_hwfn, "pq with flag 0x%lx do not exist\n", | ||
| 563 | PQ_FLAGS_VFS); | ||
| 564 | return p_hwfn->qm_info.start_pq; | ||
| 565 | } | ||
| 566 | |||
| 548 | if (vf > max_vf) | 567 | if (vf > max_vf) |
| 549 | DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); | 568 | DP_ERR(p_hwfn, "vf %d must be smaller than %d\n", vf, max_vf); |
| 550 | 569 | ||
| 551 | return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + vf; | 570 | return qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_VFS) + (vf % max_vf); |
| 552 | } | 571 | } |
| 553 | 572 | ||
| 554 | u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc) | 573 | u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc) |
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index 62269e578718..cfe680f78a3f 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c | |||
| @@ -810,17 +810,13 @@ static int vsc85xx_default_config(struct phy_device *phydev) | |||
| 810 | 810 | ||
| 811 | phydev->mdix_ctrl = ETH_TP_MDI_AUTO; | 811 | phydev->mdix_ctrl = ETH_TP_MDI_AUTO; |
| 812 | mutex_lock(&phydev->lock); | 812 | mutex_lock(&phydev->lock); |
| 813 | rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2); | ||
| 814 | if (rc < 0) | ||
| 815 | goto out_unlock; | ||
| 816 | 813 | ||
| 817 | reg_val = phy_read(phydev, MSCC_PHY_RGMII_CNTL); | 814 | reg_val = RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS; |
| 818 | reg_val &= ~(RGMII_RX_CLK_DELAY_MASK); | 815 | |
| 819 | reg_val |= (RGMII_RX_CLK_DELAY_1_1_NS << RGMII_RX_CLK_DELAY_POS); | 816 | rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_EXTENDED_2, |
| 820 | phy_write(phydev, MSCC_PHY_RGMII_CNTL, reg_val); | 817 | MSCC_PHY_RGMII_CNTL, RGMII_RX_CLK_DELAY_MASK, |
| 818 | reg_val); | ||
| 821 | 819 | ||
| 822 | out_unlock: | ||
| 823 | rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc); | ||
| 824 | mutex_unlock(&phydev->lock); | 820 | mutex_unlock(&phydev->lock); |
| 825 | 821 | ||
| 826 | return rc; | 822 | return rc; |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index db633ae9f784..364f514d56d8 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
| @@ -985,8 +985,6 @@ static void team_port_disable(struct team *team, | |||
| 985 | team->en_port_count--; | 985 | team->en_port_count--; |
| 986 | team_queue_override_port_del(team, port); | 986 | team_queue_override_port_del(team, port); |
| 987 | team_adjust_ops(team); | 987 | team_adjust_ops(team); |
| 988 | team_notify_peers(team); | ||
| 989 | team_mcast_rejoin(team); | ||
| 990 | team_lower_state_changed(port); | 988 | team_lower_state_changed(port); |
| 991 | } | 989 | } |
| 992 | 990 | ||
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 3e2c041d76ac..cecfd77c9f3c 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
| @@ -70,7 +70,8 @@ static const unsigned long guest_offloads[] = { | |||
| 70 | VIRTIO_NET_F_GUEST_TSO4, | 70 | VIRTIO_NET_F_GUEST_TSO4, |
| 71 | VIRTIO_NET_F_GUEST_TSO6, | 71 | VIRTIO_NET_F_GUEST_TSO6, |
| 72 | VIRTIO_NET_F_GUEST_ECN, | 72 | VIRTIO_NET_F_GUEST_ECN, |
| 73 | VIRTIO_NET_F_GUEST_UFO | 73 | VIRTIO_NET_F_GUEST_UFO, |
| 74 | VIRTIO_NET_F_GUEST_CSUM | ||
| 74 | }; | 75 | }; |
| 75 | 76 | ||
| 76 | struct virtnet_stat_desc { | 77 | struct virtnet_stat_desc { |
| @@ -2334,9 +2335,6 @@ static int virtnet_clear_guest_offloads(struct virtnet_info *vi) | |||
| 2334 | if (!vi->guest_offloads) | 2335 | if (!vi->guest_offloads) |
| 2335 | return 0; | 2336 | return 0; |
| 2336 | 2337 | ||
| 2337 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) | ||
| 2338 | offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM; | ||
| 2339 | |||
| 2340 | return virtnet_set_guest_offloads(vi, offloads); | 2338 | return virtnet_set_guest_offloads(vi, offloads); |
| 2341 | } | 2339 | } |
| 2342 | 2340 | ||
| @@ -2346,8 +2344,6 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi) | |||
| 2346 | 2344 | ||
| 2347 | if (!vi->guest_offloads) | 2345 | if (!vi->guest_offloads) |
| 2348 | return 0; | 2346 | return 0; |
| 2349 | if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM)) | ||
| 2350 | offloads |= 1ULL << VIRTIO_NET_F_GUEST_CSUM; | ||
| 2351 | 2347 | ||
| 2352 | return virtnet_set_guest_offloads(vi, offloads); | 2348 | return virtnet_set_guest_offloads(vi, offloads); |
| 2353 | } | 2349 | } |
| @@ -2365,8 +2361,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, | |||
| 2365 | && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || | 2361 | && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) || |
| 2366 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || | 2362 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) || |
| 2367 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || | 2363 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) || |
| 2368 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO))) { | 2364 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) || |
| 2369 | NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO, disable LRO first"); | 2365 | virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) { |
| 2366 | NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first"); | ||
| 2370 | return -EOPNOTSUPP; | 2367 | return -EOPNOTSUPP; |
| 2371 | } | 2368 | } |
| 2372 | 2369 | ||
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index a1c2801ded10..7e49342bae38 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c | |||
| @@ -6867,7 +6867,7 @@ static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | |||
| 6867 | u32 bitmap; | 6867 | u32 bitmap; |
| 6868 | 6868 | ||
| 6869 | if (drop) { | 6869 | if (drop) { |
| 6870 | if (vif->type == NL80211_IFTYPE_STATION) { | 6870 | if (vif && vif->type == NL80211_IFTYPE_STATION) { |
| 6871 | bitmap = ~(1 << WMI_MGMT_TID); | 6871 | bitmap = ~(1 << WMI_MGMT_TID); |
| 6872 | list_for_each_entry(arvif, &ar->arvifs, list) { | 6872 | list_for_each_entry(arvif, &ar->arvifs, list) { |
| 6873 | if (arvif->vdev_type == WMI_VDEV_TYPE_STA) | 6873 | if (arvif->vdev_type == WMI_VDEV_TYPE_STA) |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 1e3b5f4a4cf9..f23cb2f3d296 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
| @@ -1251,6 +1251,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, | |||
| 1251 | struct ath_vif *avp = (void *)vif->drv_priv; | 1251 | struct ath_vif *avp = (void *)vif->drv_priv; |
| 1252 | struct ath_node *an = &avp->mcast_node; | 1252 | struct ath_node *an = &avp->mcast_node; |
| 1253 | 1253 | ||
| 1254 | mutex_lock(&sc->mutex); | ||
| 1254 | if (IS_ENABLED(CONFIG_ATH9K_TX99)) { | 1255 | if (IS_ENABLED(CONFIG_ATH9K_TX99)) { |
| 1255 | if (sc->cur_chan->nvifs >= 1) { | 1256 | if (sc->cur_chan->nvifs >= 1) { |
| 1256 | mutex_unlock(&sc->mutex); | 1257 | mutex_unlock(&sc->mutex); |
| @@ -1259,8 +1260,6 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, | |||
| 1259 | sc->tx99_vif = vif; | 1260 | sc->tx99_vif = vif; |
| 1260 | } | 1261 | } |
| 1261 | 1262 | ||
| 1262 | mutex_lock(&sc->mutex); | ||
| 1263 | |||
| 1264 | ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); | 1263 | ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); |
| 1265 | sc->cur_chan->nvifs++; | 1264 | sc->cur_chan->nvifs++; |
| 1266 | 1265 | ||
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 230a378c26fc..7f0a5bade70a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
| @@ -6005,7 +6005,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, | |||
| 6005 | * for subsequent chanspecs. | 6005 | * for subsequent chanspecs. |
| 6006 | */ | 6006 | */ |
| 6007 | channel->flags = IEEE80211_CHAN_NO_HT40 | | 6007 | channel->flags = IEEE80211_CHAN_NO_HT40 | |
| 6008 | IEEE80211_CHAN_NO_80MHZ; | 6008 | IEEE80211_CHAN_NO_80MHZ | |
| 6009 | IEEE80211_CHAN_NO_160MHZ; | ||
| 6009 | ch.bw = BRCMU_CHAN_BW_20; | 6010 | ch.bw = BRCMU_CHAN_BW_20; |
| 6010 | cfg->d11inf.encchspec(&ch); | 6011 | cfg->d11inf.encchspec(&ch); |
| 6011 | chaninfo = ch.chspec; | 6012 | chaninfo = ch.chspec; |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c index e7584b842dce..eb5db94f5745 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c | |||
| @@ -193,6 +193,9 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) | |||
| 193 | } | 193 | } |
| 194 | break; | 194 | break; |
| 195 | case BRCMU_CHSPEC_D11AC_BW_160: | 195 | case BRCMU_CHSPEC_D11AC_BW_160: |
| 196 | ch->bw = BRCMU_CHAN_BW_160; | ||
| 197 | ch->sb = brcmu_maskget16(ch->chspec, BRCMU_CHSPEC_D11AC_SB_MASK, | ||
| 198 | BRCMU_CHSPEC_D11AC_SB_SHIFT); | ||
| 196 | switch (ch->sb) { | 199 | switch (ch->sb) { |
| 197 | case BRCMU_CHAN_SB_LLL: | 200 | case BRCMU_CHAN_SB_LLL: |
| 198 | ch->control_ch_num -= CH_70MHZ_APART; | 201 | ch->control_ch_num -= CH_70MHZ_APART; |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 2439e98431ee..7492dfb6729b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | * GPL LICENSE SUMMARY | 6 | * GPL LICENSE SUMMARY |
| 7 | * | 7 | * |
| 8 | * Copyright(c) 2017 Intel Deutschland GmbH | 8 | * Copyright(c) 2017 Intel Deutschland GmbH |
| 9 | * Copyright(c) 2018 Intel Corporation | ||
| 9 | * | 10 | * |
| 10 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
| 11 | * it under the terms of version 2 of the GNU General Public License as | 12 | * it under the terms of version 2 of the GNU General Public License as |
| @@ -26,6 +27,7 @@ | |||
| 26 | * BSD LICENSE | 27 | * BSD LICENSE |
| 27 | * | 28 | * |
| 28 | * Copyright(c) 2017 Intel Deutschland GmbH | 29 | * Copyright(c) 2017 Intel Deutschland GmbH |
| 30 | * Copyright(c) 2018 Intel Corporation | ||
| 29 | * All rights reserved. | 31 | * All rights reserved. |
| 30 | * | 32 | * |
| 31 | * Redistribution and use in source and binary forms, with or without | 33 | * Redistribution and use in source and binary forms, with or without |
| @@ -81,7 +83,7 @@ | |||
| 81 | #define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2) | 83 | #define ACPI_WRDS_WIFI_DATA_SIZE (ACPI_SAR_TABLE_SIZE + 2) |
| 82 | #define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \ | 84 | #define ACPI_EWRD_WIFI_DATA_SIZE ((ACPI_SAR_PROFILE_NUM - 1) * \ |
| 83 | ACPI_SAR_TABLE_SIZE + 3) | 85 | ACPI_SAR_TABLE_SIZE + 3) |
| 84 | #define ACPI_WGDS_WIFI_DATA_SIZE 18 | 86 | #define ACPI_WGDS_WIFI_DATA_SIZE 19 |
| 85 | #define ACPI_WRDD_WIFI_DATA_SIZE 2 | 87 | #define ACPI_WRDD_WIFI_DATA_SIZE 2 |
| 86 | #define ACPI_SPLC_WIFI_DATA_SIZE 2 | 88 | #define ACPI_SPLC_WIFI_DATA_SIZE 2 |
| 87 | 89 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 6b95d0e75889..2b8b50a77990 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h | |||
| @@ -154,7 +154,11 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, | |||
| 154 | const struct iwl_fw_runtime_ops *ops, void *ops_ctx, | 154 | const struct iwl_fw_runtime_ops *ops, void *ops_ctx, |
| 155 | struct dentry *dbgfs_dir); | 155 | struct dentry *dbgfs_dir); |
| 156 | 156 | ||
| 157 | void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt); | 157 | static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt) |
| 158 | { | ||
| 159 | kfree(fwrt->dump.d3_debug_data); | ||
| 160 | fwrt->dump.d3_debug_data = NULL; | ||
| 161 | } | ||
| 158 | 162 | ||
| 159 | void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); | 163 | void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt); |
| 160 | 164 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index dade206d5511..2ba890445c35 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c | |||
| @@ -893,7 +893,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) | |||
| 893 | IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n"); | 893 | IWL_DEBUG_RADIO(mvm, "Sending GEO_TX_POWER_LIMIT\n"); |
| 894 | 894 | ||
| 895 | BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * | 895 | BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES * ACPI_WGDS_NUM_BANDS * |
| 896 | ACPI_WGDS_TABLE_SIZE != ACPI_WGDS_WIFI_DATA_SIZE); | 896 | ACPI_WGDS_TABLE_SIZE + 1 != ACPI_WGDS_WIFI_DATA_SIZE); |
| 897 | 897 | ||
| 898 | BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES); | 898 | BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES > IWL_NUM_GEO_PROFILES); |
| 899 | 899 | ||
| @@ -928,6 +928,11 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm) | |||
| 928 | return -ENOENT; | 928 | return -ENOENT; |
| 929 | } | 929 | } |
| 930 | 930 | ||
| 931 | static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm) | ||
| 932 | { | ||
| 933 | return -ENOENT; | ||
| 934 | } | ||
| 935 | |||
| 931 | static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) | 936 | static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm) |
| 932 | { | 937 | { |
| 933 | return 0; | 938 | return 0; |
| @@ -954,8 +959,11 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) | |||
| 954 | IWL_DEBUG_RADIO(mvm, | 959 | IWL_DEBUG_RADIO(mvm, |
| 955 | "WRDS SAR BIOS table invalid or unavailable. (%d)\n", | 960 | "WRDS SAR BIOS table invalid or unavailable. (%d)\n", |
| 956 | ret); | 961 | ret); |
| 957 | /* if not available, don't fail and don't bother with EWRD */ | 962 | /* |
| 958 | return 0; | 963 | * If not available, don't fail and don't bother with EWRD. |
| 964 | * Return 1 to tell that we can't use WGDS either. | ||
| 965 | */ | ||
| 966 | return 1; | ||
| 959 | } | 967 | } |
| 960 | 968 | ||
| 961 | ret = iwl_mvm_sar_get_ewrd_table(mvm); | 969 | ret = iwl_mvm_sar_get_ewrd_table(mvm); |
| @@ -968,9 +976,13 @@ static int iwl_mvm_sar_init(struct iwl_mvm *mvm) | |||
| 968 | /* choose profile 1 (WRDS) as default for both chains */ | 976 | /* choose profile 1 (WRDS) as default for both chains */ |
| 969 | ret = iwl_mvm_sar_select_profile(mvm, 1, 1); | 977 | ret = iwl_mvm_sar_select_profile(mvm, 1, 1); |
| 970 | 978 | ||
| 971 | /* if we don't have profile 0 from BIOS, just skip it */ | 979 | /* |
| 980 | * If we don't have profile 0 from BIOS, just skip it. This | ||
| 981 | * means that SAR Geo will not be enabled either, even if we | ||
| 982 | * have other valid profiles. | ||
| 983 | */ | ||
| 972 | if (ret == -ENOENT) | 984 | if (ret == -ENOENT) |
| 973 | return 0; | 985 | return 1; |
| 974 | 986 | ||
| 975 | return ret; | 987 | return ret; |
| 976 | } | 988 | } |
| @@ -1168,11 +1180,19 @@ int iwl_mvm_up(struct iwl_mvm *mvm) | |||
| 1168 | iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); | 1180 | iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN); |
| 1169 | 1181 | ||
| 1170 | ret = iwl_mvm_sar_init(mvm); | 1182 | ret = iwl_mvm_sar_init(mvm); |
| 1171 | if (ret) | 1183 | if (ret == 0) { |
| 1172 | goto error; | 1184 | ret = iwl_mvm_sar_geo_init(mvm); |
| 1185 | } else if (ret > 0 && !iwl_mvm_sar_get_wgds_table(mvm)) { | ||
| 1186 | /* | ||
| 1187 | * If basic SAR is not available, we check for WGDS, | ||
| 1188 | * which should *not* be available either. If it is | ||
| 1189 | * available, issue an error, because we can't use SAR | ||
| 1190 | * Geo without basic SAR. | ||
| 1191 | */ | ||
| 1192 | IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n"); | ||
| 1193 | } | ||
| 1173 | 1194 | ||
| 1174 | ret = iwl_mvm_sar_geo_init(mvm); | 1195 | if (ret < 0) |
| 1175 | if (ret) | ||
| 1176 | goto error; | 1196 | goto error; |
| 1177 | 1197 | ||
| 1178 | iwl_mvm_leds_sync(mvm); | 1198 | iwl_mvm_leds_sync(mvm); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 505b0385d800..00f831d88366 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
| @@ -301,8 +301,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, | |||
| 301 | goto out; | 301 | goto out; |
| 302 | } | 302 | } |
| 303 | 303 | ||
| 304 | if (changed) | 304 | if (changed) { |
| 305 | *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE); | 305 | u32 status = le32_to_cpu(resp->status); |
| 306 | |||
| 307 | *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || | ||
| 308 | status == MCC_RESP_ILLEGAL); | ||
| 309 | } | ||
| 306 | 310 | ||
| 307 | regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, | 311 | regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, |
| 308 | __le32_to_cpu(resp->n_channels), | 312 | __le32_to_cpu(resp->n_channels), |
| @@ -4444,10 +4448,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, | |||
| 4444 | sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); | 4448 | sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); |
| 4445 | } | 4449 | } |
| 4446 | 4450 | ||
| 4447 | if (!fw_has_capa(&mvm->fw->ucode_capa, | ||
| 4448 | IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) | ||
| 4449 | return; | ||
| 4450 | |||
| 4451 | /* if beacon filtering isn't on mac80211 does it anyway */ | 4451 | /* if beacon filtering isn't on mac80211 does it anyway */ |
| 4452 | if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) | 4452 | if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) |
| 4453 | return; | 4453 | return; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 3633f27d048a..6fc5cc1f2b5b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | |||
| @@ -539,9 +539,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, | |||
| 539 | } | 539 | } |
| 540 | 540 | ||
| 541 | IWL_DEBUG_LAR(mvm, | 541 | IWL_DEBUG_LAR(mvm, |
| 542 | "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n", | 542 | "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n", |
| 543 | status, mcc, mcc >> 8, mcc & 0xff, | 543 | status, mcc, mcc >> 8, mcc & 0xff, n_channels); |
| 544 | !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels); | ||
| 545 | 544 | ||
| 546 | exit: | 545 | exit: |
| 547 | iwl_free_resp(&cmd); | 546 | iwl_free_resp(&cmd); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 0e2092526fae..af3fba10abc1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c | |||
| @@ -858,6 +858,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, | |||
| 858 | iwl_mvm_thermal_exit(mvm); | 858 | iwl_mvm_thermal_exit(mvm); |
| 859 | out_free: | 859 | out_free: |
| 860 | iwl_fw_flush_dump(&mvm->fwrt); | 860 | iwl_fw_flush_dump(&mvm->fwrt); |
| 861 | iwl_fw_runtime_free(&mvm->fwrt); | ||
| 861 | 862 | ||
| 862 | if (iwlmvm_mod_params.init_dbg) | 863 | if (iwlmvm_mod_params.init_dbg) |
| 863 | return op_mode; | 864 | return op_mode; |
| @@ -910,6 +911,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode) | |||
| 910 | 911 | ||
| 911 | iwl_mvm_tof_clean(mvm); | 912 | iwl_mvm_tof_clean(mvm); |
| 912 | 913 | ||
| 914 | iwl_fw_runtime_free(&mvm->fwrt); | ||
| 913 | mutex_destroy(&mvm->mutex); | 915 | mutex_destroy(&mvm->mutex); |
| 914 | mutex_destroy(&mvm->d0i3_suspend_mutex); | 916 | mutex_destroy(&mvm->d0i3_suspend_mutex); |
| 915 | 917 | ||
diff --git a/drivers/net/wireless/mediatek/mt76/Kconfig b/drivers/net/wireless/mediatek/mt76/Kconfig index 0ccbcd7e887d..c30d8f5bbf2a 100644 --- a/drivers/net/wireless/mediatek/mt76/Kconfig +++ b/drivers/net/wireless/mediatek/mt76/Kconfig | |||
| @@ -1,6 +1,12 @@ | |||
| 1 | config MT76_CORE | 1 | config MT76_CORE |
| 2 | tristate | 2 | tristate |
| 3 | 3 | ||
| 4 | config MT76_LEDS | ||
| 5 | bool | ||
| 6 | depends on MT76_CORE | ||
| 7 | depends on LEDS_CLASS=y || MT76_CORE=LEDS_CLASS | ||
| 8 | default y | ||
| 9 | |||
| 4 | config MT76_USB | 10 | config MT76_USB |
| 5 | tristate | 11 | tristate |
| 6 | depends on MT76_CORE | 12 | depends on MT76_CORE |
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 2a699e8b79bf..7d219ff2d480 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c | |||
| @@ -345,9 +345,11 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, | |||
| 345 | mt76_check_sband(dev, NL80211_BAND_2GHZ); | 345 | mt76_check_sband(dev, NL80211_BAND_2GHZ); |
| 346 | mt76_check_sband(dev, NL80211_BAND_5GHZ); | 346 | mt76_check_sband(dev, NL80211_BAND_5GHZ); |
| 347 | 347 | ||
| 348 | ret = mt76_led_init(dev); | 348 | if (IS_ENABLED(CONFIG_MT76_LEDS)) { |
| 349 | if (ret) | 349 | ret = mt76_led_init(dev); |
| 350 | return ret; | 350 | if (ret) |
| 351 | return ret; | ||
| 352 | } | ||
| 351 | 353 | ||
| 352 | return ieee80211_register_hw(hw); | 354 | return ieee80211_register_hw(hw); |
| 353 | } | 355 | } |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index 47c42c607964..7806963b1905 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h | |||
| @@ -71,7 +71,6 @@ struct mt76x02_dev { | |||
| 71 | struct mac_address macaddr_list[8]; | 71 | struct mac_address macaddr_list[8]; |
| 72 | 72 | ||
| 73 | struct mutex phy_mutex; | 73 | struct mutex phy_mutex; |
| 74 | struct mutex mutex; | ||
| 75 | 74 | ||
| 76 | u8 txdone_seq; | 75 | u8 txdone_seq; |
| 77 | DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status); | 76 | DECLARE_KFIFO_PTR(txstatus_fifo, struct mt76x02_tx_status); |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index 3824290b219d..fd125722d1fb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c | |||
| @@ -507,8 +507,10 @@ int mt76x2_register_device(struct mt76x02_dev *dev) | |||
| 507 | mt76x2_dfs_init_detector(dev); | 507 | mt76x2_dfs_init_detector(dev); |
| 508 | 508 | ||
| 509 | /* init led callbacks */ | 509 | /* init led callbacks */ |
| 510 | dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; | 510 | if (IS_ENABLED(CONFIG_MT76_LEDS)) { |
| 511 | dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; | 511 | dev->mt76.led_cdev.brightness_set = mt76x2_led_set_brightness; |
| 512 | dev->mt76.led_cdev.blink_set = mt76x2_led_set_blink; | ||
| 513 | } | ||
| 512 | 514 | ||
| 513 | ret = mt76_register_device(&dev->mt76, true, mt76x02_rates, | 515 | ret = mt76_register_device(&dev->mt76, true, mt76x02_rates, |
| 514 | ARRAY_SIZE(mt76x02_rates)); | 516 | ARRAY_SIZE(mt76x02_rates)); |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c index 034a06295668..3f001bd6806c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c | |||
| @@ -272,9 +272,9 @@ mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val) | |||
| 272 | if (val != ~0 && val > 0xffff) | 272 | if (val != ~0 && val > 0xffff) |
| 273 | return -EINVAL; | 273 | return -EINVAL; |
| 274 | 274 | ||
| 275 | mutex_lock(&dev->mutex); | 275 | mutex_lock(&dev->mt76.mutex); |
| 276 | mt76x2_mac_set_tx_protection(dev, val); | 276 | mt76x2_mac_set_tx_protection(dev, val); |
| 277 | mutex_unlock(&dev->mutex); | 277 | mutex_unlock(&dev->mt76.mutex); |
| 278 | 278 | ||
| 279 | return 0; | 279 | return 0; |
| 280 | } | 280 | } |
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index 4c2154b9e6a3..bd10165d7eec 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c | |||
| @@ -285,7 +285,7 @@ static int wl1271_probe(struct sdio_func *func, | |||
| 285 | struct resource res[2]; | 285 | struct resource res[2]; |
| 286 | mmc_pm_flag_t mmcflags; | 286 | mmc_pm_flag_t mmcflags; |
| 287 | int ret = -ENOMEM; | 287 | int ret = -ENOMEM; |
| 288 | int irq, wakeirq; | 288 | int irq, wakeirq, num_irqs; |
| 289 | const char *chip_family; | 289 | const char *chip_family; |
| 290 | 290 | ||
| 291 | /* We are only able to handle the wlan function */ | 291 | /* We are only able to handle the wlan function */ |
| @@ -353,12 +353,17 @@ static int wl1271_probe(struct sdio_func *func, | |||
| 353 | irqd_get_trigger_type(irq_get_irq_data(irq)); | 353 | irqd_get_trigger_type(irq_get_irq_data(irq)); |
| 354 | res[0].name = "irq"; | 354 | res[0].name = "irq"; |
| 355 | 355 | ||
| 356 | res[1].start = wakeirq; | ||
| 357 | res[1].flags = IORESOURCE_IRQ | | ||
| 358 | irqd_get_trigger_type(irq_get_irq_data(wakeirq)); | ||
| 359 | res[1].name = "wakeirq"; | ||
| 360 | 356 | ||
| 361 | ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res)); | 357 | if (wakeirq > 0) { |
| 358 | res[1].start = wakeirq; | ||
| 359 | res[1].flags = IORESOURCE_IRQ | | ||
| 360 | irqd_get_trigger_type(irq_get_irq_data(wakeirq)); | ||
| 361 | res[1].name = "wakeirq"; | ||
| 362 | num_irqs = 2; | ||
| 363 | } else { | ||
| 364 | num_irqs = 1; | ||
| 365 | } | ||
| 366 | ret = platform_device_add_resources(glue->core, res, num_irqs); | ||
| 362 | if (ret) { | 367 | if (ret) { |
| 363 | dev_err(glue->dev, "can't add resources\n"); | 368 | dev_err(glue->dev, "can't add resources\n"); |
| 364 | goto out_dev_put; | 369 | goto out_dev_put; |
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 0b70c8bab045..54032c466636 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
| @@ -152,6 +152,7 @@ struct nvme_fc_ctrl { | |||
| 152 | 152 | ||
| 153 | bool ioq_live; | 153 | bool ioq_live; |
| 154 | bool assoc_active; | 154 | bool assoc_active; |
| 155 | atomic_t err_work_active; | ||
| 155 | u64 association_id; | 156 | u64 association_id; |
| 156 | 157 | ||
| 157 | struct list_head ctrl_list; /* rport->ctrl_list */ | 158 | struct list_head ctrl_list; /* rport->ctrl_list */ |
| @@ -160,6 +161,7 @@ struct nvme_fc_ctrl { | |||
| 160 | struct blk_mq_tag_set tag_set; | 161 | struct blk_mq_tag_set tag_set; |
| 161 | 162 | ||
| 162 | struct delayed_work connect_work; | 163 | struct delayed_work connect_work; |
| 164 | struct work_struct err_work; | ||
| 163 | 165 | ||
| 164 | struct kref ref; | 166 | struct kref ref; |
| 165 | u32 flags; | 167 | u32 flags; |
| @@ -1531,6 +1533,10 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) | |||
| 1531 | struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; | 1533 | struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; |
| 1532 | int i; | 1534 | int i; |
| 1533 | 1535 | ||
| 1536 | /* ensure we've initialized the ops once */ | ||
| 1537 | if (!(aen_op->flags & FCOP_FLAGS_AEN)) | ||
| 1538 | return; | ||
| 1539 | |||
| 1534 | for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) | 1540 | for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) |
| 1535 | __nvme_fc_abort_op(ctrl, aen_op); | 1541 | __nvme_fc_abort_op(ctrl, aen_op); |
| 1536 | } | 1542 | } |
| @@ -2049,7 +2055,25 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl) | |||
| 2049 | static void | 2055 | static void |
| 2050 | nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) | 2056 | nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) |
| 2051 | { | 2057 | { |
| 2052 | /* only proceed if in LIVE state - e.g. on first error */ | 2058 | int active; |
| 2059 | |||
| 2060 | /* | ||
| 2061 | * if an error (io timeout, etc) while (re)connecting, | ||
| 2062 | * it's an error on creating the new association. | ||
| 2063 | * Start the error recovery thread if it hasn't already | ||
| 2064 | * been started. It is expected there could be multiple | ||
| 2065 | * ios hitting this path before things are cleaned up. | ||
| 2066 | */ | ||
| 2067 | if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { | ||
| 2068 | active = atomic_xchg(&ctrl->err_work_active, 1); | ||
| 2069 | if (!active && !schedule_work(&ctrl->err_work)) { | ||
| 2070 | atomic_set(&ctrl->err_work_active, 0); | ||
| 2071 | WARN_ON(1); | ||
| 2072 | } | ||
| 2073 | return; | ||
| 2074 | } | ||
| 2075 | |||
| 2076 | /* Otherwise, only proceed if in LIVE state - e.g. on first error */ | ||
| 2053 | if (ctrl->ctrl.state != NVME_CTRL_LIVE) | 2077 | if (ctrl->ctrl.state != NVME_CTRL_LIVE) |
| 2054 | return; | 2078 | return; |
| 2055 | 2079 | ||
| @@ -2814,6 +2838,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl) | |||
| 2814 | { | 2838 | { |
| 2815 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); | 2839 | struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); |
| 2816 | 2840 | ||
| 2841 | cancel_work_sync(&ctrl->err_work); | ||
| 2817 | cancel_delayed_work_sync(&ctrl->connect_work); | 2842 | cancel_delayed_work_sync(&ctrl->connect_work); |
| 2818 | /* | 2843 | /* |
| 2819 | * kill the association on the link side. this will block | 2844 | * kill the association on the link side. this will block |
| @@ -2866,23 +2891,30 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) | |||
| 2866 | } | 2891 | } |
| 2867 | 2892 | ||
| 2868 | static void | 2893 | static void |
| 2869 | nvme_fc_reset_ctrl_work(struct work_struct *work) | 2894 | __nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl) |
| 2870 | { | 2895 | { |
| 2871 | struct nvme_fc_ctrl *ctrl = | 2896 | nvme_stop_keep_alive(&ctrl->ctrl); |
| 2872 | container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); | ||
| 2873 | int ret; | ||
| 2874 | |||
| 2875 | nvme_stop_ctrl(&ctrl->ctrl); | ||
| 2876 | 2897 | ||
| 2877 | /* will block will waiting for io to terminate */ | 2898 | /* will block will waiting for io to terminate */ |
| 2878 | nvme_fc_delete_association(ctrl); | 2899 | nvme_fc_delete_association(ctrl); |
| 2879 | 2900 | ||
| 2880 | if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { | 2901 | if (ctrl->ctrl.state != NVME_CTRL_CONNECTING && |
| 2902 | !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) | ||
| 2881 | dev_err(ctrl->ctrl.device, | 2903 | dev_err(ctrl->ctrl.device, |
| 2882 | "NVME-FC{%d}: error_recovery: Couldn't change state " | 2904 | "NVME-FC{%d}: error_recovery: Couldn't change state " |
| 2883 | "to CONNECTING\n", ctrl->cnum); | 2905 | "to CONNECTING\n", ctrl->cnum); |
| 2884 | return; | 2906 | } |
| 2885 | } | 2907 | |
| 2908 | static void | ||
| 2909 | nvme_fc_reset_ctrl_work(struct work_struct *work) | ||
| 2910 | { | ||
| 2911 | struct nvme_fc_ctrl *ctrl = | ||
| 2912 | container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); | ||
| 2913 | int ret; | ||
| 2914 | |||
| 2915 | __nvme_fc_terminate_io(ctrl); | ||
| 2916 | |||
| 2917 | nvme_stop_ctrl(&ctrl->ctrl); | ||
| 2886 | 2918 | ||
| 2887 | if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) | 2919 | if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) |
| 2888 | ret = nvme_fc_create_association(ctrl); | 2920 | ret = nvme_fc_create_association(ctrl); |
| @@ -2897,6 +2929,24 @@ nvme_fc_reset_ctrl_work(struct work_struct *work) | |||
| 2897 | ctrl->cnum); | 2929 | ctrl->cnum); |
| 2898 | } | 2930 | } |
| 2899 | 2931 | ||
| 2932 | static void | ||
| 2933 | nvme_fc_connect_err_work(struct work_struct *work) | ||
| 2934 | { | ||
| 2935 | struct nvme_fc_ctrl *ctrl = | ||
| 2936 | container_of(work, struct nvme_fc_ctrl, err_work); | ||
| 2937 | |||
| 2938 | __nvme_fc_terminate_io(ctrl); | ||
| 2939 | |||
| 2940 | atomic_set(&ctrl->err_work_active, 0); | ||
| 2941 | |||
| 2942 | /* | ||
| 2943 | * Rescheduling the connection after recovering | ||
| 2944 | * from the io error is left to the reconnect work | ||
| 2945 | * item, which is what should have stalled waiting on | ||
| 2946 | * the io that had the error that scheduled this work. | ||
| 2947 | */ | ||
| 2948 | } | ||
| 2949 | |||
| 2900 | static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { | 2950 | static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = { |
| 2901 | .name = "fc", | 2951 | .name = "fc", |
| 2902 | .module = THIS_MODULE, | 2952 | .module = THIS_MODULE, |
| @@ -3007,6 +3057,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
| 3007 | ctrl->cnum = idx; | 3057 | ctrl->cnum = idx; |
| 3008 | ctrl->ioq_live = false; | 3058 | ctrl->ioq_live = false; |
| 3009 | ctrl->assoc_active = false; | 3059 | ctrl->assoc_active = false; |
| 3060 | atomic_set(&ctrl->err_work_active, 0); | ||
| 3010 | init_waitqueue_head(&ctrl->ioabort_wait); | 3061 | init_waitqueue_head(&ctrl->ioabort_wait); |
| 3011 | 3062 | ||
| 3012 | get_device(ctrl->dev); | 3063 | get_device(ctrl->dev); |
| @@ -3014,6 +3065,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
| 3014 | 3065 | ||
| 3015 | INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); | 3066 | INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); |
| 3016 | INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); | 3067 | INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); |
| 3068 | INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work); | ||
| 3017 | spin_lock_init(&ctrl->lock); | 3069 | spin_lock_init(&ctrl->lock); |
| 3018 | 3070 | ||
| 3019 | /* io queue count */ | 3071 | /* io queue count */ |
| @@ -3103,6 +3155,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
| 3103 | fail_ctrl: | 3155 | fail_ctrl: |
| 3104 | nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); | 3156 | nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); |
| 3105 | cancel_work_sync(&ctrl->ctrl.reset_work); | 3157 | cancel_work_sync(&ctrl->ctrl.reset_work); |
| 3158 | cancel_work_sync(&ctrl->err_work); | ||
| 3106 | cancel_delayed_work_sync(&ctrl->connect_work); | 3159 | cancel_delayed_work_sync(&ctrl->connect_work); |
| 3107 | 3160 | ||
| 3108 | ctrl->ctrl.opts = NULL; | 3161 | ctrl->ctrl.opts = NULL; |
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 9b18ce90f907..27f67dfa649d 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c | |||
| @@ -44,6 +44,7 @@ struct nvmem_cell { | |||
| 44 | int bytes; | 44 | int bytes; |
| 45 | int bit_offset; | 45 | int bit_offset; |
| 46 | int nbits; | 46 | int nbits; |
| 47 | struct device_node *np; | ||
| 47 | struct nvmem_device *nvmem; | 48 | struct nvmem_device *nvmem; |
| 48 | struct list_head node; | 49 | struct list_head node; |
| 49 | }; | 50 | }; |
| @@ -298,6 +299,7 @@ static void nvmem_cell_drop(struct nvmem_cell *cell) | |||
| 298 | mutex_lock(&nvmem_mutex); | 299 | mutex_lock(&nvmem_mutex); |
| 299 | list_del(&cell->node); | 300 | list_del(&cell->node); |
| 300 | mutex_unlock(&nvmem_mutex); | 301 | mutex_unlock(&nvmem_mutex); |
| 302 | of_node_put(cell->np); | ||
| 301 | kfree(cell->name); | 303 | kfree(cell->name); |
| 302 | kfree(cell); | 304 | kfree(cell); |
| 303 | } | 305 | } |
| @@ -530,6 +532,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem) | |||
| 530 | return -ENOMEM; | 532 | return -ENOMEM; |
| 531 | 533 | ||
| 532 | cell->nvmem = nvmem; | 534 | cell->nvmem = nvmem; |
| 535 | cell->np = of_node_get(child); | ||
| 533 | cell->offset = be32_to_cpup(addr++); | 536 | cell->offset = be32_to_cpup(addr++); |
| 534 | cell->bytes = be32_to_cpup(addr); | 537 | cell->bytes = be32_to_cpup(addr); |
| 535 | cell->name = kasprintf(GFP_KERNEL, "%pOFn", child); | 538 | cell->name = kasprintf(GFP_KERNEL, "%pOFn", child); |
| @@ -960,14 +963,13 @@ out: | |||
| 960 | 963 | ||
| 961 | #if IS_ENABLED(CONFIG_OF) | 964 | #if IS_ENABLED(CONFIG_OF) |
| 962 | static struct nvmem_cell * | 965 | static struct nvmem_cell * |
| 963 | nvmem_find_cell_by_index(struct nvmem_device *nvmem, int index) | 966 | nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np) |
| 964 | { | 967 | { |
| 965 | struct nvmem_cell *cell = NULL; | 968 | struct nvmem_cell *cell = NULL; |
| 966 | int i = 0; | ||
| 967 | 969 | ||
| 968 | mutex_lock(&nvmem_mutex); | 970 | mutex_lock(&nvmem_mutex); |
| 969 | list_for_each_entry(cell, &nvmem->cells, node) { | 971 | list_for_each_entry(cell, &nvmem->cells, node) { |
| 970 | if (index == i++) | 972 | if (np == cell->np) |
| 971 | break; | 973 | break; |
| 972 | } | 974 | } |
| 973 | mutex_unlock(&nvmem_mutex); | 975 | mutex_unlock(&nvmem_mutex); |
| @@ -1011,7 +1013,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id) | |||
| 1011 | if (IS_ERR(nvmem)) | 1013 | if (IS_ERR(nvmem)) |
| 1012 | return ERR_CAST(nvmem); | 1014 | return ERR_CAST(nvmem); |
| 1013 | 1015 | ||
| 1014 | cell = nvmem_find_cell_by_index(nvmem, index); | 1016 | cell = nvmem_find_cell_by_node(nvmem, cell_np); |
| 1015 | if (!cell) { | 1017 | if (!cell) { |
| 1016 | __nvmem_device_put(nvmem); | 1018 | __nvmem_device_put(nvmem); |
| 1017 | return ERR_PTR(-ENOENT); | 1019 | return ERR_PTR(-ENOENT); |
diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c index 9e5a9a3112c9..3f4fb4dbbe33 100644 --- a/drivers/opp/ti-opp-supply.c +++ b/drivers/opp/ti-opp-supply.c | |||
| @@ -288,7 +288,10 @@ static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data) | |||
| 288 | int ret; | 288 | int ret; |
| 289 | 289 | ||
| 290 | vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data, | 290 | vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data, |
| 291 | new_supply_vbb->u_volt); | 291 | new_supply_vdd->u_volt); |
| 292 | |||
| 293 | if (new_supply_vdd->u_volt_min < vdd_uv) | ||
| 294 | new_supply_vdd->u_volt_min = vdd_uv; | ||
| 292 | 295 | ||
| 293 | /* Scaling up? Scale voltage before frequency */ | 296 | /* Scaling up? Scale voltage before frequency */ |
| 294 | if (freq > old_freq) { | 297 | if (freq > old_freq) { |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 20c85eed1a75..b658b9a5eb1e 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
| @@ -1749,7 +1749,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) | |||
| 1749 | static void | 1749 | static void |
| 1750 | __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) | 1750 | __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) |
| 1751 | { | 1751 | { |
| 1752 | int cnt; | 1752 | int cnt, status; |
| 1753 | unsigned long flags; | 1753 | unsigned long flags; |
| 1754 | srb_t *sp; | 1754 | srb_t *sp; |
| 1755 | scsi_qla_host_t *vha = qp->vha; | 1755 | scsi_qla_host_t *vha = qp->vha; |
| @@ -1799,10 +1799,16 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) | |||
| 1799 | if (!sp_get(sp)) { | 1799 | if (!sp_get(sp)) { |
| 1800 | spin_unlock_irqrestore | 1800 | spin_unlock_irqrestore |
| 1801 | (qp->qp_lock_ptr, flags); | 1801 | (qp->qp_lock_ptr, flags); |
| 1802 | qla2xxx_eh_abort( | 1802 | status = qla2xxx_eh_abort( |
| 1803 | GET_CMD_SP(sp)); | 1803 | GET_CMD_SP(sp)); |
| 1804 | spin_lock_irqsave | 1804 | spin_lock_irqsave |
| 1805 | (qp->qp_lock_ptr, flags); | 1805 | (qp->qp_lock_ptr, flags); |
| 1806 | /* | ||
| 1807 | * Get rid of extra reference caused | ||
| 1808 | * by early exit from qla2xxx_eh_abort | ||
| 1809 | */ | ||
| 1810 | if (status == FAST_IO_FAIL) | ||
| 1811 | atomic_dec(&sp->ref_count); | ||
| 1806 | } | 1812 | } |
| 1807 | } | 1813 | } |
| 1808 | sp->done(sp, res); | 1814 | sp->done(sp, res); |
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c index 46df707e6f2c..452e19f8fb47 100644 --- a/drivers/scsi/ufs/ufs-hisi.c +++ b/drivers/scsi/ufs/ufs-hisi.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include "unipro.h" | 20 | #include "unipro.h" |
| 21 | #include "ufs-hisi.h" | 21 | #include "ufs-hisi.h" |
| 22 | #include "ufshci.h" | 22 | #include "ufshci.h" |
| 23 | #include "ufs_quirks.h" | ||
| 23 | 24 | ||
| 24 | static int ufs_hisi_check_hibern8(struct ufs_hba *hba) | 25 | static int ufs_hisi_check_hibern8(struct ufs_hba *hba) |
| 25 | { | 26 | { |
| @@ -390,6 +391,14 @@ static void ufs_hisi_set_dev_cap(struct ufs_hisi_dev_params *hisi_param) | |||
| 390 | 391 | ||
| 391 | static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) | 392 | static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba) |
| 392 | { | 393 | { |
| 394 | if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME) { | ||
| 395 | pr_info("ufs flash device must set VS_DebugSaveConfigTime 0x10\n"); | ||
| 396 | /* VS_DebugSaveConfigTime */ | ||
| 397 | ufshcd_dme_set(hba, UIC_ARG_MIB(0xD0A0), 0x10); | ||
| 398 | /* sync length */ | ||
| 399 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x1556), 0x48); | ||
| 400 | } | ||
| 401 | |||
| 393 | /* update */ | 402 | /* update */ |
| 394 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1); | 403 | ufshcd_dme_set(hba, UIC_ARG_MIB(0x15A8), 0x1); |
| 395 | /* PA_TxSkip */ | 404 | /* PA_TxSkip */ |
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index 71f73d1d1ad1..5d2dfdb41a6f 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h | |||
| @@ -131,4 +131,10 @@ struct ufs_dev_fix { | |||
| 131 | */ | 131 | */ |
| 132 | #define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8) | 132 | #define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME (1 << 8) |
| 133 | 133 | ||
| 134 | /* | ||
| 135 | * Some UFS devices require VS_DebugSaveConfigTime is 0x10, | ||
| 136 | * enabling this quirk ensure this. | ||
| 137 | */ | ||
| 138 | #define UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME (1 << 9) | ||
| 139 | |||
| 134 | #endif /* UFS_QUIRKS_H_ */ | 140 | #endif /* UFS_QUIRKS_H_ */ |
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index 27db55b0ca7f..f1c57cd33b5b 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
| @@ -231,6 +231,8 @@ static struct ufs_dev_fix ufs_fixups[] = { | |||
| 231 | UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), | 231 | UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ), |
| 232 | UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, | 232 | UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, |
| 233 | UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME), | 233 | UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME), |
| 234 | UFS_FIX(UFS_VENDOR_SKHYNIX, "hB8aL1" /*H28U62301AMR*/, | ||
| 235 | UFS_DEVICE_QUIRK_HOST_VS_DEBUGSAVECONFIGTIME), | ||
| 234 | 236 | ||
| 235 | END_FIX | 237 | END_FIX |
| 236 | }; | 238 | }; |
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c index 7218fb963d0a..1382a8df6c75 100644 --- a/drivers/slimbus/qcom-ngd-ctrl.c +++ b/drivers/slimbus/qcom-ngd-ctrl.c | |||
| @@ -777,9 +777,6 @@ static int qcom_slim_ngd_xfer_msg(struct slim_controller *sctrl, | |||
| 777 | u8 la = txn->la; | 777 | u8 la = txn->la; |
| 778 | bool usr_msg = false; | 778 | bool usr_msg = false; |
| 779 | 779 | ||
| 780 | if (txn->mc & SLIM_MSG_CLK_PAUSE_SEQ_FLG) | ||
| 781 | return -EPROTONOSUPPORT; | ||
| 782 | |||
| 783 | if (txn->mt == SLIM_MSG_MT_CORE && | 780 | if (txn->mt == SLIM_MSG_MT_CORE && |
| 784 | (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION && | 781 | (txn->mc >= SLIM_MSG_MC_BEGIN_RECONFIGURATION && |
| 785 | txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW)) | 782 | txn->mc <= SLIM_MSG_MC_RECONFIGURE_NOW)) |
diff --git a/drivers/slimbus/slimbus.h b/drivers/slimbus/slimbus.h index 4399d1873e2d..9be41089edde 100644 --- a/drivers/slimbus/slimbus.h +++ b/drivers/slimbus/slimbus.h | |||
| @@ -61,12 +61,6 @@ | |||
| 61 | #define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL 0x58 | 61 | #define SLIM_MSG_MC_NEXT_REMOVE_CHANNEL 0x58 |
| 62 | #define SLIM_MSG_MC_RECONFIGURE_NOW 0x5F | 62 | #define SLIM_MSG_MC_RECONFIGURE_NOW 0x5F |
| 63 | 63 | ||
| 64 | /* | ||
| 65 | * Clock pause flag to indicate that the reconfig message | ||
| 66 | * corresponds to clock pause sequence | ||
| 67 | */ | ||
| 68 | #define SLIM_MSG_CLK_PAUSE_SEQ_FLG (1U << 8) | ||
| 69 | |||
| 70 | /* Clock pause values per SLIMbus spec */ | 64 | /* Clock pause values per SLIMbus spec */ |
| 71 | #define SLIM_CLK_FAST 0 | 65 | #define SLIM_CLK_FAST 0 |
| 72 | #define SLIM_CLK_CONST_PHASE 1 | 66 | #define SLIM_CLK_CONST_PHASE 1 |
diff --git a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c index a53231b08d30..e3425bf082ae 100644 --- a/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c +++ b/drivers/staging/media/davinci_vpfe/dm365_ipipeif.c | |||
| @@ -310,6 +310,7 @@ static int ipipeif_hw_setup(struct v4l2_subdev *sd) | |||
| 310 | ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2); | 310 | ipipeif_write(val, ipipeif_base_addr, IPIPEIF_CFG2); |
| 311 | break; | 311 | break; |
| 312 | } | 312 | } |
| 313 | /* fall through */ | ||
| 313 | 314 | ||
| 314 | case IPIPEIF_SDRAM_YUV: | 315 | case IPIPEIF_SDRAM_YUV: |
| 315 | /* Set clock divider */ | 316 | /* Set clock divider */ |
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c index 82558455384a..dd121f66fa2d 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus.c | |||
| @@ -253,7 +253,7 @@ static const struct v4l2_m2m_ops cedrus_m2m_ops = { | |||
| 253 | 253 | ||
| 254 | static const struct media_device_ops cedrus_m2m_media_ops = { | 254 | static const struct media_device_ops cedrus_m2m_media_ops = { |
| 255 | .req_validate = cedrus_request_validate, | 255 | .req_validate = cedrus_request_validate, |
| 256 | .req_queue = vb2_m2m_request_queue, | 256 | .req_queue = v4l2_m2m_request_queue, |
| 257 | }; | 257 | }; |
| 258 | 258 | ||
| 259 | static int cedrus_probe(struct platform_device *pdev) | 259 | static int cedrus_probe(struct platform_device *pdev) |
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 85644669fbe7..0a357db4b31b 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c | |||
| @@ -961,6 +961,8 @@ int __uio_register_device(struct module *owner, | |||
| 961 | if (ret) | 961 | if (ret) |
| 962 | goto err_uio_dev_add_attributes; | 962 | goto err_uio_dev_add_attributes; |
| 963 | 963 | ||
| 964 | info->uio_dev = idev; | ||
| 965 | |||
| 964 | if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) { | 966 | if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) { |
| 965 | /* | 967 | /* |
| 966 | * Note that we deliberately don't use devm_request_irq | 968 | * Note that we deliberately don't use devm_request_irq |
| @@ -972,11 +974,12 @@ int __uio_register_device(struct module *owner, | |||
| 972 | */ | 974 | */ |
| 973 | ret = request_irq(info->irq, uio_interrupt, | 975 | ret = request_irq(info->irq, uio_interrupt, |
| 974 | info->irq_flags, info->name, idev); | 976 | info->irq_flags, info->name, idev); |
| 975 | if (ret) | 977 | if (ret) { |
| 978 | info->uio_dev = NULL; | ||
| 976 | goto err_request_irq; | 979 | goto err_request_irq; |
| 980 | } | ||
| 977 | } | 981 | } |
| 978 | 982 | ||
| 979 | info->uio_dev = idev; | ||
| 980 | return 0; | 983 | return 0; |
| 981 | 984 | ||
| 982 | err_request_irq: | 985 | err_request_irq: |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 47d75c20c211..1b68fed464cb 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
| @@ -1696,6 +1696,9 @@ static const struct usb_device_id acm_ids[] = { | |||
| 1696 | { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */ | 1696 | { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */ |
| 1697 | .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ | 1697 | .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ |
| 1698 | }, | 1698 | }, |
| 1699 | { USB_DEVICE(0x0572, 0x1349), /* Hiro (Conexant) USB MODEM H50228 */ | ||
| 1700 | .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ | ||
| 1701 | }, | ||
| 1699 | { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */ | 1702 | { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */ |
| 1700 | .driver_info = QUIRK_CONTROL_LINE_STATE, }, | 1703 | .driver_info = QUIRK_CONTROL_LINE_STATE, }, |
| 1701 | { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */ | 1704 | { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */ |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index c6077d582d29..0f9381b69a3b 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
| @@ -2794,6 +2794,7 @@ static int hub_port_reset(struct usb_hub *hub, int port1, | |||
| 2794 | int i, status; | 2794 | int i, status; |
| 2795 | u16 portchange, portstatus; | 2795 | u16 portchange, portstatus; |
| 2796 | struct usb_port *port_dev = hub->ports[port1 - 1]; | 2796 | struct usb_port *port_dev = hub->ports[port1 - 1]; |
| 2797 | int reset_recovery_time; | ||
| 2797 | 2798 | ||
| 2798 | if (!hub_is_superspeed(hub->hdev)) { | 2799 | if (!hub_is_superspeed(hub->hdev)) { |
| 2799 | if (warm) { | 2800 | if (warm) { |
| @@ -2849,7 +2850,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1, | |||
| 2849 | USB_PORT_FEAT_C_BH_PORT_RESET); | 2850 | USB_PORT_FEAT_C_BH_PORT_RESET); |
| 2850 | usb_clear_port_feature(hub->hdev, port1, | 2851 | usb_clear_port_feature(hub->hdev, port1, |
| 2851 | USB_PORT_FEAT_C_PORT_LINK_STATE); | 2852 | USB_PORT_FEAT_C_PORT_LINK_STATE); |
| 2852 | usb_clear_port_feature(hub->hdev, port1, | 2853 | |
| 2854 | if (udev) | ||
| 2855 | usb_clear_port_feature(hub->hdev, port1, | ||
| 2853 | USB_PORT_FEAT_C_CONNECTION); | 2856 | USB_PORT_FEAT_C_CONNECTION); |
| 2854 | 2857 | ||
| 2855 | /* | 2858 | /* |
| @@ -2885,11 +2888,18 @@ static int hub_port_reset(struct usb_hub *hub, int port1, | |||
| 2885 | 2888 | ||
| 2886 | done: | 2889 | done: |
| 2887 | if (status == 0) { | 2890 | if (status == 0) { |
| 2888 | /* TRSTRCY = 10 ms; plus some extra */ | ||
| 2889 | if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM) | 2891 | if (port_dev->quirks & USB_PORT_QUIRK_FAST_ENUM) |
| 2890 | usleep_range(10000, 12000); | 2892 | usleep_range(10000, 12000); |
| 2891 | else | 2893 | else { |
| 2892 | msleep(10 + 40); | 2894 | /* TRSTRCY = 10 ms; plus some extra */ |
| 2895 | reset_recovery_time = 10 + 40; | ||
| 2896 | |||
| 2897 | /* Hub needs extra delay after resetting its port. */ | ||
| 2898 | if (hub->hdev->quirks & USB_QUIRK_HUB_SLOW_RESET) | ||
| 2899 | reset_recovery_time += 100; | ||
| 2900 | |||
| 2901 | msleep(reset_recovery_time); | ||
| 2902 | } | ||
| 2893 | 2903 | ||
| 2894 | if (udev) { | 2904 | if (udev) { |
| 2895 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); | 2905 | struct usb_hcd *hcd = bus_to_hcd(udev->bus); |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 178d6c6063c0..f9ff03e6af93 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
| @@ -128,6 +128,9 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp) | |||
| 128 | case 'n': | 128 | case 'n': |
| 129 | flags |= USB_QUIRK_DELAY_CTRL_MSG; | 129 | flags |= USB_QUIRK_DELAY_CTRL_MSG; |
| 130 | break; | 130 | break; |
| 131 | case 'o': | ||
| 132 | flags |= USB_QUIRK_HUB_SLOW_RESET; | ||
| 133 | break; | ||
| 131 | /* Ignore unrecognized flag characters */ | 134 | /* Ignore unrecognized flag characters */ |
| 132 | } | 135 | } |
| 133 | } | 136 | } |
| @@ -380,6 +383,9 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 380 | { USB_DEVICE(0x1a0a, 0x0200), .driver_info = | 383 | { USB_DEVICE(0x1a0a, 0x0200), .driver_info = |
| 381 | USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, | 384 | USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, |
| 382 | 385 | ||
| 386 | /* Terminus Technology Inc. Hub */ | ||
| 387 | { USB_DEVICE(0x1a40, 0x0101), .driver_info = USB_QUIRK_HUB_SLOW_RESET }, | ||
| 388 | |||
| 383 | /* Corsair K70 RGB */ | 389 | /* Corsair K70 RGB */ |
| 384 | { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, | 390 | { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, |
| 385 | 391 | ||
| @@ -391,6 +397,9 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 391 | { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | | 397 | { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | |
| 392 | USB_QUIRK_DELAY_CTRL_MSG }, | 398 | USB_QUIRK_DELAY_CTRL_MSG }, |
| 393 | 399 | ||
| 400 | /* Corsair K70 LUX RGB */ | ||
| 401 | { USB_DEVICE(0x1b1c, 0x1b33), .driver_info = USB_QUIRK_DELAY_INIT }, | ||
| 402 | |||
| 394 | /* Corsair K70 LUX */ | 403 | /* Corsair K70 LUX */ |
| 395 | { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, | 404 | { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, |
| 396 | 405 | ||
| @@ -411,6 +420,11 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
| 411 | { USB_DEVICE(0x2040, 0x7200), .driver_info = | 420 | { USB_DEVICE(0x2040, 0x7200), .driver_info = |
| 412 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 421 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
| 413 | 422 | ||
| 423 | /* Raydium Touchscreen */ | ||
| 424 | { USB_DEVICE(0x2386, 0x3114), .driver_info = USB_QUIRK_NO_LPM }, | ||
| 425 | |||
| 426 | { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM }, | ||
| 427 | |||
| 414 | /* DJI CineSSD */ | 428 | /* DJI CineSSD */ |
| 415 | { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, | 429 | { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM }, |
| 416 | 430 | ||
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c index d257c541e51b..7afc10872f1f 100644 --- a/drivers/usb/dwc2/pci.c +++ b/drivers/usb/dwc2/pci.c | |||
| @@ -120,6 +120,7 @@ static int dwc2_pci_probe(struct pci_dev *pci, | |||
| 120 | dwc2 = platform_device_alloc("dwc2", PLATFORM_DEVID_AUTO); | 120 | dwc2 = platform_device_alloc("dwc2", PLATFORM_DEVID_AUTO); |
| 121 | if (!dwc2) { | 121 | if (!dwc2) { |
| 122 | dev_err(dev, "couldn't allocate dwc2 device\n"); | 122 | dev_err(dev, "couldn't allocate dwc2 device\n"); |
| 123 | ret = -ENOMEM; | ||
| 123 | goto err; | 124 | goto err; |
| 124 | } | 125 | } |
| 125 | 126 | ||
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index becfbb87f791..2f2048aa5fde 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c | |||
| @@ -1499,6 +1499,7 @@ static int dwc3_probe(struct platform_device *pdev) | |||
| 1499 | 1499 | ||
| 1500 | err5: | 1500 | err5: |
| 1501 | dwc3_event_buffers_cleanup(dwc); | 1501 | dwc3_event_buffers_cleanup(dwc); |
| 1502 | dwc3_ulpi_exit(dwc); | ||
| 1502 | 1503 | ||
| 1503 | err4: | 1504 | err4: |
| 1504 | dwc3_free_scratch_buffers(dwc); | 1505 | dwc3_free_scratch_buffers(dwc); |
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 1286076a8890..842795856bf4 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c | |||
| @@ -283,8 +283,10 @@ err: | |||
| 283 | static void dwc3_pci_remove(struct pci_dev *pci) | 283 | static void dwc3_pci_remove(struct pci_dev *pci) |
| 284 | { | 284 | { |
| 285 | struct dwc3_pci *dwc = pci_get_drvdata(pci); | 285 | struct dwc3_pci *dwc = pci_get_drvdata(pci); |
| 286 | struct pci_dev *pdev = dwc->pci; | ||
| 286 | 287 | ||
| 287 | gpiod_remove_lookup_table(&platform_bytcr_gpios); | 288 | if (pdev->device == PCI_DEVICE_ID_INTEL_BYT) |
| 289 | gpiod_remove_lookup_table(&platform_bytcr_gpios); | ||
| 288 | #ifdef CONFIG_PM | 290 | #ifdef CONFIG_PM |
| 289 | cancel_work_sync(&dwc->wakeup_work); | 291 | cancel_work_sync(&dwc->wakeup_work); |
| 290 | #endif | 292 | #endif |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 679c12e14522..9faad896b3a1 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
| @@ -1081,7 +1081,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, | |||
| 1081 | /* Now prepare one extra TRB to align transfer size */ | 1081 | /* Now prepare one extra TRB to align transfer size */ |
| 1082 | trb = &dep->trb_pool[dep->trb_enqueue]; | 1082 | trb = &dep->trb_pool[dep->trb_enqueue]; |
| 1083 | __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, | 1083 | __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, |
| 1084 | maxp - rem, false, 0, | 1084 | maxp - rem, false, 1, |
| 1085 | req->request.stream_id, | 1085 | req->request.stream_id, |
| 1086 | req->request.short_not_ok, | 1086 | req->request.short_not_ok, |
| 1087 | req->request.no_interrupt); | 1087 | req->request.no_interrupt); |
| @@ -1125,7 +1125,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, | |||
| 1125 | /* Now prepare one extra TRB to align transfer size */ | 1125 | /* Now prepare one extra TRB to align transfer size */ |
| 1126 | trb = &dep->trb_pool[dep->trb_enqueue]; | 1126 | trb = &dep->trb_pool[dep->trb_enqueue]; |
| 1127 | __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, | 1127 | __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, |
| 1128 | false, 0, req->request.stream_id, | 1128 | false, 1, req->request.stream_id, |
| 1129 | req->request.short_not_ok, | 1129 | req->request.short_not_ok, |
| 1130 | req->request.no_interrupt); | 1130 | req->request.no_interrupt); |
| 1131 | } else if (req->request.zero && req->request.length && | 1131 | } else if (req->request.zero && req->request.length && |
| @@ -1141,7 +1141,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, | |||
| 1141 | /* Now prepare one extra TRB to handle ZLP */ | 1141 | /* Now prepare one extra TRB to handle ZLP */ |
| 1142 | trb = &dep->trb_pool[dep->trb_enqueue]; | 1142 | trb = &dep->trb_pool[dep->trb_enqueue]; |
| 1143 | __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, | 1143 | __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, |
| 1144 | false, 0, req->request.stream_id, | 1144 | false, 1, req->request.stream_id, |
| 1145 | req->request.short_not_ok, | 1145 | req->request.short_not_ok, |
| 1146 | req->request.no_interrupt); | 1146 | req->request.no_interrupt); |
| 1147 | } else { | 1147 | } else { |
| @@ -2259,7 +2259,7 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep, | |||
| 2259 | * with one TRB pending in the ring. We need to manually clear HWO bit | 2259 | * with one TRB pending in the ring. We need to manually clear HWO bit |
| 2260 | * from that TRB. | 2260 | * from that TRB. |
| 2261 | */ | 2261 | */ |
| 2262 | if ((req->zero || req->unaligned) && (trb->ctrl & DWC3_TRB_CTRL_HWO)) { | 2262 | if ((req->zero || req->unaligned) && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) { |
| 2263 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; | 2263 | trb->ctrl &= ~DWC3_TRB_CTRL_HWO; |
| 2264 | return 1; | 2264 | return 1; |
| 2265 | } | 2265 | } |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 3ada83d81bda..31e8bf3578c8 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
| @@ -215,7 +215,6 @@ struct ffs_io_data { | |||
| 215 | 215 | ||
| 216 | struct mm_struct *mm; | 216 | struct mm_struct *mm; |
| 217 | struct work_struct work; | 217 | struct work_struct work; |
| 218 | struct work_struct cancellation_work; | ||
| 219 | 218 | ||
| 220 | struct usb_ep *ep; | 219 | struct usb_ep *ep; |
| 221 | struct usb_request *req; | 220 | struct usb_request *req; |
| @@ -1073,31 +1072,22 @@ ffs_epfile_open(struct inode *inode, struct file *file) | |||
| 1073 | return 0; | 1072 | return 0; |
| 1074 | } | 1073 | } |
| 1075 | 1074 | ||
| 1076 | static void ffs_aio_cancel_worker(struct work_struct *work) | ||
| 1077 | { | ||
| 1078 | struct ffs_io_data *io_data = container_of(work, struct ffs_io_data, | ||
| 1079 | cancellation_work); | ||
| 1080 | |||
| 1081 | ENTER(); | ||
| 1082 | |||
| 1083 | usb_ep_dequeue(io_data->ep, io_data->req); | ||
| 1084 | } | ||
| 1085 | |||
| 1086 | static int ffs_aio_cancel(struct kiocb *kiocb) | 1075 | static int ffs_aio_cancel(struct kiocb *kiocb) |
| 1087 | { | 1076 | { |
| 1088 | struct ffs_io_data *io_data = kiocb->private; | 1077 | struct ffs_io_data *io_data = kiocb->private; |
| 1089 | struct ffs_data *ffs = io_data->ffs; | 1078 | struct ffs_epfile *epfile = kiocb->ki_filp->private_data; |
| 1090 | int value; | 1079 | int value; |
| 1091 | 1080 | ||
| 1092 | ENTER(); | 1081 | ENTER(); |
| 1093 | 1082 | ||
| 1094 | if (likely(io_data && io_data->ep && io_data->req)) { | 1083 | spin_lock_irq(&epfile->ffs->eps_lock); |
| 1095 | INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker); | 1084 | |
| 1096 | queue_work(ffs->io_completion_wq, &io_data->cancellation_work); | 1085 | if (likely(io_data && io_data->ep && io_data->req)) |
| 1097 | value = -EINPROGRESS; | 1086 | value = usb_ep_dequeue(io_data->ep, io_data->req); |
| 1098 | } else { | 1087 | else |
| 1099 | value = -EINVAL; | 1088 | value = -EINVAL; |
| 1100 | } | 1089 | |
| 1090 | spin_unlock_irq(&epfile->ffs->eps_lock); | ||
| 1101 | 1091 | ||
| 1102 | return value; | 1092 | return value; |
| 1103 | } | 1093 | } |
diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c index 27f00160332e..3c4abb5a1c3f 100644 --- a/drivers/usb/host/xhci-histb.c +++ b/drivers/usb/host/xhci-histb.c | |||
| @@ -325,14 +325,16 @@ static int xhci_histb_remove(struct platform_device *dev) | |||
| 325 | struct xhci_hcd_histb *histb = platform_get_drvdata(dev); | 325 | struct xhci_hcd_histb *histb = platform_get_drvdata(dev); |
| 326 | struct usb_hcd *hcd = histb->hcd; | 326 | struct usb_hcd *hcd = histb->hcd; |
| 327 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 327 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 328 | struct usb_hcd *shared_hcd = xhci->shared_hcd; | ||
| 328 | 329 | ||
| 329 | xhci->xhc_state |= XHCI_STATE_REMOVING; | 330 | xhci->xhc_state |= XHCI_STATE_REMOVING; |
| 330 | 331 | ||
| 331 | usb_remove_hcd(xhci->shared_hcd); | 332 | usb_remove_hcd(shared_hcd); |
| 333 | xhci->shared_hcd = NULL; | ||
| 332 | device_wakeup_disable(&dev->dev); | 334 | device_wakeup_disable(&dev->dev); |
| 333 | 335 | ||
| 334 | usb_remove_hcd(hcd); | 336 | usb_remove_hcd(hcd); |
| 335 | usb_put_hcd(xhci->shared_hcd); | 337 | usb_put_hcd(shared_hcd); |
| 336 | 338 | ||
| 337 | xhci_histb_host_disable(histb); | 339 | xhci_histb_host_disable(histb); |
| 338 | usb_put_hcd(hcd); | 340 | usb_put_hcd(hcd); |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 12eea73d9f20..94aca1b5ac8a 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
| @@ -876,7 +876,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, | |||
| 876 | status |= USB_PORT_STAT_SUSPEND; | 876 | status |= USB_PORT_STAT_SUSPEND; |
| 877 | } | 877 | } |
| 878 | if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME && | 878 | if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME && |
| 879 | !DEV_SUPERSPEED_ANY(raw_port_status)) { | 879 | !DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) { |
| 880 | if ((raw_port_status & PORT_RESET) || | 880 | if ((raw_port_status & PORT_RESET) || |
| 881 | !(raw_port_status & PORT_PE)) | 881 | !(raw_port_status & PORT_PE)) |
| 882 | return 0xffffffff; | 882 | return 0xffffffff; |
| @@ -921,7 +921,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, | |||
| 921 | time_left = wait_for_completion_timeout( | 921 | time_left = wait_for_completion_timeout( |
| 922 | &bus_state->rexit_done[wIndex], | 922 | &bus_state->rexit_done[wIndex], |
| 923 | msecs_to_jiffies( | 923 | msecs_to_jiffies( |
| 924 | XHCI_MAX_REXIT_TIMEOUT)); | 924 | XHCI_MAX_REXIT_TIMEOUT_MS)); |
| 925 | spin_lock_irqsave(&xhci->lock, flags); | 925 | spin_lock_irqsave(&xhci->lock, flags); |
| 926 | 926 | ||
| 927 | if (time_left) { | 927 | if (time_left) { |
| @@ -935,7 +935,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, | |||
| 935 | } else { | 935 | } else { |
| 936 | int port_status = readl(port->addr); | 936 | int port_status = readl(port->addr); |
| 937 | xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n", | 937 | xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n", |
| 938 | XHCI_MAX_REXIT_TIMEOUT, | 938 | XHCI_MAX_REXIT_TIMEOUT_MS, |
| 939 | port_status); | 939 | port_status); |
| 940 | status |= USB_PORT_STAT_SUSPEND; | 940 | status |= USB_PORT_STAT_SUSPEND; |
| 941 | clear_bit(wIndex, &bus_state->rexit_ports); | 941 | clear_bit(wIndex, &bus_state->rexit_ports); |
| @@ -1474,15 +1474,18 @@ int xhci_bus_suspend(struct usb_hcd *hcd) | |||
| 1474 | unsigned long flags; | 1474 | unsigned long flags; |
| 1475 | struct xhci_hub *rhub; | 1475 | struct xhci_hub *rhub; |
| 1476 | struct xhci_port **ports; | 1476 | struct xhci_port **ports; |
| 1477 | u32 portsc_buf[USB_MAXCHILDREN]; | ||
| 1478 | bool wake_enabled; | ||
| 1477 | 1479 | ||
| 1478 | rhub = xhci_get_rhub(hcd); | 1480 | rhub = xhci_get_rhub(hcd); |
| 1479 | ports = rhub->ports; | 1481 | ports = rhub->ports; |
| 1480 | max_ports = rhub->num_ports; | 1482 | max_ports = rhub->num_ports; |
| 1481 | bus_state = &xhci->bus_state[hcd_index(hcd)]; | 1483 | bus_state = &xhci->bus_state[hcd_index(hcd)]; |
| 1484 | wake_enabled = hcd->self.root_hub->do_remote_wakeup; | ||
| 1482 | 1485 | ||
| 1483 | spin_lock_irqsave(&xhci->lock, flags); | 1486 | spin_lock_irqsave(&xhci->lock, flags); |
| 1484 | 1487 | ||
| 1485 | if (hcd->self.root_hub->do_remote_wakeup) { | 1488 | if (wake_enabled) { |
| 1486 | if (bus_state->resuming_ports || /* USB2 */ | 1489 | if (bus_state->resuming_ports || /* USB2 */ |
| 1487 | bus_state->port_remote_wakeup) { /* USB3 */ | 1490 | bus_state->port_remote_wakeup) { /* USB3 */ |
| 1488 | spin_unlock_irqrestore(&xhci->lock, flags); | 1491 | spin_unlock_irqrestore(&xhci->lock, flags); |
| @@ -1490,26 +1493,36 @@ int xhci_bus_suspend(struct usb_hcd *hcd) | |||
| 1490 | return -EBUSY; | 1493 | return -EBUSY; |
| 1491 | } | 1494 | } |
| 1492 | } | 1495 | } |
| 1493 | 1496 | /* | |
| 1494 | port_index = max_ports; | 1497 | * Prepare ports for suspend, but don't write anything before all ports |
| 1498 | * are checked and we know bus suspend can proceed | ||
| 1499 | */ | ||
| 1495 | bus_state->bus_suspended = 0; | 1500 | bus_state->bus_suspended = 0; |
| 1501 | port_index = max_ports; | ||
| 1496 | while (port_index--) { | 1502 | while (port_index--) { |
| 1497 | /* suspend the port if the port is not suspended */ | ||
| 1498 | u32 t1, t2; | 1503 | u32 t1, t2; |
| 1499 | int slot_id; | ||
| 1500 | 1504 | ||
| 1501 | t1 = readl(ports[port_index]->addr); | 1505 | t1 = readl(ports[port_index]->addr); |
| 1502 | t2 = xhci_port_state_to_neutral(t1); | 1506 | t2 = xhci_port_state_to_neutral(t1); |
| 1507 | portsc_buf[port_index] = 0; | ||
| 1503 | 1508 | ||
| 1504 | if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) { | 1509 | /* Bail out if a USB3 port has a new device in link training */ |
| 1505 | xhci_dbg(xhci, "port %d not suspended\n", port_index); | 1510 | if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) { |
| 1506 | slot_id = xhci_find_slot_id_by_port(hcd, xhci, | 1511 | bus_state->bus_suspended = 0; |
| 1507 | port_index + 1); | 1512 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 1508 | if (slot_id) { | 1513 | xhci_dbg(xhci, "Bus suspend bailout, port in polling\n"); |
| 1514 | return -EBUSY; | ||
| 1515 | } | ||
| 1516 | |||
| 1517 | /* suspend ports in U0, or bail out for new connect changes */ | ||
| 1518 | if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { | ||
| 1519 | if ((t1 & PORT_CSC) && wake_enabled) { | ||
| 1520 | bus_state->bus_suspended = 0; | ||
| 1509 | spin_unlock_irqrestore(&xhci->lock, flags); | 1521 | spin_unlock_irqrestore(&xhci->lock, flags); |
| 1510 | xhci_stop_device(xhci, slot_id, 1); | 1522 | xhci_dbg(xhci, "Bus suspend bailout, port connect change\n"); |
| 1511 | spin_lock_irqsave(&xhci->lock, flags); | 1523 | return -EBUSY; |
| 1512 | } | 1524 | } |
| 1525 | xhci_dbg(xhci, "port %d not suspended\n", port_index); | ||
| 1513 | t2 &= ~PORT_PLS_MASK; | 1526 | t2 &= ~PORT_PLS_MASK; |
| 1514 | t2 |= PORT_LINK_STROBE | XDEV_U3; | 1527 | t2 |= PORT_LINK_STROBE | XDEV_U3; |
| 1515 | set_bit(port_index, &bus_state->bus_suspended); | 1528 | set_bit(port_index, &bus_state->bus_suspended); |
| @@ -1518,7 +1531,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd) | |||
| 1518 | * including the USB 3.0 roothub, but only if CONFIG_PM | 1531 | * including the USB 3.0 roothub, but only if CONFIG_PM |
| 1519 | * is enabled, so also enable remote wake here. | 1532 | * is enabled, so also enable remote wake here. |
| 1520 | */ | 1533 | */ |
| 1521 | if (hcd->self.root_hub->do_remote_wakeup) { | 1534 | if (wake_enabled) { |
| 1522 | if (t1 & PORT_CONNECT) { | 1535 | if (t1 & PORT_CONNECT) { |
| 1523 | t2 |= PORT_WKOC_E | PORT_WKDISC_E; | 1536 | t2 |= PORT_WKOC_E | PORT_WKDISC_E; |
| 1524 | t2 &= ~PORT_WKCONN_E; | 1537 | t2 &= ~PORT_WKCONN_E; |
| @@ -1538,7 +1551,26 @@ int xhci_bus_suspend(struct usb_hcd *hcd) | |||
| 1538 | 1551 | ||
| 1539 | t1 = xhci_port_state_to_neutral(t1); | 1552 | t1 = xhci_port_state_to_neutral(t1); |
| 1540 | if (t1 != t2) | 1553 | if (t1 != t2) |
| 1541 | writel(t2, ports[port_index]->addr); | 1554 | portsc_buf[port_index] = t2; |
| 1555 | } | ||
| 1556 | |||
| 1557 | /* write port settings, stopping and suspending ports if needed */ | ||
| 1558 | port_index = max_ports; | ||
| 1559 | while (port_index--) { | ||
| 1560 | if (!portsc_buf[port_index]) | ||
| 1561 | continue; | ||
| 1562 | if (test_bit(port_index, &bus_state->bus_suspended)) { | ||
| 1563 | int slot_id; | ||
| 1564 | |||
| 1565 | slot_id = xhci_find_slot_id_by_port(hcd, xhci, | ||
| 1566 | port_index + 1); | ||
| 1567 | if (slot_id) { | ||
| 1568 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
| 1569 | xhci_stop_device(xhci, slot_id, 1); | ||
| 1570 | spin_lock_irqsave(&xhci->lock, flags); | ||
| 1571 | } | ||
| 1572 | } | ||
| 1573 | writel(portsc_buf[port_index], ports[port_index]->addr); | ||
| 1542 | } | 1574 | } |
| 1543 | hcd->state = HC_STATE_SUSPENDED; | 1575 | hcd->state = HC_STATE_SUSPENDED; |
| 1544 | bus_state->next_statechange = jiffies + msecs_to_jiffies(10); | 1576 | bus_state->next_statechange = jiffies + msecs_to_jiffies(10); |
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 71d0d33c3286..60987c787e44 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c | |||
| @@ -590,12 +590,14 @@ static int xhci_mtk_remove(struct platform_device *dev) | |||
| 590 | struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev); | 590 | struct xhci_hcd_mtk *mtk = platform_get_drvdata(dev); |
| 591 | struct usb_hcd *hcd = mtk->hcd; | 591 | struct usb_hcd *hcd = mtk->hcd; |
| 592 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 592 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 593 | struct usb_hcd *shared_hcd = xhci->shared_hcd; | ||
| 593 | 594 | ||
| 594 | usb_remove_hcd(xhci->shared_hcd); | 595 | usb_remove_hcd(shared_hcd); |
| 596 | xhci->shared_hcd = NULL; | ||
| 595 | device_init_wakeup(&dev->dev, false); | 597 | device_init_wakeup(&dev->dev, false); |
| 596 | 598 | ||
| 597 | usb_remove_hcd(hcd); | 599 | usb_remove_hcd(hcd); |
| 598 | usb_put_hcd(xhci->shared_hcd); | 600 | usb_put_hcd(shared_hcd); |
| 599 | usb_put_hcd(hcd); | 601 | usb_put_hcd(hcd); |
| 600 | xhci_mtk_sch_exit(mtk); | 602 | xhci_mtk_sch_exit(mtk); |
| 601 | xhci_mtk_clks_disable(mtk); | 603 | xhci_mtk_clks_disable(mtk); |
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 01c57055c0c5..a9515265db4d 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
| @@ -248,6 +248,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
| 248 | if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) | 248 | if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) |
| 249 | xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; | 249 | xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; |
| 250 | 250 | ||
| 251 | if ((pdev->vendor == PCI_VENDOR_ID_BROADCOM || | ||
| 252 | pdev->vendor == PCI_VENDOR_ID_CAVIUM) && | ||
| 253 | pdev->device == 0x9026) | ||
| 254 | xhci->quirks |= XHCI_RESET_PLL_ON_DISCONNECT; | ||
| 255 | |||
| 251 | if (xhci->quirks & XHCI_RESET_ON_RESUME) | 256 | if (xhci->quirks & XHCI_RESET_ON_RESUME) |
| 252 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, | 257 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
| 253 | "QUIRK: Resetting on resume"); | 258 | "QUIRK: Resetting on resume"); |
| @@ -380,6 +385,7 @@ static void xhci_pci_remove(struct pci_dev *dev) | |||
| 380 | if (xhci->shared_hcd) { | 385 | if (xhci->shared_hcd) { |
| 381 | usb_remove_hcd(xhci->shared_hcd); | 386 | usb_remove_hcd(xhci->shared_hcd); |
| 382 | usb_put_hcd(xhci->shared_hcd); | 387 | usb_put_hcd(xhci->shared_hcd); |
| 388 | xhci->shared_hcd = NULL; | ||
| 383 | } | 389 | } |
| 384 | 390 | ||
| 385 | /* Workaround for spurious wakeups at shutdown with HSW */ | 391 | /* Workaround for spurious wakeups at shutdown with HSW */ |
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 32b5574ad5c5..ef09cb06212f 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c | |||
| @@ -362,14 +362,16 @@ static int xhci_plat_remove(struct platform_device *dev) | |||
| 362 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | 362 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); |
| 363 | struct clk *clk = xhci->clk; | 363 | struct clk *clk = xhci->clk; |
| 364 | struct clk *reg_clk = xhci->reg_clk; | 364 | struct clk *reg_clk = xhci->reg_clk; |
| 365 | struct usb_hcd *shared_hcd = xhci->shared_hcd; | ||
| 365 | 366 | ||
| 366 | xhci->xhc_state |= XHCI_STATE_REMOVING; | 367 | xhci->xhc_state |= XHCI_STATE_REMOVING; |
| 367 | 368 | ||
| 368 | usb_remove_hcd(xhci->shared_hcd); | 369 | usb_remove_hcd(shared_hcd); |
| 370 | xhci->shared_hcd = NULL; | ||
| 369 | usb_phy_shutdown(hcd->usb_phy); | 371 | usb_phy_shutdown(hcd->usb_phy); |
| 370 | 372 | ||
| 371 | usb_remove_hcd(hcd); | 373 | usb_remove_hcd(hcd); |
| 372 | usb_put_hcd(xhci->shared_hcd); | 374 | usb_put_hcd(shared_hcd); |
| 373 | 375 | ||
| 374 | clk_disable_unprepare(clk); | 376 | clk_disable_unprepare(clk); |
| 375 | clk_disable_unprepare(reg_clk); | 377 | clk_disable_unprepare(reg_clk); |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index a8d92c90fb58..65750582133f 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
| @@ -1521,6 +1521,35 @@ static void handle_device_notification(struct xhci_hcd *xhci, | |||
| 1521 | usb_wakeup_notification(udev->parent, udev->portnum); | 1521 | usb_wakeup_notification(udev->parent, udev->portnum); |
| 1522 | } | 1522 | } |
| 1523 | 1523 | ||
| 1524 | /* | ||
| 1525 | * Quirk handler for errata seen on Cavium ThunderX2 processor XHCI | ||
| 1526 | * Controller. | ||
| 1527 | * As per ThunderX2 errata-129, a USB 2 device may come up as USB 1 | ||
| 1528 | * if a connection to a USB 1 device is followed by another connection | ||
| 1529 | * to a USB 2 device. | ||
| 1530 | * | ||
| 1531 | * Reset the PHY after the USB device is disconnected if device speed | ||
| 1532 | * is less than HCD_USB3. | ||
| 1533 | * Retry the reset sequence max of 4 times checking the PLL lock status. | ||
| 1534 | * | ||
| 1535 | */ | ||
| 1536 | static void xhci_cavium_reset_phy_quirk(struct xhci_hcd *xhci) | ||
| 1537 | { | ||
| 1538 | struct usb_hcd *hcd = xhci_to_hcd(xhci); | ||
| 1539 | u32 pll_lock_check; | ||
| 1540 | u32 retry_count = 4; | ||
| 1541 | |||
| 1542 | do { | ||
| 1543 | /* Assert PHY reset */ | ||
| 1544 | writel(0x6F, hcd->regs + 0x1048); | ||
| 1545 | udelay(10); | ||
| 1546 | /* De-assert the PHY reset */ | ||
| 1547 | writel(0x7F, hcd->regs + 0x1048); | ||
| 1548 | udelay(200); | ||
| 1549 | pll_lock_check = readl(hcd->regs + 0x1070); | ||
| 1550 | } while (!(pll_lock_check & 0x1) && --retry_count); | ||
| 1551 | } | ||
| 1552 | |||
| 1524 | static void handle_port_status(struct xhci_hcd *xhci, | 1553 | static void handle_port_status(struct xhci_hcd *xhci, |
| 1525 | union xhci_trb *event) | 1554 | union xhci_trb *event) |
| 1526 | { | 1555 | { |
| @@ -1556,6 +1585,13 @@ static void handle_port_status(struct xhci_hcd *xhci, | |||
| 1556 | goto cleanup; | 1585 | goto cleanup; |
| 1557 | } | 1586 | } |
| 1558 | 1587 | ||
| 1588 | /* We might get interrupts after shared_hcd is removed */ | ||
| 1589 | if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) { | ||
| 1590 | xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n"); | ||
| 1591 | bogus_port_status = true; | ||
| 1592 | goto cleanup; | ||
| 1593 | } | ||
| 1594 | |||
| 1559 | hcd = port->rhub->hcd; | 1595 | hcd = port->rhub->hcd; |
| 1560 | bus_state = &xhci->bus_state[hcd_index(hcd)]; | 1596 | bus_state = &xhci->bus_state[hcd_index(hcd)]; |
| 1561 | hcd_portnum = port->hcd_portnum; | 1597 | hcd_portnum = port->hcd_portnum; |
| @@ -1639,7 +1675,7 @@ static void handle_port_status(struct xhci_hcd *xhci, | |||
| 1639 | * RExit to a disconnect state). If so, let the the driver know it's | 1675 | * RExit to a disconnect state). If so, let the the driver know it's |
| 1640 | * out of the RExit state. | 1676 | * out of the RExit state. |
| 1641 | */ | 1677 | */ |
| 1642 | if (!DEV_SUPERSPEED_ANY(portsc) && | 1678 | if (!DEV_SUPERSPEED_ANY(portsc) && hcd->speed < HCD_USB3 && |
| 1643 | test_and_clear_bit(hcd_portnum, | 1679 | test_and_clear_bit(hcd_portnum, |
| 1644 | &bus_state->rexit_ports)) { | 1680 | &bus_state->rexit_ports)) { |
| 1645 | complete(&bus_state->rexit_done[hcd_portnum]); | 1681 | complete(&bus_state->rexit_done[hcd_portnum]); |
| @@ -1647,8 +1683,12 @@ static void handle_port_status(struct xhci_hcd *xhci, | |||
| 1647 | goto cleanup; | 1683 | goto cleanup; |
| 1648 | } | 1684 | } |
| 1649 | 1685 | ||
| 1650 | if (hcd->speed < HCD_USB3) | 1686 | if (hcd->speed < HCD_USB3) { |
| 1651 | xhci_test_and_clear_bit(xhci, port, PORT_PLC); | 1687 | xhci_test_and_clear_bit(xhci, port, PORT_PLC); |
| 1688 | if ((xhci->quirks & XHCI_RESET_PLL_ON_DISCONNECT) && | ||
| 1689 | (portsc & PORT_CSC) && !(portsc & PORT_CONNECT)) | ||
| 1690 | xhci_cavium_reset_phy_quirk(xhci); | ||
| 1691 | } | ||
| 1652 | 1692 | ||
| 1653 | cleanup: | 1693 | cleanup: |
| 1654 | /* Update event ring dequeue pointer before dropping the lock */ | 1694 | /* Update event ring dequeue pointer before dropping the lock */ |
| @@ -2266,6 +2306,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
| 2266 | goto cleanup; | 2306 | goto cleanup; |
| 2267 | case COMP_RING_UNDERRUN: | 2307 | case COMP_RING_UNDERRUN: |
| 2268 | case COMP_RING_OVERRUN: | 2308 | case COMP_RING_OVERRUN: |
| 2309 | case COMP_STOPPED_LENGTH_INVALID: | ||
| 2269 | goto cleanup; | 2310 | goto cleanup; |
| 2270 | default: | 2311 | default: |
| 2271 | xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n", | 2312 | xhci_err(xhci, "ERROR Transfer event for unknown stream ring slot %u ep %u\n", |
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 6b5db344de30..938ff06c0349 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c | |||
| @@ -1303,6 +1303,7 @@ static int tegra_xusb_remove(struct platform_device *pdev) | |||
| 1303 | 1303 | ||
| 1304 | usb_remove_hcd(xhci->shared_hcd); | 1304 | usb_remove_hcd(xhci->shared_hcd); |
| 1305 | usb_put_hcd(xhci->shared_hcd); | 1305 | usb_put_hcd(xhci->shared_hcd); |
| 1306 | xhci->shared_hcd = NULL; | ||
| 1306 | usb_remove_hcd(tegra->hcd); | 1307 | usb_remove_hcd(tegra->hcd); |
| 1307 | usb_put_hcd(tegra->hcd); | 1308 | usb_put_hcd(tegra->hcd); |
| 1308 | 1309 | ||
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 0420eefa647a..c928dbbff881 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
| @@ -719,8 +719,6 @@ static void xhci_stop(struct usb_hcd *hcd) | |||
| 719 | 719 | ||
| 720 | /* Only halt host and free memory after both hcds are removed */ | 720 | /* Only halt host and free memory after both hcds are removed */ |
| 721 | if (!usb_hcd_is_primary_hcd(hcd)) { | 721 | if (!usb_hcd_is_primary_hcd(hcd)) { |
| 722 | /* usb core will free this hcd shortly, unset pointer */ | ||
| 723 | xhci->shared_hcd = NULL; | ||
| 724 | mutex_unlock(&xhci->mutex); | 722 | mutex_unlock(&xhci->mutex); |
| 725 | return; | 723 | return; |
| 726 | } | 724 | } |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index bf0b3692dc9a..260b259b72bc 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
| @@ -1680,7 +1680,7 @@ struct xhci_bus_state { | |||
| 1680 | * It can take up to 20 ms to transition from RExit to U0 on the | 1680 | * It can take up to 20 ms to transition from RExit to U0 on the |
| 1681 | * Intel Lynx Point LP xHCI host. | 1681 | * Intel Lynx Point LP xHCI host. |
| 1682 | */ | 1682 | */ |
| 1683 | #define XHCI_MAX_REXIT_TIMEOUT (20 * 1000) | 1683 | #define XHCI_MAX_REXIT_TIMEOUT_MS 20 |
| 1684 | 1684 | ||
| 1685 | static inline unsigned int hcd_index(struct usb_hcd *hcd) | 1685 | static inline unsigned int hcd_index(struct usb_hcd *hcd) |
| 1686 | { | 1686 | { |
| @@ -1849,6 +1849,7 @@ struct xhci_hcd { | |||
| 1849 | #define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31) | 1849 | #define XHCI_INTEL_USB_ROLE_SW BIT_ULL(31) |
| 1850 | #define XHCI_ZERO_64B_REGS BIT_ULL(32) | 1850 | #define XHCI_ZERO_64B_REGS BIT_ULL(32) |
| 1851 | #define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) | 1851 | #define XHCI_DEFAULT_PM_RUNTIME_ALLOW BIT_ULL(33) |
| 1852 | #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) | ||
| 1852 | 1853 | ||
| 1853 | unsigned int num_active_eps; | 1854 | unsigned int num_active_eps; |
| 1854 | unsigned int limit_active_eps; | 1855 | unsigned int limit_active_eps; |
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index bd539f3058bc..85b48c6ddc7e 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c | |||
| @@ -50,6 +50,7 @@ static const struct usb_device_id appledisplay_table[] = { | |||
| 50 | { APPLEDISPLAY_DEVICE(0x9219) }, | 50 | { APPLEDISPLAY_DEVICE(0x9219) }, |
| 51 | { APPLEDISPLAY_DEVICE(0x921c) }, | 51 | { APPLEDISPLAY_DEVICE(0x921c) }, |
| 52 | { APPLEDISPLAY_DEVICE(0x921d) }, | 52 | { APPLEDISPLAY_DEVICE(0x921d) }, |
| 53 | { APPLEDISPLAY_DEVICE(0x9222) }, | ||
| 53 | { APPLEDISPLAY_DEVICE(0x9236) }, | 54 | { APPLEDISPLAY_DEVICE(0x9236) }, |
| 54 | 55 | ||
| 55 | /* Terminating entry */ | 56 | /* Terminating entry */ |
| @@ -62,6 +62,7 @@ | |||
| 62 | #include <linux/oom.h> | 62 | #include <linux/oom.h> |
| 63 | #include <linux/compat.h> | 63 | #include <linux/compat.h> |
| 64 | #include <linux/vmalloc.h> | 64 | #include <linux/vmalloc.h> |
| 65 | #include <linux/freezer.h> | ||
| 65 | 66 | ||
| 66 | #include <linux/uaccess.h> | 67 | #include <linux/uaccess.h> |
| 67 | #include <asm/mmu_context.h> | 68 | #include <asm/mmu_context.h> |
| @@ -1083,7 +1084,7 @@ static int de_thread(struct task_struct *tsk) | |||
| 1083 | while (sig->notify_count) { | 1084 | while (sig->notify_count) { |
| 1084 | __set_current_state(TASK_KILLABLE); | 1085 | __set_current_state(TASK_KILLABLE); |
| 1085 | spin_unlock_irq(lock); | 1086 | spin_unlock_irq(lock); |
| 1086 | schedule(); | 1087 | freezable_schedule(); |
| 1087 | if (unlikely(__fatal_signal_pending(tsk))) | 1088 | if (unlikely(__fatal_signal_pending(tsk))) |
| 1088 | goto killed; | 1089 | goto killed; |
| 1089 | spin_lock_irq(lock); | 1090 | spin_lock_irq(lock); |
| @@ -1111,7 +1112,7 @@ static int de_thread(struct task_struct *tsk) | |||
| 1111 | __set_current_state(TASK_KILLABLE); | 1112 | __set_current_state(TASK_KILLABLE); |
| 1112 | write_unlock_irq(&tasklist_lock); | 1113 | write_unlock_irq(&tasklist_lock); |
| 1113 | cgroup_threadgroup_change_end(tsk); | 1114 | cgroup_threadgroup_change_end(tsk); |
| 1114 | schedule(); | 1115 | freezable_schedule(); |
| 1115 | if (unlikely(__fatal_signal_pending(tsk))) | 1116 | if (unlikely(__fatal_signal_pending(tsk))) |
| 1116 | goto killed; | 1117 | goto killed; |
| 1117 | } | 1118 | } |
diff --git a/fs/iomap.c b/fs/iomap.c index 64ce240217a1..3ffb776fbebe 100644 --- a/fs/iomap.c +++ b/fs/iomap.c | |||
| @@ -142,13 +142,14 @@ static void | |||
| 142 | iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, | 142 | iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, |
| 143 | loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp) | 143 | loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp) |
| 144 | { | 144 | { |
| 145 | loff_t orig_pos = *pos; | ||
| 146 | loff_t isize = i_size_read(inode); | ||
| 145 | unsigned block_bits = inode->i_blkbits; | 147 | unsigned block_bits = inode->i_blkbits; |
| 146 | unsigned block_size = (1 << block_bits); | 148 | unsigned block_size = (1 << block_bits); |
| 147 | unsigned poff = offset_in_page(*pos); | 149 | unsigned poff = offset_in_page(*pos); |
| 148 | unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length); | 150 | unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length); |
| 149 | unsigned first = poff >> block_bits; | 151 | unsigned first = poff >> block_bits; |
| 150 | unsigned last = (poff + plen - 1) >> block_bits; | 152 | unsigned last = (poff + plen - 1) >> block_bits; |
| 151 | unsigned end = offset_in_page(i_size_read(inode)) >> block_bits; | ||
| 152 | 153 | ||
| 153 | /* | 154 | /* |
| 154 | * If the block size is smaller than the page size we need to check the | 155 | * If the block size is smaller than the page size we need to check the |
| @@ -183,8 +184,12 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop, | |||
| 183 | * handle both halves separately so that we properly zero data in the | 184 | * handle both halves separately so that we properly zero data in the |
| 184 | * page cache for blocks that are entirely outside of i_size. | 185 | * page cache for blocks that are entirely outside of i_size. |
| 185 | */ | 186 | */ |
| 186 | if (first <= end && last > end) | 187 | if (orig_pos <= isize && orig_pos + length > isize) { |
| 187 | plen -= (last - end) * block_size; | 188 | unsigned end = offset_in_page(isize - 1) >> block_bits; |
| 189 | |||
| 190 | if (first <= end && last > end) | ||
| 191 | plen -= (last - end) * block_size; | ||
| 192 | } | ||
| 188 | 193 | ||
| 189 | *offp = poff; | 194 | *offp = poff; |
| 190 | *lenp = plen; | 195 | *lenp = plen; |
| @@ -1580,7 +1585,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, | |||
| 1580 | struct bio *bio; | 1585 | struct bio *bio; |
| 1581 | bool need_zeroout = false; | 1586 | bool need_zeroout = false; |
| 1582 | bool use_fua = false; | 1587 | bool use_fua = false; |
| 1583 | int nr_pages, ret; | 1588 | int nr_pages, ret = 0; |
| 1584 | size_t copied = 0; | 1589 | size_t copied = 0; |
| 1585 | 1590 | ||
| 1586 | if ((pos | length | align) & ((1 << blkbits) - 1)) | 1591 | if ((pos | length | align) & ((1 << blkbits) - 1)) |
| @@ -1596,12 +1601,13 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, | |||
| 1596 | 1601 | ||
| 1597 | if (iomap->flags & IOMAP_F_NEW) { | 1602 | if (iomap->flags & IOMAP_F_NEW) { |
| 1598 | need_zeroout = true; | 1603 | need_zeroout = true; |
| 1599 | } else { | 1604 | } else if (iomap->type == IOMAP_MAPPED) { |
| 1600 | /* | 1605 | /* |
| 1601 | * Use a FUA write if we need datasync semantics, this | 1606 | * Use a FUA write if we need datasync semantics, this is a pure |
| 1602 | * is a pure data IO that doesn't require any metadata | 1607 | * data IO that doesn't require any metadata updates (including |
| 1603 | * updates and the underlying device supports FUA. This | 1608 | * after IO completion such as unwritten extent conversion) and |
| 1604 | * allows us to avoid cache flushes on IO completion. | 1609 | * the underlying device supports FUA. This allows us to avoid |
| 1610 | * cache flushes on IO completion. | ||
| 1605 | */ | 1611 | */ |
| 1606 | if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) && | 1612 | if (!(iomap->flags & (IOMAP_F_SHARED|IOMAP_F_DIRTY)) && |
| 1607 | (dio->flags & IOMAP_DIO_WRITE_FUA) && | 1613 | (dio->flags & IOMAP_DIO_WRITE_FUA) && |
| @@ -1644,8 +1650,14 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, | |||
| 1644 | 1650 | ||
| 1645 | ret = bio_iov_iter_get_pages(bio, &iter); | 1651 | ret = bio_iov_iter_get_pages(bio, &iter); |
| 1646 | if (unlikely(ret)) { | 1652 | if (unlikely(ret)) { |
| 1653 | /* | ||
| 1654 | * We have to stop part way through an IO. We must fall | ||
| 1655 | * through to the sub-block tail zeroing here, otherwise | ||
| 1656 | * this short IO may expose stale data in the tail of | ||
| 1657 | * the block we haven't written data to. | ||
| 1658 | */ | ||
| 1647 | bio_put(bio); | 1659 | bio_put(bio); |
| 1648 | return copied ? copied : ret; | 1660 | goto zero_tail; |
| 1649 | } | 1661 | } |
| 1650 | 1662 | ||
| 1651 | n = bio->bi_iter.bi_size; | 1663 | n = bio->bi_iter.bi_size; |
| @@ -1676,13 +1688,21 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length, | |||
| 1676 | dio->submit.cookie = submit_bio(bio); | 1688 | dio->submit.cookie = submit_bio(bio); |
| 1677 | } while (nr_pages); | 1689 | } while (nr_pages); |
| 1678 | 1690 | ||
| 1679 | if (need_zeroout) { | 1691 | /* |
| 1692 | * We need to zeroout the tail of a sub-block write if the extent type | ||
| 1693 | * requires zeroing or the write extends beyond EOF. If we don't zero | ||
| 1694 | * the block tail in the latter case, we can expose stale data via mmap | ||
| 1695 | * reads of the EOF block. | ||
| 1696 | */ | ||
| 1697 | zero_tail: | ||
| 1698 | if (need_zeroout || | ||
| 1699 | ((dio->flags & IOMAP_DIO_WRITE) && pos >= i_size_read(inode))) { | ||
| 1680 | /* zero out from the end of the write to the end of the block */ | 1700 | /* zero out from the end of the write to the end of the block */ |
| 1681 | pad = pos & (fs_block_size - 1); | 1701 | pad = pos & (fs_block_size - 1); |
| 1682 | if (pad) | 1702 | if (pad) |
| 1683 | iomap_dio_zero(dio, iomap, pos, fs_block_size - pad); | 1703 | iomap_dio_zero(dio, iomap, pos, fs_block_size - pad); |
| 1684 | } | 1704 | } |
| 1685 | return copied; | 1705 | return copied ? copied : ret; |
| 1686 | } | 1706 | } |
| 1687 | 1707 | ||
| 1688 | static loff_t | 1708 | static loff_t |
| @@ -1857,6 +1877,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
| 1857 | dio->wait_for_completion = true; | 1877 | dio->wait_for_completion = true; |
| 1858 | ret = 0; | 1878 | ret = 0; |
| 1859 | } | 1879 | } |
| 1880 | |||
| 1881 | /* | ||
| 1882 | * Splicing to pipes can fail on a full pipe. We have to | ||
| 1883 | * swallow this to make it look like a short IO | ||
| 1884 | * otherwise the higher splice layers will completely | ||
| 1885 | * mishandle the error and stop moving data. | ||
| 1886 | */ | ||
| 1887 | if (ret == -EFAULT) | ||
| 1888 | ret = 0; | ||
| 1860 | break; | 1889 | break; |
| 1861 | } | 1890 | } |
| 1862 | pos += ret; | 1891 | pos += ret; |
diff --git a/fs/read_write.c b/fs/read_write.c index bfcb4ced5664..4dae0399c75a 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
| @@ -2094,17 +2094,18 @@ int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same) | |||
| 2094 | off = same->src_offset; | 2094 | off = same->src_offset; |
| 2095 | len = same->src_length; | 2095 | len = same->src_length; |
| 2096 | 2096 | ||
| 2097 | ret = -EISDIR; | ||
| 2098 | if (S_ISDIR(src->i_mode)) | 2097 | if (S_ISDIR(src->i_mode)) |
| 2099 | goto out; | 2098 | return -EISDIR; |
| 2100 | 2099 | ||
| 2101 | ret = -EINVAL; | ||
| 2102 | if (!S_ISREG(src->i_mode)) | 2100 | if (!S_ISREG(src->i_mode)) |
| 2103 | goto out; | 2101 | return -EINVAL; |
| 2102 | |||
| 2103 | if (!file->f_op->remap_file_range) | ||
| 2104 | return -EOPNOTSUPP; | ||
| 2104 | 2105 | ||
| 2105 | ret = remap_verify_area(file, off, len, false); | 2106 | ret = remap_verify_area(file, off, len, false); |
| 2106 | if (ret < 0) | 2107 | if (ret < 0) |
| 2107 | goto out; | 2108 | return ret; |
| 2108 | ret = 0; | 2109 | ret = 0; |
| 2109 | 2110 | ||
| 2110 | if (off + len > i_size_read(src)) | 2111 | if (off + len > i_size_read(src)) |
| @@ -2147,10 +2148,8 @@ next_fdput: | |||
| 2147 | fdput(dst_fd); | 2148 | fdput(dst_fd); |
| 2148 | next_loop: | 2149 | next_loop: |
| 2149 | if (fatal_signal_pending(current)) | 2150 | if (fatal_signal_pending(current)) |
| 2150 | goto out; | 2151 | break; |
| 2151 | } | 2152 | } |
| 2152 | |||
| 2153 | out: | ||
| 2154 | return ret; | 2153 | return ret; |
| 2155 | } | 2154 | } |
| 2156 | EXPORT_SYMBOL(vfs_dedupe_file_range); | 2155 | EXPORT_SYMBOL(vfs_dedupe_file_range); |
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 74d7228e755b..19e921d1586f 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c | |||
| @@ -1694,10 +1694,13 @@ xfs_bmap_add_extent_delay_real( | |||
| 1694 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: | 1694 | case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG: |
| 1695 | /* | 1695 | /* |
| 1696 | * Filling in all of a previously delayed allocation extent. | 1696 | * Filling in all of a previously delayed allocation extent. |
| 1697 | * The right neighbor is contiguous, the left is not. | 1697 | * The right neighbor is contiguous, the left is not. Take care |
| 1698 | * with delay -> unwritten extent allocation here because the | ||
| 1699 | * delalloc record we are overwriting is always written. | ||
| 1698 | */ | 1700 | */ |
| 1699 | PREV.br_startblock = new->br_startblock; | 1701 | PREV.br_startblock = new->br_startblock; |
| 1700 | PREV.br_blockcount += RIGHT.br_blockcount; | 1702 | PREV.br_blockcount += RIGHT.br_blockcount; |
| 1703 | PREV.br_state = new->br_state; | ||
| 1701 | 1704 | ||
| 1702 | xfs_iext_next(ifp, &bma->icur); | 1705 | xfs_iext_next(ifp, &bma->icur); |
| 1703 | xfs_iext_remove(bma->ip, &bma->icur, state); | 1706 | xfs_iext_remove(bma->ip, &bma->icur, state); |
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c index 86c50208a143..7fbf8af0b159 100644 --- a/fs/xfs/libxfs/xfs_ialloc_btree.c +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c | |||
| @@ -538,15 +538,18 @@ xfs_inobt_rec_check_count( | |||
| 538 | 538 | ||
| 539 | static xfs_extlen_t | 539 | static xfs_extlen_t |
| 540 | xfs_inobt_max_size( | 540 | xfs_inobt_max_size( |
| 541 | struct xfs_mount *mp) | 541 | struct xfs_mount *mp, |
| 542 | xfs_agnumber_t agno) | ||
| 542 | { | 543 | { |
| 544 | xfs_agblock_t agblocks = xfs_ag_block_count(mp, agno); | ||
| 545 | |||
| 543 | /* Bail out if we're uninitialized, which can happen in mkfs. */ | 546 | /* Bail out if we're uninitialized, which can happen in mkfs. */ |
| 544 | if (mp->m_inobt_mxr[0] == 0) | 547 | if (mp->m_inobt_mxr[0] == 0) |
| 545 | return 0; | 548 | return 0; |
| 546 | 549 | ||
| 547 | return xfs_btree_calc_size(mp->m_inobt_mnr, | 550 | return xfs_btree_calc_size(mp->m_inobt_mnr, |
| 548 | (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock / | 551 | (uint64_t)agblocks * mp->m_sb.sb_inopblock / |
| 549 | XFS_INODES_PER_CHUNK); | 552 | XFS_INODES_PER_CHUNK); |
| 550 | } | 553 | } |
| 551 | 554 | ||
| 552 | static int | 555 | static int |
| @@ -594,7 +597,7 @@ xfs_finobt_calc_reserves( | |||
| 594 | if (error) | 597 | if (error) |
| 595 | return error; | 598 | return error; |
| 596 | 599 | ||
| 597 | *ask += xfs_inobt_max_size(mp); | 600 | *ask += xfs_inobt_max_size(mp, agno); |
| 598 | *used += tree_len; | 601 | *used += tree_len; |
| 599 | return 0; | 602 | return 0; |
| 600 | } | 603 | } |
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 5d263dfdb3bc..404e581f1ea1 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
| @@ -1042,7 +1042,7 @@ out_trans_cancel: | |||
| 1042 | goto out_unlock; | 1042 | goto out_unlock; |
| 1043 | } | 1043 | } |
| 1044 | 1044 | ||
| 1045 | static int | 1045 | int |
| 1046 | xfs_flush_unmap_range( | 1046 | xfs_flush_unmap_range( |
| 1047 | struct xfs_inode *ip, | 1047 | struct xfs_inode *ip, |
| 1048 | xfs_off_t offset, | 1048 | xfs_off_t offset, |
| @@ -1195,13 +1195,7 @@ xfs_prepare_shift( | |||
| 1195 | * Writeback and invalidate cache for the remainder of the file as we're | 1195 | * Writeback and invalidate cache for the remainder of the file as we're |
| 1196 | * about to shift down every extent from offset to EOF. | 1196 | * about to shift down every extent from offset to EOF. |
| 1197 | */ | 1197 | */ |
| 1198 | error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, offset, -1); | 1198 | error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip)); |
| 1199 | if (error) | ||
| 1200 | return error; | ||
| 1201 | error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping, | ||
| 1202 | offset >> PAGE_SHIFT, -1); | ||
| 1203 | if (error) | ||
| 1204 | return error; | ||
| 1205 | 1199 | ||
| 1206 | /* | 1200 | /* |
| 1207 | * Clean out anything hanging around in the cow fork now that | 1201 | * Clean out anything hanging around in the cow fork now that |
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h index 87363d136bb6..7a78229cf1a7 100644 --- a/fs/xfs/xfs_bmap_util.h +++ b/fs/xfs/xfs_bmap_util.h | |||
| @@ -80,4 +80,7 @@ int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip, | |||
| 80 | int whichfork, xfs_extnum_t *nextents, | 80 | int whichfork, xfs_extnum_t *nextents, |
| 81 | xfs_filblks_t *count); | 81 | xfs_filblks_t *count); |
| 82 | 82 | ||
| 83 | int xfs_flush_unmap_range(struct xfs_inode *ip, xfs_off_t offset, | ||
| 84 | xfs_off_t len); | ||
| 85 | |||
| 83 | #endif /* __XFS_BMAP_UTIL_H__ */ | 86 | #endif /* __XFS_BMAP_UTIL_H__ */ |
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 12d8455bfbb2..010db5f8fb00 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c | |||
| @@ -1233,9 +1233,23 @@ xfs_buf_iodone( | |||
| 1233 | } | 1233 | } |
| 1234 | 1234 | ||
| 1235 | /* | 1235 | /* |
| 1236 | * Requeue a failed buffer for writeback | 1236 | * Requeue a failed buffer for writeback. |
| 1237 | * | 1237 | * |
| 1238 | * Return true if the buffer has been re-queued properly, false otherwise | 1238 | * We clear the log item failed state here as well, but we have to be careful |
| 1239 | * about reference counts because the only active reference counts on the buffer | ||
| 1240 | * may be the failed log items. Hence if we clear the log item failed state | ||
| 1241 | * before queuing the buffer for IO we can release all active references to | ||
| 1242 | * the buffer and free it, leading to use after free problems in | ||
| 1243 | * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which | ||
| 1244 | * order we process them in - the buffer is locked, and we own the buffer list | ||
| 1245 | * so nothing on them is going to change while we are performing this action. | ||
| 1246 | * | ||
| 1247 | * Hence we can safely queue the buffer for IO before we clear the failed log | ||
| 1248 | * item state, therefore always having an active reference to the buffer and | ||
| 1249 | * avoiding the transient zero-reference state that leads to use-after-free. | ||
| 1250 | * | ||
| 1251 | * Return true if the buffer was added to the buffer list, false if it was | ||
| 1252 | * already on the buffer list. | ||
| 1239 | */ | 1253 | */ |
| 1240 | bool | 1254 | bool |
| 1241 | xfs_buf_resubmit_failed_buffers( | 1255 | xfs_buf_resubmit_failed_buffers( |
| @@ -1243,16 +1257,16 @@ xfs_buf_resubmit_failed_buffers( | |||
| 1243 | struct list_head *buffer_list) | 1257 | struct list_head *buffer_list) |
| 1244 | { | 1258 | { |
| 1245 | struct xfs_log_item *lip; | 1259 | struct xfs_log_item *lip; |
| 1260 | bool ret; | ||
| 1261 | |||
| 1262 | ret = xfs_buf_delwri_queue(bp, buffer_list); | ||
| 1246 | 1263 | ||
| 1247 | /* | 1264 | /* |
| 1248 | * Clear XFS_LI_FAILED flag from all items before resubmit | 1265 | * XFS_LI_FAILED set/clear is protected by ail_lock, caller of this |
| 1249 | * | ||
| 1250 | * XFS_LI_FAILED set/clear is protected by ail_lock, caller this | ||
| 1251 | * function already have it acquired | 1266 | * function already have it acquired |
| 1252 | */ | 1267 | */ |
| 1253 | list_for_each_entry(lip, &bp->b_li_list, li_bio_list) | 1268 | list_for_each_entry(lip, &bp->b_li_list, li_bio_list) |
| 1254 | xfs_clear_li_failed(lip); | 1269 | xfs_clear_li_failed(lip); |
| 1255 | 1270 | ||
| 1256 | /* Add this buffer back to the delayed write list */ | 1271 | return ret; |
| 1257 | return xfs_buf_delwri_queue(bp, buffer_list); | ||
| 1258 | } | 1272 | } |
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 53c9ab8fb777..e47425071e65 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
| @@ -920,7 +920,7 @@ out_unlock: | |||
| 920 | } | 920 | } |
| 921 | 921 | ||
| 922 | 922 | ||
| 923 | loff_t | 923 | STATIC loff_t |
| 924 | xfs_file_remap_range( | 924 | xfs_file_remap_range( |
| 925 | struct file *file_in, | 925 | struct file *file_in, |
| 926 | loff_t pos_in, | 926 | loff_t pos_in, |
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index ecdb086bc23e..322a852ce284 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c | |||
| @@ -296,6 +296,7 @@ xfs_reflink_reserve_cow( | |||
| 296 | if (error) | 296 | if (error) |
| 297 | return error; | 297 | return error; |
| 298 | 298 | ||
| 299 | xfs_trim_extent(imap, got.br_startoff, got.br_blockcount); | ||
| 299 | trace_xfs_reflink_cow_alloc(ip, &got); | 300 | trace_xfs_reflink_cow_alloc(ip, &got); |
| 300 | return 0; | 301 | return 0; |
| 301 | } | 302 | } |
| @@ -1351,10 +1352,19 @@ xfs_reflink_remap_prep( | |||
| 1351 | if (ret) | 1352 | if (ret) |
| 1352 | goto out_unlock; | 1353 | goto out_unlock; |
| 1353 | 1354 | ||
| 1354 | /* Zap any page cache for the destination file's range. */ | 1355 | /* |
| 1355 | truncate_inode_pages_range(&inode_out->i_data, | 1356 | * If pos_out > EOF, we may have dirtied blocks between EOF and |
| 1356 | round_down(pos_out, PAGE_SIZE), | 1357 | * pos_out. In that case, we need to extend the flush and unmap to cover |
| 1357 | round_up(pos_out + *len, PAGE_SIZE) - 1); | 1358 | * from EOF to the end of the copy length. |
| 1359 | */ | ||
| 1360 | if (pos_out > XFS_ISIZE(dest)) { | ||
| 1361 | loff_t flen = *len + (pos_out - XFS_ISIZE(dest)); | ||
| 1362 | ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen); | ||
| 1363 | } else { | ||
| 1364 | ret = xfs_flush_unmap_range(dest, pos_out, *len); | ||
| 1365 | } | ||
| 1366 | if (ret) | ||
| 1367 | goto out_unlock; | ||
| 1358 | 1368 | ||
| 1359 | return 1; | 1369 | return 1; |
| 1360 | out_unlock: | 1370 | out_unlock: |
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 3043e5ed6495..8a6532aae779 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h | |||
| @@ -280,7 +280,10 @@ DECLARE_EVENT_CLASS(xfs_buf_class, | |||
| 280 | ), | 280 | ), |
| 281 | TP_fast_assign( | 281 | TP_fast_assign( |
| 282 | __entry->dev = bp->b_target->bt_dev; | 282 | __entry->dev = bp->b_target->bt_dev; |
| 283 | __entry->bno = bp->b_bn; | 283 | if (bp->b_bn == XFS_BUF_DADDR_NULL) |
| 284 | __entry->bno = bp->b_maps[0].bm_bn; | ||
| 285 | else | ||
| 286 | __entry->bno = bp->b_bn; | ||
| 284 | __entry->nblks = bp->b_length; | 287 | __entry->nblks = bp->b_length; |
| 285 | __entry->hold = atomic_read(&bp->b_hold); | 288 | __entry->hold = atomic_read(&bp->b_hold); |
| 286 | __entry->pincount = atomic_read(&bp->b_pin_count); | 289 | __entry->pincount = atomic_read(&bp->b_pin_count); |
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h index c79e859408e6..fd458389f7d1 100644 --- a/include/linux/net_dim.h +++ b/include/linux/net_dim.h | |||
| @@ -406,6 +406,8 @@ static inline void net_dim(struct net_dim *dim, | |||
| 406 | } | 406 | } |
| 407 | /* fall through */ | 407 | /* fall through */ |
| 408 | case NET_DIM_START_MEASURE: | 408 | case NET_DIM_START_MEASURE: |
| 409 | net_dim_sample(end_sample.event_ctr, end_sample.pkt_ctr, end_sample.byte_ctr, | ||
| 410 | &dim->start_sample); | ||
| 409 | dim->state = NET_DIM_MEASURE_IN_PROGRESS; | 411 | dim->state = NET_DIM_MEASURE_IN_PROGRESS; |
| 410 | break; | 412 | break; |
| 411 | case NET_DIM_APPLY_NEW_PROFILE: | 413 | case NET_DIM_APPLY_NEW_PROFILE: |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a2e8297a5b00..f17a7452ac7b 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -1334,6 +1334,22 @@ static inline void skb_zcopy_set(struct sk_buff *skb, struct ubuf_info *uarg) | |||
| 1334 | } | 1334 | } |
| 1335 | } | 1335 | } |
| 1336 | 1336 | ||
| 1337 | static inline void skb_zcopy_set_nouarg(struct sk_buff *skb, void *val) | ||
| 1338 | { | ||
| 1339 | skb_shinfo(skb)->destructor_arg = (void *)((uintptr_t) val | 0x1UL); | ||
| 1340 | skb_shinfo(skb)->tx_flags |= SKBTX_ZEROCOPY_FRAG; | ||
| 1341 | } | ||
| 1342 | |||
| 1343 | static inline bool skb_zcopy_is_nouarg(struct sk_buff *skb) | ||
| 1344 | { | ||
| 1345 | return (uintptr_t) skb_shinfo(skb)->destructor_arg & 0x1UL; | ||
| 1346 | } | ||
| 1347 | |||
| 1348 | static inline void *skb_zcopy_get_nouarg(struct sk_buff *skb) | ||
| 1349 | { | ||
| 1350 | return (void *)((uintptr_t) skb_shinfo(skb)->destructor_arg & ~0x1UL); | ||
| 1351 | } | ||
| 1352 | |||
| 1337 | /* Release a reference on a zerocopy structure */ | 1353 | /* Release a reference on a zerocopy structure */ |
| 1338 | static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) | 1354 | static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) |
| 1339 | { | 1355 | { |
| @@ -1343,7 +1359,7 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy) | |||
| 1343 | if (uarg->callback == sock_zerocopy_callback) { | 1359 | if (uarg->callback == sock_zerocopy_callback) { |
| 1344 | uarg->zerocopy = uarg->zerocopy && zerocopy; | 1360 | uarg->zerocopy = uarg->zerocopy && zerocopy; |
| 1345 | sock_zerocopy_put(uarg); | 1361 | sock_zerocopy_put(uarg); |
| 1346 | } else { | 1362 | } else if (!skb_zcopy_is_nouarg(skb)) { |
| 1347 | uarg->callback(uarg, zerocopy); | 1363 | uarg->callback(uarg, zerocopy); |
| 1348 | } | 1364 | } |
| 1349 | 1365 | ||
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 8ed77bb4ed86..a9b0280687d5 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
| @@ -196,6 +196,7 @@ struct tcp_sock { | |||
| 196 | u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ | 196 | u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */ |
| 197 | u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ | 197 | u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ |
| 198 | u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */ | 198 | u32 last_oow_ack_time; /* timestamp of last out-of-window ACK */ |
| 199 | u32 compressed_ack_rcv_nxt; | ||
| 199 | 200 | ||
| 200 | u32 tsoffset; /* timestamp offset */ | 201 | u32 tsoffset; /* timestamp offset */ |
| 201 | 202 | ||
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index b7a99ce56bc9..a1be64c9940f 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h | |||
| @@ -66,4 +66,7 @@ | |||
| 66 | /* Device needs a pause after every control message. */ | 66 | /* Device needs a pause after every control message. */ |
| 67 | #define USB_QUIRK_DELAY_CTRL_MSG BIT(13) | 67 | #define USB_QUIRK_DELAY_CTRL_MSG BIT(13) |
| 68 | 68 | ||
| 69 | /* Hub needs extra delay after resetting its port. */ | ||
| 70 | #define USB_QUIRK_HUB_SLOW_RESET BIT(14) | ||
| 71 | |||
| 69 | #endif /* __LINUX_USB_QUIRKS_H */ | 72 | #endif /* __LINUX_USB_QUIRKS_H */ |
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index 58c1ecf3d648..5467264771ec 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h | |||
| @@ -624,7 +624,7 @@ v4l2_m2m_dst_buf_remove_by_idx(struct v4l2_m2m_ctx *m2m_ctx, unsigned int idx) | |||
| 624 | 624 | ||
| 625 | /* v4l2 request helper */ | 625 | /* v4l2 request helper */ |
| 626 | 626 | ||
| 627 | void vb2_m2m_request_queue(struct media_request *req); | 627 | void v4l2_m2m_request_queue(struct media_request *req); |
| 628 | 628 | ||
| 629 | /* v4l2 ioctl helpers */ | 629 | /* v4l2 ioctl helpers */ |
| 630 | 630 | ||
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index cdf2e80abc44..38da91d1b4c6 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
| @@ -608,4 +608,16 @@ static inline __u32 sctp_dst_mtu(const struct dst_entry *dst) | |||
| 608 | SCTP_DEFAULT_MINSEGMENT)); | 608 | SCTP_DEFAULT_MINSEGMENT)); |
| 609 | } | 609 | } |
| 610 | 610 | ||
| 611 | static inline bool sctp_transport_pmtu_check(struct sctp_transport *t) | ||
| 612 | { | ||
| 613 | __u32 pmtu = sctp_dst_mtu(t->dst); | ||
| 614 | |||
| 615 | if (t->pathmtu == pmtu) | ||
| 616 | return true; | ||
| 617 | |||
| 618 | t->pathmtu = pmtu; | ||
| 619 | |||
| 620 | return false; | ||
| 621 | } | ||
| 622 | |||
| 611 | #endif /* __net_sctp_h__ */ | 623 | #endif /* __net_sctp_h__ */ |
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 51b095898f4b..998983a6e6b7 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h | |||
| @@ -50,6 +50,8 @@ | |||
| 50 | #ifndef __LINUX_V4L2_CONTROLS_H | 50 | #ifndef __LINUX_V4L2_CONTROLS_H |
| 51 | #define __LINUX_V4L2_CONTROLS_H | 51 | #define __LINUX_V4L2_CONTROLS_H |
| 52 | 52 | ||
| 53 | #include <linux/types.h> | ||
| 54 | |||
| 53 | /* Control classes */ | 55 | /* Control classes */ |
| 54 | #define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */ | 56 | #define V4L2_CTRL_CLASS_USER 0x00980000 /* Old-style 'user' controls */ |
| 55 | #define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */ | 57 | #define V4L2_CTRL_CLASS_MPEG 0x00990000 /* MPEG-compression controls */ |
| @@ -1110,6 +1112,7 @@ struct v4l2_mpeg2_sequence { | |||
| 1110 | __u8 profile_and_level_indication; | 1112 | __u8 profile_and_level_indication; |
| 1111 | __u8 progressive_sequence; | 1113 | __u8 progressive_sequence; |
| 1112 | __u8 chroma_format; | 1114 | __u8 chroma_format; |
| 1115 | __u8 pad; | ||
| 1113 | }; | 1116 | }; |
| 1114 | 1117 | ||
| 1115 | struct v4l2_mpeg2_picture { | 1118 | struct v4l2_mpeg2_picture { |
| @@ -1128,6 +1131,7 @@ struct v4l2_mpeg2_picture { | |||
| 1128 | __u8 alternate_scan; | 1131 | __u8 alternate_scan; |
| 1129 | __u8 repeat_first_field; | 1132 | __u8 repeat_first_field; |
| 1130 | __u8 progressive_frame; | 1133 | __u8 progressive_frame; |
| 1134 | __u8 pad; | ||
| 1131 | }; | 1135 | }; |
| 1132 | 1136 | ||
| 1133 | struct v4l2_ctrl_mpeg2_slice_params { | 1137 | struct v4l2_ctrl_mpeg2_slice_params { |
| @@ -1142,6 +1146,7 @@ struct v4l2_ctrl_mpeg2_slice_params { | |||
| 1142 | 1146 | ||
| 1143 | __u8 backward_ref_index; | 1147 | __u8 backward_ref_index; |
| 1144 | __u8 forward_ref_index; | 1148 | __u8 forward_ref_index; |
| 1149 | __u8 pad; | ||
| 1145 | }; | 1150 | }; |
| 1146 | 1151 | ||
| 1147 | struct v4l2_ctrl_mpeg2_quantization { | 1152 | struct v4l2_ctrl_mpeg2_quantization { |
diff --git a/lib/test_firmware.c b/lib/test_firmware.c index b984806d7d7b..7cab9a9869ac 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c | |||
| @@ -837,6 +837,7 @@ static ssize_t read_firmware_show(struct device *dev, | |||
| 837 | if (req->fw->size > PAGE_SIZE) { | 837 | if (req->fw->size > PAGE_SIZE) { |
| 838 | pr_err("Testing interface must use PAGE_SIZE firmware for now\n"); | 838 | pr_err("Testing interface must use PAGE_SIZE firmware for now\n"); |
| 839 | rc = -EINVAL; | 839 | rc = -EINVAL; |
| 840 | goto out; | ||
| 840 | } | 841 | } |
| 841 | memcpy(buf, req->fw->data, req->fw->size); | 842 | memcpy(buf, req->fw->data, req->fw->size); |
| 842 | 843 | ||
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 57fcc6b4bf6e..2f126eff275d 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
| @@ -580,9 +580,15 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page, | |||
| 580 | struct bio_vec bvec; | 580 | struct bio_vec bvec; |
| 581 | int ret; | 581 | int ret; |
| 582 | 582 | ||
| 583 | /* sendpage cannot properly handle pages with page_count == 0, | 583 | /* |
| 584 | * we need to fallback to sendmsg if that's the case */ | 584 | * sendpage cannot properly handle pages with page_count == 0, |
| 585 | if (page_count(page) >= 1) | 585 | * we need to fall back to sendmsg if that's the case. |
| 586 | * | ||
| 587 | * Same goes for slab pages: skb_can_coalesce() allows | ||
| 588 | * coalescing neighboring slab objects into a single frag which | ||
| 589 | * triggers one of hardened usercopy checks. | ||
| 590 | */ | ||
| 591 | if (page_count(page) >= 1 && !PageSlab(page)) | ||
| 586 | return __ceph_tcp_sendpage(sock, page, offset, size, more); | 592 | return __ceph_tcp_sendpage(sock, page, offset, size, more); |
| 587 | 593 | ||
| 588 | bvec.bv_page = page; | 594 | bvec.bv_page = page; |
diff --git a/net/core/dev.c b/net/core/dev.c index d83582623cd7..f69b2fcdee40 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
| @@ -5981,11 +5981,14 @@ bool napi_complete_done(struct napi_struct *n, int work_done) | |||
| 5981 | if (work_done) | 5981 | if (work_done) |
| 5982 | timeout = n->dev->gro_flush_timeout; | 5982 | timeout = n->dev->gro_flush_timeout; |
| 5983 | 5983 | ||
| 5984 | /* When the NAPI instance uses a timeout and keeps postponing | ||
| 5985 | * it, we need to bound somehow the time packets are kept in | ||
| 5986 | * the GRO layer | ||
| 5987 | */ | ||
| 5988 | napi_gro_flush(n, !!timeout); | ||
| 5984 | if (timeout) | 5989 | if (timeout) |
| 5985 | hrtimer_start(&n->timer, ns_to_ktime(timeout), | 5990 | hrtimer_start(&n->timer, ns_to_ktime(timeout), |
| 5986 | HRTIMER_MODE_REL_PINNED); | 5991 | HRTIMER_MODE_REL_PINNED); |
| 5987 | else | ||
| 5988 | napi_gro_flush(n, false); | ||
| 5989 | } | 5992 | } |
| 5990 | if (unlikely(!list_empty(&n->poll_list))) { | 5993 | if (unlikely(!list_empty(&n->poll_list))) { |
| 5991 | /* If n->poll_list is not empty, we need to mask irqs */ | 5994 | /* If n->poll_list is not empty, we need to mask irqs */ |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index edaaebfbcd46..568dbf3b711a 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -4269,7 +4269,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq) | |||
| 4269 | * If the sack array is full, forget about the last one. | 4269 | * If the sack array is full, forget about the last one. |
| 4270 | */ | 4270 | */ |
| 4271 | if (this_sack >= TCP_NUM_SACKS) { | 4271 | if (this_sack >= TCP_NUM_SACKS) { |
| 4272 | if (tp->compressed_ack) | 4272 | if (tp->compressed_ack > TCP_FASTRETRANS_THRESH) |
| 4273 | tcp_send_ack(sk); | 4273 | tcp_send_ack(sk); |
| 4274 | this_sack--; | 4274 | this_sack--; |
| 4275 | tp->rx_opt.num_sacks--; | 4275 | tp->rx_opt.num_sacks--; |
| @@ -4364,6 +4364,7 @@ static bool tcp_try_coalesce(struct sock *sk, | |||
| 4364 | if (TCP_SKB_CB(from)->has_rxtstamp) { | 4364 | if (TCP_SKB_CB(from)->has_rxtstamp) { |
| 4365 | TCP_SKB_CB(to)->has_rxtstamp = true; | 4365 | TCP_SKB_CB(to)->has_rxtstamp = true; |
| 4366 | to->tstamp = from->tstamp; | 4366 | to->tstamp = from->tstamp; |
| 4367 | skb_hwtstamps(to)->hwtstamp = skb_hwtstamps(from)->hwtstamp; | ||
| 4367 | } | 4368 | } |
| 4368 | 4369 | ||
| 4369 | return true; | 4370 | return true; |
| @@ -5189,7 +5190,17 @@ send_now: | |||
| 5189 | if (!tcp_is_sack(tp) || | 5190 | if (!tcp_is_sack(tp) || |
| 5190 | tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr) | 5191 | tp->compressed_ack >= sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr) |
| 5191 | goto send_now; | 5192 | goto send_now; |
| 5192 | tp->compressed_ack++; | 5193 | |
| 5194 | if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) { | ||
| 5195 | tp->compressed_ack_rcv_nxt = tp->rcv_nxt; | ||
| 5196 | if (tp->compressed_ack > TCP_FASTRETRANS_THRESH) | ||
| 5197 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, | ||
| 5198 | tp->compressed_ack - TCP_FASTRETRANS_THRESH); | ||
| 5199 | tp->compressed_ack = 0; | ||
| 5200 | } | ||
| 5201 | |||
| 5202 | if (++tp->compressed_ack <= TCP_FASTRETRANS_THRESH) | ||
| 5203 | goto send_now; | ||
| 5193 | 5204 | ||
| 5194 | if (hrtimer_is_queued(&tp->compressed_ack_timer)) | 5205 | if (hrtimer_is_queued(&tp->compressed_ack_timer)) |
| 5195 | return; | 5206 | return; |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index d40d4cc53319..c5dc4c4fdadd 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -180,10 +180,10 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, | |||
| 180 | { | 180 | { |
| 181 | struct tcp_sock *tp = tcp_sk(sk); | 181 | struct tcp_sock *tp = tcp_sk(sk); |
| 182 | 182 | ||
| 183 | if (unlikely(tp->compressed_ack)) { | 183 | if (unlikely(tp->compressed_ack > TCP_FASTRETRANS_THRESH)) { |
| 184 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, | 184 | NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPACKCOMPRESSED, |
| 185 | tp->compressed_ack); | 185 | tp->compressed_ack - TCP_FASTRETRANS_THRESH); |
| 186 | tp->compressed_ack = 0; | 186 | tp->compressed_ack = TCP_FASTRETRANS_THRESH; |
| 187 | if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) | 187 | if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) |
| 188 | __sock_put(sk); | 188 | __sock_put(sk); |
| 189 | } | 189 | } |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 676020663ce8..5f8b6d3cd855 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
| @@ -740,7 +740,7 @@ static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer) | |||
| 740 | 740 | ||
| 741 | bh_lock_sock(sk); | 741 | bh_lock_sock(sk); |
| 742 | if (!sock_owned_by_user(sk)) { | 742 | if (!sock_owned_by_user(sk)) { |
| 743 | if (tp->compressed_ack) | 743 | if (tp->compressed_ack > TCP_FASTRETRANS_THRESH) |
| 744 | tcp_send_ack(sk); | 744 | tcp_send_ack(sk); |
| 745 | } else { | 745 | } else { |
| 746 | if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, | 746 | if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 63a808d5af15..045597b9a7c0 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
| @@ -179,7 +179,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp); | |||
| 179 | static void addrconf_dad_work(struct work_struct *w); | 179 | static void addrconf_dad_work(struct work_struct *w); |
| 180 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, | 180 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, |
| 181 | bool send_na); | 181 | bool send_na); |
| 182 | static void addrconf_dad_run(struct inet6_dev *idev); | 182 | static void addrconf_dad_run(struct inet6_dev *idev, bool restart); |
| 183 | static void addrconf_rs_timer(struct timer_list *t); | 183 | static void addrconf_rs_timer(struct timer_list *t); |
| 184 | static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); | 184 | static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); |
| 185 | static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); | 185 | static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); |
| @@ -3439,6 +3439,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
| 3439 | void *ptr) | 3439 | void *ptr) |
| 3440 | { | 3440 | { |
| 3441 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); | 3441 | struct net_device *dev = netdev_notifier_info_to_dev(ptr); |
| 3442 | struct netdev_notifier_change_info *change_info; | ||
| 3442 | struct netdev_notifier_changeupper_info *info; | 3443 | struct netdev_notifier_changeupper_info *info; |
| 3443 | struct inet6_dev *idev = __in6_dev_get(dev); | 3444 | struct inet6_dev *idev = __in6_dev_get(dev); |
| 3444 | struct net *net = dev_net(dev); | 3445 | struct net *net = dev_net(dev); |
| @@ -3513,7 +3514,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
| 3513 | break; | 3514 | break; |
| 3514 | } | 3515 | } |
| 3515 | 3516 | ||
| 3516 | if (idev) { | 3517 | if (!IS_ERR_OR_NULL(idev)) { |
| 3517 | if (idev->if_flags & IF_READY) { | 3518 | if (idev->if_flags & IF_READY) { |
| 3518 | /* device is already configured - | 3519 | /* device is already configured - |
| 3519 | * but resend MLD reports, we might | 3520 | * but resend MLD reports, we might |
| @@ -3521,6 +3522,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
| 3521 | * multicast snooping switches | 3522 | * multicast snooping switches |
| 3522 | */ | 3523 | */ |
| 3523 | ipv6_mc_up(idev); | 3524 | ipv6_mc_up(idev); |
| 3525 | change_info = ptr; | ||
| 3526 | if (change_info->flags_changed & IFF_NOARP) | ||
| 3527 | addrconf_dad_run(idev, true); | ||
| 3524 | rt6_sync_up(dev, RTNH_F_LINKDOWN); | 3528 | rt6_sync_up(dev, RTNH_F_LINKDOWN); |
| 3525 | break; | 3529 | break; |
| 3526 | } | 3530 | } |
| @@ -3555,7 +3559,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, | |||
| 3555 | 3559 | ||
| 3556 | if (!IS_ERR_OR_NULL(idev)) { | 3560 | if (!IS_ERR_OR_NULL(idev)) { |
| 3557 | if (run_pending) | 3561 | if (run_pending) |
| 3558 | addrconf_dad_run(idev); | 3562 | addrconf_dad_run(idev, false); |
| 3559 | 3563 | ||
| 3560 | /* Device has an address by now */ | 3564 | /* Device has an address by now */ |
| 3561 | rt6_sync_up(dev, RTNH_F_DEAD); | 3565 | rt6_sync_up(dev, RTNH_F_DEAD); |
| @@ -4173,16 +4177,19 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id, | |||
| 4173 | addrconf_verify_rtnl(); | 4177 | addrconf_verify_rtnl(); |
| 4174 | } | 4178 | } |
| 4175 | 4179 | ||
| 4176 | static void addrconf_dad_run(struct inet6_dev *idev) | 4180 | static void addrconf_dad_run(struct inet6_dev *idev, bool restart) |
| 4177 | { | 4181 | { |
| 4178 | struct inet6_ifaddr *ifp; | 4182 | struct inet6_ifaddr *ifp; |
| 4179 | 4183 | ||
| 4180 | read_lock_bh(&idev->lock); | 4184 | read_lock_bh(&idev->lock); |
| 4181 | list_for_each_entry(ifp, &idev->addr_list, if_list) { | 4185 | list_for_each_entry(ifp, &idev->addr_list, if_list) { |
| 4182 | spin_lock(&ifp->lock); | 4186 | spin_lock(&ifp->lock); |
| 4183 | if (ifp->flags & IFA_F_TENTATIVE && | 4187 | if ((ifp->flags & IFA_F_TENTATIVE && |
| 4184 | ifp->state == INET6_IFADDR_STATE_DAD) | 4188 | ifp->state == INET6_IFADDR_STATE_DAD) || restart) { |
| 4189 | if (restart) | ||
| 4190 | ifp->state = INET6_IFADDR_STATE_PREDAD; | ||
| 4185 | addrconf_dad_kick(ifp); | 4191 | addrconf_dad_kick(ifp); |
| 4192 | } | ||
| 4186 | spin_unlock(&ifp->lock); | 4193 | spin_unlock(&ifp->lock); |
| 4187 | } | 4194 | } |
| 4188 | read_unlock_bh(&idev->lock); | 4195 | read_unlock_bh(&idev->lock); |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ec3095f13aae..a74650e98f42 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
| @@ -2394,7 +2394,7 @@ static void tpacket_destruct_skb(struct sk_buff *skb) | |||
| 2394 | void *ph; | 2394 | void *ph; |
| 2395 | __u32 ts; | 2395 | __u32 ts; |
| 2396 | 2396 | ||
| 2397 | ph = skb_shinfo(skb)->destructor_arg; | 2397 | ph = skb_zcopy_get_nouarg(skb); |
| 2398 | packet_dec_pending(&po->tx_ring); | 2398 | packet_dec_pending(&po->tx_ring); |
| 2399 | 2399 | ||
| 2400 | ts = __packet_set_timestamp(po, ph, skb); | 2400 | ts = __packet_set_timestamp(po, ph, skb); |
| @@ -2461,7 +2461,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, | |||
| 2461 | skb->mark = po->sk.sk_mark; | 2461 | skb->mark = po->sk.sk_mark; |
| 2462 | skb->tstamp = sockc->transmit_time; | 2462 | skb->tstamp = sockc->transmit_time; |
| 2463 | sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); | 2463 | sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags); |
| 2464 | skb_shinfo(skb)->destructor_arg = ph.raw; | 2464 | skb_zcopy_set_nouarg(skb, ph.raw); |
| 2465 | 2465 | ||
| 2466 | skb_reserve(skb, hlen); | 2466 | skb_reserve(skb, hlen); |
| 2467 | skb_reset_network_header(skb); | 2467 | skb_reset_network_header(skb); |
diff --git a/net/sched/act_police.c b/net/sched/act_police.c index 052855d47354..37c9b8f0e10f 100644 --- a/net/sched/act_police.c +++ b/net/sched/act_police.c | |||
| @@ -27,10 +27,7 @@ struct tcf_police_params { | |||
| 27 | u32 tcfp_ewma_rate; | 27 | u32 tcfp_ewma_rate; |
| 28 | s64 tcfp_burst; | 28 | s64 tcfp_burst; |
| 29 | u32 tcfp_mtu; | 29 | u32 tcfp_mtu; |
| 30 | s64 tcfp_toks; | ||
| 31 | s64 tcfp_ptoks; | ||
| 32 | s64 tcfp_mtu_ptoks; | 30 | s64 tcfp_mtu_ptoks; |
| 33 | s64 tcfp_t_c; | ||
| 34 | struct psched_ratecfg rate; | 31 | struct psched_ratecfg rate; |
| 35 | bool rate_present; | 32 | bool rate_present; |
| 36 | struct psched_ratecfg peak; | 33 | struct psched_ratecfg peak; |
| @@ -41,6 +38,11 @@ struct tcf_police_params { | |||
| 41 | struct tcf_police { | 38 | struct tcf_police { |
| 42 | struct tc_action common; | 39 | struct tc_action common; |
| 43 | struct tcf_police_params __rcu *params; | 40 | struct tcf_police_params __rcu *params; |
| 41 | |||
| 42 | spinlock_t tcfp_lock ____cacheline_aligned_in_smp; | ||
| 43 | s64 tcfp_toks; | ||
| 44 | s64 tcfp_ptoks; | ||
| 45 | s64 tcfp_t_c; | ||
| 44 | }; | 46 | }; |
| 45 | 47 | ||
| 46 | #define to_police(pc) ((struct tcf_police *)pc) | 48 | #define to_police(pc) ((struct tcf_police *)pc) |
| @@ -122,6 +124,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 122 | return ret; | 124 | return ret; |
| 123 | } | 125 | } |
| 124 | ret = ACT_P_CREATED; | 126 | ret = ACT_P_CREATED; |
| 127 | spin_lock_init(&(to_police(*a)->tcfp_lock)); | ||
| 125 | } else if (!ovr) { | 128 | } else if (!ovr) { |
| 126 | tcf_idr_release(*a, bind); | 129 | tcf_idr_release(*a, bind); |
| 127 | return -EEXIST; | 130 | return -EEXIST; |
| @@ -186,12 +189,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 186 | } | 189 | } |
| 187 | 190 | ||
| 188 | new->tcfp_burst = PSCHED_TICKS2NS(parm->burst); | 191 | new->tcfp_burst = PSCHED_TICKS2NS(parm->burst); |
| 189 | new->tcfp_toks = new->tcfp_burst; | 192 | if (new->peak_present) |
| 190 | if (new->peak_present) { | ||
| 191 | new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak, | 193 | new->tcfp_mtu_ptoks = (s64)psched_l2t_ns(&new->peak, |
| 192 | new->tcfp_mtu); | 194 | new->tcfp_mtu); |
| 193 | new->tcfp_ptoks = new->tcfp_mtu_ptoks; | ||
| 194 | } | ||
| 195 | 195 | ||
| 196 | if (tb[TCA_POLICE_AVRATE]) | 196 | if (tb[TCA_POLICE_AVRATE]) |
| 197 | new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); | 197 | new->tcfp_ewma_rate = nla_get_u32(tb[TCA_POLICE_AVRATE]); |
| @@ -207,7 +207,12 @@ static int tcf_police_init(struct net *net, struct nlattr *nla, | |||
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | spin_lock_bh(&police->tcf_lock); | 209 | spin_lock_bh(&police->tcf_lock); |
| 210 | new->tcfp_t_c = ktime_get_ns(); | 210 | spin_lock_bh(&police->tcfp_lock); |
| 211 | police->tcfp_t_c = ktime_get_ns(); | ||
| 212 | police->tcfp_toks = new->tcfp_burst; | ||
| 213 | if (new->peak_present) | ||
| 214 | police->tcfp_ptoks = new->tcfp_mtu_ptoks; | ||
| 215 | spin_unlock_bh(&police->tcfp_lock); | ||
| 211 | police->tcf_action = parm->action; | 216 | police->tcf_action = parm->action; |
| 212 | rcu_swap_protected(police->params, | 217 | rcu_swap_protected(police->params, |
| 213 | new, | 218 | new, |
| @@ -257,25 +262,28 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a, | |||
| 257 | } | 262 | } |
| 258 | 263 | ||
| 259 | now = ktime_get_ns(); | 264 | now = ktime_get_ns(); |
| 260 | toks = min_t(s64, now - p->tcfp_t_c, p->tcfp_burst); | 265 | spin_lock_bh(&police->tcfp_lock); |
| 266 | toks = min_t(s64, now - police->tcfp_t_c, p->tcfp_burst); | ||
| 261 | if (p->peak_present) { | 267 | if (p->peak_present) { |
| 262 | ptoks = toks + p->tcfp_ptoks; | 268 | ptoks = toks + police->tcfp_ptoks; |
| 263 | if (ptoks > p->tcfp_mtu_ptoks) | 269 | if (ptoks > p->tcfp_mtu_ptoks) |
| 264 | ptoks = p->tcfp_mtu_ptoks; | 270 | ptoks = p->tcfp_mtu_ptoks; |
| 265 | ptoks -= (s64)psched_l2t_ns(&p->peak, | 271 | ptoks -= (s64)psched_l2t_ns(&p->peak, |
| 266 | qdisc_pkt_len(skb)); | 272 | qdisc_pkt_len(skb)); |
| 267 | } | 273 | } |
| 268 | toks += p->tcfp_toks; | 274 | toks += police->tcfp_toks; |
| 269 | if (toks > p->tcfp_burst) | 275 | if (toks > p->tcfp_burst) |
| 270 | toks = p->tcfp_burst; | 276 | toks = p->tcfp_burst; |
| 271 | toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb)); | 277 | toks -= (s64)psched_l2t_ns(&p->rate, qdisc_pkt_len(skb)); |
| 272 | if ((toks|ptoks) >= 0) { | 278 | if ((toks|ptoks) >= 0) { |
| 273 | p->tcfp_t_c = now; | 279 | police->tcfp_t_c = now; |
| 274 | p->tcfp_toks = toks; | 280 | police->tcfp_toks = toks; |
| 275 | p->tcfp_ptoks = ptoks; | 281 | police->tcfp_ptoks = ptoks; |
| 282 | spin_unlock_bh(&police->tcfp_lock); | ||
| 276 | ret = p->tcfp_result; | 283 | ret = p->tcfp_result; |
| 277 | goto inc_drops; | 284 | goto inc_drops; |
| 278 | } | 285 | } |
| 286 | spin_unlock_bh(&police->tcfp_lock); | ||
| 279 | } | 287 | } |
| 280 | 288 | ||
| 281 | inc_overlimits: | 289 | inc_overlimits: |
diff --git a/net/sctp/output.c b/net/sctp/output.c index 67939ad99c01..b0e74a3e77ec 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
| @@ -118,6 +118,9 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, | |||
| 118 | sctp_transport_route(tp, NULL, sp); | 118 | sctp_transport_route(tp, NULL, sp); |
| 119 | if (asoc->param_flags & SPP_PMTUD_ENABLE) | 119 | if (asoc->param_flags & SPP_PMTUD_ENABLE) |
| 120 | sctp_assoc_sync_pmtu(asoc); | 120 | sctp_assoc_sync_pmtu(asoc); |
| 121 | } else if (!sctp_transport_pmtu_check(tp)) { | ||
| 122 | if (asoc->param_flags & SPP_PMTUD_ENABLE) | ||
| 123 | sctp_assoc_sync_pmtu(asoc); | ||
| 121 | } | 124 | } |
| 122 | 125 | ||
| 123 | if (asoc->pmtu_pending) { | 126 | if (asoc->pmtu_pending) { |
| @@ -396,25 +399,6 @@ finish: | |||
| 396 | return retval; | 399 | return retval; |
| 397 | } | 400 | } |
| 398 | 401 | ||
| 399 | static void sctp_packet_release_owner(struct sk_buff *skb) | ||
| 400 | { | ||
| 401 | sk_free(skb->sk); | ||
| 402 | } | ||
| 403 | |||
| 404 | static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk) | ||
| 405 | { | ||
| 406 | skb_orphan(skb); | ||
| 407 | skb->sk = sk; | ||
| 408 | skb->destructor = sctp_packet_release_owner; | ||
| 409 | |||
| 410 | /* | ||
| 411 | * The data chunks have already been accounted for in sctp_sendmsg(), | ||
| 412 | * therefore only reserve a single byte to keep socket around until | ||
| 413 | * the packet has been transmitted. | ||
| 414 | */ | ||
| 415 | refcount_inc(&sk->sk_wmem_alloc); | ||
| 416 | } | ||
| 417 | |||
| 418 | static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb) | 402 | static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb) |
| 419 | { | 403 | { |
| 420 | if (SCTP_OUTPUT_CB(head)->last == head) | 404 | if (SCTP_OUTPUT_CB(head)->last == head) |
| @@ -601,7 +585,7 @@ int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp) | |||
| 601 | if (!head) | 585 | if (!head) |
| 602 | goto out; | 586 | goto out; |
| 603 | skb_reserve(head, packet->overhead + MAX_HEADER); | 587 | skb_reserve(head, packet->overhead + MAX_HEADER); |
| 604 | sctp_packet_set_owner_w(head, sk); | 588 | skb_set_owner_w(head, sk); |
| 605 | 589 | ||
| 606 | /* set sctp header */ | 590 | /* set sctp header */ |
| 607 | sh = skb_push(head, sizeof(struct sctphdr)); | 591 | sh = skb_push(head, sizeof(struct sctphdr)); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e16c090e89f0..1fb2cad94597 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -3951,32 +3951,16 @@ static int sctp_setsockopt_pr_supported(struct sock *sk, | |||
| 3951 | unsigned int optlen) | 3951 | unsigned int optlen) |
| 3952 | { | 3952 | { |
| 3953 | struct sctp_assoc_value params; | 3953 | struct sctp_assoc_value params; |
| 3954 | struct sctp_association *asoc; | ||
| 3955 | int retval = -EINVAL; | ||
| 3956 | 3954 | ||
| 3957 | if (optlen != sizeof(params)) | 3955 | if (optlen != sizeof(params)) |
| 3958 | goto out; | 3956 | return -EINVAL; |
| 3959 | |||
| 3960 | if (copy_from_user(¶ms, optval, optlen)) { | ||
| 3961 | retval = -EFAULT; | ||
| 3962 | goto out; | ||
| 3963 | } | ||
| 3964 | |||
| 3965 | asoc = sctp_id2assoc(sk, params.assoc_id); | ||
| 3966 | if (asoc) { | ||
| 3967 | asoc->prsctp_enable = !!params.assoc_value; | ||
| 3968 | } else if (!params.assoc_id) { | ||
| 3969 | struct sctp_sock *sp = sctp_sk(sk); | ||
| 3970 | 3957 | ||
| 3971 | sp->ep->prsctp_enable = !!params.assoc_value; | 3958 | if (copy_from_user(¶ms, optval, optlen)) |
| 3972 | } else { | 3959 | return -EFAULT; |
| 3973 | goto out; | ||
| 3974 | } | ||
| 3975 | 3960 | ||
| 3976 | retval = 0; | 3961 | sctp_sk(sk)->ep->prsctp_enable = !!params.assoc_value; |
| 3977 | 3962 | ||
| 3978 | out: | 3963 | return 0; |
| 3979 | return retval; | ||
| 3980 | } | 3964 | } |
| 3981 | 3965 | ||
| 3982 | static int sctp_setsockopt_default_prinfo(struct sock *sk, | 3966 | static int sctp_setsockopt_default_prinfo(struct sock *sk, |
diff --git a/net/sctp/stream.c b/net/sctp/stream.c index ffb940d3b57c..3892e7630f3a 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c | |||
| @@ -535,7 +535,6 @@ int sctp_send_add_streams(struct sctp_association *asoc, | |||
| 535 | goto out; | 535 | goto out; |
| 536 | } | 536 | } |
| 537 | 537 | ||
| 538 | stream->incnt = incnt; | ||
| 539 | stream->outcnt = outcnt; | 538 | stream->outcnt = outcnt; |
| 540 | 539 | ||
| 541 | asoc->strreset_outstanding = !!out + !!in; | 540 | asoc->strreset_outstanding = !!out + !!in; |
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index 4b865250e238..63f08b4e51d6 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
| @@ -127,6 +127,8 @@ static int smc_release(struct socket *sock) | |||
| 127 | smc = smc_sk(sk); | 127 | smc = smc_sk(sk); |
| 128 | 128 | ||
| 129 | /* cleanup for a dangling non-blocking connect */ | 129 | /* cleanup for a dangling non-blocking connect */ |
| 130 | if (smc->connect_info && sk->sk_state == SMC_INIT) | ||
| 131 | tcp_abort(smc->clcsock->sk, ECONNABORTED); | ||
| 130 | flush_work(&smc->connect_work); | 132 | flush_work(&smc->connect_work); |
| 131 | kfree(smc->connect_info); | 133 | kfree(smc->connect_info); |
| 132 | smc->connect_info = NULL; | 134 | smc->connect_info = NULL; |
| @@ -551,7 +553,8 @@ static int smc_connect_rdma(struct smc_sock *smc, | |||
| 551 | 553 | ||
| 552 | mutex_lock(&smc_create_lgr_pending); | 554 | mutex_lock(&smc_create_lgr_pending); |
| 553 | local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev, | 555 | local_contact = smc_conn_create(smc, false, aclc->hdr.flag, ibdev, |
| 554 | ibport, &aclc->lcl, NULL, 0); | 556 | ibport, ntoh24(aclc->qpn), &aclc->lcl, |
| 557 | NULL, 0); | ||
| 555 | if (local_contact < 0) { | 558 | if (local_contact < 0) { |
| 556 | if (local_contact == -ENOMEM) | 559 | if (local_contact == -ENOMEM) |
| 557 | reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/ | 560 | reason_code = SMC_CLC_DECL_MEM;/* insufficient memory*/ |
| @@ -621,7 +624,7 @@ static int smc_connect_ism(struct smc_sock *smc, | |||
| 621 | int rc = 0; | 624 | int rc = 0; |
| 622 | 625 | ||
| 623 | mutex_lock(&smc_create_lgr_pending); | 626 | mutex_lock(&smc_create_lgr_pending); |
| 624 | local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, | 627 | local_contact = smc_conn_create(smc, true, aclc->hdr.flag, NULL, 0, 0, |
| 625 | NULL, ismdev, aclc->gid); | 628 | NULL, ismdev, aclc->gid); |
| 626 | if (local_contact < 0) | 629 | if (local_contact < 0) |
| 627 | return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0); | 630 | return smc_connect_abort(smc, SMC_CLC_DECL_MEM, 0); |
| @@ -1086,7 +1089,7 @@ static int smc_listen_rdma_init(struct smc_sock *new_smc, | |||
| 1086 | int *local_contact) | 1089 | int *local_contact) |
| 1087 | { | 1090 | { |
| 1088 | /* allocate connection / link group */ | 1091 | /* allocate connection / link group */ |
| 1089 | *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, | 1092 | *local_contact = smc_conn_create(new_smc, false, 0, ibdev, ibport, 0, |
| 1090 | &pclc->lcl, NULL, 0); | 1093 | &pclc->lcl, NULL, 0); |
| 1091 | if (*local_contact < 0) { | 1094 | if (*local_contact < 0) { |
| 1092 | if (*local_contact == -ENOMEM) | 1095 | if (*local_contact == -ENOMEM) |
| @@ -1110,7 +1113,7 @@ static int smc_listen_ism_init(struct smc_sock *new_smc, | |||
| 1110 | struct smc_clc_msg_smcd *pclc_smcd; | 1113 | struct smc_clc_msg_smcd *pclc_smcd; |
| 1111 | 1114 | ||
| 1112 | pclc_smcd = smc_get_clc_msg_smcd(pclc); | 1115 | pclc_smcd = smc_get_clc_msg_smcd(pclc); |
| 1113 | *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, NULL, | 1116 | *local_contact = smc_conn_create(new_smc, true, 0, NULL, 0, 0, NULL, |
| 1114 | ismdev, pclc_smcd->gid); | 1117 | ismdev, pclc_smcd->gid); |
| 1115 | if (*local_contact < 0) { | 1118 | if (*local_contact < 0) { |
| 1116 | if (*local_contact == -ENOMEM) | 1119 | if (*local_contact == -ENOMEM) |
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index ed5dcf03fe0b..db83332ac1c8 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c | |||
| @@ -81,7 +81,7 @@ static inline void smc_cdc_add_pending_send(struct smc_connection *conn, | |||
| 81 | sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE, | 81 | sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE, |
| 82 | "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)"); | 82 | "must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)"); |
| 83 | BUILD_BUG_ON_MSG( | 83 | BUILD_BUG_ON_MSG( |
| 84 | sizeof(struct smc_cdc_msg) != SMC_WR_TX_SIZE, | 84 | offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE, |
| 85 | "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()"); | 85 | "must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()"); |
| 86 | BUILD_BUG_ON_MSG( | 86 | BUILD_BUG_ON_MSG( |
| 87 | sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE, | 87 | sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE, |
| @@ -177,23 +177,24 @@ void smc_cdc_tx_dismiss_slots(struct smc_connection *conn) | |||
| 177 | int smcd_cdc_msg_send(struct smc_connection *conn) | 177 | int smcd_cdc_msg_send(struct smc_connection *conn) |
| 178 | { | 178 | { |
| 179 | struct smc_sock *smc = container_of(conn, struct smc_sock, conn); | 179 | struct smc_sock *smc = container_of(conn, struct smc_sock, conn); |
| 180 | union smc_host_cursor curs; | ||
| 180 | struct smcd_cdc_msg cdc; | 181 | struct smcd_cdc_msg cdc; |
| 181 | int rc, diff; | 182 | int rc, diff; |
| 182 | 183 | ||
| 183 | memset(&cdc, 0, sizeof(cdc)); | 184 | memset(&cdc, 0, sizeof(cdc)); |
| 184 | cdc.common.type = SMC_CDC_MSG_TYPE; | 185 | cdc.common.type = SMC_CDC_MSG_TYPE; |
| 185 | cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap; | 186 | curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs); |
| 186 | cdc.prod_count = conn->local_tx_ctrl.prod.count; | 187 | cdc.prod.wrap = curs.wrap; |
| 187 | 188 | cdc.prod.count = curs.count; | |
| 188 | cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap; | 189 | curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs); |
| 189 | cdc.cons_count = conn->local_tx_ctrl.cons.count; | 190 | cdc.cons.wrap = curs.wrap; |
| 190 | cdc.prod_flags = conn->local_tx_ctrl.prod_flags; | 191 | cdc.cons.count = curs.count; |
| 191 | cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags; | 192 | cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags; |
| 193 | cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags; | ||
| 192 | rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1); | 194 | rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1); |
| 193 | if (rc) | 195 | if (rc) |
| 194 | return rc; | 196 | return rc; |
| 195 | smc_curs_copy(&conn->rx_curs_confirmed, &conn->local_tx_ctrl.cons, | 197 | smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn); |
| 196 | conn); | ||
| 197 | /* Calculate transmitted data and increment free send buffer space */ | 198 | /* Calculate transmitted data and increment free send buffer space */ |
| 198 | diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin, | 199 | diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin, |
| 199 | &conn->tx_curs_sent); | 200 | &conn->tx_curs_sent); |
| @@ -331,13 +332,16 @@ static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc) | |||
| 331 | static void smcd_cdc_rx_tsklet(unsigned long data) | 332 | static void smcd_cdc_rx_tsklet(unsigned long data) |
| 332 | { | 333 | { |
| 333 | struct smc_connection *conn = (struct smc_connection *)data; | 334 | struct smc_connection *conn = (struct smc_connection *)data; |
| 335 | struct smcd_cdc_msg *data_cdc; | ||
| 334 | struct smcd_cdc_msg cdc; | 336 | struct smcd_cdc_msg cdc; |
| 335 | struct smc_sock *smc; | 337 | struct smc_sock *smc; |
| 336 | 338 | ||
| 337 | if (!conn) | 339 | if (!conn) |
| 338 | return; | 340 | return; |
| 339 | 341 | ||
| 340 | memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc)); | 342 | data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr; |
| 343 | smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn); | ||
| 344 | smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn); | ||
| 341 | smc = container_of(conn, struct smc_sock, conn); | 345 | smc = container_of(conn, struct smc_sock, conn); |
| 342 | smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc); | 346 | smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc); |
| 343 | } | 347 | } |
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h index 934df4473a7c..b5bfe38c7f9b 100644 --- a/net/smc/smc_cdc.h +++ b/net/smc/smc_cdc.h | |||
| @@ -48,21 +48,31 @@ struct smc_cdc_msg { | |||
| 48 | struct smc_cdc_producer_flags prod_flags; | 48 | struct smc_cdc_producer_flags prod_flags; |
| 49 | struct smc_cdc_conn_state_flags conn_state_flags; | 49 | struct smc_cdc_conn_state_flags conn_state_flags; |
| 50 | u8 reserved[18]; | 50 | u8 reserved[18]; |
| 51 | } __packed; /* format defined in RFC7609 */ | 51 | }; |
| 52 | |||
| 53 | /* SMC-D cursor format */ | ||
| 54 | union smcd_cdc_cursor { | ||
| 55 | struct { | ||
| 56 | u16 wrap; | ||
| 57 | u32 count; | ||
| 58 | struct smc_cdc_producer_flags prod_flags; | ||
| 59 | struct smc_cdc_conn_state_flags conn_state_flags; | ||
| 60 | } __packed; | ||
| 61 | #ifdef KERNEL_HAS_ATOMIC64 | ||
| 62 | atomic64_t acurs; /* for atomic processing */ | ||
| 63 | #else | ||
| 64 | u64 acurs; /* for atomic processing */ | ||
| 65 | #endif | ||
| 66 | } __aligned(8); | ||
| 52 | 67 | ||
| 53 | /* CDC message for SMC-D */ | 68 | /* CDC message for SMC-D */ |
| 54 | struct smcd_cdc_msg { | 69 | struct smcd_cdc_msg { |
| 55 | struct smc_wr_rx_hdr common; /* Type = 0xFE */ | 70 | struct smc_wr_rx_hdr common; /* Type = 0xFE */ |
| 56 | u8 res1[7]; | 71 | u8 res1[7]; |
| 57 | u16 prod_wrap; | 72 | union smcd_cdc_cursor prod; |
| 58 | u32 prod_count; | 73 | union smcd_cdc_cursor cons; |
| 59 | u8 res2[2]; | ||
| 60 | u16 cons_wrap; | ||
| 61 | u32 cons_count; | ||
| 62 | struct smc_cdc_producer_flags prod_flags; | ||
| 63 | struct smc_cdc_conn_state_flags conn_state_flags; | ||
| 64 | u8 res3[8]; | 74 | u8 res3[8]; |
| 65 | } __packed; | 75 | } __aligned(8); |
| 66 | 76 | ||
| 67 | static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn) | 77 | static inline bool smc_cdc_rxed_any_close(struct smc_connection *conn) |
| 68 | { | 78 | { |
| @@ -135,6 +145,21 @@ static inline void smc_curs_copy_net(union smc_cdc_cursor *tgt, | |||
| 135 | #endif | 145 | #endif |
| 136 | } | 146 | } |
| 137 | 147 | ||
| 148 | static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt, | ||
| 149 | union smcd_cdc_cursor *src, | ||
| 150 | struct smc_connection *conn) | ||
| 151 | { | ||
| 152 | #ifndef KERNEL_HAS_ATOMIC64 | ||
| 153 | unsigned long flags; | ||
| 154 | |||
| 155 | spin_lock_irqsave(&conn->acurs_lock, flags); | ||
| 156 | tgt->acurs = src->acurs; | ||
| 157 | spin_unlock_irqrestore(&conn->acurs_lock, flags); | ||
| 158 | #else | ||
| 159 | atomic64_set(&tgt->acurs, atomic64_read(&src->acurs)); | ||
| 160 | #endif | ||
| 161 | } | ||
| 162 | |||
| 138 | /* calculate cursor difference between old and new, where old <= new */ | 163 | /* calculate cursor difference between old and new, where old <= new */ |
| 139 | static inline int smc_curs_diff(unsigned int size, | 164 | static inline int smc_curs_diff(unsigned int size, |
| 140 | union smc_host_cursor *old, | 165 | union smc_host_cursor *old, |
| @@ -222,12 +247,17 @@ static inline void smcr_cdc_msg_to_host(struct smc_host_cdc_msg *local, | |||
| 222 | static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local, | 247 | static inline void smcd_cdc_msg_to_host(struct smc_host_cdc_msg *local, |
| 223 | struct smcd_cdc_msg *peer) | 248 | struct smcd_cdc_msg *peer) |
| 224 | { | 249 | { |
| 225 | local->prod.wrap = peer->prod_wrap; | 250 | union smc_host_cursor temp; |
| 226 | local->prod.count = peer->prod_count; | 251 | |
| 227 | local->cons.wrap = peer->cons_wrap; | 252 | temp.wrap = peer->prod.wrap; |
| 228 | local->cons.count = peer->cons_count; | 253 | temp.count = peer->prod.count; |
| 229 | local->prod_flags = peer->prod_flags; | 254 | atomic64_set(&local->prod.acurs, atomic64_read(&temp.acurs)); |
| 230 | local->conn_state_flags = peer->conn_state_flags; | 255 | |
| 256 | temp.wrap = peer->cons.wrap; | ||
| 257 | temp.count = peer->cons.count; | ||
| 258 | atomic64_set(&local->cons.acurs, atomic64_read(&temp.acurs)); | ||
| 259 | local->prod_flags = peer->cons.prod_flags; | ||
| 260 | local->conn_state_flags = peer->cons.conn_state_flags; | ||
| 231 | } | 261 | } |
| 232 | 262 | ||
| 233 | static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, | 263 | static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, |
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 1382ddae591e..35c1cdc93e1c 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c | |||
| @@ -189,6 +189,8 @@ free: | |||
| 189 | 189 | ||
| 190 | if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE) | 190 | if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE) |
| 191 | smc_llc_link_inactive(lnk); | 191 | smc_llc_link_inactive(lnk); |
| 192 | if (lgr->is_smcd) | ||
| 193 | smc_ism_signal_shutdown(lgr); | ||
| 192 | smc_lgr_free(lgr); | 194 | smc_lgr_free(lgr); |
| 193 | } | 195 | } |
| 194 | } | 196 | } |
| @@ -495,7 +497,7 @@ void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport) | |||
| 495 | } | 497 | } |
| 496 | 498 | ||
| 497 | /* Called when SMC-D device is terminated or peer is lost */ | 499 | /* Called when SMC-D device is terminated or peer is lost */ |
| 498 | void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid) | 500 | void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, unsigned short vlan) |
| 499 | { | 501 | { |
| 500 | struct smc_link_group *lgr, *l; | 502 | struct smc_link_group *lgr, *l; |
| 501 | LIST_HEAD(lgr_free_list); | 503 | LIST_HEAD(lgr_free_list); |
| @@ -505,7 +507,7 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid) | |||
| 505 | list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) { | 507 | list_for_each_entry_safe(lgr, l, &smc_lgr_list.list, list) { |
| 506 | if (lgr->is_smcd && lgr->smcd == dev && | 508 | if (lgr->is_smcd && lgr->smcd == dev && |
| 507 | (!peer_gid || lgr->peer_gid == peer_gid) && | 509 | (!peer_gid || lgr->peer_gid == peer_gid) && |
| 508 | !list_empty(&lgr->list)) { | 510 | (vlan == VLAN_VID_MASK || lgr->vlan_id == vlan)) { |
| 509 | __smc_lgr_terminate(lgr); | 511 | __smc_lgr_terminate(lgr); |
| 510 | list_move(&lgr->list, &lgr_free_list); | 512 | list_move(&lgr->list, &lgr_free_list); |
| 511 | } | 513 | } |
| @@ -516,6 +518,8 @@ void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid) | |||
| 516 | list_for_each_entry_safe(lgr, l, &lgr_free_list, list) { | 518 | list_for_each_entry_safe(lgr, l, &lgr_free_list, list) { |
| 517 | list_del_init(&lgr->list); | 519 | list_del_init(&lgr->list); |
| 518 | cancel_delayed_work_sync(&lgr->free_work); | 520 | cancel_delayed_work_sync(&lgr->free_work); |
| 521 | if (!peer_gid && vlan == VLAN_VID_MASK) /* dev terminated? */ | ||
| 522 | smc_ism_signal_shutdown(lgr); | ||
| 519 | smc_lgr_free(lgr); | 523 | smc_lgr_free(lgr); |
| 520 | } | 524 | } |
| 521 | } | 525 | } |
| @@ -569,7 +573,7 @@ out: | |||
| 569 | 573 | ||
| 570 | static bool smcr_lgr_match(struct smc_link_group *lgr, | 574 | static bool smcr_lgr_match(struct smc_link_group *lgr, |
| 571 | struct smc_clc_msg_local *lcl, | 575 | struct smc_clc_msg_local *lcl, |
| 572 | enum smc_lgr_role role) | 576 | enum smc_lgr_role role, u32 clcqpn) |
| 573 | { | 577 | { |
| 574 | return !memcmp(lgr->peer_systemid, lcl->id_for_peer, | 578 | return !memcmp(lgr->peer_systemid, lcl->id_for_peer, |
| 575 | SMC_SYSTEMID_LEN) && | 579 | SMC_SYSTEMID_LEN) && |
| @@ -577,7 +581,9 @@ static bool smcr_lgr_match(struct smc_link_group *lgr, | |||
| 577 | SMC_GID_SIZE) && | 581 | SMC_GID_SIZE) && |
| 578 | !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac, | 582 | !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac, |
| 579 | sizeof(lcl->mac)) && | 583 | sizeof(lcl->mac)) && |
| 580 | lgr->role == role; | 584 | lgr->role == role && |
| 585 | (lgr->role == SMC_SERV || | ||
| 586 | lgr->lnk[SMC_SINGLE_LINK].peer_qpn == clcqpn); | ||
| 581 | } | 587 | } |
| 582 | 588 | ||
| 583 | static bool smcd_lgr_match(struct smc_link_group *lgr, | 589 | static bool smcd_lgr_match(struct smc_link_group *lgr, |
| @@ -588,7 +594,7 @@ static bool smcd_lgr_match(struct smc_link_group *lgr, | |||
| 588 | 594 | ||
| 589 | /* create a new SMC connection (and a new link group if necessary) */ | 595 | /* create a new SMC connection (and a new link group if necessary) */ |
| 590 | int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, | 596 | int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, |
| 591 | struct smc_ib_device *smcibdev, u8 ibport, | 597 | struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn, |
| 592 | struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, | 598 | struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, |
| 593 | u64 peer_gid) | 599 | u64 peer_gid) |
| 594 | { | 600 | { |
| @@ -613,7 +619,7 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, | |||
| 613 | list_for_each_entry(lgr, &smc_lgr_list.list, list) { | 619 | list_for_each_entry(lgr, &smc_lgr_list.list, list) { |
| 614 | write_lock_bh(&lgr->conns_lock); | 620 | write_lock_bh(&lgr->conns_lock); |
| 615 | if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) : | 621 | if ((is_smcd ? smcd_lgr_match(lgr, smcd, peer_gid) : |
| 616 | smcr_lgr_match(lgr, lcl, role)) && | 622 | smcr_lgr_match(lgr, lcl, role, clcqpn)) && |
| 617 | !lgr->sync_err && | 623 | !lgr->sync_err && |
| 618 | lgr->vlan_id == vlan_id && | 624 | lgr->vlan_id == vlan_id && |
| 619 | (role == SMC_CLNT || | 625 | (role == SMC_CLNT || |
| @@ -1034,6 +1040,8 @@ void smc_core_exit(void) | |||
| 1034 | smc_llc_link_inactive(lnk); | 1040 | smc_llc_link_inactive(lnk); |
| 1035 | } | 1041 | } |
| 1036 | cancel_delayed_work_sync(&lgr->free_work); | 1042 | cancel_delayed_work_sync(&lgr->free_work); |
| 1043 | if (lgr->is_smcd) | ||
| 1044 | smc_ism_signal_shutdown(lgr); | ||
| 1037 | smc_lgr_free(lgr); /* free link group */ | 1045 | smc_lgr_free(lgr); /* free link group */ |
| 1038 | } | 1046 | } |
| 1039 | } | 1047 | } |
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index e177c6675038..b00287989a3d 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h | |||
| @@ -249,7 +249,8 @@ struct smc_clc_msg_local; | |||
| 249 | void smc_lgr_forget(struct smc_link_group *lgr); | 249 | void smc_lgr_forget(struct smc_link_group *lgr); |
| 250 | void smc_lgr_terminate(struct smc_link_group *lgr); | 250 | void smc_lgr_terminate(struct smc_link_group *lgr); |
| 251 | void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport); | 251 | void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport); |
| 252 | void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid); | 252 | void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid, |
| 253 | unsigned short vlan); | ||
| 253 | int smc_buf_create(struct smc_sock *smc, bool is_smcd); | 254 | int smc_buf_create(struct smc_sock *smc, bool is_smcd); |
| 254 | int smc_uncompress_bufsize(u8 compressed); | 255 | int smc_uncompress_bufsize(u8 compressed); |
| 255 | int smc_rmb_rtoken_handling(struct smc_connection *conn, | 256 | int smc_rmb_rtoken_handling(struct smc_connection *conn, |
| @@ -264,7 +265,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id); | |||
| 264 | 265 | ||
| 265 | void smc_conn_free(struct smc_connection *conn); | 266 | void smc_conn_free(struct smc_connection *conn); |
| 266 | int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, | 267 | int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, |
| 267 | struct smc_ib_device *smcibdev, u8 ibport, | 268 | struct smc_ib_device *smcibdev, u8 ibport, u32 clcqpn, |
| 268 | struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, | 269 | struct smc_clc_msg_local *lcl, struct smcd_dev *smcd, |
| 269 | u64 peer_gid); | 270 | u64 peer_gid); |
| 270 | void smcd_conn_free(struct smc_connection *conn); | 271 | void smcd_conn_free(struct smc_connection *conn); |
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c index e36f21ce7252..2fff79db1a59 100644 --- a/net/smc/smc_ism.c +++ b/net/smc/smc_ism.c | |||
| @@ -187,22 +187,28 @@ struct smc_ism_event_work { | |||
| 187 | #define ISM_EVENT_REQUEST 0x0001 | 187 | #define ISM_EVENT_REQUEST 0x0001 |
| 188 | #define ISM_EVENT_RESPONSE 0x0002 | 188 | #define ISM_EVENT_RESPONSE 0x0002 |
| 189 | #define ISM_EVENT_REQUEST_IR 0x00000001 | 189 | #define ISM_EVENT_REQUEST_IR 0x00000001 |
| 190 | #define ISM_EVENT_CODE_SHUTDOWN 0x80 | ||
| 190 | #define ISM_EVENT_CODE_TESTLINK 0x83 | 191 | #define ISM_EVENT_CODE_TESTLINK 0x83 |
| 191 | 192 | ||
| 193 | union smcd_sw_event_info { | ||
| 194 | u64 info; | ||
| 195 | struct { | ||
| 196 | u8 uid[SMC_LGR_ID_SIZE]; | ||
| 197 | unsigned short vlan_id; | ||
| 198 | u16 code; | ||
| 199 | }; | ||
| 200 | }; | ||
| 201 | |||
| 192 | static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) | 202 | static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) |
| 193 | { | 203 | { |
| 194 | union { | 204 | union smcd_sw_event_info ev_info; |
| 195 | u64 info; | ||
| 196 | struct { | ||
| 197 | u32 uid; | ||
| 198 | unsigned short vlanid; | ||
| 199 | u16 code; | ||
| 200 | }; | ||
| 201 | } ev_info; | ||
| 202 | 205 | ||
| 206 | ev_info.info = wrk->event.info; | ||
| 203 | switch (wrk->event.code) { | 207 | switch (wrk->event.code) { |
| 208 | case ISM_EVENT_CODE_SHUTDOWN: /* Peer shut down DMBs */ | ||
| 209 | smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id); | ||
| 210 | break; | ||
| 204 | case ISM_EVENT_CODE_TESTLINK: /* Activity timer */ | 211 | case ISM_EVENT_CODE_TESTLINK: /* Activity timer */ |
| 205 | ev_info.info = wrk->event.info; | ||
| 206 | if (ev_info.code == ISM_EVENT_REQUEST) { | 212 | if (ev_info.code == ISM_EVENT_REQUEST) { |
| 207 | ev_info.code = ISM_EVENT_RESPONSE; | 213 | ev_info.code = ISM_EVENT_RESPONSE; |
| 208 | wrk->smcd->ops->signal_event(wrk->smcd, | 214 | wrk->smcd->ops->signal_event(wrk->smcd, |
| @@ -215,6 +221,21 @@ static void smcd_handle_sw_event(struct smc_ism_event_work *wrk) | |||
| 215 | } | 221 | } |
| 216 | } | 222 | } |
| 217 | 223 | ||
| 224 | int smc_ism_signal_shutdown(struct smc_link_group *lgr) | ||
| 225 | { | ||
| 226 | int rc; | ||
| 227 | union smcd_sw_event_info ev_info; | ||
| 228 | |||
| 229 | memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE); | ||
| 230 | ev_info.vlan_id = lgr->vlan_id; | ||
| 231 | ev_info.code = ISM_EVENT_REQUEST; | ||
| 232 | rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid, | ||
| 233 | ISM_EVENT_REQUEST_IR, | ||
| 234 | ISM_EVENT_CODE_SHUTDOWN, | ||
| 235 | ev_info.info); | ||
| 236 | return rc; | ||
| 237 | } | ||
| 238 | |||
| 218 | /* worker for SMC-D events */ | 239 | /* worker for SMC-D events */ |
| 219 | static void smc_ism_event_work(struct work_struct *work) | 240 | static void smc_ism_event_work(struct work_struct *work) |
| 220 | { | 241 | { |
| @@ -223,7 +244,7 @@ static void smc_ism_event_work(struct work_struct *work) | |||
| 223 | 244 | ||
| 224 | switch (wrk->event.type) { | 245 | switch (wrk->event.type) { |
| 225 | case ISM_EVENT_GID: /* GID event, token is peer GID */ | 246 | case ISM_EVENT_GID: /* GID event, token is peer GID */ |
| 226 | smc_smcd_terminate(wrk->smcd, wrk->event.tok); | 247 | smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK); |
| 227 | break; | 248 | break; |
| 228 | case ISM_EVENT_DMB: | 249 | case ISM_EVENT_DMB: |
| 229 | break; | 250 | break; |
| @@ -289,7 +310,7 @@ void smcd_unregister_dev(struct smcd_dev *smcd) | |||
| 289 | spin_unlock(&smcd_dev_list.lock); | 310 | spin_unlock(&smcd_dev_list.lock); |
| 290 | flush_workqueue(smcd->event_wq); | 311 | flush_workqueue(smcd->event_wq); |
| 291 | destroy_workqueue(smcd->event_wq); | 312 | destroy_workqueue(smcd->event_wq); |
| 292 | smc_smcd_terminate(smcd, 0); | 313 | smc_smcd_terminate(smcd, 0, VLAN_VID_MASK); |
| 293 | 314 | ||
| 294 | device_del(&smcd->dev); | 315 | device_del(&smcd->dev); |
| 295 | } | 316 | } |
diff --git a/net/smc/smc_ism.h b/net/smc/smc_ism.h index aee45b860b79..4da946cbfa29 100644 --- a/net/smc/smc_ism.h +++ b/net/smc/smc_ism.h | |||
| @@ -45,4 +45,5 @@ int smc_ism_register_dmb(struct smc_link_group *lgr, int buf_size, | |||
| 45 | int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc); | 45 | int smc_ism_unregister_dmb(struct smcd_dev *dev, struct smc_buf_desc *dmb_desc); |
| 46 | int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos, | 46 | int smc_ism_write(struct smcd_dev *dev, const struct smc_ism_position *pos, |
| 47 | void *data, size_t len); | 47 | void *data, size_t len); |
| 48 | int smc_ism_signal_shutdown(struct smc_link_group *lgr); | ||
| 48 | #endif | 49 | #endif |
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index 3c458d279855..c2694750a6a8 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c | |||
| @@ -215,12 +215,14 @@ int smc_wr_tx_put_slot(struct smc_link *link, | |||
| 215 | 215 | ||
| 216 | pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv); | 216 | pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv); |
| 217 | if (pend->idx < link->wr_tx_cnt) { | 217 | if (pend->idx < link->wr_tx_cnt) { |
| 218 | u32 idx = pend->idx; | ||
| 219 | |||
| 218 | /* clear the full struct smc_wr_tx_pend including .priv */ | 220 | /* clear the full struct smc_wr_tx_pend including .priv */ |
| 219 | memset(&link->wr_tx_pends[pend->idx], 0, | 221 | memset(&link->wr_tx_pends[pend->idx], 0, |
| 220 | sizeof(link->wr_tx_pends[pend->idx])); | 222 | sizeof(link->wr_tx_pends[pend->idx])); |
| 221 | memset(&link->wr_tx_bufs[pend->idx], 0, | 223 | memset(&link->wr_tx_bufs[pend->idx], 0, |
| 222 | sizeof(link->wr_tx_bufs[pend->idx])); | 224 | sizeof(link->wr_tx_bufs[pend->idx])); |
| 223 | test_and_clear_bit(pend->idx, link->wr_tx_mask); | 225 | test_and_clear_bit(idx, link->wr_tx_mask); |
| 224 | return 1; | 226 | return 1; |
| 225 | } | 227 | } |
| 226 | 228 | ||
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c index f8d4a419f3af..467039b342b5 100644 --- a/sound/core/oss/pcm_oss.c +++ b/sound/core/oss/pcm_oss.c | |||
| @@ -1062,8 +1062,8 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream) | |||
| 1062 | runtime->oss.channels = params_channels(params); | 1062 | runtime->oss.channels = params_channels(params); |
| 1063 | runtime->oss.rate = params_rate(params); | 1063 | runtime->oss.rate = params_rate(params); |
| 1064 | 1064 | ||
| 1065 | vfree(runtime->oss.buffer); | 1065 | kvfree(runtime->oss.buffer); |
| 1066 | runtime->oss.buffer = vmalloc(runtime->oss.period_bytes); | 1066 | runtime->oss.buffer = kvzalloc(runtime->oss.period_bytes, GFP_KERNEL); |
| 1067 | if (!runtime->oss.buffer) { | 1067 | if (!runtime->oss.buffer) { |
| 1068 | err = -ENOMEM; | 1068 | err = -ENOMEM; |
| 1069 | goto failure; | 1069 | goto failure; |
| @@ -2328,7 +2328,7 @@ static void snd_pcm_oss_release_substream(struct snd_pcm_substream *substream) | |||
| 2328 | { | 2328 | { |
| 2329 | struct snd_pcm_runtime *runtime; | 2329 | struct snd_pcm_runtime *runtime; |
| 2330 | runtime = substream->runtime; | 2330 | runtime = substream->runtime; |
| 2331 | vfree(runtime->oss.buffer); | 2331 | kvfree(runtime->oss.buffer); |
| 2332 | runtime->oss.buffer = NULL; | 2332 | runtime->oss.buffer = NULL; |
| 2333 | #ifdef CONFIG_SND_PCM_OSS_PLUGINS | 2333 | #ifdef CONFIG_SND_PCM_OSS_PLUGINS |
| 2334 | snd_pcm_oss_plugin_clear(substream); | 2334 | snd_pcm_oss_plugin_clear(substream); |
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c index 141c5f3a9575..31cb2acf8afc 100644 --- a/sound/core/oss/pcm_plugin.c +++ b/sound/core/oss/pcm_plugin.c | |||
| @@ -66,8 +66,8 @@ static int snd_pcm_plugin_alloc(struct snd_pcm_plugin *plugin, snd_pcm_uframes_t | |||
| 66 | return -ENXIO; | 66 | return -ENXIO; |
| 67 | size /= 8; | 67 | size /= 8; |
| 68 | if (plugin->buf_frames < frames) { | 68 | if (plugin->buf_frames < frames) { |
| 69 | vfree(plugin->buf); | 69 | kvfree(plugin->buf); |
| 70 | plugin->buf = vmalloc(size); | 70 | plugin->buf = kvzalloc(size, GFP_KERNEL); |
| 71 | plugin->buf_frames = frames; | 71 | plugin->buf_frames = frames; |
| 72 | } | 72 | } |
| 73 | if (!plugin->buf) { | 73 | if (!plugin->buf) { |
| @@ -191,7 +191,7 @@ int snd_pcm_plugin_free(struct snd_pcm_plugin *plugin) | |||
| 191 | if (plugin->private_free) | 191 | if (plugin->private_free) |
| 192 | plugin->private_free(plugin); | 192 | plugin->private_free(plugin); |
| 193 | kfree(plugin->buf_channels); | 193 | kfree(plugin->buf_channels); |
| 194 | vfree(plugin->buf); | 194 | kvfree(plugin->buf); |
| 195 | kfree(plugin); | 195 | kfree(plugin); |
| 196 | return 0; | 196 | return 0; |
| 197 | } | 197 | } |
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index 0a24037184c3..0a567634e5fa 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c | |||
| @@ -1177,6 +1177,7 @@ static const struct snd_pci_quirk ca0132_quirks[] = { | |||
| 1177 | SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE), | 1177 | SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE), |
| 1178 | SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ), | 1178 | SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ), |
| 1179 | SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ), | 1179 | SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ), |
| 1180 | SND_PCI_QUIRK(0x1102, 0x0033, "Sound Blaster ZxR", QUIRK_SBZ), | ||
| 1180 | SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), | 1181 | SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI), |
| 1181 | SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), | 1182 | SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI), |
| 1182 | SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI), | 1183 | SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI), |
| @@ -8413,7 +8414,7 @@ static void ca0132_free(struct hda_codec *codec) | |||
| 8413 | 8414 | ||
| 8414 | snd_hda_power_down(codec); | 8415 | snd_hda_power_down(codec); |
| 8415 | if (spec->mem_base) | 8416 | if (spec->mem_base) |
| 8416 | iounmap(spec->mem_base); | 8417 | pci_iounmap(codec->bus->pci, spec->mem_base); |
| 8417 | kfree(spec->spec_init_verbs); | 8418 | kfree(spec->spec_init_verbs); |
| 8418 | kfree(codec->spec); | 8419 | kfree(codec->spec); |
| 8419 | } | 8420 | } |
| @@ -8488,7 +8489,7 @@ static void ca0132_config(struct hda_codec *codec) | |||
| 8488 | break; | 8489 | break; |
| 8489 | case QUIRK_AE5: | 8490 | case QUIRK_AE5: |
| 8490 | codec_dbg(codec, "%s: QUIRK_AE5 applied.\n", __func__); | 8491 | codec_dbg(codec, "%s: QUIRK_AE5 applied.\n", __func__); |
| 8491 | snd_hda_apply_pincfgs(codec, r3di_pincfgs); | 8492 | snd_hda_apply_pincfgs(codec, ae5_pincfgs); |
| 8492 | break; | 8493 | break; |
| 8493 | } | 8494 | } |
| 8494 | 8495 | ||
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index fa61674a5605..970bc44a378b 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -6481,6 +6481,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
| 6481 | SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), | 6481 | SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
| 6482 | SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), | 6482 | SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
| 6483 | SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC), | 6483 | SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC), |
| 6484 | SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), | ||
| 6484 | SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC), | 6485 | SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC), |
| 6485 | SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360), | 6486 | SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360), |
| 6486 | SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE), | 6487 | SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE), |
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index 1dd5f4fcffd5..db66a952c173 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile | |||
| @@ -129,7 +129,7 @@ WARNINGS += $(call cc-supports,-Wno-pointer-sign) | |||
| 129 | WARNINGS += $(call cc-supports,-Wdeclaration-after-statement) | 129 | WARNINGS += $(call cc-supports,-Wdeclaration-after-statement) |
| 130 | WARNINGS += -Wshadow | 130 | WARNINGS += -Wshadow |
| 131 | 131 | ||
| 132 | CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \ | 132 | override CFLAGS += -DVERSION=\"$(VERSION)\" -DPACKAGE=\"$(PACKAGE)\" \ |
| 133 | -DPACKAGE_BUGREPORT=\"$(PACKAGE_BUGREPORT)\" -D_GNU_SOURCE | 133 | -DPACKAGE_BUGREPORT=\"$(PACKAGE_BUGREPORT)\" -D_GNU_SOURCE |
| 134 | 134 | ||
| 135 | UTIL_OBJS = utils/helpers/amd.o utils/helpers/msr.o \ | 135 | UTIL_OBJS = utils/helpers/amd.o utils/helpers/msr.o \ |
| @@ -156,12 +156,12 @@ LIB_SRC = lib/cpufreq.c lib/cpupower.c lib/cpuidle.c | |||
| 156 | LIB_OBJS = lib/cpufreq.o lib/cpupower.o lib/cpuidle.o | 156 | LIB_OBJS = lib/cpufreq.o lib/cpupower.o lib/cpuidle.o |
| 157 | LIB_OBJS := $(addprefix $(OUTPUT),$(LIB_OBJS)) | 157 | LIB_OBJS := $(addprefix $(OUTPUT),$(LIB_OBJS)) |
| 158 | 158 | ||
| 159 | CFLAGS += -pipe | 159 | override CFLAGS += -pipe |
| 160 | 160 | ||
| 161 | ifeq ($(strip $(NLS)),true) | 161 | ifeq ($(strip $(NLS)),true) |
| 162 | INSTALL_NLS += install-gmo | 162 | INSTALL_NLS += install-gmo |
| 163 | COMPILE_NLS += create-gmo | 163 | COMPILE_NLS += create-gmo |
| 164 | CFLAGS += -DNLS | 164 | override CFLAGS += -DNLS |
| 165 | endif | 165 | endif |
| 166 | 166 | ||
| 167 | ifeq ($(strip $(CPUFREQ_BENCH)),true) | 167 | ifeq ($(strip $(CPUFREQ_BENCH)),true) |
| @@ -175,7 +175,7 @@ ifeq ($(strip $(STATIC)),true) | |||
| 175 | UTIL_SRC += $(LIB_SRC) | 175 | UTIL_SRC += $(LIB_SRC) |
| 176 | endif | 176 | endif |
| 177 | 177 | ||
| 178 | CFLAGS += $(WARNINGS) | 178 | override CFLAGS += $(WARNINGS) |
| 179 | 179 | ||
| 180 | ifeq ($(strip $(V)),false) | 180 | ifeq ($(strip $(V)),false) |
| 181 | QUIET=@ | 181 | QUIET=@ |
| @@ -188,10 +188,10 @@ export QUIET ECHO | |||
| 188 | 188 | ||
| 189 | # if DEBUG is enabled, then we do not strip or optimize | 189 | # if DEBUG is enabled, then we do not strip or optimize |
| 190 | ifeq ($(strip $(DEBUG)),true) | 190 | ifeq ($(strip $(DEBUG)),true) |
| 191 | CFLAGS += -O1 -g -DDEBUG | 191 | override CFLAGS += -O1 -g -DDEBUG |
| 192 | STRIPCMD = /bin/true -Since_we_are_debugging | 192 | STRIPCMD = /bin/true -Since_we_are_debugging |
| 193 | else | 193 | else |
| 194 | CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer | 194 | override CFLAGS += $(OPTIMIZATION) -fomit-frame-pointer |
| 195 | STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment | 195 | STRIPCMD = $(STRIP) -s --remove-section=.note --remove-section=.comment |
| 196 | endif | 196 | endif |
| 197 | 197 | ||
diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile index d79ab161cc75..f68b4bc55273 100644 --- a/tools/power/cpupower/bench/Makefile +++ b/tools/power/cpupower/bench/Makefile | |||
| @@ -9,7 +9,7 @@ endif | |||
| 9 | ifeq ($(strip $(STATIC)),true) | 9 | ifeq ($(strip $(STATIC)),true) |
| 10 | LIBS = -L../ -L$(OUTPUT) -lm | 10 | LIBS = -L../ -L$(OUTPUT) -lm |
| 11 | OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \ | 11 | OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \ |
| 12 | $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/sysfs.o | 12 | $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/cpupower.o |
| 13 | else | 13 | else |
| 14 | LIBS = -L../ -L$(OUTPUT) -lm -lcpupower | 14 | LIBS = -L../ -L$(OUTPUT) -lm -lcpupower |
| 15 | OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o | 15 | OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o |
diff --git a/tools/power/cpupower/debug/x86_64/Makefile b/tools/power/cpupower/debug/x86_64/Makefile index 59af84b8ef45..b1b6c43644e7 100644 --- a/tools/power/cpupower/debug/x86_64/Makefile +++ b/tools/power/cpupower/debug/x86_64/Makefile | |||
| @@ -13,10 +13,10 @@ INSTALL = /usr/bin/install | |||
| 13 | default: all | 13 | default: all |
| 14 | 14 | ||
| 15 | $(OUTPUT)centrino-decode: ../i386/centrino-decode.c | 15 | $(OUTPUT)centrino-decode: ../i386/centrino-decode.c |
| 16 | $(CC) $(CFLAGS) -o $@ $< | 16 | $(CC) $(CFLAGS) -o $@ $(LDFLAGS) $< |
| 17 | 17 | ||
| 18 | $(OUTPUT)powernow-k8-decode: ../i386/powernow-k8-decode.c | 18 | $(OUTPUT)powernow-k8-decode: ../i386/powernow-k8-decode.c |
| 19 | $(CC) $(CFLAGS) -o $@ $< | 19 | $(CC) $(CFLAGS) -o $@ $(LDFLAGS) $< |
| 20 | 20 | ||
| 21 | all: $(OUTPUT)centrino-decode $(OUTPUT)powernow-k8-decode | 21 | all: $(OUTPUT)centrino-decode $(OUTPUT)powernow-k8-decode |
| 22 | 22 | ||
diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c index 1b993fe1ce23..0c0f3e3f0d80 100644 --- a/tools/power/cpupower/lib/cpufreq.c +++ b/tools/power/cpupower/lib/cpufreq.c | |||
| @@ -28,7 +28,7 @@ static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname, | |||
| 28 | 28 | ||
| 29 | snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s", | 29 | snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s", |
| 30 | cpu, fname); | 30 | cpu, fname); |
| 31 | return sysfs_read_file(path, buf, buflen); | 31 | return cpupower_read_sysfs(path, buf, buflen); |
| 32 | } | 32 | } |
| 33 | 33 | ||
| 34 | /* helper function to write a new value to a /sys file */ | 34 | /* helper function to write a new value to a /sys file */ |
diff --git a/tools/power/cpupower/lib/cpuidle.c b/tools/power/cpupower/lib/cpuidle.c index 9bd4c7655fdb..852d25462388 100644 --- a/tools/power/cpupower/lib/cpuidle.c +++ b/tools/power/cpupower/lib/cpuidle.c | |||
| @@ -319,7 +319,7 @@ static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf, | |||
| 319 | 319 | ||
| 320 | snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname); | 320 | snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname); |
| 321 | 321 | ||
| 322 | return sysfs_read_file(path, buf, buflen); | 322 | return cpupower_read_sysfs(path, buf, buflen); |
| 323 | } | 323 | } |
| 324 | 324 | ||
| 325 | 325 | ||
diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c index 9c395ec924de..9711d628b0f4 100644 --- a/tools/power/cpupower/lib/cpupower.c +++ b/tools/power/cpupower/lib/cpupower.c | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | #include "cpupower.h" | 15 | #include "cpupower.h" |
| 16 | #include "cpupower_intern.h" | 16 | #include "cpupower_intern.h" |
| 17 | 17 | ||
| 18 | unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen) | 18 | unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen) |
| 19 | { | 19 | { |
| 20 | int fd; | 20 | int fd; |
| 21 | ssize_t numread; | 21 | ssize_t numread; |
| @@ -95,7 +95,7 @@ static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *re | |||
| 95 | 95 | ||
| 96 | snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s", | 96 | snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s", |
| 97 | cpu, fname); | 97 | cpu, fname); |
| 98 | if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0) | 98 | if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0) |
| 99 | return -1; | 99 | return -1; |
| 100 | *result = strtol(linebuf, &endp, 0); | 100 | *result = strtol(linebuf, &endp, 0); |
| 101 | if (endp == linebuf || errno == ERANGE) | 101 | if (endp == linebuf || errno == ERANGE) |
diff --git a/tools/power/cpupower/lib/cpupower_intern.h b/tools/power/cpupower/lib/cpupower_intern.h index 92affdfbe417..4887c76d23f8 100644 --- a/tools/power/cpupower/lib/cpupower_intern.h +++ b/tools/power/cpupower/lib/cpupower_intern.h | |||
| @@ -3,4 +3,4 @@ | |||
| 3 | #define MAX_LINE_LEN 4096 | 3 | #define MAX_LINE_LEN 4096 |
| 4 | #define SYSFS_PATH_MAX 255 | 4 | #define SYSFS_PATH_MAX 255 |
| 5 | 5 | ||
| 6 | unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen); | 6 | unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen); |
