Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 6
-rw-r--r--  drivers/block/null_blk_main.c | 5
-rw-r--r--  drivers/block/paride/pcd.c | 14
-rw-r--r--  drivers/block/paride/pf.c | 12
-rw-r--r--  drivers/block/xsysace.c | 2
-rw-r--r--  drivers/char/Kconfig | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 20
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 5
-rw-r--r--  drivers/hid/Kconfig | 1
-rw-r--r--  drivers/hid/hid-core.c | 6
-rw-r--r--  drivers/hid/hid-debug.c | 5
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-input.c | 1
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 13
-rw-r--r--  drivers/hid/hid-quirks.c | 11
-rw-r--r--  drivers/hid/hid-steam.c | 26
-rw-r--r--  drivers/hid/hid-uclogic-params.c | 4
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 1
-rw-r--r--  drivers/hwmon/ntc_thermistor.c | 2
-rw-r--r--  drivers/hwmon/occ/common.c | 6
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 4
-rw-r--r--  drivers/md/dm-core.h | 1
-rw-r--r--  drivers/md/dm-init.c | 2
-rw-r--r--  drivers/md/dm-integrity.c | 16
-rw-r--r--  drivers/md/dm-rq.c | 11
-rw-r--r--  drivers/md/dm-table.c | 39
-rw-r--r--  drivers/md/dm.c | 30
-rw-r--r--  drivers/mfd/Kconfig | 2
-rw-r--r--  drivers/mfd/sprd-sc27xx-spi.c | 42
-rw-r--r--  drivers/mfd/twl-core.c | 23
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c | 6
-rw-r--r--  drivers/net/bonding/bond_sysfs_slave.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 24
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 20
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 30
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 53
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 18
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 28
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 57
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 27
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs_com.h | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 34
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 6
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 32
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/vrf.c | 1
-rw-r--r--  drivers/parisc/iosapic.c | 6
-rw-r--r--  drivers/rtc/Kconfig | 4
-rw-r--r--  drivers/rtc/rtc-cros-ec.c | 4
-rw-r--r--  drivers/rtc/rtc-da9063.c | 7
-rw-r--r--  drivers/rtc/rtc-sh.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 7
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 7
-rw-r--r--  drivers/scsi/scsi_devinfo.c | 1
-rw-r--r--  drivers/scsi/scsi_dh.c | 1
-rw-r--r--  drivers/scsi/storvsc_drv.c | 15
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 4
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c | 2
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 14
-rw-r--r--  drivers/xen/privcmd-buf.c | 3
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c | 4
114 files changed, 767 insertions, 440 deletions
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 62d3aa74277b..5e9d7348c16f 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 
         ACPI_FUNCTION_TRACE(ev_enable_gpe);
 
-        /* Enable the requested GPE */
+        /* Clear the GPE status */
+        status = acpi_hw_clear_gpe(gpe_event_info);
+        if (ACPI_FAILURE(status))
+                return_ACPI_STATUS(status);
 
+        /* Enable the requested GPE */
         status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
         return_ACPI_STATUS(status);
 }
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 417a9f15c116..d7ac09c092f2 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1748,6 +1748,11 @@ static int __init null_init(void)
                 return -EINVAL;
         }
 
+        if (g_home_node != NUMA_NO_NODE && g_home_node >= nr_online_nodes) {
+                pr_err("null_blk: invalid home_node value\n");
+                g_home_node = NUMA_NO_NODE;
+        }
+
         if (g_queue_mode == NULL_Q_RQ) {
                 pr_err("null_blk: legacy IO path no longer available\n");
                 return -EINVAL;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 377a694dc228..6d415b20fb70 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -314,6 +314,7 @@ static void pcd_init_units(void)
                 disk->queue = blk_mq_init_sq_queue(&cd->tag_set, &pcd_mq_ops,
                                                    1, BLK_MQ_F_SHOULD_MERGE);
                 if (IS_ERR(disk->queue)) {
+                        put_disk(disk);
                         disk->queue = NULL;
                         continue;
                 }
@@ -750,6 +751,8 @@ static int pcd_detect(void)
 
         printk("%s: No CD-ROM drive found\n", name);
         for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+                if (!cd->disk)
+                        continue;
                 blk_cleanup_queue(cd->disk->queue);
                 cd->disk->queue = NULL;
                 blk_mq_free_tag_set(&cd->tag_set);
@@ -1010,8 +1013,14 @@ static int __init pcd_init(void)
         pcd_probe_capabilities();
 
         if (register_blkdev(major, name)) {
-                for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+                for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+                        if (!cd->disk)
+                                continue;
+
+                        blk_cleanup_queue(cd->disk->queue);
+                        blk_mq_free_tag_set(&cd->tag_set);
                         put_disk(cd->disk);
+                }
                 return -EBUSY;
         }
 
@@ -1032,6 +1041,9 @@ static void __exit pcd_exit(void)
         int unit;
 
         for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+                if (!cd->disk)
+                        continue;
+
                 if (cd->present) {
                         del_gendisk(cd->disk);
                         pi_release(cd->pi);
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index 103b617cdc31..35e6e271b219 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -762,6 +762,8 @@ static int pf_detect(void)
 
         printk("%s: No ATAPI disk detected\n", name);
         for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+                if (!pf->disk)
+                        continue;
                 blk_cleanup_queue(pf->disk->queue);
                 pf->disk->queue = NULL;
                 blk_mq_free_tag_set(&pf->tag_set);
@@ -1029,8 +1031,13 @@ static int __init pf_init(void)
         pf_busy = 0;
 
         if (register_blkdev(major, name)) {
-                for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+                for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+                        if (!pf->disk)
+                                continue;
+                        blk_cleanup_queue(pf->disk->queue);
+                        blk_mq_free_tag_set(&pf->tag_set);
                         put_disk(pf->disk);
+                }
                 return -EBUSY;
         }
 
@@ -1051,6 +1058,9 @@ static void __exit pf_exit(void)
         int unit;
         unregister_blkdev(major, name);
         for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+                if (!pf->disk)
+                        continue;
+
                 if (pf->present)
                         del_gendisk(pf->disk);
 
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 87ccef4bd69e..32a21b8d1d85 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1090,6 +1090,8 @@ static int ace_setup(struct ace_device *ace)
         return 0;
 
 err_read:
+        /* prevent double queue cleanup */
+        ace->gd->queue = NULL;
         put_disk(ace->gd);
 err_alloc_disk:
         blk_cleanup_queue(ace->queue);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 72866a004f07..466ebd84ad17 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -348,7 +348,7 @@ config XILINX_HWICAP
 
 config R3964
         tristate "Siemens R3964 line discipline"
-        depends on TTY
+        depends on TTY && BROKEN
         ---help---
           This driver allows synchronous communication with devices using the
           Siemens R3964 packet protocol. Unless you are dealing with special
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b599c7318aab..2986119dd31f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2596,6 +2596,9 @@ static int __init intel_pstate_init(void)
         const struct x86_cpu_id *id;
         int rc;
 
+        if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+                return -ENODEV;
+
         if (no_load)
                 return -ENODEV;
 
@@ -2611,7 +2614,7 @@ static int __init intel_pstate_init(void)
         } else {
                 id = x86_match_cpu(intel_pstate_cpu_ids);
                 if (!id) {
-                        pr_info("CPU ID not supported\n");
+                        pr_info("CPU model not supported\n");
                         return -ENODEV;
                 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4f8fb4ecde34..ac0d646a7b74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3625,6 +3625,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
         struct pci_dev *pdev = adev->pdev;
         enum pci_bus_speed cur_speed;
         enum pcie_link_width cur_width;
+        u32 ret = 1;
 
         *speed = PCI_SPEED_UNKNOWN;
         *width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -3632,6 +3633,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
         while (pdev) {
                 cur_speed = pcie_get_speed_cap(pdev);
                 cur_width = pcie_get_width_cap(pdev);
+                ret = pcie_bandwidth_available(adev->pdev, NULL,
+                                               NULL, &cur_width);
+                if (!ret)
+                        cur_width = PCIE_LNK_WIDTH_RESRV;
 
                 if (cur_speed != PCI_SPEED_UNKNOWN) {
                         if (*speed == PCI_SPEED_UNKNOWN)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d0309e8c9d12..a11db2b1a63f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2405,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
         /* disable CG */
         WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
 
-        adev->gfx.rlc.funcs->reset(adev);
-
         gfx_v9_0_init_pg(adev);
 
         if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 4eba3c4800b6..ea18e9c2d8ce 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2660,12 +2660,18 @@ void core_link_enable_stream(
 void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
 {
         struct dc *core_dc = pipe_ctx->stream->ctx->dc;
+        struct dc_stream_state *stream = pipe_ctx->stream;
 
         core_dc->hwss.blank_stream(pipe_ctx);
 
         if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
                 deallocate_mst_payload(pipe_ctx);
 
+        if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+                dal_ddc_service_write_scdc_data(
+                        stream->link->ddc, 0,
+                        stream->timing.flags.LTE_340MCSC_SCRAMBLE);
+
         core_dc->hwss.disable_stream(pipe_ctx, option);
 
         disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 9aa7bec1b5fe..23b5b94a4939 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
          * MP0CLK DS
          */
         data->registry_data.disallowed_features = 0xE0041C00;
+        /* ECC feature should be disabled on old SMUs */
+        smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
+        hwmgr->smu_version = smum_get_argument(hwmgr);
+        if (hwmgr->smu_version < 0x282100)
+                data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
+
         data->registry_data.od_state_in_dc_support = 0;
         data->registry_data.thermal_support = 1;
         data->registry_data.skip_baco_hardware = 0;
@@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
         data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
         data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
         data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
+        data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
 
         for (i = 0; i < GNLD_FEATURES_MAX; i++) {
                 data->smu_features[i].smu_feature_bitmap =
@@ -3020,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
3020 "FCLK_DS", 3027 "FCLK_DS",
3021 "MP1CLK_DS", 3028 "MP1CLK_DS",
3022 "MP0CLK_DS", 3029 "MP0CLK_DS",
3023 "XGMI"}; 3030 "XGMI",
3031 "ECC"};
3024 static const char *output_title[] = { 3032 static const char *output_title[] = {
3025 "FEATURES", 3033 "FEATURES",
3026 "BITMASK", 3034 "BITMASK",
@@ -3462,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
         struct vega20_single_dpm_table *dpm_table;
         bool vblank_too_short = false;
         bool disable_mclk_switching;
+        bool disable_fclk_switching;
         uint32_t i, latency;
 
         disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
@@ -3537,13 +3546,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
         if (hwmgr->display_config->nb_pstate_switch_disable)
                 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 
+        if ((disable_mclk_switching &&
+            (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
+             hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
+                disable_fclk_switching = true;
+        else
+                disable_fclk_switching = false;
+
         /* fclk */
         dpm_table = &(data->dpm_table.fclk_table);
         dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
         dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
         dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
         dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
-        if (hwmgr->display_config->nb_pstate_switch_disable)
+        if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
                 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
 
         /* vclk */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index a5bc758ae097..ac2a3118a0ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -80,6 +80,7 @@ enum {
         GNLD_DS_MP1CLK,
         GNLD_DS_MP0CLK,
         GNLD_XGMI,
+        GNLD_ECC,
 
         GNLD_FEATURES_MAX
 };
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 63d5cf691549..195c4ae67058 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -99,7 +99,7 @@
 #define FEATURE_DS_MP1CLK_BIT           30
 #define FEATURE_DS_MP0CLK_BIT           31
 #define FEATURE_XGMI_BIT                32
-#define FEATURE_SPARE_33_BIT            33
+#define FEATURE_ECC_BIT                 33
 #define FEATURE_SPARE_34_BIT            34
 #define FEATURE_SPARE_35_BIT            35
 #define FEATURE_SPARE_36_BIT            36
@@ -165,7 +165,8 @@
 #define FEATURE_DS_FCLK_MASK            (1 << FEATURE_DS_FCLK_BIT            )
 #define FEATURE_DS_MP1CLK_MASK          (1 << FEATURE_DS_MP1CLK_BIT          )
 #define FEATURE_DS_MP0CLK_MASK          (1 << FEATURE_DS_MP0CLK_BIT          )
-#define FEATURE_XGMI_MASK               (1 << FEATURE_XGMI_BIT               )
+#define FEATURE_XGMI_MASK               (1ULL << FEATURE_XGMI_BIT            )
+#define FEATURE_ECC_MASK                (1ULL << FEATURE_ECC_BIT             )
 
 #define DPM_OVERRIDE_DISABLE_SOCCLK_PID             0x00000001
 #define DPM_OVERRIDE_DISABLE_UCLK_PID               0x00000002
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 035479e273be..e3f9caa7839f 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
 /**
  * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
  * @vgpu: a vGPU
- * @conncted: link state
+ * @connected: link state
  *
  * This function is used to trigger hotplug interrupt for vGPU
  *
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 3e7e2b80c857..5d887f7cc0d5 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -238,9 +238,6 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                 default:
                         gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
                 }
-
-                info->size = (((p.stride * p.height * p.bpp) / 8) +
-                                (PAGE_SIZE - 1)) >> PAGE_SHIFT;
         } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
                 ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
                 if (ret)
@@ -262,14 +259,13 @@ static int vgpu_get_plane_info(struct drm_device *dev,
                         info->x_hot = UINT_MAX;
                         info->y_hot = UINT_MAX;
                 }
-
-                info->size = (((info->stride * c.height * c.bpp) / 8)
-                                + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
         } else {
                 gvt_vgpu_err("invalid plane id:%d\n", plane_id);
                 return -EINVAL;
         }
 
+        info->size = (info->stride * info->height + PAGE_SIZE - 1)
+                     >> PAGE_SHIFT;
         if (info->size == 0) {
                 gvt_vgpu_err("fb size is zero\n");
                 return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index d7052ab7908c..cf133ef03873 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1946,7 +1946,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
  */
 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
 {
-        atomic_dec(&mm->pincount);
+        atomic_dec_if_positive(&mm->pincount);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 159192c097cc..05b953793316 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1486,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
                 intel_runtime_pm_put_unchecked(dev_priv);
         }
 
-        if (ret && (vgpu_is_vm_unhealthy(ret))) {
-                enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
+        if (ret) {
+                if (vgpu_is_vm_unhealthy(ret))
+                        enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
                 intel_vgpu_destroy_workload(workload);
                 return ERR_PTR(ret);
         }
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0bd890c04fe4..f6f6e5b78e97 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
                 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
                                        &ctx);
                 if (ret) {
-                        ret = -EINTR;
+                        if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+                                try_again = true;
+                                continue;
+                        }
                         break;
                 }
                 crtc = connector->state->crtc;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 6ca8d322b487..4ca0cdfa6b33 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -150,6 +150,7 @@ config HID_ASUS
150 tristate "Asus" 150 tristate "Asus"
151 depends on LEDS_CLASS 151 depends on LEDS_CLASS
152 depends on ASUS_WMI || ASUS_WMI=n 152 depends on ASUS_WMI || ASUS_WMI=n
153 select POWER_SUPPLY
153 ---help--- 154 ---help---
154 Support for Asus notebook built-in keyboard and touchpad via i2c, and 155 Support for Asus notebook built-in keyboard and touchpad via i2c, and
155 the Asus Republic of Gamers laptop keyboard special keys. 156 the Asus Republic of Gamers laptop keyboard special keys.
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9993b692598f..860e21ec6a49 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
 u32 hid_field_extract(const struct hid_device *hid, u8 *report,
                         unsigned offset, unsigned n)
 {
-        if (n > 32) {
-                hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
+        if (n > 256) {
+                hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
                          n, current->comm);
-                n = 32;
+                n = 256;
         }
 
         return __extract(report, offset, n);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index ac9fda1b5a72..1384e57182af 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
         seq_printf(f, "\n\n");
 
         /* dump parsed data and input mappings */
+        if (down_interruptible(&hdev->driver_input_lock))
+                return 0;
+
         hid_dump_device(hdev, f);
         seq_printf(f, "\n");
         hid_dump_input_mapping(hdev, f);
 
+        up(&hdev->driver_input_lock);
+
         return 0;
 }
 
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b6d93f4ad037..adce58f24f76 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1083,6 +1083,7 @@
 #define USB_DEVICE_ID_SYNAPTICS_HD        0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD   0x1ac3
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103   0x5710
+#define I2C_DEVICE_ID_SYNAPTICS_7E7E      0x7e7e
 
 #define USB_VENDOR_ID_TEXAS_INSTRUMENTS   0x2047
 #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA   0x0855
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b10b1922c5bd..1fce0076e7dc 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -998,6 +998,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
         case 0x1b8: map_key_clear(KEY_VIDEO);           break;
         case 0x1bc: map_key_clear(KEY_MESSENGER);       break;
         case 0x1bd: map_key_clear(KEY_INFO);            break;
+        case 0x1cb: map_key_clear(KEY_ASSISTANT);       break;
         case 0x201: map_key_clear(KEY_NEW);             break;
         case 0x202: map_key_clear(KEY_OPEN);            break;
         case 0x203: map_key_clear(KEY_CLOSE);           break;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 15ed6177a7a3..199cc256e9d9 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
                 kfree(data);
                 return -ENOMEM;
         }
+        data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
+        if (!data->wq) {
+                kfree(data->effect_ids);
+                kfree(data);
+                return -ENOMEM;
+        }
+
         data->hidpp = hidpp;
         data->feature_index = feature_index;
         data->version = version;
@@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
         /* ignore boost value at response.fap.params[2] */
 
         /* init the hardware command queue */
-        data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
         atomic_set(&data->workqueue_size, 0);
 
         /* initialize with zero autocenter to get wheel in usable state */
@@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
                 input_report_rel(mydata->input, REL_Y, v);
 
                 v = hid_snto32(data[6], 8);
-                hidpp_scroll_counter_handle_scroll(
-                                &hidpp->vertical_wheel_counter, v);
+                if (v != 0)
+                        hidpp_scroll_counter_handle_scroll(
+                                        &hidpp->vertical_wheel_counter, v);
 
                 input_sync(mydata->input);
         }
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 953908f2267c..77ffba48cc73 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = {
         { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
         { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
         { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
-        { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
         { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
         { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
         { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = {
         { }
 };
 
-/**
+/*
  * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
  *
  * There are composite devices for which we want to ignore only a certain
@@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev)
                 if (hdev->product == 0x0401 &&
                     strncmp(hdev->name, "ELAN0800", 8) != 0)
                         return true;
+                /* Same with product id 0x0400 */
+                if (hdev->product == 0x0400 &&
+                    strncmp(hdev->name, "QTEC0001", 8) != 0)
+                        return true;
                 break;
         }
 
@@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
         }
 
         if (bl_entry != NULL)
-                dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n",
+                dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n",
                         bl_entry->driver_data, bl_entry->vendor,
                         bl_entry->product);
 
@@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
                 quirks |= bl_entry->driver_data;
 
         if (quirks)
-                dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n",
+                dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n",
                         quirks, hdev->vendor, hdev->product);
         return quirks;
 }
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 8141cadfca0e..8dae0f9b819e 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam)
 static int steam_register(struct steam_device *steam)
 {
         int ret;
+        bool client_opened;
 
         /*
          * This function can be called several times in a row with the
@@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam)
          * Unlikely, but getting the serial could fail, and it is not so
          * important, so make up a serial number and go on.
          */
+        mutex_lock(&steam->mutex);
         if (steam_get_serial(steam) < 0)
                 strlcpy(steam->serial_no, "XXXXXXXXXX",
                         sizeof(steam->serial_no));
+        mutex_unlock(&steam->mutex);
 
         hid_info(steam->hdev, "Steam Controller '%s' connected",
                         steam->serial_no);
@@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam)
         }
 
         mutex_lock(&steam->mutex);
-        if (!steam->client_opened) {
+        client_opened = steam->client_opened;
+        if (!client_opened)
                 steam_set_lizard_mode(steam, lizard_mode);
+        mutex_unlock(&steam->mutex);
+
+        if (!client_opened)
                 ret = steam_input_register(steam);
-        } else {
+        else
                 ret = 0;
-        }
-        mutex_unlock(&steam->mutex);
 
         return ret;
 }
@@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev)
 {
         struct steam_device *steam = hdev->driver_data;
 
+        unsigned long flags;
+        bool connected;
+
+        spin_lock_irqsave(&steam->lock, flags);
+        connected = steam->connected;
+        spin_unlock_irqrestore(&steam->lock, flags);
+
         mutex_lock(&steam->mutex);
         steam->client_opened = false;
+        if (connected)
+                steam_set_lizard_mode(steam, lizard_mode);
         mutex_unlock(&steam->mutex);
 
-        if (steam->connected) {
-                steam_set_lizard_mode(steam, lizard_mode);
+        if (connected)
                 steam_input_register(steam);
-        }
 }
 
 static int steam_client_ll_raw_request(struct hid_device *hdev,
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 7710d9f957da..0187c9f8fc22 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
                 goto cleanup;
         }
         rc = usb_string(udev, 201, ver_ptr, ver_len);
-        if (ver_ptr == NULL) {
-                rc = -ENOMEM;
-                goto cleanup;
-        }
         if (rc == -EPIPE) {
                 *ver_ptr = '\0';
         } else if (rc < 0) {
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 90164fed08d3..4d1f24ee249c 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -184,6 +184,8 @@ static const struct i2c_hid_quirks {
                 I2C_HID_QUIRK_NO_RUNTIME_PM },
         { USB_VENDOR_ID_ELAN, HID_ANY_ID,
                 I2C_HID_QUIRK_BOGUS_IRQ },
+        { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E,
+                I2C_HID_QUIRK_NO_RUNTIME_PM },
         { 0, 0 }
 };
 
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6f929bfa9fcd..d0f1dfe2bcbb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1759,6 +1759,7 @@ config SENSORS_VT8231
 config SENSORS_W83773G
         tristate "Nuvoton W83773G"
         depends on I2C
+        select REGMAP_I2C
         help
           If you say yes here you get support for the Nuvoton W83773G hardware
           monitoring chip.
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index e4f9f7ce92fa..f9abeeeead9e 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = {
 };
 
 static const u32 ntc_temp_config[] = {
-        HWMON_T_INPUT, HWMON_T_TYPE,
+        HWMON_T_INPUT | HWMON_T_TYPE,
         0
 };
 
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index b91a80abf724..4679acb4918e 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
                                 s++;
                         }
                 }
+
+                s = (sensors->power.num_sensors * 4) + 1;
         } else {
                 for (i = 0; i < sensors->power.num_sensors; ++i) {
                         s = i + 1;
@@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
                                                      show_power, NULL, 3, i);
                         attr++;
                 }
-        }
 
-        if (sensors->caps.num_sensors >= 1) {
                 s = sensors->power.num_sensors + 1;
+        }
 
+        if (sensors->caps.num_sensors >= 1) {
                 snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
                 attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
                                              0, 0);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 42fed40198a0..c0c3043b5d61 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1169,11 +1169,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
         /* Init DMA config if supported */
         ret = i2c_imx_dma_request(i2c_imx, phy_addr);
         if (ret < 0)
-                goto clk_notifier_unregister;
+                goto del_adapter;
 
         dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
         return 0;   /* Return OK */
 
+del_adapter:
+        i2c_del_adapter(&i2c_imx->adapter);
 clk_notifier_unregister:
         clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
 rpm_disable:
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 95c6d86ab5e8..c4ef1fceead6 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -115,6 +115,7 @@ struct mapped_device {
         struct srcu_struct io_barrier;
 };
 
+void disable_discard(struct mapped_device *md);
 void disable_write_same(struct mapped_device *md);
 void disable_write_zeroes(struct mapped_device *md);
 
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index b53f30f16b4d..4b76f84424c3 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -36,7 +36,7 @@ struct dm_device {
         struct list_head list;
 };
 
-const char *dm_allowed_targets[] __initconst = {
+const char * const dm_allowed_targets[] __initconst = {
         "crypt",
         "delay",
         "linear",
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index d57d997a52c8..7c678f50aaa3 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
 static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
 {
         return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
-               range2->logical_sector + range2->n_sectors > range2->logical_sector;
+               range1->logical_sector + range1->n_sectors > range2->logical_sector;
 }
 
 static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
@@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
                 struct dm_integrity_range *last_range =
                         list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
                 struct task_struct *last_range_task;
-                if (!ranges_overlap(range, last_range))
-                        break;
                 last_range_task = last_range->task;
                 list_del(&last_range->wait_entry);
                 if (!add_new_range(ic, last_range, false)) {
@@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                         journal_watermark = val;
                 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
                         sync_msec = val;
-                else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) {
+                else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
                         if (ic->meta_dev) {
                                 dm_put_device(ti, ic->meta_dev);
                                 ic->meta_dev = NULL;
@@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
                                 goto bad;
                         }
                         ic->sectors_per_block = val >> SECTOR_SHIFT;
-                } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
+                } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
                         r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
                                             "Invalid internal_hash argument");
                         if (r)
                                 goto bad;
-                } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
+                } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
                         r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
                                             "Invalid journal_crypt argument");
                         if (r)
                                 goto bad;
-                } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
+                } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
                         r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
                                             "Invalid journal_mac argument");
                         if (r)
@@ -3616,7 +3614,7 @@ static struct target_type integrity_target = {
         .io_hints               = dm_integrity_io_hints,
 };
 
-int __init dm_integrity_init(void)
+static int __init dm_integrity_init(void)
 {
         int r;
 
@@ -3635,7 +3633,7 @@ int __init dm_integrity_init(void)
         return r;
 }
 
-void dm_integrity_exit(void)
+static void __exit dm_integrity_exit(void)
 {
         dm_unregister_target(&integrity_target);
         kmem_cache_destroy(journal_io_cache);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 09773636602d..b66745bd08bb 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -222,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
         }
 
         if (unlikely(error == BLK_STS_TARGET)) {
-                if (req_op(clone) == REQ_OP_WRITE_SAME &&
-                    !clone->q->limits.max_write_same_sectors)
+                if (req_op(clone) == REQ_OP_DISCARD &&
+                    !clone->q->limits.max_discard_sectors)
+                        disable_discard(tio->md);
+                else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+                         !clone->q->limits.max_write_same_sectors)
                         disable_write_same(tio->md);
-                if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+                else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
                     !clone->q->limits.max_write_zeroes_sectors)
                         disable_write_zeroes(tio->md);
         }
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ba9481f1bf3c..cde3b49b2a91 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
         return true;
 }
 
+static int device_requires_stable_pages(struct dm_target *ti,
+                                        struct dm_dev *dev, sector_t start,
+                                        sector_t len, void *data)
+{
+        struct request_queue *q = bdev_get_queue(dev->bdev);
+
+        return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+}
+
+/*
+ * If any underlying device requires stable pages, a table must require
+ * them as well. Only targets that support iterate_devices are considered:
+ * don't want error, zero, etc to require stable pages.
+ */
+static bool dm_table_requires_stable_pages(struct dm_table *t)
+{
+        struct dm_target *ti;
+        unsigned i;
+
+        for (i = 0; i < dm_table_get_num_targets(t); i++) {
+                ti = dm_table_get_target(t, i);
+
+                if (ti->type->iterate_devices &&
+                    ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
+                        return true;
+        }
+
+        return false;
+}
+
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                                struct queue_limits *limits)
 {
@@ -1897,6 +1927,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
         dm_table_verify_integrity(t);
 
         /*
+         * Some devices don't use blk_integrity but still want stable pages
+         * because they do their own checksumming.
+         */
+        if (dm_table_requires_stable_pages(t))
+                q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+        else
+                q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+
+        /*
          * Determine whether or not this queue's I/O timings contribute
          * to the entropy pool, Only request-based targets use this.
          * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 68d24056d0b1..043f0761e4a0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -945,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
         }
 }
 
+void disable_discard(struct mapped_device *md)
+{
+        struct queue_limits *limits = dm_get_queue_limits(md);
+
+        /* device doesn't really support DISCARD, disable it */
+        limits->max_discard_sectors = 0;
+        blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
 void disable_write_same(struct mapped_device *md)
 {
         struct queue_limits *limits = dm_get_queue_limits(md);
@@ -970,11 +979,14 @@ static void clone_endio(struct bio *bio)
         dm_endio_fn endio = tio->ti->type->end_io;
 
         if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
-                if (bio_op(bio) == REQ_OP_WRITE_SAME &&
-                    !bio->bi_disk->queue->limits.max_write_same_sectors)
+                if (bio_op(bio) == REQ_OP_DISCARD &&
+                    !bio->bi_disk->queue->limits.max_discard_sectors)
+                        disable_discard(md);
+                else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+                         !bio->bi_disk->queue->limits.max_write_same_sectors)
                         disable_write_same(md);
-                if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+                else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
                     !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
                         disable_write_zeroes(md);
         }
 
@@ -1042,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
                 return -EINVAL;
         }
 
-        /*
-         * BIO based queue uses its own splitting. When multipage bvecs
-         * is switched on, size of the incoming bio may be too big to
-         * be handled in some targets, such as crypt.
-         *
-         * When these targets are ready for the big bio, we can remove
-         * the limit.
-         */
-        ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
+        ti->max_io_len = (uint32_t) len;
 
         return 0;
 }
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0ce2d8dfc5f1..26ad6468d13a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1246,7 +1246,7 @@ config MFD_STA2X11
 
 config MFD_SUN6I_PRCM
         bool "Allwinner A31 PRCM controller"
-        depends on ARCH_SUNXI
+        depends on ARCH_SUNXI || COMPILE_TEST
         select MFD_CORE
         help
           Support for the PRCM (Power/Reset/Clock Management) unit available
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 69df27769c21..43ac71691fe4 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = {
 static const struct mfd_cell sprd_pmic_devs[] = {
         {
                 .name = "sc27xx-wdt",
-                .of_compatible = "sprd,sc27xx-wdt",
+                .of_compatible = "sprd,sc2731-wdt",
         }, {
                 .name = "sc27xx-rtc",
-                .of_compatible = "sprd,sc27xx-rtc",
+                .of_compatible = "sprd,sc2731-rtc",
         }, {
                 .name = "sc27xx-charger",
-                .of_compatible = "sprd,sc27xx-charger",
+                .of_compatible = "sprd,sc2731-charger",
         }, {
                 .name = "sc27xx-chg-timer",
-                .of_compatible = "sprd,sc27xx-chg-timer",
+                .of_compatible = "sprd,sc2731-chg-timer",
         }, {
                 .name = "sc27xx-fast-chg",
-                .of_compatible = "sprd,sc27xx-fast-chg",
+                .of_compatible = "sprd,sc2731-fast-chg",
         }, {
                 .name = "sc27xx-chg-wdt",
-                .of_compatible = "sprd,sc27xx-chg-wdt",
+                .of_compatible = "sprd,sc2731-chg-wdt",
         }, {
                 .name = "sc27xx-typec",
-                .of_compatible = "sprd,sc27xx-typec",
+                .of_compatible = "sprd,sc2731-typec",
         }, {
                 .name = "sc27xx-flash",
-                .of_compatible = "sprd,sc27xx-flash",
+                .of_compatible = "sprd,sc2731-flash",
         }, {
                 .name = "sc27xx-eic",
-                .of_compatible = "sprd,sc27xx-eic",
+                .of_compatible = "sprd,sc2731-eic",
         }, {
                 .name = "sc27xx-efuse",
-                .of_compatible = "sprd,sc27xx-efuse",
+                .of_compatible = "sprd,sc2731-efuse",
         }, {
                 .name = "sc27xx-thermal",
-                .of_compatible = "sprd,sc27xx-thermal",
+                .of_compatible = "sprd,sc2731-thermal",
         }, {
                 .name = "sc27xx-adc",
-                .of_compatible = "sprd,sc27xx-adc",
+                .of_compatible = "sprd,sc2731-adc",
         }, {
                 .name = "sc27xx-audio-codec",
-                .of_compatible = "sprd,sc27xx-audio-codec",
+                .of_compatible = "sprd,sc2731-audio-codec",
         }, {
                 .name = "sc27xx-regulator",
-                .of_compatible = "sprd,sc27xx-regulator",
+                .of_compatible = "sprd,sc2731-regulator",
         }, {
                 .name = "sc27xx-vibrator",
-                .of_compatible = "sprd,sc27xx-vibrator",
+                .of_compatible = "sprd,sc2731-vibrator",
         }, {
                 .name = "sc27xx-keypad-led",
-                .of_compatible = "sprd,sc27xx-keypad-led",
+                .of_compatible = "sprd,sc2731-keypad-led",
         }, {
                 .name = "sc27xx-bltc",
-                .of_compatible = "sprd,sc27xx-bltc",
+                .of_compatible = "sprd,sc2731-bltc",
         }, {
                 .name = "sc27xx-fgu",
-                .of_compatible = "sprd,sc27xx-fgu",
+                .of_compatible = "sprd,sc2731-fgu",
         }, {
                 .name = "sc27xx-7sreset",
-                .of_compatible = "sprd,sc27xx-7sreset",
+                .of_compatible = "sprd,sc2731-7sreset",
         }, {
                 .name = "sc27xx-poweroff",
-                .of_compatible = "sprd,sc27xx-poweroff",
+                .of_compatible = "sprd,sc2731-poweroff",
         }, {
                 .name = "sc27xx-syscon",
-                .of_compatible = "sprd,sc27xx-syscon",
+                .of_compatible = "sprd,sc2731-syscon",
         },
 };
 
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 299016bc46d9..104477b512a2 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1245,6 +1245,28 @@ free:
1245 return status; 1245 return status;
1246} 1246}
1247 1247
1248static int __maybe_unused twl_suspend(struct device *dev)
1249{
1250 struct i2c_client *client = to_i2c_client(dev);
1251
1252 if (client->irq)
1253 disable_irq(client->irq);
1254
1255 return 0;
1256}
1257
1258static int __maybe_unused twl_resume(struct device *dev)
1259{
1260 struct i2c_client *client = to_i2c_client(dev);
1261
1262 if (client->irq)
1263 enable_irq(client->irq);
1264
1265 return 0;
1266}
1267
1268static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
1269
1248static const struct i2c_device_id twl_ids[] = { 1270static const struct i2c_device_id twl_ids[] = {
1249 { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ 1271 { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */
1250 { "twl5030", 0 }, /* T2 updated */ 1272 { "twl5030", 0 }, /* T2 updated */
@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
1262/* One Client Driver, 4 Clients */ 1284/* One Client Driver, 4 Clients */
1263static struct i2c_driver twl_driver = { 1285static struct i2c_driver twl_driver = {
1264 .driver.name = DRIVER_NAME, 1286 .driver.name = DRIVER_NAME,
1287 .driver.pm = &twl_dev_pm_ops,
1265 .id_table = twl_ids, 1288 .id_table = twl_ids,
1266 .probe = twl_probe, 1289 .probe = twl_probe,
1267 .remove = twl_remove, 1290 .remove = twl_remove,
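
The twl-core change wires a minimal suspend/resume pair into dev_pm_ops so the chip's IRQ stays masked across suspend. The same pattern reduced to a self-contained sketch for a hypothetical I2C driver (the foo_* names are illustrative, not from the patch):

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/pm.h>

static int __maybe_unused foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	if (client->irq)
		disable_irq(client->irq);	/* keep the IRQ quiet while suspended */

	return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	if (client->irq)
		enable_irq(client->irq);

	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
/* hooked up via .driver.pm = &foo_pm_ops in the i2c_driver, as in the hunk above */
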
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 72428b6bfc47..7b7286b4d81e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1876,7 +1876,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1876 continue; 1876 continue;
1877 } 1877 }
1878 1878
1879 if (time_after(jiffies, timeo) && !chip_ready(map, adr)) 1879 /*
1880 * Check "time_after" together with "!chip_good" before the bare "chip_good"
1881 * check, so that a scheduling delay is not misreported as a write failure.
1882 */
1883 if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
1880 break; 1884 break;
1881 1885
1882 if (chip_good(map, adr, datum)) { 1886 if (chip_good(map, adr, datum)) {
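
The cfi_cmdset_0002 fix is an instance of a common polling idiom: when the deadline check and the status check are separate reads, a long scheduling delay between them can make a completed operation look like a timeout, so the success condition is re-tested inside the timeout branch. A generic kernel-style sketch of the idiom, where op_done() is a hypothetical status predicate:

static int wait_op_done(unsigned long timeo)
{
	for (;;) {
		if (op_done())
			return 0;		/* completed normally */

		/*
		 * Only declare a timeout if the operation still has not
		 * finished; being scheduled away between the last poll and
		 * the deadline check must not be reported as a failure.
		 */
		if (time_after(jiffies, timeo) && !op_done())
			return -ETIMEDOUT;

		cond_resched();
	}
}
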
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2f120b2ffef0..4985268e2273 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
55 55
56static ssize_t perm_hwaddr_show(struct slave *slave, char *buf) 56static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
57{ 57{
58 return sprintf(buf, "%pM\n", slave->perm_hwaddr); 58 return sprintf(buf, "%*phC\n",
59 slave->dev->addr_len,
60 slave->perm_hwaddr);
59} 61}
60static SLAVE_ATTR_RO(perm_hwaddr); 62static SLAVE_ATTR_RO(perm_hwaddr);
61 63
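
The bonding change switches from %pM, which always prints six bytes, to the kernel's variable-length hex specifier so that devices with longer hardware addresses (for example InfiniBand) are shown in full. A sketch of how the specifier is used here (the helper name and arguments are illustrative):

static ssize_t example_hwaddr_show(const u8 *addr, int addr_len, char *buf)
{
	/* "%*phC" prints addr_len bytes as colon-separated hex, so addresses
	 * longer than ETH_ALEN are no longer truncated to six bytes. */
	return sprintf(buf, "%*phC\n", addr_len, addr);
}
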
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index dce84a2a65c7..c44b2822e4dd 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
427 return 0; 427 return 0;
428 428
429 lane = mv88e6390x_serdes_get_lane(chip, port); 429 lane = mv88e6390x_serdes_get_lane(chip, port);
430 if (lane < 0) 430 if (lane < 0 && lane != -ENODEV)
431 return lane; 431 return lane;
432 432
433 if (chip->ports[port].serdes_irq) { 433 if (lane >= 0) {
434 err = mv88e6390_serdes_irq_disable(chip, port, lane); 434 if (chip->ports[port].serdes_irq) {
435 err = mv88e6390_serdes_irq_disable(chip, port, lane);
436 if (err)
437 return err;
438 }
439
440 err = mv88e6390x_serdes_power(chip, port, false);
435 if (err) 441 if (err)
436 return err; 442 return err;
437 } 443 }
438 444
439 err = mv88e6390x_serdes_power(chip, port, false); 445 chip->ports[port].cmode = 0;
440 if (err)
441 return err;
442 446
443 if (cmode) { 447 if (cmode) {
444 err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg); 448 err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
452 if (err) 456 if (err)
453 return err; 457 return err;
454 458
459 chip->ports[port].cmode = cmode;
460
461 lane = mv88e6390x_serdes_get_lane(chip, port);
462 if (lane < 0)
463 return lane;
464
455 err = mv88e6390x_serdes_power(chip, port, true); 465 err = mv88e6390x_serdes_power(chip, port, true);
456 if (err) 466 if (err)
457 return err; 467 return err;
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
463 } 473 }
464 } 474 }
465 475
466 chip->ports[port].cmode = cmode;
467
468 return 0; 476 return 0;
469} 477}
470 478
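
The cmode change reorders the sequence so the SERDES state always matches the recorded cmode: power the old lane down (tolerating -ENODEV for ports without a lane in the current mode), clear the cached cmode, write the new one, then re-query the lane before powering it up, because the lane assignment depends on the cmode just written. A condensed sketch of that ordering, with error handling trimmed and write_cmode_register() standing in for the real register read/modify/write:

static int example_set_cmode(struct mv88e6xxx_chip *chip, int port, u8 cmode)
{
	int lane = mv88e6390x_serdes_get_lane(chip, port);

	if (lane >= 0)
		mv88e6390x_serdes_power(chip, port, false);	/* old lane down */

	chip->ports[port].cmode = 0;			/* nothing valid cached */

	write_cmode_register(chip, port, cmode);	/* hypothetical helper */
	chip->ports[port].cmode = cmode;

	lane = mv88e6390x_serdes_get_lane(chip, port);	/* mapping may differ now */
	if (lane >= 0)
		return mv88e6390x_serdes_power(chip, port, true);

	return 0;
}
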
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index aa2be4807191..28eac9056211 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1328,10 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
1328 struct nicvf_cq_poll *cq_poll = NULL; 1328 struct nicvf_cq_poll *cq_poll = NULL;
1329 union nic_mbx mbx = {}; 1329 union nic_mbx mbx = {};
1330 1330
1331 cancel_delayed_work_sync(&nic->link_change_work);
1332
1333 /* wait till all queued set_rx_mode tasks complete */ 1331 /* wait till all queued set_rx_mode tasks complete */
1334 drain_workqueue(nic->nicvf_rx_mode_wq); 1332 if (nic->nicvf_rx_mode_wq) {
1333 cancel_delayed_work_sync(&nic->link_change_work);
1334 drain_workqueue(nic->nicvf_rx_mode_wq);
1335 }
1335 1336
1336 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; 1337 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
1337 nicvf_send_msg_to_pf(nic, &mbx); 1338 nicvf_send_msg_to_pf(nic, &mbx);
@@ -1452,7 +1453,8 @@ int nicvf_open(struct net_device *netdev)
1452 struct nicvf_cq_poll *cq_poll = NULL; 1453 struct nicvf_cq_poll *cq_poll = NULL;
1453 1454
1454 /* wait till all queued set_rx_mode tasks complete if any */ 1455 /* wait till all queued set_rx_mode tasks complete if any */
1455 drain_workqueue(nic->nicvf_rx_mode_wq); 1456 if (nic->nicvf_rx_mode_wq)
1457 drain_workqueue(nic->nicvf_rx_mode_wq);
1456 1458
1457 netif_carrier_off(netdev); 1459 netif_carrier_off(netdev);
1458 1460
@@ -1550,10 +1552,12 @@ int nicvf_open(struct net_device *netdev)
1550 /* Send VF config done msg to PF */ 1552 /* Send VF config done msg to PF */
1551 nicvf_send_cfg_done(nic); 1553 nicvf_send_cfg_done(nic);
1552 1554
1553 INIT_DELAYED_WORK(&nic->link_change_work, 1555 if (nic->nicvf_rx_mode_wq) {
1554 nicvf_link_status_check_task); 1556 INIT_DELAYED_WORK(&nic->link_change_work,
1555 queue_delayed_work(nic->nicvf_rx_mode_wq, 1557 nicvf_link_status_check_task);
1556 &nic->link_change_work, 0); 1558 queue_delayed_work(nic->nicvf_rx_mode_wq,
1559 &nic->link_change_work, 0);
1560 }
1557 1561
1558 return 0; 1562 return 0;
1559cleanup: 1563cleanup:
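
The nicvf change makes the rx_mode workqueue strictly optional: every drain/cancel and every queueing of link_change_work is now guarded by a check that the workqueue was actually created. The guarded teardown, reduced to a sketch (the helper name is illustrative, the fields come from the driver):

static void example_stop_deferred_work(struct nicvf *nic)
{
	/* The workqueue only exists on some configurations; skip the
	 * teardown entirely when it was never created. */
	if (!nic->nicvf_rx_mode_wq)
		return;

	cancel_delayed_work_sync(&nic->link_change_work);
	drain_workqueue(nic->nicvf_rx_mode_wq);
}
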
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 5b4d3badcb73..e246f9733bb8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
105 /* Check if page can be recycled */ 105 /* Check if page can be recycled */
106 if (page) { 106 if (page) {
107 ref_count = page_ref_count(page); 107 ref_count = page_ref_count(page);
108 /* Check if this page has been used once i.e 'put_page' 108 /* This page can be recycled if internal ref_count and page's
109 * called after packet transmission i.e internal ref_count 109 * ref_count are equal, indicating that the page has been used
110 * and page's ref_count are equal i.e page can be recycled. 110 * once for packet transmission. For non-XDP mode, internal
111 * ref_count is always '1'.
111 */ 112 */
112 if (rbdr->is_xdp && (ref_count == pgcache->ref_count)) 113 if (rbdr->is_xdp) {
113 pgcache->ref_count--; 114 if (ref_count == pgcache->ref_count)
114 else 115 pgcache->ref_count--;
115 page = NULL; 116 else
116 117 page = NULL;
117 /* In non-XDP mode, page's ref_count needs to be '1' for it 118 } else if (ref_count != 1) {
118 * to be recycled.
119 */
120 if (!rbdr->is_xdp && (ref_count != 1))
121 page = NULL; 119 page = NULL;
120 }
122 } 121 }
123 122
124 if (!page) { 123 if (!page) {
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
365 while (head < rbdr->pgcnt) { 364 while (head < rbdr->pgcnt) {
366 pgcache = &rbdr->pgcache[head]; 365 pgcache = &rbdr->pgcache[head];
367 if (pgcache->page && page_ref_count(pgcache->page) != 0) { 366 if (pgcache->page && page_ref_count(pgcache->page) != 0) {
368 if (!rbdr->is_xdp) { 367 if (rbdr->is_xdp) {
369 put_page(pgcache->page); 368 page_ref_sub(pgcache->page,
370 continue; 369 pgcache->ref_count - 1);
371 } 370 }
372 page_ref_sub(pgcache->page, pgcache->ref_count - 1);
373 put_page(pgcache->page); 371 put_page(pgcache->page);
374 } 372 }
375 head++; 373 head++;
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 74849be5f004..e2919005ead3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
354 ppmax = max; 354 ppmax = max;
355 355
356 /* pool size must be multiple of unsigned long */ 356 /* pool size must be multiple of unsigned long */
357 bmap = BITS_TO_LONGS(ppmax); 357 bmap = ppmax / BITS_PER_TYPE(unsigned long);
358 if (!bmap)
359 return NULL;
360
358 ppmax = (bmap * sizeof(unsigned long)) << 3; 361 ppmax = (bmap * sizeof(unsigned long)) << 3;
359 362
360 alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; 363 alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
402 if (reserve_factor) { 405 if (reserve_factor) {
403 ppmax_pool = ppmax / reserve_factor; 406 ppmax_pool = ppmax / reserve_factor;
404 pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); 407 pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
408 if (!pool) {
409 ppmax_pool = 0;
410 reserve_factor = 0;
411 }
405 412
406 pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", 413 pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
407 ndev->name, ppmax, ppmax_pool, pool_index_max); 414 ndev->name, ppmax, ppmax_pool, pool_index_max);
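
The libcxgb_ppm change replaces the round-up BITS_TO_LONGS() sizing with a plain round-down division and then handles the new zero case: when fewer entries than one unsigned long's worth of bits are requested, the per-cpu pool is skipped (reserve_factor reset to 0) rather than sized at zero words. A small stand-alone illustration of the two roundings, compilable as ordinary userspace C:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	unsigned int ppmax = 37;	/* fewer than one long's worth of bits */

	/* The old sizing rounded up and never produced zero words... */
	printf("BITS_TO_LONGS(%u) = %zu\n", ppmax, (size_t)BITS_TO_LONGS(ppmax));

	/* ...the new round-down can, which is what the !bmap check catches. */
	printf("%u / BITS_PER_LONG = %zu\n", ppmax, (size_t)(ppmax / BITS_PER_LONG));

	return 0;
}
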
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 79d03f8ee7b1..c7fa97a7e1f4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -150,7 +150,6 @@ out_buffer_fail:
150/* free desc along with its attached buffer */ 150/* free desc along with its attached buffer */
151static void hnae_free_desc(struct hnae_ring *ring) 151static void hnae_free_desc(struct hnae_ring *ring)
152{ 152{
153 hnae_free_buffers(ring);
154 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, 153 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
155 ring->desc_num * sizeof(ring->desc[0]), 154 ring->desc_num * sizeof(ring->desc[0]),
156 ring_to_dma_dir(ring)); 155 ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
183/* fini ring, also free the buffer for the ring */ 182/* fini ring, also free the buffer for the ring */
184static void hnae_fini_ring(struct hnae_ring *ring) 183static void hnae_fini_ring(struct hnae_ring *ring)
185{ 184{
185 if (is_rx_ring(ring))
186 hnae_free_buffers(ring);
187
186 hnae_free_desc(ring); 188 hnae_free_desc(ring);
187 kfree(ring->desc_cb); 189 kfree(ring->desc_cb);
188 ring->desc_cb = NULL; 190 ring->desc_cb = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 08a750fb60c4..d6fb83437230 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
357}; 357};
358 358
359struct hnae_queue { 359struct hnae_queue {
360 void __iomem *io_base; 360 u8 __iomem *io_base;
361 phys_addr_t phy_base; 361 phys_addr_t phy_base;
362 struct hnae_ae_dev *dev; /* the device who use this queue */ 362 struct hnae_ae_dev *dev; /* the device who use this queue */
363 struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; 363 struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a97228c93831..6c0507921623 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
370static void hns_mac_param_get(struct mac_params *param, 370static void hns_mac_param_get(struct mac_params *param,
371 struct hns_mac_cb *mac_cb) 371 struct hns_mac_cb *mac_cb)
372{ 372{
373 param->vaddr = (void *)mac_cb->vaddr; 373 param->vaddr = mac_cb->vaddr;
374 param->mac_mode = hns_get_enet_interface(mac_cb); 374 param->mac_mode = hns_get_enet_interface(mac_cb);
375 ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr); 375 ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
376 param->mac_id = mac_cb->mac_id; 376 param->mac_id = mac_cb->mac_id;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index fbc75341bef7..22589799f1a5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -187,7 +187,7 @@ struct mac_statistics {
187/* MAC params struct; the MAC gets its params from the NIC or DSAF at init */ 187/* MAC params struct; the MAC gets its params from the NIC or DSAF at init */
188struct mac_params { 188struct mac_params {
189 char addr[ETH_ALEN]; 189 char addr[ETH_ALEN];
190 void *vaddr; /*virtual address*/ 190 u8 __iomem *vaddr; /*virtual address*/
191 struct device *dev; 191 struct device *dev;
192 u8 mac_id; 192 u8 mac_id;
193 /**< Ethernet operation mode (MAC-PHY interface and speed) */ 193 /**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
402 enum mac_mode mac_mode; 402 enum mac_mode mac_mode;
403 u8 mac_id; 403 u8 mac_id;
404 struct hns_mac_cb *mac_cb; 404 struct hns_mac_cb *mac_cb;
405 void __iomem *io_base; 405 u8 __iomem *io_base;
406 unsigned int mac_en_flg;/* you'd better not enable mac twice */ 406 unsigned int mac_en_flg;/* you'd better not enable mac twice */
407 unsigned int virt_dev_num; 407 unsigned int virt_dev_num;
408 struct device *dev; 408 struct device *dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index ac55db065f16..61eea6ac846f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
1602 DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); 1602 DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
1603 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, 1603 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
1604 DSAF_TBL_TCAM_KEY_PORT_S, port); 1604 DSAF_TBL_TCAM_KEY_PORT_S, port);
1605
1606 mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
1607} 1605}
1608 1606
1609/** 1607/**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
1663 /* default config dvc to 0 */ 1661 /* default config dvc to 0 */
1664 mac_data.tbl_ucast_dvc = 0; 1662 mac_data.tbl_ucast_dvc = 0;
1665 mac_data.tbl_ucast_out_port = mac_entry->port_num; 1663 mac_data.tbl_ucast_out_port = mac_entry->port_num;
1666 tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); 1664 tcam_data.tbl_tcam_data_high = mac_key.high.val;
1667 tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); 1665 tcam_data.tbl_tcam_data_low = mac_key.low.val;
1668 1666
1669 hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data); 1667 hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
1670 1668
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
1786 0xff, 1784 0xff,
1787 mc_mask); 1785 mc_mask);
1788 1786
1789 mask_key.high.val = le32_to_cpu(mask_key.high.val);
1790 mask_key.low.val = le32_to_cpu(mask_key.low.val);
1791
1792 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); 1787 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
1793 } 1788 }
1794 1789
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
1840 dsaf_dev->ae_dev.name, mac_key.high.val, 1835 dsaf_dev->ae_dev.name, mac_key.high.val,
1841 mac_key.low.val, entry_index); 1836 mac_key.low.val, entry_index);
1842 1837
1843 tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); 1838 tcam_data.tbl_tcam_data_high = mac_key.high.val;
1844 tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); 1839 tcam_data.tbl_tcam_data_low = mac_key.low.val;
1845 1840
1846 /* config mc entry with mask */ 1841 /* config mc entry with mask */
1847 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, 1842 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
1956 /* config key mask */ 1951 /* config key mask */
1957 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); 1952 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
1958 1953
1959 mask_key.high.val = le32_to_cpu(mask_key.high.val);
1960 mask_key.low.val = le32_to_cpu(mask_key.low.val);
1961
1962 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); 1954 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
1963 } 1955 }
1964 1956
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
2012 soft_mac_entry += entry_index; 2004 soft_mac_entry += entry_index;
2013 soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; 2005 soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
2014 } else { /* not zero, just del port, update */ 2006 } else { /* not zero, just del port, update */
2015 tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); 2007 tcam_data.tbl_tcam_data_high = mac_key.high.val;
2016 tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); 2008 tcam_data.tbl_tcam_data_low = mac_key.low.val;
2017 2009
2018 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, 2010 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
2019 &tcam_data, 2011 &tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
2750 return DSAF_DUMP_REGS_NUM; 2742 return DSAF_DUMP_REGS_NUM;
2751} 2743}
2752 2744
2745static int hns_dsaf_get_port_id(u8 port)
2746{
2747 if (port < DSAF_SERVICE_NW_NUM)
2748 return port;
2749
2750 if (port >= DSAF_BASE_INNER_PORT_NUM)
2751 return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
2752
2753 return -EINVAL;
2754}
2755
2753static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) 2756static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
2754{ 2757{
2755 struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; 2758 struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
2815 memset(&temp_key, 0x0, sizeof(temp_key)); 2818 memset(&temp_key, 0x0, sizeof(temp_key));
2816 mask_entry.addr[0] = 0x01; 2819 mask_entry.addr[0] = 0x01;
2817 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, 2820 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
2818 port, mask_entry.addr); 2821 0xf, mask_entry.addr);
2819 tbl_tcam_mcast.tbl_mcast_item_vld = 1; 2822 tbl_tcam_mcast.tbl_mcast_item_vld = 1;
2820 tbl_tcam_mcast.tbl_mcast_old_en = 0; 2823 tbl_tcam_mcast.tbl_mcast_old_en = 0;
2821 2824
2822 if (port < DSAF_SERVICE_NW_NUM) { 2825 /* set MAC port to handle multicast */
2823 mskid = port; 2826 mskid = hns_dsaf_get_port_id(port);
2824 } else if (port >= DSAF_BASE_INNER_PORT_NUM) { 2827 if (mskid == -EINVAL) {
2825 mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
2826 } else {
2827 dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", 2828 dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
2828 dsaf_dev->ae_dev.name, port, 2829 dsaf_dev->ae_dev.name, port,
2829 mask_key.high.val, mask_key.low.val); 2830 mask_key.high.val, mask_key.low.val);
2830 return; 2831 return;
2831 } 2832 }
2833 dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
2834 mskid % 32, 1);
2832 2835
2836 /* set pool bit map to handle multicast */
2837 mskid = hns_dsaf_get_port_id(port_num);
2838 if (mskid == -EINVAL) {
2839 dev_err(dsaf_dev->dev,
2840 "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
2841 dsaf_dev->ae_dev.name, port_num,
2842 mask_key.high.val, mask_key.low.val);
2843 return;
2844 }
2833 dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], 2845 dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
2834 mskid % 32, 1); 2846 mskid % 32, 1);
2847
2835 memcpy(&temp_key, &mask_key, sizeof(mask_key)); 2848 memcpy(&temp_key, &mask_key, sizeof(mask_key));
2836 hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, 2849 hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
2837 (struct dsaf_tbl_tcam_data *)(&mask_key), 2850 (struct dsaf_tbl_tcam_data *)(&mask_key),
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0e1cd99831a6..76cc8887e1a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
467 u8 mac_id, u8 port_num); 467 u8 mac_id, u8 port_num);
468int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); 468int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
469 469
470int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
471
470#endif /* __HNS_DSAF_MAIN_H__ */ 472#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 16294cd3c954..19b94879691f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
670 dsaf_set_field(origin, 1ull << 10, 10, en); 670 dsaf_set_field(origin, 1ull << 10, 10, en);
671 dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); 671 dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
672 } else { 672 } else {
673 u8 *base_addr = (u8 *)mac_cb->serdes_vaddr + 673 u8 __iomem *base_addr = mac_cb->serdes_vaddr +
674 (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000); 674 (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
675 dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); 675 dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
676 } 676 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 3d07c8a7639d..17c019106e6e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
61 } 61 }
62} 62}
63 63
64static void __iomem * 64static u8 __iomem *
65hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) 65hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
66{ 66{
67 return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; 67 return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
111 dsaf_dev->ppe_common[comm_index] = NULL; 111 dsaf_dev->ppe_common[comm_index] = NULL;
112} 112}
113 113
114static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, 114static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
115 int ppe_idx) 115 int ppe_idx)
116{ 116{
117 return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; 117 return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
118} 118}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index f670e63a5a01..110c6e8222c7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
80 struct hns_ppe_hw_stats hw_stats; 80 struct hns_ppe_hw_stats hw_stats;
81 81
82 u8 index; /* index in a ppe common device */ 82 u8 index; /* index in a ppe common device */
83 void __iomem *io_base; 83 u8 __iomem *io_base;
84 int virq; 84 int virq;
85 u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ 85 u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
86 u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ 86 u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
89struct ppe_common_cb { 89struct ppe_common_cb {
90 struct device *dev; 90 struct device *dev;
91 struct dsaf_device *dsaf_dev; 91 struct dsaf_device *dsaf_dev;
92 void __iomem *io_base; 92 u8 __iomem *io_base;
93 93
94 enum ppe_common_mode ppe_mode; 94 enum ppe_common_mode ppe_mode;
95 95
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 6bf346c11b25..ac3518ca4d7b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
458 mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; 458 mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
459 } else { 459 } else {
460 ring = &q->tx_ring; 460 ring = &q->tx_ring;
461 ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base + 461 ring->io_base = ring_pair_cb->q.io_base +
462 HNS_RCB_TX_REG_OFFSET; 462 HNS_RCB_TX_REG_OFFSET;
463 irq_idx = HNS_RCB_IRQ_IDX_TX; 463 irq_idx = HNS_RCB_IRQ_IDX_TX;
464 mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT : 464 mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
764 } 764 }
765} 765}
766 766
767static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) 767static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
768{ 768{
769 struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; 769 struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
770 770
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index b9733b0b8482..b9e7f11f0896 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1018,7 +1018,7 @@
1018#define XGMAC_PAUSE_CTL_RSP_MODE_B 2 1018#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
1019#define XGMAC_PAUSE_CTL_TX_XOFF_B 3 1019#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
1020 1020
1021static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) 1021static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
1022{ 1022{
1023 writel(value, base + reg); 1023 writel(value, base + reg);
1024} 1024}
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
1053#define dsaf_set_bit(origin, shift, val) \ 1053#define dsaf_set_bit(origin, shift, val) \
1054 dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) 1054 dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
1055 1055
1056static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, 1056static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
1057 u32 shift, u32 val) 1057 u32 shift, u32 val)
1058{ 1058{
1059 u32 origin = dsaf_read_reg(base, reg); 1059 u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
1073#define dsaf_get_bit(origin, shift) \ 1073#define dsaf_get_bit(origin, shift) \
1074 dsaf_get_field((origin), (1ull << (shift)), (shift)) 1074 dsaf_get_field((origin), (1ull << (shift)), (shift))
1075 1075
1076static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, 1076static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
1077 u32 shift) 1077 u32 shift)
1078{ 1078{
1079 u32 origin; 1079 u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
1089 dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit)) 1089 dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
1090 1090
1091#define dsaf_write_b(addr, data)\ 1091#define dsaf_write_b(addr, data)\
1092 writeb((data), (__iomem unsigned char *)(addr)) 1092 writeb((data), (__iomem u8 *)(addr))
1093#define dsaf_read_b(addr)\ 1093#define dsaf_read_b(addr)\
1094 readb((__iomem unsigned char *)(addr)) 1094 readb((__iomem u8 *)(addr))
1095 1095
1096#define hns_mac_reg_read64(drv, offset) \ 1096#define hns_mac_reg_read64(drv, offset) \
1097 readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset)))) 1097 readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
1098 1098
1099#endif /* _DSAF_REG_H */ 1099#endif /* _DSAF_REG_H */
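
The hns conversions from void __iomem * (or plain void */u8 * casts) to u8 __iomem * keep two properties at once: byte-granular offset arithmetic without relying on the GCC void-pointer-arithmetic extension, and preservation of the __iomem address-space annotation so sparse can still flag direct dereferences. A minimal sketch of the resulting accessor shape, mirroring dsaf_read_reg above:

static inline u32 example_read(u8 __iomem *base, u32 off)
{
	/* byte offset on a typed pointer; the __iomem annotation is kept,
	 * and readl() remains the only way the register is touched */
	return readl(base + off);
}
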
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index ba4316910dea..a60f207768fc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
129 dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); 129 dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
130 dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); 130 dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
131 dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); 131 dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
132 dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); 132 dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
133} 133}
134 134
135/** 135/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 60e7d7ae3787..4cd86ba1f050 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -29,9 +29,6 @@
29 29
30#define SERVICE_TIMER_HZ (1 * HZ) 30#define SERVICE_TIMER_HZ (1 * HZ)
31 31
32#define NIC_TX_CLEAN_MAX_NUM 256
33#define NIC_RX_CLEAN_MAX_NUM 64
34
35#define RCB_IRQ_NOT_INITED 0 32#define RCB_IRQ_NOT_INITED 0
36#define RCB_IRQ_INITED 1 33#define RCB_IRQ_INITED 1
37#define HNS_BUFFER_SIZE_2048 2048 34#define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
376 wmb(); /* commit all data before submit */ 373 wmb(); /* commit all data before submit */
377 assert(skb->queue_mapping < priv->ae_handle->q_num); 374 assert(skb->queue_mapping < priv->ae_handle->q_num);
378 hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); 375 hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
379 ring->stats.tx_pkts++;
380 ring->stats.tx_bytes += skb->len;
381 376
382 return NETDEV_TX_OK; 377 return NETDEV_TX_OK;
383 378
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
999 /* issue prefetch for next Tx descriptor */ 994 /* issue prefetch for next Tx descriptor */
1000 prefetch(&ring->desc_cb[ring->next_to_clean]); 995 prefetch(&ring->desc_cb[ring->next_to_clean]);
1001 } 996 }
997 /* update tx ring statistics. */
998 ring->stats.tx_pkts += pkts;
999 ring->stats.tx_bytes += bytes;
1002 1000
1003 NETIF_TX_UNLOCK(ring); 1001 NETIF_TX_UNLOCK(ring);
1004 1002
@@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
2152 hns_nic_tx_fini_pro_v2; 2150 hns_nic_tx_fini_pro_v2;
2153 2151
2154 netif_napi_add(priv->netdev, &rd->napi, 2152 netif_napi_add(priv->netdev, &rd->napi,
2155 hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); 2153 hns_nic_common_poll, NAPI_POLL_WEIGHT);
2156 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; 2154 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2157 } 2155 }
2158 for (i = h->q_num; i < h->q_num * 2; i++) { 2156 for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
2165 hns_nic_rx_fini_pro_v2; 2163 hns_nic_rx_fini_pro_v2;
2166 2164
2167 netif_napi_add(priv->netdev, &rd->napi, 2165 netif_napi_add(priv->netdev, &rd->napi,
2168 hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); 2166 hns_nic_common_poll, NAPI_POLL_WEIGHT);
2169 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; 2167 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2170 } 2168 }
2171 2169
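
The hns_enet change moves Tx byte/packet accounting out of the xmit path, where it counted frames merely posted to the ring, into the Tx completion poll, where the counts are accumulated per reclaimed descriptor and published once per poll. Reduced to a sketch (the ring type and reclaim helper are illustrative):

static int example_tx_poll(struct example_ring *ring, int budget)
{
	int pkts = 0, bytes = 0;

	/* reclaim completed descriptors, accumulating what was actually sent */
	while (pkts < budget && example_reclaim_one(ring, &bytes))
		pkts++;

	/* update tx ring statistics once per poll */
	ring->stats.tx_pkts  += pkts;
	ring->stats.tx_bytes += bytes;

	return pkts;
}
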
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index fffe8c1c45d3..0fb61d440d3b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -3,7 +3,7 @@
3# Makefile for the HISILICON network device drivers. 3# Makefile for the HISILICON network device drivers.
4# 4#
5 5
6ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 6ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
7 7
8obj-$(CONFIG_HNS3_HCLGE) += hclge.o 8obj-$(CONFIG_HNS3_HCLGE) += hclge.o
9hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o 9hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index fb93bbd35845..6193f8fa7cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -3,7 +3,7 @@
3# Makefile for the HISILICON network device drivers. 3# Makefile for the HISILICON network device drivers.
4# 4#
5 5
6ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 6ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
7 7
8obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o 8obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
9hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o \ No newline at end of file 9hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o \ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index baf5cc251f32..8b8a7d00e8e0 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
39}; 39};
40 40
41struct hns_mdio_device { 41struct hns_mdio_device {
42 void *vbase; /* mdio reg base address */ 42 u8 __iomem *vbase; /* mdio reg base address */
43 struct regmap *subctrl_vbase; 43 struct regmap *subctrl_vbase;
44 struct hns_mdio_sc_reg sc_reg; 44 struct hns_mdio_sc_reg sc_reg;
45}; 45};
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
96#define MDIO_SC_CLK_ST 0x531C 96#define MDIO_SC_CLK_ST 0x531C
97#define MDIO_SC_RESET_ST 0x5A1C 97#define MDIO_SC_RESET_ST 0x5A1C
98 98
99static void mdio_write_reg(void *base, u32 reg, u32 value) 99static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
100{ 100{
101 u8 __iomem *reg_addr = (u8 __iomem *)base; 101 writel_relaxed(value, base + reg);
102
103 writel_relaxed(value, reg_addr + reg);
104} 102}
105 103
106#define MDIO_WRITE_REG(a, reg, value) \ 104#define MDIO_WRITE_REG(a, reg, value) \
107 mdio_write_reg((a)->vbase, (reg), (value)) 105 mdio_write_reg((a)->vbase, (reg), (value))
108 106
109static u32 mdio_read_reg(void *base, u32 reg) 107static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
110{ 108{
111 u8 __iomem *reg_addr = (u8 __iomem *)base; 109 return readl_relaxed(base + reg);
112
113 return readl_relaxed(reg_addr + reg);
114} 110}
115 111
116#define mdio_set_field(origin, mask, shift, val) \ 112#define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
121 117
122#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask)) 118#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
123 119
124static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, 120static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
125 u32 val) 121 u32 val)
126{ 122{
127 u32 origin = mdio_read_reg(base, reg); 123 u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
133#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \ 129#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
134 mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val)) 130 mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
135 131
136static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) 132static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
137{ 133{
138 u32 origin; 134 u32 origin;
139 135
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5ecbb1adcf3b..51cfe95f3e24 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
1885 */ 1885 */
1886 adapter->state = VNIC_PROBED; 1886 adapter->state = VNIC_PROBED;
1887 1887
1888 reinit_completion(&adapter->init_done);
1888 rc = init_crq_queue(adapter); 1889 rc = init_crq_queue(adapter);
1889 if (rc) { 1890 if (rc) {
1890 netdev_err(adapter->netdev, 1891 netdev_err(adapter->netdev,
@@ -4625,7 +4626,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
4625 old_num_rx_queues = adapter->req_rx_queues; 4626 old_num_rx_queues = adapter->req_rx_queues;
4626 old_num_tx_queues = adapter->req_tx_queues; 4627 old_num_tx_queues = adapter->req_tx_queues;
4627 4628
4628 init_completion(&adapter->init_done); 4629 reinit_completion(&adapter->init_done);
4629 adapter->init_done_rc = 0; 4630 adapter->init_done_rc = 0;
4630 ibmvnic_send_crq_init(adapter); 4631 ibmvnic_send_crq_init(adapter);
4631 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 4632 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4680,7 +4681,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4680 4681
4681 adapter->from_passive_init = false; 4682 adapter->from_passive_init = false;
4682 4683
4683 init_completion(&adapter->init_done);
4684 adapter->init_done_rc = 0; 4684 adapter->init_done_rc = 0;
4685 ibmvnic_send_crq_init(adapter); 4685 ibmvnic_send_crq_init(adapter);
4686 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 4686 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4759,6 +4759,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4759 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); 4759 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4760 INIT_LIST_HEAD(&adapter->rwi_list); 4760 INIT_LIST_HEAD(&adapter->rwi_list);
4761 spin_lock_init(&adapter->rwi_lock); 4761 spin_lock_init(&adapter->rwi_lock);
4762 init_completion(&adapter->init_done);
4762 adapter->resetting = false; 4763 adapter->resetting = false;
4763 4764
4764 adapter->mac_change_pending = false; 4765 adapter->mac_change_pending = false;
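
The ibmvnic change fixes the completion lifecycle: init_completion() is now called exactly once, at probe, and every later reset path re-arms the already-initialised object with reinit_completion() before waiting on it. A self-contained sketch of that pattern (the adapter type and helper names are hypothetical):

struct example_adapter {
	struct completion init_done;
};

static void example_probe_setup(struct example_adapter *a)
{
	init_completion(&a->init_done);		/* once, at probe time */
}

static int example_reset(struct example_adapter *a, unsigned long timeout)
{
	reinit_completion(&a->init_done);	/* re-arm before every reuse */

	/* ...kick off the operation whose handler calls complete(&a->init_done)... */

	if (!wait_for_completion_timeout(&a->init_done, timeout))
		return -ETIMEDOUT;

	return 0;
}
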
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5a0419421511..ecef949f3baa 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
41 /* create driver workqueue */ 41 /* create driver workqueue */
42 fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, 42 fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
43 fm10k_driver_name); 43 fm10k_driver_name);
44 if (!fm10k_workqueue)
45 return -ENOMEM;
44 46
45 fm10k_dbg_init(); 47 fm10k_dbg_init();
46 48
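
The fm10k change simply stops assuming that alloc_workqueue() succeeds; module init now fails cleanly with -ENOMEM instead of crashing later when work is queued on a NULL workqueue. The shape of the check as a sketch (workqueue and name are illustrative):

static struct workqueue_struct *example_wq;

static int __init example_init_module(void)
{
	example_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, "example");
	if (!example_wq)
		return -ENOMEM;		/* fail module load instead of oopsing later */

	return 0;
}
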
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d684998ba2b0..d3cc3427caad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -790,6 +790,8 @@ struct i40e_vsi {
790 790
791 /* VSI specific handlers */ 791 /* VSI specific handlers */
792 irqreturn_t (*irq_handler)(int irq, void *data); 792 irqreturn_t (*irq_handler)(int irq, void *data);
793
794 unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
793} ____cacheline_internodealigned_in_smp; 795} ____cacheline_internodealigned_in_smp;
794 796
795struct i40e_netdev_priv { 797struct i40e_netdev_priv {
@@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
1096 return !!vsi->xdp_prog; 1098 return !!vsi->xdp_prog;
1097} 1099}
1098 1100
1099static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
1100{
1101 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
1102 int qid = ring->queue_index;
1103
1104 if (ring_is_xdp(ring))
1105 qid -= ring->vsi->alloc_queue_pairs;
1106
1107 if (!xdp_on)
1108 return NULL;
1109
1110 return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
1111}
1112
1113int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); 1101int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
1114int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); 1102int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
1115int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, 1103int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4c885801fa26..7874d0ec7fb0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2573 return -EOPNOTSUPP; 2573 return -EOPNOTSUPP;
2574 2574
2575 /* only magic packet is supported */ 2575 /* only magic packet is supported */
2576 if (wol->wolopts && (wol->wolopts != WAKE_MAGIC) 2576 if (wol->wolopts & ~WAKE_MAGIC)
2577 | (wol->wolopts != WAKE_FILTER))
2578 return -EOPNOTSUPP; 2577 return -EOPNOTSUPP;
2579 2578
2580 /* is this a new value? */ 2579 /* is this a new value? */
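
The old i40e test mixed a bitwise OR with two inequality checks, and since wolopts can never equal both WAKE_MAGIC and WAKE_FILTER at once, it rejected every non-zero request, including the one mode the device supports. The replacement masks off the supported bit and rejects anything left over. A tiny stand-alone illustration; the WAKE_* values are assumed to mirror the ethtool UAPI (0x20 and 0x80):

#include <stdio.h>

#define WAKE_MAGIC	0x20
#define WAKE_FILTER	0x80

int main(void)
{
	unsigned int wolopts = WAKE_MAGIC;	/* the one supported request */

	/* Old check: true for any non-zero wolopts, so even WAKE_MAGIC was refused. */
	int old_reject = wolopts && (wolopts != WAKE_MAGIC) | (wolopts != WAKE_FILTER);

	/* New check: refuse only requests carrying unsupported bits. */
	int new_reject = (wolopts & ~WAKE_MAGIC) != 0;

	printf("old rejects WAKE_MAGIC: %d, new rejects WAKE_MAGIC: %d\n",
	       old_reject, new_reject);
	return 0;
}
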
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index da62218eb70a..b1c265012c8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3064,6 +3064,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3064} 3064}
3065 3065
3066/** 3066/**
3067 * i40e_xsk_umem - Retrieve the AF_XDP zero-copy UMEM if XDP and ZC are enabled
3068 * @ring: The Tx or Rx ring
3069 *
3070 * Returns the UMEM or NULL.
3071 **/
3072static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
3073{
3074 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3075 int qid = ring->queue_index;
3076
3077 if (ring_is_xdp(ring))
3078 qid -= ring->vsi->alloc_queue_pairs;
3079
3080 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3081 return NULL;
3082
3083 return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
3084}
3085
3086/**
3067 * i40e_configure_tx_ring - Configure a transmit ring context and rest 3087 * i40e_configure_tx_ring - Configure a transmit ring context and rest
3068 * @ring: The Tx ring to configure 3088 * @ring: The Tx ring to configure
3069 * 3089 *
@@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10064 hash_init(vsi->mac_filter_hash); 10084 hash_init(vsi->mac_filter_hash);
10065 vsi->irqs_ready = false; 10085 vsi->irqs_ready = false;
10066 10086
10087 if (type == I40E_VSI_MAIN) {
10088 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
10089 if (!vsi->af_xdp_zc_qps)
10090 goto err_rings;
10091 }
10092
10067 ret = i40e_set_num_rings_in_vsi(vsi); 10093 ret = i40e_set_num_rings_in_vsi(vsi);
10068 if (ret) 10094 if (ret)
10069 goto err_rings; 10095 goto err_rings;
@@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10082 goto unlock_pf; 10108 goto unlock_pf;
10083 10109
10084err_rings: 10110err_rings:
10111 bitmap_free(vsi->af_xdp_zc_qps);
10085 pf->next_vsi = i - 1; 10112 pf->next_vsi = i - 1;
10086 kfree(vsi); 10113 kfree(vsi);
10087unlock_pf: 10114unlock_pf:
@@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
10162 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 10189 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10163 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); 10190 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10164 10191
10192 bitmap_free(vsi->af_xdp_zc_qps);
10165 i40e_vsi_free_arrays(vsi, true); 10193 i40e_vsi_free_arrays(vsi, true);
10166 i40e_clear_rss_config_user(vsi); 10194 i40e_clear_rss_config_user(vsi);
10167 10195
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 5fb4353c742b..31575c0bb884 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
146static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 146static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
147{ 147{
148 struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); 148 struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
149 struct timespec64 now; 149 struct timespec64 now, then;
150 150
151 then = ns_to_timespec64(delta);
151 mutex_lock(&pf->tmreg_lock); 152 mutex_lock(&pf->tmreg_lock);
152 153
153 i40e_ptp_read(pf, &now, NULL); 154 i40e_ptp_read(pf, &now, NULL);
154 timespec64_add_ns(&now, delta); 155 now = timespec64_add(now, then);
155 i40e_ptp_write(pf, (const struct timespec64 *)&now); 156 i40e_ptp_write(pf, (const struct timespec64 *)&now);
156 157
157 mutex_unlock(&pf->tmreg_lock); 158 mutex_unlock(&pf->tmreg_lock);
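
The ptp adjtime fix matters for negative adjustments: timespec64_add_ns() takes an unsigned nanosecond count, so a negative s64 delta would wrap, whereas converting the delta with ns_to_timespec64() first and adding two timespecs handles both directions. The sign-safe step as a sketch (the wrapper name is illustrative):

static void example_clock_step(struct timespec64 *now, s64 delta_ns)
{
	struct timespec64 then = ns_to_timespec64(delta_ns);	/* sign-aware */

	*now = timespec64_add(*now, then);	/* works for delta_ns < 0 too */
}
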
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index b5c182e688e3..1b17486543ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
102 if (err) 102 if (err)
103 return err; 103 return err;
104 104
105 set_bit(qid, vsi->af_xdp_zc_qps);
106
105 if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); 107 if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
106 108
107 if (if_running) { 109 if (if_running) {
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
148 return err; 150 return err;
149 } 151 }
150 152
153 clear_bit(qid, vsi->af_xdp_zc_qps);
151 i40e_xsk_umem_dma_unmap(vsi, umem); 154 i40e_xsk_umem_dma_unmap(vsi, umem);
152 155
153 if (if_running) { 156 if (if_running) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 01fcfc6f3415..d2e2c50ce257 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -194,6 +194,8 @@
194/* enable link status from external LINK_0 and LINK_1 pins */ 194/* enable link status from external LINK_0 and LINK_1 pins */
195#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 195#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
196#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 196#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
197#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
198#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
197#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ 199#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
198#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ 200#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
199#define E1000_CTRL_RST 0x04000000 /* Global reset */ 201#define E1000_CTRL_RST 0x04000000 /* Global reset */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 69b230c53fed..3269d8e94744 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8740,9 +8740,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8740 struct e1000_hw *hw = &adapter->hw; 8740 struct e1000_hw *hw = &adapter->hw;
8741 u32 ctrl, rctl, status; 8741 u32 ctrl, rctl, status;
8742 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; 8742 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
8743#ifdef CONFIG_PM 8743 bool wake;
8744 int retval = 0;
8745#endif
8746 8744
8747 rtnl_lock(); 8745 rtnl_lock();
8748 netif_device_detach(netdev); 8746 netif_device_detach(netdev);
@@ -8755,14 +8753,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8755 igb_clear_interrupt_scheme(adapter); 8753 igb_clear_interrupt_scheme(adapter);
8756 rtnl_unlock(); 8754 rtnl_unlock();
8757 8755
8758#ifdef CONFIG_PM
8759 if (!runtime) {
8760 retval = pci_save_state(pdev);
8761 if (retval)
8762 return retval;
8763 }
8764#endif
8765
8766 status = rd32(E1000_STATUS); 8756 status = rd32(E1000_STATUS);
8767 if (status & E1000_STATUS_LU) 8757 if (status & E1000_STATUS_LU)
8768 wufc &= ~E1000_WUFC_LNKC; 8758 wufc &= ~E1000_WUFC_LNKC;
@@ -8779,10 +8769,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8779 } 8769 }
8780 8770
8781 ctrl = rd32(E1000_CTRL); 8771 ctrl = rd32(E1000_CTRL);
8782 /* advertise wake from D3Cold */
8783 #define E1000_CTRL_ADVD3WUC 0x00100000
8784 /* phy power management enable */
8785 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
8786 ctrl |= E1000_CTRL_ADVD3WUC; 8772 ctrl |= E1000_CTRL_ADVD3WUC;
8787 wr32(E1000_CTRL, ctrl); 8773 wr32(E1000_CTRL, ctrl);
8788 8774
@@ -8796,12 +8782,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8796 wr32(E1000_WUFC, 0); 8782 wr32(E1000_WUFC, 0);
8797 } 8783 }
8798 8784
8799 *enable_wake = wufc || adapter->en_mng_pt; 8785 wake = wufc || adapter->en_mng_pt;
8800 if (!*enable_wake) 8786 if (!wake)
8801 igb_power_down_link(adapter); 8787 igb_power_down_link(adapter);
8802 else 8788 else
8803 igb_power_up_link(adapter); 8789 igb_power_up_link(adapter);
8804 8790
8791 if (enable_wake)
8792 *enable_wake = wake;
8793
8805 /* Release control of h/w to f/w. If f/w is AMT enabled, this 8794 /* Release control of h/w to f/w. If f/w is AMT enabled, this
8806 * would have already happened in close and is redundant. 8795 * would have already happened in close and is redundant.
8807 */ 8796 */
@@ -8844,22 +8833,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
8844 8833
8845static int __maybe_unused igb_suspend(struct device *dev) 8834static int __maybe_unused igb_suspend(struct device *dev)
8846{ 8835{
8847 int retval; 8836 return __igb_shutdown(to_pci_dev(dev), NULL, 0);
8848 bool wake;
8849 struct pci_dev *pdev = to_pci_dev(dev);
8850
8851 retval = __igb_shutdown(pdev, &wake, 0);
8852 if (retval)
8853 return retval;
8854
8855 if (wake) {
8856 pci_prepare_to_sleep(pdev);
8857 } else {
8858 pci_wake_from_d3(pdev, false);
8859 pci_set_power_state(pdev, PCI_D3hot);
8860 }
8861
8862 return 0;
8863} 8837}
8864 8838
8865static int __maybe_unused igb_resume(struct device *dev) 8839static int __maybe_unused igb_resume(struct device *dev)
@@ -8930,22 +8904,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
8930 8904
8931static int __maybe_unused igb_runtime_suspend(struct device *dev) 8905static int __maybe_unused igb_runtime_suspend(struct device *dev)
8932{ 8906{
8933 struct pci_dev *pdev = to_pci_dev(dev); 8907 return __igb_shutdown(to_pci_dev(dev), NULL, 1);
8934 int retval;
8935 bool wake;
8936
8937 retval = __igb_shutdown(pdev, &wake, 1);
8938 if (retval)
8939 return retval;
8940
8941 if (wake) {
8942 pci_prepare_to_sleep(pdev);
8943 } else {
8944 pci_wake_from_d3(pdev, false);
8945 pci_set_power_state(pdev, PCI_D3hot);
8946 }
8947
8948 return 0;
8949} 8908}
8950 8909
8951static int __maybe_unused igb_runtime_resume(struct device *dev) 8910static int __maybe_unused igb_runtime_resume(struct device *dev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index cc4907f9ff02..2fb97967961c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
905 struct pci_dev *pdev = adapter->pdev; 905 struct pci_dev *pdev = adapter->pdev;
906 struct device *dev = &adapter->netdev->dev; 906 struct device *dev = &adapter->netdev->dev;
907 struct mii_bus *bus; 907 struct mii_bus *bus;
908 int err = -ENODEV;
908 909
909 adapter->mii_bus = devm_mdiobus_alloc(dev); 910 bus = devm_mdiobus_alloc(dev);
910 if (!adapter->mii_bus) 911 if (!bus)
911 return -ENOMEM; 912 return -ENOMEM;
912 913
913 bus = adapter->mii_bus;
914
915 switch (hw->device_id) { 914 switch (hw->device_id) {
916 /* C3000 SoCs */ 915 /* C3000 SoCs */
917 case IXGBE_DEV_ID_X550EM_A_KR: 916 case IXGBE_DEV_ID_X550EM_A_KR:
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
949 */ 948 */
950 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; 949 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
951 950
952 return mdiobus_register(bus); 951 err = mdiobus_register(bus);
952 if (!err) {
953 adapter->mii_bus = bus;
954 return 0;
955 }
953 956
954ixgbe_no_mii_bus: 957ixgbe_no_mii_bus:
955 devm_mdiobus_free(dev, bus); 958 devm_mdiobus_free(dev, bus);
956 adapter->mii_bus = NULL; 959 return err;
957 return -ENODEV;
958} 960}
959 961
960/** 962/**
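The ixgbe change above stops caching the MDIO bus pointer in the adapter until mdiobus_register() has succeeded, so a failed probe can no longer leave a stale adapter->mii_bus behind. A rough userspace sketch of that publish-only-on-success ordering (allocation and registration are faked; none of these helpers are the driver's API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_bus { int registered; };
struct fake_adapter { struct fake_bus *mii_bus; };

static struct fake_bus *fake_bus_alloc(void) { return calloc(1, sizeof(struct fake_bus)); }
static int fake_bus_register(struct fake_bus *bus) { bus->registered = 1; return 0; }

/* Keep the bus in a local until registration succeeds, then publish it. */
static int fake_mii_bus_init(struct fake_adapter *adapter)
{
	struct fake_bus *bus = fake_bus_alloc();
	int err;

	if (!bus)
		return -ENOMEM;

	err = fake_bus_register(bus);
	if (err) {
		free(bus);		/* adapter->mii_bus is never touched */
		return err;
	}

	adapter->mii_bus = bus;		/* visible only after full success */
	return 0;
}

int main(void)
{
	struct fake_adapter adapter = { 0 };
	int err = fake_mii_bus_init(&adapter);

	printf("init=%d, bus published=%d\n", err, adapter.mii_bus != NULL);
	free(adapter.mii_bus);
	return 0;
}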
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 122927f3a600..d5e5afbdca6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
96 if (!eproto) 96 if (!eproto)
97 return -EINVAL; 97 return -EINVAL;
98 98
99 if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
100 return -EOPNOTSUPP;
101
102 err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port); 99 err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
103 if (err) 100 if (err)
104 return err; 101 return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index eac245a93f91..4ab0d030b544 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -122,7 +122,9 @@ out:
122 return err; 122 return err;
123} 123}
124 124
125/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */ 125/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
126 * minimum speed value is 40Gbps
127 */
126static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) 128static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
127{ 129{
128 u32 speed; 130 u32 speed;
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
130 int err; 132 int err;
131 133
132 err = mlx5e_port_linkspeed(priv->mdev, &speed); 134 err = mlx5e_port_linkspeed(priv->mdev, &speed);
133 if (err) { 135 if (err)
134 mlx5_core_warn(priv->mdev, "cannot get port speed\n"); 136 speed = SPEED_40000;
135 return 0; 137 speed = max_t(u32, speed, SPEED_40000);
136 }
137 138
138 xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; 139 xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
139 140
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
142} 143}
143 144
144static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, 145static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
145 u32 xoff, unsigned int mtu) 146 u32 xoff, unsigned int max_mtu)
146{ 147{
147 int i; 148 int i;
148 149
@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
154 } 155 }
155 156
156 if (port_buffer->buffer[i].size < 157 if (port_buffer->buffer[i].size <
157 (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) 158 (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
158 return -ENOMEM; 159 return -ENOMEM;
159 160
160 port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; 161 port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
161 port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu; 162 port_buffer->buffer[i].xon =
163 port_buffer->buffer[i].xoff - max_mtu;
162 } 164 }
163 165
164 return 0; 166 return 0;
@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
166 168
167/** 169/**
168 * update_buffer_lossy() 170 * update_buffer_lossy()
169 * mtu: device's MTU 171 * max_mtu: netdev's max_mtu
170 * pfc_en: <input> current pfc configuration 172 * pfc_en: <input> current pfc configuration
171 * buffer: <input> current prio to buffer mapping 173 * buffer: <input> current prio to buffer mapping
172 * xoff: <input> xoff value 174 * xoff: <input> xoff value
@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
183 * Return 0 if no error. 185 * Return 0 if no error.
184 * Set change to true if buffer configuration is modified. 186 * Set change to true if buffer configuration is modified.
185 */ 187 */
186static int update_buffer_lossy(unsigned int mtu, 188static int update_buffer_lossy(unsigned int max_mtu,
187 u8 pfc_en, u8 *buffer, u32 xoff, 189 u8 pfc_en, u8 *buffer, u32 xoff,
188 struct mlx5e_port_buffer *port_buffer, 190 struct mlx5e_port_buffer *port_buffer,
189 bool *change) 191 bool *change)
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
220 } 222 }
221 223
222 if (changed) { 224 if (changed) {
223 err = update_xoff_threshold(port_buffer, xoff, mtu); 225 err = update_xoff_threshold(port_buffer, xoff, max_mtu);
224 if (err) 226 if (err)
225 return err; 227 return err;
226 228
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
230 return 0; 232 return 0;
231} 233}
232 234
235#define MINIMUM_MAX_MTU 9216
233int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, 236int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
234 u32 change, unsigned int mtu, 237 u32 change, unsigned int mtu,
235 struct ieee_pfc *pfc, 238 struct ieee_pfc *pfc,
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
241 bool update_prio2buffer = false; 244 bool update_prio2buffer = false;
242 u8 buffer[MLX5E_MAX_PRIORITY]; 245 u8 buffer[MLX5E_MAX_PRIORITY];
243 bool update_buffer = false; 246 bool update_buffer = false;
247 unsigned int max_mtu;
244 u32 total_used = 0; 248 u32 total_used = 0;
245 u8 curr_pfc_en; 249 u8 curr_pfc_en;
246 int err; 250 int err;
247 int i; 251 int i;
248 252
249 mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); 253 mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
254 max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
250 255
251 err = mlx5e_port_query_buffer(priv, &port_buffer); 256 err = mlx5e_port_query_buffer(priv, &port_buffer);
252 if (err) 257 if (err)
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
254 259
255 if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { 260 if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
256 update_buffer = true; 261 update_buffer = true;
257 err = update_xoff_threshold(&port_buffer, xoff, mtu); 262 err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
258 if (err) 263 if (err)
259 return err; 264 return err;
260 } 265 }
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
264 if (err) 269 if (err)
265 return err; 270 return err;
266 271
267 err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff, 272 err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
268 &port_buffer, &update_buffer); 273 &port_buffer, &update_buffer);
269 if (err) 274 if (err)
270 return err; 275 return err;
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
276 if (err) 281 if (err)
277 return err; 282 return err;
278 283
279 err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff, 284 err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
280 &port_buffer, &update_buffer); 285 xoff, &port_buffer, &update_buffer);
281 if (err) 286 if (err)
282 return err; 287 return err;
283 } 288 }
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
301 return -EINVAL; 306 return -EINVAL;
302 307
303 update_buffer = true; 308 update_buffer = true;
304 err = update_xoff_threshold(&port_buffer, xoff, mtu); 309 err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
305 if (err) 310 if (err)
306 return err; 311 return err;
307 } 312 }
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
309 /* Need to update buffer configuration if xoff value is changed */ 314 /* Need to update buffer configuration if xoff value is changed */
310 if (!update_buffer && xoff != priv->dcbx.xoff) { 315 if (!update_buffer && xoff != priv->dcbx.xoff) {
311 update_buffer = true; 316 update_buffer = true;
312 err = update_xoff_threshold(&port_buffer, xoff, mtu); 317 err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
313 if (err) 318 if (err)
314 return err; 319 return err;
315 } 320 }
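The port-buffer hunks above make two substitutions in the threshold math: the queried link speed is floored at 40 Gbps instead of aborting when the query fails, and the thresholds are sized for the netdev's max_mtu (itself floored at 9216 bytes) rather than the currently configured MTU. A small arithmetic sketch of the resulting calculation, reusing the constants from the hunk's comment; the concrete numbers are illustrative only:

#include <stdio.h>

#define MIN_SPEED_MBPS	40000	/* 40 Gbps floor from calculate_xoff() */
#define MINIMUM_MAX_MTU	9216	/* floor applied to netdev->max_mtu */
#define CELL_SIZE	128	/* stand-in for 1 << MLX5E_BUFFER_CELL_SHIFT */

/* xoff = ((301 + 2.16 * len[m]) * speed[Gbps] + 2.72 * MTU[B]),
 * written with the same integer scaling as the driver comment. */
static unsigned int calc_xoff(unsigned int speed_mbps, unsigned int cable_len_m,
			      unsigned int mtu)
{
	if (speed_mbps < MIN_SPEED_MBPS)
		speed_mbps = MIN_SPEED_MBPS;

	return (301 + 216 * cable_len_m / 100) * speed_mbps / 1000 +
	       272 * mtu / 100;
}

int main(void)
{
	unsigned int mtu = 1500, netdev_max_mtu = 9978, cable_len_m = 7;
	unsigned int xoff = calc_xoff(100000, cable_len_m, mtu);	/* 100G link */
	unsigned int max_mtu = netdev_max_mtu > MINIMUM_MAX_MTU ?
			       netdev_max_mtu : MINIMUM_MAX_MTU;
	unsigned int buf_size = 256 * 1024;

	/* the buffer must hold xoff + max_mtu + one cell; xon trails xoff by max_mtu */
	if (buf_size < xoff + max_mtu + CELL_SIZE) {
		printf("buffer too small\n");
		return 1;
	}
	printf("xoff threshold = %u, xon threshold = %u\n",
	       buf_size - xoff, buf_size - xoff - max_mtu);
	return 0;
}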
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 3078491cc0d0..1539cf3de5dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
45 if (err) 45 if (err)
46 return err; 46 return err;
47 47
48 mutex_lock(&mdev->mlx5e_res.td.list_lock);
48 list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); 49 list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
50 mutex_unlock(&mdev->mlx5e_res.td.list_lock);
49 51
50 return 0; 52 return 0;
51} 53}
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
53void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, 55void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
54 struct mlx5e_tir *tir) 56 struct mlx5e_tir *tir)
55{ 57{
58 mutex_lock(&mdev->mlx5e_res.td.list_lock);
56 mlx5_core_destroy_tir(mdev, tir->tirn); 59 mlx5_core_destroy_tir(mdev, tir->tirn);
57 list_del(&tir->list); 60 list_del(&tir->list);
61 mutex_unlock(&mdev->mlx5e_res.td.list_lock);
58} 62}
59 63
60static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, 64static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
114 } 118 }
115 119
116 INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); 120 INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
121 mutex_init(&mdev->mlx5e_res.td.list_lock);
117 122
118 return 0; 123 return 0;
119 124
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
141{ 146{
142 struct mlx5_core_dev *mdev = priv->mdev; 147 struct mlx5_core_dev *mdev = priv->mdev;
143 struct mlx5e_tir *tir; 148 struct mlx5e_tir *tir;
144 int err = -ENOMEM; 149 int err = 0;
145 u32 tirn = 0; 150 u32 tirn = 0;
146 int inlen; 151 int inlen;
147 void *in; 152 void *in;
148 153
149 inlen = MLX5_ST_SZ_BYTES(modify_tir_in); 154 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
150 in = kvzalloc(inlen, GFP_KERNEL); 155 in = kvzalloc(inlen, GFP_KERNEL);
151 if (!in) 156 if (!in) {
157 err = -ENOMEM;
152 goto out; 158 goto out;
159 }
153 160
154 if (enable_uc_lb) 161 if (enable_uc_lb)
155 MLX5_SET(modify_tir_in, in, ctx.self_lb_block, 162 MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
157 164
158 MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); 165 MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
159 166
167 mutex_lock(&mdev->mlx5e_res.td.list_lock);
160 list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { 168 list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
161 tirn = tir->tirn; 169 tirn = tir->tirn;
162 err = mlx5_core_modify_tir(mdev, tirn, in, inlen); 170 err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@@ -168,6 +176,7 @@ out:
168 kvfree(in); 176 kvfree(in);
169 if (err) 177 if (err)
170 netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); 178 netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
179 mutex_unlock(&mdev->mlx5e_res.td.list_lock);
171 180
172 return err; 181 return err;
173} 182}
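The en_common.c hunks above add td.list_lock so that TIR creation, destruction, and the loopback refresh walk all serialise on one mutex instead of racing on tirs_list. A stripped-down userspace sketch of the same discipline, with a pthread mutex guarding a singly linked list (the structures are invented for the example; build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct tir {
	int tirn;
	struct tir *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tir *tirs_list;

/* add and remove always happen under the same lock ... */
static void tir_add(struct tir *tir)
{
	pthread_mutex_lock(&list_lock);
	tir->next = tirs_list;
	tirs_list = tir;
	pthread_mutex_unlock(&list_lock);
}

static void tir_del(struct tir *tir)
{
	struct tir **pp;

	pthread_mutex_lock(&list_lock);
	for (pp = &tirs_list; *pp; pp = &(*pp)->next) {
		if (*pp == tir) {
			*pp = tir->next;
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);
}

/* ... as does the walk that "refreshes" every entry. */
static void tir_refresh_all(void)
{
	struct tir *t;

	pthread_mutex_lock(&list_lock);
	for (t = tirs_list; t; t = t->next)
		printf("refresh tir 0x%x\n", t->tirn);
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct tir a = { .tirn = 0x10 }, b = { .tirn = 0x20 };

	tir_add(&a);
	tir_add(&b);
	tir_refresh_all();
	tir_del(&a);
	tir_del(&b);
	return 0;
}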
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index a0987cc5fe4a..5efce4a3ff79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
603 __ETHTOOL_LINK_MODE_MASK_NBITS); 603 __ETHTOOL_LINK_MODE_MASK_NBITS);
604} 604}
605 605
606static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev, 606static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
607 unsigned long *advertising_modes, 607 u32 eth_proto_cap, bool ext)
608 u32 eth_proto_cap)
609{ 608{
610 unsigned long proto_cap = eth_proto_cap; 609 unsigned long proto_cap = eth_proto_cap;
611 struct ptys2ethtool_config *table; 610 struct ptys2ethtool_config *table;
612 u32 max_size; 611 u32 max_size;
613 int proto; 612 int proto;
614 613
615 mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); 614 table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
615 max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
616 ARRAY_SIZE(ptys2legacy_ethtool_table);
617
616 for_each_set_bit(proto, &proto_cap, max_size) 618 for_each_set_bit(proto, &proto_cap, max_size)
617 bitmap_or(advertising_modes, advertising_modes, 619 bitmap_or(advertising_modes, advertising_modes,
618 table[proto].advertised, 620 table[proto].advertised,
@@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
794 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); 796 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
795} 797}
796 798
797static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap, 799static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
798 u8 tx_pause, u8 rx_pause, 800 struct ethtool_link_ksettings *link_ksettings,
799 struct ethtool_link_ksettings *link_ksettings) 801 bool ext)
800{ 802{
801 unsigned long *advertising = link_ksettings->link_modes.advertising; 803 unsigned long *advertising = link_ksettings->link_modes.advertising;
802 ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap); 804 ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
803 805
804 if (rx_pause) 806 if (rx_pause)
805 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); 807 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
@@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
854 struct ethtool_link_ksettings *link_ksettings) 856 struct ethtool_link_ksettings *link_ksettings)
855{ 857{
856 unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; 858 unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
859 bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
857 860
858 ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp); 861 ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
859} 862}
860 863
861int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, 864int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
872 u8 an_disable_admin; 875 u8 an_disable_admin;
873 u8 an_status; 876 u8 an_status;
874 u8 connector_type; 877 u8 connector_type;
878 bool admin_ext;
875 bool ext; 879 bool ext;
876 int err; 880 int err;
877 881
@@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
886 eth_proto_capability); 890 eth_proto_capability);
887 eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 891 eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
888 eth_proto_admin); 892 eth_proto_admin);
893 /* Fields: eth_proto_admin and ext_eth_proto_admin are
894 * mutually exclusive. Hence try reading legacy advertising
895 * when extended advertising is zero.
896 * admin_ext indicates how eth_proto_admin should be
897 * interpreted
898 */
899 admin_ext = ext;
900 if (ext && !eth_proto_admin) {
901 eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
902 eth_proto_admin);
903 admin_ext = false;
904 }
905
889 eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 906 eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
890 eth_proto_oper); 907 eth_proto_oper);
891 eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); 908 eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
@@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
899 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); 916 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
900 917
901 get_supported(mdev, eth_proto_cap, link_ksettings); 918 get_supported(mdev, eth_proto_cap, link_ksettings);
902 get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings); 919 get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
920 admin_ext);
903 get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); 921 get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
904 922
905 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; 923 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
997 1015
998#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) 1016#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
999 1017
1000 ext_requested = (link_ksettings->link_modes.advertising[0] > 1018 ext_requested = !!(link_ksettings->link_modes.advertising[0] >
1001 MLX5E_PTYS_EXT); 1019 MLX5E_PTYS_EXT ||
1020 link_ksettings->link_modes.advertising[1]);
1002 ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 1021 ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
1003 1022 ext_requested &= ext_supported;
1004 /*when ptys_extended_ethernet is set legacy link modes are deprecated */
1005 if (ext_requested != ext_supported)
1006 return -EPROTONOSUPPORT;
1007 1023
1008 speed = link_ksettings->base.speed; 1024 speed = link_ksettings->base.speed;
1009 ethtool2ptys_adver_func = ext_requested ? 1025 ethtool2ptys_adver_func = ext_requested ?
1010 mlx5e_ethtool2ptys_ext_adver_link : 1026 mlx5e_ethtool2ptys_ext_adver_link :
1011 mlx5e_ethtool2ptys_adver_link; 1027 mlx5e_ethtool2ptys_adver_link;
1012 err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto); 1028 err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
1013 if (err) { 1029 if (err) {
1014 netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", 1030 netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
1015 __func__, err); 1031 __func__, err);
@@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
1037 if (!an_changes && link_modes == eproto.admin) 1053 if (!an_changes && link_modes == eproto.admin)
1038 goto out; 1054 goto out;
1039 1055
1040 mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported); 1056 mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
1041 mlx5_toggle_port_link(mdev); 1057 mlx5_toggle_port_link(mdev);
1042 1058
1043out: 1059out:
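The ethtool hunks above make the advertising decode pick the legacy or extended PTYS table explicitly via an 'ext' flag, and fall back to the legacy admin field when the extended one reads back zero. A toy sketch of that selection and fallback (tables, bit positions, and field values are invented):

#include <stdint.h>
#include <stdio.h>

static const char *legacy_table[] = { "1000baseT", "10GbaseSR" };
static const char *ext_table[]    = { "25GbaseCR", "50GbaseKR", "100GbaseCR2" };

/* Walk whichever table matches how the proto word should be interpreted. */
static void decode_adver(uint32_t proto, int ext)
{
	const char **table = ext ? ext_table : legacy_table;
	size_t max = ext ? sizeof(ext_table) / sizeof(ext_table[0])
			 : sizeof(legacy_table) / sizeof(legacy_table[0]);

	for (size_t i = 0; i < max; i++)
		if (proto & (1u << i))
			printf("  %s\n", table[i]);
}

int main(void)
{
	int ext_capable = 1;
	uint32_t ext_admin = 0;		/* extended admin field came back empty */
	uint32_t legacy_admin = 0x2;	/* so fall back to the legacy field */
	int admin_ext = ext_capable && ext_admin != 0;

	printf("advertised modes:\n");
	decode_adver(admin_ext ? ext_admin : legacy_admin, admin_ext);
	return 0;
}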
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b4967a0ff8c7..d75dc44eb2ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2158,6 +2158,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
2158 return true; 2158 return true;
2159} 2159}
2160 2160
2161struct ip_ttl_word {
2162 __u8 ttl;
2163 __u8 protocol;
2164 __sum16 check;
2165};
2166
2167struct ipv6_hoplimit_word {
2168 __be16 payload_len;
2169 __u8 nexthdr;
2170 __u8 hop_limit;
2171};
2172
2173static bool is_action_keys_supported(const struct flow_action_entry *act)
2174{
2175 u32 mask, offset;
2176 u8 htype;
2177
2178 htype = act->mangle.htype;
2179 offset = act->mangle.offset;
2180 mask = ~act->mangle.mask;
2181 /* For IPv4 & IPv6 header check 4 byte word,
2182 * to determine that modified fields
2183 * are NOT ttl & hop_limit only.
2184 */
2185 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
2186 struct ip_ttl_word *ttl_word =
2187 (struct ip_ttl_word *)&mask;
2188
2189 if (offset != offsetof(struct iphdr, ttl) ||
2190 ttl_word->protocol ||
2191 ttl_word->check) {
2192 return true;
2193 }
2194 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
2195 struct ipv6_hoplimit_word *hoplimit_word =
2196 (struct ipv6_hoplimit_word *)&mask;
2197
2198 if (offset != offsetof(struct ipv6hdr, payload_len) ||
2199 hoplimit_word->payload_len ||
2200 hoplimit_word->nexthdr) {
2201 return true;
2202 }
2203 }
2204 return false;
2205}
2206
2161static bool modify_header_match_supported(struct mlx5_flow_spec *spec, 2207static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2162 struct flow_action *flow_action, 2208 struct flow_action *flow_action,
2163 u32 actions, 2209 u32 actions,
@@ -2165,9 +2211,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2165{ 2211{
2166 const struct flow_action_entry *act; 2212 const struct flow_action_entry *act;
2167 bool modify_ip_header; 2213 bool modify_ip_header;
2168 u8 htype, ip_proto;
2169 void *headers_v; 2214 void *headers_v;
2170 u16 ethertype; 2215 u16 ethertype;
2216 u8 ip_proto;
2171 int i; 2217 int i;
2172 2218
2173 if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) 2219 if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
@@ -2187,9 +2233,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2187 act->id != FLOW_ACTION_ADD) 2233 act->id != FLOW_ACTION_ADD)
2188 continue; 2234 continue;
2189 2235
2190 htype = act->mangle.htype; 2236 if (is_action_keys_supported(act)) {
2191 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
2192 htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
2193 modify_ip_header = true; 2237 modify_ip_header = true;
2194 break; 2238 break;
2195 } 2239 }
@@ -2340,15 +2384,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
2340 return 0; 2384 return 0;
2341} 2385}
2342 2386
2343static inline int cmp_encap_info(struct ip_tunnel_key *a, 2387struct encap_key {
2344 struct ip_tunnel_key *b) 2388 struct ip_tunnel_key *ip_tun_key;
2389 int tunnel_type;
2390};
2391
2392static inline int cmp_encap_info(struct encap_key *a,
2393 struct encap_key *b)
2345{ 2394{
2346 return memcmp(a, b, sizeof(*a)); 2395 return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
2396 a->tunnel_type != b->tunnel_type;
2347} 2397}
2348 2398
2349static inline int hash_encap_info(struct ip_tunnel_key *key) 2399static inline int hash_encap_info(struct encap_key *key)
2350{ 2400{
2351 return jhash(key, sizeof(*key), 0); 2401 return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
2402 key->tunnel_type);
2352} 2403}
2353 2404
2354 2405
@@ -2379,7 +2430,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2379 struct mlx5_esw_flow_attr *attr = flow->esw_attr; 2430 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
2380 struct mlx5e_tc_flow_parse_attr *parse_attr; 2431 struct mlx5e_tc_flow_parse_attr *parse_attr;
2381 struct ip_tunnel_info *tun_info; 2432 struct ip_tunnel_info *tun_info;
2382 struct ip_tunnel_key *key; 2433 struct encap_key key, e_key;
2383 struct mlx5e_encap_entry *e; 2434 struct mlx5e_encap_entry *e;
2384 unsigned short family; 2435 unsigned short family;
2385 uintptr_t hash_key; 2436 uintptr_t hash_key;
@@ -2389,13 +2440,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2389 parse_attr = attr->parse_attr; 2440 parse_attr = attr->parse_attr;
2390 tun_info = &parse_attr->tun_info[out_index]; 2441 tun_info = &parse_attr->tun_info[out_index];
2391 family = ip_tunnel_info_af(tun_info); 2442 family = ip_tunnel_info_af(tun_info);
2392 key = &tun_info->key; 2443 key.ip_tun_key = &tun_info->key;
2444 key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
2393 2445
2394 hash_key = hash_encap_info(key); 2446 hash_key = hash_encap_info(&key);
2395 2447
2396 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, 2448 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
2397 encap_hlist, hash_key) { 2449 encap_hlist, hash_key) {
2398 if (!cmp_encap_info(&e->tun_info.key, key)) { 2450 e_key.ip_tun_key = &e->tun_info.key;
2451 e_key.tunnel_type = e->tunnel_type;
2452 if (!cmp_encap_info(&e_key, &key)) {
2399 found = true; 2453 found = true;
2400 break; 2454 break;
2401 } 2455 }
@@ -2657,7 +2711,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
2657 2711
2658 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || 2712 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
2659 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { 2713 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
2660 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, 2714 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
2661 parse_attr, hdrs, extack); 2715 parse_attr, hdrs, extack);
2662 if (err) 2716 if (err)
2663 return err; 2717 return err;
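is_action_keys_supported() above decides whether a pedit rewrites anything besides ttl/hop_limit by overlaying a small struct on the 4-byte mangle mask and checking the neighbouring fields. A compact sketch of that overlay trick for the IPv4 case; the layout mirrors the hunk's ip_ttl_word, the mask values are made up, and the byte mapping shown assumes a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* 4-byte window of the IPv4 header starting at the ttl field,
 * mirroring the ip_ttl_word layout used in the hunk. */
struct ip_ttl_word {
	uint8_t  ttl;
	uint8_t  protocol;
	uint16_t check;
};

/* Return 1 when the mangle rewrites more than the ttl byte. */
static int touches_more_than_ttl(uint32_t mangle_mask)
{
	uint32_t mask = ~mangle_mask;	/* bits actually rewritten */
	struct ip_ttl_word w;

	memcpy(&w, &mask, sizeof(w));	/* overlay the word on the mask */
	return w.protocol || w.check;
}

int main(void)
{
	/* Only the low byte (ttl on little-endian) is rewritten. */
	printf("ttl only  -> %d\n", touches_more_than_ttl(~(uint32_t)0x000000ff));
	/* Here the protocol byte is rewritten as well. */
	printf("ttl+proto -> %d\n", touches_more_than_ttl(~(uint32_t)0x0000ffff));
	return 0;
}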
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index ecd2c747f726..8a67fd197b79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
105 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); 105 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
106 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); 106 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
107 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); 107 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
108 if (vport) 108 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
109 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
110 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, 109 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
111 in, nic_vport_context); 110 in, nic_vport_context);
112 111
@@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
134 MLX5_SET(modify_esw_vport_context_in, in, opcode, 133 MLX5_SET(modify_esw_vport_context_in, in, opcode,
135 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); 134 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
136 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); 135 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
137 if (vport) 136 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
138 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
139 return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); 137 return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
140} 138}
141 139
@@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
431{ 429{
432 int err; 430 int err;
433 431
432 memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
433
434 err = esw_create_legacy_vepa_table(esw); 434 err = esw_create_legacy_vepa_table(esw);
435 if (err) 435 if (err)
436 return err; 436 return err;
@@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
2157 2157
2158 /* Star rule to forward all traffic to uplink vport */ 2158 /* Star rule to forward all traffic to uplink vport */
2159 memset(spec, 0, sizeof(*spec)); 2159 memset(spec, 0, sizeof(*spec));
2160 memset(&dest, 0, sizeof(dest));
2160 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 2161 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
2161 dest.vport.num = MLX5_VPORT_UPLINK; 2162 dest.vport.num = MLX5_VPORT_UPLINK;
2162 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2163 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f2260391be5b..9b2d78ee22b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1611,6 +1611,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
1611{ 1611{
1612 int err; 1612 int err;
1613 1613
1614 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
1614 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); 1615 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
1615 1616
1616 err = esw_create_offloads_fdb_tables(esw, nvports); 1617 err = esw_create_offloads_fdb_tables(esw, nvports);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 5cf5f2a9d51f..8de64e88c670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
217 void *cmd; 217 void *cmd;
218 int ret; 218 int ret;
219 219
220 rcu_read_lock();
221 flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
222 rcu_read_unlock();
223
224 if (!flow) {
225 WARN_ONCE(1, "Received NULL pointer for handle\n");
226 return -EINVAL;
227 }
228
220 buf = kzalloc(size, GFP_ATOMIC); 229 buf = kzalloc(size, GFP_ATOMIC);
221 if (!buf) 230 if (!buf)
222 return -ENOMEM; 231 return -ENOMEM;
223 232
224 cmd = (buf + 1); 233 cmd = (buf + 1);
225 234
226 rcu_read_lock();
227 flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
228 rcu_read_unlock();
229 mlx5_fpga_tls_flow_to_cmd(flow, cmd); 235 mlx5_fpga_tls_flow_to_cmd(flow, cmd);
230 236
231 MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); 237 MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
@@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
238 buf->complete = mlx_tls_kfree_complete; 244 buf->complete = mlx_tls_kfree_complete;
239 245
240 ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); 246 ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
247 if (ret < 0)
248 kfree(buf);
241 249
242 return ret; 250 return ret;
243} 251}
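The fpga/tls hunk above moves the idr lookup ahead of the buffer allocation, bails out when no flow is found, and frees the command buffer if the send fails, closing both a NULL dereference and a memory leak. A generic sketch of that validate-then-allocate-then-clean-up shape (lookup and send are stubbed; nothing here is the mlx5 API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct flow { int id; };

/* Stubs standing in for the idr lookup and the async send. */
static struct flow *lookup_flow(int handle)
{
	static struct flow known = { .id = 42 };

	return handle == 42 ? &known : NULL;
}

static int send_cmd(void *buf) { (void)buf; return -EAGAIN; /* pretend the send failed */ }

static int resync_rx(int handle)
{
	struct flow *flow;
	void *buf;
	int ret;

	flow = lookup_flow(handle);	/* validate before allocating anything */
	if (!flow)
		return -EINVAL;

	buf = calloc(1, 64);
	if (!buf)
		return -ENOMEM;

	ret = send_cmd(buf);
	if (ret < 0)
		free(buf);	/* on success a completion callback would own buf */

	return ret;
}

int main(void)
{
	printf("missing flow -> %d\n", resync_rx(7));
	printf("failed send  -> %d\n", resync_rx(42));
	return 0;
}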
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 70cc906a102b..76716419370d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = {
164 .size = 8, 164 .size = 8,
165 .limit = 4 165 .limit = 4
166 }, 166 },
167 .mr_cache[16] = {
168 .size = 8,
169 .limit = 4
170 },
171 .mr_cache[17] = {
172 .size = 8,
173 .limit = 4
174 },
175 .mr_cache[18] = {
176 .size = 8,
177 .limit = 4
178 },
179 .mr_cache[19] = {
180 .size = 4,
181 .limit = 2
182 },
183 .mr_cache[20] = {
184 .size = 4,
185 .limit = 2
186 },
187 }, 167 },
188}; 168};
189 169
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index eeda4ed98333..e336f6ee94f5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
48 48
49 tmp_push_vlan_tci = 49 tmp_push_vlan_tci =
50 FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) | 50 FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
51 FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) | 51 FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
52 NFP_FL_PUSH_VLAN_CFI;
53 push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); 52 push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
54} 53}
55 54
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 4fcaf11ed56e..0ed51e79db00 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -26,7 +26,7 @@
26#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) 26#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
27 27
28#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) 28#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
29#define NFP_FLOWER_MASK_VLAN_CFI BIT(12) 29#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
30#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) 30#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
31 31
32#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) 32#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
@@ -82,7 +82,6 @@
82#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) 82#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0)
83 83
84#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13) 84#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
85#define NFP_FL_PUSH_VLAN_CFI BIT(12)
86#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) 85#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
87 86
88#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) 87#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e03c8ef2c28c..9b8b843d0340 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
30 30
31 flow_rule_match_vlan(rule, &match); 31 flow_rule_match_vlan(rule, &match);
32 /* Populate the tci field. */ 32 /* Populate the tci field. */
33 if (match.key->vlan_id || match.key->vlan_priority) { 33 tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
34 tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, 34 tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
35 match.key->vlan_priority) | 35 match.key->vlan_priority) |
36 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, 36 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
37 match.key->vlan_id) | 37 match.key->vlan_id);
38 NFP_FLOWER_MASK_VLAN_CFI; 38 ext->tci = cpu_to_be16(tmp_tci);
39 ext->tci = cpu_to_be16(tmp_tci); 39
40 tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, 40 tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
41 match.mask->vlan_priority) | 41 tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
42 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, 42 match.mask->vlan_priority) |
43 match.mask->vlan_id) | 43 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
44 NFP_FLOWER_MASK_VLAN_CFI; 44 match.mask->vlan_id);
45 msk->tci = cpu_to_be16(tmp_tci); 45 msk->tci = cpu_to_be16(tmp_tci);
46 }
47 } 46 }
48} 47}
49 48
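The nfp flower hunks above repurpose bit 12 of the TCI word as a "VLAN present" flag rather than CFI, and populate the match even when both VID and priority are zero. A tiny sketch of building such a TCI value, with the field layout copied from the hunk's masks (the helper itself is made up):

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRESENT	(1u << 12)	/* NFP_FLOWER_MASK_VLAN_PRESENT */
#define VLAN_PRIO_SHIFT	13		/* GENMASK(15, 13) */
#define VLAN_VID_MASK	0x0fffu		/* GENMASK(11, 0) */

static uint16_t build_tci(uint16_t vid, uint8_t prio)
{
	/* the present bit is always set for a matched VLAN, even vid=0/prio=0 */
	return VLAN_PRESENT |
	       (uint16_t)((prio & 0x7) << VLAN_PRIO_SHIFT) |
	       (vid & VLAN_VID_MASK);
}

int main(void)
{
	printf("vid=0   prio=0 -> tci=0x%04x\n", build_tci(0, 0));
	printf("vid=100 prio=5 -> tci=0x%04x\n", build_tci(100, 5));
	return 0;
}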
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index d2c803bb4e56..94d228c04496 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
195 ret = dev_queue_xmit(skb); 195 ret = dev_queue_xmit(skb);
196 nfp_repr_inc_tx_stats(netdev, len, ret); 196 nfp_repr_inc_tx_stats(netdev, len, ret);
197 197
198 return ret; 198 return NETDEV_TX_OK;
199} 199}
200 200
201static int nfp_repr_stop(struct net_device *netdev) 201static int nfp_repr_stop(struct net_device *netdev)
@@ -383,7 +383,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
383 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 383 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
384 netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; 384 netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
385 385
386 netdev->priv_flags |= IFF_NO_QUEUE; 386 netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
387 netdev->features |= NETIF_F_LLTX; 387 netdev->features |= NETIF_F_LLTX;
388 388
389 if (nfp_app_has_tc(app)) { 389 if (nfp_app_has_tc(app)) {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 7562ccbbb39a..19efa88f3f02 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5460,7 +5460,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
5460 tp->cp_cmd |= PktCntrDisable | INTT_1; 5460 tp->cp_cmd |= PktCntrDisable | INTT_1;
5461 RTL_W16(tp, CPlusCmd, tp->cp_cmd); 5461 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
5462 5462
5463 RTL_W16(tp, IntrMitigate, 0x5151); 5463 RTL_W16(tp, IntrMitigate, 0x5100);
5464 5464
5465 /* Work around for RxFIFO overflow. */ 5465 /* Work around for RxFIFO overflow. */
5466 if (tp->mac_version == RTL_GIGA_MAC_VER_11) { 5466 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 40d6356a7e73..3dfb07a78952 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -29,11 +29,13 @@
29/* Specific functions used for Ring mode */ 29/* Specific functions used for Ring mode */
30 30
31/* Enhanced descriptors */ 31/* Enhanced descriptors */
32static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) 32static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
33 int bfsize)
33{ 34{
34 p->des1 |= cpu_to_le32((BUF_SIZE_8KiB 35 if (bfsize == BUF_SIZE_16KiB)
35 << ERDES1_BUFFER2_SIZE_SHIFT) 36 p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
36 & ERDES1_BUFFER2_SIZE_MASK); 37 << ERDES1_BUFFER2_SIZE_SHIFT)
38 & ERDES1_BUFFER2_SIZE_MASK);
37 39
38 if (end) 40 if (end)
39 p->des1 |= cpu_to_le32(ERDES1_END_RING); 41 p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
59} 61}
60 62
61/* Normal descriptors */ 63/* Normal descriptors */
62static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) 64static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
63{ 65{
64 p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1) 66 if (bfsize >= BUF_SIZE_2KiB) {
65 << RDES1_BUFFER2_SIZE_SHIFT) 67 int bfsize2;
66 & RDES1_BUFFER2_SIZE_MASK); 68
69 bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
70 p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
71 & RDES1_BUFFER2_SIZE_MASK);
72 }
67 73
68 if (end) 74 if (end)
69 p->des1 |= cpu_to_le32(RDES1_END_RING); 75 p->des1 |= cpu_to_le32(RDES1_END_RING);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 7fbb6a4dbf51..e061e9f5fad7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -296,7 +296,7 @@ exit:
296} 296}
297 297
298static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, 298static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
299 int mode, int end) 299 int mode, int end, int bfsize)
300{ 300{
301 dwmac4_set_rx_owner(p, disable_rx_ic); 301 dwmac4_set_rx_owner(p, disable_rx_ic);
302} 302}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 1d858fdec997..98fa471da7c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
123} 123}
124 124
125static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, 125static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
126 int mode, int end) 126 int mode, int end, int bfsize)
127{ 127{
128 dwxgmac2_set_rx_owner(p, disable_rx_ic); 128 dwxgmac2_set_rx_owner(p, disable_rx_ic);
129} 129}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 5ef91a790f9d..5202d6ad7919 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
201 if (unlikely(rdes0 & RDES0_OWN)) 201 if (unlikely(rdes0 & RDES0_OWN))
202 return dma_own; 202 return dma_own;
203 203
204 if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
205 stats->rx_length_errors++;
206 return discard_frame;
207 }
208
204 if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { 209 if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
205 if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { 210 if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
206 x->rx_desc++; 211 x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
231 * It doesn't match with the information reported into the databook. 236 * It doesn't match with the information reported into the databook.
232 * At any rate, we need to understand if the CSUM hw computation is ok 237 * At any rate, we need to understand if the CSUM hw computation is ok
233 * and report this info to the upper layers. */ 238 * and report this info to the upper layers. */
234 ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), 239 if (likely(ret == good_frame))
235 !!(rdes0 & RDES0_FRAME_TYPE), 240 ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
236 !!(rdes0 & ERDES0_RX_MAC_ADDR)); 241 !!(rdes0 & RDES0_FRAME_TYPE),
242 !!(rdes0 & ERDES0_RX_MAC_ADDR));
237 243
238 if (unlikely(rdes0 & RDES0_DRIBBLING)) 244 if (unlikely(rdes0 & RDES0_DRIBBLING))
239 x->dribbling_bit++; 245 x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
259} 265}
260 266
261static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, 267static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
262 int mode, int end) 268 int mode, int end, int bfsize)
263{ 269{
270 int bfsize1;
271
264 p->des0 |= cpu_to_le32(RDES0_OWN); 272 p->des0 |= cpu_to_le32(RDES0_OWN);
265 p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK); 273
274 bfsize1 = min(bfsize, BUF_SIZE_8KiB);
275 p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
266 276
267 if (mode == STMMAC_CHAIN_MODE) 277 if (mode == STMMAC_CHAIN_MODE)
268 ehn_desc_rx_set_on_chain(p); 278 ehn_desc_rx_set_on_chain(p);
269 else 279 else
270 ehn_desc_rx_set_on_ring(p, end); 280 ehn_desc_rx_set_on_ring(p, end, bfsize);
271 281
272 if (disable_rx_ic) 282 if (disable_rx_ic)
273 p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); 283 p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 92b8944f26e3..5bb00234d961 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -33,7 +33,7 @@ struct dma_extended_desc;
33struct stmmac_desc_ops { 33struct stmmac_desc_ops {
34 /* DMA RX descriptor ring initialization */ 34 /* DMA RX descriptor ring initialization */
35 void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, 35 void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
36 int end); 36 int end, int bfsize);
37 /* DMA TX descriptor ring initialization */ 37 /* DMA TX descriptor ring initialization */
38 void (*init_tx_desc)(struct dma_desc *p, int mode, int end); 38 void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
39 /* Invoked by the xmit function to prepare the tx descriptor */ 39 /* Invoked by the xmit function to prepare the tx descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index de65bb29feba..b7dd4e3c760d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
91 return dma_own; 91 return dma_own;
92 92
93 if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { 93 if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
94 pr_warn("%s: Oversized frame spanned multiple buffers\n",
95 __func__);
96 stats->rx_length_errors++; 94 stats->rx_length_errors++;
97 return discard_frame; 95 return discard_frame;
98 } 96 }
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
135} 133}
136 134
137static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, 135static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
138 int end) 136 int end, int bfsize)
139{ 137{
138 int bfsize1;
139
140 p->des0 |= cpu_to_le32(RDES0_OWN); 140 p->des0 |= cpu_to_le32(RDES0_OWN);
141 p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK); 141
142 bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
 143 p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
142 144
143 if (mode == STMMAC_CHAIN_MODE) 145 if (mode == STMMAC_CHAIN_MODE)
144 ndesc_rx_set_on_chain(p, end); 146 ndesc_rx_set_on_chain(p, end);
145 else 147 else
146 ndesc_rx_set_on_ring(p, end); 148 ndesc_rx_set_on_ring(p, end, bfsize);
147 149
148 if (disable_rx_ic) 150 if (disable_rx_ic)
149 p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); 151 p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
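The descriptor hunks above stop hard-coding 8 KiB/2 KiB buffer sizes and instead program the real receive buffer size, using the second buffer field only when 16 KiB buffers are in use; note that bfsize1, the clamped value computed just above, is what must land in the BUFFER1 size field. A small sketch of the enhanced-descriptor clamp-and-split, with stand-ins for the size constants:

#include <stdio.h>

#define BUF_SIZE_8KiB	8192
#define BUF_SIZE_16KiB	16384

static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

/* Split a receive buffer the way the enhanced-descriptor ring path now does:
 * up to 8 KiB goes into buffer 1, and buffer 2 is used only for 16 KiB buffers. */
static void split_bfsize(unsigned int bfsize, unsigned int *b1, unsigned int *b2)
{
	*b1 = min_u(bfsize, BUF_SIZE_8KiB);
	*b2 = bfsize == BUF_SIZE_16KiB ? BUF_SIZE_8KiB : 0;
}

int main(void)
{
	unsigned int sizes[] = { 1536, 4096, 16384 };
	unsigned int b1, b2;

	for (unsigned int i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		split_bfsize(sizes[i], &b1, &b2);
		printf("bfsize=%5u -> buffer1=%4u buffer2=%4u\n", sizes[i], b1, b2);
	}
	return 0;
}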
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6a2e1031a62a..a26e36dbb5df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1136 if (priv->extend_desc) 1136 if (priv->extend_desc)
1137 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, 1137 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1138 priv->use_riwt, priv->mode, 1138 priv->use_riwt, priv->mode,
1139 (i == DMA_RX_SIZE - 1)); 1139 (i == DMA_RX_SIZE - 1),
1140 priv->dma_buf_sz);
1140 else 1141 else
1141 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], 1142 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1142 priv->use_riwt, priv->mode, 1143 priv->use_riwt, priv->mode,
1143 (i == DMA_RX_SIZE - 1)); 1144 (i == DMA_RX_SIZE - 1),
1145 priv->dma_buf_sz);
1144} 1146}
1145 1147
1146/** 1148/**
@@ -3352,9 +3354,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3352{ 3354{
3353 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3355 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3354 struct stmmac_channel *ch = &priv->channel[queue]; 3356 struct stmmac_channel *ch = &priv->channel[queue];
3355 unsigned int entry = rx_q->cur_rx; 3357 unsigned int next_entry = rx_q->cur_rx;
3356 int coe = priv->hw->rx_csum; 3358 int coe = priv->hw->rx_csum;
3357 unsigned int next_entry;
3358 unsigned int count = 0; 3359 unsigned int count = 0;
3359 bool xmac; 3360 bool xmac;
3360 3361
@@ -3372,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3372 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); 3373 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3373 } 3374 }
3374 while (count < limit) { 3375 while (count < limit) {
3375 int status; 3376 int entry, status;
3376 struct dma_desc *p; 3377 struct dma_desc *p;
3377 struct dma_desc *np; 3378 struct dma_desc *np;
3378 3379
3380 entry = next_entry;
3381
3379 if (priv->extend_desc) 3382 if (priv->extend_desc)
3380 p = (struct dma_desc *)(rx_q->dma_erx + entry); 3383 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3381 else 3384 else
@@ -3431,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3431 * ignored 3434 * ignored
3432 */ 3435 */
3433 if (frame_len > priv->dma_buf_sz) { 3436 if (frame_len > priv->dma_buf_sz) {
3434 netdev_err(priv->dev, 3437 if (net_ratelimit())
3435 "len %d larger than size (%d)\n", 3438 netdev_err(priv->dev,
3436 frame_len, priv->dma_buf_sz); 3439 "len %d larger than size (%d)\n",
3440 frame_len, priv->dma_buf_sz);
3437 priv->dev->stats.rx_length_errors++; 3441 priv->dev->stats.rx_length_errors++;
3438 break; 3442 continue;
3439 } 3443 }
3440 3444
3441 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 3445 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3470,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3470 dev_warn(priv->device, 3474 dev_warn(priv->device,
3471 "packet dropped\n"); 3475 "packet dropped\n");
3472 priv->dev->stats.rx_dropped++; 3476 priv->dev->stats.rx_dropped++;
3473 break; 3477 continue;
3474 } 3478 }
3475 3479
3476 dma_sync_single_for_cpu(priv->device, 3480 dma_sync_single_for_cpu(priv->device,
@@ -3490,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3490 } else { 3494 } else {
3491 skb = rx_q->rx_skbuff[entry]; 3495 skb = rx_q->rx_skbuff[entry];
3492 if (unlikely(!skb)) { 3496 if (unlikely(!skb)) {
3493 netdev_err(priv->dev, 3497 if (net_ratelimit())
3494 "%s: Inconsistent Rx chain\n", 3498 netdev_err(priv->dev,
3495 priv->dev->name); 3499 "%s: Inconsistent Rx chain\n",
3500 priv->dev->name);
3496 priv->dev->stats.rx_dropped++; 3501 priv->dev->stats.rx_dropped++;
3497 break; 3502 continue;
3498 } 3503 }
3499 prefetch(skb->data - NET_IP_ALIGN); 3504 prefetch(skb->data - NET_IP_ALIGN);
3500 rx_q->rx_skbuff[entry] = NULL; 3505 rx_q->rx_skbuff[entry] = NULL;
@@ -3529,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3529 priv->dev->stats.rx_packets++; 3534 priv->dev->stats.rx_packets++;
3530 priv->dev->stats.rx_bytes += frame_len; 3535 priv->dev->stats.rx_bytes += frame_len;
3531 } 3536 }
3532 entry = next_entry;
3533 } 3537 }
3534 3538
3535 stmmac_rx_refill(priv, queue); 3539 stmmac_rx_refill(priv, queue);
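The stmmac_rx() hunks above hoist "entry = next_entry" to the top of the loop and turn the error breaks into continues, so one bad descriptor is dropped and counted instead of stalling the whole poll, and the error messages are rate-limited. A tiny sketch of that skip-and-continue loop shape (the ring contents and error test are invented):

#include <stdio.h>

#define RING_SIZE 8

struct desc { int bad; int len; };

static int process_ring(const struct desc *ring, unsigned int cur, int limit)
{
	unsigned int next_entry = cur;
	int count = 0, dropped = 0;

	while (count < limit) {
		unsigned int entry = next_entry;	/* advance first ... */
		const struct desc *p = &ring[entry];

		count++;
		next_entry = (entry + 1) % RING_SIZE;

		if (p->bad) {		/* ... so 'continue' skips just this descriptor */
			dropped++;
			continue;
		}
		printf("entry %u: %d bytes\n", entry, p->len);
	}
	printf("dropped %d\n", dropped);
	return count;
}

int main(void)
{
	struct desc ring[RING_SIZE] = {
		{ 0, 64 }, { 1, 0 }, { 0, 128 }, { 0, 256 },
		{ 1, 0 }, { 0, 512 }, { 0, 1500 }, { 0, 60 },
	};

	process_ring(ring, 0, RING_SIZE);
	return 0;
}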
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e859ae2e42d5..49f41b64077b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -987,6 +987,7 @@ struct netvsc_device {
987 987
988 wait_queue_head_t wait_drain; 988 wait_queue_head_t wait_drain;
989 bool destroy; 989 bool destroy;
990 bool tx_disable; /* if true, do not wake up queue again */
990 991
991 /* Receive buffer allocated by us but manages by NetVSP */ 992 /* Receive buffer allocated by us but manages by NetVSP */
992 void *recv_buf; 993 void *recv_buf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 813d195bbd57..e0dce373cdd9 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
110 110
111 init_waitqueue_head(&net_device->wait_drain); 111 init_waitqueue_head(&net_device->wait_drain);
112 net_device->destroy = false; 112 net_device->destroy = false;
113 net_device->tx_disable = false;
113 114
114 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; 115 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
115 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; 116 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
719 } else { 720 } else {
720 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); 721 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
721 722
722 if (netif_tx_queue_stopped(txq) && 723 if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
723 (hv_get_avail_to_write_percent(&channel->outbound) > 724 (hv_get_avail_to_write_percent(&channel->outbound) >
724 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { 725 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
725 netif_tx_wake_queue(txq); 726 netif_tx_wake_queue(txq);
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
874 } else if (ret == -EAGAIN) { 875 } else if (ret == -EAGAIN) {
875 netif_tx_stop_queue(txq); 876 netif_tx_stop_queue(txq);
876 ndev_ctx->eth_stats.stop_queue++; 877 ndev_ctx->eth_stats.stop_queue++;
877 if (atomic_read(&nvchan->queue_sends) < 1) { 878 if (atomic_read(&nvchan->queue_sends) < 1 &&
879 !net_device->tx_disable) {
878 netif_tx_wake_queue(txq); 880 netif_tx_wake_queue(txq);
879 ndev_ctx->eth_stats.wake_queue++; 881 ndev_ctx->eth_stats.wake_queue++;
880 ret = -ENOSPC; 882 ret = -ENOSPC;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cf4897043e83..b20fb0fb595b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
109 rcu_read_unlock(); 109 rcu_read_unlock();
110} 110}
111 111
112static void netvsc_tx_enable(struct netvsc_device *nvscdev,
113 struct net_device *ndev)
114{
115 nvscdev->tx_disable = false;
116 virt_wmb(); /* ensure queue wake up mechanism is on */
117
118 netif_tx_wake_all_queues(ndev);
119}
120
112static int netvsc_open(struct net_device *net) 121static int netvsc_open(struct net_device *net)
113{ 122{
114 struct net_device_context *ndev_ctx = netdev_priv(net); 123 struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
129 rdev = nvdev->extension; 138 rdev = nvdev->extension;
130 if (!rdev->link_state) { 139 if (!rdev->link_state) {
131 netif_carrier_on(net); 140 netif_carrier_on(net);
132 netif_tx_wake_all_queues(net); 141 netvsc_tx_enable(nvdev, net);
133 } 142 }
134 143
135 if (vf_netdev) { 144 if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
184 } 193 }
185} 194}
186 195
196static void netvsc_tx_disable(struct netvsc_device *nvscdev,
197 struct net_device *ndev)
198{
199 if (nvscdev) {
200 nvscdev->tx_disable = true;
201 virt_wmb(); /* ensure txq will not wake up after stop */
202 }
203
204 netif_tx_disable(ndev);
205}
206
187static int netvsc_close(struct net_device *net) 207static int netvsc_close(struct net_device *net)
188{ 208{
189 struct net_device_context *net_device_ctx = netdev_priv(net); 209 struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
192 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 212 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
193 int ret; 213 int ret;
194 214
195 netif_tx_disable(net); 215 netvsc_tx_disable(nvdev, net);
196 216
197 /* No need to close rndis filter if it is removed already */ 217 /* No need to close rndis filter if it is removed already */
198 if (!nvdev) 218 if (!nvdev)
@@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev,
920 940
921 /* If device was up (receiving) then shutdown */ 941 /* If device was up (receiving) then shutdown */
922 if (netif_running(ndev)) { 942 if (netif_running(ndev)) {
923 netif_tx_disable(ndev); 943 netvsc_tx_disable(nvdev, ndev);
924 944
925 ret = rndis_filter_close(nvdev); 945 ret = rndis_filter_close(nvdev);
926 if (ret) { 946 if (ret) {
@@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w)
1908 if (rdev->link_state) { 1928 if (rdev->link_state) {
1909 rdev->link_state = false; 1929 rdev->link_state = false;
1910 netif_carrier_on(net); 1930 netif_carrier_on(net);
1911 netif_tx_wake_all_queues(net); 1931 netvsc_tx_enable(net_device, net);
1912 } else { 1932 } else {
1913 notify = true; 1933 notify = true;
1914 } 1934 }
@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
1918 if (!rdev->link_state) { 1938 if (!rdev->link_state) {
1919 rdev->link_state = true; 1939 rdev->link_state = true;
1920 netif_carrier_off(net); 1940 netif_carrier_off(net);
1921 netif_tx_stop_all_queues(net); 1941 netvsc_tx_disable(net_device, net);
1922 } 1942 }
1923 kfree(event); 1943 kfree(event);
1924 break; 1944 break;
@@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w)
1927 if (!rdev->link_state) { 1947 if (!rdev->link_state) {
1928 rdev->link_state = true; 1948 rdev->link_state = true;
1929 netif_carrier_off(net); 1949 netif_carrier_off(net);
1930 netif_tx_stop_all_queues(net); 1950 netvsc_tx_disable(net_device, net);
1931 event->event = RNDIS_STATUS_MEDIA_CONNECT; 1951 event->event = RNDIS_STATUS_MEDIA_CONNECT;
1932 spin_lock_irqsave(&ndev_ctx->lock, flags); 1952 spin_lock_irqsave(&ndev_ctx->lock, flags);
1933 list_add(&event->list, &ndev_ctx->reconfig_events); 1953 list_add(&event->list, &ndev_ctx->reconfig_events);
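The netvsc hunks above add a per-device tx_disable flag: netvsc_tx_disable() sets it, issues a write barrier, and only then stops the queues, while the completion and EAGAIN paths refuse to wake a queue while the flag is set, so a queue stopped for close/detach cannot be woken behind the driver's back. A userspace sketch of that flag-gated wake-up, using C11 release/acquire ordering as a rough stand-in for virt_wmb() (the netvsc names are only mimicked):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool tx_disable;
static bool queue_stopped;

static void tx_disable_set(void)
{
	/* publish the flag before the queue is stopped, like virt_wmb() */
	atomic_store_explicit(&tx_disable, true, memory_order_release);
	queue_stopped = true;
	printf("queues stopped\n");
}

static void tx_enable_set(void)
{
	atomic_store_explicit(&tx_disable, false, memory_order_release);
	queue_stopped = false;
	printf("queues running\n");
}

/* Completion path: only wake the queue when transmit is not disabled. */
static void maybe_wake_queue(void)
{
	if (queue_stopped &&
	    !atomic_load_explicit(&tx_disable, memory_order_acquire)) {
		queue_stopped = false;
		printf("queue woken\n");
	} else {
		printf("wake suppressed\n");
	}
}

int main(void)
{
	tx_disable_set();	/* e.g. netvsc_close()/netvsc_detach() */
	maybe_wake_queue();	/* a late completion must not wake the queue */
	tx_enable_set();	/* e.g. netvsc_open() after link up */
	return 0;
}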
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 74bebbdb4b15..9195f3476b1d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
1203 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 1203 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
1204 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ 1204 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
1205 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ 1205 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1206 {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
1206 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ 1207 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
1207 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 1208 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
1208 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 1209 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7c1430ed0244..6d1a1abbed27 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,6 +1273,7 @@ static void vrf_setup(struct net_device *dev)
 
 	/* default to no qdisc; user can add if desired */
 	dev->priv_flags |= IFF_NO_QUEUE;
+	dev->priv_flags |= IFF_NO_RX_HANDLER;
 
 	dev->min_mtu = 0;
 	dev->max_mtu = 0;
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 1be571c20062..6bad04cbb1d3 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -157,8 +157,12 @@
 #define DBG_IRT(x...)
 #endif
 
+#ifdef CONFIG_64BIT
+#define COMPARE_IRTE_ADDR(irte, hpa)	((irte)->dest_iosapic_addr == (hpa))
+#else
 #define COMPARE_IRTE_ADDR(irte, hpa)	\
-		((irte)->dest_iosapic_addr == F_EXTEND(hpa))
+		((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL))
+#endif
 
 #define IOSAPIC_REG_SELECT	0x00
 #define IOSAPIC_REG_WINDOW	0x10
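The reworked COMPARE_IRTE_ADDR() compares the IRT entry against the raw HPA on 64-bit kernels and, on 32-bit kernels, against the HPA widened with the fixed 0xffffffff00000000 upper half that firmware stores in the interrupt routing table (F_EXTEND() is only usable on wide kernels). A standalone illustration of the widening, with a made-up HPA value:

#include <stdint.h>
#include <stdio.h>

/* Illustration only: widens a 32-bit I/O SAPIC HPA to the 64-bit form kept
 * in the interrupt routing table. The HPA value below is made up.
 */
int main(void)
{
	uint64_t hpa = 0xfffa0000;
	uint64_t irte_addr = hpa | 0xffffffff00000000ULL;

	printf("hpa=0x%llx irte=0x%llx\n",
	       (unsigned long long)hpa, (unsigned long long)irte_addr);
	return 0;	/* prints hpa=0xfffa0000 irte=0xfffffffffffa0000 */
}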
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index a71734c41693..f933c06bff4f 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -667,9 +667,9 @@ config RTC_DRV_S5M
 	  will be called rtc-s5m.
 
 config RTC_DRV_SD3078
-	tristate "ZXW Crystal SD3078"
+	tristate "ZXW Shenzhen whwave SD3078"
 	help
-	  If you say yes here you get support for the ZXW Crystal
+	  If you say yes here you get support for the ZXW Shenzhen whwave
 	  SD3078 RTC chips.
 
 	  This driver can also be built as a module. If so, the module
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index e5444296075e..4d6bf9304ceb 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev)
 	struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
 
 	if (device_may_wakeup(dev))
-		enable_irq_wake(cros_ec_rtc->cros_ec->irq);
+		return enable_irq_wake(cros_ec_rtc->cros_ec->irq);
 
 	return 0;
 }
@@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev)
 	struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
 
 	if (device_may_wakeup(dev))
-		disable_irq_wake(cros_ec_rtc->cros_ec->irq);
+		return disable_irq_wake(cros_ec_rtc->cros_ec->irq);
 
 	return 0;
 }
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index b4e054c64bad..69b54e5556c0 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
 	da9063_data_to_tm(data, &rtc->alarm_time, rtc);
 	rtc->rtc_sync = false;
 
+	/*
+	 * TODO: some models have alarms on a minute boundary but still support
+	 * real hardware interrupts. Add this once the core supports it.
+	 */
+	if (config->rtc_data_start != RTC_SEC)
+		rtc->rtc_dev->uie_unsupported = 1;
+
 	irq_alarm = platform_get_irq_byname(pdev, "ALARM");
 	ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
 					da9063_alarm_event,
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index d417b203cbc5..1d3de2a3d1a4 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
 static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
 {
 	unsigned int byte;
-	int value = 0xff;	/* return 0xff for ignored values */
+	int value = -1;		/* return -1 for ignored values */
 
 	byte = readb(rtc->regbase + reg_off);
 	if (byte & AR_ENB) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c98f264f1d83..a497b2c0cb79 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3878,10 +3878,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
 	 * wake up the thread.
 	 */
 	spin_lock(&lpfc_cmd->buf_lock);
-	if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) {
-		lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
-		if (lpfc_cmd->waitq)
-			wake_up(lpfc_cmd->waitq);
+	lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
+	if (lpfc_cmd->waitq) {
+		wake_up(lpfc_cmd->waitq);
 		lpfc_cmd->waitq = NULL;
 	}
 	spin_unlock(&lpfc_cmd->buf_lock);
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e74a62448ba4..e5db9a9954dc 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1392,10 +1392,8 @@ static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 
 static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
 {
-	struct qedi_nvm_iscsi_image nvm_image;
-
 	qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
-					       sizeof(nvm_image),
+					       sizeof(struct qedi_nvm_iscsi_image),
 					       &qedi->nvm_buf_dma, GFP_KERNEL);
 	if (!qedi->iscsi_image) {
 		QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
@@ -2236,14 +2234,13 @@ static void qedi_boot_release(void *data)
 static int qedi_get_boot_info(struct qedi_ctx *qedi)
 {
 	int ret = 1;
-	struct qedi_nvm_iscsi_image nvm_image;
 
 	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
 		  "Get NVM iSCSI CFG image\n");
 	ret = qedi_ops->common->nvm_get_image(qedi->cdev,
 					      QED_NVM_IMAGE_ISCSI_CFG,
 					      (char *)qedi->iscsi_image,
-					      sizeof(nvm_image));
+					      sizeof(struct qedi_nvm_iscsi_image));
 	if (ret)
 		QEDI_ERR(&qedi->dbg_ctx,
 			 "Could not get NVM image. ret = %d\n", ret);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c4cbfd07b916..a08ff3bd6310 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -238,6 +238,7 @@ static struct {
238 {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 238 {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
239 {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 239 {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
240 {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 240 {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
241 {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
241 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, 242 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
242 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, 243 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
243 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ 244 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 5a58cbf3a75d..c14006ac98f9 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
75 {"NETAPP", "INF-01-00", "rdac", }, 75 {"NETAPP", "INF-01-00", "rdac", },
76 {"LSI", "INF-01-00", "rdac", }, 76 {"LSI", "INF-01-00", "rdac", },
77 {"ENGENIO", "INF-01-00", "rdac", }, 77 {"ENGENIO", "INF-01-00", "rdac", },
78 {"LENOVO", "DE_Series", "rdac", },
78 {NULL, NULL, NULL }, 79 {NULL, NULL, NULL },
79}; 80};
80 81
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 84380bae20f1..8472de1007ff 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -385,7 +385,7 @@ enum storvsc_request_type {
  * This is the end of Protocol specific defines.
  */
 
-static int storvsc_ringbuffer_size = (256 * PAGE_SIZE);
+static int storvsc_ringbuffer_size = (128 * 1024);
 static u32 max_outstanding_req_per_channel;
 
 static int storvsc_vcpus_per_sub_channel = 4;
@@ -668,13 +668,22 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
 {
 	struct device *dev = &device->device;
 	struct storvsc_device *stor_device;
-	int num_cpus = num_online_cpus();
 	int num_sc;
 	struct storvsc_cmd_request *request;
 	struct vstor_packet *vstor_packet;
 	int ret, t;
 
-	num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns);
+	/*
+	 * If the number of CPUs is artificially restricted, such as
+	 * with maxcpus=1 on the kernel boot line, Hyper-V could offer
+	 * sub-channels >= the number of CPUs. These sub-channels
+	 * should not be created. The primary channel is already created
+	 * and assigned to one CPU, so check against # CPUs - 1.
+	 */
+	num_sc = min((int)(num_online_cpus() - 1), max_chns);
+	if (!num_sc)
+		return;
+
 	stor_device = get_out_stor_device(device);
 	if (!stor_device)
 		return;
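The comment added in the second storvsc hunk carries the reasoning: the primary channel already owns one CPU, so at most num_online_cpus() - 1 sub-channels are useful, and a maxcpus=1 boot must create none. A standalone check of that arithmetic (values illustrative, min() redefined locally):

#include <stdio.h>

#define min(a, b)	((a) < (b) ? (a) : (b))

/* Illustrative only: mirrors the num_sc clamp from handle_multichannel_storage(). */
static int clamp_subchannels(int online_cpus, int max_chns)
{
	return min(online_cpus - 1, max_chns);
}

int main(void)
{
	printf("%d\n", clamp_subchannels(1, 4));	/* maxcpus=1 -> 0, no sub-channels   */
	printf("%d\n", clamp_subchannels(8, 4));	/* 8 CPUs    -> 4, limited by offer  */
	printf("%d\n", clamp_subchannels(4, 16));	/* 4 CPUs    -> 3, limited by CPUs   */
	return 0;
}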
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index a25659b5a5d1..3fa20e95a6bb 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1661,11 +1661,11 @@ static void __init vfio_pci_fill_ids(void)
 		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
 				   subvendor, subdevice, class, class_mask, 0);
 		if (rc)
-			pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
+			pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
 				vendor, device, subvendor, subdevice,
 				class, class_mask, rc);
 		else
-			pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
+			pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
 				vendor, device, subvendor, subdevice,
 				class, class_mask);
 	}
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 8dbb270998f4..6b64e45a5269 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1398,7 +1398,7 @@ unlock_exit:
 	mutex_unlock(&container->lock);
 }
 
-const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
+static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
 	.name = "iommu-vfio-powerpc",
 	.owner = THIS_MODULE,
 	.open = tce_iommu_open,
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 73652e21efec..d0f731c9920a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
 MODULE_PARM_DESC(disable_hugepages,
 		 "Disable VFIO IOMMU support for IOMMU hugepages.");
 
+static unsigned int dma_entry_limit __read_mostly = U16_MAX;
+module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
+MODULE_PARM_DESC(dma_entry_limit,
+		 "Maximum number of user DMA mappings per container (65535).");
+
 struct vfio_iommu {
 	struct list_head	domain_list;
 	struct vfio_domain	*external_domain; /* domain for external user */
 	struct mutex		lock;
 	struct rb_root		dma_list;
 	struct blocking_notifier_head notifier;
+	unsigned int		dma_avail;
 	bool			v2;
 	bool			nesting;
 };
@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 	vfio_unlink_dma(iommu, dma);
 	put_task_struct(dma->task);
 	kfree(dma);
+	iommu->dma_avail++;
 }
 
 static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 		goto out_unlock;
 	}
 
+	if (!iommu->dma_avail) {
+		ret = -ENOSPC;
+		goto out_unlock;
+	}
+
 	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
 	if (!dma) {
 		ret = -ENOMEM;
 		goto out_unlock;
 	}
 
+	iommu->dma_avail--;
 	dma->iova = iova;
 	dma->vaddr = vaddr;
 	dma->prot = prot;
@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
 
 	INIT_LIST_HEAD(&iommu->domain_list);
 	iommu->dma_list = RB_ROOT;
+	iommu->dma_avail = dma_entry_limit;
 	mutex_init(&iommu->lock);
 	BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
 
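Read together, the four vfio_iommu_type1 hunks add a per-container budget: dma_entry_limit seeds iommu->dma_avail when the container is opened, each mapping takes one slot, each removal returns it, and a map request with no slots left fails with -ENOSPC rather than letting userspace create an unbounded number of tracking entries. A minimal userspace-style sketch of the same accounting pattern (names are illustrative, not the driver's):

#include <errno.h>
#include <stdio.h>

/* Illustrative accounting pattern only; not the vfio code itself. */
struct mapping_budget {
	unsigned int avail;	/* seeded from a configurable limit, e.g. 65535 */
};

static int budget_take(struct mapping_budget *b)
{
	if (!b->avail)
		return -ENOSPC;	/* refuse new mappings once the budget is spent */
	b->avail--;
	return 0;
}

static void budget_put(struct mapping_budget *b)
{
	b->avail++;		/* a removed mapping returns its slot */
}

int main(void)
{
	struct mapping_budget b = { .avail = 1 };

	printf("%d\n", budget_take(&b));	/* 0: first mapping fits        */
	printf("%d\n", budget_take(&b));	/* -ENOSPC: budget exhausted    */
	budget_put(&b);				/* removing a mapping frees one */
	printf("%d\n", budget_take(&b));	/* 0: fits again                */
	return 0;
}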
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
index de01a6d0059d..a1c61e351d3f 100644
--- a/drivers/xen/privcmd-buf.c
+++ b/drivers/xen/privcmd-buf.c
@@ -140,8 +140,7 @@ static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
 	if (!(vma->vm_flags & VM_SHARED))
 		return -EINVAL;
 
-	vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
-			   GFP_KERNEL);
+	vma_priv = kzalloc(struct_size(vma_priv, pages, count), GFP_KERNEL);
 	if (!vma_priv)
 		return -ENOMEM;
 
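The privcmd change converts an open-coded header-plus-array size into struct_size(), the overflow-checked helper from linux/overflow.h: it computes sizeof(*vma_priv) + count * sizeof(vma_priv->pages[0]) and saturates instead of wrapping when count is attacker-controlled. A kernel-style sketch of the pattern with an illustrative structure (not privcmd's own):

#include <linux/overflow.h>
#include <linux/slab.h>

/* Illustrative structure only; privcmd's real struct differs. */
struct page_holder {
	unsigned int	n_pages;
	struct page	*pages[];	/* flexible array member */
};

static struct page_holder *page_holder_alloc(unsigned int count)
{
	/* struct_size() = sizeof(struct page_holder) + count * sizeof(struct page *),
	 * saturating to SIZE_MAX on overflow so kzalloc() fails cleanly.
	 */
	struct page_holder *h = kzalloc(struct_size(h, pages, count), GFP_KERNEL);

	if (h)
		h->n_pages = count;
	return h;
}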
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index c3e201025ef0..0782ff3c2273 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
 	if (xen_store_evtchn == 0)
 		return -ENOENT;
 
-	nonseekable_open(inode, filp);
-
-	filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
+	stream_open(inode, filp);
 
 	u = kzalloc(sizeof(*u), GFP_KERNEL);
 	if (u == NULL)
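The xenbus hunk replaces nonseekable_open() plus a manual FMODE_ATOMIC_POS clear with stream_open(), which marks the file as a position-less byte stream in one call. A minimal sketch of an open handler using it; the device and function names are hypothetical:

#include <linux/fs.h>
#include <linux/module.h>

/* Hypothetical stream-like character device; names are illustrative. */
static int example_stream_open(struct inode *inode, struct file *filp)
{
	/* Mark the file as a stream: no meaningful f_pos, reads and writes
	 * are not serialized on the position, and lseek() is rejected.
	 */
	return stream_open(inode, filp);
}

static const struct file_operations example_stream_fops = {
	.owner	= THIS_MODULE,
	.open	= example_stream_open,
	.llseek	= no_llseek,
};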