Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/libata-core.c | 14
-rw-r--r--  drivers/block/floppy.c | 3
-rw-r--r--  drivers/block/xen-blkfront.c | 4
-rw-r--r--  drivers/bluetooth/hci_ldisc.c | 2
-rw-r--r--  drivers/clk/x86/clk-pmc-atom.c | 18
-rw-r--r--  drivers/clocksource/timer-atmel-pit.c | 20
-rw-r--r--  drivers/clocksource/timer-fttmr010.c | 18
-rw-r--r--  drivers/clocksource/timer-ti-32k.c | 3
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-kryo.c | 4
-rw-r--r--  drivers/crypto/ccp/psp-dev.c | 46
-rw-r--r--  drivers/dax/device.c | 6
-rw-r--r--  drivers/firmware/efi/Kconfig | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 139
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c | 12
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 2
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 1
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.c | 25
-rw-r--r--  drivers/gpu/drm/arm/malidp_hw.h | 3
-rw-r--r--  drivers/gpu/drm/arm/malidp_mw.c | 25
-rw-r--r--  drivers/gpu/drm/arm/malidp_regs.h | 2
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 2
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 3
-rw-r--r--  drivers/gpu/drm/drm_panel.c | 10
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 5
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c | 28
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 1
-rw-r--r--  drivers/gpu/drm/pl111/pl111_vexpress.c | 3
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_mixer.c | 24
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_tcon_top.c | 1
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 8
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 42
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 25
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 24
-rw-r--r--  drivers/gpu/vga/vga_switcheroo.c | 2
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 27
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/hw-ish.h | 1
-rw-r--r--  drivers/hid/intel-ish-hid/ipc/pci-ish.c | 1
-rw-r--r--  drivers/hwmon/nct6775.c | 72
-rw-r--r--  drivers/hwtracing/intel_th/core.c | 16
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 5
-rw-r--r--  drivers/infiniband/core/cache.c | 68
-rw-r--r--  drivers/infiniband/core/ucma.c | 2
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 68
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 1
-rw-r--r--  drivers/infiniband/core/uverbs_uapi.c | 1
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 93
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.c | 51
-rw-r--r--  drivers/infiniband/hw/hfi1/pio.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/user_sdma.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 5
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 6
-rw-r--r--  drivers/input/keyboard/atakbd.c | 74
-rw-r--r--  drivers/input/misc/uinput.c | 2
-rw-r--r--  drivers/input/mouse/elantech.c | 2
-rw-r--r--  drivers/input/touchscreen/egalax_ts.c | 6
-rw-r--r--  drivers/iommu/amd_iommu.c | 6
-rw-r--r--  drivers/iommu/intel-iommu.c | 6
-rw-r--r--  drivers/iommu/intel-pasid.h | 2
-rw-r--r--  drivers/iommu/rockchip-iommu.c | 6
-rw-r--r--  drivers/md/bcache/bcache.h | 1
-rw-r--r--  drivers/md/bcache/journal.c | 6
-rw-r--r--  drivers/md/bcache/super.c | 8
-rw-r--r--  drivers/media/i2c/mt9v111.c | 41
-rw-r--r--  drivers/media/platform/Kconfig | 2
-rw-r--r--  drivers/media/platform/qcom/camss/camss-csid.c | 1
-rw-r--r--  drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c | 1
-rw-r--r--  drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c | 1
-rw-r--r--  drivers/media/platform/qcom/camss/camss-csiphy.c | 1
-rw-r--r--  drivers/media/platform/qcom/camss/camss-ispif.c | 5
-rw-r--r--  drivers/media/platform/qcom/camss/camss-vfe-4-1.c | 1
-rw-r--r--  drivers/media/platform/qcom/camss/camss-vfe-4-7.c | 1
-rw-r--r--  drivers/media/platform/qcom/camss/camss.c | 15
-rw-r--r--  drivers/media/usb/dvb-usb-v2/af9035.c | 6
-rw-r--r--  drivers/media/v4l2-core/v4l2-event.c | 38
-rw-r--r--  drivers/media/v4l2-core/v4l2-fh.c | 2
-rw-r--r--  drivers/mfd/omap-usb-host.c | 11
-rw-r--r--  drivers/mmc/core/host.c | 2
-rw-r--r--  drivers/mmc/core/slot-gpio.c | 2
-rw-r--r--  drivers/mmc/host/renesas_sdhi_sys_dmac.c | 3
-rw-r--r--  drivers/mtd/devices/m25p80.c | 26
-rw-r--r--  drivers/mtd/mtdpart.c | 5
-rw-r--r--  drivers/mtd/nand/raw/denali.c | 6
-rw-r--r--  drivers/mtd/nand/raw/marvell_nand.c | 4
-rw-r--r--  drivers/net/appletalk/ipddp.c | 8
-rw-r--r--  drivers/net/bonding/bond_main.c | 11
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_atu.c | 2
-rw-r--r--  drivers/net/ethernet/apple/bmac.c | 4
-rw-r--r--  drivers/net/ethernet/apple/mace.c | 4
-rw-r--r--  drivers/net/ethernet/apple/macmace.c | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 32
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 27
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 20
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 1
-rw-r--r--  drivers/net/ethernet/cirrus/ep93xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/cirrus/mac89x0.c | 4
-rw-r--r--  drivers/net/ethernet/hp/hp100.c | 2
-rw-r--r--  drivers/net/ethernet/i825xx/ether1.c | 5
-rw-r--r--  drivers/net/ethernet/i825xx/lib82596.c | 4
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 15
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k.h | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 22
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 26
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 27
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 30
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c | 25
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 25
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 21
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 13
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 4
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 6
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_board.c | 12
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 18
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 45
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 24
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 6
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 49
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 5
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 11
-rw-r--r--  drivers/net/ethernet/renesas/ravb_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/seeq/ether3.c | 5
-rw-r--r--  drivers/net/ethernet/seeq/sgiseeq.c | 3
-rw-r--r--  drivers/net/ethernet/sgi/ioc3-eth.c | 4
-rw-r--r--  drivers/net/ethernet/sgi/meth.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 238
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 5
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 2
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 2
-rw-r--r--  drivers/net/hyperv/netvsc.c | 3
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 67
-rw-r--r--  drivers/net/phy/sfp-bus.c | 4
-rw-r--r--  drivers/net/ppp/pppoe.c | 3
-rw-r--r--  drivers/net/tun.c | 43
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 14
-rw-r--r--  drivers/net/veth.c | 4
-rw-r--r--  drivers/net/xen-netfront.c | 8
-rw-r--r--  drivers/nvme/host/multipath.c | 6
-rw-r--r--  drivers/nvme/target/admin-cmd.c | 4
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.c | 8
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware.h | 3
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 39
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 11
-rw-r--r--  drivers/pinctrl/intel/pinctrl-cannonlake.c | 35
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c | 143
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c | 33
-rw-r--r--  drivers/platform/x86/alienware-wmi.c | 1
-rw-r--r--  drivers/platform/x86/dell-smbios-wmi.c | 1
-rw-r--r--  drivers/regulator/bd71837-regulator.c | 19
-rw-r--r--  drivers/regulator/core.c | 4
-rw-r--r--  drivers/regulator/of_regulator.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 5
-rw-r--r--  drivers/scsi/ipr.c | 106
-rw-r--r--  drivers/scsi/ipr.h | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 4
-rw-r--r--  drivers/scsi/sd.c | 6
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 7
-rw-r--r--  drivers/soc/fsl/qbman/qman.c | 3
-rw-r--r--  drivers/soc/fsl/qe/ucc.c | 2
-rw-r--r--  drivers/soundwire/stream.c | 23
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 6
-rw-r--r--  drivers/spi/spi-gpio.c | 4
-rw-r--r--  drivers/spi/spi-rspi.c | 34
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 28
-rw-r--r--  drivers/spi/spi-tegra20-slink.c | 31
-rw-r--r--  drivers/spi/spi.c | 13
-rw-r--r--  drivers/staging/media/mt9t031/Kconfig | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 22
-rw-r--r--  drivers/target/iscsi/iscsi_target_auth.c | 45
-rw-r--r--  drivers/tty/serial/cpm_uart/cpm_uart_core.c | 10
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 3
-rw-r--r--  drivers/tty/serial/imx.c | 8
-rw-r--r--  drivers/tty/serial/mvebu-uart.c | 1
-rw-r--r--  drivers/tty/tty_io.c | 11
-rw-r--r--  drivers/tty/vt/vt_ioctl.c | 4
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 2
-rw-r--r--  drivers/usb/common/roles.c | 15
-rw-r--r--  drivers/usb/core/devio.c | 24
-rw-r--r--  drivers/usb/core/driver.c | 28
-rw-r--r--  drivers/usb/core/quirks.c | 3
-rw-r--r--  drivers/usb/core/usb.c | 2
-rw-r--r--  drivers/usb/musb/musb_dsps.c | 12
-rw-r--r--  drivers/usb/typec/mux.c | 17
-rw-r--r--  drivers/video/fbdev/efifb.c | 6
-rw-r--r--  drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c | 5
-rw-r--r--  drivers/video/fbdev/pxa168fb.c | 6
-rw-r--r--  drivers/video/fbdev/stifb.c | 2
-rw-r--r--  drivers/xen/grant-table.c | 27
237 files changed, 1973 insertions, 1411 deletions
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 599e01bcdef2..a9dd4ea7467d 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -5359,10 +5359,20 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
5359 */ 5359 */
5360int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active) 5360int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
5361{ 5361{
5362 u64 done_mask, ap_qc_active = ap->qc_active;
5362 int nr_done = 0; 5363 int nr_done = 0;
5363 u64 done_mask;
5364 5364
5365 done_mask = ap->qc_active ^ qc_active; 5365 /*
5366 * If the internal tag is set on ap->qc_active, then we care about
5367 * bit0 on the passed in qc_active mask. Move that bit up to match
5368 * the internal tag.
5369 */
5370 if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
5371 qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
5372 qc_active ^= qc_active & 0x01;
5373 }
5374
5375 done_mask = ap_qc_active ^ qc_active;
5366 5376
5367 if (unlikely(done_mask & qc_active)) { 5377 if (unlikely(done_mask & qc_active)) {
5368 ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n", 5378 ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
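Note on the hunk above: ap->qc_active tracks the internal command on a dedicated high tag bit, while some hosts report its completion in bit 0 of the mask they pass in, so the function now folds bit 0 up onto the internal tag before computing done_mask. A minimal user-space sketch of that remapping, assuming ATA_TAG_INTERNAL is bit 32 as in mainline of that era (treat the constant as illustrative, not authoritative):

```c
#include <stdio.h>
#include <stdint.h>

/* Assumption: the internal command uses tag 32 (ATA_MAX_QUEUE). */
#define ATA_TAG_INTERNAL 32

/* Mirrors the remapping done in ata_qc_complete_multiple(). */
static uint64_t remap_internal_tag(uint64_t ap_qc_active, uint64_t qc_active)
{
	if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
		/* Move the caller's bit 0 up to the internal-tag position... */
		qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
		/* ...and clear the original bit 0. */
		qc_active ^= qc_active & 0x01;
	}
	return qc_active;
}

int main(void)
{
	uint64_t ap_qc_active = 1ULL << ATA_TAG_INTERNAL; /* internal cmd outstanding */
	uint64_t qc_active = 0x1;                         /* hardware reported bit 0 */

	printf("remapped qc_active = %#llx\n",
	       (unsigned long long)remap_internal_tag(ap_qc_active, qc_active));
	return 0;
}
```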
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 48f622728ce6..f2b6f4da1034 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3467,6 +3467,9 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3467 (struct floppy_struct **)&outparam); 3467 (struct floppy_struct **)&outparam);
3468 if (ret) 3468 if (ret)
3469 return ret; 3469 return ret;
3470 memcpy(&inparam.g, outparam,
3471 offsetof(struct floppy_struct, name));
3472 outparam = &inparam.g;
3470 break; 3473 break;
3471 case FDMSGON: 3474 case FDMSGON:
3472 UDP->flags |= FTD_MSG; 3475 UDP->flags |= FTD_MSG;
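The floppy hunk bounds the copy at offsetof(struct floppy_struct, name) so the trailing kernel pointer in that structure is never handed back to user space. A hedged illustration of an offsetof-bounded copy, using a stand-in structure (field names here are hypothetical, not the kernel's):

```c
#include <stdio.h>
#include <stddef.h>
#include <string.h>

/* Stand-in for a structure whose trailing pointer must not leak. */
struct geom {
	unsigned int size;
	unsigned int sect;
	unsigned int head;
	const char *name;	/* internal pointer: must not be copied out */
};

int main(void)
{
	struct geom in = { 2880, 18, 2, "drive-internal" };
	struct geom out;

	memset(&out, 0, sizeof(out));
	/* Copy only the fields that precede .name, as the ioctl fix does. */
	memcpy(&out, &in, offsetof(struct geom, name));

	printf("size=%u sect=%u head=%u name=%p\n",
	       out.size, out.sect, out.head, (void *)out.name);
	return 0;
}
```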
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index a71d817e900d..429d20131c7e 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info)
2670 list_del(&gnt_list_entry->node); 2670 list_del(&gnt_list_entry->node);
2671 gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL); 2671 gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
2672 rinfo->persistent_gnts_c--; 2672 rinfo->persistent_gnts_c--;
2673 __free_page(gnt_list_entry->page); 2673 gnt_list_entry->gref = GRANT_INVALID_REF;
2674 kfree(gnt_list_entry); 2674 list_add_tail(&gnt_list_entry->node, &rinfo->grants);
2675 } 2675 }
2676 2676
2677 spin_unlock_irqrestore(&rinfo->ring_lock, flags); 2677 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
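Rather than freeing a purged persistent grant outright, the hunk above invalidates its reference and returns the entry to the ring's free-grant list for reuse. A tiny sketch of that recycle-instead-of-free pattern with a plain linked list (types, names, and the GRANT_INVALID_REF value are invented for the example):

```c
#include <stdio.h>
#include <stdlib.h>

#define GRANT_INVALID_REF 0	/* assumption: 0 marks "no grant reference" */

struct grant {
	int gref;
	struct grant *next;
};

static struct grant *free_list;

/* Instead of freeing the entry, invalidate it and put it back on the list. */
static void recycle(struct grant *g)
{
	g->gref = GRANT_INVALID_REF;
	g->next = free_list;
	free_list = g;
}

int main(void)
{
	struct grant *g = malloc(sizeof(*g));

	g->gref = 42;
	recycle(g);
	printf("free list head gref=%d\n", free_list->gref);
	free(g);
	return 0;
}
```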
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 963bb0309e25..ea6238ed5c0e 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -543,6 +543,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
543 } 543 }
544 clear_bit(HCI_UART_PROTO_SET, &hu->flags); 544 clear_bit(HCI_UART_PROTO_SET, &hu->flags);
545 545
546 percpu_free_rwsem(&hu->proto_lock);
547
546 kfree(hu); 548 kfree(hu);
547} 549}
548 550
diff --git a/drivers/clk/x86/clk-pmc-atom.c b/drivers/clk/x86/clk-pmc-atom.c
index 08ef69945ffb..d977193842df 100644
--- a/drivers/clk/x86/clk-pmc-atom.c
+++ b/drivers/clk/x86/clk-pmc-atom.c
@@ -55,6 +55,7 @@ struct clk_plt_data {
55 u8 nparents; 55 u8 nparents;
56 struct clk_plt *clks[PMC_CLK_NUM]; 56 struct clk_plt *clks[PMC_CLK_NUM];
57 struct clk_lookup *mclk_lookup; 57 struct clk_lookup *mclk_lookup;
58 struct clk_lookup *ether_clk_lookup;
58}; 59};
59 60
60/* Return an index in parent table */ 61/* Return an index in parent table */
@@ -186,13 +187,6 @@ static struct clk_plt *plt_clk_register(struct platform_device *pdev, int id,
186 pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE; 187 pclk->reg = base + PMC_CLK_CTL_OFFSET + id * PMC_CLK_CTL_SIZE;
187 spin_lock_init(&pclk->lock); 188 spin_lock_init(&pclk->lock);
188 189
189 /*
190 * If the clock was already enabled by the firmware mark it as critical
191 * to avoid it being gated by the clock framework if no driver owns it.
192 */
193 if (plt_clk_is_enabled(&pclk->hw))
194 init.flags |= CLK_IS_CRITICAL;
195
196 ret = devm_clk_hw_register(&pdev->dev, &pclk->hw); 190 ret = devm_clk_hw_register(&pdev->dev, &pclk->hw);
197 if (ret) { 191 if (ret) {
198 pclk = ERR_PTR(ret); 192 pclk = ERR_PTR(ret);
@@ -351,11 +345,20 @@ static int plt_clk_probe(struct platform_device *pdev)
351 goto err_unreg_clk_plt; 345 goto err_unreg_clk_plt;
352 } 346 }
353 347
348 data->ether_clk_lookup = clkdev_hw_create(&data->clks[4]->hw,
349 "ether_clk", NULL);
350 if (!data->ether_clk_lookup) {
351 err = -ENOMEM;
352 goto err_drop_mclk;
353 }
354
354 plt_clk_free_parent_names_loop(parent_names, data->nparents); 355 plt_clk_free_parent_names_loop(parent_names, data->nparents);
355 356
356 platform_set_drvdata(pdev, data); 357 platform_set_drvdata(pdev, data);
357 return 0; 358 return 0;
358 359
360err_drop_mclk:
361 clkdev_drop(data->mclk_lookup);
359err_unreg_clk_plt: 362err_unreg_clk_plt:
360 plt_clk_unregister_loop(data, i); 363 plt_clk_unregister_loop(data, i);
361 plt_clk_unregister_parents(data); 364 plt_clk_unregister_parents(data);
@@ -369,6 +372,7 @@ static int plt_clk_remove(struct platform_device *pdev)
369 372
370 data = platform_get_drvdata(pdev); 373 data = platform_get_drvdata(pdev);
371 374
375 clkdev_drop(data->ether_clk_lookup);
372 clkdev_drop(data->mclk_lookup); 376 clkdev_drop(data->mclk_lookup);
373 plt_clk_unregister_loop(data, PMC_CLK_NUM); 377 plt_clk_unregister_loop(data, PMC_CLK_NUM);
374 plt_clk_unregister_parents(data); 378 plt_clk_unregister_parents(data);
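The clk-pmc-atom change registers a second clkdev lookup ("ether_clk") and unwinds the first one on failure through a new err_drop_mclk label. The goto-based unwind it extends is a common probe pattern; below is a generic, self-contained sketch of that pattern only, not the driver's actual code (the acquire/release helpers stand in for clkdev_hw_create()/clkdev_drop()):

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for clkdev_hw_create()/clkdev_drop(). */
static void *acquire(const char *name) { printf("register %s\n", name); return malloc(1); }
static void release(const char *name, void *res) { printf("drop %s\n", name); free(res); }

static int probe(void)
{
	void *mclk, *ether_clk;
	int err;

	mclk = acquire("mclk lookup");
	if (!mclk)
		return -ENOMEM;

	ether_clk = acquire("ether_clk lookup");
	if (!ether_clk) {
		err = -ENOMEM;
		goto err_drop_mclk;	/* unwind in reverse order of setup */
	}

	/* ... rest of probe ... */
	return 0;

err_drop_mclk:
	release("mclk lookup", mclk);
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}
```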
diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
index ec8a4376f74f..2fab18fae4fc 100644
--- a/drivers/clocksource/timer-atmel-pit.c
+++ b/drivers/clocksource/timer-atmel-pit.c
@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
180 data->base = of_iomap(node, 0); 180 data->base = of_iomap(node, 0);
181 if (!data->base) { 181 if (!data->base) {
182 pr_err("Could not map PIT address\n"); 182 pr_err("Could not map PIT address\n");
183 return -ENXIO; 183 ret = -ENXIO;
184 goto exit;
184 } 185 }
185 186
186 data->mck = of_clk_get(node, 0); 187 data->mck = of_clk_get(node, 0);
187 if (IS_ERR(data->mck)) { 188 if (IS_ERR(data->mck)) {
188 pr_err("Unable to get mck clk\n"); 189 pr_err("Unable to get mck clk\n");
189 return PTR_ERR(data->mck); 190 ret = PTR_ERR(data->mck);
191 goto exit;
190 } 192 }
191 193
192 ret = clk_prepare_enable(data->mck); 194 ret = clk_prepare_enable(data->mck);
193 if (ret) { 195 if (ret) {
194 pr_err("Unable to enable mck\n"); 196 pr_err("Unable to enable mck\n");
195 return ret; 197 goto exit;
196 } 198 }
197 199
198 /* Get the interrupts property */ 200 /* Get the interrupts property */
199 data->irq = irq_of_parse_and_map(node, 0); 201 data->irq = irq_of_parse_and_map(node, 0);
200 if (!data->irq) { 202 if (!data->irq) {
201 pr_err("Unable to get IRQ from DT\n"); 203 pr_err("Unable to get IRQ from DT\n");
202 return -EINVAL; 204 ret = -EINVAL;
205 goto exit;
203 } 206 }
204 207
205 /* 208 /*
@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
227 ret = clocksource_register_hz(&data->clksrc, pit_rate); 230 ret = clocksource_register_hz(&data->clksrc, pit_rate);
228 if (ret) { 231 if (ret) {
229 pr_err("Failed to register clocksource\n"); 232 pr_err("Failed to register clocksource\n");
230 return ret; 233 goto exit;
231 } 234 }
232 235
233 /* Set up irq handler */ 236 /* Set up irq handler */
@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
236 "at91_tick", data); 239 "at91_tick", data);
237 if (ret) { 240 if (ret) {
238 pr_err("Unable to setup IRQ\n"); 241 pr_err("Unable to setup IRQ\n");
239 return ret; 242 clocksource_unregister(&data->clksrc);
243 goto exit;
240 } 244 }
241 245
242 /* Set up and register clockevents */ 246 /* Set up and register clockevents */
@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
254 clockevents_register_device(&data->clkevt); 258 clockevents_register_device(&data->clkevt);
255 259
256 return 0; 260 return 0;
261
262exit:
263 kfree(data);
264 return ret;
257} 265}
258TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit", 266TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
259 at91sam926x_pit_dt_init); 267 at91sam926x_pit_dt_init);
diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c
index c020038ebfab..cf93f6419b51 100644
--- a/drivers/clocksource/timer-fttmr010.c
+++ b/drivers/clocksource/timer-fttmr010.c
@@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
130 cr &= ~fttmr010->t1_enable_val; 130 cr &= ~fttmr010->t1_enable_val;
131 writel(cr, fttmr010->base + TIMER_CR); 131 writel(cr, fttmr010->base + TIMER_CR);
132 132
133 /* Setup the match register forward/backward in time */ 133 if (fttmr010->count_down) {
134 cr = readl(fttmr010->base + TIMER1_COUNT); 134 /*
135 if (fttmr010->count_down) 135 * ASPEED Timer Controller will load TIMER1_LOAD register
136 cr -= cycles; 136 * into TIMER1_COUNT register when the timer is re-enabled.
137 else 137 */
138 cr += cycles; 138 writel(cycles, fttmr010->base + TIMER1_LOAD);
139 writel(cr, fttmr010->base + TIMER1_MATCH1); 139 } else {
140 /* Setup the match register forward in time */
141 cr = readl(fttmr010->base + TIMER1_COUNT);
142 writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
143 }
140 144
141 /* Start */ 145 /* Start */
142 cr = readl(fttmr010->base + TIMER_CR); 146 cr = readl(fttmr010->base + TIMER_CR);
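The fttmr010 hunk distinguishes how the next event is programmed on down-counting versus up-counting hardware: a down-counting (ASPEED-style) timer gets the delta written to its reload register and fires at zero, while an up-counting timer gets a match value relative to the current count. A small sketch of just that decision, with plain variables standing in for the TIMER1_LOAD/COUNT/MATCH1 registers:

```c
#include <stdio.h>
#include <stdint.h>

/* Fake registers standing in for TIMER1_LOAD, TIMER1_COUNT, TIMER1_MATCH1. */
static uint32_t reg_load, reg_count = 1000, reg_match;

/*
 * Program the next event the way the hunk above does: a down-counting
 * timer is reloaded with the delta, an up-counting timer gets a match
 * value forward of the current count.
 */
static void set_next_event(uint32_t cycles, int count_down)
{
	if (count_down)
		reg_load = cycles;		/* reloaded into COUNT on re-enable */
	else
		reg_match = reg_count + cycles;	/* match forward in time */
}

int main(void)
{
	set_next_event(500, 1);
	printf("down-counting: LOAD=%u\n", reg_load);
	set_next_event(500, 0);
	printf("up-counting:   MATCH=%u (COUNT=%u)\n", reg_match, reg_count);
	return 0;
}
```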
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 29e2e1a78a43..6949a9113dbb 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
97 return -ENXIO; 97 return -ENXIO;
98 } 98 }
99 99
100 if (!of_machine_is_compatible("ti,am43"))
101 ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
102
100 ti_32k_timer.counter = ti_32k_timer.base; 103 ti_32k_timer.counter = ti_32k_timer.base;
101 104
102 /* 105 /*
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index a1830fa25fc5..2a3675c24032 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -44,7 +44,7 @@ enum _msm8996_version {
44 44
45struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev; 45struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
46 46
47static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void) 47static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
48{ 48{
49 size_t len; 49 size_t len;
50 u32 *msm_id; 50 u32 *msm_id;
@@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void)
222} 222}
223module_init(qcom_cpufreq_kryo_init); 223module_init(qcom_cpufreq_kryo_init);
224 224
225static void __init qcom_cpufreq_kryo_exit(void) 225static void __exit qcom_cpufreq_kryo_exit(void)
226{ 226{
227 platform_device_unregister(kryo_cpufreq_pdev); 227 platform_device_unregister(kryo_cpufreq_pdev);
228 platform_driver_unregister(&qcom_cpufreq_kryo_driver); 228 platform_driver_unregister(&qcom_cpufreq_kryo_driver);
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 218739b961fe..72790d88236d 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -38,6 +38,17 @@ static DEFINE_MUTEX(sev_cmd_mutex);
38static struct sev_misc_dev *misc_dev; 38static struct sev_misc_dev *misc_dev;
39static struct psp_device *psp_master; 39static struct psp_device *psp_master;
40 40
41static int psp_cmd_timeout = 100;
42module_param(psp_cmd_timeout, int, 0644);
43MODULE_PARM_DESC(psp_cmd_timeout, " default timeout value, in seconds, for PSP commands");
44
45static int psp_probe_timeout = 5;
46module_param(psp_probe_timeout, int, 0644);
47MODULE_PARM_DESC(psp_probe_timeout, " default timeout value, in seconds, during PSP device probe");
48
49static bool psp_dead;
50static int psp_timeout;
51
41static struct psp_device *psp_alloc_struct(struct sp_device *sp) 52static struct psp_device *psp_alloc_struct(struct sp_device *sp)
42{ 53{
43 struct device *dev = sp->dev; 54 struct device *dev = sp->dev;
@@ -82,10 +93,19 @@ done:
82 return IRQ_HANDLED; 93 return IRQ_HANDLED;
83} 94}
84 95
85static void sev_wait_cmd_ioc(struct psp_device *psp, unsigned int *reg) 96static int sev_wait_cmd_ioc(struct psp_device *psp,
97 unsigned int *reg, unsigned int timeout)
86{ 98{
87 wait_event(psp->sev_int_queue, psp->sev_int_rcvd); 99 int ret;
100
101 ret = wait_event_timeout(psp->sev_int_queue,
102 psp->sev_int_rcvd, timeout * HZ);
103 if (!ret)
104 return -ETIMEDOUT;
105
88 *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg); 106 *reg = ioread32(psp->io_regs + psp->vdata->cmdresp_reg);
107
108 return 0;
89} 109}
90 110
91static int sev_cmd_buffer_len(int cmd) 111static int sev_cmd_buffer_len(int cmd)
@@ -133,12 +153,15 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
133 if (!psp) 153 if (!psp)
134 return -ENODEV; 154 return -ENODEV;
135 155
156 if (psp_dead)
157 return -EBUSY;
158
136 /* Get the physical address of the command buffer */ 159 /* Get the physical address of the command buffer */
137 phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; 160 phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0;
138 phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0; 161 phys_msb = data ? upper_32_bits(__psp_pa(data)) : 0;
139 162
140 dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x\n", 163 dev_dbg(psp->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n",
141 cmd, phys_msb, phys_lsb); 164 cmd, phys_msb, phys_lsb, psp_timeout);
142 165
143 print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, 166 print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data,
144 sev_cmd_buffer_len(cmd), false); 167 sev_cmd_buffer_len(cmd), false);
@@ -154,7 +177,18 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret)
154 iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg); 177 iowrite32(reg, psp->io_regs + psp->vdata->cmdresp_reg);
155 178
156 /* wait for command completion */ 179 /* wait for command completion */
157 sev_wait_cmd_ioc(psp, &reg); 180 ret = sev_wait_cmd_ioc(psp, &reg, psp_timeout);
181 if (ret) {
182 if (psp_ret)
183 *psp_ret = 0;
184
185 dev_err(psp->dev, "sev command %#x timed out, disabling PSP \n", cmd);
186 psp_dead = true;
187
188 return ret;
189 }
190
191 psp_timeout = psp_cmd_timeout;
158 192
159 if (psp_ret) 193 if (psp_ret)
160 *psp_ret = reg & PSP_CMDRESP_ERR_MASK; 194 *psp_ret = reg & PSP_CMDRESP_ERR_MASK;
@@ -888,6 +922,8 @@ void psp_pci_init(void)
888 922
889 psp_master = sp->psp_data; 923 psp_master = sp->psp_data;
890 924
925 psp_timeout = psp_probe_timeout;
926
891 if (sev_get_api_version()) 927 if (sev_get_api_version())
892 goto err; 928 goto err;
893 929
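The PSP change converts the blocking command wait into a timed wait and latches a psp_dead flag after the first timeout so later commands fail fast instead of hanging. A user-space sketch of that "wait with deadline, then mark the device dead" logic, using a polling loop in place of wait_event_timeout() (all names here are illustrative):

```c
#include <stdio.h>
#include <stdbool.h>
#include <time.h>
#include <errno.h>

static bool psp_dead;
static volatile bool irq_seen;	/* would be set from the interrupt handler */

/* Poll for completion for up to timeout_s seconds (stands in for wait_event_timeout). */
static int wait_cmd_done(int timeout_s)
{
	time_t deadline = time(NULL) + timeout_s;

	while (!irq_seen) {
		if (time(NULL) >= deadline)
			return -ETIMEDOUT;
	}
	return 0;
}

static int do_cmd(int timeout_s)
{
	int ret;

	if (psp_dead)
		return -EBUSY;		/* fail fast once a command has timed out */

	ret = wait_cmd_done(timeout_s);
	if (ret) {
		fprintf(stderr, "command timed out, disabling device\n");
		psp_dead = true;
	}
	return ret;
}

int main(void)
{
	printf("first:  %d\n", do_cmd(1));	/* times out, latches psp_dead */
	printf("second: %d\n", do_cmd(1));	/* returns -EBUSY immediately */
	return 0;
}
```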
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index bbe4d72ca105..948806e57cee 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -535,6 +535,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
535 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); 535 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
536} 536}
537 537
538static const struct address_space_operations dev_dax_aops = {
539 .set_page_dirty = noop_set_page_dirty,
540 .invalidatepage = noop_invalidatepage,
541};
542
538static int dax_open(struct inode *inode, struct file *filp) 543static int dax_open(struct inode *inode, struct file *filp)
539{ 544{
540 struct dax_device *dax_dev = inode_dax(inode); 545 struct dax_device *dax_dev = inode_dax(inode);
@@ -544,6 +549,7 @@ static int dax_open(struct inode *inode, struct file *filp)
544 dev_dbg(&dev_dax->dev, "trace\n"); 549 dev_dbg(&dev_dax->dev, "trace\n");
545 inode->i_mapping = __dax_inode->i_mapping; 550 inode->i_mapping = __dax_inode->i_mapping;
546 inode->i_mapping->host = __dax_inode; 551 inode->i_mapping->host = __dax_inode;
552 inode->i_mapping->a_ops = &dev_dax_aops;
547 filp->f_mapping = inode->i_mapping; 553 filp->f_mapping = inode->i_mapping;
548 filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping); 554 filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
549 filp->private_data = dev_dax; 555 filp->private_data = dev_dax;
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index d8e159feb573..89110dfc7127 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -90,14 +90,17 @@ config EFI_ARMSTUB
90config EFI_ARMSTUB_DTB_LOADER 90config EFI_ARMSTUB_DTB_LOADER
91 bool "Enable the DTB loader" 91 bool "Enable the DTB loader"
92 depends on EFI_ARMSTUB 92 depends on EFI_ARMSTUB
93 default y
93 help 94 help
94 Select this config option to add support for the dtb= command 95 Select this config option to add support for the dtb= command
95 line parameter, allowing a device tree blob to be loaded into 96 line parameter, allowing a device tree blob to be loaded into
96 memory from the EFI System Partition by the stub. 97 memory from the EFI System Partition by the stub.
97 98
98 The device tree is typically provided by the platform or by 99 If the device tree is provided by the platform or by
99 the bootloader, so this option is mostly for development 100 the bootloader this option may not be needed.
100 purposes only. 101 But, for various development reasons and to maintain existing
102 functionality for bootloaders that do not have such support
103 this option is necessary.
101 104
102config EFI_BOOTLOADER_CONTROL 105config EFI_BOOTLOADER_CONTROL
103 tristate "EFI Bootloader Control" 106 tristate "EFI Bootloader Control"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index f8bbbb3a9504..0c791e35acf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -272,7 +272,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
272 272
273int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, 273int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
274 void **mem_obj, uint64_t *gpu_addr, 274 void **mem_obj, uint64_t *gpu_addr,
275 void **cpu_ptr) 275 void **cpu_ptr, bool mqd_gfx9)
276{ 276{
277 struct amdgpu_device *adev = (struct amdgpu_device *)kgd; 277 struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
278 struct amdgpu_bo *bo = NULL; 278 struct amdgpu_bo *bo = NULL;
@@ -287,6 +287,10 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
287 bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; 287 bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
288 bp.type = ttm_bo_type_kernel; 288 bp.type = ttm_bo_type_kernel;
289 bp.resv = NULL; 289 bp.resv = NULL;
290
291 if (mqd_gfx9)
292 bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
293
290 r = amdgpu_bo_create(adev, &bp, &bo); 294 r = amdgpu_bo_create(adev, &bp, &bo);
291 if (r) { 295 if (r) {
292 dev_err(adev->dev, 296 dev_err(adev->dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 2f379c183ed2..cc9aeab5468c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -136,7 +136,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
136/* Shared API */ 136/* Shared API */
137int alloc_gtt_mem(struct kgd_dev *kgd, size_t size, 137int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
138 void **mem_obj, uint64_t *gpu_addr, 138 void **mem_obj, uint64_t *gpu_addr,
139 void **cpu_ptr); 139 void **cpu_ptr, bool mqd_gfx9);
140void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj); 140void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj);
141void get_local_mem_info(struct kgd_dev *kgd, 141void get_local_mem_info(struct kgd_dev *kgd,
142 struct kfd_local_mem_info *mem_info); 142 struct kfd_local_mem_info *mem_info);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ea3f698aef5e..9803b91f3e77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -685,7 +685,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
685 685
686 while (true) { 686 while (true) {
687 temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); 687 temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
688 if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT) 688 if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
689 break; 689 break;
690 if (time_after(jiffies, end_jiffies)) 690 if (time_after(jiffies, end_jiffies))
691 return -ETIME; 691 return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 693ec5ea4950..8816c697b205 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -367,12 +367,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
367 break; 367 break;
368 case CHIP_POLARIS10: 368 case CHIP_POLARIS10:
369 if (type == CGS_UCODE_ID_SMU) { 369 if (type == CGS_UCODE_ID_SMU) {
370 if ((adev->pdev->device == 0x67df) && 370 if (((adev->pdev->device == 0x67df) &&
371 ((adev->pdev->revision == 0xe0) || 371 ((adev->pdev->revision == 0xe0) ||
372 (adev->pdev->revision == 0xe3) || 372 (adev->pdev->revision == 0xe3) ||
373 (adev->pdev->revision == 0xe4) || 373 (adev->pdev->revision == 0xe4) ||
374 (adev->pdev->revision == 0xe5) || 374 (adev->pdev->revision == 0xe5) ||
375 (adev->pdev->revision == 0xe7) || 375 (adev->pdev->revision == 0xe7) ||
376 (adev->pdev->revision == 0xef))) ||
377 ((adev->pdev->device == 0x6fdf) &&
376 (adev->pdev->revision == 0xef))) { 378 (adev->pdev->revision == 0xef))) {
377 info->is_kicker = true; 379 info->is_kicker = true;
378 strcpy(fw_name, "amdgpu/polaris10_k_smc.bin"); 380 strcpy(fw_name, "amdgpu/polaris10_k_smc.bin");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8843a06360fa..0f41d8647376 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -740,6 +740,7 @@ static const struct pci_device_id pciidlist[] = {
740 {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 740 {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
741 {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 741 {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
742 {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10}, 742 {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
743 {0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
743 /* Polaris12 */ 744 /* Polaris12 */
744 {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 745 {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
745 {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12}, 746 {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 0cc5190f4f36..5f3f54073818 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
258{ 258{
259 int i; 259 int i;
260 260
261 cancel_delayed_work_sync(&adev->vce.idle_work);
262
261 if (adev->vce.vcpu_bo == NULL) 263 if (adev->vce.vcpu_bo == NULL)
262 return 0; 264 return 0;
263 265
@@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
268 if (i == AMDGPU_MAX_VCE_HANDLES) 270 if (i == AMDGPU_MAX_VCE_HANDLES)
269 return 0; 271 return 0;
270 272
271 cancel_delayed_work_sync(&adev->vce.idle_work);
272 /* TODO: suspending running encoding sessions isn't supported */ 273 /* TODO: suspending running encoding sessions isn't supported */
273 return -EINVAL; 274 return -EINVAL;
274} 275}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index fd654a4406db..400fc74bbae2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -153,11 +153,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
153 unsigned size; 153 unsigned size;
154 void *ptr; 154 void *ptr;
155 155
156 cancel_delayed_work_sync(&adev->vcn.idle_work);
157
156 if (adev->vcn.vcpu_bo == NULL) 158 if (adev->vcn.vcpu_bo == NULL)
157 return 0; 159 return 0;
158 160
159 cancel_delayed_work_sync(&adev->vcn.idle_work);
160
161 size = amdgpu_bo_size(adev->vcn.vcpu_bo); 161 size = amdgpu_bo_size(adev->vcn.vcpu_bo);
162 ptr = adev->vcn.cpu_addr; 162 ptr = adev->vcn.cpu_addr;
163 163
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 1b048715ab8a..29ac74f40dce 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -457,7 +457,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
457 457
458 if (kfd->kfd2kgd->init_gtt_mem_allocation( 458 if (kfd->kfd2kgd->init_gtt_mem_allocation(
459 kfd->kgd, size, &kfd->gtt_mem, 459 kfd->kgd, size, &kfd->gtt_mem,
460 &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)){ 460 &kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
461 false)) {
461 dev_err(kfd_device, "Could not allocate %d bytes\n", size); 462 dev_err(kfd_device, "Could not allocate %d bytes\n", size);
462 goto out; 463 goto out;
463 } 464 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 7a61f38c09e6..01494752c36a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -62,9 +62,20 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
62 struct amd_iommu_device_info iommu_info; 62 struct amd_iommu_device_info iommu_info;
63 unsigned int pasid_limit; 63 unsigned int pasid_limit;
64 int err; 64 int err;
65 struct kfd_topology_device *top_dev;
65 66
66 if (!kfd->device_info->needs_iommu_device) 67 top_dev = kfd_topology_device_by_id(kfd->id);
68
69 /*
70 * Overwrite ATS capability according to needs_iommu_device to fix
71 * potential missing corresponding bit in CRAT of BIOS.
72 */
73 if (!kfd->device_info->needs_iommu_device) {
74 top_dev->node_props.capability &= ~HSA_CAP_ATS_PRESENT;
67 return 0; 75 return 0;
76 }
77
78 top_dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
68 79
69 iommu_info.flags = 0; 80 iommu_info.flags = 0;
70 err = amd_iommu_device_info(kfd->pdev, &iommu_info); 81 err = amd_iommu_device_info(kfd->pdev, &iommu_info);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index f5fc3675f21e..0cedb37cf513 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -88,7 +88,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
88 ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), 88 ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
89 &((*mqd_mem_obj)->gtt_mem), 89 &((*mqd_mem_obj)->gtt_mem),
90 &((*mqd_mem_obj)->gpu_addr), 90 &((*mqd_mem_obj)->gpu_addr),
91 (void *)&((*mqd_mem_obj)->cpu_ptr)); 91 (void *)&((*mqd_mem_obj)->cpu_ptr), true);
92 } else 92 } else
93 retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd), 93 retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
94 mqd_mem_obj); 94 mqd_mem_obj);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f971710f1c91..92b285ca73aa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -806,6 +806,7 @@ int kfd_topology_add_device(struct kfd_dev *gpu);
806int kfd_topology_remove_device(struct kfd_dev *gpu); 806int kfd_topology_remove_device(struct kfd_dev *gpu);
807struct kfd_topology_device *kfd_topology_device_by_proximity_domain( 807struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
808 uint32_t proximity_domain); 808 uint32_t proximity_domain);
809struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id);
809struct kfd_dev *kfd_device_by_id(uint32_t gpu_id); 810struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
810struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); 811struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
811int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev); 812int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index bc95d4dfee2e..80f5db4ef75f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -63,22 +63,33 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
63 return device; 63 return device;
64} 64}
65 65
66struct kfd_dev *kfd_device_by_id(uint32_t gpu_id) 66struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id)
67{ 67{
68 struct kfd_topology_device *top_dev; 68 struct kfd_topology_device *top_dev = NULL;
69 struct kfd_dev *device = NULL; 69 struct kfd_topology_device *ret = NULL;
70 70
71 down_read(&topology_lock); 71 down_read(&topology_lock);
72 72
73 list_for_each_entry(top_dev, &topology_device_list, list) 73 list_for_each_entry(top_dev, &topology_device_list, list)
74 if (top_dev->gpu_id == gpu_id) { 74 if (top_dev->gpu_id == gpu_id) {
75 device = top_dev->gpu; 75 ret = top_dev;
76 break; 76 break;
77 } 77 }
78 78
79 up_read(&topology_lock); 79 up_read(&topology_lock);
80 80
81 return device; 81 return ret;
82}
83
84struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
85{
86 struct kfd_topology_device *top_dev;
87
88 top_dev = kfd_topology_device_by_id(gpu_id);
89 if (!top_dev)
90 return NULL;
91
92 return top_dev->gpu;
82} 93}
83 94
84struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev) 95struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 800f481a6995..96875950845a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -641,6 +641,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
641 return NULL; 641 return NULL;
642} 642}
643 643
644static void emulated_link_detect(struct dc_link *link)
645{
646 struct dc_sink_init_data sink_init_data = { 0 };
647 struct display_sink_capability sink_caps = { 0 };
648 enum dc_edid_status edid_status;
649 struct dc_context *dc_ctx = link->ctx;
650 struct dc_sink *sink = NULL;
651 struct dc_sink *prev_sink = NULL;
652
653 link->type = dc_connection_none;
654 prev_sink = link->local_sink;
655
656 if (prev_sink != NULL)
657 dc_sink_retain(prev_sink);
658
659 switch (link->connector_signal) {
660 case SIGNAL_TYPE_HDMI_TYPE_A: {
661 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
662 sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
663 break;
664 }
665
666 case SIGNAL_TYPE_DVI_SINGLE_LINK: {
667 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
668 sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
669 break;
670 }
671
672 case SIGNAL_TYPE_DVI_DUAL_LINK: {
673 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
674 sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
675 break;
676 }
677
678 case SIGNAL_TYPE_LVDS: {
679 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
680 sink_caps.signal = SIGNAL_TYPE_LVDS;
681 break;
682 }
683
684 case SIGNAL_TYPE_EDP: {
685 sink_caps.transaction_type =
686 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
687 sink_caps.signal = SIGNAL_TYPE_EDP;
688 break;
689 }
690
691 case SIGNAL_TYPE_DISPLAY_PORT: {
692 sink_caps.transaction_type =
693 DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
694 sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
695 break;
696 }
697
698 default:
699 DC_ERROR("Invalid connector type! signal:%d\n",
700 link->connector_signal);
701 return;
702 }
703
704 sink_init_data.link = link;
705 sink_init_data.sink_signal = sink_caps.signal;
706
707 sink = dc_sink_create(&sink_init_data);
708 if (!sink) {
709 DC_ERROR("Failed to create sink!\n");
710 return;
711 }
712
713 link->local_sink = sink;
714
715 edid_status = dm_helpers_read_local_edid(
716 link->ctx,
717 link,
718 sink);
719
720 if (edid_status != EDID_OK)
721 DC_ERROR("Failed to read EDID");
722
723}
724
644static int dm_resume(void *handle) 725static int dm_resume(void *handle)
645{ 726{
646 struct amdgpu_device *adev = handle; 727 struct amdgpu_device *adev = handle;
@@ -654,6 +735,7 @@ static int dm_resume(void *handle)
654 struct drm_plane *plane; 735 struct drm_plane *plane;
655 struct drm_plane_state *new_plane_state; 736 struct drm_plane_state *new_plane_state;
656 struct dm_plane_state *dm_new_plane_state; 737 struct dm_plane_state *dm_new_plane_state;
738 enum dc_connection_type new_connection_type = dc_connection_none;
657 int ret; 739 int ret;
658 int i; 740 int i;
659 741
@@ -684,7 +766,13 @@ static int dm_resume(void *handle)
684 continue; 766 continue;
685 767
686 mutex_lock(&aconnector->hpd_lock); 768 mutex_lock(&aconnector->hpd_lock);
687 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 769 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
770 DRM_ERROR("KMS: Failed to detect connector\n");
771
772 if (aconnector->base.force && new_connection_type == dc_connection_none)
773 emulated_link_detect(aconnector->dc_link);
774 else
775 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
688 776
689 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 777 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
690 aconnector->fake_enable = false; 778 aconnector->fake_enable = false;
@@ -922,6 +1010,7 @@ static void handle_hpd_irq(void *param)
922 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param; 1010 struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
923 struct drm_connector *connector = &aconnector->base; 1011 struct drm_connector *connector = &aconnector->base;
924 struct drm_device *dev = connector->dev; 1012 struct drm_device *dev = connector->dev;
1013 enum dc_connection_type new_connection_type = dc_connection_none;
925 1014
926 /* In case of failure or MST no need to update connector status or notify the OS 1015 /* In case of failure or MST no need to update connector status or notify the OS
927 * since (for MST case) MST does this in it's own context. 1016 * since (for MST case) MST does this in it's own context.
@@ -931,7 +1020,21 @@ static void handle_hpd_irq(void *param)
931 if (aconnector->fake_enable) 1020 if (aconnector->fake_enable)
932 aconnector->fake_enable = false; 1021 aconnector->fake_enable = false;
933 1022
934 if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) { 1023 if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
1024 DRM_ERROR("KMS: Failed to detect connector\n");
1025
1026 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1027 emulated_link_detect(aconnector->dc_link);
1028
1029
1030 drm_modeset_lock_all(dev);
1031 dm_restore_drm_connector_state(dev, connector);
1032 drm_modeset_unlock_all(dev);
1033
1034 if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
1035 drm_kms_helper_hotplug_event(dev);
1036
1037 } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
935 amdgpu_dm_update_connector_after_detect(aconnector); 1038 amdgpu_dm_update_connector_after_detect(aconnector);
936 1039
937 1040
@@ -1031,6 +1134,7 @@ static void handle_hpd_rx_irq(void *param)
1031 struct drm_device *dev = connector->dev; 1134 struct drm_device *dev = connector->dev;
1032 struct dc_link *dc_link = aconnector->dc_link; 1135 struct dc_link *dc_link = aconnector->dc_link;
1033 bool is_mst_root_connector = aconnector->mst_mgr.mst_state; 1136 bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
1137 enum dc_connection_type new_connection_type = dc_connection_none;
1034 1138
1035 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio 1139 /* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
1036 * conflict, after implement i2c helper, this mutex should be 1140 * conflict, after implement i2c helper, this mutex should be
@@ -1042,7 +1146,24 @@ static void handle_hpd_rx_irq(void *param)
1042 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) && 1146 if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
1043 !is_mst_root_connector) { 1147 !is_mst_root_connector) {
1044 /* Downstream Port status changed. */ 1148 /* Downstream Port status changed. */
1045 if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) { 1149 if (!dc_link_detect_sink(dc_link, &new_connection_type))
1150 DRM_ERROR("KMS: Failed to detect connector\n");
1151
1152 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1153 emulated_link_detect(dc_link);
1154
1155 if (aconnector->fake_enable)
1156 aconnector->fake_enable = false;
1157
1158 amdgpu_dm_update_connector_after_detect(aconnector);
1159
1160
1161 drm_modeset_lock_all(dev);
1162 dm_restore_drm_connector_state(dev, connector);
1163 drm_modeset_unlock_all(dev);
1164
1165 drm_kms_helper_hotplug_event(dev);
1166 } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
1046 1167
1047 if (aconnector->fake_enable) 1168 if (aconnector->fake_enable)
1048 aconnector->fake_enable = false; 1169 aconnector->fake_enable = false;
@@ -1433,6 +1554,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1433 struct amdgpu_mode_info *mode_info = &adev->mode_info; 1554 struct amdgpu_mode_info *mode_info = &adev->mode_info;
1434 uint32_t link_cnt; 1555 uint32_t link_cnt;
1435 int32_t total_overlay_planes, total_primary_planes; 1556 int32_t total_overlay_planes, total_primary_planes;
1557 enum dc_connection_type new_connection_type = dc_connection_none;
1436 1558
1437 link_cnt = dm->dc->caps.max_links; 1559 link_cnt = dm->dc->caps.max_links;
1438 if (amdgpu_dm_mode_config_init(dm->adev)) { 1560 if (amdgpu_dm_mode_config_init(dm->adev)) {
@@ -1499,7 +1621,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
1499 1621
1500 link = dc_get_link_at_index(dm->dc, i); 1622 link = dc_get_link_at_index(dm->dc, i);
1501 1623
1502 if (dc_link_detect(link, DETECT_REASON_BOOT)) { 1624 if (!dc_link_detect_sink(link, &new_connection_type))
1625 DRM_ERROR("KMS: Failed to detect connector\n");
1626
1627 if (aconnector->base.force && new_connection_type == dc_connection_none) {
1628 emulated_link_detect(link);
1629 amdgpu_dm_update_connector_after_detect(aconnector);
1630
1631 } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
1503 amdgpu_dm_update_connector_after_detect(aconnector); 1632 amdgpu_dm_update_connector_after_detect(aconnector);
1504 register_backlight_device(dm, link); 1633 register_backlight_device(dm, link);
1505 } 1634 }
@@ -2494,7 +2623,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2494 if (dm_state && dm_state->freesync_capable) 2623 if (dm_state && dm_state->freesync_capable)
2495 stream->ignore_msa_timing_param = true; 2624 stream->ignore_msa_timing_param = true;
2496finish: 2625finish:
2497 if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL) 2626 if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
2498 dc_sink_release(sink); 2627 dc_sink_release(sink);
2499 2628
2500 return stream; 2629 return stream;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 37eaf72ace54..fced3c1c2ef5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -195,7 +195,7 @@ static bool program_hpd_filter(
195 return result; 195 return result;
196} 196}
197 197
198static bool detect_sink(struct dc_link *link, enum dc_connection_type *type) 198bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
199{ 199{
200 uint32_t is_hpd_high = 0; 200 uint32_t is_hpd_high = 0;
201 struct gpio *hpd_pin; 201 struct gpio *hpd_pin;
@@ -604,7 +604,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
604 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL) 604 if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
605 return false; 605 return false;
606 606
607 if (false == detect_sink(link, &new_connection_type)) { 607 if (false == dc_link_detect_sink(link, &new_connection_type)) {
608 BREAK_TO_DEBUGGER(); 608 BREAK_TO_DEBUGGER();
609 return false; 609 return false;
610 } 610 }
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index d43cefbc43d3..1b48ab9aea89 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -215,6 +215,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
215 215
216bool dc_link_is_dp_sink_present(struct dc_link *link); 216bool dc_link_is_dp_sink_present(struct dc_link *link);
217 217
218bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
218/* 219/*
219 * DPCD access interfaces 220 * DPCD access interfaces
220 */ 221 */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 14384d9675a8..b2f308766a9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2560,7 +2560,7 @@ static void pplib_apply_display_requirements(
2560 dc->prev_display_config = *pp_display_cfg; 2560 dc->prev_display_config = *pp_display_cfg;
2561} 2561}
2562 2562
2563void dce110_set_bandwidth( 2563static void dce110_set_bandwidth(
2564 struct dc *dc, 2564 struct dc *dc,
2565 struct dc_state *context, 2565 struct dc_state *context,
2566 bool decrease_allowed) 2566 bool decrease_allowed)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index e4c5db75c4c6..d6db3dbd9015 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
68 const struct dc_state *context, 68 const struct dc_state *context,
69 struct dm_pp_display_configuration *pp_display_cfg); 69 struct dm_pp_display_configuration *pp_display_cfg);
70 70
71void dce110_set_bandwidth(
72 struct dc *dc,
73 struct dc_state *context,
74 bool decrease_allowed);
75
76uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context); 71uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
77 72
78void dp_receiver_power_ctrl(struct dc_link *link, bool on); 73void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index 5853522a6182..eb0f5f9a973b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -244,17 +244,6 @@ static void dce120_update_dchub(
244 dh_data->dchub_info_valid = false; 244 dh_data->dchub_info_valid = false;
245} 245}
246 246
247static void dce120_set_bandwidth(
248 struct dc *dc,
249 struct dc_state *context,
250 bool decrease_allowed)
251{
252 if (context->stream_count <= 0)
253 return;
254
255 dce110_set_bandwidth(dc, context, decrease_allowed);
256}
257
258void dce120_hw_sequencer_construct(struct dc *dc) 247void dce120_hw_sequencer_construct(struct dc *dc)
259{ 248{
260 /* All registers used by dce11.2 match those in dce11 in offset and 249 /* All registers used by dce11.2 match those in dce11 in offset and
@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
263 dce110_hw_sequencer_construct(dc); 252 dce110_hw_sequencer_construct(dc);
264 dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating; 253 dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
265 dc->hwss.update_dchub = dce120_update_dchub; 254 dc->hwss.update_dchub = dce120_update_dchub;
266 dc->hwss.set_bandwidth = dce120_set_bandwidth;
267} 255}
268 256
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 14391b06080c..43b82e14007e 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -292,7 +292,7 @@ struct tile_config {
292struct kfd2kgd_calls { 292struct kfd2kgd_calls {
293 int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size, 293 int (*init_gtt_mem_allocation)(struct kgd_dev *kgd, size_t size,
294 void **mem_obj, uint64_t *gpu_addr, 294 void **mem_obj, uint64_t *gpu_addr,
295 void **cpu_ptr); 295 void **cpu_ptr, bool mqd_gfx9);
296 296
297 void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj); 297 void (*free_gtt_mem)(struct kgd_dev *kgd, void *mem_obj);
298 298
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 08b5bb219816..94d6dabec2dc 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -754,6 +754,7 @@ static int malidp_bind(struct device *dev)
754 drm->irq_enabled = true; 754 drm->irq_enabled = true;
755 755
756 ret = drm_vblank_init(drm, drm->mode_config.num_crtc); 756 ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
757 drm_crtc_vblank_reset(&malidp->crtc);
757 if (ret < 0) { 758 if (ret < 0) {
758 DRM_ERROR("failed to initialise vblank\n"); 759 DRM_ERROR("failed to initialise vblank\n");
759 goto vblank_fail; 760 goto vblank_fail;
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index c94a4422e0e9..2781e462c1ed 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -384,7 +384,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,
384 384
385static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev, 385static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
386 dma_addr_t *addrs, s32 *pitches, 386 dma_addr_t *addrs, s32 *pitches,
387 int num_planes, u16 w, u16 h, u32 fmt_id)
 387 int num_planes, u16 w, u16 h, u32 fmt_id,
388 const s16 *rgb2yuv_coeffs)
388{ 389{
389 u32 base = MALIDP500_SE_MEMWRITE_BASE; 390 u32 base = MALIDP500_SE_MEMWRITE_BASE;
390 u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); 391 u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -416,6 +417,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
416 417
417 malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h), 418 malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
418 MALIDP500_SE_MEMWRITE_OUT_SIZE); 419 MALIDP500_SE_MEMWRITE_OUT_SIZE);
420
421 if (rgb2yuv_coeffs) {
422 int i;
423
424 for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
425 malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
426 MALIDP500_SE_RGB_YUV_COEFFS + i * 4);
427 }
428 }
429
419 malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL); 430 malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);
420 431
421 return 0; 432 return 0;
@@ -658,7 +669,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,
658 669
659static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev, 670static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
660 dma_addr_t *addrs, s32 *pitches, 671 dma_addr_t *addrs, s32 *pitches,
661 int num_planes, u16 w, u16 h, u32 fmt_id)
 672 int num_planes, u16 w, u16 h, u32 fmt_id,
673 const s16 *rgb2yuv_coeffs)
662{ 674{
663 u32 base = MALIDP550_SE_MEMWRITE_BASE; 675 u32 base = MALIDP550_SE_MEMWRITE_BASE;
664 u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK); 676 u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);
@@ -689,6 +701,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
689 malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN, 701 malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
690 MALIDP550_SE_CONTROL); 702 MALIDP550_SE_CONTROL);
691 703
704 if (rgb2yuv_coeffs) {
705 int i;
706
707 for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
708 malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
709 MALIDP550_SE_RGB_YUV_COEFFS + i * 4);
710 }
711 }
712
692 return 0; 713 return 0;
693} 714}
694 715
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
index ad2e96915d44..9fc94c08190f 100644
--- a/drivers/gpu/drm/arm/malidp_hw.h
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -191,7 +191,8 @@ struct malidp_hw {
191 * @param fmt_id - internal format ID of output buffer 191 * @param fmt_id - internal format ID of output buffer
192 */ 192 */
193 int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs, 193 int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
194 s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
 194 s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id,
195 const s16 *rgb2yuv_coeffs);
195 196
196 /* 197 /*
197 * Disable the writing to memory of the next frame's content. 198 * Disable the writing to memory of the next frame's content.
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index ba6ae66387c9..91472e5e0c8b 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -26,6 +26,8 @@ struct malidp_mw_connector_state {
26 s32 pitches[2]; 26 s32 pitches[2];
27 u8 format; 27 u8 format;
28 u8 n_planes; 28 u8 n_planes;
29 bool rgb2yuv_initialized;
30 const s16 *rgb2yuv_coeffs;
29}; 31};
30 32
31static int malidp_mw_connector_get_modes(struct drm_connector *connector) 33static int malidp_mw_connector_get_modes(struct drm_connector *connector)
@@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector)
84static struct drm_connector_state * 86static struct drm_connector_state *
85malidp_mw_connector_duplicate_state(struct drm_connector *connector) 87malidp_mw_connector_duplicate_state(struct drm_connector *connector)
86{ 88{
87 struct malidp_mw_connector_state *mw_state;
 89 struct malidp_mw_connector_state *mw_state, *mw_current_state;
88 90
89 if (WARN_ON(!connector->state)) 91 if (WARN_ON(!connector->state))
90 return NULL; 92 return NULL;
@@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector)
93 if (!mw_state) 95 if (!mw_state)
94 return NULL; 96 return NULL;
95 97
96 /* No need to preserve any of our driver-local data */
 98 mw_current_state = to_mw_state(connector->state);
99 mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs;
100 mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized;
101
97 __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base); 102 __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);
98 103
99 return &mw_state->base; 104 return &mw_state->base;
@@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = {
108 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 113 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
109}; 114};
110 115
116static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = {
117 47, 157, 16,
118 -26, -87, 112,
119 112, -102, -10,
120 16, 128, 128
121};
122
111static int 123static int
112malidp_mw_encoder_atomic_check(struct drm_encoder *encoder, 124malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
113 struct drm_crtc_state *crtc_state, 125 struct drm_crtc_state *crtc_state,
@@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
157 } 169 }
158 mw_state->n_planes = n_planes; 170 mw_state->n_planes = n_planes;
159 171
172 if (fb->format->is_yuv)
173 mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited;
174
160 return 0; 175 return 0;
161} 176}
162 177
@@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
239 254
240 drm_writeback_queue_job(mw_conn, conn_state->writeback_job); 255 drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
241 conn_state->writeback_job = NULL; 256 conn_state->writeback_job = NULL;
242
243 hwdev->hw->enable_memwrite(hwdev, mw_state->addrs, 257 hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
244 mw_state->pitches, mw_state->n_planes, 258 mw_state->pitches, mw_state->n_planes,
245 fb->width, fb->height, mw_state->format);
 259 fb->width, fb->height, mw_state->format,
260 !mw_state->rgb2yuv_initialized ?
261 mw_state->rgb2yuv_coeffs : NULL);
262 mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs;
246 } else { 263 } else {
247 DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n"); 264 DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
248 hwdev->hw->disable_memwrite(hwdev); 265 hwdev->hw->disable_memwrite(hwdev);
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
index 3579d36b2a71..6ffe849774f2 100644
--- a/drivers/gpu/drm/arm/malidp_regs.h
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -205,6 +205,7 @@
205#define MALIDP500_SE_BASE 0x00c00 205#define MALIDP500_SE_BASE 0x00c00
206#define MALIDP500_SE_CONTROL 0x00c0c 206#define MALIDP500_SE_CONTROL 0x00c0c
207#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c 207#define MALIDP500_SE_MEMWRITE_OUT_SIZE 0x00c2c
208#define MALIDP500_SE_RGB_YUV_COEFFS 0x00C74
208#define MALIDP500_SE_MEMWRITE_BASE 0x00e00 209#define MALIDP500_SE_MEMWRITE_BASE 0x00e00
209#define MALIDP500_DC_IRQ_BASE 0x00f00 210#define MALIDP500_DC_IRQ_BASE 0x00f00
210#define MALIDP500_CONFIG_VALID 0x00f00 211#define MALIDP500_CONFIG_VALID 0x00f00
@@ -238,6 +239,7 @@
238#define MALIDP550_SE_CONTROL 0x08010 239#define MALIDP550_SE_CONTROL 0x08010
239#define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7) 240#define MALIDP550_SE_MEMWRITE_ONESHOT (1 << 7)
240#define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030 241#define MALIDP550_SE_MEMWRITE_OUT_SIZE 0x08030
242#define MALIDP550_SE_RGB_YUV_COEFFS 0x08078
241#define MALIDP550_SE_MEMWRITE_BASE 0x08100 243#define MALIDP550_SE_MEMWRITE_BASE 0x08100
242#define MALIDP550_DC_BASE 0x0c000 244#define MALIDP550_DC_BASE 0x0c000
243#define MALIDP550_DC_CONTROL 0x0c010 245#define MALIDP550_DC_CONTROL 0x0c010
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3eb061e11e2e..018fcdb353d2 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -2067,7 +2067,7 @@ static void __drm_state_dump(struct drm_device *dev, struct drm_printer *p,
2067 struct drm_connector *connector; 2067 struct drm_connector *connector;
2068 struct drm_connector_list_iter conn_iter; 2068 struct drm_connector_list_iter conn_iter;
2069 2069
2070 if (!drm_core_check_feature(dev, DRIVER_ATOMIC))
 2070 if (!drm_drv_uses_atomic_modeset(dev))
2071 return; 2071 return;
2072 2072
2073 list_for_each_entry(plane, &config->plane_list, head) { 2073 list_for_each_entry(plane, &config->plane_list, head) {
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 6f28fe58f169..373bd4c2b698 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -151,7 +151,7 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
151 return ret; 151 return ret;
152 } 152 }
153 153
154 if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
 154 if (drm_drv_uses_atomic_modeset(dev)) {
155 ret = drm_atomic_debugfs_init(minor); 155 ret = drm_atomic_debugfs_init(minor);
156 if (ret) { 156 if (ret) {
157 DRM_ERROR("Failed to create atomic debugfs files\n"); 157 DRM_ERROR("Failed to create atomic debugfs files\n");
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4b0dd20bccb8..16ec93b75dbf 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -2370,7 +2370,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
2370{ 2370{
2371 int c, o; 2371 int c, o;
2372 struct drm_connector *connector; 2372 struct drm_connector *connector;
2373 const struct drm_connector_helper_funcs *connector_funcs;
2374 int my_score, best_score, score; 2373 int my_score, best_score, score;
2375 struct drm_fb_helper_crtc **crtcs, *crtc; 2374 struct drm_fb_helper_crtc **crtcs, *crtc;
2376 struct drm_fb_helper_connector *fb_helper_conn; 2375 struct drm_fb_helper_connector *fb_helper_conn;
@@ -2399,8 +2398,6 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
2399 if (drm_has_preferred_mode(fb_helper_conn, width, height)) 2398 if (drm_has_preferred_mode(fb_helper_conn, width, height))
2400 my_score++; 2399 my_score++;
2401 2400
2402 connector_funcs = connector->helper_private;
2403
2404 /* 2401 /*
2405 * select a crtc for this connector and then attempt to configure 2402 * select a crtc for this connector and then attempt to configure
2406 * remaining connectors 2403 * remaining connectors
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index b902361dee6e..1d9a9d2fe0e0 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -24,7 +24,6 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/module.h> 25#include <linux/module.h>
26 26
27#include <drm/drm_device.h>
28#include <drm/drm_crtc.h> 27#include <drm/drm_crtc.h>
29#include <drm/drm_panel.h> 28#include <drm/drm_panel.h>
30 29
@@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
105 if (panel->connector) 104 if (panel->connector)
106 return -EBUSY; 105 return -EBUSY;
107 106
108 panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
109 if (!panel->link) {
110 dev_err(panel->dev, "failed to link panel to %s\n",
111 dev_name(connector->dev->dev));
112 return -EINVAL;
113 }
114
115 panel->connector = connector; 107 panel->connector = connector;
116 panel->drm = connector->dev; 108 panel->drm = connector->dev;
117 109
@@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach);
133 */ 125 */
134int drm_panel_detach(struct drm_panel *panel) 126int drm_panel_detach(struct drm_panel *panel)
135{ 127{
136 device_link_del(panel->link);
137
138 panel->connector = NULL; 128 panel->connector = NULL;
139 panel->drm = NULL; 129 panel->drm = NULL;
140 130
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index adb3cb27d31e..759278fef35a 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -97,6 +97,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
97{ 97{
98 int ret; 98 int ret;
99 99
100 WARN_ON(*fence);
101
100 *fence = drm_syncobj_fence_get(syncobj); 102 *fence = drm_syncobj_fence_get(syncobj);
101 if (*fence) 103 if (*fence)
102 return 1; 104 return 1;
@@ -743,6 +745,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
743 745
744 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) { 746 if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
745 for (i = 0; i < count; ++i) { 747 for (i = 0; i < count; ++i) {
748 if (entries[i].fence)
749 continue;
750
746 drm_syncobj_fence_get_or_add_callback(syncobjs[i], 751 drm_syncobj_fence_get_or_add_callback(syncobjs[i],
747 &entries[i].fence, 752 &entries[i].fence,
748 &entries[i].syncobj_cb, 753 &entries[i].syncobj_cb,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 9b2720b41571..83c1f46670bf 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
592 struct device *dev = &pdev->dev; 592 struct device *dev = &pdev->dev;
593 struct component_match *match = NULL; 593 struct component_match *match = NULL;
594 594
595 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
596
597 if (!dev->platform_data) { 595 if (!dev->platform_data) {
598 struct device_node *core_node; 596 struct device_node *core_node;
599 597
@@ -655,13 +653,30 @@ static int __init etnaviv_init(void)
655 for_each_compatible_node(np, NULL, "vivante,gc") { 653 for_each_compatible_node(np, NULL, "vivante,gc") {
656 if (!of_device_is_available(np)) 654 if (!of_device_is_available(np))
657 continue; 655 continue;
658 pdev = platform_device_register_simple("etnaviv", -1,
659 NULL, 0);
660 if (IS_ERR(pdev)) {
661 ret = PTR_ERR(pdev);
 656
 657 pdev = platform_device_alloc("etnaviv", -1);
 658 if (!pdev) {
 659 ret = -ENOMEM;
660 of_node_put(np);
661 goto unregister_platform_driver;
662 }
663 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
664 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
665
666 /*
667 * Apply the same DMA configuration to the virtual etnaviv
668 * device as the GPU we found. This assumes that all Vivante
669 * GPUs in the system share the same DMA constraints.
670 */
671 of_dma_configure(&pdev->dev, np, true);
672
673 ret = platform_device_add(pdev);
674 if (ret) {
675 platform_device_put(pdev);
662 of_node_put(np); 676 of_node_put(np);
663 goto unregister_platform_driver; 677 goto unregister_platform_driver;
664 } 678 }
679
665 etnaviv_drm = pdev; 680 etnaviv_drm = pdev;
666 of_node_put(np); 681 of_node_put(np);
667 break; 682 break;
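
For readers unfamiliar with the pattern the etnaviv hunk above switches to, here is a minimal sketch of the allocate-configure-add sequence; it only restates what the patch does, and "example-dev" plus the helper name are made up for illustration, not taken from the driver:

	#include <linux/dma-mapping.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	static struct platform_device *example_create_configured_pdev(struct device_node *np)
	{
		struct platform_device *pdev;

		pdev = platform_device_alloc("example-dev", -1);	/* allocated, not yet visible */
		if (!pdev)
			return NULL;

		/* DMA setup must happen before the device is added and can be bound. */
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
		of_dma_configure(&pdev->dev, np, true);

		if (platform_device_add(pdev)) {	/* register it now */
			platform_device_put(pdev);	/* drop the reference on failure */
			return NULL;
		}

		return pdev;
	}

The point of replacing platform_device_register_simple() is exactly this ordering: the DMA masks and OF-derived DMA configuration are applied while the device is still private, before any driver can bind to it.
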
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 72afa518edd9..94c1089ecf59 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -3210,6 +3210,7 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt)
3210 MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT); 3210 MMIO_D(BXT_DSI_PLL_ENABLE, D_BXT);
3211 3211
3212 MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT); 3212 MMIO_D(GEN9_CLKGATE_DIS_0, D_BXT);
3213 MMIO_D(GEN9_CLKGATE_DIS_4, D_BXT);
3213 3214
3214 MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT); 3215 MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_A), D_BXT);
3215 MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT); 3216 MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index c7afee37b2b8..9ad89e38f6c0 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1833,6 +1833,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
1833{ 1833{
1834 struct kvmgt_guest_info *info; 1834 struct kvmgt_guest_info *info;
1835 struct kvm *kvm; 1835 struct kvm *kvm;
1836 int idx;
1837 bool ret;
1836 1838
1837 if (!handle_valid(handle)) 1839 if (!handle_valid(handle))
1838 return false; 1840 return false;
@@ -1840,8 +1842,11 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
1840 info = (struct kvmgt_guest_info *)handle; 1842 info = (struct kvmgt_guest_info *)handle;
1841 kvm = info->kvm; 1843 kvm = info->kvm;
1842 1844
1843 return kvm_is_visible_gfn(kvm, gfn);
 1845 idx = srcu_read_lock(&kvm->srcu);
1846 ret = kvm_is_visible_gfn(kvm, gfn);
1847 srcu_read_unlock(&kvm->srcu, idx);
1844 1848
1849 return ret;
1845} 1850}
1846 1851
1847struct intel_gvt_mpt kvmgt_mpt = { 1852struct intel_gvt_mpt kvmgt_mpt = {
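
For context on the kvmgt fix above: kvm_is_visible_gfn() walks the guest memslots, which are protected by KVM's SRCU instance, so the caller must hold the read-side lock. A minimal sketch of that pattern, using only the kvm pointer and gfn from the surrounding code (the wrapper name is invented for illustration):

	#include <linux/kvm_host.h>

	static bool example_gfn_is_visible(struct kvm *kvm, gfn_t gfn)
	{
		int idx;
		bool visible;

		idx = srcu_read_lock(&kvm->srcu);	/* enter the SRCU read-side section */
		visible = kvm_is_visible_gfn(kvm, gfn);	/* memslot walk is safe in here */
		srcu_read_unlock(&kvm->srcu, idx);	/* pass back the same idx */

		return visible;
	}
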
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 994366035364..9bb9a85c992c 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -244,6 +244,34 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
244 244
245 /* set the bit 0:2(Core C-State ) to C0 */ 245 /* set the bit 0:2(Core C-State ) to C0 */
246 vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0; 246 vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
247
248 if (IS_BROXTON(vgpu->gvt->dev_priv)) {
249 vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
250 ~(BIT(0) | BIT(1));
251 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
252 ~PHY_POWER_GOOD;
253 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
254 ~PHY_POWER_GOOD;
255 vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &=
256 ~BIT(30);
257 vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &=
258 ~BIT(30);
259 vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
260 ~BXT_PHY_LANE_ENABLED;
261 vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
262 BXT_PHY_CMNLANE_POWERDOWN_ACK |
263 BXT_PHY_LANE_POWERDOWN_ACK;
264 vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
265 ~BXT_PHY_LANE_ENABLED;
266 vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
267 BXT_PHY_CMNLANE_POWERDOWN_ACK |
268 BXT_PHY_LANE_POWERDOWN_ACK;
269 vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
270 ~BXT_PHY_LANE_ENABLED;
271 vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
272 BXT_PHY_CMNLANE_POWERDOWN_ACK |
273 BXT_PHY_LANE_POWERDOWN_ACK;
274 }
247 } else { 275 } else {
248#define GVT_GEN8_MMIO_RESET_OFFSET (0x44200) 276#define GVT_GEN8_MMIO_RESET_OFFSET (0x44200)
249 /* only reset the engine related, so starting with 0x44200 277 /* only reset the engine related, so starting with 0x44200
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index a4e8e3cf74fd..c628be05fbfe 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -281,6 +281,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
281 intel_vgpu_clean_submission(vgpu); 281 intel_vgpu_clean_submission(vgpu);
282 intel_vgpu_clean_display(vgpu); 282 intel_vgpu_clean_display(vgpu);
283 intel_vgpu_clean_opregion(vgpu); 283 intel_vgpu_clean_opregion(vgpu);
284 intel_vgpu_reset_ggtt(vgpu, true);
284 intel_vgpu_clean_gtt(vgpu); 285 intel_vgpu_clean_gtt(vgpu);
285 intel_gvt_hypervisor_detach_vgpu(vgpu); 286 intel_gvt_hypervisor_detach_vgpu(vgpu);
286 intel_vgpu_free_resource(vgpu); 287 intel_vgpu_free_resource(vgpu);
diff --git a/drivers/gpu/drm/pl111/pl111_vexpress.c b/drivers/gpu/drm/pl111/pl111_vexpress.c
index a534b225e31b..5fa0441bb6df 100644
--- a/drivers/gpu/drm/pl111/pl111_vexpress.c
+++ b/drivers/gpu/drm/pl111/pl111_vexpress.c
@@ -111,7 +111,8 @@ static int vexpress_muxfpga_probe(struct platform_device *pdev)
111} 111}
112 112
113static const struct of_device_id vexpress_muxfpga_match[] = { 113static const struct of_device_id vexpress_muxfpga_match[] = {
114 { .compatible = "arm,vexpress-muxfpga", }
 114 { .compatible = "arm,vexpress-muxfpga", },
115 {}
115}; 116};
116 117
117static struct platform_driver vexpress_muxfpga_driver = { 118static struct platform_driver vexpress_muxfpga_driver = {
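
Background for the one-line pl111 fix above: of_device_id tables are walked until an all-zero entry, so a table without a sentinel lets the walk run past the end of the array. The usual shape, shown here with a made-up compatible string purely for illustration:

	#include <linux/mod_devicetable.h>

	static const struct of_device_id example_match[] = {
		{ .compatible = "vendor,example-device", },
		{ /* sentinel: terminates the table walk */ }
	};
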
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index dd19d674055c..8b0cd08034e0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -418,7 +418,6 @@ static const struct of_device_id sun4i_drv_of_table[] = {
418 { .compatible = "allwinner,sun8i-a33-display-engine" }, 418 { .compatible = "allwinner,sun8i-a33-display-engine" },
419 { .compatible = "allwinner,sun8i-a83t-display-engine" }, 419 { .compatible = "allwinner,sun8i-a83t-display-engine" },
420 { .compatible = "allwinner,sun8i-h3-display-engine" }, 420 { .compatible = "allwinner,sun8i-h3-display-engine" },
421 { .compatible = "allwinner,sun8i-r40-display-engine" },
422 { .compatible = "allwinner,sun8i-v3s-display-engine" }, 421 { .compatible = "allwinner,sun8i-v3s-display-engine" },
423 { .compatible = "allwinner,sun9i-a80-display-engine" }, 422 { .compatible = "allwinner,sun9i-a80-display-engine" },
424 { } 423 { }
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 82502b351aec..a564b5dfe082 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -398,7 +398,6 @@ static struct regmap_config sun8i_hdmi_phy_regmap_config = {
398 398
399static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = { 399static const struct sun8i_hdmi_phy_variant sun50i_a64_hdmi_phy = {
400 .has_phy_clk = true, 400 .has_phy_clk = true,
401 .has_second_pll = true,
402 .phy_init = &sun8i_hdmi_phy_init_h3, 401 .phy_init = &sun8i_hdmi_phy_init_h3,
403 .phy_disable = &sun8i_hdmi_phy_disable_h3, 402 .phy_disable = &sun8i_hdmi_phy_disable_h3,
404 .phy_config = &sun8i_hdmi_phy_config_h3, 403 .phy_config = &sun8i_hdmi_phy_config_h3,
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index fc3713608f78..cb65b0ed53fd 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -545,22 +545,6 @@ static const struct sun8i_mixer_cfg sun8i_h3_mixer0_cfg = {
545 .vi_num = 1, 545 .vi_num = 1,
546}; 546};
547 547
548static const struct sun8i_mixer_cfg sun8i_r40_mixer0_cfg = {
549 .ccsc = 0,
550 .mod_rate = 297000000,
551 .scaler_mask = 0xf,
552 .ui_num = 3,
553 .vi_num = 1,
554};
555
556static const struct sun8i_mixer_cfg sun8i_r40_mixer1_cfg = {
557 .ccsc = 1,
558 .mod_rate = 297000000,
559 .scaler_mask = 0x3,
560 .ui_num = 1,
561 .vi_num = 1,
562};
563
564static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = { 548static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
565 .vi_num = 2, 549 .vi_num = 2,
566 .ui_num = 1, 550 .ui_num = 1,
@@ -583,14 +567,6 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
583 .data = &sun8i_h3_mixer0_cfg, 567 .data = &sun8i_h3_mixer0_cfg,
584 }, 568 },
585 { 569 {
586 .compatible = "allwinner,sun8i-r40-de2-mixer-0",
587 .data = &sun8i_r40_mixer0_cfg,
588 },
589 {
590 .compatible = "allwinner,sun8i-r40-de2-mixer-1",
591 .data = &sun8i_r40_mixer1_cfg,
592 },
593 {
594 .compatible = "allwinner,sun8i-v3s-de2-mixer", 570 .compatible = "allwinner,sun8i-v3s-de2-mixer",
595 .data = &sun8i_v3s_mixer_cfg, 571 .data = &sun8i_v3s_mixer_cfg,
596 }, 572 },
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 55fe398d8290..d5240b777a8f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -253,7 +253,6 @@ static int sun8i_tcon_top_remove(struct platform_device *pdev)
253 253
254/* sun4i_drv uses this list to check if a device node is a TCON TOP */ 254/* sun4i_drv uses this list to check if a device node is a TCON TOP */
255const struct of_device_id sun8i_tcon_top_of_table[] = { 255const struct of_device_id sun8i_tcon_top_of_table[] = {
256 { .compatible = "allwinner,sun8i-r40-tcon-top" },
257 { /* sentinel */ } 256 { /* sentinel */ }
258}; 257};
259MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table); 258MODULE_DEVICE_TABLE(of, sun8i_tcon_top_of_table);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index dbb62f6eb48a..dd9ffded223b 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -432,9 +432,11 @@ static void udl_fbdev_destroy(struct drm_device *dev,
432{ 432{
433 drm_fb_helper_unregister_fbi(&ufbdev->helper); 433 drm_fb_helper_unregister_fbi(&ufbdev->helper);
434 drm_fb_helper_fini(&ufbdev->helper); 434 drm_fb_helper_fini(&ufbdev->helper);
435 drm_framebuffer_unregister_private(&ufbdev->ufb.base);
436 drm_framebuffer_cleanup(&ufbdev->ufb.base);
437 drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
 435 if (ufbdev->ufb.obj) {
 436 drm_framebuffer_unregister_private(&ufbdev->ufb.base);
 437 drm_framebuffer_cleanup(&ufbdev->ufb.base);
438 drm_gem_object_put_unlocked(&ufbdev->ufb.obj->base);
439 }
438} 440}
439 441
440int udl_fbdev_init(struct drm_device *dev) 442int udl_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index cfb50fedfa2b..a3275fa66b7b 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -297,6 +297,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
297 vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0], 297 vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
298 vc4_state->crtc_h); 298 vc4_state->crtc_h);
299 299
300 vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
301 vc4_state->y_scaling[0] == VC4_SCALING_NONE);
302
300 if (num_planes > 1) { 303 if (num_planes > 1) {
301 vc4_state->is_yuv = true; 304 vc4_state->is_yuv = true;
302 305
@@ -312,24 +315,17 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
312 vc4_get_scaling_mode(vc4_state->src_h[1], 315 vc4_get_scaling_mode(vc4_state->src_h[1],
313 vc4_state->crtc_h); 316 vc4_state->crtc_h);
314 317
315 /* YUV conversion requires that scaling be enabled,
316 * even on a plane that's otherwise 1:1. Choose TPZ
317 * for simplicity.
 318 /* YUV conversion requires that horizontal scaling be enabled,
 319 * even on a plane that's otherwise 1:1. Looks like only PPF
 320 * works in that case, so let's pick that one.
318 */ 321 */
319 if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
320 vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
321 if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
322 vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
 322 if (vc4_state->is_unity)
 323 vc4_state->x_scaling[0] = VC4_SCALING_PPF;
323 } else { 324 } else {
324 vc4_state->x_scaling[1] = VC4_SCALING_NONE; 325 vc4_state->x_scaling[1] = VC4_SCALING_NONE;
325 vc4_state->y_scaling[1] = VC4_SCALING_NONE; 326 vc4_state->y_scaling[1] = VC4_SCALING_NONE;
326 } 327 }
327 328
328 vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
329 vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
330 vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
331 vc4_state->y_scaling[1] == VC4_SCALING_NONE);
332
333 /* No configuring scaling on the cursor plane, since it gets 329 /* No configuring scaling on the cursor plane, since it gets
334 non-vblank-synced updates, and scaling requires requires 330 non-vblank-synced updates, and scaling requires requires
335 LBM changes which have to be vblank-synced. 331 LBM changes which have to be vblank-synced.
@@ -672,7 +668,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
672 vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5); 668 vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
673 } 669 }
674 670
675 if (!vc4_state->is_unity) {
 671 if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
672 vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
673 vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
674 vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
676 /* LBM Base Address. */ 675 /* LBM Base Address. */
677 if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || 676 if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
678 vc4_state->y_scaling[1] != VC4_SCALING_NONE) { 677 vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 1f134570b759..f0ab6b2313bb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3729,7 +3729,7 @@ int vmw_validate_single_buffer(struct vmw_private *dev_priv,
3729{ 3729{
3730 struct vmw_buffer_object *vbo = 3730 struct vmw_buffer_object *vbo =
3731 container_of(bo, struct vmw_buffer_object, base); 3731 container_of(bo, struct vmw_buffer_object, base);
3732 struct ttm_operation_ctx ctx = { interruptible, true };
 3732 struct ttm_operation_ctx ctx = { interruptible, false };
3733 int ret; 3733 int ret;
3734 3734
3735 if (vbo->pin_count > 0) 3735 if (vbo->pin_count > 0)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 23beff5d8e3c..6a712a8d59e9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1512,21 +1512,19 @@ static int vmw_kms_check_display_memory(struct drm_device *dev,
1512 struct drm_rect *rects) 1512 struct drm_rect *rects)
1513{ 1513{
1514 struct vmw_private *dev_priv = vmw_priv(dev); 1514 struct vmw_private *dev_priv = vmw_priv(dev);
1515 struct drm_mode_config *mode_config = &dev->mode_config;
1516 struct drm_rect bounding_box = {0}; 1515 struct drm_rect bounding_box = {0};
1517 u64 total_pixels = 0, pixel_mem, bb_mem; 1516 u64 total_pixels = 0, pixel_mem, bb_mem;
1518 int i; 1517 int i;
1519 1518
1520 for (i = 0; i < num_rects; i++) { 1519 for (i = 0; i < num_rects; i++) {
1521 /* 1520 /*
1522 * Currently this check is limiting the topology within max
1523 * texture/screentarget size. This should change in future when
1524 * user-space support multiple fb with topology.
 1521 * For STDU only individual screen (screen target) is limited by
 1522 * SCREENTARGET_MAX_WIDTH/HEIGHT registers.
1525 */ 1523 */
1526 if (rects[i].x1 < 0 || rects[i].y1 < 0 ||
1527 rects[i].x2 > mode_config->max_width ||
1528 rects[i].y2 > mode_config->max_height) {
1529 DRM_ERROR("Invalid GUI layout.\n");
 1524 if (dev_priv->active_display_unit == vmw_du_screen_target &&
 1525 (drm_rect_width(&rects[i]) > dev_priv->stdu_max_width ||
 1526 drm_rect_height(&rects[i]) > dev_priv->stdu_max_height)) {
 1527 DRM_ERROR("Screen size not supported.\n");
1530 return -EINVAL; 1528 return -EINVAL;
1531 } 1529 }
1532 1530
@@ -1615,7 +1613,7 @@ static int vmw_kms_check_topology(struct drm_device *dev,
1615 struct drm_connector_state *conn_state; 1613 struct drm_connector_state *conn_state;
1616 struct vmw_connector_state *vmw_conn_state; 1614 struct vmw_connector_state *vmw_conn_state;
1617 1615
1618 if (!new_crtc_state->enable && old_crtc_state->enable) {
 1616 if (!new_crtc_state->enable) {
1619 rects[i].x1 = 0; 1617 rects[i].x1 = 0;
1620 rects[i].y1 = 0; 1618 rects[i].y1 = 0;
1621 rects[i].x2 = 0; 1619 rects[i].x2 = 0;
@@ -2216,12 +2214,16 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
2216 if (dev_priv->assume_16bpp) 2214 if (dev_priv->assume_16bpp)
2217 assumed_bpp = 2; 2215 assumed_bpp = 2;
2218 2216
2217 max_width = min(max_width, dev_priv->texture_max_width);
2218 max_height = min(max_height, dev_priv->texture_max_height);
2219
2220 /*
2221 * For STDU extra limit for a mode on SVGA_REG_SCREENTARGET_MAX_WIDTH/
2222 * HEIGHT registers.
2223 */
2219 if (dev_priv->active_display_unit == vmw_du_screen_target) { 2224 if (dev_priv->active_display_unit == vmw_du_screen_target) {
2220 max_width = min(max_width, dev_priv->stdu_max_width); 2225 max_width = min(max_width, dev_priv->stdu_max_width);
2221 max_width = min(max_width, dev_priv->texture_max_width);
2222
2223 max_height = min(max_height, dev_priv->stdu_max_height); 2226 max_height = min(max_height, dev_priv->stdu_max_height);
2224 max_height = min(max_height, dev_priv->texture_max_height);
2225 } 2227 }
2226 2228
2227 /* Add preferred mode */ 2229 /* Add preferred mode */
@@ -2376,6 +2378,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2376 struct drm_file *file_priv) 2378 struct drm_file *file_priv)
2377{ 2379{
2378 struct vmw_private *dev_priv = vmw_priv(dev); 2380 struct vmw_private *dev_priv = vmw_priv(dev);
2381 struct drm_mode_config *mode_config = &dev->mode_config;
2379 struct drm_vmw_update_layout_arg *arg = 2382 struct drm_vmw_update_layout_arg *arg =
2380 (struct drm_vmw_update_layout_arg *)data; 2383 (struct drm_vmw_update_layout_arg *)data;
2381 void __user *user_rects; 2384 void __user *user_rects;
@@ -2421,6 +2424,21 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
2421 drm_rects[i].y1 = curr_rect.y; 2424 drm_rects[i].y1 = curr_rect.y;
2422 drm_rects[i].x2 = curr_rect.x + curr_rect.w; 2425 drm_rects[i].x2 = curr_rect.x + curr_rect.w;
2423 drm_rects[i].y2 = curr_rect.y + curr_rect.h; 2426 drm_rects[i].y2 = curr_rect.y + curr_rect.h;
2427
2428 /*
2429 * Currently this check is limiting the topology within
2430 * mode_config->max (which actually is max texture size
2431 * supported by virtual device). This limit is here to address
2432 * window managers that create a big framebuffer for whole
2433 * topology.
2434 */
2435 if (drm_rects[i].x1 < 0 || drm_rects[i].y1 < 0 ||
2436 drm_rects[i].x2 > mode_config->max_width ||
2437 drm_rects[i].y2 > mode_config->max_height) {
2438 DRM_ERROR("Invalid GUI layout.\n");
2439 ret = -EINVAL;
2440 goto out_free;
2441 }
2424 } 2442 }
2425 2443
2426 ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects); 2444 ret = vmw_kms_check_display_memory(dev, arg->num_outputs, drm_rects);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 93f6b96ca7bb..f30e839f7bfd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1600,31 +1600,6 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
1600 1600
1601 dev_priv->active_display_unit = vmw_du_screen_target; 1601 dev_priv->active_display_unit = vmw_du_screen_target;
1602 1602
1603 if (dev_priv->capabilities & SVGA_CAP_3D) {
1604 /*
1605 * For 3D VMs, display (scanout) buffer size is the smaller of
1606 * max texture and max STDU
1607 */
1608 uint32_t max_width, max_height;
1609
1610 max_width = min(dev_priv->texture_max_width,
1611 dev_priv->stdu_max_width);
1612 max_height = min(dev_priv->texture_max_height,
1613 dev_priv->stdu_max_height);
1614
1615 dev->mode_config.max_width = max_width;
1616 dev->mode_config.max_height = max_height;
1617 } else {
1618 /*
1619 * Given various display aspect ratios, there's no way to
1620 * estimate these using prim_bb_mem. So just set these to
1621 * something arbitrarily large and we will reject any layout
1622 * that doesn't fit prim_bb_mem later
1623 */
1624 dev->mode_config.max_width = 8192;
1625 dev->mode_config.max_height = 8192;
1626 }
1627
1628 vmw_kms_create_implicit_placement_property(dev_priv, false); 1603 vmw_kms_create_implicit_placement_property(dev_priv, false);
1629 1604
1630 for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) { 1605 for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index e125233e074b..80a01cd4c051 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -1404,22 +1404,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
1404 *srf_out = NULL; 1404 *srf_out = NULL;
1405 1405
1406 if (for_scanout) { 1406 if (for_scanout) {
1407 uint32_t max_width, max_height;
1408
1409 if (!svga3dsurface_is_screen_target_format(format)) { 1407 if (!svga3dsurface_is_screen_target_format(format)) {
1410 DRM_ERROR("Invalid Screen Target surface format."); 1408 DRM_ERROR("Invalid Screen Target surface format.");
1411 return -EINVAL; 1409 return -EINVAL;
1412 } 1410 }
1413 1411
1414 max_width = min(dev_priv->texture_max_width,
1415 dev_priv->stdu_max_width);
1416 max_height = min(dev_priv->texture_max_height,
1417 dev_priv->stdu_max_height);
1418
1419 if (size.width > max_width || size.height > max_height) {
 1412 if (size.width > dev_priv->texture_max_width ||
 1413 size.height > dev_priv->texture_max_height) {
1420 DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u", 1414 DRM_ERROR("%ux%u\n, exceeds max surface size %ux%u",
1421 size.width, size.height, 1415 size.width, size.height,
1422 max_width, max_height);
 1416 dev_priv->texture_max_width,
1417 dev_priv->texture_max_height);
1423 return -EINVAL; 1418 return -EINVAL;
1424 } 1419 }
1425 } else { 1420 } else {
@@ -1495,8 +1490,17 @@ int vmw_surface_gb_priv_define(struct drm_device *dev,
1495 if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) 1490 if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
1496 srf->res.backup_size += sizeof(SVGA3dDXSOState); 1491 srf->res.backup_size += sizeof(SVGA3dDXSOState);
1497 1492
1493 /*
1494 * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with
1495 * size greater than STDU max width/height. This is really a workaround
1496 * to support creation of big framebuffer requested by some user-space
1497 * for whole topology. That big framebuffer won't really be used for
1498 * binding with screen target as during prepare_fb a separate surface is
1499 * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
1500 */
1498 if (dev_priv->active_display_unit == vmw_du_screen_target && 1501 if (dev_priv->active_display_unit == vmw_du_screen_target &&
1499 for_scanout)
 1502 for_scanout && size.width <= dev_priv->stdu_max_width &&
1503 size.height <= dev_priv->stdu_max_height)
1500 srf->flags |= SVGA3D_SURFACE_SCREENTARGET; 1504 srf->flags |= SVGA3D_SURFACE_SCREENTARGET;
1501 1505
1502 /* 1506 /*
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index a96bf46bc483..cf2a18571d48 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -215,6 +215,8 @@ static void vga_switcheroo_enable(void)
215 return; 215 return;
216 216
217 client->id = ret | ID_BIT_AUDIO; 217 client->id = ret | ID_BIT_AUDIO;
218 if (client->ops->gpu_bound)
219 client->ops->gpu_bound(client->pdev, ret);
218 } 220 }
219 221
220 vga_switcheroo_debugfs_init(&vgasr_priv); 222 vga_switcheroo_debugfs_init(&vgasr_priv);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5146ee029db4..bc49909aba8e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -976,7 +976,6 @@
976#define USB_DEVICE_ID_SIS817_TOUCH 0x0817 976#define USB_DEVICE_ID_SIS817_TOUCH 0x0817
977#define USB_DEVICE_ID_SIS_TS 0x1013 977#define USB_DEVICE_ID_SIS_TS 0x1013
978#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030 978#define USB_DEVICE_ID_SIS1030_TOUCH 0x1030
979#define USB_DEVICE_ID_SIS10FB_TOUCH 0x10fb
980 979
981#define USB_VENDOR_ID_SKYCABLE 0x1223 980#define USB_VENDOR_ID_SKYCABLE 0x1223
982#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 981#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index f3076659361a..4e3592e7a3f7 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -47,7 +47,7 @@
47/* quirks to control the device */ 47/* quirks to control the device */
48#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0) 48#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
49#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1) 49#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
50#define I2C_HID_QUIRK_RESEND_REPORT_DESCR BIT(2)
 50#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
51 51
52/* flags */ 52/* flags */
53#define I2C_HID_STARTED 0 53#define I2C_HID_STARTED 0
@@ -169,9 +169,8 @@ static const struct i2c_hid_quirks {
169 { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755, 169 { USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
170 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV }, 170 I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
171 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288, 171 { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
172 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
173 { USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
174 I2C_HID_QUIRK_RESEND_REPORT_DESCR },
 172 I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
 173 I2C_HID_QUIRK_NO_RUNTIME_PM },
175 { 0, 0 } 174 { 0, 0 }
176}; 175};
177 176
@@ -1105,7 +1104,9 @@ static int i2c_hid_probe(struct i2c_client *client,
1105 goto err_mem_free; 1104 goto err_mem_free;
1106 } 1105 }
1107 1106
1108 pm_runtime_put(&client->dev);
 1107 if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
1108 pm_runtime_put(&client->dev);
1109
1109 return 0; 1110 return 0;
1110 1111
1111err_mem_free: 1112err_mem_free:
@@ -1130,7 +1131,8 @@ static int i2c_hid_remove(struct i2c_client *client)
1130 struct i2c_hid *ihid = i2c_get_clientdata(client); 1131 struct i2c_hid *ihid = i2c_get_clientdata(client);
1131 struct hid_device *hid; 1132 struct hid_device *hid;
1132 1133
1133 pm_runtime_get_sync(&client->dev);
 1134 if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
1135 pm_runtime_get_sync(&client->dev);
1134 pm_runtime_disable(&client->dev); 1136 pm_runtime_disable(&client->dev);
1135 pm_runtime_set_suspended(&client->dev); 1137 pm_runtime_set_suspended(&client->dev);
1136 pm_runtime_put_noidle(&client->dev); 1138 pm_runtime_put_noidle(&client->dev);
@@ -1236,22 +1238,13 @@ static int i2c_hid_resume(struct device *dev)
1236 1238
1237 /* Instead of resetting device, simply powers the device on. This 1239 /* Instead of resetting device, simply powers the device on. This
1238 * solves "incomplete reports" on Raydium devices 2386:3118 and 1240 * solves "incomplete reports" on Raydium devices 2386:3118 and
1239 * 2386:4B33
 1241 * 2386:4B33 and fixes various SIS touchscreens no longer sending
1242 * data after a suspend/resume.
1240 */ 1243 */
1241 ret = i2c_hid_set_power(client, I2C_HID_PWR_ON); 1244 ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
1242 if (ret) 1245 if (ret)
1243 return ret; 1246 return ret;
1244 1247
1245 /* Some devices need to re-send report descr cmd
1246 * after resume, after this it will be back normal.
1247 * otherwise it issues too many incomplete reports.
1248 */
1249 if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
1250 ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
1251 if (ret)
1252 return ret;
1253 }
1254
1255 if (hid->driver && hid->driver->reset_resume) { 1248 if (hid->driver && hid->driver->reset_resume) {
1256 ret = hid->driver->reset_resume(hid); 1249 ret = hid->driver->reset_resume(hid);
1257 return ret; 1250 return ret;
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index da133716bed0..08a8327dfd22 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -29,6 +29,7 @@
29#define CNL_Ax_DEVICE_ID 0x9DFC 29#define CNL_Ax_DEVICE_ID 0x9DFC
30#define GLK_Ax_DEVICE_ID 0x31A2 30#define GLK_Ax_DEVICE_ID 0x31A2
31#define CNL_H_DEVICE_ID 0xA37C 31#define CNL_H_DEVICE_ID 0xA37C
32#define ICL_MOBILE_DEVICE_ID 0x34FC
32#define SPT_H_DEVICE_ID 0xA135 33#define SPT_H_DEVICE_ID 0xA135
33 34
34#define REVISION_ID_CHT_A0 0x6 35#define REVISION_ID_CHT_A0 0x6
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index a1125a5c7965..256b3016116c 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
38 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)}, 38 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
39 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)}, 39 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
40 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)}, 40 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
41 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
41 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)}, 42 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
42 {0, } 43 {0, }
43}; 44};
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 944f5b63aecd..78603b78cf41 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -207,8 +207,6 @@ superio_exit(int ioreg)
207 207
208#define NUM_FAN 7 208#define NUM_FAN 7
209 209
210#define TEMP_SOURCE_VIRTUAL 0x1f
211
212/* Common and NCT6775 specific data */ 210/* Common and NCT6775 specific data */
213 211
214/* Voltage min/max registers for nr=7..14 are in bank 5 */ 212/* Voltage min/max registers for nr=7..14 are in bank 5 */
@@ -299,8 +297,9 @@ static const u16 NCT6775_REG_PWM_READ[] = {
299 297
300static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 }; 298static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
301static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d }; 299static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d };
302static const u16 NCT6775_REG_FAN_PULSES[] = { 0x641, 0x642, 0x643, 0x644, 0 };
303static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 };
 300static const u16 NCT6775_REG_FAN_PULSES[NUM_FAN] = {
 301 0x641, 0x642, 0x643, 0x644 };
302static const u16 NCT6775_FAN_PULSE_SHIFT[NUM_FAN] = { };
304 303
305static const u16 NCT6775_REG_TEMP[] = { 304static const u16 NCT6775_REG_TEMP[] = {
306 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d }; 305 0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d };
@@ -373,6 +372,7 @@ static const char *const nct6775_temp_label[] = {
373}; 372};
374 373
375#define NCT6775_TEMP_MASK 0x001ffffe 374#define NCT6775_TEMP_MASK 0x001ffffe
375#define NCT6775_VIRT_TEMP_MASK 0x00000000
376 376
377static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = { 377static const u16 NCT6775_REG_TEMP_ALTERNATE[32] = {
378 [13] = 0x661, 378 [13] = 0x661,
@@ -425,8 +425,8 @@ static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
425 425
426static const u16 NCT6776_REG_FAN_MIN[] = { 426static const u16 NCT6776_REG_FAN_MIN[] = {
427 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c }; 427 0x63a, 0x63c, 0x63e, 0x640, 0x642, 0x64a, 0x64c };
428static const u16 NCT6776_REG_FAN_PULSES[] = {
429 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 };
 428static const u16 NCT6776_REG_FAN_PULSES[NUM_FAN] = {
 429 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
430 430
431static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = { 431static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = {
432 0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e }; 432 0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e };
@@ -461,6 +461,7 @@ static const char *const nct6776_temp_label[] = {
461}; 461};
462 462
463#define NCT6776_TEMP_MASK 0x007ffffe 463#define NCT6776_TEMP_MASK 0x007ffffe
464#define NCT6776_VIRT_TEMP_MASK 0x00000000
464 465
465static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = { 466static const u16 NCT6776_REG_TEMP_ALTERNATE[32] = {
466 [14] = 0x401, 467 [14] = 0x401,
@@ -501,9 +502,9 @@ static const s8 NCT6779_BEEP_BITS[] = {
501 30, 31 }; /* intrusion0, intrusion1 */ 502 30, 31 }; /* intrusion0, intrusion1 */
502 503
503static const u16 NCT6779_REG_FAN[] = { 504static const u16 NCT6779_REG_FAN[] = {
504 0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba, 0x660 };
505static const u16 NCT6779_REG_FAN_PULSES[] = {
506 0x644, 0x645, 0x646, 0x647, 0x648, 0x649, 0 };
 505 0x4c0, 0x4c2, 0x4c4, 0x4c6, 0x4c8, 0x4ca, 0x4ce };
 506static const u16 NCT6779_REG_FAN_PULSES[NUM_FAN] = {
 507 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
507 508
508static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = { 509static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = {
509 0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 }; 510 0x136, 0x236, 0x336, 0x836, 0x936, 0xa36, 0xb36 };
@@ -559,7 +560,9 @@ static const char *const nct6779_temp_label[] = {
559}; 560};
560 561
561#define NCT6779_TEMP_MASK 0x07ffff7e 562#define NCT6779_TEMP_MASK 0x07ffff7e
563#define NCT6779_VIRT_TEMP_MASK 0x00000000
562#define NCT6791_TEMP_MASK 0x87ffff7e 564#define NCT6791_TEMP_MASK 0x87ffff7e
565#define NCT6791_VIRT_TEMP_MASK 0x80000000
563 566
564static const u16 NCT6779_REG_TEMP_ALTERNATE[32] 567static const u16 NCT6779_REG_TEMP_ALTERNATE[32]
565 = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0, 568 = { 0x490, 0x491, 0x492, 0x493, 0x494, 0x495, 0, 0,
@@ -638,6 +641,7 @@ static const char *const nct6792_temp_label[] = {
638}; 641};
639 642
640#define NCT6792_TEMP_MASK 0x9fffff7e 643#define NCT6792_TEMP_MASK 0x9fffff7e
644#define NCT6792_VIRT_TEMP_MASK 0x80000000
641 645
642static const char *const nct6793_temp_label[] = { 646static const char *const nct6793_temp_label[] = {
643 "", 647 "",
@@ -675,6 +679,7 @@ static const char *const nct6793_temp_label[] = {
675}; 679};
676 680
677#define NCT6793_TEMP_MASK 0xbfff037e 681#define NCT6793_TEMP_MASK 0xbfff037e
682#define NCT6793_VIRT_TEMP_MASK 0x80000000
678 683
679static const char *const nct6795_temp_label[] = { 684static const char *const nct6795_temp_label[] = {
680 "", 685 "",
@@ -712,6 +717,7 @@ static const char *const nct6795_temp_label[] = {
712}; 717};
713 718
714#define NCT6795_TEMP_MASK 0xbfffff7e 719#define NCT6795_TEMP_MASK 0xbfffff7e
720#define NCT6795_VIRT_TEMP_MASK 0x80000000
715 721
716static const char *const nct6796_temp_label[] = { 722static const char *const nct6796_temp_label[] = {
717 "", 723 "",
@@ -724,8 +730,8 @@ static const char *const nct6796_temp_label[] = {
724 "AUXTIN4", 730 "AUXTIN4",
725 "SMBUSMASTER 0", 731 "SMBUSMASTER 0",
726 "SMBUSMASTER 1", 732 "SMBUSMASTER 1",
727 "",
728 "",
 733 "Virtual_TEMP",
 734 "Virtual_TEMP",
729 "", 735 "",
730 "", 736 "",
731 "", 737 "",
@@ -748,7 +754,8 @@ static const char *const nct6796_temp_label[] = {
748 "Virtual_TEMP" 754 "Virtual_TEMP"
749}; 755};
750 756
751#define NCT6796_TEMP_MASK 0xbfff03fe
 757#define NCT6796_TEMP_MASK 0xbfff0ffe
758#define NCT6796_VIRT_TEMP_MASK 0x80000c00
752 759
753/* NCT6102D/NCT6106D specific data */ 760/* NCT6102D/NCT6106D specific data */
754 761
@@ -779,8 +786,8 @@ static const u16 NCT6106_REG_TEMP_CONFIG[] = {
779 786
780static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 }; 787static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 };
781static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 }; 788static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 };
782static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0, 0 };
783static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4, 0, 0 };
 789static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6 };
 790static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4 };
784 791
785static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 }; 792static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 };
786static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 }; 793static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 };
@@ -917,6 +924,11 @@ static unsigned int fan_from_reg16(u16 reg, unsigned int divreg)
917 return 1350000U / (reg << divreg); 924 return 1350000U / (reg << divreg);
918} 925}
919 926
927static unsigned int fan_from_reg_rpm(u16 reg, unsigned int divreg)
928{
929 return reg;
930}
931
920static u16 fan_to_reg(u32 fan, unsigned int divreg) 932static u16 fan_to_reg(u32 fan, unsigned int divreg)
921{ 933{
922 if (!fan) 934 if (!fan)
@@ -969,6 +981,7 @@ struct nct6775_data {
969 u16 reg_temp_config[NUM_TEMP]; 981 u16 reg_temp_config[NUM_TEMP];
970 const char * const *temp_label; 982 const char * const *temp_label;
971 u32 temp_mask; 983 u32 temp_mask;
984 u32 virt_temp_mask;
972 985
973 u16 REG_CONFIG; 986 u16 REG_CONFIG;
974 u16 REG_VBAT; 987 u16 REG_VBAT;
@@ -1276,11 +1289,11 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
1276 case nct6795: 1289 case nct6795:
1277 case nct6796: 1290 case nct6796:
1278 return reg == 0x150 || reg == 0x153 || reg == 0x155 || 1291 return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
1279 ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) ||
 1292 (reg & 0xfff0) == 0x4c0 ||
1280 reg == 0x402 || 1293 reg == 0x402 ||
1281 reg == 0x63a || reg == 0x63c || reg == 0x63e || 1294 reg == 0x63a || reg == 0x63c || reg == 0x63e ||
1282 reg == 0x640 || reg == 0x642 || reg == 0x64a || 1295 reg == 0x640 || reg == 0x642 || reg == 0x64a ||
1283 reg == 0x64c || reg == 0x660 ||
 1296 reg == 0x64c ||
1284 reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 || 1297 reg == 0x73 || reg == 0x75 || reg == 0x77 || reg == 0x79 ||
1285 reg == 0x7b || reg == 0x7d; 1298 reg == 0x7b || reg == 0x7d;
1286 } 1299 }
@@ -1558,7 +1571,7 @@ static void nct6775_update_pwm(struct device *dev)
1558 reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]); 1571 reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
1559 data->pwm_weight_temp_sel[i] = reg & 0x1f; 1572 data->pwm_weight_temp_sel[i] = reg & 0x1f;
1560 /* If weight is disabled, report weight source as 0 */ 1573 /* If weight is disabled, report weight source as 0 */
1561 if (j == 1 && !(reg & 0x80))
 1574 if (!(reg & 0x80))
1562 data->pwm_weight_temp_sel[i] = 0; 1575 data->pwm_weight_temp_sel[i] = 0;
1563 1576
1564 /* Weight temp data */ 1577 /* Weight temp data */
@@ -1682,9 +1695,13 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
1682 if (data->has_fan_min & BIT(i)) 1695 if (data->has_fan_min & BIT(i))
1683 data->fan_min[i] = nct6775_read_value(data, 1696 data->fan_min[i] = nct6775_read_value(data,
1684 data->REG_FAN_MIN[i]); 1697 data->REG_FAN_MIN[i]);
1685 data->fan_pulses[i] =
1686 (nct6775_read_value(data, data->REG_FAN_PULSES[i])
1687 >> data->FAN_PULSE_SHIFT[i]) & 0x03;
 1698
 1699 if (data->REG_FAN_PULSES[i]) {
 1700 data->fan_pulses[i] =
1701 (nct6775_read_value(data,
1702 data->REG_FAN_PULSES[i])
1703 >> data->FAN_PULSE_SHIFT[i]) & 0x03;
1704 }
1688 1705
1689 nct6775_select_fan_div(dev, data, i, reg); 1706 nct6775_select_fan_div(dev, data, i, reg);
1690 } 1707 }
@@ -3639,6 +3656,7 @@ static int nct6775_probe(struct platform_device *pdev)
3639 3656
3640 data->temp_label = nct6776_temp_label; 3657 data->temp_label = nct6776_temp_label;
3641 data->temp_mask = NCT6776_TEMP_MASK; 3658 data->temp_mask = NCT6776_TEMP_MASK;
3659 data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
3642 3660
3643 data->REG_VBAT = NCT6106_REG_VBAT; 3661 data->REG_VBAT = NCT6106_REG_VBAT;
3644 data->REG_DIODE = NCT6106_REG_DIODE; 3662 data->REG_DIODE = NCT6106_REG_DIODE;
@@ -3717,6 +3735,7 @@ static int nct6775_probe(struct platform_device *pdev)
3717 3735
3718 data->temp_label = nct6775_temp_label; 3736 data->temp_label = nct6775_temp_label;
3719 data->temp_mask = NCT6775_TEMP_MASK; 3737 data->temp_mask = NCT6775_TEMP_MASK;
3738 data->virt_temp_mask = NCT6775_VIRT_TEMP_MASK;
3720 3739
3721 data->REG_CONFIG = NCT6775_REG_CONFIG; 3740 data->REG_CONFIG = NCT6775_REG_CONFIG;
3722 data->REG_VBAT = NCT6775_REG_VBAT; 3741 data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3789,6 +3808,7 @@ static int nct6775_probe(struct platform_device *pdev)
3789 3808
3790 data->temp_label = nct6776_temp_label; 3809 data->temp_label = nct6776_temp_label;
3791 data->temp_mask = NCT6776_TEMP_MASK; 3810 data->temp_mask = NCT6776_TEMP_MASK;
3811 data->virt_temp_mask = NCT6776_VIRT_TEMP_MASK;
3792 3812
3793 data->REG_CONFIG = NCT6775_REG_CONFIG; 3813 data->REG_CONFIG = NCT6775_REG_CONFIG;
3794 data->REG_VBAT = NCT6775_REG_VBAT; 3814 data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3853,7 +3873,7 @@ static int nct6775_probe(struct platform_device *pdev)
3853 data->ALARM_BITS = NCT6779_ALARM_BITS; 3873 data->ALARM_BITS = NCT6779_ALARM_BITS;
3854 data->BEEP_BITS = NCT6779_BEEP_BITS; 3874 data->BEEP_BITS = NCT6779_BEEP_BITS;
3855 3875
3856 data->fan_from_reg = fan_from_reg13;
 3876 data->fan_from_reg = fan_from_reg_rpm;
3857 data->fan_from_reg_min = fan_from_reg13; 3877 data->fan_from_reg_min = fan_from_reg13;
3858 data->target_temp_mask = 0xff; 3878 data->target_temp_mask = 0xff;
3859 data->tolerance_mask = 0x07; 3879 data->tolerance_mask = 0x07;
@@ -3861,6 +3881,7 @@ static int nct6775_probe(struct platform_device *pdev)
3861 3881
3862 data->temp_label = nct6779_temp_label; 3882 data->temp_label = nct6779_temp_label;
3863 data->temp_mask = NCT6779_TEMP_MASK; 3883 data->temp_mask = NCT6779_TEMP_MASK;
3884 data->virt_temp_mask = NCT6779_VIRT_TEMP_MASK;
3864 3885
3865 data->REG_CONFIG = NCT6775_REG_CONFIG; 3886 data->REG_CONFIG = NCT6775_REG_CONFIG;
3866 data->REG_VBAT = NCT6775_REG_VBAT; 3887 data->REG_VBAT = NCT6775_REG_VBAT;
@@ -3933,7 +3954,7 @@ static int nct6775_probe(struct platform_device *pdev)
3933 data->ALARM_BITS = NCT6791_ALARM_BITS; 3954 data->ALARM_BITS = NCT6791_ALARM_BITS;
3934 data->BEEP_BITS = NCT6779_BEEP_BITS; 3955 data->BEEP_BITS = NCT6779_BEEP_BITS;
3935 3956
3936 data->fan_from_reg = fan_from_reg13; 3957 data->fan_from_reg = fan_from_reg_rpm;
3937 data->fan_from_reg_min = fan_from_reg13; 3958 data->fan_from_reg_min = fan_from_reg13;
3938 data->target_temp_mask = 0xff; 3959 data->target_temp_mask = 0xff;
3939 data->tolerance_mask = 0x07; 3960 data->tolerance_mask = 0x07;
@@ -3944,22 +3965,27 @@ static int nct6775_probe(struct platform_device *pdev)
3944 case nct6791: 3965 case nct6791:
3945 data->temp_label = nct6779_temp_label; 3966 data->temp_label = nct6779_temp_label;
3946 data->temp_mask = NCT6791_TEMP_MASK; 3967 data->temp_mask = NCT6791_TEMP_MASK;
3968 data->virt_temp_mask = NCT6791_VIRT_TEMP_MASK;
3947 break; 3969 break;
3948 case nct6792: 3970 case nct6792:
3949 data->temp_label = nct6792_temp_label; 3971 data->temp_label = nct6792_temp_label;
3950 data->temp_mask = NCT6792_TEMP_MASK; 3972 data->temp_mask = NCT6792_TEMP_MASK;
3973 data->virt_temp_mask = NCT6792_VIRT_TEMP_MASK;
3951 break; 3974 break;
3952 case nct6793: 3975 case nct6793:
3953 data->temp_label = nct6793_temp_label; 3976 data->temp_label = nct6793_temp_label;
3954 data->temp_mask = NCT6793_TEMP_MASK; 3977 data->temp_mask = NCT6793_TEMP_MASK;
3978 data->virt_temp_mask = NCT6793_VIRT_TEMP_MASK;
3955 break; 3979 break;
3956 case nct6795: 3980 case nct6795:
3957 data->temp_label = nct6795_temp_label; 3981 data->temp_label = nct6795_temp_label;
3958 data->temp_mask = NCT6795_TEMP_MASK; 3982 data->temp_mask = NCT6795_TEMP_MASK;
3983 data->virt_temp_mask = NCT6795_VIRT_TEMP_MASK;
3959 break; 3984 break;
3960 case nct6796: 3985 case nct6796:
3961 data->temp_label = nct6796_temp_label; 3986 data->temp_label = nct6796_temp_label;
3962 data->temp_mask = NCT6796_TEMP_MASK; 3987 data->temp_mask = NCT6796_TEMP_MASK;
3988 data->virt_temp_mask = NCT6796_VIRT_TEMP_MASK;
3963 break; 3989 break;
3964 } 3990 }
3965 3991
@@ -4143,7 +4169,7 @@ static int nct6775_probe(struct platform_device *pdev)
4143 * for each fan reflects a different temperature, and there 4169 * for each fan reflects a different temperature, and there
4144 * are no duplicates. 4170 * are no duplicates.
4145 */ 4171 */
4146 if (src != TEMP_SOURCE_VIRTUAL) { 4172 if (!(data->virt_temp_mask & BIT(src))) {
4147 if (mask & BIT(src)) 4173 if (mask & BIT(src))
4148 continue; 4174 continue;
4149 mask |= BIT(src); 4175 mask |= BIT(src);
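Note: the last hunk above swaps the single TEMP_SOURCE_VIRTUAL comparison for a per-chip virt_temp_mask, so any source flagged as virtual is exempt from the duplicate check. A minimal standalone sketch of that pattern, using illustrative names rather than the driver's structures:

    #include <stdbool.h>
    #include <stdint.h>

    #define BIT(n) (1U << (n))

    /* Return true if every non-virtual temperature source appears at most once. */
    static bool temp_sources_unique(const uint8_t *src, int n, uint32_t virt_temp_mask)
    {
        uint32_t seen = 0;

        for (int i = 0; i < n; i++) {
            if (virt_temp_mask & BIT(src[i]))
                continue;          /* virtual sources may repeat */
            if (seen & BIT(src[i]))
                return false;      /* a real source is duplicated */
            seen |= BIT(src[i]);
        }
        return true;
    }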
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index da962aa2cef5..fc6b7f8b62fb 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -139,7 +139,8 @@ static int intel_th_remove(struct device *dev)
139 th->thdev[i] = NULL; 139 th->thdev[i] = NULL;
140 } 140 }
141 141
142 th->num_thdevs = lowest; 142 if (lowest >= 0)
143 th->num_thdevs = lowest;
143 } 144 }
144 145
145 if (thdrv->attr_group) 146 if (thdrv->attr_group)
@@ -487,7 +488,7 @@ static const struct intel_th_subdevice {
487 .flags = IORESOURCE_MEM, 488 .flags = IORESOURCE_MEM,
488 }, 489 },
489 { 490 {
490 .start = TH_MMIO_SW, 491 .start = 1, /* use resource[1] */
491 .end = 0, 492 .end = 0,
492 .flags = IORESOURCE_MEM, 493 .flags = IORESOURCE_MEM,
493 }, 494 },
@@ -580,6 +581,7 @@ intel_th_subdevice_alloc(struct intel_th *th,
580 struct intel_th_device *thdev; 581 struct intel_th_device *thdev;
581 struct resource res[3]; 582 struct resource res[3];
582 unsigned int req = 0; 583 unsigned int req = 0;
584 bool is64bit = false;
583 int r, err; 585 int r, err;
584 586
585 thdev = intel_th_device_alloc(th, subdev->type, subdev->name, 587 thdev = intel_th_device_alloc(th, subdev->type, subdev->name,
@@ -589,12 +591,18 @@ intel_th_subdevice_alloc(struct intel_th *th,
589 591
590 thdev->drvdata = th->drvdata; 592 thdev->drvdata = th->drvdata;
591 593
594 for (r = 0; r < th->num_resources; r++)
595 if (th->resource[r].flags & IORESOURCE_MEM_64) {
596 is64bit = true;
597 break;
598 }
599
592 memcpy(res, subdev->res, 600 memcpy(res, subdev->res,
593 sizeof(struct resource) * subdev->nres); 601 sizeof(struct resource) * subdev->nres);
594 602
595 for (r = 0; r < subdev->nres; r++) { 603 for (r = 0; r < subdev->nres; r++) {
596 struct resource *devres = th->resource; 604 struct resource *devres = th->resource;
597 int bar = TH_MMIO_CONFIG; 605 int bar = 0; /* cut subdevices' MMIO from resource[0] */
598 606
599 /* 607 /*
600 * Take .end == 0 to mean 'take the whole bar', 608 * Take .end == 0 to mean 'take the whole bar',
@@ -603,6 +611,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
603 */ 611 */
604 if (!res[r].end && res[r].flags == IORESOURCE_MEM) { 612 if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
605 bar = res[r].start; 613 bar = res[r].start;
614 if (is64bit)
615 bar *= 2;
606 res[r].start = 0; 616 res[r].start = 0;
607 res[r].end = resource_size(&devres[bar]) - 1; 617 res[r].end = resource_size(&devres[bar]) - 1;
608 } 618 }
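Note: the hunk above accounts for 64-bit BARs, where each MMIO BAR occupies two entries in the PCI resource array, so the logical BAR number has to be doubled before indexing the resources. A tiny standalone sketch of that mapping (illustrative, not the driver code):

    /* Map a logical BAR number to an index into a flat resource array. */
    static int bar_to_resource_idx(int bar, int is64bit)
    {
        return is64bit ? bar * 2 : bar;   /* 64-bit BARs take two slots each */
    }

With 64-bit BARs, logical BAR 1 therefore lands at resource[2], which is what the is64bit doubling in the hunk achieves.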
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index c2e55e5d97f6..1cf6290d6435 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -160,6 +160,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
160 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1), 160 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
161 .driver_data = (kernel_ulong_t)&intel_th_2x, 161 .driver_data = (kernel_ulong_t)&intel_th_2x,
162 }, 162 },
163 {
164 /* Ice Lake PCH */
165 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
166 .driver_data = (kernel_ulong_t)&intel_th_2x,
167 },
163 { 0 }, 168 { 0 },
164}; 169};
165 170
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index 0bee1f4b914e..3208ad6ad540 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -338,6 +338,39 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
338} 338}
339 339
340/** 340/**
341 * del_gid - Delete GID table entry
342 *
343 * @ib_dev: IB device whose GID entry to be deleted
344 * @port: Port number of the IB device
345 * @table: GID table of the IB device for a port
346 * @ix: GID entry index to delete
347 *
348 */
349static void del_gid(struct ib_device *ib_dev, u8 port,
350 struct ib_gid_table *table, int ix)
351{
352 struct ib_gid_table_entry *entry;
353
354 lockdep_assert_held(&table->lock);
355
356 pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
357 ib_dev->name, port, ix,
358 table->data_vec[ix]->attr.gid.raw);
359
360 write_lock_irq(&table->rwlock);
361 entry = table->data_vec[ix];
362 entry->state = GID_TABLE_ENTRY_PENDING_DEL;
363 /*
364 * For non RoCE protocol, GID entry slot is ready to use.
365 */
366 if (!rdma_protocol_roce(ib_dev, port))
367 table->data_vec[ix] = NULL;
368 write_unlock_irq(&table->rwlock);
369
370 put_gid_entry_locked(entry);
371}
372
373/**
341 * add_modify_gid - Add or modify GID table entry 374 * add_modify_gid - Add or modify GID table entry
342 * 375 *
343 * @table: GID table in which GID to be added or modified 376 * @table: GID table in which GID to be added or modified
@@ -358,7 +391,7 @@ static int add_modify_gid(struct ib_gid_table *table,
358 * this index. 391 * this index.
359 */ 392 */
360 if (is_gid_entry_valid(table->data_vec[attr->index])) 393 if (is_gid_entry_valid(table->data_vec[attr->index]))
361 put_gid_entry(table->data_vec[attr->index]); 394 del_gid(attr->device, attr->port_num, table, attr->index);
362 395
363 /* 396 /*
364 * Some HCA's report multiple GID entries with only one valid GID, and 397 * Some HCA's report multiple GID entries with only one valid GID, and
@@ -386,39 +419,6 @@ done:
386 return ret; 419 return ret;
387} 420}
388 421
389/**
390 * del_gid - Delete GID table entry
391 *
392 * @ib_dev: IB device whose GID entry to be deleted
393 * @port: Port number of the IB device
394 * @table: GID table of the IB device for a port
395 * @ix: GID entry index to delete
396 *
397 */
398static void del_gid(struct ib_device *ib_dev, u8 port,
399 struct ib_gid_table *table, int ix)
400{
401 struct ib_gid_table_entry *entry;
402
403 lockdep_assert_held(&table->lock);
404
405 pr_debug("%s device=%s port=%d index=%d gid %pI6\n", __func__,
406 ib_dev->name, port, ix,
407 table->data_vec[ix]->attr.gid.raw);
408
409 write_lock_irq(&table->rwlock);
410 entry = table->data_vec[ix];
411 entry->state = GID_TABLE_ENTRY_PENDING_DEL;
412 /*
413 * For non RoCE protocol, GID entry slot is ready to use.
414 */
415 if (!rdma_protocol_roce(ib_dev, port))
416 table->data_vec[ix] = NULL;
417 write_unlock_irq(&table->rwlock);
418
419 put_gid_entry_locked(entry);
420}
421
422/* rwlock should be read locked, or lock should be held */ 422/* rwlock should be read locked, or lock should be held */
423static int find_gid(struct ib_gid_table *table, const union ib_gid *gid, 423static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
424 const struct ib_gid_attr *val, bool default_gid, 424 const struct ib_gid_attr *val, bool default_gid,
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5f437d1570fb..21863ddde63e 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1759,6 +1759,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
1759 mutex_lock(&mut); 1759 mutex_lock(&mut);
1760 if (!ctx->closing) { 1760 if (!ctx->closing) {
1761 mutex_unlock(&mut); 1761 mutex_unlock(&mut);
1762 ucma_put_ctx(ctx);
1763 wait_for_completion(&ctx->comp);
1762 /* rdma_destroy_id ensures that no event handlers are 1764 /* rdma_destroy_id ensures that no event handlers are
1763 * inflight for that id before releasing it. 1765 * inflight for that id before releasing it.
1764 */ 1766 */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index a21d5214afc3..e012ca80f9d1 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2027,33 +2027,55 @@ static int modify_qp(struct ib_uverbs_file *file,
2027 2027
2028 if ((cmd->base.attr_mask & IB_QP_CUR_STATE && 2028 if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
2029 cmd->base.cur_qp_state > IB_QPS_ERR) || 2029 cmd->base.cur_qp_state > IB_QPS_ERR) ||
2030 cmd->base.qp_state > IB_QPS_ERR) { 2030 (cmd->base.attr_mask & IB_QP_STATE &&
2031 cmd->base.qp_state > IB_QPS_ERR)) {
2031 ret = -EINVAL; 2032 ret = -EINVAL;
2032 goto release_qp; 2033 goto release_qp;
2033 } 2034 }
2034 2035
2035 attr->qp_state = cmd->base.qp_state; 2036 if (cmd->base.attr_mask & IB_QP_STATE)
2036 attr->cur_qp_state = cmd->base.cur_qp_state; 2037 attr->qp_state = cmd->base.qp_state;
2037 attr->path_mtu = cmd->base.path_mtu; 2038 if (cmd->base.attr_mask & IB_QP_CUR_STATE)
2038 attr->path_mig_state = cmd->base.path_mig_state; 2039 attr->cur_qp_state = cmd->base.cur_qp_state;
2039 attr->qkey = cmd->base.qkey; 2040 if (cmd->base.attr_mask & IB_QP_PATH_MTU)
2040 attr->rq_psn = cmd->base.rq_psn; 2041 attr->path_mtu = cmd->base.path_mtu;
2041 attr->sq_psn = cmd->base.sq_psn; 2042 if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
2042 attr->dest_qp_num = cmd->base.dest_qp_num; 2043 attr->path_mig_state = cmd->base.path_mig_state;
2043 attr->qp_access_flags = cmd->base.qp_access_flags; 2044 if (cmd->base.attr_mask & IB_QP_QKEY)
2044 attr->pkey_index = cmd->base.pkey_index; 2045 attr->qkey = cmd->base.qkey;
2045 attr->alt_pkey_index = cmd->base.alt_pkey_index; 2046 if (cmd->base.attr_mask & IB_QP_RQ_PSN)
2046 attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify; 2047 attr->rq_psn = cmd->base.rq_psn;
2047 attr->max_rd_atomic = cmd->base.max_rd_atomic; 2048 if (cmd->base.attr_mask & IB_QP_SQ_PSN)
2048 attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic; 2049 attr->sq_psn = cmd->base.sq_psn;
2049 attr->min_rnr_timer = cmd->base.min_rnr_timer; 2050 if (cmd->base.attr_mask & IB_QP_DEST_QPN)
2050 attr->port_num = cmd->base.port_num; 2051 attr->dest_qp_num = cmd->base.dest_qp_num;
2051 attr->timeout = cmd->base.timeout; 2052 if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
2052 attr->retry_cnt = cmd->base.retry_cnt; 2053 attr->qp_access_flags = cmd->base.qp_access_flags;
2053 attr->rnr_retry = cmd->base.rnr_retry; 2054 if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
2054 attr->alt_port_num = cmd->base.alt_port_num; 2055 attr->pkey_index = cmd->base.pkey_index;
2055 attr->alt_timeout = cmd->base.alt_timeout; 2056 if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
2056 attr->rate_limit = cmd->rate_limit; 2057 attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
2058 if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
2059 attr->max_rd_atomic = cmd->base.max_rd_atomic;
2060 if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
2061 attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
2062 if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
2063 attr->min_rnr_timer = cmd->base.min_rnr_timer;
2064 if (cmd->base.attr_mask & IB_QP_PORT)
2065 attr->port_num = cmd->base.port_num;
2066 if (cmd->base.attr_mask & IB_QP_TIMEOUT)
2067 attr->timeout = cmd->base.timeout;
2068 if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
2069 attr->retry_cnt = cmd->base.retry_cnt;
2070 if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
2071 attr->rnr_retry = cmd->base.rnr_retry;
2072 if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
2073 attr->alt_port_num = cmd->base.alt_port_num;
2074 attr->alt_timeout = cmd->base.alt_timeout;
2075 attr->alt_pkey_index = cmd->base.alt_pkey_index;
2076 }
2077 if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
2078 attr->rate_limit = cmd->rate_limit;
2057 2079
2058 if (cmd->base.attr_mask & IB_QP_AV) 2080 if (cmd->base.attr_mask & IB_QP_AV)
2059 copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr, 2081 copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
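Note: the modify_qp rework above copies a field from the user command only when its bit is set in attr_mask, so attributes the caller did not name are left untouched. A minimal standalone sketch of that gating pattern, with hypothetical structures rather than the uverbs ones:

    #include <stdint.h>

    #define QP_STATE    (1U << 0)
    #define QP_TIMEOUT  (1U << 1)

    struct user_cmd { uint32_t attr_mask; uint8_t qp_state; uint8_t timeout; };
    struct qp_attr  { uint8_t qp_state; uint8_t timeout; };

    /* Apply only the fields the caller selected via attr_mask. */
    static void apply_masked(struct qp_attr *attr, const struct user_cmd *cmd)
    {
        if (cmd->attr_mask & QP_STATE)
            attr->qp_state = cmd->qp_state;
        if (cmd->attr_mask & QP_TIMEOUT)
            attr->timeout = cmd->timeout;
        /* fields whose bit is clear keep their previous contents */
    }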
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 6d974e2363df..50152c1b1004 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -440,6 +440,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
440 list_del(&entry->obj_list); 440 list_del(&entry->obj_list);
441 kfree(entry); 441 kfree(entry);
442 } 442 }
443 file->ev_queue.is_closed = 1;
443 spin_unlock_irq(&file->ev_queue.lock); 444 spin_unlock_irq(&file->ev_queue.lock);
444 445
445 uverbs_close_fd(filp); 446 uverbs_close_fd(filp);
diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
index 73ea6f0db88f..be854628a7c6 100644
--- a/drivers/infiniband/core/uverbs_uapi.c
+++ b/drivers/infiniband/core/uverbs_uapi.c
@@ -248,6 +248,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi)
248 kfree(rcu_dereference_protected(*slot, true)); 248 kfree(rcu_dereference_protected(*slot, true));
249 radix_tree_iter_delete(&uapi->radix, &iter, slot); 249 radix_tree_iter_delete(&uapi->radix, &iter, slot);
250 } 250 }
251 kfree(uapi);
251} 252}
252 253
253struct uverbs_api *uverbs_alloc_api( 254struct uverbs_api *uverbs_alloc_api(
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 20b9f31052bf..85cd1a3593d6 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -78,7 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
78/* Mutex to protect the list of bnxt_re devices added */ 78/* Mutex to protect the list of bnxt_re devices added */
79static DEFINE_MUTEX(bnxt_re_dev_lock); 79static DEFINE_MUTEX(bnxt_re_dev_lock);
80static struct workqueue_struct *bnxt_re_wq; 80static struct workqueue_struct *bnxt_re_wq;
81static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait); 81static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);
82 82
83/* SR-IOV helper functions */ 83/* SR-IOV helper functions */
84 84
@@ -182,7 +182,7 @@ static void bnxt_re_shutdown(void *p)
182 if (!rdev) 182 if (!rdev)
183 return; 183 return;
184 184
185 bnxt_re_ib_unreg(rdev, false); 185 bnxt_re_ib_unreg(rdev);
186} 186}
187 187
188static void bnxt_re_stop_irq(void *handle) 188static void bnxt_re_stop_irq(void *handle)
@@ -251,7 +251,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
251/* Driver registration routines used to let the networking driver (bnxt_en) 251/* Driver registration routines used to let the networking driver (bnxt_en)
252 * to know that the RoCE driver is now installed 252 * to know that the RoCE driver is now installed
253 */ 253 */
254static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait) 254static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
255{ 255{
256 struct bnxt_en_dev *en_dev; 256 struct bnxt_en_dev *en_dev;
257 int rc; 257 int rc;
@@ -260,14 +260,9 @@ static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
260 return -EINVAL; 260 return -EINVAL;
261 261
262 en_dev = rdev->en_dev; 262 en_dev = rdev->en_dev;
263 /* Acquire rtnl lock if it is not invokded from netdev event */
264 if (lock_wait)
265 rtnl_lock();
266 263
267 rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev, 264 rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
268 BNXT_ROCE_ULP); 265 BNXT_ROCE_ULP);
269 if (lock_wait)
270 rtnl_unlock();
271 return rc; 266 return rc;
272} 267}
273 268
@@ -281,14 +276,12 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)
281 276
282 en_dev = rdev->en_dev; 277 en_dev = rdev->en_dev;
283 278
284 rtnl_lock();
285 rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP, 279 rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
286 &bnxt_re_ulp_ops, rdev); 280 &bnxt_re_ulp_ops, rdev);
287 rtnl_unlock();
288 return rc; 281 return rc;
289} 282}
290 283
291static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait) 284static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
292{ 285{
293 struct bnxt_en_dev *en_dev; 286 struct bnxt_en_dev *en_dev;
294 int rc; 287 int rc;
@@ -298,13 +291,9 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
298 291
299 en_dev = rdev->en_dev; 292 en_dev = rdev->en_dev;
300 293
301 if (lock_wait)
302 rtnl_lock();
303 294
304 rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP); 295 rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);
305 296
306 if (lock_wait)
307 rtnl_unlock();
308 return rc; 297 return rc;
309} 298}
310 299
@@ -320,7 +309,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
320 309
321 num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus()); 310 num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());
322 311
323 rtnl_lock();
324 num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP, 312 num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
325 rdev->msix_entries, 313 rdev->msix_entries,
326 num_msix_want); 314 num_msix_want);
@@ -335,7 +323,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
335 } 323 }
336 rdev->num_msix = num_msix_got; 324 rdev->num_msix = num_msix_got;
337done: 325done:
338 rtnl_unlock();
339 return rc; 326 return rc;
340} 327}
341 328
@@ -358,24 +345,18 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
358 fw_msg->timeout = timeout; 345 fw_msg->timeout = timeout;
359} 346}
360 347
361static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id, 348static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
362 bool lock_wait)
363{ 349{
364 struct bnxt_en_dev *en_dev = rdev->en_dev; 350 struct bnxt_en_dev *en_dev = rdev->en_dev;
365 struct hwrm_ring_free_input req = {0}; 351 struct hwrm_ring_free_input req = {0};
366 struct hwrm_ring_free_output resp; 352 struct hwrm_ring_free_output resp;
367 struct bnxt_fw_msg fw_msg; 353 struct bnxt_fw_msg fw_msg;
368 bool do_unlock = false;
369 int rc = -EINVAL; 354 int rc = -EINVAL;
370 355
371 if (!en_dev) 356 if (!en_dev)
372 return rc; 357 return rc;
373 358
374 memset(&fw_msg, 0, sizeof(fw_msg)); 359 memset(&fw_msg, 0, sizeof(fw_msg));
375 if (lock_wait) {
376 rtnl_lock();
377 do_unlock = true;
378 }
379 360
380 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1); 361 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
381 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; 362 req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
@@ -386,8 +367,6 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
386 if (rc) 367 if (rc)
387 dev_err(rdev_to_dev(rdev), 368 dev_err(rdev_to_dev(rdev),
388 "Failed to free HW ring:%d :%#x", req.ring_id, rc); 369 "Failed to free HW ring:%d :%#x", req.ring_id, rc);
389 if (do_unlock)
390 rtnl_unlock();
391 return rc; 370 return rc;
392} 371}
393 372
@@ -405,7 +384,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
405 return rc; 384 return rc;
406 385
407 memset(&fw_msg, 0, sizeof(fw_msg)); 386 memset(&fw_msg, 0, sizeof(fw_msg));
408 rtnl_lock();
409 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1); 387 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
410 req.enables = 0; 388 req.enables = 0;
411 req.page_tbl_addr = cpu_to_le64(dma_arr[0]); 389 req.page_tbl_addr = cpu_to_le64(dma_arr[0]);
@@ -426,27 +404,21 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
426 if (!rc) 404 if (!rc)
427 *fw_ring_id = le16_to_cpu(resp.ring_id); 405 *fw_ring_id = le16_to_cpu(resp.ring_id);
428 406
429 rtnl_unlock();
430 return rc; 407 return rc;
431} 408}
432 409
433static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, 410static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
434 u32 fw_stats_ctx_id, bool lock_wait) 411 u32 fw_stats_ctx_id)
435{ 412{
436 struct bnxt_en_dev *en_dev = rdev->en_dev; 413 struct bnxt_en_dev *en_dev = rdev->en_dev;
437 struct hwrm_stat_ctx_free_input req = {0}; 414 struct hwrm_stat_ctx_free_input req = {0};
438 struct bnxt_fw_msg fw_msg; 415 struct bnxt_fw_msg fw_msg;
439 bool do_unlock = false;
440 int rc = -EINVAL; 416 int rc = -EINVAL;
441 417
442 if (!en_dev) 418 if (!en_dev)
443 return rc; 419 return rc;
444 420
445 memset(&fw_msg, 0, sizeof(fw_msg)); 421 memset(&fw_msg, 0, sizeof(fw_msg));
446 if (lock_wait) {
447 rtnl_lock();
448 do_unlock = true;
449 }
450 422
451 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1); 423 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
452 req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id); 424 req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
@@ -457,8 +429,6 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
457 dev_err(rdev_to_dev(rdev), 429 dev_err(rdev_to_dev(rdev),
458 "Failed to free HW stats context %#x", rc); 430 "Failed to free HW stats context %#x", rc);
459 431
460 if (do_unlock)
461 rtnl_unlock();
462 return rc; 432 return rc;
463} 433}
464 434
@@ -478,7 +448,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
478 return rc; 448 return rc;
479 449
480 memset(&fw_msg, 0, sizeof(fw_msg)); 450 memset(&fw_msg, 0, sizeof(fw_msg));
481 rtnl_lock();
482 451
483 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1); 452 bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
484 req.update_period_ms = cpu_to_le32(1000); 453 req.update_period_ms = cpu_to_le32(1000);
@@ -490,7 +459,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
490 if (!rc) 459 if (!rc)
491 *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id); 460 *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
492 461
493 rtnl_unlock();
494 return rc; 462 return rc;
495} 463}
496 464
@@ -929,19 +897,19 @@ fail:
929 return rc; 897 return rc;
930} 898}
931 899
932static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait) 900static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
933{ 901{
934 int i; 902 int i;
935 903
936 for (i = 0; i < rdev->num_msix - 1; i++) { 904 for (i = 0; i < rdev->num_msix - 1; i++) {
937 bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait); 905 bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
938 bnxt_qplib_free_nq(&rdev->nq[i]); 906 bnxt_qplib_free_nq(&rdev->nq[i]);
939 } 907 }
940} 908}
941 909
942static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait) 910static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
943{ 911{
944 bnxt_re_free_nq_res(rdev, lock_wait); 912 bnxt_re_free_nq_res(rdev);
945 913
946 if (rdev->qplib_res.dpi_tbl.max) { 914 if (rdev->qplib_res.dpi_tbl.max) {
947 bnxt_qplib_dealloc_dpi(&rdev->qplib_res, 915 bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
@@ -1219,7 +1187,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
1219 return 0; 1187 return 0;
1220} 1188}
1221 1189
1222static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait) 1190static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
1223{ 1191{
1224 int i, rc; 1192 int i, rc;
1225 1193
@@ -1234,28 +1202,27 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
1234 cancel_delayed_work(&rdev->worker); 1202 cancel_delayed_work(&rdev->worker);
1235 1203
1236 bnxt_re_cleanup_res(rdev); 1204 bnxt_re_cleanup_res(rdev);
1237 bnxt_re_free_res(rdev, lock_wait); 1205 bnxt_re_free_res(rdev);
1238 1206
1239 if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) { 1207 if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
1240 rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw); 1208 rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
1241 if (rc) 1209 if (rc)
1242 dev_warn(rdev_to_dev(rdev), 1210 dev_warn(rdev_to_dev(rdev),
1243 "Failed to deinitialize RCFW: %#x", rc); 1211 "Failed to deinitialize RCFW: %#x", rc);
1244 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, 1212 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
1245 lock_wait);
1246 bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); 1213 bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
1247 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); 1214 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
1248 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait); 1215 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
1249 bnxt_qplib_free_rcfw_channel(&rdev->rcfw); 1216 bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
1250 } 1217 }
1251 if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) { 1218 if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
1252 rc = bnxt_re_free_msix(rdev, lock_wait); 1219 rc = bnxt_re_free_msix(rdev);
1253 if (rc) 1220 if (rc)
1254 dev_warn(rdev_to_dev(rdev), 1221 dev_warn(rdev_to_dev(rdev),
1255 "Failed to free MSI-X vectors: %#x", rc); 1222 "Failed to free MSI-X vectors: %#x", rc);
1256 } 1223 }
1257 if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) { 1224 if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
1258 rc = bnxt_re_unregister_netdev(rdev, lock_wait); 1225 rc = bnxt_re_unregister_netdev(rdev);
1259 if (rc) 1226 if (rc)
1260 dev_warn(rdev_to_dev(rdev), 1227 dev_warn(rdev_to_dev(rdev),
1261 "Failed to unregister with netdev: %#x", rc); 1228 "Failed to unregister with netdev: %#x", rc);
@@ -1276,6 +1243,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
1276{ 1243{
1277 int i, j, rc; 1244 int i, j, rc;
1278 1245
1246 bool locked;
1247
 1248	 /* Acquire rtnl lock throughout this function */
1249 rtnl_lock();
1250 locked = true;
1251
1279 /* Registered a new RoCE device instance to netdev */ 1252 /* Registered a new RoCE device instance to netdev */
1280 rc = bnxt_re_register_netdev(rdev); 1253 rc = bnxt_re_register_netdev(rdev);
1281 if (rc) { 1254 if (rc) {
@@ -1374,12 +1347,16 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
1374 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000)); 1347 schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
1375 } 1348 }
1376 1349
1350 rtnl_unlock();
1351 locked = false;
1352
1377 /* Register ib dev */ 1353 /* Register ib dev */
1378 rc = bnxt_re_register_ib(rdev); 1354 rc = bnxt_re_register_ib(rdev);
1379 if (rc) { 1355 if (rc) {
1380 pr_err("Failed to register with IB: %#x\n", rc); 1356 pr_err("Failed to register with IB: %#x\n", rc);
1381 goto fail; 1357 goto fail;
1382 } 1358 }
1359 set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
1383 dev_info(rdev_to_dev(rdev), "Device registered successfully"); 1360 dev_info(rdev_to_dev(rdev), "Device registered successfully");
1384 for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) { 1361 for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
1385 rc = device_create_file(&rdev->ibdev.dev, 1362 rc = device_create_file(&rdev->ibdev.dev,
@@ -1395,7 +1372,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
1395 goto fail; 1372 goto fail;
1396 } 1373 }
1397 } 1374 }
1398 set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
1399 ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed, 1375 ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
1400 &rdev->active_width); 1376 &rdev->active_width);
1401 set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); 1377 set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
@@ -1404,17 +1380,21 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
1404 1380
1405 return 0; 1381 return 0;
1406free_sctx: 1382free_sctx:
1407 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true); 1383 bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
1408free_ctx: 1384free_ctx:
1409 bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx); 1385 bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
1410disable_rcfw: 1386disable_rcfw:
1411 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw); 1387 bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
1412free_ring: 1388free_ring:
1413 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true); 1389 bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
1414free_rcfw: 1390free_rcfw:
1415 bnxt_qplib_free_rcfw_channel(&rdev->rcfw); 1391 bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
1416fail: 1392fail:
1417 bnxt_re_ib_unreg(rdev, true); 1393 if (!locked)
1394 rtnl_lock();
1395 bnxt_re_ib_unreg(rdev);
1396 rtnl_unlock();
1397
1418 return rc; 1398 return rc;
1419} 1399}
1420 1400
@@ -1567,7 +1547,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
1567 */ 1547 */
1568 if (atomic_read(&rdev->sched_count) > 0) 1548 if (atomic_read(&rdev->sched_count) > 0)
1569 goto exit; 1549 goto exit;
1570 bnxt_re_ib_unreg(rdev, false); 1550 bnxt_re_ib_unreg(rdev);
1571 bnxt_re_remove_one(rdev); 1551 bnxt_re_remove_one(rdev);
1572 bnxt_re_dev_unreg(rdev); 1552 bnxt_re_dev_unreg(rdev);
1573 break; 1553 break;
@@ -1646,7 +1626,10 @@ static void __exit bnxt_re_mod_exit(void)
1646 */ 1626 */
1647 flush_workqueue(bnxt_re_wq); 1627 flush_workqueue(bnxt_re_wq);
1648 bnxt_re_dev_stop(rdev); 1628 bnxt_re_dev_stop(rdev);
1649 bnxt_re_ib_unreg(rdev, true); 1629 /* Acquire the rtnl_lock as the L2 resources are freed here */
1630 rtnl_lock();
1631 bnxt_re_ib_unreg(rdev);
1632 rtnl_unlock();
1650 bnxt_re_remove_one(rdev); 1633 bnxt_re_remove_one(rdev);
1651 bnxt_re_dev_unreg(rdev); 1634 bnxt_re_dev_unreg(rdev);
1652 } 1635 }
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 2c19bf772451..e1668bcc2d13 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -6733,6 +6733,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6733 struct hfi1_devdata *dd = ppd->dd; 6733 struct hfi1_devdata *dd = ppd->dd;
6734 struct send_context *sc; 6734 struct send_context *sc;
6735 int i; 6735 int i;
6736 int sc_flags;
6736 6737
6737 if (flags & FREEZE_SELF) 6738 if (flags & FREEZE_SELF)
6738 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK); 6739 write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);
@@ -6743,11 +6744,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
6743 /* notify all SDMA engines that they are going into a freeze */ 6744 /* notify all SDMA engines that they are going into a freeze */
6744 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN)); 6745 sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
6745 6746
6747 sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
6748 SCF_LINK_DOWN : 0);
6746 /* do halt pre-handling on all enabled send contexts */ 6749 /* do halt pre-handling on all enabled send contexts */
6747 for (i = 0; i < dd->num_send_contexts; i++) { 6750 for (i = 0; i < dd->num_send_contexts; i++) {
6748 sc = dd->send_contexts[i].sc; 6751 sc = dd->send_contexts[i].sc;
6749 if (sc && (sc->flags & SCF_ENABLED)) 6752 if (sc && (sc->flags & SCF_ENABLED))
6750 sc_stop(sc, SCF_FROZEN | SCF_HALTED); 6753 sc_stop(sc, sc_flags);
6751 } 6754 }
6752 6755
6753 /* Send context are frozen. Notify user space */ 6756 /* Send context are frozen. Notify user space */
@@ -10674,6 +10677,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
10674 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK); 10677 add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
10675 10678
10676 handle_linkup_change(dd, 1); 10679 handle_linkup_change(dd, 1);
10680 pio_kernel_linkup(dd);
10677 10681
10678 /* 10682 /*
10679 * After link up, a new link width will have been set. 10683 * After link up, a new link width will have been set.
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index c2c1cba5b23b..752057647f09 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
86 unsigned long flags; 86 unsigned long flags;
87 int write = 1; /* write sendctrl back */ 87 int write = 1; /* write sendctrl back */
88 int flush = 0; /* re-read sendctrl to make sure it is flushed */ 88 int flush = 0; /* re-read sendctrl to make sure it is flushed */
89 int i;
89 90
90 spin_lock_irqsave(&dd->sendctrl_lock, flags); 91 spin_lock_irqsave(&dd->sendctrl_lock, flags);
91 92
@@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
95 reg |= SEND_CTRL_SEND_ENABLE_SMASK; 96 reg |= SEND_CTRL_SEND_ENABLE_SMASK;
96 /* Fall through */ 97 /* Fall through */
97 case PSC_DATA_VL_ENABLE: 98 case PSC_DATA_VL_ENABLE:
99 mask = 0;
100 for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
101 if (!dd->vld[i].mtu)
102 mask |= BIT_ULL(i);
98 /* Disallow sending on VLs not enabled */ 103 /* Disallow sending on VLs not enabled */
99 mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) << 104 mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
100 SEND_CTRL_UNSUPPORTED_VL_SHIFT; 105 SEND_CTRL_UNSUPPORTED_VL_SHIFT;
101 reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask; 106 reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
102 break; 107 break;
103 case PSC_GLOBAL_DISABLE: 108 case PSC_GLOBAL_DISABLE:
@@ -921,20 +926,18 @@ void sc_free(struct send_context *sc)
921void sc_disable(struct send_context *sc) 926void sc_disable(struct send_context *sc)
922{ 927{
923 u64 reg; 928 u64 reg;
924 unsigned long flags;
925 struct pio_buf *pbuf; 929 struct pio_buf *pbuf;
926 930
927 if (!sc) 931 if (!sc)
928 return; 932 return;
929 933
930 /* do all steps, even if already disabled */ 934 /* do all steps, even if already disabled */
931 spin_lock_irqsave(&sc->alloc_lock, flags); 935 spin_lock_irq(&sc->alloc_lock);
932 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL)); 936 reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
933 reg &= ~SC(CTRL_CTXT_ENABLE_SMASK); 937 reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
934 sc->flags &= ~SCF_ENABLED; 938 sc->flags &= ~SCF_ENABLED;
935 sc_wait_for_packet_egress(sc, 1); 939 sc_wait_for_packet_egress(sc, 1);
936 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg); 940 write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
937 spin_unlock_irqrestore(&sc->alloc_lock, flags);
938 941
939 /* 942 /*
940 * Flush any waiters. Once the context is disabled, 943 * Flush any waiters. Once the context is disabled,
@@ -944,7 +947,7 @@ void sc_disable(struct send_context *sc)
944 * proceed with the flush. 947 * proceed with the flush.
945 */ 948 */
946 udelay(1); 949 udelay(1);
947 spin_lock_irqsave(&sc->release_lock, flags); 950 spin_lock(&sc->release_lock);
948 if (sc->sr) { /* this context has a shadow ring */ 951 if (sc->sr) { /* this context has a shadow ring */
949 while (sc->sr_tail != sc->sr_head) { 952 while (sc->sr_tail != sc->sr_head) {
950 pbuf = &sc->sr[sc->sr_tail].pbuf; 953 pbuf = &sc->sr[sc->sr_tail].pbuf;
@@ -955,7 +958,8 @@ void sc_disable(struct send_context *sc)
955 sc->sr_tail = 0; 958 sc->sr_tail = 0;
956 } 959 }
957 } 960 }
958 spin_unlock_irqrestore(&sc->release_lock, flags); 961 spin_unlock(&sc->release_lock);
962 spin_unlock_irq(&sc->alloc_lock);
959} 963}
960 964
961/* return SendEgressCtxtStatus.PacketOccupancy */ 965/* return SendEgressCtxtStatus.PacketOccupancy */
@@ -1178,11 +1182,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd)
1178 sc = dd->send_contexts[i].sc; 1182 sc = dd->send_contexts[i].sc;
1179 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER) 1183 if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
1180 continue; 1184 continue;
1185 if (sc->flags & SCF_LINK_DOWN)
1186 continue;
1181 1187
1182 sc_enable(sc); /* will clear the sc frozen flag */ 1188 sc_enable(sc); /* will clear the sc frozen flag */
1183 } 1189 }
1184} 1190}
1185 1191
1192/**
1193 * pio_kernel_linkup() - Re-enable send contexts after linkup event
 1194 * @dd: valid device data
1195 *
1196 * When the link goes down, the freeze path is taken. However, a link down
 1197 * event is different from a freeze because if the send context is re-enabled,
 1198 * whoever is sending data will start sending data again, which will hang
1199 * any QP that is sending data.
1200 *
1201 * The freeze path now looks at the type of event that occurs and takes this
 1202 * path for a link down event.
1203 */
1204void pio_kernel_linkup(struct hfi1_devdata *dd)
1205{
1206 struct send_context *sc;
1207 int i;
1208
1209 for (i = 0; i < dd->num_send_contexts; i++) {
1210 sc = dd->send_contexts[i].sc;
1211 if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
1212 continue;
1213
1214 sc_enable(sc); /* will clear the sc link down flag */
1215 }
1216}
1217
1186/* 1218/*
1187 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear. 1219 * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
1188 * Returns: 1220 * Returns:
@@ -1382,11 +1414,10 @@ void sc_stop(struct send_context *sc, int flag)
1382{ 1414{
1383 unsigned long flags; 1415 unsigned long flags;
1384 1416
1385 /* mark the context */
1386 sc->flags |= flag;
1387
1388 /* stop buffer allocations */ 1417 /* stop buffer allocations */
1389 spin_lock_irqsave(&sc->alloc_lock, flags); 1418 spin_lock_irqsave(&sc->alloc_lock, flags);
1419 /* mark the context */
1420 sc->flags |= flag;
1390 sc->flags &= ~SCF_ENABLED; 1421 sc->flags &= ~SCF_ENABLED;
1391 spin_unlock_irqrestore(&sc->alloc_lock, flags); 1422 spin_unlock_irqrestore(&sc->alloc_lock, flags);
1392 wake_up(&sc->halt_wait); 1423 wake_up(&sc->halt_wait);
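Note: among the pio.c changes above, the unsupported-VL mask is now derived from which VLs actually have an MTU configured rather than from num_vls alone. A standalone sketch of that computation, with illustrative parameters:

    #include <stdint.h>

    #define BIT_ULL(n) (1ULL << (n))

    /* Build a bitmask of VLs that must not be used for sending (no MTU set). */
    static uint64_t unsupported_vl_mask(const uint16_t *vl_mtu, int nvls)
    {
        uint64_t mask = 0;

        for (int i = 0; i < nvls; i++)
            if (!vl_mtu[i])
                mask |= BIT_ULL(i);
        return mask;
    }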
diff --git a/drivers/infiniband/hw/hfi1/pio.h b/drivers/infiniband/hw/hfi1/pio.h
index 058b08f459ab..aaf372c3e5d6 100644
--- a/drivers/infiniband/hw/hfi1/pio.h
+++ b/drivers/infiniband/hw/hfi1/pio.h
@@ -139,6 +139,7 @@ struct send_context {
139#define SCF_IN_FREE 0x02 139#define SCF_IN_FREE 0x02
140#define SCF_HALTED 0x04 140#define SCF_HALTED 0x04
141#define SCF_FROZEN 0x08 141#define SCF_FROZEN 0x08
142#define SCF_LINK_DOWN 0x10
142 143
143struct send_context_info { 144struct send_context_info {
144 struct send_context *sc; /* allocated working context */ 145 struct send_context *sc; /* allocated working context */
@@ -306,6 +307,7 @@ void set_pio_integrity(struct send_context *sc);
306void pio_reset_all(struct hfi1_devdata *dd); 307void pio_reset_all(struct hfi1_devdata *dd);
307void pio_freeze(struct hfi1_devdata *dd); 308void pio_freeze(struct hfi1_devdata *dd);
308void pio_kernel_unfreeze(struct hfi1_devdata *dd); 309void pio_kernel_unfreeze(struct hfi1_devdata *dd);
310void pio_kernel_linkup(struct hfi1_devdata *dd);
309 311
310/* global PIO send control operations */ 312/* global PIO send control operations */
311#define PSC_GLOBAL_ENABLE 0 313#define PSC_GLOBAL_ENABLE 0
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index a3a7b33196d6..5c88706121c1 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
828 if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { 828 if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
829 if (++req->iov_idx == req->data_iovs) { 829 if (++req->iov_idx == req->data_iovs) {
830 ret = -EFAULT; 830 ret = -EFAULT;
831 goto free_txreq; 831 goto free_tx;
832 } 832 }
833 iovec = &req->iovs[req->iov_idx]; 833 iovec = &req->iovs[req->iov_idx];
834 WARN_ON(iovec->offset); 834 WARN_ON(iovec->offset);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 13374c727b14..a7c586a5589d 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1582,6 +1582,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
1582 struct hfi1_pportdata *ppd; 1582 struct hfi1_pportdata *ppd;
1583 struct hfi1_devdata *dd; 1583 struct hfi1_devdata *dd;
1584 u8 sc5; 1584 u8 sc5;
1585 u8 sl;
1585 1586
1586 if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) && 1587 if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
1587 !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH)) 1588 !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))
@@ -1590,8 +1591,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
1590 /* test the mapping for validity */ 1591 /* test the mapping for validity */
1591 ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr)); 1592 ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
1592 ppd = ppd_from_ibp(ibp); 1593 ppd = ppd_from_ibp(ibp);
1593 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
1594 dd = dd_from_ppd(ppd); 1594 dd = dd_from_ppd(ppd);
1595
1596 sl = rdma_ah_get_sl(ah_attr);
1597 if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
1598 return -EINVAL;
1599
1600 sc5 = ibp->sl_to_sc[sl];
1595 if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf) 1601 if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
1596 return -EINVAL; 1602 return -EINVAL;
1597 return 0; 1603 return 0;
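Note: the hfi1_check_ah hunk above adds a bounds check on the user-supplied service level before it is used to index sl_to_sc. A standalone sketch of that validation, with illustrative names:

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Look up an SC for a service level, rejecting out-of-range indices. */
    static int lookup_sc(const uint8_t *sl_to_sc, size_t table_len, uint8_t sl,
                         uint8_t *sc_out)
    {
        if (sl >= table_len)
            return -EINVAL;
        *sc_out = sl_to_sc[sl];
        return 0;
    }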
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index ac116d63e466..f2f11e652dcd 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -723,6 +723,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
723 attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE); 723 attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
724 struct mlx5_ib_ucontext *c = to_mucontext(uobj->context); 724 struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
725 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device); 725 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
726 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
726 struct devx_obj *obj; 727 struct devx_obj *obj;
727 int err; 728 int err;
728 729
@@ -754,10 +755,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
754 755
755 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len); 756 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
756 if (err) 757 if (err)
757 goto obj_free; 758 goto obj_destroy;
758 759
759 return 0; 760 return 0;
760 761
762obj_destroy:
763 mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
761obj_free: 764obj_free:
762 kfree(obj); 765 kfree(obj);
763 return err; 766 return err;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 444d16520506..0b34e909505f 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2951,7 +2951,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
2951{ 2951{
2952 struct srp_target_port *target = host_to_target(scmnd->device->host); 2952 struct srp_target_port *target = host_to_target(scmnd->device->host);
2953 struct srp_rdma_ch *ch; 2953 struct srp_rdma_ch *ch;
2954 int i; 2954 int i, j;
2955 u8 status; 2955 u8 status;
2956 2956
2957 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); 2957 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2965,8 +2965,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
2965 2965
2966 for (i = 0; i < target->ch_count; i++) { 2966 for (i = 0; i < target->ch_count; i++) {
2967 ch = &target->ch[i]; 2967 ch = &target->ch[i];
2968 for (i = 0; i < target->req_ring_size; ++i) { 2968 for (j = 0; j < target->req_ring_size; ++j) {
2969 struct srp_request *req = &ch->req_ring[i]; 2969 struct srp_request *req = &ch->req_ring[j];
2970 2970
2971 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); 2971 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2972 } 2972 }
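Note: the srp_reset_device fix above gives the inner request loop its own counter; reusing the outer channel index both terminates the outer loop early and walks the wrong ring. A minimal sketch of the corrected loop shape, with illustrative names:

    /* Walk every request of every channel; the inner loop needs its own index. */
    static int count_all_requests(int ch_count, int ring_size)
    {
        int finished = 0;

        for (int i = 0; i < ch_count; i++)
            for (int j = 0; j < ring_size; j++)
                finished++;   /* stand-in for finishing request j on channel i */
        return finished;
    }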
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index 6f62da2909ec..6caee807cafa 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -75,8 +75,7 @@ MODULE_LICENSE("GPL");
75 */ 75 */
76 76
77 77
78static unsigned char atakbd_keycode[0x72] = { /* American layout */ 78static unsigned char atakbd_keycode[0x73] = { /* American layout */
79 [0] = KEY_GRAVE,
80 [1] = KEY_ESC, 79 [1] = KEY_ESC,
81 [2] = KEY_1, 80 [2] = KEY_1,
82 [3] = KEY_2, 81 [3] = KEY_2,
@@ -117,9 +116,9 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
117 [38] = KEY_L, 116 [38] = KEY_L,
118 [39] = KEY_SEMICOLON, 117 [39] = KEY_SEMICOLON,
119 [40] = KEY_APOSTROPHE, 118 [40] = KEY_APOSTROPHE,
120 [41] = KEY_BACKSLASH, /* FIXME, '#' */ 119 [41] = KEY_GRAVE,
121 [42] = KEY_LEFTSHIFT, 120 [42] = KEY_LEFTSHIFT,
122 [43] = KEY_GRAVE, /* FIXME: '~' */ 121 [43] = KEY_BACKSLASH,
123 [44] = KEY_Z, 122 [44] = KEY_Z,
124 [45] = KEY_X, 123 [45] = KEY_X,
125 [46] = KEY_C, 124 [46] = KEY_C,
@@ -145,45 +144,34 @@ static unsigned char atakbd_keycode[0x72] = { /* American layout */
145 [66] = KEY_F8, 144 [66] = KEY_F8,
146 [67] = KEY_F9, 145 [67] = KEY_F9,
147 [68] = KEY_F10, 146 [68] = KEY_F10,
148 [69] = KEY_ESC, 147 [71] = KEY_HOME,
149 [70] = KEY_DELETE, 148 [72] = KEY_UP,
150 [71] = KEY_KP7,
151 [72] = KEY_KP8,
152 [73] = KEY_KP9,
153 [74] = KEY_KPMINUS, 149 [74] = KEY_KPMINUS,
154 [75] = KEY_KP4, 150 [75] = KEY_LEFT,
155 [76] = KEY_KP5, 151 [77] = KEY_RIGHT,
156 [77] = KEY_KP6,
157 [78] = KEY_KPPLUS, 152 [78] = KEY_KPPLUS,
158 [79] = KEY_KP1, 153 [80] = KEY_DOWN,
159 [80] = KEY_KP2, 154 [82] = KEY_INSERT,
160 [81] = KEY_KP3, 155 [83] = KEY_DELETE,
161 [82] = KEY_KP0,
162 [83] = KEY_KPDOT,
163 [90] = KEY_KPLEFTPAREN,
164 [91] = KEY_KPRIGHTPAREN,
165 [92] = KEY_KPASTERISK, /* FIXME */
166 [93] = KEY_KPASTERISK,
167 [94] = KEY_KPPLUS,
168 [95] = KEY_HELP,
169 [96] = KEY_102ND, 156 [96] = KEY_102ND,
170 [97] = KEY_KPASTERISK, /* FIXME */ 157 [97] = KEY_UNDO,
171 [98] = KEY_KPSLASH, 158 [98] = KEY_HELP,
172 [99] = KEY_KPLEFTPAREN, 159 [99] = KEY_KPLEFTPAREN,
173 [100] = KEY_KPRIGHTPAREN, 160 [100] = KEY_KPRIGHTPAREN,
174 [101] = KEY_KPSLASH, 161 [101] = KEY_KPSLASH,
175 [102] = KEY_KPASTERISK, 162 [102] = KEY_KPASTERISK,
176 [103] = KEY_UP, 163 [103] = KEY_KP7,
177 [104] = KEY_KPASTERISK, /* FIXME */ 164 [104] = KEY_KP8,
178 [105] = KEY_LEFT, 165 [105] = KEY_KP9,
179 [106] = KEY_RIGHT, 166 [106] = KEY_KP4,
180 [107] = KEY_KPASTERISK, /* FIXME */ 167 [107] = KEY_KP5,
181 [108] = KEY_DOWN, 168 [108] = KEY_KP6,
182 [109] = KEY_KPASTERISK, /* FIXME */ 169 [109] = KEY_KP1,
183 [110] = KEY_KPASTERISK, /* FIXME */ 170 [110] = KEY_KP2,
184 [111] = KEY_KPASTERISK, /* FIXME */ 171 [111] = KEY_KP3,
185 [112] = KEY_KPASTERISK, /* FIXME */ 172 [112] = KEY_KP0,
186 [113] = KEY_KPASTERISK /* FIXME */ 173 [113] = KEY_KPDOT,
174 [114] = KEY_KPENTER,
187}; 175};
188 176
189static struct input_dev *atakbd_dev; 177static struct input_dev *atakbd_dev;
@@ -191,21 +179,15 @@ static struct input_dev *atakbd_dev;
191static void atakbd_interrupt(unsigned char scancode, char down) 179static void atakbd_interrupt(unsigned char scancode, char down)
192{ 180{
193 181
194 if (scancode < 0x72) { /* scancodes < 0xf2 are keys */ 182 if (scancode < 0x73) { /* scancodes < 0xf3 are keys */
195 183
196 // report raw events here? 184 // report raw events here?
197 185
198 scancode = atakbd_keycode[scancode]; 186 scancode = atakbd_keycode[scancode];
199 187
200 if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */ 188 input_report_key(atakbd_dev, scancode, down);
201 input_report_key(atakbd_dev, scancode, 1); 189 input_sync(atakbd_dev);
202 input_report_key(atakbd_dev, scancode, 0); 190 } else /* scancodes >= 0xf3 are mouse data, most likely */
203 input_sync(atakbd_dev);
204 } else {
205 input_report_key(atakbd_dev, scancode, down);
206 input_sync(atakbd_dev);
207 }
208 } else /* scancodes >= 0xf2 are mouse data, most likely */
209 printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode); 191 printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
210 192
211 return; 193 return;
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 96a887f33698..eb14ddf69346 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -410,7 +410,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
410 min = abs->minimum; 410 min = abs->minimum;
411 max = abs->maximum; 411 max = abs->maximum;
412 412
413 if ((min != 0 || max != 0) && max <= min) { 413 if ((min != 0 || max != 0) && max < min) {
414 printk(KERN_DEBUG 414 printk(KERN_DEBUG
415 "%s: invalid abs[%02x] min:%d max:%d\n", 415 "%s: invalid abs[%02x] min:%d max:%d\n",
416 UINPUT_NAME, code, min, max); 416 UINPUT_NAME, code, min, max);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 44f57cf6675b..2d95e8d93cc7 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1178,6 +1178,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
1178static const char * const middle_button_pnp_ids[] = { 1178static const char * const middle_button_pnp_ids[] = {
1179 "LEN2131", /* ThinkPad P52 w/ NFC */ 1179 "LEN2131", /* ThinkPad P52 w/ NFC */
1180 "LEN2132", /* ThinkPad P52 */ 1180 "LEN2132", /* ThinkPad P52 */
1181 "LEN2133", /* ThinkPad P72 w/ NFC */
1182 "LEN2134", /* ThinkPad P72 */
1181 NULL 1183 NULL
1182}; 1184};
1183 1185
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 80e69bb8283e..83ac8c128192 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -241,6 +241,9 @@ static int __maybe_unused egalax_ts_suspend(struct device *dev)
241 struct i2c_client *client = to_i2c_client(dev); 241 struct i2c_client *client = to_i2c_client(dev);
242 int ret; 242 int ret;
243 243
244 if (device_may_wakeup(dev))
245 return enable_irq_wake(client->irq);
246
244 ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN); 247 ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
245 return ret > 0 ? 0 : ret; 248 return ret > 0 ? 0 : ret;
246} 249}
@@ -249,6 +252,9 @@ static int __maybe_unused egalax_ts_resume(struct device *dev)
249{ 252{
250 struct i2c_client *client = to_i2c_client(dev); 253 struct i2c_client *client = to_i2c_client(dev);
251 254
255 if (device_may_wakeup(dev))
256 return disable_irq_wake(client->irq);
257
252 return egalax_wake_up_device(client); 258 return egalax_wake_up_device(client);
253} 259}
254 260
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4e04fff23977..73e47d93e7a0 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -246,7 +246,13 @@ static u16 get_alias(struct device *dev)
246 246
247 /* The callers make sure that get_device_id() does not fail here */ 247 /* The callers make sure that get_device_id() does not fail here */
248 devid = get_device_id(dev); 248 devid = get_device_id(dev);
249
250 /* For ACPI HID devices, we simply return the devid as such */
251 if (!dev_is_pci(dev))
252 return devid;
253
249 ivrs_alias = amd_iommu_alias_table[devid]; 254 ivrs_alias = amd_iommu_alias_table[devid];
255
250 pci_for_each_dma_alias(pdev, __last_alias, &pci_alias); 256 pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
251 257
252 if (ivrs_alias == pci_alias) 258 if (ivrs_alias == pci_alias)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 5f3f10cf9d9d..bedc801b06a0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2540,9 +2540,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2540 if (dev && dev_is_pci(dev) && info->pasid_supported) { 2540 if (dev && dev_is_pci(dev) && info->pasid_supported) {
2541 ret = intel_pasid_alloc_table(dev); 2541 ret = intel_pasid_alloc_table(dev);
2542 if (ret) { 2542 if (ret) {
2543 __dmar_remove_one_dev_info(info); 2543 pr_warn("No pasid table for %s, pasid disabled\n",
2544 spin_unlock_irqrestore(&device_domain_lock, flags); 2544 dev_name(dev));
2545 return NULL; 2545 info->pasid_supported = 0;
2546 } 2546 }
2547 } 2547 }
2548 spin_unlock_irqrestore(&device_domain_lock, flags); 2548 spin_unlock_irqrestore(&device_domain_lock, flags);
diff --git a/drivers/iommu/intel-pasid.h b/drivers/iommu/intel-pasid.h
index 1c05ed6fc5a5..1fb5e12b029a 100644
--- a/drivers/iommu/intel-pasid.h
+++ b/drivers/iommu/intel-pasid.h
@@ -11,7 +11,7 @@
11#define __INTEL_PASID_H 11#define __INTEL_PASID_H
12 12
13#define PASID_MIN 0x1 13#define PASID_MIN 0x1
14#define PASID_MAX 0x100000 14#define PASID_MAX 0x20000
15 15
16struct pasid_entry { 16struct pasid_entry {
17 u64 val; 17 u64 val;
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index 258115b10fa9..ad3e2b97469e 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1241,6 +1241,12 @@ err_unprepare_clocks:
1241 1241
1242static void rk_iommu_shutdown(struct platform_device *pdev) 1242static void rk_iommu_shutdown(struct platform_device *pdev)
1243{ 1243{
1244 struct rk_iommu *iommu = platform_get_drvdata(pdev);
1245 int i = 0, irq;
1246
1247 while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
1248 devm_free_irq(iommu->dev, irq, iommu);
1249
1244 pm_runtime_force_suspend(&pdev->dev); 1250 pm_runtime_force_suspend(&pdev->dev);
1245} 1251}
1246 1252
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 83504dd8100a..954dad29e6e8 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca);
965void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); 965void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
966 966
967extern struct workqueue_struct *bcache_wq; 967extern struct workqueue_struct *bcache_wq;
968extern struct workqueue_struct *bch_journal_wq;
968extern struct mutex bch_register_lock; 969extern struct mutex bch_register_lock;
969extern struct list_head bch_cache_sets; 970extern struct list_head bch_cache_sets;
970 971
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6116bbf870d8..522c7426f3a0 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
485 485
486 closure_get(&ca->set->cl); 486 closure_get(&ca->set->cl);
487 INIT_WORK(&ja->discard_work, journal_discard_work); 487 INIT_WORK(&ja->discard_work, journal_discard_work);
488 schedule_work(&ja->discard_work); 488 queue_work(bch_journal_wq, &ja->discard_work);
489 } 489 }
490} 490}
491 491
@@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl)
592 : &j->w[0]; 592 : &j->w[0];
593 593
594 __closure_wake_up(&w->wait); 594 __closure_wake_up(&w->wait);
595 continue_at_nobarrier(cl, journal_write, system_wq); 595 continue_at_nobarrier(cl, journal_write, bch_journal_wq);
596} 596}
597 597
598static void journal_write_unlock(struct closure *cl) 598static void journal_write_unlock(struct closure *cl)
@@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl)
627 spin_unlock(&c->journal.lock); 627 spin_unlock(&c->journal.lock);
628 628
629 btree_flush_write(c); 629 btree_flush_write(c);
630 continue_at(cl, journal_write, system_wq); 630 continue_at(cl, journal_write, bch_journal_wq);
631 return; 631 return;
632 } 632 }
633 633
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 94c756c66bd7..30ba9aeb5ee8 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -47,6 +47,7 @@ static int bcache_major;
47static DEFINE_IDA(bcache_device_idx); 47static DEFINE_IDA(bcache_device_idx);
48static wait_queue_head_t unregister_wait; 48static wait_queue_head_t unregister_wait;
49struct workqueue_struct *bcache_wq; 49struct workqueue_struct *bcache_wq;
50struct workqueue_struct *bch_journal_wq;
50 51
51#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) 52#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
52/* limitation of partitions number on single bcache device */ 53/* limitation of partitions number on single bcache device */
@@ -2341,6 +2342,9 @@ static void bcache_exit(void)
2341 kobject_put(bcache_kobj); 2342 kobject_put(bcache_kobj);
2342 if (bcache_wq) 2343 if (bcache_wq)
2343 destroy_workqueue(bcache_wq); 2344 destroy_workqueue(bcache_wq);
2345 if (bch_journal_wq)
2346 destroy_workqueue(bch_journal_wq);
2347
2344 if (bcache_major) 2348 if (bcache_major)
2345 unregister_blkdev(bcache_major, "bcache"); 2349 unregister_blkdev(bcache_major, "bcache");
2346 unregister_reboot_notifier(&reboot); 2350 unregister_reboot_notifier(&reboot);
@@ -2370,6 +2374,10 @@ static int __init bcache_init(void)
2370 if (!bcache_wq) 2374 if (!bcache_wq)
2371 goto err; 2375 goto err;
2372 2376
2377 bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
2378 if (!bch_journal_wq)
2379 goto err;
2380
2373 bcache_kobj = kobject_create_and_add("bcache", fs_kobj); 2381 bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
2374 if (!bcache_kobj) 2382 if (!bcache_kobj)
2375 goto err; 2383 goto err;
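The bcache changes move journal work off system_wq onto a dedicated workqueue created with WQ_MEM_RECLAIM, which guarantees a rescuer thread so journal writes can still make progress while the system is reclaiming memory. A self-contained sketch of that pattern (demo_* names are illustrative, not bcache's):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_journal_wq;

static void demo_journal_work(struct work_struct *w)
{
	/* flush journal entries here */
}

static DECLARE_WORK(demo_work, demo_journal_work);

static int __init demo_init(void)
{
	/* WQ_MEM_RECLAIM reserves a rescuer thread, so work queued here
	 * can run even when the system is under memory pressure. */
	demo_journal_wq = alloc_workqueue("demo_journal", WQ_MEM_RECLAIM, 0);
	if (!demo_journal_wq)
		return -ENOMEM;

	queue_work(demo_journal_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	if (demo_journal_wq)
		destroy_workqueue(demo_journal_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");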
diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c
index b5410aeb5fe2..bb41bea950ac 100644
--- a/drivers/media/i2c/mt9v111.c
+++ b/drivers/media/i2c/mt9v111.c
@@ -1159,41 +1159,21 @@ static int mt9v111_probe(struct i2c_client *client)
1159 V4L2_CID_AUTO_WHITE_BALANCE, 1159 V4L2_CID_AUTO_WHITE_BALANCE,
1160 0, 1, 1, 1160 0, 1, 1,
1161 V4L2_WHITE_BALANCE_AUTO); 1161 V4L2_WHITE_BALANCE_AUTO);
1162 if (IS_ERR_OR_NULL(mt9v111->auto_awb)) {
1163 ret = PTR_ERR(mt9v111->auto_awb);
1164 goto error_free_ctrls;
1165 }
1166
1167 mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls, 1162 mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls,
1168 &mt9v111_ctrl_ops, 1163 &mt9v111_ctrl_ops,
1169 V4L2_CID_EXPOSURE_AUTO, 1164 V4L2_CID_EXPOSURE_AUTO,
1170 V4L2_EXPOSURE_MANUAL, 1165 V4L2_EXPOSURE_MANUAL,
1171 0, V4L2_EXPOSURE_AUTO); 1166 0, V4L2_EXPOSURE_AUTO);
1172 if (IS_ERR_OR_NULL(mt9v111->auto_exp)) {
1173 ret = PTR_ERR(mt9v111->auto_exp);
1174 goto error_free_ctrls;
1175 }
1176
1177 /* Initialize timings */
1178 mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, 1167 mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
1179 V4L2_CID_HBLANK, 1168 V4L2_CID_HBLANK,
1180 MT9V111_CORE_R05_MIN_HBLANK, 1169 MT9V111_CORE_R05_MIN_HBLANK,
1181 MT9V111_CORE_R05_MAX_HBLANK, 1, 1170 MT9V111_CORE_R05_MAX_HBLANK, 1,
1182 MT9V111_CORE_R05_DEF_HBLANK); 1171 MT9V111_CORE_R05_DEF_HBLANK);
1183 if (IS_ERR_OR_NULL(mt9v111->hblank)) {
1184 ret = PTR_ERR(mt9v111->hblank);
1185 goto error_free_ctrls;
1186 }
1187
1188 mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, 1172 mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
1189 V4L2_CID_VBLANK, 1173 V4L2_CID_VBLANK,
1190 MT9V111_CORE_R06_MIN_VBLANK, 1174 MT9V111_CORE_R06_MIN_VBLANK,
1191 MT9V111_CORE_R06_MAX_VBLANK, 1, 1175 MT9V111_CORE_R06_MAX_VBLANK, 1,
1192 MT9V111_CORE_R06_DEF_VBLANK); 1176 MT9V111_CORE_R06_DEF_VBLANK);
1193 if (IS_ERR_OR_NULL(mt9v111->vblank)) {
1194 ret = PTR_ERR(mt9v111->vblank);
1195 goto error_free_ctrls;
1196 }
1197 1177
1198 /* PIXEL_RATE is fixed: just expose it to user space. */ 1178 /* PIXEL_RATE is fixed: just expose it to user space. */
1199 v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops, 1179 v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
@@ -1201,6 +1181,10 @@ static int mt9v111_probe(struct i2c_client *client)
1201 DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1, 1181 DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1,
1202 DIV_ROUND_CLOSEST(mt9v111->sysclk, 2)); 1182 DIV_ROUND_CLOSEST(mt9v111->sysclk, 2));
1203 1183
1184 if (mt9v111->ctrls.error) {
1185 ret = mt9v111->ctrls.error;
1186 goto error_free_ctrls;
1187 }
1204 mt9v111->sd.ctrl_handler = &mt9v111->ctrls; 1188 mt9v111->sd.ctrl_handler = &mt9v111->ctrls;
1205 1189
1206 /* Start with default configuration: 640x480 UYVY. */ 1190 /* Start with default configuration: 640x480 UYVY. */
@@ -1226,26 +1210,27 @@ static int mt9v111_probe(struct i2c_client *client)
1226 mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE; 1210 mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE;
1227 ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad); 1211 ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad);
1228 if (ret) 1212 if (ret)
1229 goto error_free_ctrls; 1213 goto error_free_entity;
1230#endif 1214#endif
1231 1215
1232 ret = mt9v111_chip_probe(mt9v111); 1216 ret = mt9v111_chip_probe(mt9v111);
1233 if (ret) 1217 if (ret)
1234 goto error_free_ctrls; 1218 goto error_free_entity;
1235 1219
1236 ret = v4l2_async_register_subdev(&mt9v111->sd); 1220 ret = v4l2_async_register_subdev(&mt9v111->sd);
1237 if (ret) 1221 if (ret)
1238 goto error_free_ctrls; 1222 goto error_free_entity;
1239 1223
1240 return 0; 1224 return 0;
1241 1225
1242error_free_ctrls: 1226error_free_entity:
1243 v4l2_ctrl_handler_free(&mt9v111->ctrls);
1244
1245#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) 1227#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
1246 media_entity_cleanup(&mt9v111->sd.entity); 1228 media_entity_cleanup(&mt9v111->sd.entity);
1247#endif 1229#endif
1248 1230
1231error_free_ctrls:
1232 v4l2_ctrl_handler_free(&mt9v111->ctrls);
1233
1249 mutex_destroy(&mt9v111->pwr_mutex); 1234 mutex_destroy(&mt9v111->pwr_mutex);
1250 mutex_destroy(&mt9v111->stream_mutex); 1235 mutex_destroy(&mt9v111->stream_mutex);
1251 1236
@@ -1259,12 +1244,12 @@ static int mt9v111_remove(struct i2c_client *client)
1259 1244
1260 v4l2_async_unregister_subdev(sd); 1245 v4l2_async_unregister_subdev(sd);
1261 1246
1262 v4l2_ctrl_handler_free(&mt9v111->ctrls);
1263
1264#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER) 1247#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
1265 media_entity_cleanup(&sd->entity); 1248 media_entity_cleanup(&sd->entity);
1266#endif 1249#endif
1267 1250
1251 v4l2_ctrl_handler_free(&mt9v111->ctrls);
1252
1268 mutex_destroy(&mt9v111->pwr_mutex); 1253 mutex_destroy(&mt9v111->pwr_mutex);
1269 mutex_destroy(&mt9v111->stream_mutex); 1254 mutex_destroy(&mt9v111->stream_mutex);
1270 1255
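The mt9v111 hunks drop the per-control IS_ERR_OR_NULL checks and instead test the handler's accumulated error once: v4l2_ctrl_new_std() and friends return NULL on failure and latch the first error code in handler->error. A short sketch of that idiom, with an arbitrary pair of controls rather than the sensor's real list:

#include <media/v4l2-ctrls.h>

static int demo_init_controls(struct v4l2_ctrl_handler *hdl,
			      const struct v4l2_ctrl_ops *ops)
{
	v4l2_ctrl_handler_init(hdl, 2);

	/* Failed allocations return NULL and record the error in hdl->error,
	 * so no per-control checks are needed here. */
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HBLANK, 0, 1024, 1, 128);
	v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VBLANK, 0, 1024, 1, 128);

	if (hdl->error) {
		int ret = hdl->error;

		v4l2_ctrl_handler_free(hdl);
		return ret;
	}
	return 0;
}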
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 94c1fe0e9787..54fe90acb5b2 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -541,6 +541,8 @@ config VIDEO_CROS_EC_CEC
541 depends on MFD_CROS_EC 541 depends on MFD_CROS_EC
542 select CEC_CORE 542 select CEC_CORE
543 select CEC_NOTIFIER 543 select CEC_NOTIFIER
544 select CHROME_PLATFORMS
545 select CROS_EC_PROTO
544 ---help--- 546 ---help---
545 If you say yes here you will get support for the 547 If you say yes here you will get support for the
546 ChromeOS Embedded Controller's CEC. 548 ChromeOS Embedded Controller's CEC.
diff --git a/drivers/media/platform/qcom/camss/camss-csid.c b/drivers/media/platform/qcom/camss/camss-csid.c
index 729b31891466..a5ae85674ffb 100644
--- a/drivers/media/platform/qcom/camss/camss-csid.c
+++ b/drivers/media/platform/qcom/camss/camss-csid.c
@@ -10,6 +10,7 @@
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/completion.h> 11#include <linux/completion.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/io.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/of.h> 15#include <linux/of.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
index c832539397d7..12bce391d71f 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-2ph-1-0.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/io.h>
15 16
16#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n)) 17#define CAMSS_CSI_PHY_LNn_CFG2(n) (0x004 + 0x40 * (n))
17#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n)) 18#define CAMSS_CSI_PHY_LNn_CFG3(n) (0x008 + 0x40 * (n))
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
index bcd0dfd33618..2e65caf1ecae 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy-3ph-1-0.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/io.h>
15 16
16#define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n)) 17#define CSIPHY_3PH_LNn_CFG1(n) (0x000 + 0x100 * (n))
17#define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6)) 18#define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG (BIT(7) | BIT(6))
diff --git a/drivers/media/platform/qcom/camss/camss-csiphy.c b/drivers/media/platform/qcom/camss/camss-csiphy.c
index 4559f3b1b38c..008afb85023b 100644
--- a/drivers/media/platform/qcom/camss/camss-csiphy.c
+++ b/drivers/media/platform/qcom/camss/camss-csiphy.c
@@ -10,6 +10,7 @@
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/io.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/of.h> 15#include <linux/of.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
diff --git a/drivers/media/platform/qcom/camss/camss-ispif.c b/drivers/media/platform/qcom/camss/camss-ispif.c
index 7f269021d08c..1f33b4eb198c 100644
--- a/drivers/media/platform/qcom/camss/camss-ispif.c
+++ b/drivers/media/platform/qcom/camss/camss-ispif.c
@@ -10,6 +10,7 @@
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/completion.h> 11#include <linux/completion.h>
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/io.h>
13#include <linux/iopoll.h> 14#include <linux/iopoll.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/mutex.h> 16#include <linux/mutex.h>
@@ -1076,8 +1077,8 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
1076 else 1077 else
1077 return -EINVAL; 1078 return -EINVAL;
1078 1079
1079 ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line), 1080 ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line),
1080 GFP_KERNEL); 1081 GFP_KERNEL);
1081 if (!ispif->line) 1082 if (!ispif->line)
1082 return -ENOMEM; 1083 return -ENOMEM;
1083 1084
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
index da3a9fed9f2d..174a36be6f5d 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/io.h>
12#include <linux/iopoll.h> 13#include <linux/iopoll.h>
13 14
14#include "camss-vfe.h" 15#include "camss-vfe.h"
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
index 4c584bffd179..0dca8bf9281e 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/io.h>
12#include <linux/iopoll.h> 13#include <linux/iopoll.h>
13 14
14#include "camss-vfe.h" 15#include "camss-vfe.h"
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index dcc0c30ef1b1..669615fff6a0 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -848,17 +848,18 @@ static int camss_probe(struct platform_device *pdev)
848 return -EINVAL; 848 return -EINVAL;
849 } 849 }
850 850
851 camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy), 851 camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
852 GFP_KERNEL); 852 sizeof(*camss->csiphy), GFP_KERNEL);
853 if (!camss->csiphy) 853 if (!camss->csiphy)
854 return -ENOMEM; 854 return -ENOMEM;
855 855
856 camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid), 856 camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
857 GFP_KERNEL); 857 GFP_KERNEL);
858 if (!camss->csid) 858 if (!camss->csid)
859 return -ENOMEM; 859 return -ENOMEM;
860 860
861 camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL); 861 camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
862 GFP_KERNEL);
862 if (!camss->vfe) 863 if (!camss->vfe)
863 return -ENOMEM; 864 return -ENOMEM;
864 865
@@ -993,12 +994,12 @@ static const struct of_device_id camss_dt_match[] = {
993 994
994MODULE_DEVICE_TABLE(of, camss_dt_match); 995MODULE_DEVICE_TABLE(of, camss_dt_match);
995 996
996static int camss_runtime_suspend(struct device *dev) 997static int __maybe_unused camss_runtime_suspend(struct device *dev)
997{ 998{
998 return 0; 999 return 0;
999} 1000}
1000 1001
1001static int camss_runtime_resume(struct device *dev) 1002static int __maybe_unused camss_runtime_resume(struct device *dev)
1002{ 1003{
1003 return 0; 1004 return 0;
1004} 1005}
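The camss hunks switch probe-time allocations from kcalloc() to devm_kcalloc(), tying their lifetime to the struct device so early error returns and the remove path need no explicit kfree(). A hedged sketch with made-up types:

#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_chan { int id; };

static int demo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct demo_chan *chans;
	unsigned int num = 4;

	/* Freed automatically when the device is unbound, so the error
	 * paths below need no kfree(). */
	chans = devm_kcalloc(dev, num, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return -ENOMEM;

	platform_set_drvdata(pdev, chans);
	return 0;
}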
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 666d319d3d1a..1f6c1eefe389 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
 			if (msg[0].addr == state->af9033_i2c_addr[1])
 				reg |= 0x100000;
 
-			ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
-					     msg[0].len - 3);
+			ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
+								 &msg[0].buf[3],
+								 msg[0].len - 3)
+						: -EOPNOTSUPP;
 		} else {
 			/* I2C write */
 			u8 buf[MAX_XFER_SIZE];
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 127fe6eb91d9..a3ef1f50a4b3 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
115 if (sev == NULL) 115 if (sev == NULL)
116 return; 116 return;
117 117
118 /*
119 * If the event has been added to the fh->subscribed list, but its
120 * add op has not completed yet elems will be 0, treat this as
121 * not being subscribed.
122 */
123 if (!sev->elems)
124 return;
125
126 /* Increase event sequence number on fh. */ 118 /* Increase event sequence number on fh. */
127 fh->sequence++; 119 fh->sequence++;
128 120
@@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
208 struct v4l2_subscribed_event *sev, *found_ev; 200 struct v4l2_subscribed_event *sev, *found_ev;
209 unsigned long flags; 201 unsigned long flags;
210 unsigned i; 202 unsigned i;
203 int ret = 0;
211 204
212 if (sub->type == V4L2_EVENT_ALL) 205 if (sub->type == V4L2_EVENT_ALL)
213 return -EINVAL; 206 return -EINVAL;
@@ -225,31 +218,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
 	sev->flags = sub->flags;
 	sev->fh = fh;
 	sev->ops = ops;
+	sev->elems = elems;
+
+	mutex_lock(&fh->subscribe_lock);
 
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
-	if (!found_ev)
-		list_add(&sev->list, &fh->subscribed);
 	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
 	if (found_ev) {
+		/* Already listening */
 		kvfree(sev);
-		return 0; /* Already listening */
+		goto out_unlock;
 	}
 
 	if (sev->ops && sev->ops->add) {
-		int ret = sev->ops->add(sev, elems);
+		ret = sev->ops->add(sev, elems);
 		if (ret) {
-			sev->ops = NULL;
-			v4l2_event_unsubscribe(fh, sub);
-			return ret;
+			kvfree(sev);
+			goto out_unlock;
 		}
 	}
 
-	/* Mark as ready for use */
-	sev->elems = elems;
+	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+	list_add(&sev->list, &fh->subscribed);
+	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
-	return 0;
+out_unlock:
+	mutex_unlock(&fh->subscribe_lock);
+
+	return ret;
 }
254EXPORT_SYMBOL_GPL(v4l2_event_subscribe); 252EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
255 253
@@ -288,6 +286,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
288 return 0; 286 return 0;
289 } 287 }
290 288
289 mutex_lock(&fh->subscribe_lock);
290
291 spin_lock_irqsave(&fh->vdev->fh_lock, flags); 291 spin_lock_irqsave(&fh->vdev->fh_lock, flags);
292 292
293 sev = v4l2_event_subscribed(fh, sub->type, sub->id); 293 sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -305,6 +305,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
305 if (sev && sev->ops && sev->ops->del) 305 if (sev && sev->ops && sev->ops->del)
306 sev->ops->del(sev); 306 sev->ops->del(sev);
307 307
308 mutex_unlock(&fh->subscribe_lock);
309
308 kvfree(sev); 310 kvfree(sev);
309 311
310 return 0; 312 return 0;
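The v4l2-event rework serializes subscribe/unsubscribe with a per-filehandle mutex and only adds the subscription to the list after the add callback has succeeded, so lookups under the spinlock never see a half-initialized entry. A simplified, self-contained sketch of that ordering (the demo_* types are not the real v4l2 structures):

#include <linux/list.h>
#include <linux/mm.h>		/* kvfree() */
#include <linux/mutex.h>
#include <linux/spinlock.h>

struct demo_fh {
	struct mutex subscribe_lock;	/* serializes (un)subscribe */
	spinlock_t lock;		/* protects @subscribed */
	struct list_head subscribed;
};

struct demo_sub {
	struct list_head list;
};

static int demo_add_op(struct demo_sub *sub)
{
	return 0;			/* stand-in for sev->ops->add() */
}

static int demo_subscribe(struct demo_fh *fh, struct demo_sub *sub)
{
	int ret;

	mutex_lock(&fh->subscribe_lock);

	ret = demo_add_op(sub);		/* may sleep, list not touched yet */
	if (ret) {
		kvfree(sub);
		goto out;
	}

	/* Publish only after the add op succeeded. */
	spin_lock_irq(&fh->lock);
	list_add(&sub->list, &fh->subscribed);
	spin_unlock_irq(&fh->lock);
out:
	mutex_unlock(&fh->subscribe_lock);
	return ret;
}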
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index 3895999bf880..c91a7bd3ecfc 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
45 INIT_LIST_HEAD(&fh->available); 45 INIT_LIST_HEAD(&fh->available);
46 INIT_LIST_HEAD(&fh->subscribed); 46 INIT_LIST_HEAD(&fh->subscribed);
47 fh->sequence = -1; 47 fh->sequence = -1;
48 mutex_init(&fh->subscribe_lock);
48} 49}
49EXPORT_SYMBOL_GPL(v4l2_fh_init); 50EXPORT_SYMBOL_GPL(v4l2_fh_init);
50 51
@@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
90 return; 91 return;
91 v4l_disable_media_source(fh->vdev); 92 v4l_disable_media_source(fh->vdev);
92 v4l2_event_unsubscribe_all(fh); 93 v4l2_event_unsubscribe_all(fh);
94 mutex_destroy(&fh->subscribe_lock);
93 fh->vdev = NULL; 95 fh->vdev = NULL;
94} 96}
95EXPORT_SYMBOL_GPL(v4l2_fh_exit); 97EXPORT_SYMBOL_GPL(v4l2_fh_exit);
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index e11ab12fbdf2..800986a79704 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -528,8 +528,8 @@ static int usbhs_omap_get_dt_pdata(struct device *dev,
528} 528}
529 529
530static const struct of_device_id usbhs_child_match_table[] = { 530static const struct of_device_id usbhs_child_match_table[] = {
531 { .compatible = "ti,omap-ehci", }, 531 { .compatible = "ti,ehci-omap", },
532 { .compatible = "ti,omap-ohci", }, 532 { .compatible = "ti,ohci-omap3", },
533 { } 533 { }
534}; 534};
535 535
@@ -855,6 +855,7 @@ static struct platform_driver usbhs_omap_driver = {
855 .pm = &usbhsomap_dev_pm_ops, 855 .pm = &usbhsomap_dev_pm_ops,
856 .of_match_table = usbhs_omap_dt_ids, 856 .of_match_table = usbhs_omap_dt_ids,
857 }, 857 },
858 .probe = usbhs_omap_probe,
858 .remove = usbhs_omap_remove, 859 .remove = usbhs_omap_remove,
859}; 860};
860 861
@@ -864,9 +865,9 @@ MODULE_ALIAS("platform:" USBHS_DRIVER_NAME);
864MODULE_LICENSE("GPL v2"); 865MODULE_LICENSE("GPL v2");
865MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI"); 866MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
866 867
867static int __init omap_usbhs_drvinit(void) 868static int omap_usbhs_drvinit(void)
868{ 869{
869 return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe); 870 return platform_driver_register(&usbhs_omap_driver);
870} 871}
871 872
872/* 873/*
@@ -878,7 +879,7 @@ static int __init omap_usbhs_drvinit(void)
878 */ 879 */
879fs_initcall_sync(omap_usbhs_drvinit); 880fs_initcall_sync(omap_usbhs_drvinit);
880 881
881static void __exit omap_usbhs_drvexit(void) 882static void omap_usbhs_drvexit(void)
882{ 883{
883 platform_driver_unregister(&usbhs_omap_driver); 884 platform_driver_unregister(&usbhs_omap_driver);
884} 885}
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index abf9e884386c..f57f5de54206 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -235,7 +235,7 @@ int mmc_of_parse(struct mmc_host *host)
235 host->caps |= MMC_CAP_NEEDS_POLL; 235 host->caps |= MMC_CAP_NEEDS_POLL;
236 236
237 ret = mmc_gpiod_request_cd(host, "cd", 0, true, 237 ret = mmc_gpiod_request_cd(host, "cd", 0, true,
238 cd_debounce_delay_ms, 238 cd_debounce_delay_ms * 1000,
239 &cd_gpio_invert); 239 &cd_gpio_invert);
240 if (!ret) 240 if (!ret)
241 dev_info(host->parent, "Got CD GPIO\n"); 241 dev_info(host->parent, "Got CD GPIO\n");
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 2a833686784b..86803a3a04dc 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -271,7 +271,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
271 if (debounce) { 271 if (debounce) {
272 ret = gpiod_set_debounce(desc, debounce); 272 ret = gpiod_set_debounce(desc, debounce);
273 if (ret < 0) 273 if (ret < 0)
274 ctx->cd_debounce_delay_ms = debounce; 274 ctx->cd_debounce_delay_ms = debounce / 1000;
275 } 275 }
276 276
277 if (gpio_invert) 277 if (gpio_invert)
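The two mmc hunks keep the cached card-detect debounce in milliseconds while gpiod_set_debounce() takes microseconds, so the value is converted at each boundary. A tiny sketch of the conversion (the wrapper itself is hypothetical; only gpiod_set_debounce() is a real API):

#include <linux/gpio/consumer.h>

static int demo_set_cd_debounce(struct gpio_desc *desc,
				unsigned int debounce_ms)
{
	/* gpiolib expects microseconds; the driver bookkeeping is in ms. */
	return gpiod_set_debounce(desc, debounce_ms * 1000);
}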
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 890f192dedbd..5389c4821882 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -498,7 +498,8 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
498 498
499static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev) 499static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
500{ 500{
501 if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible && 501 if ((of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible ||
502 of_device_get_match_data(&pdev->dev) == &of_rcar_r8a7795_compatible) &&
502 !soc_device_match(gen3_soc_whitelist)) 503 !soc_device_match(gen3_soc_whitelist))
503 return -ENODEV; 504 return -ENODEV;
504 505
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index cbfafc453274..270d3c9580c5 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -39,13 +39,23 @@ static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
39 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1), 39 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
40 SPI_MEM_OP_NO_ADDR, 40 SPI_MEM_OP_NO_ADDR,
41 SPI_MEM_OP_NO_DUMMY, 41 SPI_MEM_OP_NO_DUMMY,
42 SPI_MEM_OP_DATA_IN(len, val, 1)); 42 SPI_MEM_OP_DATA_IN(len, NULL, 1));
43 void *scratchbuf;
43 int ret; 44 int ret;
44 45
46 scratchbuf = kmalloc(len, GFP_KERNEL);
47 if (!scratchbuf)
48 return -ENOMEM;
49
50 op.data.buf.in = scratchbuf;
45 ret = spi_mem_exec_op(flash->spimem, &op); 51 ret = spi_mem_exec_op(flash->spimem, &op);
46 if (ret < 0) 52 if (ret < 0)
47 dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret, 53 dev_err(&flash->spimem->spi->dev, "error %d reading %x\n", ret,
48 code); 54 code);
55 else
56 memcpy(val, scratchbuf, len);
57
58 kfree(scratchbuf);
49 59
50 return ret; 60 return ret;
51} 61}
@@ -56,9 +66,19 @@ static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
56 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1), 66 struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
57 SPI_MEM_OP_NO_ADDR, 67 SPI_MEM_OP_NO_ADDR,
58 SPI_MEM_OP_NO_DUMMY, 68 SPI_MEM_OP_NO_DUMMY,
59 SPI_MEM_OP_DATA_OUT(len, buf, 1)); 69 SPI_MEM_OP_DATA_OUT(len, NULL, 1));
70 void *scratchbuf;
71 int ret;
60 72
61 return spi_mem_exec_op(flash->spimem, &op); 73 scratchbuf = kmemdup(buf, len, GFP_KERNEL);
74 if (!scratchbuf)
75 return -ENOMEM;
76
77 op.data.buf.out = scratchbuf;
78 ret = spi_mem_exec_op(flash->spimem, &op);
79 kfree(scratchbuf);
80
81 return ret;
62} 82}
63 83
64static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, 84static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len,
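The m25p80 register helpers now bounce data through kmalloc()'d scratch buffers, since buffers handed to spi_mem_exec_op() should be DMA-able and the caller's buffer may live on the stack. A generic sketch of the bounce-buffer pattern; demo_do_transfer() is a stand-in, not a kernel API:

#include <linux/slab.h>
#include <linux/string.h>

static int demo_do_transfer(void *buf, size_t len)
{
	return 0;	/* stand-in for spi_mem_exec_op() */
}

static int demo_write_reg(const void *buf, size_t len)
{
	void *scratch;
	int ret;

	/* kmalloc() memory is DMA-safe; the caller's buffer may not be. */
	scratch = kmemdup(buf, len, GFP_KERNEL);
	if (!scratch)
		return -ENOMEM;

	ret = demo_do_transfer(scratch, len);
	kfree(scratch);
	return ret;
}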
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 52e2cb35fc79..99c460facd5e 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -873,8 +873,11 @@ static int mtd_part_of_parse(struct mtd_info *master,
 	int ret, err = 0;
 
 	np = mtd_get_of_node(master);
-	if (!mtd_is_partition(master))
+	if (mtd_is_partition(master))
+		of_node_get(np);
+	else
 		np = of_get_child_by_name(np, "partitions");
+
 	of_property_for_each_string(np, "compatible", prop, compat) {
 		parser = mtd_part_get_compatible_parser(compat);
 		if (!parser)
diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c
index 67b2065e7a19..b864b93dd289 100644
--- a/drivers/mtd/nand/raw/denali.c
+++ b/drivers/mtd/nand/raw/denali.c
@@ -596,6 +596,12 @@ static int denali_dma_xfer(struct denali_nand_info *denali, void *buf,
596 } 596 }
597 597
598 iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE); 598 iowrite32(DMA_ENABLE__FLAG, denali->reg + DMA_ENABLE);
599 /*
600 * The ->setup_dma() hook kicks DMA by using the data/command
601 * interface, which belongs to a different AXI port from the
602 * register interface. Read back the register to avoid a race.
603 */
604 ioread32(denali->reg + DMA_ENABLE);
599 605
600 denali_reset_irq(denali); 606 denali_reset_irq(denali);
601 denali->setup_dma(denali, dma_addr, page, write); 607 denali->setup_dma(denali, dma_addr, page, write);
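Per the comment added above, the extra ioread32() flushes the posted DMA_ENABLE write before DMA is kicked through a different AXI port. The general pattern, with placeholder register names rather than Denali's:

#include <linux/io.h>

#define DEMO_DMA_ENABLE	0x10

static void demo_kick_dma(void __iomem *regs)
{
	iowrite32(1, regs + DEMO_DMA_ENABLE);
	/* Read back over the same port so the enable write is guaranteed
	 * to have landed before DMA is started elsewhere. */
	ioread32(regs + DEMO_DMA_ENABLE);
}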
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 7af4d6213ee5..bc2ef5209783 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -1547,7 +1547,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
1547 for (op_id = 0; op_id < subop->ninstrs; op_id++) { 1547 for (op_id = 0; op_id < subop->ninstrs; op_id++) {
1548 unsigned int offset, naddrs; 1548 unsigned int offset, naddrs;
1549 const u8 *addrs; 1549 const u8 *addrs;
1550 int len = nand_subop_get_data_len(subop, op_id); 1550 int len;
1551 1551
1552 instr = &subop->instrs[op_id]; 1552 instr = &subop->instrs[op_id];
1553 1553
@@ -1593,6 +1593,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
1593 nfc_op->ndcb[0] |= 1593 nfc_op->ndcb[0] |=
1594 NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | 1594 NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
1595 NDCB0_LEN_OVRD; 1595 NDCB0_LEN_OVRD;
1596 len = nand_subop_get_data_len(subop, op_id);
1596 nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); 1597 nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
1597 } 1598 }
1598 nfc_op->data_delay_ns = instr->delay_ns; 1599 nfc_op->data_delay_ns = instr->delay_ns;
@@ -1606,6 +1607,7 @@ static void marvell_nfc_parse_instructions(struct nand_chip *chip,
1606 nfc_op->ndcb[0] |= 1607 nfc_op->ndcb[0] |=
1607 NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) | 1608 NDCB0_CMD_XTYPE(XTYPE_MONOLITHIC_RW) |
1608 NDCB0_LEN_OVRD; 1609 NDCB0_LEN_OVRD;
1610 len = nand_subop_get_data_len(subop, op_id);
1609 nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH); 1611 nfc_op->ndcb[3] |= round_up(len, FIFO_DEPTH);
1610 } 1612 }
1611 nfc_op->data_delay_ns = instr->delay_ns; 1613 nfc_op->data_delay_ns = instr->delay_ns;
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 9375cef22420..3d27616d9c85 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -283,8 +283,12 @@ static int ipddp_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	case SIOCFINDIPDDPRT:
 		spin_lock_bh(&ipddp_route_lock);
 		rp = __ipddp_find_route(&rcp);
-		if (rp)
-			memcpy(&rcp2, rp, sizeof(rcp2));
+		if (rp) {
+			memset(&rcp2, 0, sizeof(rcp2));
+			rcp2.ip = rp->ip;
+			rcp2.at = rp->at;
+			rcp2.flags = rp->flags;
+		}
 		spin_unlock_bh(&ipddp_route_lock);
 
 		if (rp) {
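The ipddp change replaces a whole-struct memcpy() with memset() plus field-by-field copies so that compiler-inserted padding is never handed to user space. A sketch of the same precaution around copy_to_user(), using a made-up structure:

#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_route {
	u32 ip;
	u8  node;		/* the compiler may pad after this */
	u32 flags;
};

static int demo_copy_route(struct demo_route __user *uarg,
			   const struct demo_route *rt)
{
	struct demo_route out;

	memset(&out, 0, sizeof(out));	/* clears any padding bytes */
	out.ip = rt->ip;
	out.node = rt->node;
	out.flags = rt->flags;

	return copy_to_user(uarg, &out, sizeof(out)) ? -EFAULT : 0;
}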
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a764a83f99da..0d87e11e7f1d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -971,16 +971,13 @@ static void bond_poll_controller(struct net_device *bond_dev)
 	struct slave *slave = NULL;
 	struct list_head *iter;
 	struct ad_info ad_info;
-	struct netpoll_info *ni;
-	const struct net_device_ops *ops;
 
 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
 		if (bond_3ad_get_active_agg_info(bond, &ad_info))
 			return;
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		ops = slave->dev->netdev_ops;
-		if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
+		if (!bond_slave_is_up(slave))
 			continue;
 
 		if (BOND_MODE(bond) == BOND_MODE_8023AD) {
@@ -992,11 +989,7 @@ static void bond_poll_controller(struct net_device *bond_dev)
 				continue;
 		}
 
-		ni = rcu_dereference_bh(slave->dev->npinfo);
-		if (down_trylock(&ni->dev_lock))
-			continue;
-		ops->ndo_poll_controller(slave->dev);
-		up(&ni->dev_lock);
+		netpoll_poll_dev(slave->dev);
 	}
 }
 
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 7c791c1da4b9..bef01331266f 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -128,7 +128,7 @@
128#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000 128#define MV88E6XXX_G1_ATU_OP_GET_CLR_VIOLATION 0x7000
129#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7) 129#define MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION BIT(7)
130#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6) 130#define MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION BIT(6)
131#define MV88E6XXX_G1_ATU_OP_MISS_VIOLTATION BIT(5) 131#define MV88E6XXX_G1_ATU_OP_MISS_VIOLATION BIT(5)
132#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4) 132#define MV88E6XXX_G1_ATU_OP_FULL_VIOLATION BIT(4)
133 133
134/* Offset 0x0C: ATU Data Register */ 134/* Offset 0x0C: ATU Data Register */
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 307410898fc9..5200e4bdce93 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -349,7 +349,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
349 chip->ports[entry.portvec].atu_member_violation++; 349 chip->ports[entry.portvec].atu_member_violation++;
350 } 350 }
351 351
352 if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { 352 if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
353 dev_err_ratelimited(chip->dev, 353 dev_err_ratelimited(chip->dev,
354 "ATU miss violation for %pM portvec %x\n", 354 "ATU miss violation for %pM portvec %x\n",
355 entry.mac, entry.portvec); 355 entry.mac, entry.portvec);
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 024998d6d8c6..6a8e2567f2bd 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
154static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); 154static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
155static void bmac_set_timeout(struct net_device *dev); 155static void bmac_set_timeout(struct net_device *dev);
156static void bmac_tx_timeout(struct timer_list *t); 156static void bmac_tx_timeout(struct timer_list *t);
157static int bmac_output(struct sk_buff *skb, struct net_device *dev); 157static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
158static void bmac_start(struct net_device *dev); 158static void bmac_start(struct net_device *dev);
159 159
160#define DBDMA_SET(x) ( ((x) | (x) << 16) ) 160#define DBDMA_SET(x) ( ((x) | (x) << 16) )
@@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev)
1456 spin_unlock_irqrestore(&bp->lock, flags); 1456 spin_unlock_irqrestore(&bp->lock, flags);
1457} 1457}
1458 1458
1459static int 1459static netdev_tx_t
1460bmac_output(struct sk_buff *skb, struct net_device *dev) 1460bmac_output(struct sk_buff *skb, struct net_device *dev)
1461{ 1461{
1462 struct bmac_data *bp = netdev_priv(dev); 1462 struct bmac_data *bp = netdev_priv(dev);
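This and several later hunks (mace, macmace, ep93xx, mac89x0, ether1, lib82596, sun3_82586) only tighten prototypes: .ndo_start_xmit is declared as returning netdev_tx_t, so the xmit handlers are changed to match. A minimal sketch of the expected signature (demo_xmit is hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* ... hand the frame to hardware ... */
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;	/* or NETDEV_TX_BUSY to ask for a requeue */
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_start_xmit	= demo_xmit,
};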
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 0b5429d76bcf..68b9ee489489 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -78,7 +78,7 @@ struct mace_data {
78 78
79static int mace_open(struct net_device *dev); 79static int mace_open(struct net_device *dev);
80static int mace_close(struct net_device *dev); 80static int mace_close(struct net_device *dev);
81static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); 81static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
82static void mace_set_multicast(struct net_device *dev); 82static void mace_set_multicast(struct net_device *dev);
83static void mace_reset(struct net_device *dev); 83static void mace_reset(struct net_device *dev);
84static int mace_set_address(struct net_device *dev, void *addr); 84static int mace_set_address(struct net_device *dev, void *addr);
@@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev)
525 mp->timeout_active = 1; 525 mp->timeout_active = 1;
526} 526}
527 527
528static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) 528static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
529{ 529{
530 struct mace_data *mp = netdev_priv(dev); 530 struct mace_data *mp = netdev_priv(dev);
531 volatile struct dbdma_regs __iomem *td = mp->tx_dma; 531 volatile struct dbdma_regs __iomem *td = mp->tx_dma;
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index 137cbb470af2..376f2c2613e7 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -89,7 +89,7 @@ struct mace_frame {
89 89
90static int mace_open(struct net_device *dev); 90static int mace_open(struct net_device *dev);
91static int mace_close(struct net_device *dev); 91static int mace_close(struct net_device *dev);
92static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev); 92static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
93static void mace_set_multicast(struct net_device *dev); 93static void mace_set_multicast(struct net_device *dev);
94static int mace_set_address(struct net_device *dev, void *addr); 94static int mace_set_address(struct net_device *dev, void *addr);
95static void mace_reset(struct net_device *dev); 95static void mace_reset(struct net_device *dev);
@@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev)
444 * Transmit a frame 444 * Transmit a frame
445 */ 445 */
446 446
447static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev) 447static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
448{ 448{
449 struct mace_data *mp = netdev_priv(dev); 449 struct mace_data *mp = netdev_priv(dev);
450 unsigned long flags; 450 unsigned long flags;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index b5f1f62e8e25..d1e1a0ba8615 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 		}
 
 		/* for single fragment packets use build_skb() */
-		if (buff->is_eop) {
+		if (buff->is_eop &&
+		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
 			skb = build_skb(page_address(buff->page),
-					buff->len + AQ_SKB_ALIGN);
+					AQ_CFG_RX_FRAME_MAX);
 			if (unlikely(!skb)) {
 				err = -ENOMEM;
 				goto err_exit;
@@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 					 buff->len - ETH_HLEN,
 					 SKB_TRUESIZE(buff->len - ETH_HLEN));
 
-			for (i = 1U, next_ = buff->next,
-			     buff_ = &self->buff_ring[next_]; true;
-			     next_ = buff_->next,
-			     buff_ = &self->buff_ring[next_], ++i) {
-				skb_add_rx_frag(skb, i, buff_->page, 0,
-						buff_->len,
-						SKB_TRUESIZE(buff->len -
-						ETH_HLEN));
-				buff_->is_cleaned = 1;
-
-				if (buff_->is_eop)
-					break;
+			if (!buff->is_eop) {
+				for (i = 1U, next_ = buff->next,
+				     buff_ = &self->buff_ring[next_];
+				     true; next_ = buff_->next,
+				     buff_ = &self->buff_ring[next_], ++i) {
+					skb_add_rx_frag(skb, i,
+							buff_->page, 0,
+							buff_->len,
+							SKB_TRUESIZE(buff->len -
+							ETH_HLEN));
+					buff_->is_cleaned = 1;
+
+					if (buff_->is_eop)
+						break;
+				}
 			}
 		}
 
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 71362b7f6040..fcc2328bb0d9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12894,19 +12894,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12894 } 12894 }
12895} 12895}
12896 12896
12897#ifdef CONFIG_NET_POLL_CONTROLLER
12898static void poll_bnx2x(struct net_device *dev)
12899{
12900 struct bnx2x *bp = netdev_priv(dev);
12901 int i;
12902
12903 for_each_eth_queue(bp, i) {
12904 struct bnx2x_fastpath *fp = &bp->fp[i];
12905 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
12906 }
12907}
12908#endif
12909
12910static int bnx2x_validate_addr(struct net_device *dev) 12897static int bnx2x_validate_addr(struct net_device *dev)
12911{ 12898{
12912 struct bnx2x *bp = netdev_priv(dev); 12899 struct bnx2x *bp = netdev_priv(dev);
@@ -13113,9 +13100,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
13113 .ndo_tx_timeout = bnx2x_tx_timeout, 13100 .ndo_tx_timeout = bnx2x_tx_timeout,
13114 .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid, 13101 .ndo_vlan_rx_add_vid = bnx2x_vlan_rx_add_vid,
13115 .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid, 13102 .ndo_vlan_rx_kill_vid = bnx2x_vlan_rx_kill_vid,
13116#ifdef CONFIG_NET_POLL_CONTROLLER
13117 .ndo_poll_controller = poll_bnx2x,
13118#endif
13119 .ndo_setup_tc = __bnx2x_setup_tc, 13103 .ndo_setup_tc = __bnx2x_setup_tc,
13120#ifdef CONFIG_BNX2X_SRIOV 13104#ifdef CONFIG_BNX2X_SRIOV
13121 .ndo_set_vf_mac = bnx2x_set_vf_mac, 13105 .ndo_set_vf_mac = bnx2x_set_vf_mac,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index cecbb1d1f587..61957b0bbd8c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -7672,21 +7672,6 @@ static void bnxt_tx_timeout(struct net_device *dev)
7672 bnxt_queue_sp_work(bp); 7672 bnxt_queue_sp_work(bp);
7673} 7673}
7674 7674
7675#ifdef CONFIG_NET_POLL_CONTROLLER
7676static void bnxt_poll_controller(struct net_device *dev)
7677{
7678 struct bnxt *bp = netdev_priv(dev);
7679 int i;
7680
7681 /* Only process tx rings/combined rings in netpoll mode. */
7682 for (i = 0; i < bp->tx_nr_rings; i++) {
7683 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
7684
7685 napi_schedule(&txr->bnapi->napi);
7686 }
7687}
7688#endif
7689
7690static void bnxt_timer(struct timer_list *t) 7675static void bnxt_timer(struct timer_list *t)
7691{ 7676{
7692 struct bnxt *bp = from_timer(bp, t, timer); 7677 struct bnxt *bp = from_timer(bp, t, timer);
@@ -8027,7 +8012,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
8027 if (ether_addr_equal(addr->sa_data, dev->dev_addr)) 8012 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
8028 return 0; 8013 return 0;
8029 8014
8030 rc = bnxt_approve_mac(bp, addr->sa_data); 8015 rc = bnxt_approve_mac(bp, addr->sa_data, true);
8031 if (rc) 8016 if (rc)
8032 return rc; 8017 return rc;
8033 8018
@@ -8520,9 +8505,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
8520 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk, 8505 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
8521 .ndo_set_vf_trust = bnxt_set_vf_trust, 8506 .ndo_set_vf_trust = bnxt_set_vf_trust,
8522#endif 8507#endif
8523#ifdef CONFIG_NET_POLL_CONTROLLER
8524 .ndo_poll_controller = bnxt_poll_controller,
8525#endif
8526 .ndo_setup_tc = bnxt_setup_tc, 8508 .ndo_setup_tc = bnxt_setup_tc,
8527#ifdef CONFIG_RFS_ACCEL 8509#ifdef CONFIG_RFS_ACCEL
8528 .ndo_rx_flow_steer = bnxt_rx_flow_steer, 8510 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
@@ -8827,14 +8809,19 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
8827 } else { 8809 } else {
8828#ifdef CONFIG_BNXT_SRIOV 8810#ifdef CONFIG_BNXT_SRIOV
8829 struct bnxt_vf_info *vf = &bp->vf; 8811 struct bnxt_vf_info *vf = &bp->vf;
8812 bool strict_approval = true;
8830 8813
8831 if (is_valid_ether_addr(vf->mac_addr)) { 8814 if (is_valid_ether_addr(vf->mac_addr)) {
8832 /* overwrite netdev dev_addr with admin VF MAC */ 8815 /* overwrite netdev dev_addr with admin VF MAC */
8833 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN); 8816 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
8817 /* Older PF driver or firmware may not approve this
8818 * correctly.
8819 */
8820 strict_approval = false;
8834 } else { 8821 } else {
8835 eth_hw_addr_random(bp->dev); 8822 eth_hw_addr_random(bp->dev);
8836 } 8823 }
8837 rc = bnxt_approve_mac(bp, bp->dev->dev_addr); 8824 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
8838#endif 8825#endif
8839 } 8826 }
8840 return rc; 8827 return rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index f3b9fbcc705b..790c684f08ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -46,6 +46,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
46 } 46 }
47 } 47 }
48 48
49 if (i == ARRAY_SIZE(nvm_params))
50 return -EOPNOTSUPP;
51
49 if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) 52 if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
50 idx = bp->pf.port_id; 53 idx = bp->pf.port_id;
51 else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) 54 else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
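The bnxt_devlink hunk adds a bounds check after a table-lookup loop, returning -EOPNOTSUPP when no entry matched instead of indexing past the array. The same pattern in isolation, with an illustrative table:

#include <linux/errno.h>
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

struct demo_param { u32 id; u32 reg; };

static const struct demo_param demo_params[] = {
	{ .id = 1, .reg = 0x10 },
	{ .id = 2, .reg = 0x14 },
};

static int demo_lookup(u32 id, u32 *reg)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(demo_params); i++) {
		if (demo_params[i].id == id)
			break;
	}

	/* Loop ran to completion without a match: report it instead of
	 * silently reading past the end of the table. */
	if (i == ARRAY_SIZE(demo_params))
		return -EOPNOTSUPP;

	*reg = demo_params[i].reg;
	return 0;
}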
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index fcd085a9853a..3962f6fd543c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1104,7 +1104,7 @@ update_vf_mac_exit:
1104 mutex_unlock(&bp->hwrm_cmd_lock); 1104 mutex_unlock(&bp->hwrm_cmd_lock);
1105} 1105}
1106 1106
1107int bnxt_approve_mac(struct bnxt *bp, u8 *mac) 1107int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
1108{ 1108{
1109 struct hwrm_func_vf_cfg_input req = {0}; 1109 struct hwrm_func_vf_cfg_input req = {0};
1110 int rc = 0; 1110 int rc = 0;
@@ -1122,12 +1122,13 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
 	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 mac_done:
-	if (rc) {
+	if (rc && strict) {
 		rc = -EADDRNOTAVAIL;
 		netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
 			    mac);
+		return rc;
 	}
-	return rc;
+	return 0;
 }
 #else
 
@@ -1144,7 +1145,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
1144{ 1145{
1145} 1146}
1146 1147
1147int bnxt_approve_mac(struct bnxt *bp, u8 *mac) 1148int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
1148{ 1149{
1149 return 0; 1150 return 0;
1150} 1151}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index e9b20cd19881..2eed9eda1195 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -39,5 +39,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
39void bnxt_sriov_disable(struct bnxt *); 39void bnxt_sriov_disable(struct bnxt *);
40void bnxt_hwrm_exec_fwd_req(struct bnxt *); 40void bnxt_hwrm_exec_fwd_req(struct bnxt *);
41void bnxt_update_vf_mac(struct bnxt *); 41void bnxt_update_vf_mac(struct bnxt *);
42int bnxt_approve_mac(struct bnxt *, u8 *); 42int bnxt_approve_mac(struct bnxt *, u8 *, bool);
43#endif 43#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 092c817f8f11..e1594c9df4c6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
 	return 0;
 }
 
-static void bnxt_tc_parse_vlan(struct bnxt *bp,
+static int bnxt_tc_parse_vlan(struct bnxt *bp,
 			       struct bnxt_tc_actions *actions,
 			       const struct tc_action *tc_act)
 {
-	if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
+	switch (tcf_vlan_action(tc_act)) {
+	case TCA_VLAN_ACT_POP:
 		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
-	} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
+		break;
+	case TCA_VLAN_ACT_PUSH:
 		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
 		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
 		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+		break;
+	default:
+		return -EOPNOTSUPP;
 	}
+	return 0;
 }
 
 static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
@@ -134,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
134 140
135 /* Push/pop VLAN */ 141 /* Push/pop VLAN */
136 if (is_tcf_vlan(tc_act)) { 142 if (is_tcf_vlan(tc_act)) {
137 bnxt_tc_parse_vlan(bp, actions, tc_act); 143 rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
144 if (rc)
145 return rc;
138 continue; 146 continue;
139 } 147 }
140 148
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 16e4ef7d7185..f1a86b422617 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3837,6 +3837,13 @@ static const struct macb_config at91sam9260_config = {
3837 .init = macb_init, 3837 .init = macb_init,
3838}; 3838};
3839 3839
3840static const struct macb_config sama5d3macb_config = {
3841 .caps = MACB_CAPS_SG_DISABLED
3842 | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
3843 .clk_init = macb_clk_init,
3844 .init = macb_init,
3845};
3846
3840static const struct macb_config pc302gem_config = { 3847static const struct macb_config pc302gem_config = {
3841 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, 3848 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
3842 .dma_burst_length = 16, 3849 .dma_burst_length = 16,
@@ -3904,6 +3911,7 @@ static const struct of_device_id macb_dt_ids[] = {
3904 { .compatible = "cdns,gem", .data = &pc302gem_config }, 3911 { .compatible = "cdns,gem", .data = &pc302gem_config },
3905 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, 3912 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
3906 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, 3913 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
3914 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
3907 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, 3915 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
3908 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config }, 3916 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
3909 { .compatible = "cdns,emac", .data = &emac_config }, 3917 { .compatible = "cdns,emac", .data = &emac_config },
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index b8f75a22fb6c..f152da1ce046 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -753,7 +753,6 @@ struct cpl_abort_req_rss {
753}; 753};
754 754
755struct cpl_abort_req_rss6 { 755struct cpl_abort_req_rss6 {
756 WR_HDR;
757 union opcode_tid ot; 756 union opcode_tid ot;
758 __be32 srqidx_status; 757 __be32 srqidx_status;
759}; 758};
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index e2a702996db4..13dfdfca49fc 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget)
332 return rx; 332 return rx;
333} 333}
334 334
335static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) 335static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
336{ 336{
337 struct ep93xx_priv *ep = netdev_priv(dev); 337 struct ep93xx_priv *ep = netdev_priv(dev);
338 struct ep93xx_tdesc *txd; 338 struct ep93xx_tdesc *txd;
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 3f8fe8fd79cc..6324e80960c3 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -113,7 +113,7 @@ struct net_local {
113 113
114/* Index to functions, as function prototypes. */ 114/* Index to functions, as function prototypes. */
115static int net_open(struct net_device *dev); 115static int net_open(struct net_device *dev);
116static int net_send_packet(struct sk_buff *skb, struct net_device *dev); 116static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
117static irqreturn_t net_interrupt(int irq, void *dev_id); 117static irqreturn_t net_interrupt(int irq, void *dev_id);
118static void set_multicast_list(struct net_device *dev); 118static void set_multicast_list(struct net_device *dev);
119static void net_rx(struct net_device *dev); 119static void net_rx(struct net_device *dev);
@@ -324,7 +324,7 @@ net_open(struct net_device *dev)
324 return 0; 324 return 0;
325} 325}
326 326
327static int 327static netdev_tx_t
328net_send_packet(struct sk_buff *skb, struct net_device *dev) 328net_send_packet(struct sk_buff *skb, struct net_device *dev)
329{ 329{
330 struct net_local *lp = netdev_priv(dev); 330 struct net_local *lp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index c8c7ad2eff77..9b5a68b65432 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -2634,7 +2634,7 @@ static int hp100_login_to_vg_hub(struct net_device *dev, u_short force_relogin)
2634 /* Wait for link to drop */ 2634 /* Wait for link to drop */
2635 time = jiffies + (HZ / 10); 2635 time = jiffies + (HZ / 10);
2636 do { 2636 do {
2637 if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST)) 2637 if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
2638 break; 2638 break;
2639 if (!in_interrupt()) 2639 if (!in_interrupt())
2640 schedule_timeout_interruptible(1); 2640 schedule_timeout_interruptible(1);
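The hp100 fix swaps bitwise NOT for logical NOT: ~(reg & HP100_LINK_UP_ST) is non-zero for every value the masked register can take, so the old "wait for link to drop" loop broke out immediately even while the link bit was still set. A two-line user-space demonstration of the difference:

#include <stdio.h>

int main(void)
{
	unsigned char reg = 0x02;	/* link-up bit still set */
	unsigned char LINK_UP = 0x02;

	/* Old test: ~(0x02) is -3, i.e. "true", so the wait loop exits early. */
	printf("~(reg & LINK_UP) -> %s\n", ~(reg & LINK_UP) ? "true" : "false");
	/* Fixed test: only true once the bit is actually clear. */
	printf("!(reg & LINK_UP) -> %s\n", !(reg & LINK_UP) ? "true" : "false");
	return 0;
}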
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index dc983450354b..35f6291a3672 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG;
64#define RX_AREA_END 0x0fc00 64#define RX_AREA_END 0x0fc00
65 65
66static int ether1_open(struct net_device *dev); 66static int ether1_open(struct net_device *dev);
67static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev); 67static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
68 struct net_device *dev);
68static irqreturn_t ether1_interrupt(int irq, void *dev_id); 69static irqreturn_t ether1_interrupt(int irq, void *dev_id);
69static int ether1_close(struct net_device *dev); 70static int ether1_close(struct net_device *dev);
70static void ether1_setmulticastlist(struct net_device *dev); 71static void ether1_setmulticastlist(struct net_device *dev);
@@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev)
667 netif_wake_queue(dev); 668 netif_wake_queue(dev);
668} 669}
669 670
670static int 671static netdev_tx_t
671ether1_sendpacket (struct sk_buff *skb, struct net_device *dev) 672ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
672{ 673{
673 int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr; 674 int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index f00a1dc2128c..2f7ae118217f 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -347,7 +347,7 @@ static const char init_setup[] =
347 0x7f /* *multi IA */ }; 347 0x7f /* *multi IA */ };
348 348
349static int i596_open(struct net_device *dev); 349static int i596_open(struct net_device *dev);
350static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev); 350static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
351static irqreturn_t i596_interrupt(int irq, void *dev_id); 351static irqreturn_t i596_interrupt(int irq, void *dev_id);
352static int i596_close(struct net_device *dev); 352static int i596_close(struct net_device *dev);
353static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); 353static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
@@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev)
966} 966}
967 967
968 968
969static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev) 969static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
970{ 970{
971 struct i596_private *lp = netdev_priv(dev); 971 struct i596_private *lp = netdev_priv(dev);
972 struct tx_cmd *tx_cmd; 972 struct tx_cmd *tx_cmd;
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 8bb15a8c2a40..1a86184d44c0 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -121,7 +121,8 @@ static int sun3_82586_probe1(struct net_device *dev,int ioaddr);
121static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id); 121static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id);
122static int sun3_82586_open(struct net_device *dev); 122static int sun3_82586_open(struct net_device *dev);
123static int sun3_82586_close(struct net_device *dev); 123static int sun3_82586_close(struct net_device *dev);
124static int sun3_82586_send_packet(struct sk_buff *,struct net_device *); 124static netdev_tx_t sun3_82586_send_packet(struct sk_buff *,
125 struct net_device *);
125static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev); 126static struct net_device_stats *sun3_82586_get_stats(struct net_device *dev);
126static void set_multicast_list(struct net_device *dev); 127static void set_multicast_list(struct net_device *dev);
127static void sun3_82586_timeout(struct net_device *dev); 128static void sun3_82586_timeout(struct net_device *dev);
@@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev)
1002 * send frame 1003 * send frame
1003 */ 1004 */
1004 1005
1005static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev) 1006static netdev_tx_t
1007sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
1006{ 1008{
1007 int len,i; 1009 int len,i;
1008#ifndef NO_NOPCOMMANDS 1010#ifndef NO_NOPCOMMANDS
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 372664686309..129f4e9f38da 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2677,12 +2677,17 @@ static int emac_init_phy(struct emac_instance *dev)
2677 if (of_phy_is_fixed_link(np)) { 2677 if (of_phy_is_fixed_link(np)) {
2678 int res = emac_dt_mdio_probe(dev); 2678 int res = emac_dt_mdio_probe(dev);
2679 2679
2680 if (!res) { 2680 if (res)
2681 res = of_phy_register_fixed_link(np); 2681 return res;
2682 if (res) 2682
2683 mdiobus_unregister(dev->mii_bus); 2683 res = of_phy_register_fixed_link(np);
2684 dev->phy_dev = of_phy_find_device(np);
2685 if (res || !dev->phy_dev) {
2686 mdiobus_unregister(dev->mii_bus);
2687 return res ? res : -EINVAL;
2684 } 2688 }
2685 return res; 2689 emac_adjust_link(dev->ndev);
2690 put_device(&dev->phy_dev->mdio.dev);
2686 } 2691 }
2687 return 0; 2692 return 0;
2688 } 2693 }
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index a903a0ba45e1..7d42582ed48d 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface);
504void fm10k_service_event_schedule(struct fm10k_intfc *interface); 504void fm10k_service_event_schedule(struct fm10k_intfc *interface);
505void fm10k_macvlan_schedule(struct fm10k_intfc *interface); 505void fm10k_macvlan_schedule(struct fm10k_intfc *interface);
506void fm10k_update_rx_drop_en(struct fm10k_intfc *interface); 506void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
507#ifdef CONFIG_NET_POLL_CONTROLLER
508void fm10k_netpoll(struct net_device *netdev);
509#endif
510 507
511/* Netdev */ 508/* Netdev */
512struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info); 509struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 929f538d28bc..538a8467f434 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
1648 .ndo_udp_tunnel_del = fm10k_udp_tunnel_del, 1648 .ndo_udp_tunnel_del = fm10k_udp_tunnel_del,
1649 .ndo_dfwd_add_station = fm10k_dfwd_add_station, 1649 .ndo_dfwd_add_station = fm10k_dfwd_add_station,
1650 .ndo_dfwd_del_station = fm10k_dfwd_del_station, 1650 .ndo_dfwd_del_station = fm10k_dfwd_del_station,
1651#ifdef CONFIG_NET_POLL_CONTROLLER
1652 .ndo_poll_controller = fm10k_netpoll,
1653#endif
1654 .ndo_features_check = fm10k_features_check, 1651 .ndo_features_check = fm10k_features_check,
1655}; 1652};
1656 1653
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 15071e4adb98..c859ababeed5 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
1210 return IRQ_HANDLED; 1210 return IRQ_HANDLED;
1211} 1211}
1212 1212
1213#ifdef CONFIG_NET_POLL_CONTROLLER
1214/**
1215 * fm10k_netpoll - A Polling 'interrupt' handler
1216 * @netdev: network interface device structure
1217 *
1218 * This is used by netconsole to send skbs without having to re-enable
1219 * interrupts. It's not called while the normal interrupt routine is executing.
1220 **/
1221void fm10k_netpoll(struct net_device *netdev)
1222{
1223 struct fm10k_intfc *interface = netdev_priv(netdev);
1224 int i;
1225
1226 /* if interface is down do nothing */
1227 if (test_bit(__FM10K_DOWN, interface->state))
1228 return;
1229
1230 for (i = 0; i < interface->num_q_vectors; i++)
1231 fm10k_msix_clean_rings(0, interface->q_vector[i]);
1232}
1233
1234#endif
1235#define FM10K_ERR_MSG(type) case (type): error = #type; break 1213#define FM10K_ERR_MSG(type) case (type): error = #type; break
1236static void fm10k_handle_fault(struct fm10k_intfc *interface, int type, 1214static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
1237 struct fm10k_fault *fault) 1215 struct fm10k_fault *fault)
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 5906c1c1d19d..fef6d892ed4c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -396,29 +396,6 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
396 adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS; 396 adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
397} 397}
398 398
399#ifdef CONFIG_NET_POLL_CONTROLLER
400/**
401 * i40evf_netpoll - A Polling 'interrupt' handler
402 * @netdev: network interface device structure
403 *
404 * This is used by netconsole to send skbs without having to re-enable
405 * interrupts. It's not called while the normal interrupt routine is executing.
406 **/
407static void i40evf_netpoll(struct net_device *netdev)
408{
409 struct i40evf_adapter *adapter = netdev_priv(netdev);
410 int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
411 int i;
412
413 /* if interface is down do nothing */
414 if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
415 return;
416
417 for (i = 0; i < q_vectors; i++)
418 i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
419}
420
421#endif
422/** 399/**
423 * i40evf_irq_affinity_notify - Callback for affinity changes 400 * i40evf_irq_affinity_notify - Callback for affinity changes
424 * @notify: context as to what irq was changed 401 * @notify: context as to what irq was changed
@@ -3229,9 +3206,6 @@ static const struct net_device_ops i40evf_netdev_ops = {
3229 .ndo_features_check = i40evf_features_check, 3206 .ndo_features_check = i40evf_features_check,
3230 .ndo_fix_features = i40evf_fix_features, 3207 .ndo_fix_features = i40evf_fix_features,
3231 .ndo_set_features = i40evf_set_features, 3208 .ndo_set_features = i40evf_set_features,
3232#ifdef CONFIG_NET_POLL_CONTROLLER
3233 .ndo_poll_controller = i40evf_netpoll,
3234#endif
3235 .ndo_setup_tc = i40evf_setup_tc, 3209 .ndo_setup_tc = i40evf_setup_tc,
3236}; 3210};
3237 3211
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f1e80eed2fd6..3f047bb43348 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4806,30 +4806,6 @@ void ice_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
4806 stats->rx_length_errors = vsi_stats->rx_length_errors; 4806 stats->rx_length_errors = vsi_stats->rx_length_errors;
4807} 4807}
4808 4808
4809#ifdef CONFIG_NET_POLL_CONTROLLER
4810/**
4811 * ice_netpoll - polling "interrupt" handler
4812 * @netdev: network interface device structure
4813 *
4814 * Used by netconsole to send skbs without having to re-enable interrupts.
4815 * This is not called in the normal interrupt path.
4816 */
4817static void ice_netpoll(struct net_device *netdev)
4818{
4819 struct ice_netdev_priv *np = netdev_priv(netdev);
4820 struct ice_vsi *vsi = np->vsi;
4821 struct ice_pf *pf = vsi->back;
4822 int i;
4823
4824 if (test_bit(__ICE_DOWN, vsi->state) ||
4825 !test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
4826 return;
4827
4828 for (i = 0; i < vsi->num_q_vectors; i++)
4829 ice_msix_clean_rings(0, vsi->q_vectors[i]);
4830}
4831#endif /* CONFIG_NET_POLL_CONTROLLER */
4832
4833/** 4809/**
4834 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI 4810 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4835 * @vsi: VSI having NAPI disabled 4811 * @vsi: VSI having NAPI disabled
@@ -5497,9 +5473,6 @@ static const struct net_device_ops ice_netdev_ops = {
5497 .ndo_validate_addr = eth_validate_addr, 5473 .ndo_validate_addr = eth_validate_addr,
5498 .ndo_change_mtu = ice_change_mtu, 5474 .ndo_change_mtu = ice_change_mtu,
5499 .ndo_get_stats64 = ice_get_stats64, 5475 .ndo_get_stats64 = ice_get_stats64,
5500#ifdef CONFIG_NET_POLL_CONTROLLER
5501 .ndo_poll_controller = ice_netpoll,
5502#endif /* CONFIG_NET_POLL_CONTROLLER */
5503 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid, 5476 .ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
5504 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid, 5477 .ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
5505 .ndo_set_features = ice_set_features, 5478 .ndo_set_features = ice_set_features,
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a32c576c1e65..0796cef96fa3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -205,10 +205,6 @@ static struct notifier_block dca_notifier = {
205 .priority = 0 205 .priority = 0
206}; 206};
207#endif 207#endif
208#ifdef CONFIG_NET_POLL_CONTROLLER
209/* for netdump / net console */
210static void igb_netpoll(struct net_device *);
211#endif
212#ifdef CONFIG_PCI_IOV 208#ifdef CONFIG_PCI_IOV
213static unsigned int max_vfs; 209static unsigned int max_vfs;
214module_param(max_vfs, uint, 0); 210module_param(max_vfs, uint, 0);
@@ -2881,9 +2877,6 @@ static const struct net_device_ops igb_netdev_ops = {
2881 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk, 2877 .ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
2882 .ndo_set_vf_trust = igb_ndo_set_vf_trust, 2878 .ndo_set_vf_trust = igb_ndo_set_vf_trust,
2883 .ndo_get_vf_config = igb_ndo_get_vf_config, 2879 .ndo_get_vf_config = igb_ndo_get_vf_config,
2884#ifdef CONFIG_NET_POLL_CONTROLLER
2885 .ndo_poll_controller = igb_netpoll,
2886#endif
2887 .ndo_fix_features = igb_fix_features, 2880 .ndo_fix_features = igb_fix_features,
2888 .ndo_set_features = igb_set_features, 2881 .ndo_set_features = igb_set_features,
2889 .ndo_fdb_add = igb_ndo_fdb_add, 2882 .ndo_fdb_add = igb_ndo_fdb_add,
@@ -9053,29 +9046,6 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
9053 return 0; 9046 return 0;
9054} 9047}
9055 9048
9056#ifdef CONFIG_NET_POLL_CONTROLLER
9057/* Polling 'interrupt' - used by things like netconsole to send skbs
9058 * without having to re-enable interrupts. It's not called while
9059 * the interrupt routine is executing.
9060 */
9061static void igb_netpoll(struct net_device *netdev)
9062{
9063 struct igb_adapter *adapter = netdev_priv(netdev);
9064 struct e1000_hw *hw = &adapter->hw;
9065 struct igb_q_vector *q_vector;
9066 int i;
9067
9068 for (i = 0; i < adapter->num_q_vectors; i++) {
9069 q_vector = adapter->q_vector[i];
9070 if (adapter->flags & IGB_FLAG_HAS_MSIX)
9071 wr32(E1000_EIMC, q_vector->eims_value);
9072 else
9073 igb_irq_disable(adapter);
9074 napi_schedule(&q_vector->napi);
9075 }
9076}
9077#endif /* CONFIG_NET_POLL_CONTROLLER */
9078
9079/** 9049/**
9080 * igb_io_error_detected - called when PCI error is detected 9050 * igb_io_error_detected - called when PCI error is detected
9081 * @pdev: Pointer to PCI device 9051 * @pdev: Pointer to PCI device
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index d3e72d0f66ef..7722153c4ac2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -81,11 +81,6 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
81 __be16 proto, u16 vid); 81 __be16 proto, u16 vid);
82static void ixgb_restore_vlan(struct ixgb_adapter *adapter); 82static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
83 83
84#ifdef CONFIG_NET_POLL_CONTROLLER
85/* for netdump / net console */
86static void ixgb_netpoll(struct net_device *dev);
87#endif
88
89static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, 84static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
90 enum pci_channel_state state); 85 enum pci_channel_state state);
91static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); 86static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);
@@ -348,9 +343,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
348 .ndo_tx_timeout = ixgb_tx_timeout, 343 .ndo_tx_timeout = ixgb_tx_timeout,
349 .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid, 344 .ndo_vlan_rx_add_vid = ixgb_vlan_rx_add_vid,
350 .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid, 345 .ndo_vlan_rx_kill_vid = ixgb_vlan_rx_kill_vid,
351#ifdef CONFIG_NET_POLL_CONTROLLER
352 .ndo_poll_controller = ixgb_netpoll,
353#endif
354 .ndo_fix_features = ixgb_fix_features, 346 .ndo_fix_features = ixgb_fix_features,
355 .ndo_set_features = ixgb_set_features, 347 .ndo_set_features = ixgb_set_features,
356}; 348};
@@ -2195,23 +2187,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
2195 ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); 2187 ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2196} 2188}
2197 2189
2198#ifdef CONFIG_NET_POLL_CONTROLLER
2199/*
2200 * Polling 'interrupt' - used by things like netconsole to send skbs
2201 * without having to re-enable interrupts. It's not called while
2202 * the interrupt routine is executing.
2203 */
2204
2205static void ixgb_netpoll(struct net_device *dev)
2206{
2207 struct ixgb_adapter *adapter = netdev_priv(dev);
2208
2209 disable_irq(adapter->pdev->irq);
2210 ixgb_intr(adapter->pdev->irq, dev);
2211 enable_irq(adapter->pdev->irq);
2212}
2213#endif
2214
2215/** 2190/**
2216 * ixgb_io_error_detected - called when PCI error is detected 2191 * ixgb_io_error_detected - called when PCI error is detected
2217 * @pdev: pointer to pci device with error 2192 * @pdev: pointer to pci device with error
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9a23d33a47ed..f27d73a7bf16 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8768,28 +8768,6 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev)
8768 return err; 8768 return err;
8769} 8769}
8770 8770
8771#ifdef CONFIG_NET_POLL_CONTROLLER
8772/*
8773 * Polling 'interrupt' - used by things like netconsole to send skbs
8774 * without having to re-enable interrupts. It's not called while
8775 * the interrupt routine is executing.
8776 */
8777static void ixgbe_netpoll(struct net_device *netdev)
8778{
8779 struct ixgbe_adapter *adapter = netdev_priv(netdev);
8780 int i;
8781
8782 /* if interface is down do nothing */
8783 if (test_bit(__IXGBE_DOWN, &adapter->state))
8784 return;
8785
8786 /* loop through and schedule all active queues */
8787 for (i = 0; i < adapter->num_q_vectors; i++)
8788 ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
8789}
8790
8791#endif
8792
8793static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats, 8771static void ixgbe_get_ring_stats64(struct rtnl_link_stats64 *stats,
8794 struct ixgbe_ring *ring) 8772 struct ixgbe_ring *ring)
8795{ 8773{
@@ -10251,9 +10229,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
10251 .ndo_get_vf_config = ixgbe_ndo_get_vf_config, 10229 .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
10252 .ndo_get_stats64 = ixgbe_get_stats64, 10230 .ndo_get_stats64 = ixgbe_get_stats64,
10253 .ndo_setup_tc = __ixgbe_setup_tc, 10231 .ndo_setup_tc = __ixgbe_setup_tc,
10254#ifdef CONFIG_NET_POLL_CONTROLLER
10255 .ndo_poll_controller = ixgbe_netpoll,
10256#endif
10257#ifdef IXGBE_FCOE 10232#ifdef IXGBE_FCOE
10258 .ndo_select_queue = ixgbe_select_queue, 10233 .ndo_select_queue = ixgbe_select_queue,
10259 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, 10234 .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index d86446d202d5..5a228582423b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -4233,24 +4233,6 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
4233 return 0; 4233 return 0;
4234} 4234}
4235 4235
4236#ifdef CONFIG_NET_POLL_CONTROLLER
4237/* Polling 'interrupt' - used by things like netconsole to send skbs
4238 * without having to re-enable interrupts. It's not called while
4239 * the interrupt routine is executing.
4240 */
4241static void ixgbevf_netpoll(struct net_device *netdev)
4242{
4243 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
4244 int i;
4245
4246 /* if interface is down do nothing */
4247 if (test_bit(__IXGBEVF_DOWN, &adapter->state))
4248 return;
4249 for (i = 0; i < adapter->num_rx_queues; i++)
4250 ixgbevf_msix_clean_rings(0, adapter->q_vector[i]);
4251}
4252#endif /* CONFIG_NET_POLL_CONTROLLER */
4253
4254static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state) 4236static int ixgbevf_suspend(struct pci_dev *pdev, pm_message_t state)
4255{ 4237{
4256 struct net_device *netdev = pci_get_drvdata(pdev); 4238 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4482,9 +4464,6 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
4482 .ndo_tx_timeout = ixgbevf_tx_timeout, 4464 .ndo_tx_timeout = ixgbevf_tx_timeout,
4483 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid, 4465 .ndo_vlan_rx_add_vid = ixgbevf_vlan_rx_add_vid,
4484 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid, 4466 .ndo_vlan_rx_kill_vid = ixgbevf_vlan_rx_kill_vid,
4485#ifdef CONFIG_NET_POLL_CONTROLLER
4486 .ndo_poll_controller = ixgbevf_netpoll,
4487#endif
4488 .ndo_features_check = ixgbevf_features_check, 4467 .ndo_features_check = ixgbevf_features_check,
4489 .ndo_bpf = ixgbevf_xdp, 4468 .ndo_bpf = ixgbevf_xdp,
4490}; 4469};
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index bc80a678abc3..b4ed7d394d07 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1890,8 +1890,8 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
1890 if (!data || !(rx_desc->buf_phys_addr)) 1890 if (!data || !(rx_desc->buf_phys_addr))
1891 continue; 1891 continue;
1892 1892
1893 dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, 1893 dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr,
1894 MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); 1894 PAGE_SIZE, DMA_FROM_DEVICE);
1895 __free_page(data); 1895 __free_page(data);
1896 } 1896 }
1897} 1897}
@@ -2008,8 +2008,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
2008 skb_add_rx_frag(rxq->skb, frag_num, page, 2008 skb_add_rx_frag(rxq->skb, frag_num, page,
2009 frag_offset, frag_size, 2009 frag_offset, frag_size,
2010 PAGE_SIZE); 2010 PAGE_SIZE);
2011 dma_unmap_single(dev->dev.parent, phys_addr, 2011 dma_unmap_page(dev->dev.parent, phys_addr,
2012 PAGE_SIZE, DMA_FROM_DEVICE); 2012 PAGE_SIZE, DMA_FROM_DEVICE);
2013 rxq->left_size -= frag_size; 2013 rxq->left_size -= frag_size;
2014 } 2014 }
2015 } else { 2015 } else {
@@ -2039,9 +2039,8 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
2039 frag_offset, frag_size, 2039 frag_offset, frag_size,
2040 PAGE_SIZE); 2040 PAGE_SIZE);
2041 2041
2042 dma_unmap_single(dev->dev.parent, phys_addr, 2042 dma_unmap_page(dev->dev.parent, phys_addr,
2043 PAGE_SIZE, 2043 PAGE_SIZE, DMA_FROM_DEVICE);
2044 DMA_FROM_DEVICE);
2045 2044
2046 rxq->left_size -= frag_size; 2045 rxq->left_size -= frag_size;
2047 } 2046 }
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 28500417843e..38cc01beea79 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -58,6 +58,8 @@ static struct {
58 */ 58 */
59static void mvpp2_mac_config(struct net_device *dev, unsigned int mode, 59static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
60 const struct phylink_link_state *state); 60 const struct phylink_link_state *state);
61static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
62 phy_interface_t interface, struct phy_device *phy);
61 63
62/* Queue modes */ 64/* Queue modes */
63#define MVPP2_QDIST_SINGLE_MODE 0 65#define MVPP2_QDIST_SINGLE_MODE 0
@@ -3053,10 +3055,12 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
3053 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); 3055 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
3054 } 3056 }
3055 3057
3056 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 3058 if (port->has_tx_irqs) {
3057 if (cause_tx) { 3059 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
3058 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET; 3060 if (cause_tx) {
3059 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id); 3061 cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
3062 mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
3063 }
3060 } 3064 }
3061 3065
3062 /* Process RX packets */ 3066 /* Process RX packets */
@@ -3142,6 +3146,7 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
3142 mvpp22_mode_reconfigure(port); 3146 mvpp22_mode_reconfigure(port);
3143 3147
3144 if (port->phylink) { 3148 if (port->phylink) {
3149 netif_carrier_off(port->dev);
3145 phylink_start(port->phylink); 3150 phylink_start(port->phylink);
3146 } else { 3151 } else {
3147 /* Phylink isn't used as of now for ACPI, so the MAC has to be 3152 /* Phylink isn't used as of now for ACPI, so the MAC has to be
@@ -3150,9 +3155,10 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
3150 */ 3155 */
3151 struct phylink_link_state state = { 3156 struct phylink_link_state state = {
3152 .interface = port->phy_interface, 3157 .interface = port->phy_interface,
3153 .link = 1,
3154 }; 3158 };
3155 mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state); 3159 mvpp2_mac_config(port->dev, MLO_AN_INBAND, &state);
3160 mvpp2_mac_link_up(port->dev, MLO_AN_INBAND, port->phy_interface,
3161 NULL);
3156 } 3162 }
3157 3163
3158 netif_tx_start_all_queues(port->dev); 3164 netif_tx_start_all_queues(port->dev);
@@ -4495,10 +4501,6 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
4495 return; 4501 return;
4496 } 4502 }
4497 4503
4498 netif_tx_stop_all_queues(port->dev);
4499 if (!port->has_phy)
4500 netif_carrier_off(port->dev);
4501
4502 /* Make sure the port is disabled when reconfiguring the mode */ 4504 /* Make sure the port is disabled when reconfiguring the mode */
4503 mvpp2_port_disable(port); 4505 mvpp2_port_disable(port);
4504 4506
@@ -4523,16 +4525,7 @@ static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
4523 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK) 4525 if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
4524 mvpp2_port_loopback_set(port, state); 4526 mvpp2_port_loopback_set(port, state);
4525 4527
4526 /* If the port already was up, make sure it's still in the same state */ 4528 mvpp2_port_enable(port);
4527 if (state->link || !port->has_phy) {
4528 mvpp2_port_enable(port);
4529
4530 mvpp2_egress_enable(port);
4531 mvpp2_ingress_enable(port);
4532 if (!port->has_phy)
4533 netif_carrier_on(dev);
4534 netif_tx_wake_all_queues(dev);
4535 }
4536} 4529}
4537 4530
4538static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode, 4531static void mvpp2_mac_link_up(struct net_device *dev, unsigned int mode,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 6785661d1a72..fe49384eba48 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1286,20 +1286,6 @@ out:
1286 mutex_unlock(&mdev->state_lock); 1286 mutex_unlock(&mdev->state_lock);
1287} 1287}
1288 1288
1289#ifdef CONFIG_NET_POLL_CONTROLLER
1290static void mlx4_en_netpoll(struct net_device *dev)
1291{
1292 struct mlx4_en_priv *priv = netdev_priv(dev);
1293 struct mlx4_en_cq *cq;
1294 int i;
1295
1296 for (i = 0; i < priv->tx_ring_num[TX]; i++) {
1297 cq = priv->tx_cq[TX][i];
1298 napi_schedule(&cq->napi);
1299 }
1300}
1301#endif
1302
1303static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv) 1289static int mlx4_en_set_rss_steer_rules(struct mlx4_en_priv *priv)
1304{ 1290{
1305 u64 reg_id; 1291 u64 reg_id;
@@ -2946,9 +2932,6 @@ static const struct net_device_ops mlx4_netdev_ops = {
2946 .ndo_tx_timeout = mlx4_en_tx_timeout, 2932 .ndo_tx_timeout = mlx4_en_tx_timeout,
2947 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, 2933 .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid,
2948 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, 2934 .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid,
2949#ifdef CONFIG_NET_POLL_CONTROLLER
2950 .ndo_poll_controller = mlx4_en_netpoll,
2951#endif
2952 .ndo_set_features = mlx4_en_set_features, 2935 .ndo_set_features = mlx4_en_set_features,
2953 .ndo_fix_features = mlx4_en_fix_features, 2936 .ndo_fix_features = mlx4_en_fix_features,
2954 .ndo_setup_tc = __mlx4_en_setup_tc, 2937 .ndo_setup_tc = __mlx4_en_setup_tc,
@@ -2983,9 +2966,6 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
2983 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state, 2966 .ndo_set_vf_link_state = mlx4_en_set_vf_link_state,
2984 .ndo_get_vf_stats = mlx4_en_get_vf_stats, 2967 .ndo_get_vf_stats = mlx4_en_get_vf_stats,
2985 .ndo_get_vf_config = mlx4_en_get_vf_config, 2968 .ndo_get_vf_config = mlx4_en_get_vf_config,
2986#ifdef CONFIG_NET_POLL_CONTROLLER
2987 .ndo_poll_controller = mlx4_en_netpoll,
2988#endif
2989 .ndo_set_features = mlx4_en_set_features, 2969 .ndo_set_features = mlx4_en_set_features,
2990 .ndo_fix_features = mlx4_en_fix_features, 2970 .ndo_fix_features = mlx4_en_fix_features,
2991 .ndo_setup_tc = __mlx4_en_setup_tc, 2971 .ndo_setup_tc = __mlx4_en_setup_tc,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 1f3372c1802e..2df92dbd38e1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -240,7 +240,8 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
240 struct mlx4_dev *dev = &priv->dev; 240 struct mlx4_dev *dev = &priv->dev;
241 struct mlx4_eq *eq = &priv->eq_table.eq[vec]; 241 struct mlx4_eq *eq = &priv->eq_table.eq[vec];
242 242
243 if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask)) 243 if (!cpumask_available(eq->affinity_mask) ||
244 cpumask_empty(eq->affinity_mask))
244 return; 245 return;
245 246
246 hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask); 247 hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3ce14d42ddc8..a53736c26c0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -206,7 +206,7 @@ static void poll_timeout(struct mlx5_cmd_work_ent *ent)
206 u8 own; 206 u8 own;
207 207
208 do { 208 do {
209 own = ent->lay->status_own; 209 own = READ_ONCE(ent->lay->status_own);
210 if (!(own & CMD_OWNER_HW)) { 210 if (!(own & CMD_OWNER_HW)) {
211 ent->ret = 0; 211 ent->ret = 0;
212 return; 212 return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
index eddd7702680b..e88340e196f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls.c
@@ -183,12 +183,13 @@ static const struct tlsdev_ops mlx5e_tls_ops = {
183 183
184void mlx5e_tls_build_netdev(struct mlx5e_priv *priv) 184void mlx5e_tls_build_netdev(struct mlx5e_priv *priv)
185{ 185{
186 u32 caps = mlx5_accel_tls_device_caps(priv->mdev);
187 struct net_device *netdev = priv->netdev; 186 struct net_device *netdev = priv->netdev;
187 u32 caps;
188 188
189 if (!mlx5_accel_is_tls_device(priv->mdev)) 189 if (!mlx5_accel_is_tls_device(priv->mdev))
190 return; 190 return;
191 191
192 caps = mlx5_accel_tls_device_caps(priv->mdev);
192 if (caps & MLX5_ACCEL_TLS_TX) { 193 if (caps & MLX5_ACCEL_TLS_TX) {
193 netdev->features |= NETIF_F_HW_TLS_TX; 194 netdev->features |= NETIF_F_HW_TLS_TX;
194 netdev->hw_features |= NETIF_F_HW_TLS_TX; 195 netdev->hw_features |= NETIF_F_HW_TLS_TX;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5a7939e70190..54118b77dc1f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4315,22 +4315,6 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4315 } 4315 }
4316} 4316}
4317 4317
4318#ifdef CONFIG_NET_POLL_CONTROLLER
4319/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
4320 * reenabling interrupts.
4321 */
4322static void mlx5e_netpoll(struct net_device *dev)
4323{
4324 struct mlx5e_priv *priv = netdev_priv(dev);
4325 struct mlx5e_channels *chs = &priv->channels;
4326
4327 int i;
4328
4329 for (i = 0; i < chs->num; i++)
4330 napi_schedule(&chs->c[i]->napi);
4331}
4332#endif
4333
4334static const struct net_device_ops mlx5e_netdev_ops = { 4318static const struct net_device_ops mlx5e_netdev_ops = {
4335 .ndo_open = mlx5e_open, 4319 .ndo_open = mlx5e_open,
4336 .ndo_stop = mlx5e_close, 4320 .ndo_stop = mlx5e_close,
@@ -4356,9 +4340,6 @@ static const struct net_device_ops mlx5e_netdev_ops = {
4356#ifdef CONFIG_MLX5_EN_ARFS 4340#ifdef CONFIG_MLX5_EN_ARFS
4357 .ndo_rx_flow_steer = mlx5e_rx_flow_steer, 4341 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
4358#endif 4342#endif
4359#ifdef CONFIG_NET_POLL_CONTROLLER
4360 .ndo_poll_controller = mlx5e_netpoll,
4361#endif
4362#ifdef CONFIG_MLX5_ESWITCH 4343#ifdef CONFIG_MLX5_ESWITCH
4363 /* SRIOV E-Switch NDOs */ 4344 /* SRIOV E-Switch NDOs */
4364 .ndo_set_vf_mac = mlx5e_set_vf_mac, 4345 .ndo_set_vf_mac = mlx5e_set_vf_mac,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index dae1c5c5d27c..d2f76070ea7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -509,7 +509,7 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,
509 509
510 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); 510 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
511 511
512 if (next_state == MLX5_RQC_STATE_RDY) { 512 if (next_state == MLX5_SQC_STATE_RDY) {
513 MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq); 513 MLX5_SET(sqc, sqc, hairpin_peer_rq, peer_rq);
514 MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca); 514 MLX5_SET(sqc, sqc, hairpin_peer_vhca, peer_vhca);
515 } 515 }
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 930700413b1d..b492152c8881 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -44,8 +44,8 @@
44#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) 44#define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100)
45 45
46#define MLXSW_SP1_FWREV_MAJOR 13 46#define MLXSW_SP1_FWREV_MAJOR 13
47#define MLXSW_SP1_FWREV_MINOR 1702 47#define MLXSW_SP1_FWREV_MINOR 1703
48#define MLXSW_SP1_FWREV_SUBMINOR 6 48#define MLXSW_SP1_FWREV_SUBMINOR 4
49#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 49#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
50 50
51static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { 51static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index e7dce79ff2c9..001b5f714c1b 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -2850,7 +2850,7 @@ static void lan743x_pcidev_shutdown(struct pci_dev *pdev)
2850 lan743x_hardware_cleanup(adapter); 2850 lan743x_hardware_cleanup(adapter);
2851} 2851}
2852 2852
2853#ifdef CONFIG_PM 2853#ifdef CONFIG_PM_SLEEP
2854static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len) 2854static u16 lan743x_pm_wakeframe_crc16(const u8 *buf, int len)
2855{ 2855{
2856 return bitrev16(crc16(0xFFFF, buf, len)); 2856 return bitrev16(crc16(0xFFFF, buf, len));
@@ -3016,7 +3016,7 @@ static int lan743x_pm_resume(struct device *dev)
3016static const struct dev_pm_ops lan743x_pm_ops = { 3016static const struct dev_pm_ops lan743x_pm_ops = {
3017 SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume) 3017 SET_SYSTEM_SLEEP_PM_OPS(lan743x_pm_suspend, lan743x_pm_resume)
3018}; 3018};
3019#endif /*CONFIG_PM */ 3019#endif /* CONFIG_PM_SLEEP */
3020 3020
3021static const struct pci_device_id lan743x_pcidev_tbl[] = { 3021static const struct pci_device_id lan743x_pcidev_tbl[] = {
3022 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) }, 3022 { PCI_DEVICE(PCI_VENDOR_ID_SMSC, PCI_DEVICE_ID_SMSC_LAN7430) },
@@ -3028,7 +3028,7 @@ static struct pci_driver lan743x_pcidev_driver = {
3028 .id_table = lan743x_pcidev_tbl, 3028 .id_table = lan743x_pcidev_tbl,
3029 .probe = lan743x_pcidev_probe, 3029 .probe = lan743x_pcidev_probe,
3030 .remove = lan743x_pcidev_remove, 3030 .remove = lan743x_pcidev_remove,
3031#ifdef CONFIG_PM 3031#ifdef CONFIG_PM_SLEEP
3032 .driver.pm = &lan743x_pm_ops, 3032 .driver.pm = &lan743x_pm_ops,
3033#endif 3033#endif
3034 .shutdown = lan743x_pcidev_shutdown, 3034 .shutdown = lan743x_pcidev_shutdown,
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index 26bb3b18f3be..3cdf63e35b53 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -91,7 +91,7 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
91 struct sk_buff *skb; 91 struct sk_buff *skb;
92 struct net_device *dev; 92 struct net_device *dev;
93 u32 *buf; 93 u32 *buf;
94 int sz, len; 94 int sz, len, buf_len;
95 u32 ifh[4]; 95 u32 ifh[4];
96 u32 val; 96 u32 val;
97 struct frame_info info; 97 struct frame_info info;
@@ -116,14 +116,20 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
116 err = -ENOMEM; 116 err = -ENOMEM;
117 break; 117 break;
118 } 118 }
119 buf = (u32 *)skb_put(skb, info.len); 119 buf_len = info.len - ETH_FCS_LEN;
120 buf = (u32 *)skb_put(skb, buf_len);
120 121
121 len = 0; 122 len = 0;
122 do { 123 do {
123 sz = ocelot_rx_frame_word(ocelot, grp, false, &val); 124 sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
124 *buf++ = val; 125 *buf++ = val;
125 len += sz; 126 len += sz;
126 } while ((sz == 4) && (len < info.len)); 127 } while (len < buf_len);
128
129 /* Read the FCS and discard it */
130 sz = ocelot_rx_frame_word(ocelot, grp, false, &val);
131 /* Update the statistics if part of the FCS was read before */
132 len -= ETH_FCS_LEN - sz;
127 133
128 if (sz < 0) { 134 if (sz < 0) {
129 err = sz; 135 err = sz;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 253bdaef1505..8ed38fd5a852 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -3146,21 +3146,6 @@ nfp_net_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3146 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL); 3146 return nfp_net_reconfig_mbox(nn, NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL);
3147} 3147}
3148 3148
3149#ifdef CONFIG_NET_POLL_CONTROLLER
3150static void nfp_net_netpoll(struct net_device *netdev)
3151{
3152 struct nfp_net *nn = netdev_priv(netdev);
3153 int i;
3154
3155 /* nfp_net's NAPIs are statically allocated so even if there is a race
3156 * with reconfig path this will simply try to schedule some disabled
3157 * NAPI instances.
3158 */
3159 for (i = 0; i < nn->dp.num_stack_tx_rings; i++)
3160 napi_schedule_irqoff(&nn->r_vecs[i].napi);
3161}
3162#endif
3163
3164static void nfp_net_stat64(struct net_device *netdev, 3149static void nfp_net_stat64(struct net_device *netdev,
3165 struct rtnl_link_stats64 *stats) 3150 struct rtnl_link_stats64 *stats)
3166{ 3151{
@@ -3519,9 +3504,6 @@ const struct net_device_ops nfp_net_netdev_ops = {
3519 .ndo_get_stats64 = nfp_net_stat64, 3504 .ndo_get_stats64 = nfp_net_stat64,
3520 .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid, 3505 .ndo_vlan_rx_add_vid = nfp_net_vlan_rx_add_vid,
3521 .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid, 3506 .ndo_vlan_rx_kill_vid = nfp_net_vlan_rx_kill_vid,
3522#ifdef CONFIG_NET_POLL_CONTROLLER
3523 .ndo_poll_controller = nfp_net_netpoll,
3524#endif
3525 .ndo_set_vf_mac = nfp_app_set_vf_mac, 3507 .ndo_set_vf_mac = nfp_app_set_vf_mac,
3526 .ndo_set_vf_vlan = nfp_app_set_vf_vlan, 3508 .ndo_set_vf_vlan = nfp_app_set_vf_vlan,
3527 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, 3509 .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 6bb76e6d3c14..f5459de6d60a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -190,10 +190,8 @@ qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
190 190
191static void 191static void
192qed_dcbx_set_params(struct qed_dcbx_results *p_data, 192qed_dcbx_set_params(struct qed_dcbx_results *p_data,
193 struct qed_hw_info *p_info, 193 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
194 bool enable, 194 bool enable, u8 prio, u8 tc,
195 u8 prio,
196 u8 tc,
197 enum dcbx_protocol_type type, 195 enum dcbx_protocol_type type,
198 enum qed_pci_personality personality) 196 enum qed_pci_personality personality)
199{ 197{
@@ -206,19 +204,30 @@ qed_dcbx_set_params(struct qed_dcbx_results *p_data,
206 else 204 else
207 p_data->arr[type].update = DONT_UPDATE_DCB_DSCP; 205 p_data->arr[type].update = DONT_UPDATE_DCB_DSCP;
208 206
207 /* Do not add vlan tag 0 when DCB is enabled and port in UFP/OV mode */
208 if ((test_bit(QED_MF_8021Q_TAGGING, &p_hwfn->cdev->mf_bits) ||
209 test_bit(QED_MF_8021AD_TAGGING, &p_hwfn->cdev->mf_bits)))
210 p_data->arr[type].dont_add_vlan0 = true;
211
209 /* QM reconf data */ 212 /* QM reconf data */
210 if (p_info->personality == personality) 213 if (p_hwfn->hw_info.personality == personality)
211 qed_hw_info_set_offload_tc(p_info, tc); 214 qed_hw_info_set_offload_tc(&p_hwfn->hw_info, tc);
215
216 /* Configure dcbx vlan priority in doorbell block for roce EDPM */
217 if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits) &&
218 type == DCBX_PROTOCOL_ROCE) {
219 qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
220 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_PCP_BB_K2, prio << 1);
221 }
212} 222}
213 223
214/* Update app protocol data and hw_info fields with the TLV info */ 224/* Update app protocol data and hw_info fields with the TLV info */
215static void 225static void
216qed_dcbx_update_app_info(struct qed_dcbx_results *p_data, 226qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
217 struct qed_hwfn *p_hwfn, 227 struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
218 bool enable, 228 bool enable, u8 prio, u8 tc,
219 u8 prio, u8 tc, enum dcbx_protocol_type type) 229 enum dcbx_protocol_type type)
220{ 230{
221 struct qed_hw_info *p_info = &p_hwfn->hw_info;
222 enum qed_pci_personality personality; 231 enum qed_pci_personality personality;
223 enum dcbx_protocol_type id; 232 enum dcbx_protocol_type id;
224 int i; 233 int i;
@@ -231,7 +240,7 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
231 240
232 personality = qed_dcbx_app_update[i].personality; 241 personality = qed_dcbx_app_update[i].personality;
233 242
234 qed_dcbx_set_params(p_data, p_info, enable, 243 qed_dcbx_set_params(p_data, p_hwfn, p_ptt, enable,
235 prio, tc, type, personality); 244 prio, tc, type, personality);
236 } 245 }
237} 246}
@@ -265,7 +274,7 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
265 * reconfiguring QM. Get protocol specific data for PF update ramrod command. 274 * reconfiguring QM. Get protocol specific data for PF update ramrod command.
266 */ 275 */
267static int 276static int
268qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, 277qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
269 struct qed_dcbx_results *p_data, 278 struct qed_dcbx_results *p_data,
270 struct dcbx_app_priority_entry *p_tbl, 279 struct dcbx_app_priority_entry *p_tbl,
271 u32 pri_tc_tbl, int count, u8 dcbx_version) 280 u32 pri_tc_tbl, int count, u8 dcbx_version)
@@ -309,7 +318,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
309 enable = true; 318 enable = true;
310 } 319 }
311 320
312 qed_dcbx_update_app_info(p_data, p_hwfn, enable, 321 qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
313 priority, tc, type); 322 priority, tc, type);
314 } 323 }
315 } 324 }
@@ -331,7 +340,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
331 continue; 340 continue;
332 341
333 enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version; 342 enable = (type == DCBX_PROTOCOL_ETH) ? false : !!dcbx_version;
334 qed_dcbx_update_app_info(p_data, p_hwfn, enable, 343 qed_dcbx_update_app_info(p_data, p_hwfn, p_ptt, enable,
335 priority, tc, type); 344 priority, tc, type);
336 } 345 }
337 346
@@ -341,7 +350,8 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
341/* Parse app TLV's to update TC information in hw_info structure for 350/* Parse app TLV's to update TC information in hw_info structure for
342 * reconfiguring QM. Get protocol specific data for PF update ramrod command. 351 * reconfiguring QM. Get protocol specific data for PF update ramrod command.
343 */ 352 */
344static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn) 353static int
354qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
345{ 355{
346 struct dcbx_app_priority_feature *p_app; 356 struct dcbx_app_priority_feature *p_app;
347 struct dcbx_app_priority_entry *p_tbl; 357 struct dcbx_app_priority_entry *p_tbl;
@@ -365,7 +375,7 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
365 p_info = &p_hwfn->hw_info; 375 p_info = &p_hwfn->hw_info;
366 num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); 376 num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
367 377
368 rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, 378 rc = qed_dcbx_process_tlv(p_hwfn, p_ptt, &data, p_tbl, pri_tc_tbl,
369 num_entries, dcbx_version); 379 num_entries, dcbx_version);
370 if (rc) 380 if (rc)
371 return rc; 381 return rc;
@@ -891,7 +901,7 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
891 return rc; 901 return rc;
892 902
893 if (type == QED_DCBX_OPERATIONAL_MIB) { 903 if (type == QED_DCBX_OPERATIONAL_MIB) {
894 rc = qed_dcbx_process_mib_info(p_hwfn); 904 rc = qed_dcbx_process_mib_info(p_hwfn, p_ptt);
895 if (!rc) { 905 if (!rc) {
896 /* reconfigure tcs of QM queues according 906 /* reconfigure tcs of QM queues according
897 * to negotiation results 907 * to negotiation results
@@ -954,6 +964,7 @@ static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
954 p_data->dcb_enable_flag = p_src->arr[type].enable; 964 p_data->dcb_enable_flag = p_src->arr[type].enable;
955 p_data->dcb_priority = p_src->arr[type].priority; 965 p_data->dcb_priority = p_src->arr[type].priority;
956 p_data->dcb_tc = p_src->arr[type].tc; 966 p_data->dcb_tc = p_src->arr[type].tc;
967 p_data->dcb_dont_add_vlan0 = p_src->arr[type].dont_add_vlan0;
957} 968}
958 969
959/* Set pf update ramrod command params */ 970/* Set pf update ramrod command params */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index a4d688c04e18..01f253ea4b22 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -55,6 +55,7 @@ struct qed_dcbx_app_data {
55 u8 update; /* Update indication */ 55 u8 update; /* Update indication */
56 u8 priority; /* Priority */ 56 u8 priority; /* Priority */
57 u8 tc; /* Traffic Class */ 57 u8 tc; /* Traffic Class */
58 bool dont_add_vlan0; /* Do not insert a vlan tag with id 0 */
58}; 59};
59 60
60#define QED_DCBX_VERSION_DISABLED 0 61#define QED_DCBX_VERSION_DISABLED 0
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 016ca8a7ec8a..97f073fd3725 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1706,7 +1706,7 @@ static int qed_vf_start(struct qed_hwfn *p_hwfn,
1706int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params) 1706int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
1707{ 1707{
1708 struct qed_load_req_params load_req_params; 1708 struct qed_load_req_params load_req_params;
1709 u32 load_code, param, drv_mb_param; 1709 u32 load_code, resp, param, drv_mb_param;
1710 bool b_default_mtu = true; 1710 bool b_default_mtu = true;
1711 struct qed_hwfn *p_hwfn; 1711 struct qed_hwfn *p_hwfn;
1712 int rc = 0, mfw_rc, i; 1712 int rc = 0, mfw_rc, i;
@@ -1852,6 +1852,19 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
1852 1852
1853 if (IS_PF(cdev)) { 1853 if (IS_PF(cdev)) {
1854 p_hwfn = QED_LEADING_HWFN(cdev); 1854 p_hwfn = QED_LEADING_HWFN(cdev);
1855
1856 /* Get pre-negotiated values for stag, bandwidth etc. */
1857 DP_VERBOSE(p_hwfn,
1858 QED_MSG_SPQ,
1859 "Sending GET_OEM_UPDATES command to trigger stag/bandwidth attention handling\n");
1860 drv_mb_param = 1 << DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET;
1861 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1862 DRV_MSG_CODE_GET_OEM_UPDATES,
1863 drv_mb_param, &resp, &param);
1864 if (rc)
1865 DP_NOTICE(p_hwfn,
1866 "Failed to send GET_OEM_UPDATES attention request\n");
1867
1855 drv_mb_param = STORM_FW_VERSION; 1868 drv_mb_param = STORM_FW_VERSION;
1856 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, 1869 rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1857 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER, 1870 DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 8faceb691657..9b3ef00e5782 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -12414,6 +12414,7 @@ struct public_drv_mb {
12414#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000 12414#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
12415#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000 12415#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
12416#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000 12416#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
12417#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
12417 12418
12418#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 12419#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
12419#define DRV_MSG_CODE_NIG_DRAIN 0x30000000 12420#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
@@ -12541,6 +12542,9 @@ struct public_drv_mb {
12541#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1 12542#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
12542#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2 12543#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
12543 12544
12545#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
12546#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
12547
12544#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 12548#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
12545#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 12549#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
12546#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 12550#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 5d37ec7e9b0b..58c7eb9d8e1b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1581,13 +1581,29 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
1581 p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag & 1581 p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
1582 FUNC_MF_CFG_OV_STAG_MASK; 1582 FUNC_MF_CFG_OV_STAG_MASK;
1583 p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan; 1583 p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
1584 if ((p_hwfn->hw_info.hw_mode & BIT(MODE_MF_SD)) && 1584 if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
1585 (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET)) { 1585 if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
1586 qed_wr(p_hwfn, p_ptt, 1586 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
1587 NIG_REG_LLH_FUNC_TAG_VALUE, p_hwfn->hw_info.ovlan); 1587 p_hwfn->hw_info.ovlan);
1588 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);
1589
1590 /* Configure DB to add external vlan to EDPM packets */
1591 qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
1592 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
1593 p_hwfn->hw_info.ovlan);
1594 } else {
1595 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
1596 qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
1597 qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
1598 qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
1599 }
1600
1588 qed_sp_pf_update_stag(p_hwfn); 1601 qed_sp_pf_update_stag(p_hwfn);
1589 } 1602 }
1590 1603
1604 DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
1605 p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);
1606
1591 /* Acknowledge the MFW */ 1607 /* Acknowledge the MFW */
1592 qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0, 1608 qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
1593 &resp, &param); 1609 &resp, &param);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index f736f70956fd..2440970882c4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -216,6 +216,12 @@
216 0x00c000UL 216 0x00c000UL
217#define DORQ_REG_IFEN \ 217#define DORQ_REG_IFEN \
218 0x100040UL 218 0x100040UL
219#define DORQ_REG_TAG1_OVRD_MODE \
220 0x1008b4UL
221#define DORQ_REG_PF_PCP_BB_K2 \
222 0x1008c4UL
223#define DORQ_REG_PF_EXT_VID_BB_K2 \
224 0x1008c8UL
219#define DORQ_REG_DB_DROP_REASON \ 225#define DORQ_REG_DB_DROP_REASON \
220 0x100a2cUL 226 0x100a2cUL
221#define DORQ_REG_DB_DROP_DETAILS \ 227#define DORQ_REG_DB_DROP_DETAILS \
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 1d8631303b53..ab30aaeac6d3 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -13,6 +13,7 @@
13#include <linux/pci.h> 13#include <linux/pci.h>
14#include <linux/netdevice.h> 14#include <linux/netdevice.h>
15#include <linux/etherdevice.h> 15#include <linux/etherdevice.h>
16#include <linux/clk.h>
16#include <linux/delay.h> 17#include <linux/delay.h>
17#include <linux/ethtool.h> 18#include <linux/ethtool.h>
18#include <linux/phy.h> 19#include <linux/phy.h>
@@ -665,6 +666,7 @@ struct rtl8169_private {
665 666
666 u16 event_slow; 667 u16 event_slow;
667 const struct rtl_coalesce_info *coalesce_info; 668 const struct rtl_coalesce_info *coalesce_info;
669 struct clk *clk;
668 670
669 struct mdio_ops { 671 struct mdio_ops {
670 void (*write)(struct rtl8169_private *, int, int); 672 void (*write)(struct rtl8169_private *, int, int);
@@ -4069,6 +4071,15 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
4069 phy_speed_up(dev->phydev); 4071 phy_speed_up(dev->phydev);
4070 4072
4071 genphy_soft_reset(dev->phydev); 4073 genphy_soft_reset(dev->phydev);
4074
4075 /* It was reported that chip version 33 ends up with 10MBit/Half on a
4076 * 1GBit link after resuming from S3. For whatever reason the PHY on
4077 * this chip doesn't properly start a renegotiation when soft-reset.
4078 * Explicitly requesting a renegotiation fixes this.
4079 */
4080 if (tp->mac_version == RTL_GIGA_MAC_VER_33 &&
4081 dev->phydev->autoneg == AUTONEG_ENABLE)
4082 phy_restart_aneg(dev->phydev);
4072} 4083}
4073 4084
4074static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) 4085static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -4775,12 +4786,14 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
4775static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) 4786static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
4776{ 4787{
4777 if (enable) { 4788 if (enable) {
4778 RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
4779 RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); 4789 RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
4790 RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
4780 } else { 4791 } else {
4781 RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); 4792 RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
4782 RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); 4793 RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
4783 } 4794 }
4795
4796 udelay(10);
4784} 4797}
4785 4798
4786static void rtl_hw_start_8168bb(struct rtl8169_private *tp) 4799static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
@@ -5625,6 +5638,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
5625 5638
5626static void rtl_hw_start_8106(struct rtl8169_private *tp) 5639static void rtl_hw_start_8106(struct rtl8169_private *tp)
5627{ 5640{
5641 rtl_hw_aspm_clkreq_enable(tp, false);
5642
5628 /* Force LAN exit from ASPM if Rx/Tx are not idle */ 5643 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5629 RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800); 5644 RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
5630 5645
@@ -5633,6 +5648,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
5633 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); 5648 RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
5634 5649
5635 rtl_pcie_state_l2l3_enable(tp, false); 5650 rtl_pcie_state_l2l3_enable(tp, false);
5651 rtl_hw_aspm_clkreq_enable(tp, true);
5636} 5652}
5637 5653
5638static void rtl_hw_start_8101(struct rtl8169_private *tp) 5654static void rtl_hw_start_8101(struct rtl8169_private *tp)
@@ -7257,6 +7273,11 @@ static int rtl_jumbo_max(struct rtl8169_private *tp)
7257 } 7273 }
7258} 7274}
7259 7275
7276static void rtl_disable_clk(void *data)
7277{
7278 clk_disable_unprepare(data);
7279}
7280
7260static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 7281static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7261{ 7282{
7262 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; 7283 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
@@ -7277,6 +7298,32 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7277 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); 7298 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
7278 tp->supports_gmii = cfg->has_gmii; 7299 tp->supports_gmii = cfg->has_gmii;
7279 7300
7301 /* Get the *optional* external "ether_clk" used on some boards */
7302 tp->clk = devm_clk_get(&pdev->dev, "ether_clk");
7303 if (IS_ERR(tp->clk)) {
7304 rc = PTR_ERR(tp->clk);
7305 if (rc == -ENOENT) {
7306 /* clk-core allows NULL (for suspend / resume) */
7307 tp->clk = NULL;
7308 } else if (rc == -EPROBE_DEFER) {
7309 return rc;
7310 } else {
7311 dev_err(&pdev->dev, "failed to get clk: %d\n", rc);
7312 return rc;
7313 }
7314 } else {
7315 rc = clk_prepare_enable(tp->clk);
7316 if (rc) {
7317 dev_err(&pdev->dev, "failed to enable clk: %d\n", rc);
7318 return rc;
7319 }
7320
7321 rc = devm_add_action_or_reset(&pdev->dev, rtl_disable_clk,
7322 tp->clk);
7323 if (rc)
7324 return rc;
7325 }
7326
7280 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 7327 /* enable device (incl. PCI PM wakeup and hotplug setup) */
7281 rc = pcim_enable_device(pdev); 7328 rc = pcim_enable_device(pdev);
7282 if (rc < 0) { 7329 if (rc < 0) {
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 1470fc12282b..9b6bf557a2f5 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -428,6 +428,7 @@ enum EIS_BIT {
 	EIS_CULF1	= 0x00000080,
 	EIS_TFFF	= 0x00000100,
 	EIS_QFS		= 0x00010000,
+	EIS_RESERVED	= (GENMASK(31, 17) | GENMASK(15, 11)),
 };
 
 /* RIC0 */
@@ -472,6 +473,7 @@ enum RIS0_BIT {
 	RIS0_FRF15	= 0x00008000,
 	RIS0_FRF16	= 0x00010000,
 	RIS0_FRF17	= 0x00020000,
+	RIS0_RESERVED	= GENMASK(31, 18),
 };
 
 /* RIC1 */
@@ -528,6 +530,7 @@ enum RIS2_BIT {
 	RIS2_QFF16	= 0x00010000,
 	RIS2_QFF17	= 0x00020000,
 	RIS2_RFFF	= 0x80000000,
+	RIS2_RESERVED	= GENMASK(30, 18),
 };
 
 /* TIC */
@@ -544,6 +547,7 @@ enum TIS_BIT {
 	TIS_FTF1	= 0x00000002,	/* Undocumented? */
 	TIS_TFUF	= 0x00000100,
 	TIS_TFWF	= 0x00000200,
+	TIS_RESERVED	= (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
 };
 
 /* ISS */
@@ -617,6 +621,7 @@ enum GIC_BIT {
 enum GIS_BIT {
 	GIS_PTCF	= 0x00000001,	/* Undocumented? */
 	GIS_PTMF	= 0x00000004,
+	GIS_RESERVED	= GENMASK(15, 10),
 };
 
 /* GIE (R-Car Gen3 only) */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index aff5516b781e..d6f753925352 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -739,10 +739,11 @@ static void ravb_error_interrupt(struct net_device *ndev)
 	u32 eis, ris2;
 
 	eis = ravb_read(ndev, EIS);
-	ravb_write(ndev, ~EIS_QFS, EIS);
+	ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
 	if (eis & EIS_QFS) {
 		ris2 = ravb_read(ndev, RIS2);
-		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
+		ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+			   RIS2);
 
 		/* Receive Descriptor Empty int */
 		if (ris2 & RIS2_QFF0)
@@ -795,7 +796,7 @@ static bool ravb_timestamp_interrupt(struct net_device *ndev)
 	u32 tis = ravb_read(ndev, TIS);
 
 	if (tis & TIS_TFUF) {
-		ravb_write(ndev, ~TIS_TFUF, TIS);
+		ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
 		ravb_get_tx_tstamp(ndev);
 		return true;
 	}
@@ -930,7 +931,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 	/* Processing RX Descriptor Ring */
 	if (ris0 & mask) {
 		/* Clear RX interrupt */
-		ravb_write(ndev, ~mask, RIS0);
+		ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
 		if (ravb_rx(ndev, &quota, q))
 			goto out;
 	}
@@ -938,7 +939,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 	if (tis & mask) {
 		spin_lock_irqsave(&priv->lock, flags);
 		/* Clear TX interrupt */
-		ravb_write(ndev, ~mask, TIS);
+		ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
 		ravb_tx_free(ndev, q, true);
 		netif_wake_subqueue(ndev, q);
 		mmiowb();
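The ravb hunks above add *_RESERVED masks built with GENMASK() and OR them into every write-to-clear, so the driver never writes 1 to reserved status bits while acknowledging an interrupt. A small user-space sketch of the same arithmetic (the GENMASK variant and mask names here are illustrative, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel's GENMASK(h, l) for 32-bit values: bits h..l set. */
#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))

#define EIS_QFS		0x00010000u
#define EIS_RESERVED	(GENMASK(31, 17) | GENMASK(15, 11))

int main(void)
{
	/* Write-to-clear register: writing 0 clears a bit, writing 1 keeps it.
	 * Clear only QFS, and never write 1 to a reserved bit. */
	uint32_t ack = ~(EIS_QFS | EIS_RESERVED);

	printf("EIS_RESERVED = 0x%08x\n", EIS_RESERVED);	/* 0xfffef800 */
	printf("ack value    = 0x%08x\n", ack);			/* 0x000007ff */
	return 0;
}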
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 0721b5c35d91..dce2a40a31e3 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -315,7 +315,7 @@ void ravb_ptp_interrupt(struct net_device *ndev)
 		}
 	}
 
-	ravb_write(ndev, ~gis, GIS);
+	ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
 }
 
 void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index c5bc124b41a9..d1bb73bf9914 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -77,7 +77,8 @@ static void ether3_setmulticastlist(struct net_device *dev);
 static int ether3_rx(struct net_device *dev, unsigned int maxcnt);
 static void ether3_tx(struct net_device *dev);
 static int ether3_open (struct net_device *dev);
-static int ether3_sendpacket (struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ether3_sendpacket(struct sk_buff *skb,
+				     struct net_device *dev);
 static irqreturn_t ether3_interrupt (int irq, void *dev_id);
 static int ether3_close (struct net_device *dev);
 static void ether3_setmulticastlist (struct net_device *dev);
@@ -481,7 +482,7 @@ static void ether3_timeout(struct net_device *dev)
 /*
  * Transmit a packet
  */
-static int
+static netdev_tx_t
 ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
 {
 	unsigned long flags;
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 573691bc3b71..70cce63a6081 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -578,7 +578,8 @@ static inline int sgiseeq_reset(struct net_device *dev)
 	return 0;
 }
 
-static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct sgiseeq_private *sp = netdev_priv(dev);
 	struct hpc3_ethregs *hregs = sp->hregs;
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 18d533fdf14c..3140999642ba 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -99,7 +99,7 @@ struct ioc3_private {
 
 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static void ioc3_set_multicast_list(struct net_device *dev);
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static void ioc3_timeout(struct net_device *dev);
 static inline unsigned int ioc3_hash(const unsigned char *addr);
 static inline void ioc3_stop(struct ioc3_private *ip);
@@ -1390,7 +1390,7 @@ static struct pci_driver ioc3_driver = {
 	.remove = ioc3_remove_one,
 };
 
-static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	unsigned long data;
 	struct ioc3_private *ip = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index ea55abd62ec7..703fbbefea44 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -697,7 +697,7 @@ static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
 /*
  * Transmit a packet (called by the kernel)
  */
-static int meth_tx(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t meth_tx(struct sk_buff *skb, struct net_device *dev)
 {
 	struct meth_private *priv = netdev_priv(dev);
 	unsigned long flags;
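The seeq/sgi hunks above (and the wiznet ones further down) only change the declared return type of the ndo_start_xmit handlers from int to netdev_tx_t; the returned values (NETDEV_TX_OK / NETDEV_TX_BUSY) stay the same, the typed signature just lets the compiler catch handlers that return raw integers. A hedged kernel-style sketch of the expected shape (driver names hypothetical, not standalone code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical driver: the signature expected by .ndo_start_xmit. */
static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (!netif_carrier_ok(dev)) {
		dev_kfree_skb_any(skb);	/* drop, but tell the stack we consumed it */
		return NETDEV_TX_OK;
	}

	/* ... queue the skb to hardware here ... */
	return NETDEV_TX_OK;
}

static const struct net_device_ops example_netdev_ops = {
	.ndo_start_xmit	= example_start_xmit,
};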
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 1854f270ad66..b1b305f8f414 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -258,10 +258,10 @@ struct stmmac_safety_stats {
 #define MAX_DMA_RIWT	0xff
 #define MIN_DMA_RIWT	0x20
 /* Tx coalesce parameters */
-#define STMMAC_COAL_TX_TIMER	40000
+#define STMMAC_COAL_TX_TIMER	1000
 #define STMMAC_MAX_COAL_TX_TICK	100000
 #define STMMAC_TX_MAX_FRAMES	256
-#define STMMAC_TX_FRAMES	64
+#define STMMAC_TX_FRAMES	25
 
 /* Packets types */
 enum packets_types {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index c0a855b7ab3b..63e1064b27a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -48,6 +48,8 @@ struct stmmac_tx_info {
 
 /* Frequently used values are kept adjacent for cache effect */
 struct stmmac_tx_queue {
+	u32 tx_count_frames;
+	struct timer_list txtimer;
 	u32 queue_index;
 	struct stmmac_priv *priv_data;
 	struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
@@ -73,7 +75,14 @@ struct stmmac_rx_queue {
 	u32 rx_zeroc_thresh;
 	dma_addr_t dma_rx_phy;
 	u32 rx_tail_addr;
+};
+
+struct stmmac_channel {
 	struct napi_struct napi ____cacheline_aligned_in_smp;
+	struct stmmac_priv *priv_data;
+	u32 index;
+	int has_rx;
+	int has_tx;
 };
 
 struct stmmac_tc_entry {
@@ -109,14 +118,12 @@ struct stmmac_pps_cfg {
 
 struct stmmac_priv {
 	/* Frequently used values are kept adjacent for cache effect */
-	u32 tx_count_frames;
 	u32 tx_coal_frames;
 	u32 tx_coal_timer;
 
 	int tx_coalesce;
 	int hwts_tx_en;
 	bool tx_path_in_lpi_mode;
-	struct timer_list txtimer;
 	bool tso;
 
 	unsigned int dma_buf_sz;
@@ -137,6 +144,9 @@ struct stmmac_priv {
 	/* TX Queue */
 	struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
 
+	/* Generic channel for NAPI */
+	struct stmmac_channel channel[STMMAC_CH_MAX];
+
 	bool oldlink;
 	int speed;
 	int oldduplex;
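The header change above moves the napi_struct out of stmmac_rx_queue into a new stmmac_channel that can represent the RX side, the TX side, or both of one DMA channel; the poll routine later recovers the channel from the napi pointer with container_of(). A user-space sketch of that recovery (the struct layout is simplified, the names mirror the patch):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct napi_struct { int weight; };	/* stand-in for the kernel type */

struct stmmac_channel {
	struct napi_struct napi;	/* embedded, as in the patch */
	unsigned int index;
	int has_rx, has_tx;
};

int main(void)
{
	struct stmmac_channel ch = { .index = 3, .has_rx = 1, .has_tx = 1 };
	struct napi_struct *napi = &ch.napi;	/* what the poll callback receives */

	struct stmmac_channel *back =
		container_of(napi, struct stmmac_channel, napi);
	printf("recovered channel index = %u\n", back->index);
	return 0;
}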
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 9f458bb16f2a..75896d6ba6e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -148,12 +148,14 @@ static void stmmac_verify_args(void)
 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 {
 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
 	u32 queue;
 
-	for (queue = 0; queue < rx_queues_cnt; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
 
-		napi_disable(&rx_q->napi);
+		napi_disable(&ch->napi);
 	}
 }
 
@@ -164,12 +166,14 @@ static void stmmac_disable_all_queues(struct stmmac_priv *priv)
 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
 {
 	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+	u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
 	u32 queue;
 
-	for (queue = 0; queue < rx_queues_cnt; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
 
-		napi_enable(&rx_q->napi);
+		napi_enable(&ch->napi);
 	}
 }
 
@@ -1843,18 +1847,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
  * @queue: TX queue index
  * Description: it reclaims the transmit resources after transmission completes.
  */
-static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
 {
 	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
 	unsigned int bytes_compl = 0, pkts_compl = 0;
-	unsigned int entry;
+	unsigned int entry, count = 0;
 
-	netif_tx_lock(priv->dev);
+	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
 
 	priv->xstats.tx_clean++;
 
 	entry = tx_q->dirty_tx;
-	while (entry != tx_q->cur_tx) {
+	while ((entry != tx_q->cur_tx) && (count < budget)) {
 		struct sk_buff *skb = tx_q->tx_skbuff[entry];
 		struct dma_desc *p;
 		int status;
@@ -1870,6 +1874,8 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
 		if (unlikely(status & tx_dma_own))
 			break;
 
+		count++;
+
 		/* Make sure descriptor fields are read after reading
 		 * the own bit.
 		 */
@@ -1937,7 +1943,10 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
 		stmmac_enable_eee_mode(priv);
 		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
 	}
-	netif_tx_unlock(priv->dev);
+
+	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+	return count;
 }
 
 /**
@@ -2020,6 +2029,33 @@ static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
 	return false;
 }
 
+static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan)
+{
+	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+						 &priv->xstats, chan);
+	struct stmmac_channel *ch = &priv->channel[chan];
+	bool needs_work = false;
+
+	if ((status & handle_rx) && ch->has_rx) {
+		needs_work = true;
+	} else {
+		status &= ~handle_rx;
+	}
+
+	if ((status & handle_tx) && ch->has_tx) {
+		needs_work = true;
+	} else {
+		status &= ~handle_tx;
+	}
+
+	if (needs_work && napi_schedule_prep(&ch->napi)) {
+		stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+		__napi_schedule(&ch->napi);
+	}
+
+	return status;
+}
+
 /**
  * stmmac_dma_interrupt - DMA ISR
  * @priv: driver private structure
@@ -2034,57 +2070,14 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
 	u32 channels_to_check = tx_channel_count > rx_channel_count ?
 				tx_channel_count : rx_channel_count;
 	u32 chan;
-	bool poll_scheduled = false;
 	int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
 
 	/* Make sure we never check beyond our status buffer. */
 	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
 		channels_to_check = ARRAY_SIZE(status);
 
-	/* Each DMA channel can be used for rx and tx simultaneously, yet
-	 * napi_struct is embedded in struct stmmac_rx_queue rather than in a
-	 * stmmac_channel struct.
-	 * Because of this, stmmac_poll currently checks (and possibly wakes)
-	 * all tx queues rather than just a single tx queue.
-	 */
 	for (chan = 0; chan < channels_to_check; chan++)
-		status[chan] = stmmac_dma_interrupt_status(priv, priv->ioaddr,
-							   &priv->xstats, chan);
-
-	for (chan = 0; chan < rx_channel_count; chan++) {
-		if (likely(status[chan] & handle_rx)) {
-			struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
-
-			if (likely(napi_schedule_prep(&rx_q->napi))) {
-				stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
-				__napi_schedule(&rx_q->napi);
-				poll_scheduled = true;
-			}
-		}
-	}
-
-	/* If we scheduled poll, we already know that tx queues will be checked.
-	 * If we didn't schedule poll, see if any DMA channel (used by tx) has a
-	 * completed transmission, if so, call stmmac_poll (once).
-	 */
-	if (!poll_scheduled) {
-		for (chan = 0; chan < tx_channel_count; chan++) {
-			if (status[chan] & handle_tx) {
-				/* It doesn't matter what rx queue we choose
-				 * here. We use 0 since it always exists.
-				 */
-				struct stmmac_rx_queue *rx_q =
-					&priv->rx_queue[0];
-
-				if (likely(napi_schedule_prep(&rx_q->napi))) {
-					stmmac_disable_dma_irq(priv,
-							priv->ioaddr, chan);
-					__napi_schedule(&rx_q->napi);
-				}
-				break;
-			}
-		}
-	}
+		status[chan] = stmmac_napi_check(priv, chan);
 
 	for (chan = 0; chan < tx_channel_count; chan++) {
 		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
@@ -2220,8 +2213,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
 				    tx_q->dma_tx_phy, chan);
 
-		tx_q->tx_tail_addr = tx_q->dma_tx_phy +
-			    (DMA_TX_SIZE * sizeof(struct dma_desc));
+		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
 		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
 				       tx_q->tx_tail_addr, chan);
 	}
@@ -2233,6 +2225,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 	return ret;
 }
 
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
+{
+	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
+
+	mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer));
+}
+
 /**
  * stmmac_tx_timer - mitigation sw timer for tx.
  * @data: data pointer
@@ -2241,13 +2240,14 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
  */
 static void stmmac_tx_timer(struct timer_list *t)
 {
-	struct stmmac_priv *priv = from_timer(priv, t, txtimer);
-	u32 tx_queues_count = priv->plat->tx_queues_to_use;
-	u32 queue;
+	struct stmmac_tx_queue *tx_q = from_timer(tx_q, t, txtimer);
+	struct stmmac_priv *priv = tx_q->priv_data;
+	struct stmmac_channel *ch;
+
+	ch = &priv->channel[tx_q->queue_index];
 
-	/* let's scan all the tx queues */
-	for (queue = 0; queue < tx_queues_count; queue++)
-		stmmac_tx_clean(priv, queue);
+	if (likely(napi_schedule_prep(&ch->napi)))
+		__napi_schedule(&ch->napi);
 }
 
 /**
@@ -2260,11 +2260,17 @@ static void stmmac_tx_timer(struct timer_list *t)
  */
 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
 {
+	u32 tx_channel_count = priv->plat->tx_queues_to_use;
+	u32 chan;
+
 	priv->tx_coal_frames = STMMAC_TX_FRAMES;
 	priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
-	timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
-	priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
-	add_timer(&priv->txtimer);
+
+	for (chan = 0; chan < tx_channel_count; chan++) {
+		struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
+
+		timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0);
+	}
 }
 
 static void stmmac_set_rings_length(struct stmmac_priv *priv)
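The timer rework above gives every TX queue its own timer_list; the callback recovers the owning queue with from_timer() (a container_of over the embedded timer) instead of scanning every queue from one driver-wide timer. A user-space sketch of the from_timer idea (the timer machinery is stubbed out, and this simplified macro takes the type where the kernel's from_timer() takes the variable name):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
/* Simplified stand-in for the kernel's from_timer() helper. */
#define from_timer(type, timer_ptr, field) container_of(timer_ptr, type, field)

struct timer_list { long expires; };	/* stand-in for the kernel type */

struct tx_queue {
	unsigned int queue_index;
	struct timer_list txtimer;	/* one timer per queue, as in the patch */
};

static void tx_timer_cb(struct timer_list *t)
{
	struct tx_queue *q = from_timer(struct tx_queue, t, txtimer);

	printf("timer fired for queue %u\n", q->queue_index);
}

int main(void)
{
	struct tx_queue q = { .queue_index = 2 };

	tx_timer_cb(&q.txtimer);	/* what the timer core would do on expiry */
	return 0;
}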
@@ -2592,6 +2598,7 @@ static void stmmac_hw_teardown(struct net_device *dev)
 static int stmmac_open(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 chan;
 	int ret;
 
 	stmmac_check_ether_addr(priv);
@@ -2688,7 +2695,9 @@ irq_error:
 	if (dev->phydev)
 		phy_stop(dev->phydev);
 
-	del_timer_sync(&priv->txtimer);
+	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+		del_timer_sync(&priv->tx_queue[chan].txtimer);
+
 	stmmac_hw_teardown(dev);
 init_error:
 	free_dma_desc_resources(priv);
@@ -2708,6 +2717,7 @@ dma_desc_error:
 static int stmmac_release(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 chan;
 
 	if (priv->eee_enabled)
 		del_timer_sync(&priv->eee_ctrl_timer);
@@ -2722,7 +2732,8 @@ static int stmmac_release(struct net_device *dev)
 
 	stmmac_disable_all_queues(priv);
 
-	del_timer_sync(&priv->txtimer);
+	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+		del_timer_sync(&priv->tx_queue[chan].txtimer);
 
 	/* Free the IRQ lines */
 	free_irq(dev->irq, dev);
@@ -2936,14 +2947,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 	priv->xstats.tx_tso_nfrags += nfrags;
 
 	/* Manage tx mitigation */
-	priv->tx_count_frames += nfrags + 1;
-	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
-		mod_timer(&priv->txtimer,
-			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
-	} else {
-		priv->tx_count_frames = 0;
+	tx_q->tx_count_frames += nfrags + 1;
+	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
 		stmmac_set_tx_ic(priv, desc);
 		priv->xstats.tx_set_ic_bit++;
+		tx_q->tx_count_frames = 0;
+	} else {
+		stmmac_tx_timer_arm(priv, queue);
 	}
 
 	skb_tx_timestamp(skb);
@@ -2992,6 +3002,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
+	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
 
 	return NETDEV_TX_OK;
@@ -3146,14 +3157,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * This approach takes care about the fragments: desc is the first
 	 * element in case of no SG.
 	 */
-	priv->tx_count_frames += nfrags + 1;
-	if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
-		mod_timer(&priv->txtimer,
-			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
-	} else {
-		priv->tx_count_frames = 0;
+	tx_q->tx_count_frames += nfrags + 1;
+	if (priv->tx_coal_frames <= tx_q->tx_count_frames) {
 		stmmac_set_tx_ic(priv, desc);
 		priv->xstats.tx_set_ic_bit++;
+		tx_q->tx_count_frames = 0;
+	} else {
+		stmmac_tx_timer_arm(priv, queue);
 	}
 
 	skb_tx_timestamp(skb);
@@ -3199,6 +3209,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
 	stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+	tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * sizeof(*desc));
 	stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
 
 	return NETDEV_TX_OK;
@@ -3319,6 +3331,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
 	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	struct stmmac_channel *ch = &priv->channel[queue];
 	unsigned int entry = rx_q->cur_rx;
 	int coe = priv->hw->rx_csum;
 	unsigned int next_entry;
@@ -3491,7 +3504,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 			else
 				skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-			napi_gro_receive(&rx_q->napi, skb);
+			napi_gro_receive(&ch->napi, skb);
 
 			priv->dev->stats.rx_packets++;
 			priv->dev->stats.rx_bytes += frame_len;
@@ -3514,27 +3527,33 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
  * Description :
  *  To look at the incoming frames and clear the tx resources.
  */
-static int stmmac_poll(struct napi_struct *napi, int budget)
+static int stmmac_napi_poll(struct napi_struct *napi, int budget)
 {
-	struct stmmac_rx_queue *rx_q =
-		container_of(napi, struct stmmac_rx_queue, napi);
-	struct stmmac_priv *priv = rx_q->priv_data;
-	u32 tx_count = priv->plat->tx_queues_to_use;
-	u32 chan = rx_q->queue_index;
-	int work_done = 0;
-	u32 queue;
+	struct stmmac_channel *ch =
+		container_of(napi, struct stmmac_channel, napi);
+	struct stmmac_priv *priv = ch->priv_data;
+	int work_done = 0, work_rem = budget;
+	u32 chan = ch->index;
 
 	priv->xstats.napi_poll++;
 
-	/* check all the queues */
-	for (queue = 0; queue < tx_count; queue++)
-		stmmac_tx_clean(priv, queue);
+	if (ch->has_tx) {
+		int done = stmmac_tx_clean(priv, work_rem, chan);
 
-	work_done = stmmac_rx(priv, budget, rx_q->queue_index);
-	if (work_done < budget) {
-		napi_complete_done(napi, work_done);
-		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+		work_done += done;
+		work_rem -= done;
+	}
+
+	if (ch->has_rx) {
+		int done = stmmac_rx(priv, work_rem, chan);
+
+		work_done += done;
+		work_rem -= done;
 	}
+
+	if (work_done < budget && napi_complete_done(napi, work_done))
+		stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+
 	return work_done;
 }
 
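After the rewrite, a single stmmac_napi_poll() services both directions of a channel: TX reclaim runs first against the remaining budget, RX consumes what is left, and the DMA interrupt is re-enabled only when the full budget was not used and napi_complete_done() accepted completion. A user-space sketch of the budget bookkeeping (the two work functions are stand-ins for stmmac_tx_clean() and stmmac_rx()):

#include <stdio.h>

static int fake_tx_clean(int budget) { return budget < 3 ? budget : 3; }
static int fake_rx(int budget)       { return budget < 5 ? budget : 5; }

/* Mirrors the bookkeeping in stmmac_napi_poll(): tx first, then rx,
 * both bounded by what remains of the budget. */
static int napi_poll(int budget, int has_tx, int has_rx)
{
	int work_done = 0, work_rem = budget;

	if (has_tx) {
		int done = fake_tx_clean(work_rem);

		work_done += done;
		work_rem -= done;
	}
	if (has_rx) {
		int done = fake_rx(work_rem);

		work_done += done;
		work_rem -= done;
	}
	/* the caller re-enables the IRQ only if work_done < budget */
	return work_done;
}

int main(void)
{
	printf("work_done = %d of 64\n", napi_poll(64, 1, 1));	/* 8 */
	printf("work_done = %d of 4\n", napi_poll(4, 1, 1));	/* 4 */
	return 0;
}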
@@ -4198,8 +4217,8 @@ int stmmac_dvr_probe(struct device *device,
 {
 	struct net_device *ndev = NULL;
 	struct stmmac_priv *priv;
+	u32 queue, maxq;
 	int ret = 0;
-	u32 queue;
 
 	ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
 				  MTL_MAX_TX_QUEUES,
@@ -4322,11 +4341,22 @@ int stmmac_dvr_probe(struct device *device,
 			    "Enable RX Mitigation via HW Watchdog Timer\n");
 	}
 
-	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	/* Setup channels NAPI */
+	maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
 
-		netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
-			       (8 * priv->plat->rx_queues_to_use));
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
+
+		ch->priv_data = priv;
+		ch->index = queue;
+
+		if (queue < priv->plat->rx_queues_to_use)
+			ch->has_rx = true;
+		if (queue < priv->plat->tx_queues_to_use)
+			ch->has_tx = true;
+
+		netif_napi_add(ndev, &ch->napi, stmmac_napi_poll,
+			       NAPI_POLL_WEIGHT);
 	}
 
 	mutex_init(&priv->lock);
@@ -4372,10 +4402,10 @@ error_netdev_register:
 	    priv->hw->pcs != STMMAC_PCS_RTBI)
 		stmmac_mdio_unregister(ndev);
 error_mdio_register:
-	for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	for (queue = 0; queue < maxq; queue++) {
+		struct stmmac_channel *ch = &priv->channel[queue];
 
-		netif_napi_del(&rx_q->napi);
+		netif_napi_del(&ch->napi);
 	}
 error_hw_init:
 	destroy_workqueue(priv->wq);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3609c7b696c7..2b800ce1d5bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -67,7 +67,7 @@ static int dwmac1000_validate_mcast_bins(int mcast_bins)
  * Description:
  * This function validates the number of Unicast address entries supported
  * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter
  * logic. This function validates a valid, supported configuration is
  * selected, and defaults to 1 Unicast address if an unsupported
  * configuration is selected.
@@ -77,8 +77,7 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
 	int x = ucast_entries;
 
 	switch (x) {
-	case 1:
-	case 32:
+	case 1 ... 32:
 	case 64:
 	case 128:
 		break;
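The switch above now uses the GCC/Clang case-range extension (case 1 ... 32:) so any unicast filter count from 1 to 32 is accepted, instead of only the exact values 1 and 32. A small standalone example of the syntax:

#include <stdio.h>

/* Accept 1..32, 64 or 128 entries; anything else falls back to 1,
 * mirroring dwmac1000_validate_ucast_entries() above. */
static int validate_ucast_entries(int x)
{
	switch (x) {
	case 1 ... 32:	/* GCC/Clang extension: inclusive range */
	case 64:
	case 128:
		return x;
	default:
		return 1;
	}
}

int main(void)
{
	printf("%d %d %d\n", validate_ucast_entries(7),
	       validate_ucast_entries(33), validate_ucast_entries(128));
	return 0;	/* prints: 7 1 128 */
}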
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 9263d638bd6d..f932923f7d56 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -41,6 +41,7 @@ config TI_DAVINCI_MDIO
 config TI_DAVINCI_CPDMA
 	tristate "TI DaVinci CPDMA Support"
 	depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+	select GENERIC_ALLOCATOR
 	---help---
 	  This driver supports TI's DaVinci CPDMA dma engine.
 
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 2bdfb39215e9..d8ba512f166a 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -835,7 +835,7 @@ static void w5100_tx_work(struct work_struct *work)
 	w5100_tx_skb(priv->ndev, skb);
 }
 
-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct w5100_priv *priv = netdev_priv(ndev);
 
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 56ae573001e8..80fdbff67d82 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -365,7 +365,7 @@ static void w5300_tx_timeout(struct net_device *ndev)
 	netif_wake_queue(ndev);
 }
 
-static int w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static netdev_tx_t w5300_start_tx(struct sk_buff *skb, struct net_device *ndev)
 {
 	struct w5300_priv *priv = netdev_priv(ndev);
 
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 31c3d77b4733..fe01e141c8f8 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1203,6 +1203,9 @@ static void netvsc_send_vf(struct net_device *ndev,
 
 	net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated;
 	net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial;
+	netdev_info(ndev, "VF slot %u %s\n",
+		    net_device_ctx->vf_serial,
+		    net_device_ctx->vf_alloc ? "added" : "removed");
 }
 
 static void netvsc_receive_inband(struct net_device *ndev,
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 70921bbe0e28..3af6d8d15233 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1894,20 +1894,6 @@ out_unlock:
 	rtnl_unlock();
 }
 
-static struct net_device *get_netvsc_bymac(const u8 *mac)
-{
-	struct net_device_context *ndev_ctx;
-
-	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
-		struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx);
-
-		if (ether_addr_equal(mac, dev->perm_addr))
-			return dev;
-	}
-
-	return NULL;
-}
-
 static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
 {
 	struct net_device_context *net_device_ctx;
@@ -2036,26 +2022,48 @@ static void netvsc_vf_setup(struct work_struct *w)
 	rtnl_unlock();
 }
 
+/* Find netvsc by VMBus serial number.
+ * The PCI hyperv controller records the serial number as the slot.
+ */
+static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
+{
+	struct device *parent = vf_netdev->dev.parent;
+	struct net_device_context *ndev_ctx;
+	struct pci_dev *pdev;
+
+	if (!parent || !dev_is_pci(parent))
+		return NULL; /* not a PCI device */
+
+	pdev = to_pci_dev(parent);
+	if (!pdev->slot) {
+		netdev_notice(vf_netdev, "no PCI slot information\n");
+		return NULL;
+	}
+
+	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
+		if (!ndev_ctx->vf_alloc)
+			continue;
+
+		if (ndev_ctx->vf_serial == pdev->slot->number)
+			return hv_get_drvdata(ndev_ctx->device_ctx);
+	}
+
+	netdev_notice(vf_netdev,
+		      "no netdev found for slot %u\n", pdev->slot->number);
+	return NULL;
+}
+
 static int netvsc_register_vf(struct net_device *vf_netdev)
 {
-	struct net_device *ndev;
 	struct net_device_context *net_device_ctx;
-	struct device *pdev = vf_netdev->dev.parent;
 	struct netvsc_device *netvsc_dev;
+	struct net_device *ndev;
 	int ret;
 
 	if (vf_netdev->addr_len != ETH_ALEN)
 		return NOTIFY_DONE;
 
-	if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
-		return NOTIFY_DONE;
-
-	/*
-	 * We will use the MAC address to locate the synthetic interface to
-	 * associate with the VF interface. If we don't find a matching
-	 * synthetic interface, move on.
-	 */
-	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
+	ndev = get_netvsc_byslot(vf_netdev);
 	if (!ndev)
 		return NOTIFY_DONE;
 
@@ -2272,17 +2280,15 @@ static int netvsc_remove(struct hv_device *dev)
 
 	cancel_delayed_work_sync(&ndev_ctx->dwork);
 
-	rcu_read_lock();
-	nvdev = rcu_dereference(ndev_ctx->nvdev);
-
-	if (nvdev)
+	rtnl_lock();
+	nvdev = rtnl_dereference(ndev_ctx->nvdev);
+	if (nvdev)
 		cancel_work_sync(&nvdev->subchan_work);
 
 	/*
 	 * Call to the vsc driver to let it know that the device is being
 	 * removed. Also blocks mtu and channel changes.
 	 */
-	rtnl_lock();
 	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
 	if (vf_netdev)
 		netvsc_unregister_vf(vf_netdev);
@@ -2294,7 +2300,6 @@ static int netvsc_remove(struct hv_device *dev)
 	list_del(&ndev_ctx->list);
 
 	rtnl_unlock();
-	rcu_read_unlock();
 
 	hv_set_drvdata(dev, NULL);
 
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 740655261e5b..83060fb349f4 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -349,6 +349,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
 	}
 	if (bus->started)
 		bus->socket_ops->start(bus->sfp);
+	bus->netdev->sfp_bus = bus;
 	bus->registered = true;
 	return 0;
 }
@@ -357,6 +358,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
 {
 	const struct sfp_upstream_ops *ops = bus->upstream_ops;
 
+	bus->netdev->sfp_bus = NULL;
 	if (bus->registered) {
 		if (bus->started)
 			bus->socket_ops->stop(bus->sfp);
@@ -438,7 +440,6 @@ static void sfp_upstream_clear(struct sfp_bus *bus)
 {
 	bus->upstream_ops = NULL;
 	bus->upstream = NULL;
-	bus->netdev->sfp_bus = NULL;
 	bus->netdev = NULL;
 }
 
@@ -467,7 +468,6 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
 	bus->upstream_ops = ops;
 	bus->upstream = upstream;
 	bus->netdev = ndev;
-	ndev->sfp_bus = bus;
 
 	if (bus->sfp) {
 		ret = sfp_register_bus(bus);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index ce61231e96ea..62dc564b251d 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -429,6 +429,9 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
 	if (!skb)
 		goto out;
 
+	if (skb_mac_header_len(skb) < ETH_HLEN)
+		goto drop;
+
 	if (!pskb_may_pull(skb, sizeof(struct pppoe_hdr)))
 		goto drop;
 
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index ebd07ad82431..e2648b5a3861 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1153,43 +1153,6 @@ static netdev_features_t tun_net_fix_features(struct net_device *dev,
 
 	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
 }
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void tun_poll_controller(struct net_device *dev)
-{
-	/*
-	 * Tun only receives frames when:
-	 * 1) the char device endpoint gets data from user space
-	 * 2) the tun socket gets a sendmsg call from user space
-	 * If NAPI is not enabled, since both of those are synchronous
-	 * operations, we are guaranteed never to have pending data when we poll
-	 * for it so there is nothing to do here but return.
-	 * We need this though so netpoll recognizes us as an interface that
-	 * supports polling, which enables bridge devices in virt setups to
-	 * still use netconsole
-	 * If NAPI is enabled, however, we need to schedule polling for all
-	 * queues unless we are using napi_gro_frags(), which we call in
-	 * process context and not in NAPI context.
-	 */
-	struct tun_struct *tun = netdev_priv(dev);
-
-	if (tun->flags & IFF_NAPI) {
-		struct tun_file *tfile;
-		int i;
-
-		if (tun_napi_frags_enabled(tun))
-			return;
-
-		rcu_read_lock();
-		for (i = 0; i < tun->numqueues; i++) {
-			tfile = rcu_dereference(tun->tfiles[i]);
-			if (tfile->napi_enabled)
-				napi_schedule(&tfile->napi);
-		}
-		rcu_read_unlock();
-	}
-	return;
-}
-#endif
 
 static void tun_set_headroom(struct net_device *dev, int new_hr)
 {
@@ -1283,9 +1246,6 @@ static const struct net_device_ops tun_netdev_ops = {
 	.ndo_start_xmit		= tun_net_xmit,
 	.ndo_fix_features	= tun_net_fix_features,
 	.ndo_select_queue	= tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= tun_poll_controller,
-#endif
 	.ndo_set_rx_headroom	= tun_set_headroom,
 	.ndo_get_stats64	= tun_net_get_stats64,
 };
@@ -1365,9 +1325,6 @@ static const struct net_device_ops tap_netdev_ops = {
 	.ndo_set_mac_address	= eth_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_select_queue	= tun_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= tun_poll_controller,
-#endif
 	.ndo_features_check	= passthru_features_check,
 	.ndo_set_rx_headroom	= tun_set_headroom,
 	.ndo_get_stats64	= tun_net_get_stats64,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index e3270deecec2..533b6fb8d923 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1213,13 +1213,13 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1199, 0x9061, 8)},	/* Sierra Wireless Modem */
 	{QMI_FIXED_INTF(0x1199, 0x9063, 8)},	/* Sierra Wireless EM7305 */
 	{QMI_FIXED_INTF(0x1199, 0x9063, 10)},	/* Sierra Wireless EM7305 */
-	{QMI_FIXED_INTF(0x1199, 0x9071, 8)},	/* Sierra Wireless MC74xx */
-	{QMI_FIXED_INTF(0x1199, 0x9071, 10)},	/* Sierra Wireless MC74xx */
-	{QMI_FIXED_INTF(0x1199, 0x9079, 8)},	/* Sierra Wireless EM74xx */
-	{QMI_FIXED_INTF(0x1199, 0x9079, 10)},	/* Sierra Wireless EM74xx */
-	{QMI_FIXED_INTF(0x1199, 0x907b, 8)},	/* Sierra Wireless EM74xx */
-	{QMI_FIXED_INTF(0x1199, 0x907b, 10)},	/* Sierra Wireless EM74xx */
-	{QMI_FIXED_INTF(0x1199, 0x9091, 8)},	/* Sierra Wireless EM7565 */
+	{QMI_QUIRK_SET_DTR(0x1199, 0x9071, 8)},	/* Sierra Wireless MC74xx */
+	{QMI_QUIRK_SET_DTR(0x1199, 0x9071, 10)},/* Sierra Wireless MC74xx */
+	{QMI_QUIRK_SET_DTR(0x1199, 0x9079, 8)},	/* Sierra Wireless EM74xx */
+	{QMI_QUIRK_SET_DTR(0x1199, 0x9079, 10)},/* Sierra Wireless EM74xx */
+	{QMI_QUIRK_SET_DTR(0x1199, 0x907b, 8)},	/* Sierra Wireless EM74xx */
+	{QMI_QUIRK_SET_DTR(0x1199, 0x907b, 10)},/* Sierra Wireless EM74xx */
+	{QMI_QUIRK_SET_DTR(0x1199, 0x9091, 8)},	/* Sierra Wireless EM7565 */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
 	{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},	/* Alcatel L800MA */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 8d679c8b7f25..41a00cd76955 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -463,6 +463,8 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
 	int mac_len, delta, off;
 	struct xdp_buff xdp;
 
+	skb_orphan(skb);
+
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(rq->xdp_prog);
 	if (unlikely(!xdp_prog)) {
@@ -508,8 +510,6 @@ static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, struct sk_buff *skb,
 		skb_copy_header(nskb, skb);
 		head_off = skb_headroom(nskb) - skb_headroom(skb);
 		skb_headers_offset_update(nskb, head_off);
-		if (skb->sk)
-			skb_set_owner_w(nskb, skb->sk);
 		consume_skb(skb);
 		skb = nskb;
 	}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 9407acbd19a9..f17f602e6171 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -908,7 +908,11 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 			BUG_ON(pull_to <= skb_headlen(skb));
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
-		BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+		if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+			queue->rx.rsp_cons = ++cons;
+			kfree_skb(nskb);
+			return ~0U;
+		}
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				skb_frag_page(nfrag),
@@ -1045,6 +1049,8 @@ err:
 		skb->len += rx->status;
 
 		i = xennet_fill_frags(queue, skb, &tmpq);
+		if (unlikely(i == ~0U))
+			goto err;
 
 		if (rx->flags & XEN_NETRXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5a9562881d4e..9fe3fff818b8 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -537,8 +537,10 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 
 	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
 	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
-	if (!ctrl->ana_log_buf)
+	if (!ctrl->ana_log_buf) {
+		error = -ENOMEM;
 		goto out;
+	}
 
 	error = nvme_read_ana_log(ctrl, true);
 	if (error)
@@ -547,7 +549,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
 out_free_ana_log_buf:
 	kfree(ctrl->ana_log_buf);
 out:
-	return -ENOMEM;
+	return error;
 }
 
 void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
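The nvme_mpath_init() fix above records the failure reason in `error` before jumping to the cleanup labels, so the function no longer reports -ENOMEM for every failure path. The same shape in a small standalone example (helper and values invented for illustration):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int read_log(void *buf) { (void)buf; return -EIO; }	/* pretend I/O error */

static int init_path(void)
{
	int error;
	void *buf = malloc(64);

	if (!buf) {
		error = -ENOMEM;	/* record the reason before jumping */
		goto out;
	}

	error = read_log(buf);		/* may fail with its own code, e.g. -EIO */
	if (error)
		goto out_free;

	free(buf);			/* example only: real code would keep buf */
	return 0;

out_free:
	free(buf);
out:
	return error;			/* previously hard-coded to -ENOMEM */
}

int main(void)
{
	printf("init_path() = %d\n", init_path());	/* -5 (-EIO), not -12 */
	return 0;
}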
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index a21caea1e080..2008fa62a373 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -245,6 +245,10 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
 		offset += len;
 		ngrps++;
 	}
+	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
+		if (nvmet_ana_group_enabled[grpid])
+			ngrps++;
+	}
 
 	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
 	hdr.ngrps = cpu_to_le16(ngrps);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 778c4f76a884..2153956a0b20 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -135,7 +135,7 @@ static void dw_pcie_prog_outbound_atu_unroll(struct dw_pcie *pci, int index,
 		if (val & PCIE_ATU_ENABLE)
 			return;
 
-		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+		mdelay(LINK_WAIT_IATU);
 	}
 	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 }
@@ -178,7 +178,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
 		if (val & PCIE_ATU_ENABLE)
 			return;
 
-		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+		mdelay(LINK_WAIT_IATU);
 	}
 	dev_err(pci->dev, "Outbound iATU is not being enabled\n");
 }
@@ -236,7 +236,7 @@ static int dw_pcie_prog_inbound_atu_unroll(struct dw_pcie *pci, int index,
 		if (val & PCIE_ATU_ENABLE)
 			return 0;
 
-		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+		mdelay(LINK_WAIT_IATU);
 	}
 	dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 
@@ -282,7 +282,7 @@ int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int bar,
 		if (val & PCIE_ATU_ENABLE)
 			return 0;
 
-		usleep_range(LINK_WAIT_IATU_MIN, LINK_WAIT_IATU_MAX);
+		mdelay(LINK_WAIT_IATU);
 	}
 	dev_err(pci->dev, "Inbound iATU is not being enabled\n");
 
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 96126fd8403c..9f1a5e399b70 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -26,8 +26,7 @@
 
 /* Parameters for the waiting for iATU enabled routine */
 #define LINK_WAIT_MAX_IATU_RETRIES	5
-#define LINK_WAIT_IATU_MIN		9000
-#define LINK_WAIT_IATU_MAX		10000
+#define LINK_WAIT_IATU			9
 
 /* Synopsys-specific PCIe configuration registers */
 #define PCIE_PORT_LINK_CONTROL		0x710
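The iATU wait above switches from usleep_range() to mdelay() and the constant from microseconds to milliseconds (a 9000-10000 us sleep becomes a 9 ms busy-wait), likely because these helpers can be reached from atomic context where sleeping is not allowed; the worst case is still 5 retries, roughly 45 ms. A kernel-style sketch of such a non-sleeping bounded poll (helper name hypothetical, not a standalone program):

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_MAX_RETRIES	5
#define EXAMPLE_DELAY_MS	9	/* milliseconds per retry */

/* Hypothetical helper: poll an enable bit without sleeping, so it stays
 * safe even if the caller holds a spinlock or runs in atomic context. */
static bool example_wait_enabled(void __iomem *reg, u32 enable_bit)
{
	int retries;

	for (retries = 0; retries < EXAMPLE_MAX_RETRIES; retries++) {
		if (readl(reg) & enable_bit)
			return true;
		mdelay(EXAMPLE_DELAY_MS);	/* busy-wait; usleep_range() may sleep */
	}
	return false;	/* ~45 ms worst case before giving up */
}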
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index c00f82cc54aa..9ba4d12c179c 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -89,6 +89,9 @@ static enum pci_protocol_version_t pci_protocol_version;
89 89
90#define STATUS_REVISION_MISMATCH 0xC0000059 90#define STATUS_REVISION_MISMATCH 0xC0000059
91 91
92/* space for 32bit serial number as string */
93#define SLOT_NAME_SIZE 11
94
92/* 95/*
93 * Message Types 96 * Message Types
94 */ 97 */
@@ -494,6 +497,7 @@ struct hv_pci_dev {
494 struct list_head list_entry; 497 struct list_head list_entry;
495 refcount_t refs; 498 refcount_t refs;
496 enum hv_pcichild_state state; 499 enum hv_pcichild_state state;
500 struct pci_slot *pci_slot;
497 struct pci_function_description desc; 501 struct pci_function_description desc;
498 bool reported_missing; 502 bool reported_missing;
499 struct hv_pcibus_device *hbus; 503 struct hv_pcibus_device *hbus;
@@ -1457,6 +1461,36 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
1457 spin_unlock_irqrestore(&hbus->device_list_lock, flags); 1461 spin_unlock_irqrestore(&hbus->device_list_lock, flags);
1458} 1462}
1459 1463
1464/*
1465 * Assign entries in sysfs pci slot directory.
1466 *
1467 * Note that this function does not need to lock the children list
1468 * because it is called from pci_devices_present_work which
1469 * is serialized with hv_eject_device_work because they are on the
1470 * same ordered workqueue. Therefore hbus->children list will not change
1471 * even when pci_create_slot sleeps.
1472 */
1473static void hv_pci_assign_slots(struct hv_pcibus_device *hbus)
1474{
1475 struct hv_pci_dev *hpdev;
1476 char name[SLOT_NAME_SIZE];
1477 int slot_nr;
1478
1479 list_for_each_entry(hpdev, &hbus->children, list_entry) {
1480 if (hpdev->pci_slot)
1481 continue;
1482
1483 slot_nr = PCI_SLOT(wslot_to_devfn(hpdev->desc.win_slot.slot));
1484 snprintf(name, SLOT_NAME_SIZE, "%u", hpdev->desc.ser);
1485 hpdev->pci_slot = pci_create_slot(hbus->pci_bus, slot_nr,
1486 name, NULL);
1487 if (IS_ERR(hpdev->pci_slot)) {
1488 pr_warn("pci_create slot %s failed\n", name);
1489 hpdev->pci_slot = NULL;
1490 }
1491 }
1492}
1493
1460/** 1494/**
1461 * create_root_hv_pci_bus() - Expose a new root PCI bus 1495 * create_root_hv_pci_bus() - Expose a new root PCI bus
1462 * @hbus: Root PCI bus, as understood by this driver 1496 * @hbus: Root PCI bus, as understood by this driver
@@ -1480,6 +1514,7 @@ static int create_root_hv_pci_bus(struct hv_pcibus_device *hbus)
1480 pci_lock_rescan_remove(); 1514 pci_lock_rescan_remove();
1481 pci_scan_child_bus(hbus->pci_bus); 1515 pci_scan_child_bus(hbus->pci_bus);
1482 pci_bus_assign_resources(hbus->pci_bus); 1516 pci_bus_assign_resources(hbus->pci_bus);
1517 hv_pci_assign_slots(hbus);
1483 pci_bus_add_devices(hbus->pci_bus); 1518 pci_bus_add_devices(hbus->pci_bus);
1484 pci_unlock_rescan_remove(); 1519 pci_unlock_rescan_remove();
1485 hbus->state = hv_pcibus_installed; 1520 hbus->state = hv_pcibus_installed;
@@ -1742,6 +1777,7 @@ static void pci_devices_present_work(struct work_struct *work)
1742 */ 1777 */
1743 pci_lock_rescan_remove(); 1778 pci_lock_rescan_remove();
1744 pci_scan_child_bus(hbus->pci_bus); 1779 pci_scan_child_bus(hbus->pci_bus);
1780 hv_pci_assign_slots(hbus);
1745 pci_unlock_rescan_remove(); 1781 pci_unlock_rescan_remove();
1746 break; 1782 break;
1747 1783
@@ -1858,6 +1894,9 @@ static void hv_eject_device_work(struct work_struct *work)
1858 list_del(&hpdev->list_entry); 1894 list_del(&hpdev->list_entry);
1859 spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags); 1895 spin_unlock_irqrestore(&hpdev->hbus->device_list_lock, flags);
1860 1896
1897 if (hpdev->pci_slot)
1898 pci_destroy_slot(hpdev->pci_slot);
1899
1861 memset(&ctxt, 0, sizeof(ctxt)); 1900 memset(&ctxt, 0, sizeof(ctxt));
1862 ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message; 1901 ejct_pkt = (struct pci_eject_response *)&ctxt.pkt.message;
1863 ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE; 1902 ejct_pkt->message_type.type = PCI_EJECTION_COMPLETE;
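As a quick illustration of why SLOT_NAME_SIZE is 11: the slot name is the 32-bit serial number printed in decimal, and the largest such value, 4294967295, needs 10 digits plus the terminating NUL. A minimal, hypothetical userspace check (not part of the driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 2^32 - 1 = 4294967295 has 10 digits, so 10 + 1 NUL = 11 bytes,
	 * which is where SLOT_NAME_SIZE's value of 11 comes from. */
	char name[11];
	uint32_t ser = UINT32_MAX;
	int n = snprintf(name, sizeof(name), "%u", (unsigned int)ser);

	printf("\"%s\" uses %d of %zu bytes\n", name, n + 1, sizeof(name));
	return 0;
}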
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index ef0b1b6ba86f..12afa7fdf77e 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -457,17 +457,18 @@ static void acpiphp_native_scan_bridge(struct pci_dev *bridge)
457/** 457/**
458 * enable_slot - enable, configure a slot 458 * enable_slot - enable, configure a slot
459 * @slot: slot to be enabled 459 * @slot: slot to be enabled
460 * @bridge: true if enable is for the whole bridge (not a single slot)
460 * 461 *
461 * This function should be called per *physical slot*, 462 * This function should be called per *physical slot*,
462 * not per each slot object in ACPI namespace. 463 * not per each slot object in ACPI namespace.
463 */ 464 */
464static void enable_slot(struct acpiphp_slot *slot) 465static void enable_slot(struct acpiphp_slot *slot, bool bridge)
465{ 466{
466 struct pci_dev *dev; 467 struct pci_dev *dev;
467 struct pci_bus *bus = slot->bus; 468 struct pci_bus *bus = slot->bus;
468 struct acpiphp_func *func; 469 struct acpiphp_func *func;
469 470
470 if (bus->self && hotplug_is_native(bus->self)) { 471 if (bridge && bus->self && hotplug_is_native(bus->self)) {
471 /* 472 /*
472 * If native hotplug is used, it will take care of hotplug 473 * If native hotplug is used, it will take care of hotplug
473 * slot management and resource allocation for hotplug 474 * slot management and resource allocation for hotplug
@@ -701,7 +702,7 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
701 trim_stale_devices(dev); 702 trim_stale_devices(dev);
702 703
703 /* configure all functions */ 704 /* configure all functions */
704 enable_slot(slot); 705 enable_slot(slot, true);
705 } else { 706 } else {
706 disable_slot(slot); 707 disable_slot(slot);
707 } 708 }
@@ -785,7 +786,7 @@ static void hotplug_event(u32 type, struct acpiphp_context *context)
785 if (bridge) 786 if (bridge)
786 acpiphp_check_bridge(bridge); 787 acpiphp_check_bridge(bridge);
787 else if (!(slot->flags & SLOT_IS_GOING_AWAY)) 788 else if (!(slot->flags & SLOT_IS_GOING_AWAY))
788 enable_slot(slot); 789 enable_slot(slot, false);
789 790
790 break; 791 break;
791 792
@@ -973,7 +974,7 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
973 974
974 /* configure all functions */ 975 /* configure all functions */
975 if (!(slot->flags & SLOT_ENABLED)) 976 if (!(slot->flags & SLOT_ENABLED))
976 enable_slot(slot); 977 enable_slot(slot, false);
977 978
978 pci_unlock_rescan_remove(); 979 pci_unlock_rescan_remove();
979 return 0; 980 return 0;
diff --git a/drivers/pinctrl/intel/pinctrl-cannonlake.c b/drivers/pinctrl/intel/pinctrl-cannonlake.c
index fb1afe55bf53..e7f45d96b0cb 100644
--- a/drivers/pinctrl/intel/pinctrl-cannonlake.c
+++ b/drivers/pinctrl/intel/pinctrl-cannonlake.c
@@ -15,10 +15,11 @@
15 15
16#include "pinctrl-intel.h" 16#include "pinctrl-intel.h"
17 17
18#define CNL_PAD_OWN 0x020 18#define CNL_PAD_OWN 0x020
19#define CNL_PADCFGLOCK 0x080 19#define CNL_PADCFGLOCK 0x080
20#define CNL_HOSTSW_OWN 0x0b0 20#define CNL_LP_HOSTSW_OWN 0x0b0
21#define CNL_GPI_IE 0x120 21#define CNL_H_HOSTSW_OWN 0x0c0
22#define CNL_GPI_IE 0x120
22 23
23#define CNL_GPP(r, s, e, g) \ 24#define CNL_GPP(r, s, e, g) \
24 { \ 25 { \
@@ -30,12 +31,12 @@
30 31
31#define CNL_NO_GPIO -1 32#define CNL_NO_GPIO -1
32 33
33#define CNL_COMMUNITY(b, s, e, g) \ 34#define CNL_COMMUNITY(b, s, e, o, g) \
34 { \ 35 { \
35 .barno = (b), \ 36 .barno = (b), \
36 .padown_offset = CNL_PAD_OWN, \ 37 .padown_offset = CNL_PAD_OWN, \
37 .padcfglock_offset = CNL_PADCFGLOCK, \ 38 .padcfglock_offset = CNL_PADCFGLOCK, \
38 .hostown_offset = CNL_HOSTSW_OWN, \ 39 .hostown_offset = (o), \
39 .ie_offset = CNL_GPI_IE, \ 40 .ie_offset = CNL_GPI_IE, \
40 .pin_base = (s), \ 41 .pin_base = (s), \
41 .npins = ((e) - (s) + 1), \ 42 .npins = ((e) - (s) + 1), \
@@ -43,6 +44,12 @@
43 .ngpps = ARRAY_SIZE(g), \ 44 .ngpps = ARRAY_SIZE(g), \
44 } 45 }
45 46
47#define CNLLP_COMMUNITY(b, s, e, g) \
48 CNL_COMMUNITY(b, s, e, CNL_LP_HOSTSW_OWN, g)
49
50#define CNLH_COMMUNITY(b, s, e, g) \
51 CNL_COMMUNITY(b, s, e, CNL_H_HOSTSW_OWN, g)
52
46/* Cannon Lake-H */ 53/* Cannon Lake-H */
47static const struct pinctrl_pin_desc cnlh_pins[] = { 54static const struct pinctrl_pin_desc cnlh_pins[] = {
48 /* GPP_A */ 55 /* GPP_A */
@@ -379,7 +386,7 @@ static const struct intel_padgroup cnlh_community1_gpps[] = {
379static const struct intel_padgroup cnlh_community3_gpps[] = { 386static const struct intel_padgroup cnlh_community3_gpps[] = {
380 CNL_GPP(0, 155, 178, 192), /* GPP_K */ 387 CNL_GPP(0, 155, 178, 192), /* GPP_K */
381 CNL_GPP(1, 179, 202, 224), /* GPP_H */ 388 CNL_GPP(1, 179, 202, 224), /* GPP_H */
382 CNL_GPP(2, 203, 215, 258), /* GPP_E */ 389 CNL_GPP(2, 203, 215, 256), /* GPP_E */
383 CNL_GPP(3, 216, 239, 288), /* GPP_F */ 390 CNL_GPP(3, 216, 239, 288), /* GPP_F */
384 CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */ 391 CNL_GPP(4, 240, 248, CNL_NO_GPIO), /* SPI */
385}; 392};
@@ -442,10 +449,10 @@ static const struct intel_function cnlh_functions[] = {
442}; 449};
443 450
444static const struct intel_community cnlh_communities[] = { 451static const struct intel_community cnlh_communities[] = {
445 CNL_COMMUNITY(0, 0, 50, cnlh_community0_gpps), 452 CNLH_COMMUNITY(0, 0, 50, cnlh_community0_gpps),
446 CNL_COMMUNITY(1, 51, 154, cnlh_community1_gpps), 453 CNLH_COMMUNITY(1, 51, 154, cnlh_community1_gpps),
447 CNL_COMMUNITY(2, 155, 248, cnlh_community3_gpps), 454 CNLH_COMMUNITY(2, 155, 248, cnlh_community3_gpps),
448 CNL_COMMUNITY(3, 249, 298, cnlh_community4_gpps), 455 CNLH_COMMUNITY(3, 249, 298, cnlh_community4_gpps),
449}; 456};
450 457
451static const struct intel_pinctrl_soc_data cnlh_soc_data = { 458static const struct intel_pinctrl_soc_data cnlh_soc_data = {
@@ -803,9 +810,9 @@ static const struct intel_padgroup cnllp_community4_gpps[] = {
803}; 810};
804 811
805static const struct intel_community cnllp_communities[] = { 812static const struct intel_community cnllp_communities[] = {
806 CNL_COMMUNITY(0, 0, 67, cnllp_community0_gpps), 813 CNLLP_COMMUNITY(0, 0, 67, cnllp_community0_gpps),
807 CNL_COMMUNITY(1, 68, 180, cnllp_community1_gpps), 814 CNLLP_COMMUNITY(1, 68, 180, cnllp_community1_gpps),
808 CNL_COMMUNITY(2, 181, 243, cnllp_community4_gpps), 815 CNLLP_COMMUNITY(2, 181, 243, cnllp_community4_gpps),
809}; 816};
810 817
811static const struct intel_pinctrl_soc_data cnllp_soc_data = { 818static const struct intel_pinctrl_soc_data cnllp_soc_data = {
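The CNL_COMMUNITY() rework above is the usual macro-specialization pattern: the base macro gains an extra parameter and thin per-SoC wrappers pin it down. A small, self-contained illustration with hypothetical names (not the driver's actual structures):

#include <stdio.h>

/* Hypothetical offsets standing in for the LP/H host-ownership registers. */
#define LP_HOSTSW_OWN 0x0b0
#define H_HOSTSW_OWN  0x0c0

struct community { int barno; int hostown_offset; };

/* The base macro takes the variable offset ... */
#define COMMUNITY(b, o)    { .barno = (b), .hostown_offset = (o) }
/* ... and per-SoC wrappers fix it, mirroring CNLLP_/CNLH_COMMUNITY. */
#define LP_COMMUNITY(b)    COMMUNITY(b, LP_HOSTSW_OWN)
#define H_COMMUNITY(b)     COMMUNITY(b, H_HOSTSW_OWN)

static const struct community lp[] = { LP_COMMUNITY(0), LP_COMMUNITY(1) };
static const struct community h[]  = { H_COMMUNITY(0) };

int main(void)
{
	printf("lp[1] hostown=0x%03x, h[0] hostown=0x%03x\n",
	       lp[1].hostown_offset, h[0].hostown_offset);
	return 0;
}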
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 62b009b27eda..1ea3438ea67e 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -747,13 +747,63 @@ static const struct pinctrl_desc intel_pinctrl_desc = {
747 .owner = THIS_MODULE, 747 .owner = THIS_MODULE,
748}; 748};
749 749
750/**
751 * intel_gpio_to_pin() - Translate from GPIO offset to pin number
752 * @pctrl: Pinctrl structure
753 * @offset: GPIO offset from gpiolib
 754 * @community: Community is filled here if not %NULL
755 * @padgrp: Pad group is filled here if not %NULL
756 *
757 * When coming through gpiolib irqchip, the GPIO offset is not
758 * automatically translated to pinctrl pin number. This function can be
759 * used to find out the corresponding pinctrl pin.
760 */
761static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset,
762 const struct intel_community **community,
763 const struct intel_padgroup **padgrp)
764{
765 int i;
766
767 for (i = 0; i < pctrl->ncommunities; i++) {
768 const struct intel_community *comm = &pctrl->communities[i];
769 int j;
770
771 for (j = 0; j < comm->ngpps; j++) {
772 const struct intel_padgroup *pgrp = &comm->gpps[j];
773
774 if (pgrp->gpio_base < 0)
775 continue;
776
777 if (offset >= pgrp->gpio_base &&
778 offset < pgrp->gpio_base + pgrp->size) {
779 int pin;
780
781 pin = pgrp->base + offset - pgrp->gpio_base;
782 if (community)
783 *community = comm;
784 if (padgrp)
785 *padgrp = pgrp;
786
787 return pin;
788 }
789 }
790 }
791
792 return -EINVAL;
793}
794
750static int intel_gpio_get(struct gpio_chip *chip, unsigned offset) 795static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
751{ 796{
752 struct intel_pinctrl *pctrl = gpiochip_get_data(chip); 797 struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
753 void __iomem *reg; 798 void __iomem *reg;
754 u32 padcfg0; 799 u32 padcfg0;
800 int pin;
755 801
756 reg = intel_get_padcfg(pctrl, offset, PADCFG0); 802 pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
803 if (pin < 0)
804 return -EINVAL;
805
806 reg = intel_get_padcfg(pctrl, pin, PADCFG0);
757 if (!reg) 807 if (!reg)
758 return -EINVAL; 808 return -EINVAL;
759 809
@@ -770,8 +820,13 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
770 unsigned long flags; 820 unsigned long flags;
771 void __iomem *reg; 821 void __iomem *reg;
772 u32 padcfg0; 822 u32 padcfg0;
823 int pin;
773 824
774 reg = intel_get_padcfg(pctrl, offset, PADCFG0); 825 pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
826 if (pin < 0)
827 return;
828
829 reg = intel_get_padcfg(pctrl, pin, PADCFG0);
775 if (!reg) 830 if (!reg)
776 return; 831 return;
777 832
@@ -790,8 +845,13 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
790 struct intel_pinctrl *pctrl = gpiochip_get_data(chip); 845 struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
791 void __iomem *reg; 846 void __iomem *reg;
792 u32 padcfg0; 847 u32 padcfg0;
848 int pin;
793 849
794 reg = intel_get_padcfg(pctrl, offset, PADCFG0); 850 pin = intel_gpio_to_pin(pctrl, offset, NULL, NULL);
851 if (pin < 0)
852 return -EINVAL;
853
854 reg = intel_get_padcfg(pctrl, pin, PADCFG0);
795 if (!reg) 855 if (!reg)
796 return -EINVAL; 856 return -EINVAL;
797 857
@@ -827,81 +887,6 @@ static const struct gpio_chip intel_gpio_chip = {
827 .set_config = gpiochip_generic_config, 887 .set_config = gpiochip_generic_config,
828}; 888};
829 889
830/**
831 * intel_gpio_to_pin() - Translate from GPIO offset to pin number
832 * @pctrl: Pinctrl structure
833 * @offset: GPIO offset from gpiolib
834 * @commmunity: Community is filled here if not %NULL
835 * @padgrp: Pad group is filled here if not %NULL
836 *
837 * When coming through gpiolib irqchip, the GPIO offset is not
838 * automatically translated to pinctrl pin number. This function can be
839 * used to find out the corresponding pinctrl pin.
840 */
841static int intel_gpio_to_pin(struct intel_pinctrl *pctrl, unsigned offset,
842 const struct intel_community **community,
843 const struct intel_padgroup **padgrp)
844{
845 int i;
846
847 for (i = 0; i < pctrl->ncommunities; i++) {
848 const struct intel_community *comm = &pctrl->communities[i];
849 int j;
850
851 for (j = 0; j < comm->ngpps; j++) {
852 const struct intel_padgroup *pgrp = &comm->gpps[j];
853
854 if (pgrp->gpio_base < 0)
855 continue;
856
857 if (offset >= pgrp->gpio_base &&
858 offset < pgrp->gpio_base + pgrp->size) {
859 int pin;
860
861 pin = pgrp->base + offset - pgrp->gpio_base;
862 if (community)
863 *community = comm;
864 if (padgrp)
865 *padgrp = pgrp;
866
867 return pin;
868 }
869 }
870 }
871
872 return -EINVAL;
873}
874
875static int intel_gpio_irq_reqres(struct irq_data *d)
876{
877 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
878 struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
879 int pin;
880 int ret;
881
882 pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
883 if (pin >= 0) {
884 ret = gpiochip_lock_as_irq(gc, pin);
885 if (ret) {
886 dev_err(pctrl->dev, "unable to lock HW IRQ %d for IRQ\n",
887 pin);
888 return ret;
889 }
890 }
891 return 0;
892}
893
894static void intel_gpio_irq_relres(struct irq_data *d)
895{
896 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
897 struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
898 int pin;
899
900 pin = intel_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
901 if (pin >= 0)
902 gpiochip_unlock_as_irq(gc, pin);
903}
904
905static void intel_gpio_irq_ack(struct irq_data *d) 890static void intel_gpio_irq_ack(struct irq_data *d)
906{ 891{
907 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 892 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1117,8 +1102,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
1117 1102
1118static struct irq_chip intel_gpio_irqchip = { 1103static struct irq_chip intel_gpio_irqchip = {
1119 .name = "intel-gpio", 1104 .name = "intel-gpio",
1120 .irq_request_resources = intel_gpio_irq_reqres,
1121 .irq_release_resources = intel_gpio_irq_relres,
1122 .irq_enable = intel_gpio_irq_enable, 1105 .irq_enable = intel_gpio_irq_enable,
1123 .irq_ack = intel_gpio_irq_ack, 1106 .irq_ack = intel_gpio_irq_ack,
1124 .irq_mask = intel_gpio_irq_mask, 1107 .irq_mask = intel_gpio_irq_mask,
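intel_gpio_to_pin() is moved above its new callers so the get/set/get_direction paths can translate a gpiolib offset into a pinctrl pin before touching PADCFG0. The walk itself is simple; here is a stripped-down, hypothetical userspace version of the same lookup over toy structures:

#include <stdio.h>

/* Toy stand-ins for struct intel_padgroup / intel_community. */
struct padgroup { int base; int gpio_base; int size; };
struct community { const struct padgroup *gpps; int ngpps; };

/* Same walk as intel_gpio_to_pin(): find the pad group whose GPIO window
 * contains the offset, then rebase the offset onto pinctrl pin numbers. */
static int gpio_to_pin(const struct community *comms, int ncomms, int offset)
{
	for (int i = 0; i < ncomms; i++)
		for (int j = 0; j < comms[i].ngpps; j++) {
			const struct padgroup *g = &comms[i].gpps[j];

			if (g->gpio_base < 0)
				continue;
			if (offset >= g->gpio_base &&
			    offset < g->gpio_base + g->size)
				return g->base + offset - g->gpio_base;
		}
	return -1;
}

int main(void)
{
	const struct padgroup gpps[] = {
		{ .base = 0,  .gpio_base = 0,  .size = 24 },
		{ .base = 24, .gpio_base = 32, .size = 24 }, /* hole in GPIO space */
	};
	const struct community comm = { .gpps = gpps, .ngpps = 2 };

	printf("offset 35 -> pin %d\n", gpio_to_pin(&comm, 1, 35)); /* 27 */
	printf("offset 30 -> pin %d\n", gpio_to_pin(&comm, 1, 30)); /* -1 */
	return 0;
}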
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 41ccc759b8b8..1425c2874d40 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -348,21 +348,12 @@ static void amd_gpio_irq_enable(struct irq_data *d)
348 unsigned long flags; 348 unsigned long flags;
349 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 349 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
350 struct amd_gpio *gpio_dev = gpiochip_get_data(gc); 350 struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
351 u32 mask = BIT(INTERRUPT_ENABLE_OFF) | BIT(INTERRUPT_MASK_OFF);
352 351
353 raw_spin_lock_irqsave(&gpio_dev->lock, flags); 352 raw_spin_lock_irqsave(&gpio_dev->lock, flags);
354 pin_reg = readl(gpio_dev->base + (d->hwirq)*4); 353 pin_reg = readl(gpio_dev->base + (d->hwirq)*4);
355 pin_reg |= BIT(INTERRUPT_ENABLE_OFF); 354 pin_reg |= BIT(INTERRUPT_ENABLE_OFF);
356 pin_reg |= BIT(INTERRUPT_MASK_OFF); 355 pin_reg |= BIT(INTERRUPT_MASK_OFF);
357 writel(pin_reg, gpio_dev->base + (d->hwirq)*4); 356 writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
358 /*
359 * When debounce logic is enabled it takes ~900 us before interrupts
360 * can be enabled. During this "debounce warm up" period the
361 * "INTERRUPT_ENABLE" bit will read as 0. Poll the bit here until it
362 * reads back as 1, signaling that interrupts are now enabled.
363 */
364 while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
365 continue;
366 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); 357 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
367} 358}
368 359
@@ -426,7 +417,7 @@ static void amd_gpio_irq_eoi(struct irq_data *d)
426static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) 417static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
427{ 418{
428 int ret = 0; 419 int ret = 0;
429 u32 pin_reg; 420 u32 pin_reg, pin_reg_irq_en, mask;
430 unsigned long flags, irq_flags; 421 unsigned long flags, irq_flags;
431 struct gpio_chip *gc = irq_data_get_irq_chip_data(d); 422 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
432 struct amd_gpio *gpio_dev = gpiochip_get_data(gc); 423 struct amd_gpio *gpio_dev = gpiochip_get_data(gc);
@@ -495,6 +486,28 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type)
495 } 486 }
496 487
497 pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF; 488 pin_reg |= CLR_INTR_STAT << INTERRUPT_STS_OFF;
489 /*
490 * If WAKE_INT_MASTER_REG.MaskStsEn is set, a software write to the
491 * debounce registers of any GPIO will block wake/interrupt status
 492	 * generation for *all* GPIOs for a length of time that depends on
493 * WAKE_INT_MASTER_REG.MaskStsLength[11:0]. During this period the
494 * INTERRUPT_ENABLE bit will read as 0.
495 *
496 * We temporarily enable irq for the GPIO whose configuration is
497 * changing, and then wait for it to read back as 1 to know when
498 * debounce has settled and then disable the irq again.
499 * We do this polling with the spinlock held to ensure other GPIO
500 * access routines do not read an incorrect value for the irq enable
501 * bit of other GPIOs. We keep the GPIO masked while polling to avoid
502 * spurious irqs, and disable the irq again after polling.
503 */
504 mask = BIT(INTERRUPT_ENABLE_OFF);
505 pin_reg_irq_en = pin_reg;
506 pin_reg_irq_en |= mask;
507 pin_reg_irq_en &= ~BIT(INTERRUPT_MASK_OFF);
508 writel(pin_reg_irq_en, gpio_dev->base + (d->hwirq)*4);
509 while ((readl(gpio_dev->base + (d->hwirq)*4) & mask) != mask)
510 continue;
498 writel(pin_reg, gpio_dev->base + (d->hwirq)*4); 511 writel(pin_reg, gpio_dev->base + (d->hwirq)*4);
499 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); 512 raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
500 513
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index d975462a4c57..f10af5c383c5 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -536,6 +536,7 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
536 if (obj && obj->type == ACPI_TYPE_INTEGER) 536 if (obj && obj->type == ACPI_TYPE_INTEGER)
537 *out_data = (u32) obj->integer.value; 537 *out_data = (u32) obj->integer.value;
538 } 538 }
539 kfree(output.pointer);
539 return status; 540 return status;
540 541
541} 542}
diff --git a/drivers/platform/x86/dell-smbios-wmi.c b/drivers/platform/x86/dell-smbios-wmi.c
index 88afe5651d24..cf2229ece9ff 100644
--- a/drivers/platform/x86/dell-smbios-wmi.c
+++ b/drivers/platform/x86/dell-smbios-wmi.c
@@ -78,6 +78,7 @@ static int run_smbios_call(struct wmi_device *wdev)
78 dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n", 78 dev_dbg(&wdev->dev, "result: [%08x,%08x,%08x,%08x]\n",
79 priv->buf->std.output[0], priv->buf->std.output[1], 79 priv->buf->std.output[0], priv->buf->std.output[1],
80 priv->buf->std.output[2], priv->buf->std.output[3]); 80 priv->buf->std.output[2], priv->buf->std.output[3]);
81 kfree(output.pointer);
81 82
82 return 0; 83 return 0;
83} 84}
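Both WMI hunks above plug the same leak pattern: with an ACPI_ALLOCATE_BUFFER-style output, the evaluation call allocates the result object into the caller's buffer, so the caller must kfree() it on every exit path, including the successful one. A hypothetical userspace analogue of the fixed shape:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Stand-in for an ACPI call that heap-allocates the result for the caller. */
static int fake_evaluate(char **out)
{
	*out = strdup("result-object");
	return *out ? 0 : -1;
}

static int run_call(unsigned int *out_data)
{
	char *output = NULL;
	int status = fake_evaluate(&output);

	if (status == 0 && output)
		*out_data = (unsigned int)strlen(output);

	free(output);		/* the fix: release the buffer on every path */
	return status;
}

int main(void)
{
	unsigned int v = 0;

	printf("status=%d out=%u\n", run_call(&v), v);
	return 0;
}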
diff --git a/drivers/regulator/bd71837-regulator.c b/drivers/regulator/bd71837-regulator.c
index 0f8ac8dec3e1..a1bd8aaf4d98 100644
--- a/drivers/regulator/bd71837-regulator.c
+++ b/drivers/regulator/bd71837-regulator.c
@@ -569,6 +569,25 @@ static int bd71837_probe(struct platform_device *pdev)
569 BD71837_REG_REGLOCK); 569 BD71837_REG_REGLOCK);
570 } 570 }
571 571
572 /*
573 * There is a HW quirk in BD71837. The shutdown sequence timings for
574 * bucks/LDOs which are controlled via register interface are changed.
575 * At PMIC poweroff the voltage for BUCK6/7 is cut immediately at the
576 * beginning of shut-down sequence. As bucks 6 and 7 are parent
577 * supplies for LDO5 and LDO6 - this causes LDO5/6 voltage
 578	 * monitoring to erroneously detect under-voltage and force PMIC to
 579	 * emergency state instead of poweroff. In order to avoid this we
 580	 * disable voltage monitoring for LDO5 and LDO6.
581 */
582 err = regmap_update_bits(pmic->mfd->regmap, BD718XX_REG_MVRFLTMASK2,
583 BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80,
584 BD718XX_LDO5_VRMON80 | BD718XX_LDO6_VRMON80);
585 if (err) {
586 dev_err(&pmic->pdev->dev,
587 "Failed to disable voltage monitoring\n");
588 goto err;
589 }
590
572 for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) { 591 for (i = 0; i < ARRAY_SIZE(pmic_regulator_inits); i++) {
573 592
574 struct regulator_desc *desc; 593 struct regulator_desc *desc;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index bb1324f93143..9577d8941846 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -3161,7 +3161,7 @@ static inline int regulator_suspend_toggle(struct regulator_dev *rdev,
3161 if (!rstate->changeable) 3161 if (!rstate->changeable)
3162 return -EPERM; 3162 return -EPERM;
3163 3163
3164 rstate->enabled = en; 3164 rstate->enabled = (en) ? ENABLE_IN_SUSPEND : DISABLE_IN_SUSPEND;
3165 3165
3166 return 0; 3166 return 0;
3167} 3167}
@@ -4395,13 +4395,13 @@ regulator_register(const struct regulator_desc *regulator_desc,
4395 !rdev->desc->fixed_uV) 4395 !rdev->desc->fixed_uV)
4396 rdev->is_switch = true; 4396 rdev->is_switch = true;
4397 4397
4398 dev_set_drvdata(&rdev->dev, rdev);
4398 ret = device_register(&rdev->dev); 4399 ret = device_register(&rdev->dev);
4399 if (ret != 0) { 4400 if (ret != 0) {
4400 put_device(&rdev->dev); 4401 put_device(&rdev->dev);
4401 goto unset_supplies; 4402 goto unset_supplies;
4402 } 4403 }
4403 4404
4404 dev_set_drvdata(&rdev->dev, rdev);
4405 rdev_init_debugfs(rdev); 4405 rdev_init_debugfs(rdev);
4406 4406
4407 /* try to resolve regulators supply since a new one was registered */ 4407 /* try to resolve regulators supply since a new one was registered */
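Moving dev_set_drvdata() ahead of device_register() matters because, presumably, sysfs attributes and uevent handlers can run as soon as the device is registered and expect the driver data to already be valid. A toy illustration of the ordering, with hypothetical names:

#include <stdio.h>

struct device { void *drvdata; };

/* Pretend attribute callback that may run as soon as the device is visible. */
static void show_attr(struct device *dev)
{
	printf("drvdata = %p\n", dev->drvdata);	/* NULL if set too late */
}

static void device_register_sim(struct device *dev)
{
	show_attr(dev);		/* sysfs/uevent consumers may run here already */
}

int main(void)
{
	struct device d = { 0 };

	d.drvdata = &d;			/* publish drvdata first ...           */
	device_register_sim(&d);	/* ... then make the device visible    */
	return 0;
}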
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 638f17d4c848..210fc20f7de7 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -213,8 +213,6 @@ static void of_get_regulation_constraints(struct device_node *np,
213 else if (of_property_read_bool(suspend_np, 213 else if (of_property_read_bool(suspend_np,
214 "regulator-off-in-suspend")) 214 "regulator-off-in-suspend"))
215 suspend_state->enabled = DISABLE_IN_SUSPEND; 215 suspend_state->enabled = DISABLE_IN_SUSPEND;
216 else
217 suspend_state->enabled = DO_NOTHING_IN_SUSPEND;
218 216
219 if (!of_property_read_u32(np, "regulator-suspend-min-microvolt", 217 if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
220 &pval)) 218 &pval))
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index fac377320158..f42a619198c4 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3474,11 +3474,10 @@ static int ibmvscsis_probe(struct vio_dev *vdev,
3474 vscsi->dds.window[LOCAL].liobn, 3474 vscsi->dds.window[LOCAL].liobn,
3475 vscsi->dds.window[REMOTE].liobn); 3475 vscsi->dds.window[REMOTE].liobn);
3476 3476
3477 strcpy(vscsi->eye, "VSCSI "); 3477 snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
3478 strncat(vscsi->eye, vdev->name, MAX_EYE);
3479 3478
3480 vscsi->dds.unit_id = vdev->unit_address; 3479 vscsi->dds.unit_id = vdev->unit_address;
3481 strncpy(vscsi->dds.partition_name, partition_name, 3480 strscpy(vscsi->dds.partition_name, partition_name,
3482 sizeof(vscsi->dds.partition_name)); 3481 sizeof(vscsi->dds.partition_name));
3483 vscsi->dds.partition_num = partition_number; 3482 vscsi->dds.partition_num = partition_number;
3484 3483
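The ibmvscsi_tgt change swaps an unbounded strcpy()+strncat() pair for a single length-bounded format: snprintf() limits the whole destination and always NUL-terminates, whereas strncat()'s size argument only bounds the appended part. A small standalone demonstration with a hypothetical buffer and name:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char eye[16];
	const char *name = "this-device-name-is-much-too-long";

	/* snprintf bounds the entire destination and NUL-terminates, unlike
	 * the old strcpy()+strncat() pair where the limit applied only to
	 * the appended bytes. */
	snprintf(eye, sizeof(eye), "VSCSI %s", name);
	printf("\"%s\" (len %zu, cap %zu)\n", eye, strlen(eye), sizeof(eye));
	return 0;
}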
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index f2ec80b0ffc0..271990bc065b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -3335,6 +3335,65 @@ static void ipr_release_dump(struct kref *kref)
3335 LEAVE; 3335 LEAVE;
3336} 3336}
3337 3337
3338static void ipr_add_remove_thread(struct work_struct *work)
3339{
3340 unsigned long lock_flags;
3341 struct ipr_resource_entry *res;
3342 struct scsi_device *sdev;
3343 struct ipr_ioa_cfg *ioa_cfg =
3344 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
3345 u8 bus, target, lun;
3346 int did_work;
3347
3348 ENTER;
3349 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3350
3351restart:
3352 do {
3353 did_work = 0;
3354 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3355 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3356 return;
3357 }
3358
3359 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3360 if (res->del_from_ml && res->sdev) {
3361 did_work = 1;
3362 sdev = res->sdev;
3363 if (!scsi_device_get(sdev)) {
3364 if (!res->add_to_ml)
3365 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3366 else
3367 res->del_from_ml = 0;
3368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3369 scsi_remove_device(sdev);
3370 scsi_device_put(sdev);
3371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3372 }
3373 break;
3374 }
3375 }
3376 } while (did_work);
3377
3378 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3379 if (res->add_to_ml) {
3380 bus = res->bus;
3381 target = res->target;
3382 lun = res->lun;
3383 res->add_to_ml = 0;
3384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3385 scsi_add_device(ioa_cfg->host, bus, target, lun);
3386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3387 goto restart;
3388 }
3389 }
3390
3391 ioa_cfg->scan_done = 1;
3392 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3393 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3394 LEAVE;
3395}
3396
3338/** 3397/**
3339 * ipr_worker_thread - Worker thread 3398 * ipr_worker_thread - Worker thread
3340 * @work: ioa config struct 3399 * @work: ioa config struct
@@ -3349,13 +3408,9 @@ static void ipr_release_dump(struct kref *kref)
3349static void ipr_worker_thread(struct work_struct *work) 3408static void ipr_worker_thread(struct work_struct *work)
3350{ 3409{
3351 unsigned long lock_flags; 3410 unsigned long lock_flags;
3352 struct ipr_resource_entry *res;
3353 struct scsi_device *sdev;
3354 struct ipr_dump *dump; 3411 struct ipr_dump *dump;
3355 struct ipr_ioa_cfg *ioa_cfg = 3412 struct ipr_ioa_cfg *ioa_cfg =
3356 container_of(work, struct ipr_ioa_cfg, work_q); 3413 container_of(work, struct ipr_ioa_cfg, work_q);
3357 u8 bus, target, lun;
3358 int did_work;
3359 3414
3360 ENTER; 3415 ENTER;
3361 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3416 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -3393,49 +3448,9 @@ static void ipr_worker_thread(struct work_struct *work)
3393 return; 3448 return;
3394 } 3449 }
3395 3450
3396restart: 3451 schedule_work(&ioa_cfg->scsi_add_work_q);
3397 do {
3398 did_work = 0;
3399 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
3400 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3401 return;
3402 }
3403 3452
3404 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3405 if (res->del_from_ml && res->sdev) {
3406 did_work = 1;
3407 sdev = res->sdev;
3408 if (!scsi_device_get(sdev)) {
3409 if (!res->add_to_ml)
3410 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3411 else
3412 res->del_from_ml = 0;
3413 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3414 scsi_remove_device(sdev);
3415 scsi_device_put(sdev);
3416 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3417 }
3418 break;
3419 }
3420 }
3421 } while (did_work);
3422
3423 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3424 if (res->add_to_ml) {
3425 bus = res->bus;
3426 target = res->target;
3427 lun = res->lun;
3428 res->add_to_ml = 0;
3429 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3430 scsi_add_device(ioa_cfg->host, bus, target, lun);
3431 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3432 goto restart;
3433 }
3434 }
3435
3436 ioa_cfg->scan_done = 1;
3437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3453 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3438 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3439 LEAVE; 3454 LEAVE;
3440} 3455}
3441 3456
@@ -9933,6 +9948,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9933 INIT_LIST_HEAD(&ioa_cfg->free_res_q); 9948 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9934 INIT_LIST_HEAD(&ioa_cfg->used_res_q); 9949 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9935 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread); 9950 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9951 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
9936 init_waitqueue_head(&ioa_cfg->reset_wait_q); 9952 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9937 init_waitqueue_head(&ioa_cfg->msi_wait_q); 9953 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9938 init_waitqueue_head(&ioa_cfg->eeh_wait_q); 9954 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
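The ipr change moves the slow scsi_add_device()/scsi_remove_device() scan out of ipr_worker_thread into a dedicated work item. A compressed sketch of that shape, with hypothetical names and only the structure kept (none of the driver's real locking or resource lists):

#include <linux/kernel.h>
#include <linux/workqueue.h>

/* Hypothetical driver-private struct mirroring the split: the slow device
 * add/remove scan gets its own work item so the main worker returns quickly. */
struct my_cfg {
	struct work_struct work_q;		/* fast bookkeeping */
	struct work_struct scsi_add_work_q;	/* slow add/remove scan */
};

static void my_scan_devices(struct my_cfg *cfg)
{
	/* scsi_add_device()/scsi_remove_device() loop would live here */
}

static void my_add_remove_work(struct work_struct *work)
{
	struct my_cfg *cfg = container_of(work, struct my_cfg, scsi_add_work_q);

	my_scan_devices(cfg);
}

static void my_worker(struct work_struct *work)
{
	struct my_cfg *cfg = container_of(work, struct my_cfg, work_q);

	/* ...fast work..., then hand the slow part to its own work item */
	schedule_work(&cfg->scsi_add_work_q);
}

static void my_init(struct my_cfg *cfg)
{
	INIT_WORK(&cfg->work_q, my_worker);
	INIT_WORK(&cfg->scsi_add_work_q, my_add_remove_work);
}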
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 68afbbde54d3..f6baa2351313 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1575,6 +1575,7 @@ struct ipr_ioa_cfg {
1575 u8 saved_mode_page_len; 1575 u8 saved_mode_page_len;
1576 1576
1577 struct work_struct work_q; 1577 struct work_struct work_q;
1578 struct work_struct scsi_add_work_q;
1578 struct workqueue_struct *reset_work_q; 1579 struct workqueue_struct *reset_work_q;
1579 1580
1580 wait_queue_head_t reset_wait_q; 1581 wait_queue_head_t reset_wait_q;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 057a60abe664..1a6ed9b0a249 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -360,12 +360,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
360 goto buffer_done; 360 goto buffer_done;
361 361
362 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 362 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
363 nrport = NULL;
364 spin_lock(&vport->phba->hbalock);
363 rport = lpfc_ndlp_get_nrport(ndlp); 365 rport = lpfc_ndlp_get_nrport(ndlp);
364 if (!rport) 366 if (rport)
365 continue; 367 nrport = rport->remoteport;
366 368 spin_unlock(&vport->phba->hbalock);
367 /* local short-hand pointer. */
368 nrport = rport->remoteport;
369 if (!nrport) 369 if (!nrport)
370 continue; 370 continue;
371 371
@@ -3386,6 +3386,7 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3386 struct lpfc_nodelist *ndlp; 3386 struct lpfc_nodelist *ndlp;
3387#if (IS_ENABLED(CONFIG_NVME_FC)) 3387#if (IS_ENABLED(CONFIG_NVME_FC))
3388 struct lpfc_nvme_rport *rport; 3388 struct lpfc_nvme_rport *rport;
3389 struct nvme_fc_remote_port *remoteport = NULL;
3389#endif 3390#endif
3390 3391
3391 shost = lpfc_shost_from_vport(vport); 3392 shost = lpfc_shost_from_vport(vport);
@@ -3396,8 +3397,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
3396 if (ndlp->rport) 3397 if (ndlp->rport)
3397 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo; 3398 ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
3398#if (IS_ENABLED(CONFIG_NVME_FC)) 3399#if (IS_ENABLED(CONFIG_NVME_FC))
3400 spin_lock(&vport->phba->hbalock);
3399 rport = lpfc_ndlp_get_nrport(ndlp); 3401 rport = lpfc_ndlp_get_nrport(ndlp);
3400 if (rport) 3402 if (rport)
3403 remoteport = rport->remoteport;
3404 spin_unlock(&vport->phba->hbalock);
3405 if (remoteport)
3401 nvme_fc_set_remoteport_devloss(rport->remoteport, 3406 nvme_fc_set_remoteport_devloss(rport->remoteport,
3402 vport->cfg_devloss_tmo); 3407 vport->cfg_devloss_tmo);
3403#endif 3408#endif
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 9df0c051349f..aec5b10a8c85 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -551,7 +551,7 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
551 unsigned char *statep; 551 unsigned char *statep;
552 struct nvme_fc_local_port *localport; 552 struct nvme_fc_local_port *localport;
553 struct lpfc_nvmet_tgtport *tgtp; 553 struct lpfc_nvmet_tgtport *tgtp;
554 struct nvme_fc_remote_port *nrport; 554 struct nvme_fc_remote_port *nrport = NULL;
555 struct lpfc_nvme_rport *rport; 555 struct lpfc_nvme_rport *rport;
556 556
557 cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE); 557 cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
@@ -696,11 +696,11 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
696 len += snprintf(buf + len, size - len, "\tRport List:\n"); 696 len += snprintf(buf + len, size - len, "\tRport List:\n");
697 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { 697 list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
698 /* local short-hand pointer. */ 698 /* local short-hand pointer. */
699 spin_lock(&phba->hbalock);
699 rport = lpfc_ndlp_get_nrport(ndlp); 700 rport = lpfc_ndlp_get_nrport(ndlp);
700 if (!rport) 701 if (rport)
701 continue; 702 nrport = rport->remoteport;
702 703 spin_unlock(&phba->hbalock);
703 nrport = rport->remoteport;
704 if (!nrport) 704 if (!nrport)
705 continue; 705 continue;
706 706
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 028462e5994d..918ae18ef8a8 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -2725,7 +2725,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2725 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn); 2725 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2726 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn); 2726 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2727 2727
2728 spin_lock_irq(&vport->phba->hbalock);
2728 oldrport = lpfc_ndlp_get_nrport(ndlp); 2729 oldrport = lpfc_ndlp_get_nrport(ndlp);
2730 spin_unlock_irq(&vport->phba->hbalock);
2729 if (!oldrport) 2731 if (!oldrport)
2730 lpfc_nlp_get(ndlp); 2732 lpfc_nlp_get(ndlp);
2731 2733
@@ -2840,7 +2842,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2840 struct nvme_fc_local_port *localport; 2842 struct nvme_fc_local_port *localport;
2841 struct lpfc_nvme_lport *lport; 2843 struct lpfc_nvme_lport *lport;
2842 struct lpfc_nvme_rport *rport; 2844 struct lpfc_nvme_rport *rport;
2843 struct nvme_fc_remote_port *remoteport; 2845 struct nvme_fc_remote_port *remoteport = NULL;
2844 2846
2845 localport = vport->localport; 2847 localport = vport->localport;
2846 2848
@@ -2854,11 +2856,14 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2854 if (!lport) 2856 if (!lport)
2855 goto input_err; 2857 goto input_err;
2856 2858
2859 spin_lock_irq(&vport->phba->hbalock);
2857 rport = lpfc_ndlp_get_nrport(ndlp); 2860 rport = lpfc_ndlp_get_nrport(ndlp);
2858 if (!rport) 2861 if (rport)
2862 remoteport = rport->remoteport;
2863 spin_unlock_irq(&vport->phba->hbalock);
2864 if (!remoteport)
2859 goto input_err; 2865 goto input_err;
2860 2866
2861 remoteport = rport->remoteport;
2862 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, 2867 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2863 "6033 Unreg nvme remoteport %p, portname x%llx, " 2868 "6033 Unreg nvme remoteport %p, portname x%llx, "
2864 "port_id x%06x, portstate x%x port type x%x\n", 2869 "port_id x%06x, portstate x%x port type x%x\n",
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index fecf96f0225c..199d3ba1916d 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -374,8 +374,8 @@ struct atio_from_isp {
374static inline int fcpcmd_is_corrupted(struct atio *atio) 374static inline int fcpcmd_is_corrupted(struct atio *atio)
375{ 375{
376 if (atio->entry_type == ATIO_TYPE7 && 376 if (atio->entry_type == ATIO_TYPE7 &&
377 (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) < 377 ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
378 FCP_CMD_LENGTH_MIN)) 378 FCP_CMD_LENGTH_MIN))
379 return 1; 379 return 1;
380 else 380 else
381 return 0; 381 return 0;
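The qla2xxx one-liner is an ordering fix for byte order: the 16-bit field must be converted from little-endian before masking, otherwise the mask selects the wrong bits on big-endian hosts (le16_to_cpu() is a no-op on x86, which is why the bug is invisible there). A standalone demonstration using an explicit byte swap to stand in for a big-endian le16_to_cpu(); the constants are illustrative, see qla_target.h for the real values:

#include <stdint.h>
#include <stdio.h>

#define FCP_CMD_LENGTH_MASK 0x0fff	/* illustrative */
#define FCP_CMD_LENGTH_MIN  32		/* illustrative */

/* On a big-endian host le16_to_cpu() is a byte swap. */
static uint16_t le16_to_cpu_be(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
	/* Wire value whose true CPU form is 0x1008: attr nibble 1, length 8.
	 * A big-endian raw load of the two little-endian bytes sees 0x0810. */
	uint16_t raw_be_load = 0x0810;

	uint16_t buggy = le16_to_cpu_be(raw_be_load & FCP_CMD_LENGTH_MASK);
	uint16_t fixed = le16_to_cpu_be(raw_be_load) & FCP_CMD_LENGTH_MASK;

	printf("mask-then-swap: %u (corrupted? %s)\n",
	       buggy, buggy < FCP_CMD_LENGTH_MIN ? "yes" : "missed");
	printf("swap-then-mask: %u (corrupted? %s)\n",
	       fixed, fixed < FCP_CMD_LENGTH_MIN ? "yes" : "missed");
	return 0;
}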
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b79b366a94f7..4a57ffecc7e6 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1276,7 +1276,8 @@ static int sd_init_command(struct scsi_cmnd *cmd)
1276 case REQ_OP_ZONE_RESET: 1276 case REQ_OP_ZONE_RESET:
1277 return sd_zbc_setup_reset_cmnd(cmd); 1277 return sd_zbc_setup_reset_cmnd(cmd);
1278 default: 1278 default:
1279 BUG(); 1279 WARN_ON_ONCE(1);
1280 return BLKPREP_KILL;
1280 } 1281 }
1281} 1282}
1282 1283
@@ -2959,6 +2960,9 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
2959 if (rot == 1) { 2960 if (rot == 1) {
2960 blk_queue_flag_set(QUEUE_FLAG_NONROT, q); 2961 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
2961 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); 2962 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
2963 } else {
2964 blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
2965 blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
2962 } 2966 }
2963 2967
2964 if (sdkp->device->type == TYPE_ZBC) { 2968 if (sdkp->device->type == TYPE_ZBC) {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9d5d2ca7fc4f..c55f38ec391c 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -7940,6 +7940,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
7940 err = -ENOMEM; 7940 err = -ENOMEM;
7941 goto out_error; 7941 goto out_error;
7942 } 7942 }
7943
7944 /*
7945 * Do not use blk-mq at this time because blk-mq does not support
7946 * runtime pm.
7947 */
7948 host->use_blk_mq = false;
7949
7943 hba = shost_priv(host); 7950 hba = shost_priv(host);
7944 hba->host = host; 7951 hba->host = host;
7945 hba->dev = dev; 7952 hba->dev = dev;
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index ecb22749df0b..8cc015183043 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2729,6 +2729,9 @@ static int qman_alloc_range(struct gen_pool *p, u32 *result, u32 cnt)
2729{ 2729{
2730 unsigned long addr; 2730 unsigned long addr;
2731 2731
2732 if (!p)
2733 return -ENODEV;
2734
2732 addr = gen_pool_alloc(p, cnt); 2735 addr = gen_pool_alloc(p, cnt);
2733 if (!addr) 2736 if (!addr)
2734 return -ENOMEM; 2737 return -ENOMEM;
diff --git a/drivers/soc/fsl/qe/ucc.c b/drivers/soc/fsl/qe/ucc.c
index c646d8713861..681f7d4b7724 100644
--- a/drivers/soc/fsl/qe/ucc.c
+++ b/drivers/soc/fsl/qe/ucc.c
@@ -626,7 +626,7 @@ static u32 ucc_get_tdm_sync_shift(enum comm_dir mode, u32 tdm_num)
626{ 626{
627 u32 shift; 627 u32 shift;
628 628
629 shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : RX_SYNC_SHIFT_BASE; 629 shift = (mode == COMM_DIR_RX) ? RX_SYNC_SHIFT_BASE : TX_SYNC_SHIFT_BASE;
630 shift -= tdm_num * 2; 630 shift -= tdm_num * 2;
631 631
632 return shift; 632 return shift;
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 4b5e250e8615..e5c7e1ef6318 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -899,9 +899,10 @@ static void sdw_release_master_stream(struct sdw_stream_runtime *stream)
899 struct sdw_master_runtime *m_rt = stream->m_rt; 899 struct sdw_master_runtime *m_rt = stream->m_rt;
900 struct sdw_slave_runtime *s_rt, *_s_rt; 900 struct sdw_slave_runtime *s_rt, *_s_rt;
901 901
902 list_for_each_entry_safe(s_rt, _s_rt, 902 list_for_each_entry_safe(s_rt, _s_rt, &m_rt->slave_rt_list, m_rt_node) {
903 &m_rt->slave_rt_list, m_rt_node) 903 sdw_slave_port_release(s_rt->slave->bus, s_rt->slave, stream);
904 sdw_stream_remove_slave(s_rt->slave, stream); 904 sdw_release_slave_stream(s_rt->slave, stream);
905 }
905 906
906 list_del(&m_rt->bus_node); 907 list_del(&m_rt->bus_node);
907} 908}
@@ -1112,7 +1113,7 @@ int sdw_stream_add_master(struct sdw_bus *bus,
1112 "Master runtime config failed for stream:%s", 1113 "Master runtime config failed for stream:%s",
1113 stream->name); 1114 stream->name);
1114 ret = -ENOMEM; 1115 ret = -ENOMEM;
1115 goto error; 1116 goto unlock;
1116 } 1117 }
1117 1118
1118 ret = sdw_config_stream(bus->dev, stream, stream_config, false); 1119 ret = sdw_config_stream(bus->dev, stream, stream_config, false);
@@ -1123,11 +1124,11 @@ int sdw_stream_add_master(struct sdw_bus *bus,
1123 if (ret) 1124 if (ret)
1124 goto stream_error; 1125 goto stream_error;
1125 1126
1126 stream->state = SDW_STREAM_CONFIGURED; 1127 goto unlock;
1127 1128
1128stream_error: 1129stream_error:
1129 sdw_release_master_stream(stream); 1130 sdw_release_master_stream(stream);
1130error: 1131unlock:
1131 mutex_unlock(&bus->bus_lock); 1132 mutex_unlock(&bus->bus_lock);
1132 return ret; 1133 return ret;
1133} 1134}
@@ -1141,6 +1142,10 @@ EXPORT_SYMBOL(sdw_stream_add_master);
1141 * @stream: SoundWire stream 1142 * @stream: SoundWire stream
1142 * @port_config: Port configuration for audio stream 1143 * @port_config: Port configuration for audio stream
1143 * @num_ports: Number of ports 1144 * @num_ports: Number of ports
1145 *
 1146 * It is expected that a Slave is added before the Master is
 1147 * added to the stream.
1148 *
1144 */ 1149 */
1145int sdw_stream_add_slave(struct sdw_slave *slave, 1150int sdw_stream_add_slave(struct sdw_slave *slave,
1146 struct sdw_stream_config *stream_config, 1151 struct sdw_stream_config *stream_config,
@@ -1186,6 +1191,12 @@ int sdw_stream_add_slave(struct sdw_slave *slave,
1186 if (ret) 1191 if (ret)
1187 goto stream_error; 1192 goto stream_error;
1188 1193
1194 /*
1195 * Change stream state to CONFIGURED on first Slave add.
 1196	 * The bus is not aware of the number of Slave(s) in a stream at
 1197	 * this point, so it cannot wait for all Slave(s) to be added before
 1198	 * changing the stream state to CONFIGURED.
1199 */
1189 stream->state = SDW_STREAM_CONFIGURED; 1200 stream->state = SDW_STREAM_CONFIGURED;
1190 goto error; 1201 goto error;
1191 1202
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 7cb3ab0a35a0..3082e72e4f6c 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -30,7 +30,11 @@
30 30
31#define DRIVER_NAME "fsl-dspi" 31#define DRIVER_NAME "fsl-dspi"
32 32
33#ifdef CONFIG_M5441x
34#define DSPI_FIFO_SIZE 16
35#else
33#define DSPI_FIFO_SIZE 4 36#define DSPI_FIFO_SIZE 4
37#endif
34#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024) 38#define DSPI_DMA_BUFSIZE (DSPI_FIFO_SIZE * 1024)
35 39
36#define SPI_MCR 0x00 40#define SPI_MCR 0x00
@@ -623,9 +627,11 @@ static void dspi_tcfq_read(struct fsl_dspi *dspi)
623static void dspi_eoq_write(struct fsl_dspi *dspi) 627static void dspi_eoq_write(struct fsl_dspi *dspi)
624{ 628{
625 int fifo_size = DSPI_FIFO_SIZE; 629 int fifo_size = DSPI_FIFO_SIZE;
630 u16 xfer_cmd = dspi->tx_cmd;
626 631
627 /* Fill TX FIFO with as many transfers as possible */ 632 /* Fill TX FIFO with as many transfers as possible */
628 while (dspi->len && fifo_size--) { 633 while (dspi->len && fifo_size--) {
634 dspi->tx_cmd = xfer_cmd;
629 /* Request EOQF for last transfer in FIFO */ 635 /* Request EOQF for last transfer in FIFO */
630 if (dspi->len == dspi->bytes_per_word || fifo_size == 0) 636 if (dspi->len == dspi->bytes_per_word || fifo_size == 0)
631 dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ; 637 dspi->tx_cmd |= SPI_PUSHR_CMD_EOQ;
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 0626e6e3ea0c..421bfc7dda67 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -300,8 +300,8 @@ static int spi_gpio_request(struct device *dev,
300 *mflags |= SPI_MASTER_NO_RX; 300 *mflags |= SPI_MASTER_NO_RX;
301 301
302 spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW); 302 spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
303 if (IS_ERR(spi_gpio->mosi)) 303 if (IS_ERR(spi_gpio->sck))
304 return PTR_ERR(spi_gpio->mosi); 304 return PTR_ERR(spi_gpio->sck);
305 305
306 for (i = 0; i < num_chipselects; i++) { 306 for (i = 0; i < num_chipselects; i++) {
307 spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs", 307 spi_gpio->cs_gpios[i] = devm_gpiod_get_index(dev, "cs",
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 95dc4d78618d..b37de1d991d6 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -598,11 +598,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
598 598
599 ret = wait_event_interruptible_timeout(rspi->wait, 599 ret = wait_event_interruptible_timeout(rspi->wait,
600 rspi->dma_callbacked, HZ); 600 rspi->dma_callbacked, HZ);
601 if (ret > 0 && rspi->dma_callbacked) 601 if (ret > 0 && rspi->dma_callbacked) {
602 ret = 0; 602 ret = 0;
603 else if (!ret) { 603 } else {
604 dev_err(&rspi->master->dev, "DMA timeout\n"); 604 if (!ret) {
605 ret = -ETIMEDOUT; 605 dev_err(&rspi->master->dev, "DMA timeout\n");
606 ret = -ETIMEDOUT;
607 }
606 if (tx) 608 if (tx)
607 dmaengine_terminate_all(rspi->master->dma_tx); 609 dmaengine_terminate_all(rspi->master->dma_tx);
608 if (rx) 610 if (rx)
@@ -1350,12 +1352,36 @@ static const struct platform_device_id spi_driver_ids[] = {
1350 1352
1351MODULE_DEVICE_TABLE(platform, spi_driver_ids); 1353MODULE_DEVICE_TABLE(platform, spi_driver_ids);
1352 1354
1355#ifdef CONFIG_PM_SLEEP
1356static int rspi_suspend(struct device *dev)
1357{
1358 struct platform_device *pdev = to_platform_device(dev);
1359 struct rspi_data *rspi = platform_get_drvdata(pdev);
1360
1361 return spi_master_suspend(rspi->master);
1362}
1363
1364static int rspi_resume(struct device *dev)
1365{
1366 struct platform_device *pdev = to_platform_device(dev);
1367 struct rspi_data *rspi = platform_get_drvdata(pdev);
1368
1369 return spi_master_resume(rspi->master);
1370}
1371
1372static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
1373#define DEV_PM_OPS &rspi_pm_ops
1374#else
1375#define DEV_PM_OPS NULL
1376#endif /* CONFIG_PM_SLEEP */
1377
1353static struct platform_driver rspi_driver = { 1378static struct platform_driver rspi_driver = {
1354 .probe = rspi_probe, 1379 .probe = rspi_probe,
1355 .remove = rspi_remove, 1380 .remove = rspi_remove,
1356 .id_table = spi_driver_ids, 1381 .id_table = spi_driver_ids,
1357 .driver = { 1382 .driver = {
1358 .name = "renesas_spi", 1383 .name = "renesas_spi",
1384 .pm = DEV_PM_OPS,
1359 .of_match_table = of_match_ptr(rspi_of_match), 1385 .of_match_table = of_match_ptr(rspi_of_match),
1360 }, 1386 },
1361}; 1387};
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 539d6d1a277a..101cd6aae2ea 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -397,7 +397,8 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
397 397
398static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) 398static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
399{ 399{
400 sh_msiof_write(p, STR, sh_msiof_read(p, STR)); 400 sh_msiof_write(p, STR,
401 sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
401} 402}
402 403
403static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, 404static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -1426,12 +1427,37 @@ static const struct platform_device_id spi_driver_ids[] = {
1426}; 1427};
1427MODULE_DEVICE_TABLE(platform, spi_driver_ids); 1428MODULE_DEVICE_TABLE(platform, spi_driver_ids);
1428 1429
1430#ifdef CONFIG_PM_SLEEP
1431static int sh_msiof_spi_suspend(struct device *dev)
1432{
1433 struct platform_device *pdev = to_platform_device(dev);
1434 struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
1435
1436 return spi_master_suspend(p->master);
1437}
1438
1439static int sh_msiof_spi_resume(struct device *dev)
1440{
1441 struct platform_device *pdev = to_platform_device(dev);
1442 struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
1443
1444 return spi_master_resume(p->master);
1445}
1446
1447static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
1448 sh_msiof_spi_resume);
1449#define DEV_PM_OPS &sh_msiof_spi_pm_ops
1450#else
1451#define DEV_PM_OPS NULL
1452#endif /* CONFIG_PM_SLEEP */
1453
1429static struct platform_driver sh_msiof_spi_drv = { 1454static struct platform_driver sh_msiof_spi_drv = {
1430 .probe = sh_msiof_spi_probe, 1455 .probe = sh_msiof_spi_probe,
1431 .remove = sh_msiof_spi_remove, 1456 .remove = sh_msiof_spi_remove,
1432 .id_table = spi_driver_ids, 1457 .id_table = spi_driver_ids,
1433 .driver = { 1458 .driver = {
1434 .name = "spi_sh_msiof", 1459 .name = "spi_sh_msiof",
1460 .pm = DEV_PM_OPS,
1435 .of_match_table = of_match_ptr(sh_msiof_match), 1461 .of_match_table = of_match_ptr(sh_msiof_match),
1436 }, 1462 },
1437}; 1463};
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 6f7b946b5ced..1427f343b39a 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1063,6 +1063,24 @@ static int tegra_slink_probe(struct platform_device *pdev)
1063 goto exit_free_master; 1063 goto exit_free_master;
1064 } 1064 }
1065 1065
 1066	/* a disabled clock may cause an interrupt storm upon IRQ request */
1067 tspi->clk = devm_clk_get(&pdev->dev, NULL);
1068 if (IS_ERR(tspi->clk)) {
1069 ret = PTR_ERR(tspi->clk);
1070 dev_err(&pdev->dev, "Can not get clock %d\n", ret);
1071 goto exit_free_master;
1072 }
1073 ret = clk_prepare(tspi->clk);
1074 if (ret < 0) {
1075 dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
1076 goto exit_free_master;
1077 }
1078 ret = clk_enable(tspi->clk);
1079 if (ret < 0) {
1080 dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
1081 goto exit_free_master;
1082 }
1083
1066 spi_irq = platform_get_irq(pdev, 0); 1084 spi_irq = platform_get_irq(pdev, 0);
1067 tspi->irq = spi_irq; 1085 tspi->irq = spi_irq;
1068 ret = request_threaded_irq(tspi->irq, tegra_slink_isr, 1086 ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
@@ -1071,14 +1089,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
1071 if (ret < 0) { 1089 if (ret < 0) {
1072 dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n", 1090 dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
1073 tspi->irq); 1091 tspi->irq);
1074 goto exit_free_master; 1092 goto exit_clk_disable;
1075 }
1076
1077 tspi->clk = devm_clk_get(&pdev->dev, NULL);
1078 if (IS_ERR(tspi->clk)) {
1079 dev_err(&pdev->dev, "can not get clock\n");
1080 ret = PTR_ERR(tspi->clk);
1081 goto exit_free_irq;
1082 } 1093 }
1083 1094
1084 tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi"); 1095 tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
@@ -1138,6 +1149,8 @@ exit_rx_dma_free:
1138 tegra_slink_deinit_dma_param(tspi, true); 1149 tegra_slink_deinit_dma_param(tspi, true);
1139exit_free_irq: 1150exit_free_irq:
1140 free_irq(spi_irq, tspi); 1151 free_irq(spi_irq, tspi);
1152exit_clk_disable:
1153 clk_disable(tspi->clk);
1141exit_free_master: 1154exit_free_master:
1142 spi_master_put(master); 1155 spi_master_put(master);
1143 return ret; 1156 return ret;
@@ -1150,6 +1163,8 @@ static int tegra_slink_remove(struct platform_device *pdev)
1150 1163
1151 free_irq(tspi->irq, tspi); 1164 free_irq(tspi->irq, tspi);
1152 1165
1166 clk_disable(tspi->clk);
1167
1153 if (tspi->tx_dma_chan) 1168 if (tspi->tx_dma_chan)
1154 tegra_slink_deinit_dma_param(tspi, false); 1169 tegra_slink_deinit_dma_param(tspi, false);
1155 1170
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ec395a6baf9c..9da0bc5a036c 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2143,8 +2143,17 @@ int spi_register_controller(struct spi_controller *ctlr)
2143 */ 2143 */
2144 if (ctlr->num_chipselect == 0) 2144 if (ctlr->num_chipselect == 0)
2145 return -EINVAL; 2145 return -EINVAL;
2146 /* allocate dynamic bus number using Linux idr */ 2146 if (ctlr->bus_num >= 0) {
2147 if ((ctlr->bus_num < 0) && ctlr->dev.of_node) { 2147 /* devices with a fixed bus num must check-in with the num */
2148 mutex_lock(&board_lock);
2149 id = idr_alloc(&spi_master_idr, ctlr, ctlr->bus_num,
2150 ctlr->bus_num + 1, GFP_KERNEL);
2151 mutex_unlock(&board_lock);
2152 if (WARN(id < 0, "couldn't get idr"))
2153 return id == -ENOSPC ? -EBUSY : id;
2154 ctlr->bus_num = id;
2155 } else if (ctlr->dev.of_node) {
2156 /* allocate dynamic bus number using Linux idr */
2148 id = of_alias_get_id(ctlr->dev.of_node, "spi"); 2157 id = of_alias_get_id(ctlr->dev.of_node, "spi");
2149 if (id >= 0) { 2158 if (id >= 0) {
2150 ctlr->bus_num = id; 2159 ctlr->bus_num = id;
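The spi core change lets controllers with a fixed bus_num claim exactly that number from the idr, reporting -EBUSY if it is already taken, while a negative bus_num still means dynamic allocation. The decision logic, reduced to a hypothetical array-backed allocator:

#include <stdio.h>
#include <stdbool.h>

#define MAX_BUS 8
static bool claimed[MAX_BUS];

/* Mirrors the new spi_register_controller() flow: a fixed bus number must be
 * claimable exactly once (error otherwise); a negative request means "pick one". */
static int claim_bus(int requested)
{
	if (requested >= 0) {
		if (requested >= MAX_BUS || claimed[requested])
			return -1;		/* stand-in for -EBUSY */
		claimed[requested] = true;
		return requested;
	}
	for (int i = 0; i < MAX_BUS; i++)
		if (!claimed[i]) {
			claimed[i] = true;
			return i;
		}
	return -1;
}

int main(void)
{
	printf("fixed 2 -> %d\n", claim_bus(2));
	printf("fixed 2 -> %d (already taken)\n", claim_bus(2));
	printf("dynamic -> %d\n", claim_bus(-1));
	return 0;
}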
diff --git a/drivers/staging/media/mt9t031/Kconfig b/drivers/staging/media/mt9t031/Kconfig
index f48e06a03cdb..9a58aaf72edd 100644
--- a/drivers/staging/media/mt9t031/Kconfig
+++ b/drivers/staging/media/mt9t031/Kconfig
@@ -1,9 +1,3 @@
1config SOC_CAMERA_IMX074
2 tristate "imx074 support (DEPRECATED)"
3 depends on SOC_CAMERA && I2C
4 help
5 This driver supports IMX074 cameras from Sony
6
7config SOC_CAMERA_MT9T031 1config SOC_CAMERA_MT9T031
8 tristate "mt9t031 support (DEPRECATED)" 2 tristate "mt9t031 support (DEPRECATED)"
9 depends on SOC_CAMERA && I2C 3 depends on SOC_CAMERA && I2C
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 9cdfccbdd06f..cc756a123fd8 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1416,7 +1416,8 @@ static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
 
         sg_init_table(sg, ARRAY_SIZE(sg));
         sg_set_buf(sg, buf, payload_length);
-        sg_set_buf(sg + 1, pad_bytes, padding);
+        if (padding)
+                sg_set_buf(sg + 1, pad_bytes, padding);
 
         ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
 
@@ -3910,10 +3911,14 @@ static bool iscsi_target_check_conn_state(struct iscsi_conn *conn)
 static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
 {
         int ret;
-        u8 buffer[ISCSI_HDR_LEN], opcode;
+        u8 *buffer, opcode;
         u32 checksum = 0, digest = 0;
         struct kvec iov;
 
+        buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
+        if (!buffer)
+                return;
+
         while (!kthread_should_stop()) {
                 /*
                  * Ensure that both TX and RX per connection kthreads
@@ -3921,7 +3926,6 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                  */
                 iscsit_thread_check_cpumask(conn, current, 0);
 
-                memset(buffer, 0, ISCSI_HDR_LEN);
                 memset(&iov, 0, sizeof(struct kvec));
 
                 iov.iov_base = buffer;
@@ -3930,7 +3934,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                 ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
                 if (ret != ISCSI_HDR_LEN) {
                         iscsit_rx_thread_wait_for_tcp(conn);
-                        return;
+                        break;
                 }
 
                 if (conn->conn_ops->HeaderDigest) {
@@ -3940,7 +3944,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                         ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
                         if (ret != ISCSI_CRC_LEN) {
                                 iscsit_rx_thread_wait_for_tcp(conn);
-                                return;
+                                break;
                         }
 
                         iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
@@ -3964,7 +3968,7 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                 }
 
                 if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
-                        return;
+                        break;
 
                 opcode = buffer[0] & ISCSI_OPCODE_MASK;
 
@@ -3975,13 +3979,15 @@ static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
                                " while in Discovery Session, rejecting.\n", opcode);
                         iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
                                           buffer);
-                        return;
+                        break;
                 }
 
                 ret = iscsi_target_rx_opcode(conn, buffer);
                 if (ret < 0)
-                        return;
+                        break;
         }
+
+        kfree(buffer);
 }
 
 int iscsi_target_rx_thread(void *arg)
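Note on the hunks above: once the PDU header buffer moves from the stack to kcalloc(), every early return inside the receive loop has to become a break so control always reaches the single kfree() at the end of the function. A minimal sketch of that allocate / loop / single-exit pattern, with a hypothetical read_one_pdu() helper standing in for rx_data() and the opcode dispatch:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/kthread.h>

#define EXAMPLE_HDR_LEN 48                      /* stand-in for ISCSI_HDR_LEN */

static int read_one_pdu(u8 *hdr, int len)
{
        return -1;                              /* stub: the real code calls rx_data() */
}

static void rx_loop_example(void)
{
        u8 *buffer;

        buffer = kcalloc(EXAMPLE_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
        if (!buffer)
                return;

        while (!kthread_should_stop()) {
                if (read_one_pdu(buffer, EXAMPLE_HDR_LEN) < 0)
                        break;                  /* a bare return here would leak buffer */
        }

        kfree(buffer);                          /* single exit frees the heap buffer */
}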
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 9518ffd8b8ba..4e680d753941 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -26,27 +26,6 @@
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
-{
-        int j = DIV_ROUND_UP(len, 2), rc;
-
-        rc = hex2bin(dst, src, j);
-        if (rc < 0)
-                pr_debug("CHAP string contains non hex digit symbols\n");
-
-        dst[j] = '\0';
-        return j;
-}
-
-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
-{
-        int i;
-
-        for (i = 0; i < src_len; i++) {
-                sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
-        }
-}
-
 static int chap_gen_challenge(
         struct iscsi_conn *conn,
         int caller,
@@ -62,7 +41,7 @@ static int chap_gen_challenge(
         ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH);
         if (unlikely(ret))
                 return ret;
-        chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+        bin2hex(challenge_asciihex, chap->challenge,
                                 CHAP_CHALLENGE_LENGTH);
         /*
          * Set CHAP_C, and copy the generated challenge into c_str.
@@ -248,9 +227,16 @@ static int chap_server_compute_md5(
                 pr_err("Could not find CHAP_R.\n");
                 goto out;
         }
+        if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+                pr_err("Malformed CHAP_R\n");
+                goto out;
+        }
+        if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+                pr_err("Malformed CHAP_R\n");
+                goto out;
+        }
 
         pr_debug("[server] Got CHAP_R=%s\n", chap_r);
-        chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
 
         tfm = crypto_alloc_shash("md5", 0, 0);
         if (IS_ERR(tfm)) {
@@ -294,7 +280,7 @@ static int chap_server_compute_md5(
                 goto out;
         }
 
-        chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+        bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
         pr_debug("[server] MD5 Server Digest: %s\n", response);
 
         if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
@@ -349,9 +335,7 @@ static int chap_server_compute_md5(
                 pr_err("Could not find CHAP_C.\n");
                 goto out;
         }
-        pr_debug("[server] Got CHAP_C=%s\n", challenge);
-        challenge_len = chap_string_to_hex(challenge_binhex, challenge,
-                                strlen(challenge));
+        challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
         if (!challenge_len) {
                 pr_err("Unable to convert incoming challenge\n");
                 goto out;
@@ -360,6 +344,11 @@ static int chap_server_compute_md5(
                 pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
                 goto out;
         }
+        if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+                pr_err("Malformed CHAP_C\n");
+                goto out;
+        }
+        pr_debug("[server] Got CHAP_C=%s\n", challenge);
         /*
          * During mutual authentication, the CHAP_C generated by the
          * initiator must not match the original CHAP_C generated by
@@ -413,7 +402,7 @@ static int chap_server_compute_md5(
         /*
          * Convert response from binary hex to ascii hext.
          */
-        chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+        bin2hex(response, digest, MD5_SIGNATURE_SIZE);
         *nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
                         response);
         *nr_out_len += 1;
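The CHAP hunks above drop the driver-local hex helpers in favour of the kernel's hex2bin()/bin2hex() and reject malformed input before converting it. A minimal sketch of that validate-then-convert pattern, assuming an MD5-sized digest (names here are illustrative, not the target code):

#include <linux/kernel.h>       /* hex2bin(), bin2hex(), DIV_ROUND_UP() */
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/types.h>

#define EXAMPLE_DIGEST_SIZE 16

static int parse_hex_digest(const char *hex, u8 *out /* EXAMPLE_DIGEST_SIZE bytes */)
{
        /* exactly two ASCII hex characters per output byte */
        if (strlen(hex) != EXAMPLE_DIGEST_SIZE * 2)
                return -EINVAL;

        /* hex2bin() returns a negative value on non-hex input */
        if (hex2bin(out, hex, EXAMPLE_DIGEST_SIZE) < 0)
                return -EINVAL;

        return 0;
}

static void format_hex_digest(const u8 *digest, char *out /* 2 * EXAMPLE_DIGEST_SIZE + 1 */)
{
        /* bin2hex() returns a pointer just past the last character it wrote */
        *bin2hex(out, digest, EXAMPLE_DIGEST_SIZE) = '\0';
}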
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 24a5f05e769b..e5389591bb4f 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1054,8 +1054,8 @@ static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo)
         /* Get the address of the host memory buffer.
          */
         bdp = pinfo->rx_cur;
-        while (bdp->cbd_sc & BD_SC_EMPTY)
-                ;
+        if (bdp->cbd_sc & BD_SC_EMPTY)
+                return NO_POLL_CHAR;
 
         /* If the buffer address is in the CPM DPRAM, don't
          * convert it.
@@ -1090,7 +1090,11 @@ static int cpm_get_poll_char(struct uart_port *port)
                 poll_chars = 0;
         }
         if (poll_chars <= 0) {
-                poll_chars = poll_wait_key(poll_buf, pinfo);
+                int ret = poll_wait_key(poll_buf, pinfo);
+
+                if (ret == NO_POLL_CHAR)
+                        return ret;
+                poll_chars = ret;
                 pollp = poll_buf;
         }
         poll_chars--;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 51e47a63d61a..3f8d1274fc85 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -979,7 +979,8 @@ static inline int lpuart_start_rx_dma(struct lpuart_port *sport)
         struct circ_buf *ring = &sport->rx_ring;
         int ret, nent;
         int bits, baud;
-        struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port);
+        struct tty_port *port = &sport->port.state->port;
+        struct tty_struct *tty = port->tty;
         struct ktermios *termios = &tty->termios;
 
         baud = tty_get_baud_rate(tty);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 239c0fa2e981..0f67197a3783 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2351,6 +2351,14 @@ static int imx_uart_probe(struct platform_device *pdev)
2351 ret); 2351 ret);
2352 return ret; 2352 return ret;
2353 } 2353 }
2354
2355 ret = devm_request_irq(&pdev->dev, rtsirq, imx_uart_rtsint, 0,
2356 dev_name(&pdev->dev), sport);
2357 if (ret) {
2358 dev_err(&pdev->dev, "failed to request rts irq: %d\n",
2359 ret);
2360 return ret;
2361 }
2354 } else { 2362 } else {
2355 ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0, 2363 ret = devm_request_irq(&pdev->dev, rxirq, imx_uart_int, 0,
2356 dev_name(&pdev->dev), sport); 2364 dev_name(&pdev->dev), sport);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index d04b5eeea3c6..170e446a2f62 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -511,6 +511,7 @@ static void mvebu_uart_set_termios(struct uart_port *port,
                 termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
                 termios->c_cflag &= CREAD | CBAUD;
                 termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
+                termios->c_cflag |= CS8;
         }
 
         spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 32bc3e3fe4d3..5e5da9acaf0a 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1255,6 +1255,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
 static int tty_reopen(struct tty_struct *tty)
 {
         struct tty_driver *driver = tty->driver;
+        int retval;
 
         if (driver->type == TTY_DRIVER_TYPE_PTY &&
             driver->subtype == PTY_TYPE_MASTER)
@@ -1268,10 +1269,14 @@ static int tty_reopen(struct tty_struct *tty)
 
         tty->count++;
 
-        if (!tty->ldisc)
-                return tty_ldisc_reinit(tty, tty->termios.c_line);
+        if (tty->ldisc)
+                return 0;
 
-        return 0;
+        retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+        if (retval)
+                tty->count--;
+
+        return retval;
 }
 
 /**
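The tty_reopen() change above pairs the optimistic tty->count++ with a rollback when tty_ldisc_reinit() fails, instead of returning with the count inflated. A minimal sketch of that take-then-roll-back shape (illustrative names, not the tty core):

#include <linux/errno.h>

struct example_port {
        int count;
};

static int example_reinit(struct example_port *p)
{
        return -ENOMEM;         /* stub: stands in for tty_ldisc_reinit() */
}

static int example_reopen(struct example_port *p)
{
        int retval;

        p->count++;             /* optimistic reference, as in tty_reopen() */

        retval = example_reinit(p);
        if (retval)
                p->count--;     /* undo the count on failure */

        return retval;
}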
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index a78ad10a119b..73cdc0d633dd 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -32,6 +32,8 @@
 #include <asm/io.h>
 #include <linux/uaccess.h>
 
+#include <linux/nospec.h>
+
 #include <linux/kbd_kern.h>
 #include <linux/vt_kern.h>
 #include <linux/kbd_diacr.h>
@@ -700,6 +702,8 @@ int vt_ioctl(struct tty_struct *tty,
                 if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
                         ret = -ENXIO;
                 else {
+                        vsa.console = array_index_nospec(vsa.console,
+                                                         MAX_NR_CONSOLES + 1);
                         vsa.console--;
                         console_lock();
                         ret = vc_allocate(vsa.console);
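The vt_ioctl() hunk above is the standard Spectre-v1 hardening pattern: range-check a user-controlled index, then clamp it with array_index_nospec() before using it. The bound there is MAX_NR_CONSOLES + 1 because vsa.console is still 1-based at that point. A minimal sketch of the same pattern against a hypothetical table:

#include <linux/nospec.h>
#include <linux/errno.h>

#define EXAMPLE_NR_SLOTS 64

static int example_table[EXAMPLE_NR_SLOTS];

static int example_read_slot(unsigned int idx, int *out)
{
        if (idx >= EXAMPLE_NR_SLOTS)
                return -ENXIO;

        /*
         * Clamp idx under speculation so a mispredicted bounds check
         * above cannot be used to read out of bounds.
         */
        idx = array_index_nospec(idx, EXAMPLE_NR_SLOTS);

        *out = example_table[idx];
        return 0;
}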
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 656d247819c9..bec581fb7c63 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -460,7 +460,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
460 460
461 set_bit(WDM_RESPONDING, &desc->flags); 461 set_bit(WDM_RESPONDING, &desc->flags);
462 spin_unlock_irq(&desc->iuspin); 462 spin_unlock_irq(&desc->iuspin);
463 rv = usb_submit_urb(desc->response, GFP_ATOMIC); 463 rv = usb_submit_urb(desc->response, GFP_KERNEL);
464 spin_lock_irq(&desc->iuspin); 464 spin_lock_irq(&desc->iuspin);
465 if (rv) { 465 if (rv) {
466 dev_err(&desc->intf->dev, 466 dev_err(&desc->intf->dev,
diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c
index 15cc76e22123..99116af07f1d 100644
--- a/drivers/usb/common/roles.c
+++ b/drivers/usb/common/roles.c
@@ -109,8 +109,15 @@ static void *usb_role_switch_match(struct device_connection *con, int ep,
  */
 struct usb_role_switch *usb_role_switch_get(struct device *dev)
 {
-        return device_connection_find_match(dev, "usb-role-switch", NULL,
-                                            usb_role_switch_match);
+        struct usb_role_switch *sw;
+
+        sw = device_connection_find_match(dev, "usb-role-switch", NULL,
+                                          usb_role_switch_match);
+
+        if (!IS_ERR_OR_NULL(sw))
+                WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+
+        return sw;
 }
 EXPORT_SYMBOL_GPL(usb_role_switch_get);
 
@@ -122,8 +129,10 @@ EXPORT_SYMBOL_GPL(usb_role_switch_get);
  */
 void usb_role_switch_put(struct usb_role_switch *sw)
 {
-        if (!IS_ERR_OR_NULL(sw))
+        if (!IS_ERR_OR_NULL(sw)) {
                 put_device(&sw->dev);
+                module_put(sw->dev.parent->driver->owner);
+        }
 }
 EXPORT_SYMBOL_GPL(usb_role_switch_put);
 
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 6ce77b33da61..244417d0dfd1 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1434,10 +1434,13 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
         struct async *as = NULL;
         struct usb_ctrlrequest *dr = NULL;
         unsigned int u, totlen, isofrmlen;
-        int i, ret, is_in, num_sgs = 0, ifnum = -1;
+        int i, ret, num_sgs = 0, ifnum = -1;
         int number_of_packets = 0;
         unsigned int stream_id = 0;
         void *buf;
+        bool is_in;
+        bool allow_short = false;
+        bool allow_zero = false;
         unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
                         USBDEVFS_URB_BULK_CONTINUATION |
                         USBDEVFS_URB_NO_FSBR |
@@ -1471,6 +1474,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
         u = 0;
         switch (uurb->type) {
         case USBDEVFS_URB_TYPE_CONTROL:
+                if (is_in)
+                        allow_short = true;
                 if (!usb_endpoint_xfer_control(&ep->desc))
                         return -EINVAL;
                 /* min 8 byte setup packet */
@@ -1511,6 +1516,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                 break;
 
         case USBDEVFS_URB_TYPE_BULK:
+                if (!is_in)
+                        allow_zero = true;
+                else
+                        allow_short = true;
                 switch (usb_endpoint_type(&ep->desc)) {
                 case USB_ENDPOINT_XFER_CONTROL:
                 case USB_ENDPOINT_XFER_ISOC:
@@ -1531,6 +1540,10 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
                 if (!usb_endpoint_xfer_int(&ep->desc))
                         return -EINVAL;
  interrupt_urb:
+                if (!is_in)
+                        allow_zero = true;
+                else
+                        allow_short = true;
                 break;
 
         case USBDEVFS_URB_TYPE_ISO:
@@ -1676,14 +1689,19 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
         u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
         if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
                 u |= URB_ISO_ASAP;
-        if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+        if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
                 u |= URB_SHORT_NOT_OK;
-        if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+        if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
                 u |= URB_ZERO_PACKET;
         if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
                 u |= URB_NO_INTERRUPT;
         as->urb->transfer_flags = u;
 
+        if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
+                dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
+        if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+                dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
+
         as->urb->transfer_buffer_length = uurb->buffer_length;
         as->urb->setup_packet = (unsigned char *)dr;
         dr = NULL;
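The devio.c hunks above only honour USBDEVFS_URB_SHORT_NOT_OK on IN transfers and USBDEVFS_URB_ZERO_PACKET on OUT transfers, and warn when userspace asks for a flag that cannot apply. A simplified sketch of that direction-based gating (illustrative helper, not the full submit path, which also varies per transfer type):

#include <linux/usb.h>
#include <linux/device.h>
#include <linux/types.h>

static unsigned int example_gate_urb_flags(struct device *dev, bool is_in,
                                           bool short_not_ok, bool zero_packet)
{
        unsigned int flags = 0;
        bool allow_short = is_in;       /* short reads only make sense for IN */
        bool allow_zero = !is_in;       /* zero-length packets only for OUT */

        if (allow_short && short_not_ok)
                flags |= URB_SHORT_NOT_OK;
        else if (short_not_ok)
                dev_warn(dev, "ignoring SHORT_NOT_OK on an OUT transfer\n");

        if (allow_zero && zero_packet)
                flags |= URB_ZERO_PACKET;
        else if (zero_packet)
                dev_warn(dev, "ignoring ZERO_PACKET on an IN transfer\n");

        return flags;
}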
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e76e95f62f76..a1f225f077cd 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -512,7 +512,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
         struct device *dev;
         struct usb_device *udev;
         int retval = 0;
-        int lpm_disable_error = -ENODEV;
 
         if (!iface)
                 return -ENODEV;
@@ -533,16 +532,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
 
         iface->condition = USB_INTERFACE_BOUND;
 
-        /* See the comment about disabling LPM in usb_probe_interface(). */
-        if (driver->disable_hub_initiated_lpm) {
-                lpm_disable_error = usb_unlocked_disable_lpm(udev);
-                if (lpm_disable_error) {
-                        dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n",
-                                __func__, driver->name);
-                        return -ENOMEM;
-                }
-        }
-
         /* Claimed interfaces are initially inactive (suspended) and
          * runtime-PM-enabled, but only if the driver has autosuspend
          * support. Otherwise they are marked active, to prevent the
@@ -561,9 +550,20 @@ int usb_driver_claim_interface(struct usb_driver *driver,
         if (device_is_registered(dev))
                 retval = device_bind_driver(dev);
 
-        /* Attempt to re-enable USB3 LPM, if the disable was successful. */
-        if (!lpm_disable_error)
-                usb_unlocked_enable_lpm(udev);
+        if (retval) {
+                dev->driver = NULL;
+                usb_set_intfdata(iface, NULL);
+                iface->needs_remote_wakeup = 0;
+                iface->condition = USB_INTERFACE_UNBOUND;
+
+                /*
+                 * Unbound interfaces are always runtime-PM-disabled
+                 * and runtime-PM-suspended
+                 */
+                if (driver->supports_autosuspend)
+                        pm_runtime_disable(dev);
+                pm_runtime_set_suspended(dev);
+        }
 
         return retval;
 }
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index e77dfe5ed5ec..178d6c6063c0 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -58,6 +58,7 @@ static int quirks_param_set(const char *val, const struct kernel_param *kp)
         quirk_list = kcalloc(quirk_count, sizeof(struct quirk_entry),
                              GFP_KERNEL);
         if (!quirk_list) {
+                quirk_count = 0;
                 mutex_unlock(&quirk_mutex);
                 return -ENOMEM;
         }
@@ -154,7 +155,7 @@ static struct kparam_string quirks_param_string = {
         .string = quirks_param,
 };
 
-module_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
+device_param_cb(quirks, &quirks_param_ops, &quirks_param_string, 0644);
 MODULE_PARM_DESC(quirks, "Add/modify USB quirks by specifying quirks=vendorID:productID:quirks");
 
 /* Lists of quirky USB devices, split in device quirks and interface quirks.
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 623be3174fb3..79d8bd7a612e 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -228,6 +228,8 @@ struct usb_host_interface *usb_find_alt_setting(
         struct usb_interface_cache *intf_cache = NULL;
         int i;
 
+        if (!config)
+                return NULL;
         for (i = 0; i < config->desc.bNumInterfaces; i++) {
                 if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
                                 == iface_num) {
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index df827ff57b0d..23a0df79ef21 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -658,16 +658,6 @@ dsps_dma_controller_create(struct musb *musb, void __iomem *base)
         return controller;
 }
 
-static void dsps_dma_controller_destroy(struct dma_controller *c)
-{
-        struct musb *musb = c->musb;
-        struct dsps_glue *glue = dev_get_drvdata(musb->controller->parent);
-        void __iomem *usbss_base = glue->usbss_base;
-
-        musb_writel(usbss_base, USBSS_IRQ_CLEARR, USBSS_IRQ_PD_COMP);
-        cppi41_dma_controller_destroy(c);
-}
-
 #ifdef CONFIG_PM_SLEEP
 static void dsps_dma_controller_suspend(struct dsps_glue *glue)
 {
@@ -697,7 +687,7 @@ static struct musb_platform_ops dsps_ops = {
 
 #ifdef CONFIG_USB_TI_CPPI41_DMA
         .dma_init = dsps_dma_controller_create,
-        .dma_exit = dsps_dma_controller_destroy,
+        .dma_exit = cppi41_dma_controller_destroy,
 #endif
         .enable = dsps_musb_enable,
         .disable = dsps_musb_disable,
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index ddaac63ecf12..d990aa510fab 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -9,6 +9,7 @@
 
 #include <linux/device.h>
 #include <linux/list.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/usb/typec_mux.h>
 
@@ -49,8 +50,10 @@ struct typec_switch *typec_switch_get(struct device *dev)
         mutex_lock(&switch_lock);
         sw = device_connection_find_match(dev, "typec-switch", NULL,
                                           typec_switch_match);
-        if (!IS_ERR_OR_NULL(sw))
+        if (!IS_ERR_OR_NULL(sw)) {
+                WARN_ON(!try_module_get(sw->dev->driver->owner));
                 get_device(sw->dev);
+        }
         mutex_unlock(&switch_lock);
 
         return sw;
@@ -65,8 +68,10 @@ EXPORT_SYMBOL_GPL(typec_switch_get);
  */
 void typec_switch_put(struct typec_switch *sw)
 {
-        if (!IS_ERR_OR_NULL(sw))
+        if (!IS_ERR_OR_NULL(sw)) {
+                module_put(sw->dev->driver->owner);
                 put_device(sw->dev);
+        }
 }
 EXPORT_SYMBOL_GPL(typec_switch_put);
 
@@ -136,8 +141,10 @@ struct typec_mux *typec_mux_get(struct device *dev, const char *name)
 
         mutex_lock(&mux_lock);
         mux = device_connection_find_match(dev, name, NULL, typec_mux_match);
-        if (!IS_ERR_OR_NULL(mux))
+        if (!IS_ERR_OR_NULL(mux)) {
+                WARN_ON(!try_module_get(mux->dev->driver->owner));
                 get_device(mux->dev);
+        }
         mutex_unlock(&mux_lock);
 
         return mux;
@@ -152,8 +159,10 @@ EXPORT_SYMBOL_GPL(typec_mux_get);
  */
 void typec_mux_put(struct typec_mux *mux)
 {
-        if (!IS_ERR_OR_NULL(mux))
+        if (!IS_ERR_OR_NULL(mux)) {
+                module_put(mux->dev->driver->owner);
                 put_device(mux->dev);
+        }
 }
 EXPORT_SYMBOL_GPL(typec_mux_put);
 
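Both the usb-role-switch and Type-C mux/switch getters above now pin the providing driver's module with try_module_get() alongside get_device(), and drop both references in the put path. A minimal sketch of that pairing, assuming a provider object that records the device its driver registered (illustrative types, not the typec code):

#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>

struct example_provider {
        struct device *dev;     /* device registered by the providing driver */
};

static void example_provider_get(struct example_provider *p)
{
        if (!IS_ERR_OR_NULL(p)) {
                /* keep the providing module loaded while we hold p */
                WARN_ON(!try_module_get(p->dev->driver->owner));
                get_device(p->dev);
        }
}

static void example_provider_put(struct example_provider *p)
{
        if (!IS_ERR_OR_NULL(p)) {
                module_put(p->dev->driver->owner);
                put_device(p->dev);
        }
}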
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 3946649b85c8..ba906876cc45 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -42,6 +42,7 @@ struct bmp_dib_header {
         u32 colors_important;
 } __packed;
 
+static bool use_bgrt = true;
 static bool request_mem_succeeded = false;
 static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
 
@@ -160,6 +161,9 @@ static void efifb_show_boot_graphics(struct fb_info *info)
         void *bgrt_image = NULL;
         u8 *dst = info->screen_base;
 
+        if (!use_bgrt)
+                return;
+
         if (!bgrt_tab.image_address) {
                 pr_info("efifb: No BGRT, not showing boot graphics\n");
                 return;
@@ -290,6 +294,8 @@ static int efifb_setup(char *options)
                                 screen_info.lfb_width = simple_strtoul(this_opt+6, NULL, 0);
                         else if (!strcmp(this_opt, "nowc"))
                                 mem_flags &= ~EFI_MEMORY_WC;
+                        else if (!strcmp(this_opt, "nobgrt"))
+                                use_bgrt = false;
                 }
         }
 
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index ef69273074ba..a3edb20ea4c3 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
         if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
                 return -EFAULT;
 
+        if (mr->w > 4096 || mr->h > 4096)
+                return -EINVAL;
+
         if (mr->w * mr->h * 3 > mr->buffer_size)
                 return -EINVAL;
 
@@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
                         mr->x, mr->y, mr->w, mr->h);
 
         if (r > 0) {
-                if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+                if (copy_to_user(mr->buffer, buf, r))
                         r = -EFAULT;
         }
 
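The omapfb_memory_read() hunks above cap the requested dimensions and copy back only the r bytes that were actually produced rather than the whole user-supplied buffer_size. A minimal sketch of that copy-what-you-produced pattern (illustrative, not the omapfb code):

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

static ssize_t example_copy_result(void __user *ubuf, size_t ubuf_size,
                                   const void *kbuf, ssize_t produced)
{
        if (produced < 0)
                return produced;        /* propagate the underlying error */

        if (produced > ubuf_size)
                return -EINVAL;         /* never copy more than the caller provided */

        /* copy only the bytes actually generated, not ubuf_size */
        if (copy_to_user(ubuf, kbuf, produced))
                return -EFAULT;

        return produced;
}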
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index def3a501acd6..d059d04c63ac 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -712,7 +712,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
712 /* 712 /*
713 * enable controller clock 713 * enable controller clock
714 */ 714 */
715 clk_enable(fbi->clk); 715 clk_prepare_enable(fbi->clk);
716 716
717 pxa168fb_set_par(info); 717 pxa168fb_set_par(info);
718 718
@@ -767,7 +767,7 @@ static int pxa168fb_probe(struct platform_device *pdev)
767failed_free_cmap: 767failed_free_cmap:
768 fb_dealloc_cmap(&info->cmap); 768 fb_dealloc_cmap(&info->cmap);
769failed_free_clk: 769failed_free_clk:
770 clk_disable(fbi->clk); 770 clk_disable_unprepare(fbi->clk);
771failed_free_fbmem: 771failed_free_fbmem:
772 dma_free_coherent(fbi->dev, info->fix.smem_len, 772 dma_free_coherent(fbi->dev, info->fix.smem_len,
773 info->screen_base, fbi->fb_start_dma); 773 info->screen_base, fbi->fb_start_dma);
@@ -807,7 +807,7 @@ static int pxa168fb_remove(struct platform_device *pdev)
807 dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len), 807 dma_free_wc(fbi->dev, PAGE_ALIGN(info->fix.smem_len),
808 info->screen_base, info->fix.smem_start); 808 info->screen_base, info->fix.smem_start);
809 809
810 clk_disable(fbi->clk); 810 clk_disable_unprepare(fbi->clk);
811 811
812 framebuffer_release(info); 812 framebuffer_release(info);
813 813
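The pxa168fb hunks above switch from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), which is needed once the common clock framework may have to prepare the clock (a step that can sleep) before it is enabled. A minimal probe/remove-style sketch of the pairing (hypothetical driver skeleton):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *example_clk;

static int example_clock_on(struct device *dev)
{
        int ret;

        example_clk = devm_clk_get(dev, NULL);
        if (IS_ERR(example_clk))
                return PTR_ERR(example_clk);

        /* prepare (may sleep) and enable in one call */
        ret = clk_prepare_enable(example_clk);
        if (ret)
                return ret;

        return 0;
}

static void example_clock_off(void)
{
        /* must mirror clk_prepare_enable() */
        clk_disable_unprepare(example_clk);
}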
diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c
index 045e8afe398b..9e88e3f594c2 100644
--- a/drivers/video/fbdev/stifb.c
+++ b/drivers/video/fbdev/stifb.c
@@ -1157,7 +1157,7 @@ static int __init stifb_init_fb(struct sti_struct *sti, int bpp_pref)
1157 dev_name); 1157 dev_name);
1158 goto out_err0; 1158 goto out_err0;
1159 } 1159 }
1160 /* fall though */ 1160 /* fall through */
1161 case S9000_ID_ARTIST: 1161 case S9000_ID_ARTIST:
1162 case S9000_ID_HCRX: 1162 case S9000_ID_HCRX:
1163 case S9000_ID_TIMBER: 1163 case S9000_ID_TIMBER:
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 7bafa703a992..84575baceebc 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1040,18 +1040,33 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                 return ret;
 
         for (i = 0; i < count; i++) {
-                /* Retry eagain maps */
-                if (map_ops[i].status == GNTST_eagain)
-                        gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
-                                                &map_ops[i].status, __func__);
-
-                if (map_ops[i].status == GNTST_okay) {
+                switch (map_ops[i].status) {
+                case GNTST_okay:
+                {
                         struct xen_page_foreign *foreign;
 
                         SetPageForeign(pages[i]);
                         foreign = xen_page_foreign(pages[i]);
                         foreign->domid = map_ops[i].dom;
                         foreign->gref = map_ops[i].ref;
+                        break;
+                }
+
+                case GNTST_no_device_space:
+                        pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
+                        break;
+
+                case GNTST_eagain:
+                        /* Retry eagain maps */
+                        gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
+                                                map_ops + i,
+                                                &map_ops[i].status, __func__);
+                        /* Test status in next loop iteration. */
+                        i--;
+                        break;
+
+                default:
+                        break;
                 }
         }
 