author		Ingo Molnar <mingo@kernel.org>	2018-03-27 02:43:39 -0400
committer	Ingo Molnar <mingo@kernel.org>	2018-03-27 02:43:39 -0400
commit		0bc91d4ba77156ae9217d25ed7c434540f950d05 (patch)
tree		949c1acf27b106184d8842586740fbbcc9c9ea62 /drivers
parent		565977a3d929fc4427769117a8ac976ec16776d5 (diff)
parent		3eb2ce825ea1ad89d20f7a3b5780df850e4be274 (diff)
Merge tag 'v4.16-rc7' into x86/mm, to fix up conflict
Conflicts:
	arch/x86/mm/init_64.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers')
 drivers/acpi/acpi_watchdog.c | 4
 drivers/acpi/battery.c | 48
 drivers/acpi/nfit/core.c | 10
 drivers/acpi/numa.c | 10
 drivers/ata/ahci.c | 4
 drivers/ata/libahci.c | 10
 drivers/ata/libahci_platform.c | 2
 drivers/ata/libata-core.c | 26
 drivers/ata/libata-eh.c | 3
 drivers/ata/libata-scsi.c | 12
 drivers/ata/sata_rcar.c | 62
 drivers/auxdisplay/img-ascii-lcd.c | 6
 drivers/auxdisplay/panel.c | 6
 drivers/bluetooth/btusb.c | 8
 drivers/bluetooth/hci_bcm.c | 13
 drivers/clk/bcm/clk-bcm2835.c | 12
 drivers/clk/clk-aspeed.c | 28
 drivers/clk/clk.c | 46
 drivers/clk/hisilicon/clk-hi3660-stub.c | 2
 drivers/clk/imx/clk-imx51-imx53.c | 20
 drivers/clk/qcom/apcs-msm8916.c | 5
 drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 6
 drivers/clk/ti/clk-33xx.c | 2
 drivers/clk/ti/clk-43xx.c | 2
 drivers/clk/ti/clkctrl.c | 2
 drivers/dma/stm32-dmamux.c | 9
 drivers/firmware/dcdbas.c | 2
 drivers/firmware/efi/libstub/tpm.c | 4
 drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 31
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 9
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
 drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 1
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 2
 drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 4
 drivers/gpu/drm/amd/amdgpu/atombios_encoders.h | 5
 drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 8
 drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 8
 drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 8
 drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 8
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 2
 drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h | 8
 drivers/gpu/drm/amd/display/dc/dce/dce_opp.c | 9
 drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 20
 drivers/gpu/drm/ast/ast_tables.h | 4
 drivers/gpu/drm/drm_framebuffer.c | 7
 drivers/gpu/drm/i915/gvt/cmd_parser.c | 8
 drivers/gpu/drm/i915/gvt/mmio_context.c | 2
 drivers/gpu/drm/i915/gvt/scheduler.c | 71
 drivers/gpu/drm/i915/gvt/scheduler.h | 5
 drivers/gpu/drm/i915/i915_gem.c | 16
 drivers/gpu/drm/i915/i915_sysfs.c | 10
 drivers/gpu/drm/i915/intel_ddi.c | 7
 drivers/gpu/drm/i915/intel_dp.c | 10
 drivers/gpu/drm/i915/intel_hangcheck.c | 4
 drivers/gpu/drm/imx/ipuv3-crtc.c | 5
 drivers/gpu/drm/imx/ipuv3-plane.c | 10
 drivers/gpu/drm/nouveau/nouveau_backlight.c | 14
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c | 2
 drivers/gpu/drm/radeon/radeon_connectors.c | 31
 drivers/gpu/drm/radeon/radeon_gem.c | 2
 drivers/gpu/drm/radeon/radeon_object.c | 2
 drivers/gpu/drm/sun4i/sun4i_drv.c | 3
 drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | 6
 drivers/gpu/drm/sun4i/sun4i_tcon.c | 5
 drivers/gpu/drm/tegra/dc.c | 16
 drivers/gpu/drm/tegra/drm.c | 1
 drivers/gpu/drm/tegra/dsi.c | 1
 drivers/gpu/drm/tegra/plane.c | 9
 drivers/gpu/drm/udl/udl_fb.c | 9
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 13
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 1
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 39
 drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 13
 drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 5
 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 5
 drivers/gpu/ipu-v3/ipu-prg.c | 12
 drivers/hv/ring_buffer.c | 52
 drivers/iio/accel/st_accel_core.c | 7
 drivers/iio/adc/meson_saradc.c | 4
 drivers/iio/adc/stm32-dfsdm-adc.c | 39
 drivers/iio/adc/stm32-dfsdm-core.c | 12
 drivers/iio/chemical/ccs811.c | 3
 drivers/iio/pressure/st_pressure_core.c | 2
 drivers/infiniband/core/cma.c | 15
 drivers/infiniband/core/ucma.c | 36
 drivers/infiniband/hw/bnxt_re/bnxt_re.h | 4
 drivers/infiniband/hw/bnxt_re/ib_verbs.c | 2
 drivers/infiniband/hw/bnxt_re/qplib_fp.c | 4
 drivers/infiniband/hw/mlx5/main.c | 21
 drivers/infiniband/hw/mlx5/mlx5_ib.h | 3
 drivers/infiniband/hw/mlx5/mr.c | 12
 drivers/infiniband/hw/mlx5/qp.c | 23
 drivers/infiniband/hw/mlx5/srq.c | 15
 drivers/infiniband/sw/rdmavt/mr.c | 10
 drivers/irqchip/irq-gic-v3-its.c | 13
 drivers/irqchip/irq-imx-gpcv2.c | 14
 drivers/md/dm-mpath.c | 43
 drivers/media/Kconfig | 2
 drivers/media/platform/tegra-cec/tegra_cec.c | 17
 drivers/mmc/core/block.c | 19
 drivers/mmc/core/card.h | 1
 drivers/mmc/core/quirks.h | 6
 drivers/mmc/host/dw_mmc-exynos.c | 8
 drivers/mmc/host/dw_mmc.c | 15
 drivers/mmc/host/sdhci-acpi.c | 2
 drivers/mtd/mtdchar.c | 4
 drivers/mtd/nand/fsl_ifc_nand.c | 32
 drivers/net/can/cc770/cc770.c | 100
 drivers/net/can/cc770/cc770.h | 2
 drivers/net/can/ifi_canfd/ifi_canfd.c | 75
 drivers/net/can/m_can/m_can.c | 7
 drivers/net/can/peak_canfd/peak_canfd.c | 25
 drivers/net/can/peak_canfd/peak_pciefd_main.c | 8
 drivers/net/dsa/Makefile | 5
 drivers/net/dsa/b53/b53_common.c | 4
 drivers/net/ethernet/8390/Kconfig | 2
 drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 2
 drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 22
 drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 1
 drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 15
 drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 7
 drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 2
 drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 11
 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 66
 drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | 1
 drivers/net/ethernet/aquantia/atlantic/ver.h | 2
 drivers/net/ethernet/arc/emac_rockchip.c | 6
 drivers/net/ethernet/broadcom/bcmsysport.c | 33
 drivers/net/ethernet/broadcom/bcmsysport.h | 2
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 2
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 180
 drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 33
 drivers/net/ethernet/broadcom/tg3.c | 2
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 3
 drivers/net/ethernet/cortina/gemini.c | 1
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 8
 drivers/net/ethernet/freescale/fec_main.c | 2
 drivers/net/ethernet/freescale/fman/fman_dtsec.c | 2
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 2
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 2
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 2
 drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 4
 drivers/net/ethernet/intel/e1000e/defines.h | 21
 drivers/net/ethernet/intel/e1000e/ich8lan.c | 42
 drivers/net/ethernet/intel/e1000e/mac.c | 25
 drivers/net/ethernet/intel/e1000e/netdev.c | 37
 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 11
 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 1
 drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 28
 drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4
 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 5
 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 12
 drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 2
 drivers/net/ethernet/natsemi/Kconfig | 6
 drivers/net/ethernet/natsemi/Makefile | 2
 drivers/net/ethernet/qlogic/qed/qed_cxt.c | 5
 drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 17
 drivers/net/ethernet/qlogic/qed/qed_rdma.c | 1
 drivers/net/ethernet/qlogic/qede/qede_main.c | 8
 drivers/net/ethernet/qlogic/qede/qede_ptp.c | 2
 drivers/net/ethernet/qualcomm/emac/emac-mac.c | 23
 drivers/net/ethernet/smsc/smsc911x.c | 4
 drivers/net/ethernet/socionext/sni_ave.c | 2
 drivers/net/ethernet/sun/sunvnet.c | 2
 drivers/net/ethernet/ti/cpsw.c | 3
 drivers/net/hyperv/hyperv_net.h | 2
 drivers/net/hyperv/netvsc.c | 52
 drivers/net/hyperv/netvsc_drv.c | 293
 drivers/net/hyperv/rndis_filter.c | 68
 drivers/net/macsec.c | 5
 drivers/net/macvlan.c | 2
 drivers/net/phy/bcm-phy-lib.c | 4
 drivers/net/phy/marvell.c | 4
 drivers/net/phy/micrel.c | 27
 drivers/net/phy/phy.c | 145
 drivers/net/phy/phy_device.c | 32
 drivers/net/phy/realtek.c | 2
 drivers/net/ppp/ppp_generic.c | 26
 drivers/net/team/team.c | 4
 drivers/net/tun.c | 3
 drivers/net/usb/usbnet.c | 10
 drivers/net/vmxnet3/vmxnet3_drv.c | 16
 drivers/net/vmxnet3/vmxnet3_int.h | 7
 drivers/net/wireless/ath/ath9k/htc_drv_init.c | 1
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h | 2
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c | 10
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c | 3
 drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c | 24
 drivers/net/wireless/intel/iwlwifi/Kconfig | 1
 drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h | 4
 drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 13
 drivers/net/wireless/intel/iwlwifi/fw/dbg.h | 3
 drivers/net/wireless/intel/iwlwifi/fw/debugfs.h | 18
 drivers/net/wireless/intel/iwlwifi/fw/init.c | 12
 drivers/net/wireless/intel/iwlwifi/fw/runtime.h | 7
 drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 8
 drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | 5
 drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | 3
 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 49
 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 2
 drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 10
 drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 28
 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 39
 drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 33
 drivers/net/wireless/intel/iwlwifi/mvm/time-event.c | 6
 drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 13
 drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 2
 drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 2
 drivers/net/wireless/mac80211_hwsim.c | 1
 drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c | 3
 drivers/nvdimm/blk.c | 3
 drivers/nvdimm/btt.c | 3
 drivers/nvdimm/pfn_devs.c | 2
 drivers/nvdimm/region_devs.c | 17
 drivers/pci/quirks.c | 2
 drivers/phy/qualcomm/phy-qcom-ufs.c | 5
 drivers/pinctrl/samsung/pinctrl-exynos-arm.c | 56
 drivers/pinctrl/samsung/pinctrl-exynos-arm64.c | 14
 drivers/pinctrl/samsung/pinctrl-s3c24xx.c | 28
 drivers/pinctrl/samsung/pinctrl-s3c64xx.c | 7
 drivers/pinctrl/samsung/pinctrl-samsung.c | 61
 drivers/pinctrl/samsung/pinctrl-samsung.h | 40
 drivers/pinctrl/sh-pfc/pfc-r8a7795.c | 1
 drivers/platform/x86/Kconfig | 7
 drivers/platform/x86/dell-smbios-base.c | 2
 drivers/platform/x86/dell-wmi.c | 2
 drivers/s390/net/qeth_core_main.c | 21
 drivers/s390/net/qeth_l2_main.c | 2
 drivers/s390/net/qeth_l3_main.c | 2
 drivers/scsi/libsas/sas_scsi_host.c | 33
 drivers/scsi/mpt3sas/mpt3sas_scsih.c | 2
 drivers/scsi/qla2xxx/qla_init.c | 1
 drivers/scsi/qla2xxx/qla_os.c | 59
 drivers/scsi/sd.c | 3
 drivers/scsi/sd_zbc.c | 35
 drivers/soc/fsl/qbman/qman.c | 28
 drivers/staging/android/ashmem.c | 23
 drivers/staging/comedi/drivers.c | 3
 drivers/staging/ncpfs/ncplib_kernel.c | 4
 drivers/tty/n_tty.c | 6
 drivers/tty/serial/8250/8250_pci.c | 21
 drivers/tty/serial/atmel_serial.c | 1
 drivers/tty/serial/earlycon.c | 3
 drivers/tty/serial/imx.c | 2
 drivers/tty/serial/serial_core.c | 2
 drivers/tty/serial/sh-sci.c | 2
 drivers/tty/tty_io.c | 9
 drivers/tty/vt/vt.c | 8
 drivers/usb/core/message.c | 4
 drivers/usb/core/quirks.c | 3
 drivers/usb/dwc2/params.c | 6
 drivers/usb/dwc3/core.c | 2
 drivers/usb/gadget/function/f_fs.c | 1
 drivers/usb/host/ohci-hcd.c | 3
 drivers/usb/host/xhci-dbgcap.c | 20
 drivers/usb/host/xhci-dbgtty.c | 20
 drivers/usb/host/xhci-pci.c | 3
 drivers/usb/host/xhci-plat.c | 11
 drivers/usb/host/xhci-rcar.c | 4
 drivers/usb/host/xhci.c | 3
 drivers/usb/host/xhci.h | 23
 drivers/usb/mon/mon_text.c | 126
 drivers/usb/musb/musb_core.c | 4
 drivers/usb/storage/uas.c | 2
 drivers/usb/storage/unusual_devs.h | 7
 drivers/usb/typec/fusb302/fusb302.c | 3
 drivers/usb/typec/tcpm.c | 163
 drivers/usb/usbip/vudc_sysfs.c | 8
 drivers/vfio/pci/vfio_pci.c | 3
 drivers/vhost/net.c | 8
 drivers/watchdog/wdat_wdt.c | 2
273 files changed, 2510 insertions, 1630 deletions
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index 11b113f8e367..ebb626ffb5fa 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -74,10 +74,10 @@ void __init acpi_watchdog_init(void)
         res.start = gas->address;
         if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
                 res.flags = IORESOURCE_MEM;
-                res.end = res.start + ALIGN(gas->access_width, 4);
+                res.end = res.start + ALIGN(gas->access_width, 4) - 1;
         } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
                 res.flags = IORESOURCE_IO;
-                res.end = res.start + gas->access_width;
+                res.end = res.start + gas->access_width - 1;
         } else {
                 pr_warn("Unsupported address space: %u\n",
                         gas->space_id);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 7128488a3a72..f2eb6c37ea0a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -70,7 +70,6 @@ static async_cookie_t async_cookie;
 static bool battery_driver_registered;
 static int battery_bix_broken_package;
 static int battery_notification_delay_ms;
-static int battery_full_discharging;
 static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -215,12 +214,9 @@ static int acpi_battery_get_property(struct power_supply *psy,
                 return -ENODEV;
         switch (psp) {
         case POWER_SUPPLY_PROP_STATUS:
-                if (battery->state & ACPI_BATTERY_STATE_DISCHARGING) {
-                        if (battery_full_discharging && battery->rate_now == 0)
-                                val->intval = POWER_SUPPLY_STATUS_FULL;
-                        else
-                                val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
-                } else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
+                if (battery->state & ACPI_BATTERY_STATE_DISCHARGING)
+                        val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+                else if (battery->state & ACPI_BATTERY_STATE_CHARGING)
                         val->intval = POWER_SUPPLY_STATUS_CHARGING;
                 else if (acpi_battery_is_charged(battery))
                         val->intval = POWER_SUPPLY_STATUS_FULL;
@@ -1170,12 +1166,6 @@ battery_notification_delay_quirk(const struct dmi_system_id *d)
         return 0;
 }
 
-static int __init battery_full_discharging_quirk(const struct dmi_system_id *d)
-{
-        battery_full_discharging = 1;
-        return 0;
-}
-
 static const struct dmi_system_id bat_dmi_table[] __initconst = {
         {
                 .callback = battery_bix_broken_package_quirk,
@@ -1193,38 +1183,6 @@ static const struct dmi_system_id bat_dmi_table[] __initconst = {
                         DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
                 },
         },
-        {
-                .callback = battery_full_discharging_quirk,
-                .ident = "ASUS GL502VSK",
-                .matches = {
-                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                        DMI_MATCH(DMI_PRODUCT_NAME, "GL502VSK"),
-                },
-        },
-        {
-                .callback = battery_full_discharging_quirk,
-                .ident = "ASUS UX305LA",
-                .matches = {
-                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                        DMI_MATCH(DMI_PRODUCT_NAME, "UX305LA"),
-                },
-        },
-        {
-                .callback = battery_full_discharging_quirk,
-                .ident = "ASUS UX360UA",
-                .matches = {
-                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                        DMI_MATCH(DMI_PRODUCT_NAME, "UX360UA"),
-                },
-        },
-        {
-                .callback = battery_full_discharging_quirk,
-                .ident = "ASUS UX410UAK",
-                .matches = {
-                        DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                        DMI_MATCH(DMI_PRODUCT_NAME, "UX410UAK"),
-                },
-        },
         {},
 };
 
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index bbe48ad20886..eb09ef55c38a 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2675,10 +2675,14 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
         else
                 ndr_desc->numa_node = NUMA_NO_NODE;
 
-        if(acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
+        /*
+         * Persistence domain bits are hierarchical, if
+         * ACPI_NFIT_CAPABILITY_CACHE_FLUSH is set then
+         * ACPI_NFIT_CAPABILITY_MEM_FLUSH is implied.
+         */
+        if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_CACHE_FLUSH)
                 set_bit(ND_REGION_PERSIST_CACHE, &ndr_desc->flags);
-
-        if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
+        else if (acpi_desc->platform_cap & ACPI_NFIT_CAPABILITY_MEM_FLUSH)
                 set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc->flags);
 
         list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 8ccaae3550d2..85167603b9c9 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -103,25 +103,27 @@ int acpi_map_pxm_to_node(int pxm)
  */
 int acpi_map_pxm_to_online_node(int pxm)
 {
-        int node, n, dist, min_dist;
+        int node, min_node;
 
         node = acpi_map_pxm_to_node(pxm);
 
         if (node == NUMA_NO_NODE)
                 node = 0;
 
+        min_node = node;
         if (!node_online(node)) {
-                min_dist = INT_MAX;
+                int min_dist = INT_MAX, dist, n;
+
                 for_each_online_node(n) {
                         dist = node_distance(node, n);
                         if (dist < min_dist) {
                                 min_dist = dist;
-                                node = n;
+                                min_node = n;
                         }
                 }
         }
 
-        return node;
+        return min_node;
 }
 EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
 
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 355a95a83a34..1ff17799769d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -550,7 +550,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
           .driver_data = board_ahci_yes_fbs },
         { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230),
           .driver_data = board_ahci_yes_fbs },
-        { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642),
+        { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0642), /* highpoint rocketraid 642L */
+          .driver_data = board_ahci_yes_fbs },
+        { PCI_DEVICE(PCI_VENDOR_ID_TTI, 0x0645), /* highpoint rocketraid 644L */
           .driver_data = board_ahci_yes_fbs },
 
         /* Promise */
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index a0de7a38430c..7adcf3caabd0 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -665,6 +665,16 @@ int ahci_stop_engine(struct ata_port *ap)
         if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
                 return 0;
 
+        /*
+         * Don't try to issue commands but return with ENODEV if the
+         * AHCI controller not available anymore (e.g. due to PCIe hot
+         * unplugging). Otherwise a 500ms delay for each port is added.
+         */
+        if (tmp == 0xffffffff) {
+                dev_err(ap->host->dev, "AHCI controller unavailable!\n");
+                return -ENODEV;
+        }
+
         /* setting HBA to idle */
         tmp &= ~PORT_CMD_START;
         writel(tmp, port_mmio + PORT_CMD);
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 341d0ef82cbd..30cc8f1a31e1 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -340,7 +340,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
340 * 2) regulator for controlling the targets power (optional) 340 * 2) regulator for controlling the targets power (optional)
341 * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node, 341 * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,
342 * or for non devicetree enabled platforms a single clock 342 * or for non devicetree enabled platforms a single clock
343 * 4) phys (optional) 343 * 4) phys (optional)
344 * 344 *
345 * RETURNS: 345 * RETURNS:
346 * The allocated ahci_host_priv on success, otherwise an ERR_PTR value 346 * The allocated ahci_host_priv on success, otherwise an ERR_PTR value
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3c09122bf038..7431ccd03316 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4530,6 +4530,25 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4530 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4530 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
4531 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4531 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
4532 4532
4533 /* Crucial BX100 SSD 500GB has broken LPM support */
4534 { "CT500BX100SSD1", NULL, ATA_HORKAGE_NOLPM },
4535
4536 /* 512GB MX100 with MU01 firmware has both queued TRIM and LPM issues */
4537 { "Crucial_CT512MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4538 ATA_HORKAGE_ZERO_AFTER_TRIM |
4539 ATA_HORKAGE_NOLPM, },
4540 /* 512GB MX100 with newer firmware has only LPM issues */
4541 { "Crucial_CT512MX100*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM |
4542 ATA_HORKAGE_NOLPM, },
4543
4544 /* 480GB+ M500 SSDs have both queued TRIM and LPM issues */
4545 { "Crucial_CT480M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4546 ATA_HORKAGE_ZERO_AFTER_TRIM |
4547 ATA_HORKAGE_NOLPM, },
4548 { "Crucial_CT960M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4549 ATA_HORKAGE_ZERO_AFTER_TRIM |
4550 ATA_HORKAGE_NOLPM, },
4551
4533 /* devices that don't properly handle queued TRIM commands */ 4552 /* devices that don't properly handle queued TRIM commands */
4534 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4553 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4535 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4554 ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4541,7 +4560,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
                                  ATA_HORKAGE_ZERO_AFTER_TRIM, },
         { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
                                        ATA_HORKAGE_ZERO_AFTER_TRIM, },
-        { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+        { "Samsung SSD 840*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+                                    ATA_HORKAGE_ZERO_AFTER_TRIM, },
+        { "Samsung SSD 850*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
                                     ATA_HORKAGE_ZERO_AFTER_TRIM, },
         { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
                               ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -5401,8 +5422,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
          * We guarantee to LLDs that they will have at least one
          * non-zero sg if the command is a data command.
          */
-        if (WARN_ON_ONCE(ata_is_data(prot) &&
-                         (!qc->sg || !qc->n_elem || !qc->nbytes)))
+        if (ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes))
                 goto sys_err;
 
         if (ata_is_dma(prot) || (ata_is_pio(prot) &&
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 11c3137d7b0a..c016829a38fd 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -815,7 +815,8 @@ void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap)
 
         if (ap->pflags & ATA_PFLAG_LOADING)
                 ap->pflags &= ~ATA_PFLAG_LOADING;
-        else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
+        else if ((ap->pflags & ATA_PFLAG_SCSI_HOTPLUG) &&
+                 !(ap->flags & ATA_FLAG_SAS_HOST))
                 schedule_delayed_work(&ap->hotplug_task, 0);
 
         if (ap->pflags & ATA_PFLAG_RECOVERED)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 66be961c93a4..89a9d4a2efc8 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3316,6 +3316,12 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
                 goto invalid_fld;
         }
 
+        /* We may not issue NCQ commands to devices not supporting NCQ */
+        if (ata_is_ncq(tf->protocol) && !ata_ncq_enabled(dev)) {
+                fp = 1;
+                goto invalid_fld;
+        }
+
         /* sanity check for pio multi commands */
         if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) {
                 fp = 1;
@@ -4282,7 +4288,7 @@ static inline void ata_scsi_dump_cdb(struct ata_port *ap,
 #ifdef ATA_DEBUG
         struct scsi_device *scsidev = cmd->device;
 
-        DPRINTK("CDB (%u:%d,%d,%d) %9ph\n",
+        DPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
                 ap->print_id,
                 scsidev->channel, scsidev->id, scsidev->lun,
                 cmd->cmnd);
@@ -4309,7 +4315,9 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
         if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
                 /* relay SCSI command to ATAPI device */
                 int len = COMMAND_SIZE(scsi_op);
-                if (unlikely(len > scmd->cmd_len || len > dev->cdb_len))
+                if (unlikely(len > scmd->cmd_len ||
+                             len > dev->cdb_len ||
+                             scmd->cmd_len > ATAPI_CDB_LEN))
                         goto bad_cdb_len;
 
                 xlat_func = atapi_xlat;
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 80ee2f2a50d0..6456e07db72a 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -146,6 +146,7 @@
 enum sata_rcar_type {
         RCAR_GEN1_SATA,
         RCAR_GEN2_SATA,
+        RCAR_GEN3_SATA,
         RCAR_R8A7790_ES1_SATA,
 };
 
@@ -784,26 +785,11 @@ static void sata_rcar_setup_port(struct ata_host *host)
         ioaddr->command_addr = ioaddr->cmd_addr + (ATA_REG_CMD << 2);
 }
 
-static void sata_rcar_init_controller(struct ata_host *host)
+static void sata_rcar_init_module(struct sata_rcar_priv *priv)
 {
-        struct sata_rcar_priv *priv = host->private_data;
         void __iomem *base = priv->base;
         u32 val;
 
-        /* reset and setup phy */
-        switch (priv->type) {
-        case RCAR_GEN1_SATA:
-                sata_rcar_gen1_phy_init(priv);
-                break;
-        case RCAR_GEN2_SATA:
-        case RCAR_R8A7790_ES1_SATA:
-                sata_rcar_gen2_phy_init(priv);
-                break;
-        default:
-                dev_warn(host->dev, "SATA phy is not initialized\n");
-                break;
-        }
-
         /* SATA-IP reset state */
         val = ioread32(base + ATAPI_CONTROL1_REG);
         val |= ATAPI_CONTROL1_RESET;
@@ -824,10 +810,33 @@ static void sata_rcar_init_controller(struct ata_host *host)
         /* ack and mask */
         iowrite32(0, base + SATAINTSTAT_REG);
         iowrite32(0x7ff, base + SATAINTMASK_REG);
+
         /* enable interrupts */
         iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);
 }
 
+static void sata_rcar_init_controller(struct ata_host *host)
+{
+        struct sata_rcar_priv *priv = host->private_data;
+
+        /* reset and setup phy */
+        switch (priv->type) {
+        case RCAR_GEN1_SATA:
+                sata_rcar_gen1_phy_init(priv);
+                break;
+        case RCAR_GEN2_SATA:
+        case RCAR_GEN3_SATA:
+        case RCAR_R8A7790_ES1_SATA:
+                sata_rcar_gen2_phy_init(priv);
+                break;
+        default:
+                dev_warn(host->dev, "SATA phy is not initialized\n");
+                break;
+        }
+
+        sata_rcar_init_module(priv);
+}
+
 static const struct of_device_id sata_rcar_match[] = {
         {
                 /* Deprecated by "renesas,sata-r8a7779" */
@@ -856,7 +865,7 @@ static const struct of_device_id sata_rcar_match[] = {
         },
         {
                 .compatible = "renesas,sata-r8a7795",
-                .data = (void *)RCAR_GEN2_SATA
+                .data = (void *)RCAR_GEN3_SATA
         },
         {
                 .compatible = "renesas,rcar-gen2-sata",
@@ -864,7 +873,7 @@ static const struct of_device_id sata_rcar_match[] = {
         },
         {
                 .compatible = "renesas,rcar-gen3-sata",
-                .data = (void *)RCAR_GEN2_SATA
+                .data = (void *)RCAR_GEN3_SATA
         },
         { },
 };
@@ -982,11 +991,18 @@ static int sata_rcar_resume(struct device *dev)
         if (ret)
                 return ret;
 
-        /* ack and mask */
-        iowrite32(0, base + SATAINTSTAT_REG);
-        iowrite32(0x7ff, base + SATAINTMASK_REG);
-        /* enable interrupts */
-        iowrite32(ATAPI_INT_ENABLE_SATAINT, base + ATAPI_INT_ENABLE_REG);
+        if (priv->type == RCAR_GEN3_SATA) {
+                sata_rcar_gen2_phy_init(priv);
+                sata_rcar_init_module(priv);
+        } else {
+                /* ack and mask */
+                iowrite32(0, base + SATAINTSTAT_REG);
+                iowrite32(0x7ff, base + SATAINTMASK_REG);
+
+                /* enable interrupts */
+                iowrite32(ATAPI_INT_ENABLE_SATAINT,
+                          base + ATAPI_INT_ENABLE_REG);
+        }
 
         ata_host_resume(host);
 
diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c
index 9180b9bd5821..834509506ef6 100644
--- a/drivers/auxdisplay/img-ascii-lcd.c
+++ b/drivers/auxdisplay/img-ascii-lcd.c
@@ -97,7 +97,7 @@ static struct img_ascii_lcd_config boston_config = {
 static void malta_update(struct img_ascii_lcd_ctx *ctx)
 {
         unsigned int i;
-        int err;
+        int err = 0;
 
         for (i = 0; i < ctx->cfg->num_chars; i++) {
                 err = regmap_write(ctx->regmap,
@@ -180,7 +180,7 @@ static int sead3_wait_lcd_idle(struct img_ascii_lcd_ctx *ctx)
 static void sead3_update(struct img_ascii_lcd_ctx *ctx)
 {
         unsigned int i;
-        int err;
+        int err = 0;
 
         for (i = 0; i < ctx->cfg->num_chars; i++) {
                 err = sead3_wait_lcd_idle(ctx);
@@ -224,7 +224,7 @@ MODULE_DEVICE_TABLE(of, img_ascii_lcd_matches);
 
 /**
  * img_ascii_lcd_scroll() - scroll the display by a character
- * @arg: really a pointer to the private data structure
+ * @t: really a pointer to the private data structure
  *
  * Scroll the current message along the LCD by one character, rearming the
  * timer if required.
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index ea7869c0d7f9..ec5e8800f8ad 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1372,7 +1372,7 @@ static void panel_process_inputs(void)
                         break;
                 input->rise_timer = 0;
                 input->state = INPUT_ST_RISING;
-                /* no break here, fall through */
+                /* fall through */
         case INPUT_ST_RISING:
                 if ((phys_curr & input->mask) != input->value) {
                         input->state = INPUT_ST_LOW;
@@ -1385,11 +1385,11 @@ static void panel_process_inputs(void)
                 }
                 input->high_timer = 0;
                 input->state = INPUT_ST_HIGH;
-                /* no break here, fall through */
+                /* fall through */
         case INPUT_ST_HIGH:
                 if (input_state_high(input))
                         break;
-                /* no break here, fall through */
+                /* fall through */
         case INPUT_ST_FALLING:
                 input_state_falling(input);
         }
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 60bf04b8f103..366a49c7c08f 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -231,7 +231,6 @@ static const struct usb_device_id blacklist_table[] = {
         { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
-        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
         { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
@@ -264,6 +263,7 @@ static const struct usb_device_id blacklist_table[] = {
         { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 
         /* QCA ROME chipset */
+        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
         { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
         { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
         { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
@@ -386,10 +386,10 @@ static const struct usb_device_id blacklist_table[] = {
  */
 static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
         {
-                /* Lenovo Yoga 920 (QCA Rome device 0cf3:e300) */
+                /* Dell OptiPlex 3060 (QCA ROME device 0cf3:e007) */
                 .matches = {
-                        DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 920"),
+                        DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"),
                 },
         },
         {}
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 6314dfb02969..40b9fb247010 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -244,7 +244,9 @@ static irqreturn_t bcm_host_wake(int irq, void *data)
 
         bt_dev_dbg(bdev, "Host wake IRQ");
 
-        pm_request_resume(bdev->dev);
+        pm_runtime_get(bdev->dev);
+        pm_runtime_mark_last_busy(bdev->dev);
+        pm_runtime_put_autosuspend(bdev->dev);
 
         return IRQ_HANDLED;
 }
@@ -301,7 +303,7 @@ static const struct bcm_set_sleep_mode default_sleep_params = {
         .usb_auto_sleep = 0,
         .usb_resume_timeout = 0,
         .break_to_host = 0,
-        .pulsed_host_wake = 0,
+        .pulsed_host_wake = 1,
 };
 
 static int bcm_setup_sleep(struct hci_uart *hu)
@@ -586,8 +588,11 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
         } else if (!bcm->rx_skb) {
                 /* Delay auto-suspend when receiving completed packet */
                 mutex_lock(&bcm_device_lock);
-                if (bcm->dev && bcm_device_exists(bcm->dev))
-                        pm_request_resume(bcm->dev->dev);
+                if (bcm->dev && bcm_device_exists(bcm->dev)) {
+                        pm_runtime_get(bcm->dev->dev);
+                        pm_runtime_mark_last_busy(bcm->dev->dev);
+                        pm_runtime_put_autosuspend(bcm->dev->dev);
+                }
                 mutex_unlock(&bcm_device_lock);
         }
 
diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
index 44301a3d9963..a07f6451694a 100644
--- a/drivers/clk/bcm/clk-bcm2835.c
+++ b/drivers/clk/bcm/clk-bcm2835.c
@@ -449,17 +449,17 @@ struct bcm2835_pll_ana_bits {
 static const struct bcm2835_pll_ana_bits bcm2835_ana_default = {
         .mask0 = 0,
         .set0 = 0,
-        .mask1 = (u32)~(A2W_PLL_KI_MASK | A2W_PLL_KP_MASK),
+        .mask1 = A2W_PLL_KI_MASK | A2W_PLL_KP_MASK,
         .set1 = (2 << A2W_PLL_KI_SHIFT) | (8 << A2W_PLL_KP_SHIFT),
-        .mask3 = (u32)~A2W_PLL_KA_MASK,
+        .mask3 = A2W_PLL_KA_MASK,
         .set3 = (2 << A2W_PLL_KA_SHIFT),
         .fb_prediv_mask = BIT(14),
 };
 
 static const struct bcm2835_pll_ana_bits bcm2835_ana_pllh = {
-        .mask0 = (u32)~(A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK),
+        .mask0 = A2W_PLLH_KA_MASK | A2W_PLLH_KI_LOW_MASK,
         .set0 = (2 << A2W_PLLH_KA_SHIFT) | (2 << A2W_PLLH_KI_LOW_SHIFT),
-        .mask1 = (u32)~(A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK),
+        .mask1 = A2W_PLLH_KI_HIGH_MASK | A2W_PLLH_KP_MASK,
         .set1 = (6 << A2W_PLLH_KP_SHIFT),
         .mask3 = 0,
         .set3 = 0,
@@ -623,8 +623,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
                      ~A2W_PLL_CTRL_PWRDN);
 
         /* Take the PLL out of reset. */
+        spin_lock(&cprman->regs_lock);
         cprman_write(cprman, data->cm_ctrl_reg,
                      cprman_read(cprman, data->cm_ctrl_reg) & ~CM_PLL_ANARST);
+        spin_unlock(&cprman->regs_lock);
 
         /* Wait for the PLL to lock. */
         timeout = ktime_add_ns(ktime_get(), LOCK_TIMEOUT_NS);
@@ -701,9 +703,11 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw,
         }
 
         /* Unmask the reference clock from the oscillator. */
+        spin_lock(&cprman->regs_lock);
         cprman_write(cprman, A2W_XOSC_CTRL,
                      cprman_read(cprman, A2W_XOSC_CTRL) |
                      data->reference_enable_mask);
+        spin_unlock(&cprman->regs_lock);
 
         if (do_ana_setup_first)
                 bcm2835_pll_write_ana(cprman, data->ana_reg_base, ana);
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 9f7f931d6b2f..5eb50c31e455 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -205,6 +205,18 @@ static const struct aspeed_clk_soc_data ast2400_data = {
         .calc_pll = aspeed_ast2400_calc_pll,
 };
 
+static int aspeed_clk_is_enabled(struct clk_hw *hw)
+{
+        struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
+        u32 clk = BIT(gate->clock_idx);
+        u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
+        u32 reg;
+
+        regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
+
+        return ((reg & clk) == enval) ? 1 : 0;
+}
+
 static int aspeed_clk_enable(struct clk_hw *hw)
 {
         struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
@@ -215,6 +227,11 @@ static int aspeed_clk_enable(struct clk_hw *hw)
 
         spin_lock_irqsave(gate->lock, flags);
 
+        if (aspeed_clk_is_enabled(hw)) {
+                spin_unlock_irqrestore(gate->lock, flags);
+                return 0;
+        }
+
         if (gate->reset_idx >= 0) {
                 /* Put IP in reset */
                 regmap_update_bits(gate->map, ASPEED_RESET_CTRL, rst, rst);
@@ -255,17 +272,6 @@ static void aspeed_clk_disable(struct clk_hw *hw)
         spin_unlock_irqrestore(gate->lock, flags);
 }
 
-static int aspeed_clk_is_enabled(struct clk_hw *hw)
-{
-        struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
-        u32 clk = BIT(gate->clock_idx);
-        u32 reg;
-
-        regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
-
-        return (reg & clk) ? 0 : 1;
-}
-
 static const struct clk_ops aspeed_clk_gate_ops = {
         .enable = aspeed_clk_enable,
         .disable = aspeed_clk_disable,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0f686a9dac3e..076d4244d672 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1125,8 +1125,10 @@ static int clk_core_round_rate_nolock(struct clk_core *core,
 {
         lockdep_assert_held(&prepare_lock);
 
-        if (!core)
+        if (!core) {
+                req->rate = 0;
                 return 0;
+        }
 
         clk_core_init_rate_req(core, req);
 
@@ -2309,8 +2311,11 @@ static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
 
         trace_clk_set_phase(core, degrees);
 
-        if (core->ops->set_phase)
+        if (core->ops->set_phase) {
                 ret = core->ops->set_phase(core->hw, degrees);
+                if (!ret)
+                        core->phase = degrees;
+        }
 
         trace_clk_set_phase_complete(core, degrees);
 
@@ -2968,22 +2973,37 @@ static int __clk_core_init(struct clk_core *core)
         core->rate = core->req_rate = rate;
 
         /*
+         * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
+         * don't get accidentally disabled when walking the orphan tree and
+         * reparenting clocks
+         */
+        if (core->flags & CLK_IS_CRITICAL) {
+                unsigned long flags;
+
+                clk_core_prepare(core);
+
+                flags = clk_enable_lock();
+                clk_core_enable(core);
+                clk_enable_unlock(flags);
+        }
+
+        /*
          * walk the list of orphan clocks and reparent any that newly finds a
          * parent.
          */
         hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
                 struct clk_core *parent = __clk_init_parent(orphan);
-                unsigned long flags;
 
                 /*
-                 * we could call __clk_set_parent, but that would result in a
-                 * redundant call to the .set_rate op, if it exists
+                 * We need to use __clk_set_parent_before() and _after() to
+                 * to properly migrate any prepare/enable count of the orphan
+                 * clock. This is important for CLK_IS_CRITICAL clocks, which
+                 * are enabled during init but might not have a parent yet.
                  */
                 if (parent) {
                         /* update the clk tree topology */
-                        flags = clk_enable_lock();
-                        clk_reparent(orphan, parent);
-                        clk_enable_unlock(flags);
+                        __clk_set_parent_before(orphan, parent);
+                        __clk_set_parent_after(orphan, parent, NULL);
                         __clk_recalc_accuracies(orphan);
                         __clk_recalc_rates(orphan, 0);
                 }
@@ -3000,16 +3020,6 @@ static int __clk_core_init(struct clk_core *core)
         if (core->ops->init)
                 core->ops->init(core->hw);
 
-        if (core->flags & CLK_IS_CRITICAL) {
-                unsigned long flags;
-
-                clk_core_prepare(core);
-
-                flags = clk_enable_lock();
-                clk_core_enable(core);
-                clk_enable_unlock(flags);
-        }
-
         kref_init(&core->ref);
 out:
         clk_pm_runtime_put(core);
diff --git a/drivers/clk/hisilicon/clk-hi3660-stub.c b/drivers/clk/hisilicon/clk-hi3660-stub.c
index 9b6c72bbddf9..e8b2c43b1bb8 100644
--- a/drivers/clk/hisilicon/clk-hi3660-stub.c
+++ b/drivers/clk/hisilicon/clk-hi3660-stub.c
@@ -149,6 +149,8 @@ static int hi3660_stub_clk_probe(struct platform_device *pdev)
                 return PTR_ERR(stub_clk_chan.mbox);
 
         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+        if (!res)
+                return -EINVAL;
         freq_reg = devm_ioremap(dev, res->start, resource_size(res));
         if (!freq_reg)
                 return -ENOMEM;
diff --git a/drivers/clk/imx/clk-imx51-imx53.c b/drivers/clk/imx/clk-imx51-imx53.c
index c864992e6983..caa8bd40692c 100644
--- a/drivers/clk/imx/clk-imx51-imx53.c
+++ b/drivers/clk/imx/clk-imx51-imx53.c
@@ -131,7 +131,17 @@ static const char *ieee1588_sels[] = { "pll3_sw", "pll4_sw", "dummy" /* usbphy2_
 static struct clk *clk[IMX5_CLK_END];
 static struct clk_onecell_data clk_data;
 
-static struct clk ** const uart_clks[] __initconst = {
+static struct clk ** const uart_clks_mx51[] __initconst = {
+        &clk[IMX5_CLK_UART1_IPG_GATE],
+        &clk[IMX5_CLK_UART1_PER_GATE],
+        &clk[IMX5_CLK_UART2_IPG_GATE],
+        &clk[IMX5_CLK_UART2_PER_GATE],
+        &clk[IMX5_CLK_UART3_IPG_GATE],
+        &clk[IMX5_CLK_UART3_PER_GATE],
+        NULL
+};
+
+static struct clk ** const uart_clks_mx50_mx53[] __initconst = {
         &clk[IMX5_CLK_UART1_IPG_GATE],
         &clk[IMX5_CLK_UART1_PER_GATE],
         &clk[IMX5_CLK_UART2_IPG_GATE],
@@ -321,8 +331,6 @@ static void __init mx5_clocks_common_init(void __iomem *ccm_base)
         clk_prepare_enable(clk[IMX5_CLK_TMAX1]);
         clk_prepare_enable(clk[IMX5_CLK_TMAX2]); /* esdhc2, fec */
         clk_prepare_enable(clk[IMX5_CLK_TMAX3]); /* esdhc1, esdhc4 */
-
-        imx_register_uart_clocks(uart_clks);
 }
 
 static void __init mx50_clocks_init(struct device_node *np)
@@ -388,6 +396,8 @@ static void __init mx50_clocks_init(struct device_node *np)
 
         r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
         clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
+
+        imx_register_uart_clocks(uart_clks_mx50_mx53);
 }
 CLK_OF_DECLARE(imx50_ccm, "fsl,imx50-ccm", mx50_clocks_init);
 
@@ -477,6 +487,8 @@ static void __init mx51_clocks_init(struct device_node *np)
         val = readl(MXC_CCM_CLPCR);
         val |= 1 << 23;
         writel(val, MXC_CCM_CLPCR);
+
+        imx_register_uart_clocks(uart_clks_mx51);
 }
 CLK_OF_DECLARE(imx51_ccm, "fsl,imx51-ccm", mx51_clocks_init);
 
@@ -606,5 +618,7 @@ static void __init mx53_clocks_init(struct device_node *np)
 
         r = clk_round_rate(clk[IMX5_CLK_USBOH3_PER_GATE], 54000000);
         clk_set_rate(clk[IMX5_CLK_USBOH3_PER_GATE], r);
+
+        imx_register_uart_clocks(uart_clks_mx50_mx53);
 }
 CLK_OF_DECLARE(imx53_ccm, "fsl,imx53-ccm", mx53_clocks_init);
diff --git a/drivers/clk/qcom/apcs-msm8916.c b/drivers/clk/qcom/apcs-msm8916.c
index 246957f1a413..b1cc8dbcd327 100644
--- a/drivers/clk/qcom/apcs-msm8916.c
+++ b/drivers/clk/qcom/apcs-msm8916.c
@@ -49,11 +49,10 @@ static int qcom_apcs_msm8916_clk_probe(struct platform_device *pdev)
         struct clk_regmap_mux_div *a53cc;
         struct regmap *regmap;
         struct clk_init_data init = { };
-        int ret;
+        int ret = -ENODEV;
 
         regmap = dev_get_regmap(parent, NULL);
-        if (IS_ERR(regmap)) {
-                ret = PTR_ERR(regmap);
+        if (!regmap) {
                 dev_err(dev, "failed to get regmap: %d\n", ret);
                 return ret;
         }
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 72b16ed1012b..3b97f60540ad 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -762,7 +762,7 @@ static struct ccu_mp out_a_clk = {
                 .features = CCU_FEATURE_FIXED_PREDIV,
                 .hw.init = CLK_HW_INIT_PARENTS("out-a",
                                                clk_out_parents,
-                                               &ccu_div_ops,
+                                               &ccu_mp_ops,
                                                0),
         },
 };
@@ -783,7 +783,7 @@ static struct ccu_mp out_b_clk = {
                 .features = CCU_FEATURE_FIXED_PREDIV,
                 .hw.init = CLK_HW_INIT_PARENTS("out-b",
                                                clk_out_parents,
-                                               &ccu_div_ops,
+                                               &ccu_mp_ops,
                                                0),
         },
 };
@@ -804,7 +804,7 @@ static struct ccu_mp out_c_clk = {
                 .features = CCU_FEATURE_FIXED_PREDIV,
                 .hw.init = CLK_HW_INIT_PARENTS("out-c",
                                                clk_out_parents,
-                                               &ccu_div_ops,
+                                               &ccu_mp_ops,
                                                0),
         },
 };
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 612491a26070..12e0a2d19911 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -45,7 +45,7 @@ static const struct omap_clkctrl_bit_data am3_gpio4_bit_data[] __initconst = {
 
 static const struct omap_clkctrl_reg_data am3_l4_per_clkctrl_regs[] __initconst = {
         { AM3_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
-        { AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP, "lcd_gclk", "lcdc_clkdm" },
+        { AM3_LCDC_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "lcd_gclk", "lcdc_clkdm" },
         { AM3_USB_OTG_HS_CLKCTRL, NULL, CLKF_SW_SUP, "usbotg_fck", "l3s_clkdm" },
         { AM3_TPTC0_CLKCTRL, NULL, CLKF_SW_SUP, "l3_gclk", "l3_clkdm" },
         { AM3_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_div2_ck", "l3_clkdm" },
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index 2b7c2e017665..63c5ddb50187 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -187,7 +187,7 @@ static const struct omap_clkctrl_reg_data am4_l4_per_clkctrl_regs[] __initconst
         { AM4_OCP2SCP0_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
         { AM4_OCP2SCP1_CLKCTRL, NULL, CLKF_SW_SUP, "l4ls_gclk" },
         { AM4_EMIF_CLKCTRL, NULL, CLKF_SW_SUP, "dpll_ddr_m2_ck", "emif_clkdm" },
-        { AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP, "disp_clk", "dss_clkdm" },
+        { AM4_DSS_CORE_CLKCTRL, NULL, CLKF_SW_SUP | CLKF_SET_RATE_PARENT, "disp_clk", "dss_clkdm" },
         { AM4_CPGMAC0_CLKCTRL, NULL, CLKF_SW_SUP, "cpsw_125mhz_gclk", "cpsw_125mhz_clkdm" },
         { 0 },
 };
diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
index afa0d6bfc5c1..421b05392220 100644
--- a/drivers/clk/ti/clkctrl.c
+++ b/drivers/clk/ti/clkctrl.c
@@ -537,6 +537,8 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
537 init.parent_names = &reg_data->parent; 537 init.parent_names = &reg_data->parent;
538 init.num_parents = 1; 538 init.num_parents = 1;
539 init.flags = 0; 539 init.flags = 0;
540 if (reg_data->flags & CLKF_SET_RATE_PARENT)
541 init.flags |= CLK_SET_RATE_PARENT;
540 init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d", 542 init.name = kasprintf(GFP_KERNEL, "%s:%s:%04x:%d",
541 node->parent->name, node->name, 543 node->parent->name, node->name,
542 reg_data->offset, 0); 544 reg_data->offset, 0);
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index 4dbb30cf94ac..b922db90939a 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -118,14 +118,15 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
118 spin_lock_irqsave(&dmamux->lock, flags); 118 spin_lock_irqsave(&dmamux->lock, flags);
119 mux->chan_id = find_first_zero_bit(dmamux->dma_inuse, 119 mux->chan_id = find_first_zero_bit(dmamux->dma_inuse,
120 dmamux->dma_requests); 120 dmamux->dma_requests);
121 set_bit(mux->chan_id, dmamux->dma_inuse);
122 spin_unlock_irqrestore(&dmamux->lock, flags);
123 121
124 if (mux->chan_id == dmamux->dma_requests) { 122 if (mux->chan_id == dmamux->dma_requests) {
123 spin_unlock_irqrestore(&dmamux->lock, flags);
125 dev_err(&pdev->dev, "Run out of free DMA requests\n"); 124 dev_err(&pdev->dev, "Run out of free DMA requests\n");
126 ret = -ENOMEM; 125 ret = -ENOMEM;
127 goto error; 126 goto error_chan_id;
128 } 127 }
128 set_bit(mux->chan_id, dmamux->dma_inuse);
129 spin_unlock_irqrestore(&dmamux->lock, flags);
129 130
130 /* Look for DMA Master */ 131 /* Look for DMA Master */
131 for (i = 1, min = 0, max = dmamux->dma_reqs[i]; 132 for (i = 1, min = 0, max = dmamux->dma_reqs[i];
@@ -173,6 +174,8 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
173 174
174error: 175error:
175 clear_bit(mux->chan_id, dmamux->dma_inuse); 176 clear_bit(mux->chan_id, dmamux->dma_inuse);
177
178error_chan_id:
176 kfree(mux); 179 kfree(mux);
177 return ERR_PTR(ret); 180 return ERR_PTR(ret);
178} 181}
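The stm32-dmamux change above follows a common pattern: only mark a resource as in use once the allocation has been validated, drop the lock on every exit path, and give the failure path its own label so the cleanup code does not undo a reservation that was never made. A minimal userspace sketch of the same idea, using pthreads and invented names (alloc_slot, MAX_SLOTS), not the driver's actual code:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define MAX_SLOTS 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char in_use[MAX_SLOTS];

/* Reserve the first free slot; return its index or a negative errno. */
static int alloc_slot(void)
{
	int i;

	pthread_mutex_lock(&lock);
	for (i = 0; i < MAX_SLOTS; i++)
		if (!in_use[i])
			break;

	if (i == MAX_SLOTS) {
		/* Drop the lock before bailing out; nothing was reserved,
		 * so there is nothing to clear on this path.
		 */
		pthread_mutex_unlock(&lock);
		return -ENOMEM;
	}

	/* Only mark the slot busy once the search has succeeded. */
	in_use[i] = 1;
	pthread_mutex_unlock(&lock);
	return i;
}

int main(void)
{
	int i;

	for (i = 0; i < MAX_SLOTS + 1; i++)
		printf("alloc_slot() = %d\n", alloc_slot());
	return 0;
}

The last call in the loop exercises the failure path and returns -ENOMEM without leaving a stray bit set, which is exactly the bug the hunk above fixes.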
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index c16600f30611..0bdea60c65dd 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -639,7 +639,7 @@ static void __exit dcdbas_exit(void)
639 platform_driver_unregister(&dcdbas_driver); 639 platform_driver_unregister(&dcdbas_driver);
640} 640}
641 641
642module_init(dcdbas_init); 642subsys_initcall_sync(dcdbas_init);
643module_exit(dcdbas_exit); 643module_exit(dcdbas_exit);
644 644
645MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")"); 645MODULE_DESCRIPTION(DRIVER_DESCRIPTION " (version " DRIVER_VERSION ")");
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index da661bf8cb96..13c1edd37e96 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -68,11 +68,11 @@ void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
68 efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID; 68 efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
69 efi_status_t status; 69 efi_status_t status;
70 efi_physical_addr_t log_location, log_last_entry; 70 efi_physical_addr_t log_location, log_last_entry;
71 struct linux_efi_tpm_eventlog *log_tbl; 71 struct linux_efi_tpm_eventlog *log_tbl = NULL;
72 unsigned long first_entry_addr, last_entry_addr; 72 unsigned long first_entry_addr, last_entry_addr;
73 size_t log_size, last_entry_size; 73 size_t log_size, last_entry_size;
74 efi_bool_t truncated; 74 efi_bool_t truncated;
75 void *tcg2_protocol; 75 void *tcg2_protocol = NULL;
76 76
77 status = efi_call_early(locate_protocol, &tcg2_guid, NULL, 77 status = efi_call_early(locate_protocol, &tcg2_guid, NULL,
78 &tcg2_protocol); 78 &tcg2_protocol);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 74d2efaec52f..7a073ac5f9c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -69,25 +69,18 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
69 /* don't do anything if sink is not display port, i.e., 69 /* don't do anything if sink is not display port, i.e.,
70 * passive dp->(dvi|hdmi) adaptor 70 * passive dp->(dvi|hdmi) adaptor
71 */ 71 */
72 if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { 72 if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
73 int saved_dpms = connector->dpms; 73 amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd) &&
74 /* Only turn off the display if it's physically disconnected */ 74 amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
75 if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) { 75 /* Don't start link training before we have the DPCD */
76 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 76 if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
77 } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { 77 return;
78 /* Don't try to start link training before we 78
79 * have the dpcd */ 79 /* Turn the connector off and back on immediately, which
80 if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) 80 * will trigger link training
81 return; 81 */
82 82 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
83 /* set it to OFF so that drm_helper_connector_dpms() 83 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
84 * won't return immediately since the current state
85 * is ON at this point.
86 */
87 connector->dpms = DRM_MODE_DPMS_OFF;
88 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
89 }
90 connector->dpms = saved_dpms;
91 } 84 }
92 } 85 }
93} 86}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index af1b879a9ee9..66cb10cdc7c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2063,9 +2063,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
2063 2063
2064 DRM_INFO("amdgpu: finishing device.\n"); 2064 DRM_INFO("amdgpu: finishing device.\n");
2065 adev->shutdown = true; 2065 adev->shutdown = true;
2066 if (adev->mode_info.mode_config_initialized) 2066 if (adev->mode_info.mode_config_initialized){
2067 drm_crtc_force_disable_all(adev->ddev); 2067 if (!amdgpu_device_has_dc_support(adev))
2068 2068 drm_crtc_force_disable_all(adev->ddev);
2069 else
2070 drm_atomic_helper_shutdown(adev->ddev);
2071 }
2069 amdgpu_ib_pool_fini(adev); 2072 amdgpu_ib_pool_fini(adev);
2070 amdgpu_fence_driver_fini(adev); 2073 amdgpu_fence_driver_fini(adev);
2071 amdgpu_fbdev_fini(adev); 2074 amdgpu_fbdev_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index e48b4ec88c8c..ca6c931dabfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -36,8 +36,6 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
36 struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); 36 struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);
37 37
38 if (robj) { 38 if (robj) {
39 if (robj->gem_base.import_attach)
40 drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
41 amdgpu_mn_unregister(robj); 39 amdgpu_mn_unregister(robj);
42 amdgpu_bo_unref(&robj); 40 amdgpu_bo_unref(&robj);
43 } 41 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index 54f06c959340..2264c5c97009 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -352,6 +352,7 @@ struct amdgpu_mode_info {
352 u16 firmware_flags; 352 u16 firmware_flags;
353 /* pointer to backlight encoder */ 353 /* pointer to backlight encoder */
354 struct amdgpu_encoder *bl_encoder; 354 struct amdgpu_encoder *bl_encoder;
355 u8 bl_level; /* saved backlight level */
355 struct amdgpu_audio audio; /* audio stuff */ 356 struct amdgpu_audio audio; /* audio stuff */
356 int num_crtc; /* number of crtcs */ 357 int num_crtc; /* number of crtcs */
357 int num_hpd; /* number of hpd pins */ 358 int num_hpd; /* number of hpd pins */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5c4c3e0d527b..1220322c1680 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -56,6 +56,8 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
56 56
57 amdgpu_bo_kunmap(bo); 57 amdgpu_bo_kunmap(bo);
58 58
59 if (bo->gem_base.import_attach)
60 drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
59 drm_gem_object_release(&bo->gem_base); 61 drm_gem_object_release(&bo->gem_base);
60 amdgpu_bo_unref(&bo->parent); 62 amdgpu_bo_unref(&bo->parent);
61 if (!list_empty(&bo->shadow_list)) { 63 if (!list_empty(&bo->shadow_list)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 2af26d2da127..d702fb8e3427 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -34,7 +34,7 @@
34#include <linux/backlight.h> 34#include <linux/backlight.h>
35#include "bif/bif_4_1_d.h" 35#include "bif/bif_4_1_d.h"
36 36
37static u8 37u8
38amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev) 38amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
39{ 39{
40 u8 backlight_level; 40 u8 backlight_level;
@@ -48,7 +48,7 @@ amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev)
48 return backlight_level; 48 return backlight_level;
49} 49}
50 50
51static void 51void
52amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev, 52amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
53 u8 backlight_level) 53 u8 backlight_level)
54{ 54{
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
index 2bdec40515ce..f77cbdef679e 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.h
@@ -25,6 +25,11 @@
25#define __ATOMBIOS_ENCODER_H__ 25#define __ATOMBIOS_ENCODER_H__
26 26
27u8 27u8
28amdgpu_atombios_encoder_get_backlight_level_from_reg(struct amdgpu_device *adev);
29void
30amdgpu_atombios_encoder_set_backlight_level_to_reg(struct amdgpu_device *adev,
31 u8 backlight_level);
32u8
28amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder); 33amdgpu_atombios_encoder_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder);
29void 34void
30amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder, 35amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index f34bc68aadfb..022f303463fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2921,6 +2921,11 @@ static int dce_v10_0_hw_fini(void *handle)
2921 2921
2922static int dce_v10_0_suspend(void *handle) 2922static int dce_v10_0_suspend(void *handle)
2923{ 2923{
2924 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2925
2926 adev->mode_info.bl_level =
2927 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2928
2924 return dce_v10_0_hw_fini(handle); 2929 return dce_v10_0_hw_fini(handle);
2925} 2930}
2926 2931
@@ -2929,6 +2934,9 @@ static int dce_v10_0_resume(void *handle)
2929 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2934 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2930 int ret; 2935 int ret;
2931 2936
2937 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2938 adev->mode_info.bl_level);
2939
2932 ret = dce_v10_0_hw_init(handle); 2940 ret = dce_v10_0_hw_init(handle);
2933 2941
2934 /* turn on the BL */ 2942 /* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 26378bd6aba4..800a9f36ab4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -3047,6 +3047,11 @@ static int dce_v11_0_hw_fini(void *handle)
3047 3047
3048static int dce_v11_0_suspend(void *handle) 3048static int dce_v11_0_suspend(void *handle)
3049{ 3049{
3050 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3051
3052 adev->mode_info.bl_level =
3053 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
3054
3050 return dce_v11_0_hw_fini(handle); 3055 return dce_v11_0_hw_fini(handle);
3051} 3056}
3052 3057
@@ -3055,6 +3060,9 @@ static int dce_v11_0_resume(void *handle)
3055 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3060 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3056 int ret; 3061 int ret;
3057 3062
3063 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
3064 adev->mode_info.bl_level);
3065
3058 ret = dce_v11_0_hw_init(handle); 3066 ret = dce_v11_0_hw_init(handle);
3059 3067
3060 /* turn on the BL */ 3068 /* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index a712f4b285f6..b8368f69ce1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2787,6 +2787,11 @@ static int dce_v6_0_hw_fini(void *handle)
2787 2787
2788static int dce_v6_0_suspend(void *handle) 2788static int dce_v6_0_suspend(void *handle)
2789{ 2789{
2790 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2791
2792 adev->mode_info.bl_level =
2793 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2794
2790 return dce_v6_0_hw_fini(handle); 2795 return dce_v6_0_hw_fini(handle);
2791} 2796}
2792 2797
@@ -2795,6 +2800,9 @@ static int dce_v6_0_resume(void *handle)
2795 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2800 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2796 int ret; 2801 int ret;
2797 2802
2803 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2804 adev->mode_info.bl_level);
2805
2798 ret = dce_v6_0_hw_init(handle); 2806 ret = dce_v6_0_hw_init(handle);
2799 2807
2800 /* turn on the BL */ 2808 /* turn on the BL */
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index c008dc030687..012e0a9ae0ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2819,6 +2819,11 @@ static int dce_v8_0_hw_fini(void *handle)
2819 2819
2820static int dce_v8_0_suspend(void *handle) 2820static int dce_v8_0_suspend(void *handle)
2821{ 2821{
2822 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2823
2824 adev->mode_info.bl_level =
2825 amdgpu_atombios_encoder_get_backlight_level_from_reg(adev);
2826
2822 return dce_v8_0_hw_fini(handle); 2827 return dce_v8_0_hw_fini(handle);
2823} 2828}
2824 2829
@@ -2827,6 +2832,9 @@ static int dce_v8_0_resume(void *handle)
2827 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 2832 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2828 int ret; 2833 int ret;
2829 2834
2835 amdgpu_atombios_encoder_set_backlight_level_to_reg(adev,
2836 adev->mode_info.bl_level);
2837
2830 ret = dce_v8_0_hw_init(handle); 2838 ret = dce_v8_0_hw_init(handle);
2831 2839
2832 /* turn on the BL */ 2840 /* turn on the BL */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c345e645f1d7..63c67346d316 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3134,8 +3134,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
3134 3134
3135 switch (aplane->base.type) { 3135 switch (aplane->base.type) {
3136 case DRM_PLANE_TYPE_PRIMARY: 3136 case DRM_PLANE_TYPE_PRIMARY:
3137 aplane->base.format_default = true;
3138
3139 res = drm_universal_plane_init( 3137 res = drm_universal_plane_init(
3140 dm->adev->ddev, 3138 dm->adev->ddev,
3141 &aplane->base, 3139 &aplane->base,
@@ -4794,6 +4792,9 @@ static int dm_atomic_check_plane_state_fb(struct drm_atomic_state *state,
4794 return -EDEADLK; 4792 return -EDEADLK;
4795 4793
4796 crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc); 4794 crtc_state = drm_atomic_get_crtc_state(plane_state->state, crtc);
4795 if (IS_ERR(crtc_state))
4796 return PTR_ERR(crtc_state);
4797
4797 if (crtc->primary == plane && crtc_state->active) { 4798 if (crtc->primary == plane && crtc_state->active) {
4798 if (!plane_state->fb) 4799 if (!plane_state->fb)
4799 return -EINVAL; 4800 return -EINVAL;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 9bd142f65f9b..e1acc10e35a2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -109,7 +109,7 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
109 struct cea_sad *sad = &sads[i]; 109 struct cea_sad *sad = &sads[i];
110 110
111 edid_caps->audio_modes[i].format_code = sad->format; 111 edid_caps->audio_modes[i].format_code = sad->format;
112 edid_caps->audio_modes[i].channel_count = sad->channels; 112 edid_caps->audio_modes[i].channel_count = sad->channels + 1;
113 edid_caps->audio_modes[i].sample_rate = sad->freq; 113 edid_caps->audio_modes[i].sample_rate = sad->freq;
114 edid_caps->audio_modes[i].sample_size = sad->byte2; 114 edid_caps->audio_modes[i].sample_size = sad->byte2;
115 } 115 }
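The audio-caps fix above relies on how CEA-861 Short Audio Descriptors encode the channel count: the low three bits of the descriptor's first byte store the maximum number of channels minus one, so the parser has to add one back. A small standalone sketch of that decoding (field layout per CEA-861; helper name is invented, this is not the kernel's parser):

#include <stdio.h>
#include <stdint.h>

/* Decode the channel count from byte 1 of a CEA-861 Short Audio
 * Descriptor: bits [2:0] hold (max channels - 1), so a field value
 * of 1 means two channels.
 */
static unsigned int sad_channel_count(uint8_t sad_byte1)
{
	return (sad_byte1 & 0x07) + 1;
}

int main(void)
{
	/* 0x09: format code 1 (LPCM), channel field 1 -> 2 channels */
	printf("channels = %u\n", sad_channel_count(0x09));
	return 0;
}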
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index a993279a8f2d..f11f17fe08f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -496,6 +496,9 @@ struct dce_hwseq_registers {
496 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \ 496 HWS_SF(, DOMAIN7_PG_STATUS, DOMAIN7_PGFSM_PWR_STATUS, mask_sh), \
497 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ 497 HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \
498 HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\ 498 HWS_SF(, D1VGA_CONTROL, D1VGA_MODE_ENABLE, mask_sh),\
499 HWS_SF(, D2VGA_CONTROL, D2VGA_MODE_ENABLE, mask_sh),\
500 HWS_SF(, D3VGA_CONTROL, D3VGA_MODE_ENABLE, mask_sh),\
501 HWS_SF(, D4VGA_CONTROL, D4VGA_MODE_ENABLE, mask_sh),\
499 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\ 502 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_ENABLE, mask_sh),\
500 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\ 503 HWS_SF(, VGA_TEST_CONTROL, VGA_TEST_RENDER_START, mask_sh),\
501 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ 504 HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \
@@ -591,7 +594,10 @@ struct dce_hwseq_registers {
591 type DENTIST_DISPCLK_WDIVIDER; \ 594 type DENTIST_DISPCLK_WDIVIDER; \
592 type VGA_TEST_ENABLE; \ 595 type VGA_TEST_ENABLE; \
593 type VGA_TEST_RENDER_START; \ 596 type VGA_TEST_RENDER_START; \
594 type D1VGA_MODE_ENABLE; 597 type D1VGA_MODE_ENABLE; \
598 type D2VGA_MODE_ENABLE; \
599 type D3VGA_MODE_ENABLE; \
600 type D4VGA_MODE_ENABLE;
595 601
596struct dce_hwseq_shift { 602struct dce_hwseq_shift {
597 HWSEQ_REG_FIELD_LIST(uint8_t) 603 HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
index 3931412ab6d3..87093894ea9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_opp.c
@@ -128,23 +128,22 @@ static void set_truncation(
128 return; 128 return;
129 } 129 }
130 /* on other format-to do */ 130 /* on other format-to do */
131 if (params->flags.TRUNCATE_ENABLED == 0 || 131 if (params->flags.TRUNCATE_ENABLED == 0)
132 params->flags.TRUNCATE_DEPTH == 2)
133 return; 132 return;
134 /*Set truncation depth and Enable truncation*/ 133 /*Set truncation depth and Enable truncation*/
135 REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL, 134 REG_UPDATE_3(FMT_BIT_DEPTH_CONTROL,
136 FMT_TRUNCATE_EN, 1, 135 FMT_TRUNCATE_EN, 1,
137 FMT_TRUNCATE_DEPTH, 136 FMT_TRUNCATE_DEPTH,
138 params->flags.TRUNCATE_MODE, 137 params->flags.TRUNCATE_DEPTH,
139 FMT_TRUNCATE_MODE, 138 FMT_TRUNCATE_MODE,
140 params->flags.TRUNCATE_DEPTH); 139 params->flags.TRUNCATE_MODE);
141} 140}
142 141
143 142
144/** 143/**
145 * set_spatial_dither 144 * set_spatial_dither
146 * 1) set spatial dithering mode: pattern of seed 145 * 1) set spatial dithering mode: pattern of seed
147 * 2) set spatical dithering depth: 0 for 18bpp or 1 for 24bpp 146 * 2) set spatial dithering depth: 0 for 18bpp or 1 for 24bpp
148 * 3) set random seed 147 * 3) set random seed
149 * 4) set random mode 148 * 4) set random mode
150 * lfsr is reset every frame or not reset 149 * lfsr is reset every frame or not reset
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 072e4485e85e..dc1e010725c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -238,14 +238,24 @@ static void enable_power_gating_plane(
238static void disable_vga( 238static void disable_vga(
239 struct dce_hwseq *hws) 239 struct dce_hwseq *hws)
240{ 240{
241 unsigned int in_vga_mode = 0; 241 unsigned int in_vga1_mode = 0;
242 242 unsigned int in_vga2_mode = 0;
243 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga_mode); 243 unsigned int in_vga3_mode = 0;
244 244 unsigned int in_vga4_mode = 0;
245 if (in_vga_mode == 0) 245
246 REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
247 REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
248 REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
249 REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
250
251 if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
252 in_vga3_mode == 0 && in_vga4_mode == 0)
246 return; 253 return;
247 254
248 REG_WRITE(D1VGA_CONTROL, 0); 255 REG_WRITE(D1VGA_CONTROL, 0);
256 REG_WRITE(D2VGA_CONTROL, 0);
257 REG_WRITE(D3VGA_CONTROL, 0);
258 REG_WRITE(D4VGA_CONTROL, 0);
249 259
250 /* HW Engineer's Notes: 260 /* HW Engineer's Notes:
251 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and 261 * During switch from vga->extended, if we set the VGA_TEST_ENABLE and
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index 5f4c2e833a65..d665dd5af5dd 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -97,7 +97,7 @@ static const struct ast_vbios_dclk_info dclk_table[] = {
97 {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ 97 {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
98 {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ 98 {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
99 {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ 99 {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
100 {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ 100 {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
101 {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ 101 {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
102 {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ 102 {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
103 {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ 103 {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
@@ -127,7 +127,7 @@ static const struct ast_vbios_dclk_info dclk_table_ast2500[] = {
127 {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */ 127 {0x67, 0x22, 0x00}, /* 0E: VCLK157_5 */
128 {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */ 128 {0x6A, 0x22, 0x00}, /* 0F: VCLK162 */
129 {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */ 129 {0x4d, 0x4c, 0x80}, /* 10: VCLK154 */
130 {0xa7, 0x78, 0x80}, /* 11: VCLK83.5 */ 130 {0x68, 0x6f, 0x80}, /* 11: VCLK83.5 */
131 {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */ 131 {0x28, 0x49, 0x80}, /* 12: VCLK106.5 */
132 {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */ 132 {0x37, 0x49, 0x80}, /* 13: VCLK146.25 */
133 {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */ 133 {0x1f, 0x45, 0x80}, /* 14: VCLK148.5 */
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index c0530a1af5e3..2dc5e8bed172 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -461,6 +461,12 @@ int drm_mode_getfb(struct drm_device *dev,
461 if (!fb) 461 if (!fb)
462 return -ENOENT; 462 return -ENOENT;
463 463
464 /* Multi-planar framebuffers need getfb2. */
465 if (fb->format->num_planes > 1) {
466 ret = -EINVAL;
467 goto out;
468 }
469
464 r->height = fb->height; 470 r->height = fb->height;
465 r->width = fb->width; 471 r->width = fb->width;
466 r->depth = fb->format->depth; 472 r->depth = fb->format->depth;
@@ -484,6 +490,7 @@ int drm_mode_getfb(struct drm_device *dev,
484 ret = -ENODEV; 490 ret = -ENODEV;
485 } 491 }
486 492
493out:
487 drm_framebuffer_put(fb); 494 drm_framebuffer_put(fb);
488 495
489 return ret; 496 return ret;
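The getfb change above takes a reference on the framebuffer before it knows whether the request can be honoured, so the new multi-planar rejection cannot simply return: it records an error and jumps to a label that still drops the reference. A generic sketch of that single-exit-path shape in userspace C, with invented types and names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct buffer {
	int refcount;
	int num_planes;
};

static void buffer_put(struct buffer *buf)
{
	if (--buf->refcount == 0)
		free(buf);
}

/* Describe a buffer; only single-plane buffers are supported. */
static int describe_buffer(struct buffer *buf, char *out, size_t len)
{
	int ret = 0;

	buf->refcount++;		/* reference taken before any checks */

	if (buf->num_planes > 1) {
		ret = -EINVAL;		/* unsupported: bail via the label */
		goto out;
	}

	snprintf(out, len, "planes=%d", buf->num_planes);
out:
	buffer_put(buf);		/* every path drops the reference */
	return ret;
}

int main(void)
{
	struct buffer *buf = calloc(1, sizeof(*buf));
	char desc[32];

	buf->refcount = 1;		/* caller's own reference */
	buf->num_planes = 1;
	printf("ret=%d %s\n", describe_buffer(buf, desc, sizeof(desc)), desc);
	buffer_put(buf);
	return 0;
}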
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index c8454ac43fae..db6b94dda5df 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -471,6 +471,7 @@ struct parser_exec_state {
471 * used when ret from 2nd level batch buffer 471 * used when ret from 2nd level batch buffer
472 */ 472 */
473 int saved_buf_addr_type; 473 int saved_buf_addr_type;
474 bool is_ctx_wa;
474 475
475 struct cmd_info *info; 476 struct cmd_info *info;
476 477
@@ -1715,6 +1716,11 @@ static int perform_bb_shadow(struct parser_exec_state *s)
1715 bb->accessing = true; 1716 bb->accessing = true;
1716 bb->bb_start_cmd_va = s->ip_va; 1717 bb->bb_start_cmd_va = s->ip_va;
1717 1718
1719 if ((s->buf_type == BATCH_BUFFER_INSTRUCTION) && (!s->is_ctx_wa))
1720 bb->bb_offset = s->ip_va - s->rb_va;
1721 else
1722 bb->bb_offset = 0;
1723
1718 /* 1724 /*
1719 * ip_va saves the virtual address of the shadow batch buffer, while 1725 * ip_va saves the virtual address of the shadow batch buffer, while
1720 * ip_gma saves the graphics address of the original batch buffer. 1726 * ip_gma saves the graphics address of the original batch buffer.
@@ -2571,6 +2577,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
2571 s.ring_tail = gma_tail; 2577 s.ring_tail = gma_tail;
2572 s.rb_va = workload->shadow_ring_buffer_va; 2578 s.rb_va = workload->shadow_ring_buffer_va;
2573 s.workload = workload; 2579 s.workload = workload;
2580 s.is_ctx_wa = false;
2574 2581
2575 if ((bypass_scan_mask & (1 << workload->ring_id)) || 2582 if ((bypass_scan_mask & (1 << workload->ring_id)) ||
2576 gma_head == gma_tail) 2583 gma_head == gma_tail)
@@ -2624,6 +2631,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2624 s.ring_tail = gma_tail; 2631 s.ring_tail = gma_tail;
2625 s.rb_va = wa_ctx->indirect_ctx.shadow_va; 2632 s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2626 s.workload = workload; 2633 s.workload = workload;
2634 s.is_ctx_wa = true;
2627 2635
2628 if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { 2636 if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
2629 ret = -EINVAL; 2637 ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 256f1bb522b7..152df3d0291e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -394,9 +394,11 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
394 * performace for batch mmio read/write, so we need 394 * performace for batch mmio read/write, so we need
395 * handle forcewake mannually. 395 * handle forcewake mannually.
396 */ 396 */
397 intel_runtime_pm_get(dev_priv);
397 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 398 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
398 switch_mmio(pre, next, ring_id); 399 switch_mmio(pre, next, ring_id);
399 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 400 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
401 intel_runtime_pm_put(dev_priv);
400} 402}
401 403
402/** 404/**
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index b55b3580ca1d..d74d6f05c62c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -52,6 +52,54 @@ static void set_context_pdp_root_pointer(
52 pdp_pair[i].val = pdp[7 - i]; 52 pdp_pair[i].val = pdp[7 - i];
53} 53}
54 54
55/*
56 * when populating shadow ctx from guest, we should not overrride oa related
57 * registers, so that they will not be overlapped by guest oa configs. Thus
58 * made it possible to capture oa data from host for both host and guests.
59 */
60static void sr_oa_regs(struct intel_vgpu_workload *workload,
61 u32 *reg_state, bool save)
62{
63 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
64 u32 ctx_oactxctrl = dev_priv->perf.oa.ctx_oactxctrl_offset;
65 u32 ctx_flexeu0 = dev_priv->perf.oa.ctx_flexeu0_offset;
66 int i = 0;
67 u32 flex_mmio[] = {
68 i915_mmio_reg_offset(EU_PERF_CNTL0),
69 i915_mmio_reg_offset(EU_PERF_CNTL1),
70 i915_mmio_reg_offset(EU_PERF_CNTL2),
71 i915_mmio_reg_offset(EU_PERF_CNTL3),
72 i915_mmio_reg_offset(EU_PERF_CNTL4),
73 i915_mmio_reg_offset(EU_PERF_CNTL5),
74 i915_mmio_reg_offset(EU_PERF_CNTL6),
75 };
76
77 if (!workload || !reg_state || workload->ring_id != RCS)
78 return;
79
80 if (save) {
81 workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
82
83 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
84 u32 state_offset = ctx_flexeu0 + i * 2;
85
86 workload->flex_mmio[i] = reg_state[state_offset + 1];
87 }
88 } else {
89 reg_state[ctx_oactxctrl] =
90 i915_mmio_reg_offset(GEN8_OACTXCONTROL);
91 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
92
93 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
94 u32 state_offset = ctx_flexeu0 + i * 2;
95 u32 mmio = flex_mmio[i];
96
97 reg_state[state_offset] = mmio;
98 reg_state[state_offset + 1] = workload->flex_mmio[i];
99 }
100 }
101}
102
55static int populate_shadow_context(struct intel_vgpu_workload *workload) 103static int populate_shadow_context(struct intel_vgpu_workload *workload)
56{ 104{
57 struct intel_vgpu *vgpu = workload->vgpu; 105 struct intel_vgpu *vgpu = workload->vgpu;
@@ -98,6 +146,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
98 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN); 146 page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
99 shadow_ring_context = kmap(page); 147 shadow_ring_context = kmap(page);
100 148
149 sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
101#define COPY_REG(name) \ 150#define COPY_REG(name) \
102 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \ 151 intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \
103 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4) 152 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
@@ -122,6 +171,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
122 sizeof(*shadow_ring_context), 171 sizeof(*shadow_ring_context),
123 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context)); 172 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
124 173
174 sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
125 kunmap(page); 175 kunmap(page);
126 return 0; 176 return 0;
127} 177}
@@ -376,6 +426,17 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
376 goto err; 426 goto err;
377 } 427 }
378 428
429 /* For privilge batch buffer and not wa_ctx, the bb_start_cmd_va
430 * is only updated into ring_scan_buffer, not real ring address
431 * allocated in later copy_workload_to_ring_buffer. pls be noted
432 * shadow_ring_buffer_va is now pointed to real ring buffer va
433 * in copy_workload_to_ring_buffer.
434 */
435
436 if (bb->bb_offset)
437 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
438 + bb->bb_offset;
439
379 /* relocate shadow batch buffer */ 440 /* relocate shadow batch buffer */
380 bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma); 441 bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
381 if (gmadr_bytes == 8) 442 if (gmadr_bytes == 8)
@@ -1044,10 +1105,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
1044 1105
1045 bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES); 1106 bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
1046 1107
1047 s->workloads = kmem_cache_create("gvt-g_vgpu_workload", 1108 s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
1048 sizeof(struct intel_vgpu_workload), 0, 1109 sizeof(struct intel_vgpu_workload), 0,
1049 SLAB_HWCACHE_ALIGN, 1110 SLAB_HWCACHE_ALIGN,
1050 NULL); 1111 offsetof(struct intel_vgpu_workload, rb_tail),
1112 sizeof_field(struct intel_vgpu_workload, rb_tail),
1113 NULL);
1051 1114
1052 if (!s->workloads) { 1115 if (!s->workloads) {
1053 ret = -ENOMEM; 1116 ret = -ENOMEM;
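The sr_oa_regs() helper added above keeps host-owned OA registers from being clobbered when the shadow context is repopulated from the guest image: the slots of interest are saved before the bulk copy and written back afterwards. A toy sketch of that save-around-overwrite pattern in plain C, with invented register offsets and sizes:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CTX_WORDS	16
#define OA_CTRL_IDX	3	/* invented offsets of the protected slots */
#define FLEX0_IDX	8
#define NUM_FLEX	4

struct saved_regs {
	uint32_t oactrl;
	uint32_t flex[NUM_FLEX];
};

static void save_protected(const uint32_t *state, struct saved_regs *s)
{
	s->oactrl = state[OA_CTRL_IDX];
	memcpy(s->flex, &state[FLEX0_IDX], sizeof(s->flex));
}

static void restore_protected(uint32_t *state, const struct saved_regs *s)
{
	state[OA_CTRL_IDX] = s->oactrl;
	memcpy(&state[FLEX0_IDX], s->flex, sizeof(s->flex));
}

int main(void)
{
	uint32_t shadow[CTX_WORDS];
	uint32_t guest[CTX_WORDS];
	struct saved_regs saved;
	int i;

	for (i = 0; i < CTX_WORDS; i++) {
		shadow[i] = 0x1000 + i;	/* host-configured state */
		guest[i]  = 0x2000 + i;	/* image provided by the guest */
	}

	save_protected(shadow, &saved);		/* 1) save host-owned slots */
	memcpy(shadow, guest, sizeof(shadow));	/* 2) bulk copy from guest */
	restore_protected(shadow, &saved);	/* 3) put saved slots back */

	printf("oactrl after copy: 0x%x (host value preserved)\n",
	       shadow[OA_CTRL_IDX]);
	return 0;
}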
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index ff175a98b19e..a79a4f60637e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -110,6 +110,10 @@ struct intel_vgpu_workload {
110 /* shadow batch buffer */ 110 /* shadow batch buffer */
111 struct list_head shadow_bb; 111 struct list_head shadow_bb;
112 struct intel_shadow_wa_ctx wa_ctx; 112 struct intel_shadow_wa_ctx wa_ctx;
113
114 /* oa registers */
115 u32 oactxctrl;
116 u32 flex_mmio[7];
113}; 117};
114 118
115struct intel_vgpu_shadow_bb { 119struct intel_vgpu_shadow_bb {
@@ -120,6 +124,7 @@ struct intel_vgpu_shadow_bb {
120 u32 *bb_start_cmd_va; 124 u32 *bb_start_cmd_va;
121 unsigned int clflush; 125 unsigned int clflush;
122 bool accessing; 126 bool accessing;
127 unsigned long bb_offset;
123}; 128};
124 129
125#define workload_q_head(vgpu, ring_id) \ 130#define workload_q_head(vgpu, ring_id) \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 66ee9d888d16..6ff5d655c202 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -434,20 +434,28 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
434 dma_fence_put(shared[i]); 434 dma_fence_put(shared[i]);
435 kfree(shared); 435 kfree(shared);
436 436
437 /*
438 * If both shared fences and an exclusive fence exist,
439 * then by construction the shared fences must be later
440 * than the exclusive fence. If we successfully wait for
441 * all the shared fences, we know that the exclusive fence
442 * must all be signaled. If all the shared fences are
443 * signaled, we can prune the array and recover the
444 * floating references on the fences/requests.
445 */
437 prune_fences = count && timeout >= 0; 446 prune_fences = count && timeout >= 0;
438 } else { 447 } else {
439 excl = reservation_object_get_excl_rcu(resv); 448 excl = reservation_object_get_excl_rcu(resv);
440 } 449 }
441 450
442 if (excl && timeout >= 0) { 451 if (excl && timeout >= 0)
443 timeout = i915_gem_object_wait_fence(excl, flags, timeout, 452 timeout = i915_gem_object_wait_fence(excl, flags, timeout,
444 rps_client); 453 rps_client);
445 prune_fences = timeout >= 0;
446 }
447 454
448 dma_fence_put(excl); 455 dma_fence_put(excl);
449 456
450 /* Oportunistically prune the fences iff we know they have *all* been 457 /*
458 * Opportunistically prune the fences iff we know they have *all* been
451 * signaled and that the reservation object has not been changed (i.e. 459 * signaled and that the reservation object has not been changed (i.e.
452 * no new fences have been added). 460 * no new fences have been added).
453 */ 461 */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index b33d2158c234..e5e6f6bb2b05 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -304,8 +304,9 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
304{ 304{
305 struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); 305 struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
306 struct intel_rps *rps = &dev_priv->gt_pm.rps; 306 struct intel_rps *rps = &dev_priv->gt_pm.rps;
307 u32 val; 307 bool boost = false;
308 ssize_t ret; 308 ssize_t ret;
309 u32 val;
309 310
310 ret = kstrtou32(buf, 0, &val); 311 ret = kstrtou32(buf, 0, &val);
311 if (ret) 312 if (ret)
@@ -317,8 +318,13 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
317 return -EINVAL; 318 return -EINVAL;
318 319
319 mutex_lock(&dev_priv->pcu_lock); 320 mutex_lock(&dev_priv->pcu_lock);
320 rps->boost_freq = val; 321 if (val != rps->boost_freq) {
322 rps->boost_freq = val;
323 boost = atomic_read(&rps->num_waiters);
324 }
321 mutex_unlock(&dev_priv->pcu_lock); 325 mutex_unlock(&dev_priv->pcu_lock);
326 if (boost)
327 schedule_work(&rps->work);
322 328
323 return count; 329 return count;
324} 330}
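The sysfs change above decides under the lock whether a boost is needed (new value and waiters present) but defers the actual schedule_work() until after the unlock, and skips it entirely when the written value matches the current one. The same decide-under-the-lock, act-after-the-unlock shape in a small pthread sketch with invented names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int boost_freq = 1100;
static int num_waiters = 1;

static void kick_worker(void)
{
	/* Stand-in for schedule_work(): must not run under the lock. */
	printf("worker kicked\n");
}

static void store_boost_freq(unsigned int val)
{
	bool kick = false;

	pthread_mutex_lock(&lock);
	if (val != boost_freq) {
		boost_freq = val;
		/* Only note that a kick is needed; do it after unlocking. */
		kick = num_waiters > 0;
	}
	pthread_mutex_unlock(&lock);

	if (kick)
		kick_worker();
}

int main(void)
{
	store_boost_freq(1100);	/* unchanged value: no kick */
	store_boost_freq(1350);	/* new value with waiters: kick */
	return 0;
}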
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f51645a08dca..6aff9d096e13 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -2175,8 +2175,7 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
2175 intel_prepare_dp_ddi_buffers(encoder, crtc_state); 2175 intel_prepare_dp_ddi_buffers(encoder, crtc_state);
2176 2176
2177 intel_ddi_init_dp_buf_reg(encoder); 2177 intel_ddi_init_dp_buf_reg(encoder);
2178 if (!is_mst) 2178 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2179 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2180 intel_dp_start_link_train(intel_dp); 2179 intel_dp_start_link_train(intel_dp);
2181 if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) 2180 if (port != PORT_A || INTEL_GEN(dev_priv) >= 9)
2182 intel_dp_stop_link_train(intel_dp); 2181 intel_dp_stop_link_train(intel_dp);
@@ -2274,14 +2273,12 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
2274 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2273 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2275 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); 2274 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
2276 struct intel_dp *intel_dp = &dig_port->dp; 2275 struct intel_dp *intel_dp = &dig_port->dp;
2277 bool is_mst = intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST);
2278 2276
2279 /* 2277 /*
2280 * Power down sink before disabling the port, otherwise we end 2278 * Power down sink before disabling the port, otherwise we end
2281 * up getting interrupts from the sink on detecting link loss. 2279 * up getting interrupts from the sink on detecting link loss.
2282 */ 2280 */
2283 if (!is_mst) 2281 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2284 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2285 2282
2286 intel_disable_ddi_buf(encoder); 2283 intel_disable_ddi_buf(encoder);
2287 2284
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 35c5299feab6..a29868cd30c7 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -620,19 +620,15 @@ static int
620bxt_power_sequencer_idx(struct intel_dp *intel_dp) 620bxt_power_sequencer_idx(struct intel_dp *intel_dp)
621{ 621{
622 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp)); 622 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
623 int backlight_controller = dev_priv->vbt.backlight.controller;
623 624
624 lockdep_assert_held(&dev_priv->pps_mutex); 625 lockdep_assert_held(&dev_priv->pps_mutex);
625 626
626 /* We should never land here with regular DP ports */ 627 /* We should never land here with regular DP ports */
627 WARN_ON(!intel_dp_is_edp(intel_dp)); 628 WARN_ON(!intel_dp_is_edp(intel_dp));
628 629
629 /*
630 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
631 * mapping needs to be retrieved from VBT, for now just hard-code to
632 * use instance #0 always.
633 */
634 if (!intel_dp->pps_reset) 630 if (!intel_dp->pps_reset)
635 return 0; 631 return backlight_controller;
636 632
637 intel_dp->pps_reset = false; 633 intel_dp->pps_reset = false;
638 634
@@ -642,7 +638,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
642 */ 638 */
643 intel_dp_init_panel_power_sequencer_registers(intel_dp, false); 639 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
644 640
645 return 0; 641 return backlight_controller;
646} 642}
647 643
648typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, 644typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_hangcheck.c b/drivers/gpu/drm/i915/intel_hangcheck.c
index 348a4f7ffb67..53747318f4a7 100644
--- a/drivers/gpu/drm/i915/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/intel_hangcheck.c
@@ -246,7 +246,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
246 */ 246 */
247 tmp = I915_READ_CTL(engine); 247 tmp = I915_READ_CTL(engine);
248 if (tmp & RING_WAIT) { 248 if (tmp & RING_WAIT) {
249 i915_handle_error(dev_priv, 0, 249 i915_handle_error(dev_priv, BIT(engine->id),
250 "Kicking stuck wait on %s", 250 "Kicking stuck wait on %s",
251 engine->name); 251 engine->name);
252 I915_WRITE_CTL(engine, tmp); 252 I915_WRITE_CTL(engine, tmp);
@@ -258,7 +258,7 @@ engine_stuck(struct intel_engine_cs *engine, u64 acthd)
258 default: 258 default:
259 return ENGINE_DEAD; 259 return ENGINE_DEAD;
260 case 1: 260 case 1:
261 i915_handle_error(dev_priv, 0, 261 i915_handle_error(dev_priv, ALL_ENGINES,
262 "Kicking stuck semaphore on %s", 262 "Kicking stuck semaphore on %s",
263 engine->name); 263 engine->name);
264 I915_WRITE_CTL(engine, tmp); 264 I915_WRITE_CTL(engine, tmp);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 9a9961802f5c..e83af0f2be86 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -225,7 +225,11 @@ static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
225 struct drm_crtc_state *old_crtc_state) 225 struct drm_crtc_state *old_crtc_state)
226{ 226{
227 drm_crtc_vblank_on(crtc); 227 drm_crtc_vblank_on(crtc);
228}
228 229
230static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
231 struct drm_crtc_state *old_crtc_state)
232{
229 spin_lock_irq(&crtc->dev->event_lock); 233 spin_lock_irq(&crtc->dev->event_lock);
230 if (crtc->state->event) { 234 if (crtc->state->event) {
231 WARN_ON(drm_crtc_vblank_get(crtc)); 235 WARN_ON(drm_crtc_vblank_get(crtc));
@@ -293,6 +297,7 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
293 .mode_set_nofb = ipu_crtc_mode_set_nofb, 297 .mode_set_nofb = ipu_crtc_mode_set_nofb,
294 .atomic_check = ipu_crtc_atomic_check, 298 .atomic_check = ipu_crtc_atomic_check,
295 .atomic_begin = ipu_crtc_atomic_begin, 299 .atomic_begin = ipu_crtc_atomic_begin,
300 .atomic_flush = ipu_crtc_atomic_flush,
296 .atomic_disable = ipu_crtc_atomic_disable, 301 .atomic_disable = ipu_crtc_atomic_disable,
297 .atomic_enable = ipu_crtc_atomic_enable, 302 .atomic_enable = ipu_crtc_atomic_enable,
298}; 303};
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 57ed56d8623f..d9113faaa62f 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -22,6 +22,7 @@
22#include <drm/drm_plane_helper.h> 22#include <drm/drm_plane_helper.h>
23 23
24#include "video/imx-ipu-v3.h" 24#include "video/imx-ipu-v3.h"
25#include "imx-drm.h"
25#include "ipuv3-plane.h" 26#include "ipuv3-plane.h"
26 27
27struct ipu_plane_state { 28struct ipu_plane_state {
@@ -272,7 +273,7 @@ static void ipu_plane_destroy(struct drm_plane *plane)
272 kfree(ipu_plane); 273 kfree(ipu_plane);
273} 274}
274 275
275void ipu_plane_state_reset(struct drm_plane *plane) 276static void ipu_plane_state_reset(struct drm_plane *plane)
276{ 277{
277 struct ipu_plane_state *ipu_state; 278 struct ipu_plane_state *ipu_state;
278 279
@@ -292,7 +293,8 @@ void ipu_plane_state_reset(struct drm_plane *plane)
292 plane->state = &ipu_state->base; 293 plane->state = &ipu_state->base;
293} 294}
294 295
295struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane) 296static struct drm_plane_state *
297ipu_plane_duplicate_state(struct drm_plane *plane)
296{ 298{
297 struct ipu_plane_state *state; 299 struct ipu_plane_state *state;
298 300
@@ -306,8 +308,8 @@ struct drm_plane_state *ipu_plane_duplicate_state(struct drm_plane *plane)
306 return &state->base; 308 return &state->base;
307} 309}
308 310
309void ipu_plane_destroy_state(struct drm_plane *plane, 311static void ipu_plane_destroy_state(struct drm_plane *plane,
310 struct drm_plane_state *state) 312 struct drm_plane_state *state)
311{ 313{
312 struct ipu_plane_state *ipu_state = to_ipu_plane_state(state); 314 struct ipu_plane_state *ipu_state = to_ipu_plane_state(state);
313 315
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 380f340204e8..debbbf0fd4bd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -134,7 +134,7 @@ nv50_get_intensity(struct backlight_device *bd)
134 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 134 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
135 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 135 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
136 struct nvif_object *device = &drm->client.device.object; 136 struct nvif_object *device = &drm->client.device.object;
137 int or = nv_encoder->or; 137 int or = ffs(nv_encoder->dcb->or) - 1;
138 u32 div = 1025; 138 u32 div = 1025;
139 u32 val; 139 u32 val;
140 140
@@ -149,7 +149,7 @@ nv50_set_intensity(struct backlight_device *bd)
149 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 149 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
150 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 150 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
151 struct nvif_object *device = &drm->client.device.object; 151 struct nvif_object *device = &drm->client.device.object;
152 int or = nv_encoder->or; 152 int or = ffs(nv_encoder->dcb->or) - 1;
153 u32 div = 1025; 153 u32 div = 1025;
154 u32 val = (bd->props.brightness * div) / 100; 154 u32 val = (bd->props.brightness * div) / 100;
155 155
@@ -170,7 +170,7 @@ nva3_get_intensity(struct backlight_device *bd)
170 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 170 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
171 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 171 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
172 struct nvif_object *device = &drm->client.device.object; 172 struct nvif_object *device = &drm->client.device.object;
173 int or = nv_encoder->or; 173 int or = ffs(nv_encoder->dcb->or) - 1;
174 u32 div, val; 174 u32 div, val;
175 175
176 div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); 176 div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
@@ -188,7 +188,7 @@ nva3_set_intensity(struct backlight_device *bd)
188 struct nouveau_encoder *nv_encoder = bl_get_data(bd); 188 struct nouveau_encoder *nv_encoder = bl_get_data(bd);
189 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev); 189 struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
190 struct nvif_object *device = &drm->client.device.object; 190 struct nvif_object *device = &drm->client.device.object;
191 int or = nv_encoder->or; 191 int or = ffs(nv_encoder->dcb->or) - 1;
192 u32 div, val; 192 u32 div, val;
193 193
194 div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or)); 194 div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
@@ -228,7 +228,7 @@ nv50_backlight_init(struct drm_connector *connector)
228 return -ENODEV; 228 return -ENODEV;
229 } 229 }
230 230
231 if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or))) 231 if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)))
232 return 0; 232 return 0;
233 233
234 if (drm->client.device.info.chipset <= 0xa0 || 234 if (drm->client.device.info.chipset <= 0xa0 ||
@@ -268,13 +268,13 @@ nouveau_backlight_init(struct drm_device *dev)
268 struct nvif_device *device = &drm->client.device; 268 struct nvif_device *device = &drm->client.device;
269 struct drm_connector *connector; 269 struct drm_connector *connector;
270 270
271 INIT_LIST_HEAD(&drm->bl_connectors);
272
271 if (apple_gmux_present()) { 273 if (apple_gmux_present()) {
272 NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n"); 274 NV_INFO(drm, "Apple GMUX detected: not registering Nouveau backlight interface\n");
273 return 0; 275 return 0;
274 } 276 }
275 277
276 INIT_LIST_HEAD(&drm->bl_connectors);
277
278 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 278 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
279 if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && 279 if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
280 connector->connector_type != DRM_MODE_CONNECTOR_eDP) 280 connector->connector_type != DRM_MODE_CONNECTOR_eDP)
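The nouveau change above stops using nv_encoder->or as a plain index and instead derives the index from a one-hot output-resource mask with ffs(x) - 1, where ffs() returns the one-based position of the lowest set bit (and 0 for an empty mask). A quick standalone check of that idiom:

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int masks[] = { 0x1, 0x2, 0x4, 0x8 };
	unsigned int i;

	for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++)
		printf("mask 0x%x -> index %d\n", masks[i], ffs(masks[i]) - 1);

	/* ffs(0) is 0, so an empty mask would yield -1 and has to be
	 * rejected before the result is used as an index.
	 */
	return 0;
}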
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 93946dcee319..1c12e58f44c2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1354,7 +1354,7 @@ nvkm_vmm_get_locked(struct nvkm_vmm *vmm, bool getref, bool mapref, bool sparse,
1354 1354
1355 tail = this->addr + this->size; 1355 tail = this->addr + this->size;
1356 if (vmm->func->page_block && next && next->page != p) 1356 if (vmm->func->page_block && next && next->page != p)
1357 tail = ALIGN_DOWN(addr, vmm->func->page_block); 1357 tail = ALIGN_DOWN(tail, vmm->func->page_block);
1358 1358
1359 if (addr <= tail && tail - addr >= size) { 1359 if (addr <= tail && tail - addr >= size) {
1360 rb_erase(&this->tree, &vmm->free); 1360 rb_erase(&this->tree, &vmm->free);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 2e2ca3c6b47d..df9469a8fdb1 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -90,25 +90,18 @@ void radeon_connector_hotplug(struct drm_connector *connector)
90 /* don't do anything if sink is not display port, i.e., 90 /* don't do anything if sink is not display port, i.e.,
91 * passive dp->(dvi|hdmi) adaptor 91 * passive dp->(dvi|hdmi) adaptor
92 */ 92 */
93 if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) { 93 if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT &&
94 int saved_dpms = connector->dpms; 94 radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) &&
95 /* Only turn off the display if it's physically disconnected */ 95 radeon_dp_needs_link_train(radeon_connector)) {
96 if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { 96 /* Don't start link training before we have the DPCD */
97 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); 97 if (!radeon_dp_getdpcd(radeon_connector))
98 } else if (radeon_dp_needs_link_train(radeon_connector)) { 98 return;
99 /* Don't try to start link training before we 99
100 * have the dpcd */ 100 /* Turn the connector off and back on immediately, which
101 if (!radeon_dp_getdpcd(radeon_connector)) 101 * will trigger link training
102 return; 102 */
103 103 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
104 /* set it to OFF so that drm_helper_connector_dpms() 104 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
105 * won't return immediately since the current state
106 * is ON at this point.
107 */
108 connector->dpms = DRM_MODE_DPMS_OFF;
109 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
110 }
111 connector->dpms = saved_dpms;
112 } 105 }
113 } 106 }
114} 107}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index a9962ffba720..27d8e7dd2d06 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -34,8 +34,6 @@ void radeon_gem_object_free(struct drm_gem_object *gobj)
34 struct radeon_bo *robj = gem_to_radeon_bo(gobj); 34 struct radeon_bo *robj = gem_to_radeon_bo(gobj);
35 35
36 if (robj) { 36 if (robj) {
37 if (robj->gem_base.import_attach)
38 drm_prime_gem_destroy(&robj->gem_base, robj->tbo.sg);
39 radeon_mn_unregister(robj); 37 radeon_mn_unregister(robj);
40 radeon_bo_unref(&robj); 38 radeon_bo_unref(&robj);
41 } 39 }
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 15404af9d740..31f5ad605e59 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -82,6 +82,8 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
82 mutex_unlock(&bo->rdev->gem.mutex); 82 mutex_unlock(&bo->rdev->gem.mutex);
83 radeon_bo_clear_surface_reg(bo); 83 radeon_bo_clear_surface_reg(bo);
84 WARN_ON_ONCE(!list_empty(&bo->va)); 84 WARN_ON_ONCE(!list_empty(&bo->va));
85 if (bo->gem_base.import_attach)
86 drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
85 drm_gem_object_release(&bo->gem_base); 87 drm_gem_object_release(&bo->gem_base);
86 kfree(bo); 88 kfree(bo);
87} 89}
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 4570da0227b4..d9a71f361b14 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -111,7 +111,7 @@ static int sun4i_drv_bind(struct device *dev)
111 /* drm_vblank_init calls kcalloc, which can fail */ 111 /* drm_vblank_init calls kcalloc, which can fail */
112 ret = drm_vblank_init(drm, drm->mode_config.num_crtc); 112 ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
113 if (ret) 113 if (ret)
114 goto free_mem_region; 114 goto cleanup_mode_config;
115 115
116 drm->irq_enabled = true; 116 drm->irq_enabled = true;
117 117
@@ -139,7 +139,6 @@ finish_poll:
139 sun4i_framebuffer_free(drm); 139 sun4i_framebuffer_free(drm);
140cleanup_mode_config: 140cleanup_mode_config:
141 drm_mode_config_cleanup(drm); 141 drm_mode_config_cleanup(drm);
142free_mem_region:
143 of_reserved_mem_device_release(dev); 142 of_reserved_mem_device_release(dev);
144free_drm: 143free_drm:
145 drm_dev_unref(drm); 144 drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 500b6fb3e028..fa4bcd092eaf 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -538,7 +538,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
538 &sun4i_hdmi_regmap_config); 538 &sun4i_hdmi_regmap_config);
539 if (IS_ERR(hdmi->regmap)) { 539 if (IS_ERR(hdmi->regmap)) {
540 dev_err(dev, "Couldn't create HDMI encoder regmap\n"); 540 dev_err(dev, "Couldn't create HDMI encoder regmap\n");
541 return PTR_ERR(hdmi->regmap); 541 ret = PTR_ERR(hdmi->regmap);
542 goto err_disable_mod_clk;
542 } 543 }
543 544
544 ret = sun4i_tmds_create(hdmi); 545 ret = sun4i_tmds_create(hdmi);
@@ -551,7 +552,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
551 hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc"); 552 hdmi->ddc_parent_clk = devm_clk_get(dev, "ddc");
552 if (IS_ERR(hdmi->ddc_parent_clk)) { 553 if (IS_ERR(hdmi->ddc_parent_clk)) {
553 dev_err(dev, "Couldn't get the HDMI DDC clock\n"); 554 dev_err(dev, "Couldn't get the HDMI DDC clock\n");
554 return PTR_ERR(hdmi->ddc_parent_clk); 555 ret = PTR_ERR(hdmi->ddc_parent_clk);
556 goto err_disable_mod_clk;
555 } 557 }
556 } else { 558 } else {
557 hdmi->ddc_parent_clk = hdmi->tmds_clk; 559 hdmi->ddc_parent_clk = hdmi->tmds_clk;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 2de586b7c98b..a818ca491605 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -103,6 +103,7 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
103 103
104 if (enabled) { 104 if (enabled) {
105 clk_prepare_enable(clk); 105 clk_prepare_enable(clk);
106 clk_rate_exclusive_get(clk);
106 } else { 107 } else {
107 clk_rate_exclusive_put(clk); 108 clk_rate_exclusive_put(clk);
108 clk_disable_unprepare(clk); 109 clk_disable_unprepare(clk);
@@ -262,7 +263,7 @@ static void sun4i_tcon0_mode_set_common(struct sun4i_tcon *tcon,
262 const struct drm_display_mode *mode) 263 const struct drm_display_mode *mode)
263{ 264{
264 /* Configure the dot clock */ 265 /* Configure the dot clock */
265 clk_set_rate_exclusive(tcon->dclk, mode->crtc_clock * 1000); 266 clk_set_rate(tcon->dclk, mode->crtc_clock * 1000);
266 267
267 /* Set the resolution */ 268 /* Set the resolution */
268 regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG, 269 regmap_write(tcon->regs, SUN4I_TCON0_BASIC0_REG,
@@ -423,7 +424,7 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
423 WARN_ON(!tcon->quirks->has_channel_1); 424 WARN_ON(!tcon->quirks->has_channel_1);
424 425
425 /* Configure the dot clock */ 426 /* Configure the dot clock */
426 clk_set_rate_exclusive(tcon->sclk1, mode->crtc_clock * 1000); 427 clk_set_rate(tcon->sclk1, mode->crtc_clock * 1000);
427 428
428 /* Adjust clock delay */ 429 /* Adjust clock delay */
429 clk_delay = sun4i_tcon_get_clk_delay(mode, 1); 430 clk_delay = sun4i_tcon_get_clk_delay(mode, 1);
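
The sun4i_tcon hunks above move the dot-clock rate claim into the channel enable/disable path: clk_rate_exclusive_get() is taken right after clk_prepare_enable(), clk_rate_exclusive_put() is dropped just before clk_disable_unprepare(), and the mode_set paths fall back to a plain clk_set_rate(). A minimal standalone sketch of that acquire/release ordering follows; it is illustrative only, every name in it is invented, and it is not the sun4i driver code.

/*
 * Illustrative sketch (not the sun4i driver): pair an exclusive rate claim
 * with the enable/disable path, mirroring the clk_rate_exclusive_get()/put()
 * ordering in the hunks above. All names here are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_clk {
	unsigned long rate;
	bool enabled;
	int exclusive;		/* >0: other consumers may not retune the clock */
};

static void channel_enable(struct fake_clk *clk)
{
	clk->enabled = true;	/* clk_prepare_enable() */
	clk->exclusive++;	/* clk_rate_exclusive_get(): lock the rate */
}

static void channel_disable(struct fake_clk *clk)
{
	clk->exclusive--;	/* clk_rate_exclusive_put(): release first */
	clk->enabled = false;	/* clk_disable_unprepare() */
}

/* With exclusivity held across enable, mode_set can use a plain set_rate(). */
static int set_rate(struct fake_clk *clk, unsigned long rate, bool is_owner)
{
	if (clk->exclusive && !is_owner)
		return -1;	/* refused while the display pipeline holds the claim */
	clk->rate = rate;
	return 0;
}

int main(void)
{
	struct fake_clk dclk = {0};

	channel_enable(&dclk);
	set_rate(&dclk, 148500000UL, true);	/* the owning display pipeline */
	printf("outside retune refused: %d\n",
	       set_rate(&dclk, 100000000UL, false) < 0);
	channel_disable(&dclk);
	return 0;
}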
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index b8403ed48285..fbffe1948b3b 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -1903,8 +1903,12 @@ cleanup:
1903 if (!IS_ERR(primary)) 1903 if (!IS_ERR(primary))
1904 drm_plane_cleanup(primary); 1904 drm_plane_cleanup(primary);
1905 1905
1906 if (group && tegra->domain) { 1906 if (group && dc->domain) {
1907 iommu_detach_group(tegra->domain, group); 1907 if (group == tegra->group) {
1908 iommu_detach_group(dc->domain, group);
1909 tegra->group = NULL;
1910 }
1911
1908 dc->domain = NULL; 1912 dc->domain = NULL;
1909 } 1913 }
1910 1914
@@ -1913,8 +1917,10 @@ cleanup:
1913 1917
1914static int tegra_dc_exit(struct host1x_client *client) 1918static int tegra_dc_exit(struct host1x_client *client)
1915{ 1919{
1920 struct drm_device *drm = dev_get_drvdata(client->parent);
1916 struct iommu_group *group = iommu_group_get(client->dev); 1921 struct iommu_group *group = iommu_group_get(client->dev);
1917 struct tegra_dc *dc = host1x_client_to_dc(client); 1922 struct tegra_dc *dc = host1x_client_to_dc(client);
1923 struct tegra_drm *tegra = drm->dev_private;
1918 int err; 1924 int err;
1919 1925
1920 devm_free_irq(dc->dev, dc->irq, dc); 1926 devm_free_irq(dc->dev, dc->irq, dc);
@@ -1926,7 +1932,11 @@ static int tegra_dc_exit(struct host1x_client *client)
1926 } 1932 }
1927 1933
1928 if (group && dc->domain) { 1934 if (group && dc->domain) {
1929 iommu_detach_group(dc->domain, group); 1935 if (group == tegra->group) {
1936 iommu_detach_group(dc->domain, group);
1937 tegra->group = NULL;
1938 }
1939
1930 dc->domain = NULL; 1940 dc->domain = NULL;
1931 } 1941 }
1932 1942
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index d50bddb2e447..7fcf4a242840 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -250,6 +250,7 @@ static void tegra_drm_unload(struct drm_device *drm)
250 250
251 drm_kms_helper_poll_fini(drm); 251 drm_kms_helper_poll_fini(drm);
252 tegra_drm_fb_exit(drm); 252 tegra_drm_fb_exit(drm);
253 drm_atomic_helper_shutdown(drm);
253 drm_mode_config_cleanup(drm); 254 drm_mode_config_cleanup(drm);
254 255
255 err = host1x_device_exit(device); 256 err = host1x_device_exit(device);
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index 4d2ed966f9e3..87c5d89bc9ba 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -1072,7 +1072,6 @@ static int tegra_dsi_exit(struct host1x_client *client)
1072 struct tegra_dsi *dsi = host1x_client_to_dsi(client); 1072 struct tegra_dsi *dsi = host1x_client_to_dsi(client);
1073 1073
1074 tegra_output_exit(&dsi->output); 1074 tegra_output_exit(&dsi->output);
1075 regulator_disable(dsi->vdd);
1076 1075
1077 return 0; 1076 return 0;
1078} 1077}
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index 36a06a993698..94dac79ac3c9 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -297,6 +297,10 @@ int tegra_plane_format_get_alpha(unsigned int opaque, unsigned int *alpha)
297 case WIN_COLOR_DEPTH_B8G8R8X8: 297 case WIN_COLOR_DEPTH_B8G8R8X8:
298 *alpha = WIN_COLOR_DEPTH_B8G8R8A8; 298 *alpha = WIN_COLOR_DEPTH_B8G8R8A8;
299 return 0; 299 return 0;
300
301 case WIN_COLOR_DEPTH_B5G6R5:
302 *alpha = opaque;
303 return 0;
300 } 304 }
301 305
302 return -EINVAL; 306 return -EINVAL;
@@ -330,9 +334,6 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra,
330 unsigned int zpos[2]; 334 unsigned int zpos[2];
331 unsigned int i; 335 unsigned int i;
332 336
333 for (i = 0; i < 3; i++)
334 state->dependent[i] = false;
335
336 for (i = 0; i < 2; i++) 337 for (i = 0; i < 2; i++)
337 zpos[i] = 0; 338 zpos[i] = 0;
338 339
@@ -346,6 +347,8 @@ void tegra_plane_check_dependent(struct tegra_plane *tegra,
346 347
347 index = tegra_plane_get_overlap_index(tegra, p); 348 index = tegra_plane_get_overlap_index(tegra, p);
348 349
350 state->dependent[index] = false;
351
349 /* 352 /*
350 * If any of the other planes is on top of this plane and uses 353 * If any of the other planes is on top of this plane and uses
351 * a format with an alpha component, mark this plane as being 354 * a format with an alpha component, mark this plane as being
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index b5b335c9b2bb..2ebdc6d5a76e 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -159,10 +159,15 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
159{ 159{
160 unsigned long start = vma->vm_start; 160 unsigned long start = vma->vm_start;
161 unsigned long size = vma->vm_end - vma->vm_start; 161 unsigned long size = vma->vm_end - vma->vm_start;
162 unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; 162 unsigned long offset;
163 unsigned long page, pos; 163 unsigned long page, pos;
164 164
165 if (offset + size > info->fix.smem_len) 165 if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT))
166 return -EINVAL;
167
168 offset = vma->vm_pgoff << PAGE_SHIFT;
169
170 if (offset > info->fix.smem_len || size > info->fix.smem_len - offset)
166 return -EINVAL; 171 return -EINVAL;
167 172
168 pos = (unsigned long)info->fix.smem_start + offset; 173 pos = (unsigned long)info->fix.smem_start + offset;
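
The udl_fb_mmap() change above defers the byte-offset computation until vm_pgoff has been checked against the largest value that can be shifted without overflowing an unsigned long, and then bounds-checks with two comparisons so offset + size can never wrap. A standalone sketch of the same checks, using a hypothetical helper rather than the udl code:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/*
 * Return true if a mapping of @size bytes at page offset @pgoff fits in a
 * framebuffer of @smem_len bytes, without any intermediate overflow.
 */
static bool fb_mmap_range_ok(unsigned long pgoff, unsigned long size,
			     unsigned long smem_len)
{
	unsigned long offset;

	/* pgoff << PAGE_SHIFT must not overflow unsigned long */
	if (pgoff > (~0UL >> PAGE_SHIFT))
		return false;

	offset = pgoff << PAGE_SHIFT;

	/* two comparisons so that offset + size can never wrap */
	if (offset > smem_len || size > smem_len - offset)
		return false;

	return true;
}

int main(void)
{
	printf("%d\n", fb_mmap_range_ok(1, 4096, 16384));	/* fits: 1 */
	printf("%d\n", fb_mmap_range_ok(~0UL, 4096, 16384));	/* shift would overflow: 0 */
	printf("%d\n", fb_mmap_range_ok(3, 8192, 16384));	/* past the end: 0 */
	return 0;
}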
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 184340d486c3..86d25f18aa99 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1337,6 +1337,19 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv)
1337 */ 1337 */
1338void vmw_svga_disable(struct vmw_private *dev_priv) 1338void vmw_svga_disable(struct vmw_private *dev_priv)
1339{ 1339{
1340 /*
1341 * Disabling SVGA will turn off device modesetting capabilities, so
1342 * notify KMS about that so that it doesn't cache atomic state that
1343 * isn't valid anymore, for example crtcs turned on.
1344 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
1345 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
1346 * end up with lock order reversal. Thus, a master may actually perform
1347 * a new modeset just after we call vmw_kms_lost_device() and race with
1348 * vmw_svga_disable(), but that should at worst cause atomic KMS state
1349 * to be inconsistent with the device, causing modesetting problems.
1350 *
1351 */
1352 vmw_kms_lost_device(dev_priv->dev);
1340 ttm_write_lock(&dev_priv->reservation_sem, false); 1353 ttm_write_lock(&dev_priv->reservation_sem, false);
1341 spin_lock(&dev_priv->svga_lock); 1354 spin_lock(&dev_priv->svga_lock);
1342 if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) { 1355 if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d08753e8fd94..9116fe8baebc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -938,6 +938,7 @@ int vmw_kms_present(struct vmw_private *dev_priv,
938int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, 938int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
939 struct drm_file *file_priv); 939 struct drm_file *file_priv);
940void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv); 940void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv);
941void vmw_kms_lost_device(struct drm_device *dev);
941 942
942int vmw_dumb_create(struct drm_file *file_priv, 943int vmw_dumb_create(struct drm_file *file_priv,
943 struct drm_device *dev, 944 struct drm_device *dev,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index ead61015cd79..3c824fd7cbf3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -31,7 +31,6 @@
31#include <drm/drm_atomic_helper.h> 31#include <drm/drm_atomic_helper.h>
32#include <drm/drm_rect.h> 32#include <drm/drm_rect.h>
33 33
34
35/* Might need a hrtimer here? */ 34/* Might need a hrtimer here? */
36#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) 35#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
37 36
@@ -2517,9 +2516,12 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
2517 * Helper to be used if an error forces the caller to undo the actions of 2516 * Helper to be used if an error forces the caller to undo the actions of
2518 * vmw_kms_helper_resource_prepare. 2517 * vmw_kms_helper_resource_prepare.
2519 */ 2518 */
2520void vmw_kms_helper_resource_revert(struct vmw_resource *res) 2519void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx)
2521{ 2520{
2522 vmw_kms_helper_buffer_revert(res->backup); 2521 struct vmw_resource *res = ctx->res;
2522
2523 vmw_kms_helper_buffer_revert(ctx->buf);
2524 vmw_dmabuf_unreference(&ctx->buf);
2523 vmw_resource_unreserve(res, false, NULL, 0); 2525 vmw_resource_unreserve(res, false, NULL, 0);
2524 mutex_unlock(&res->dev_priv->cmdbuf_mutex); 2526 mutex_unlock(&res->dev_priv->cmdbuf_mutex);
2525} 2527}
@@ -2536,10 +2538,14 @@ void vmw_kms_helper_resource_revert(struct vmw_resource *res)
2536 * interrupted by a signal. 2538 * interrupted by a signal.
2537 */ 2539 */
2538int vmw_kms_helper_resource_prepare(struct vmw_resource *res, 2540int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
2539 bool interruptible) 2541 bool interruptible,
2542 struct vmw_validation_ctx *ctx)
2540{ 2543{
2541 int ret = 0; 2544 int ret = 0;
2542 2545
2546 ctx->buf = NULL;
2547 ctx->res = res;
2548
2543 if (interruptible) 2549 if (interruptible)
2544 ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex); 2550 ret = mutex_lock_interruptible(&res->dev_priv->cmdbuf_mutex);
2545 else 2551 else
@@ -2558,6 +2564,8 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
2558 res->dev_priv->has_mob); 2564 res->dev_priv->has_mob);
2559 if (ret) 2565 if (ret)
2560 goto out_unreserve; 2566 goto out_unreserve;
2567
2568 ctx->buf = vmw_dmabuf_reference(res->backup);
2561 } 2569 }
2562 ret = vmw_resource_validate(res); 2570 ret = vmw_resource_validate(res);
2563 if (ret) 2571 if (ret)
@@ -2565,7 +2573,7 @@ int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
2565 return 0; 2573 return 0;
2566 2574
2567out_revert: 2575out_revert:
2568 vmw_kms_helper_buffer_revert(res->backup); 2576 vmw_kms_helper_buffer_revert(ctx->buf);
2569out_unreserve: 2577out_unreserve:
2570 vmw_resource_unreserve(res, false, NULL, 0); 2578 vmw_resource_unreserve(res, false, NULL, 0);
2571out_unlock: 2579out_unlock:
@@ -2581,11 +2589,13 @@ out_unlock:
2581 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a 2589 * @out_fence: Optional pointer to a fence pointer. If non-NULL, a
2582 * ref-counted fence pointer is returned here. 2590 * ref-counted fence pointer is returned here.
2583 */ 2591 */
2584void vmw_kms_helper_resource_finish(struct vmw_resource *res, 2592void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
2585 struct vmw_fence_obj **out_fence) 2593 struct vmw_fence_obj **out_fence)
2586{ 2594{
2587 if (res->backup || out_fence) 2595 struct vmw_resource *res = ctx->res;
2588 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, 2596
2597 if (ctx->buf || out_fence)
2598 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
2589 out_fence, NULL); 2599 out_fence, NULL);
2590 2600
2591 vmw_resource_unreserve(res, false, NULL, 0); 2601 vmw_resource_unreserve(res, false, NULL, 0);
@@ -2851,3 +2861,14 @@ int vmw_kms_set_config(struct drm_mode_set *set,
2851 2861
2852 return drm_atomic_helper_set_config(set, ctx); 2862 return drm_atomic_helper_set_config(set, ctx);
2853} 2863}
2864
2865
2866/**
2867 * vmw_kms_lost_device - Notify kms that modesetting capabilities will be lost
2868 *
2869 * @dev: Pointer to the drm device
2870 */
2871void vmw_kms_lost_device(struct drm_device *dev)
2872{
2873 drm_atomic_helper_shutdown(dev);
2874}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index cd9da2dd79af..3d2ca280eaa7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -240,6 +240,11 @@ struct vmw_display_unit {
240 int set_gui_y; 240 int set_gui_y;
241}; 241};
242 242
243struct vmw_validation_ctx {
244 struct vmw_resource *res;
245 struct vmw_dma_buffer *buf;
246};
247
243#define vmw_crtc_to_du(x) \ 248#define vmw_crtc_to_du(x) \
244 container_of(x, struct vmw_display_unit, crtc) 249 container_of(x, struct vmw_display_unit, crtc)
245#define vmw_connector_to_du(x) \ 250#define vmw_connector_to_du(x) \
@@ -296,9 +301,10 @@ void vmw_kms_helper_buffer_finish(struct vmw_private *dev_priv,
296 struct drm_vmw_fence_rep __user * 301 struct drm_vmw_fence_rep __user *
297 user_fence_rep); 302 user_fence_rep);
298int vmw_kms_helper_resource_prepare(struct vmw_resource *res, 303int vmw_kms_helper_resource_prepare(struct vmw_resource *res,
299 bool interruptible); 304 bool interruptible,
300void vmw_kms_helper_resource_revert(struct vmw_resource *res); 305 struct vmw_validation_ctx *ctx);
301void vmw_kms_helper_resource_finish(struct vmw_resource *res, 306void vmw_kms_helper_resource_revert(struct vmw_validation_ctx *ctx);
307void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
302 struct vmw_fence_obj **out_fence); 308 struct vmw_fence_obj **out_fence);
303int vmw_kms_readback(struct vmw_private *dev_priv, 309int vmw_kms_readback(struct vmw_private *dev_priv,
304 struct drm_file *file_priv, 310 struct drm_file *file_priv,
@@ -439,5 +445,4 @@ int vmw_kms_stdu_dma(struct vmw_private *dev_priv,
439 445
440int vmw_kms_set_config(struct drm_mode_set *set, 446int vmw_kms_set_config(struct drm_mode_set *set,
441 struct drm_modeset_acquire_ctx *ctx); 447 struct drm_modeset_acquire_ctx *ctx);
442
443#endif 448#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 63a4cd794b73..3ec9eae831b8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -909,12 +909,13 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
909 struct vmw_framebuffer_surface *vfbs = 909 struct vmw_framebuffer_surface *vfbs =
910 container_of(framebuffer, typeof(*vfbs), base); 910 container_of(framebuffer, typeof(*vfbs), base);
911 struct vmw_kms_sou_surface_dirty sdirty; 911 struct vmw_kms_sou_surface_dirty sdirty;
912 struct vmw_validation_ctx ctx;
912 int ret; 913 int ret;
913 914
914 if (!srf) 915 if (!srf)
915 srf = &vfbs->surface->res; 916 srf = &vfbs->surface->res;
916 917
917 ret = vmw_kms_helper_resource_prepare(srf, true); 918 ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
918 if (ret) 919 if (ret)
919 return ret; 920 return ret;
920 921
@@ -933,7 +934,7 @@ int vmw_kms_sou_do_surface_dirty(struct vmw_private *dev_priv,
933 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips, 934 ret = vmw_kms_helper_dirty(dev_priv, framebuffer, clips, vclips,
934 dest_x, dest_y, num_clips, inc, 935 dest_x, dest_y, num_clips, inc,
935 &sdirty.base); 936 &sdirty.base);
936 vmw_kms_helper_resource_finish(srf, out_fence); 937 vmw_kms_helper_resource_finish(&ctx, out_fence);
937 938
938 return ret; 939 return ret;
939} 940}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index b68d74888ab1..6b969e5dea2a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -980,12 +980,13 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
980 struct vmw_framebuffer_surface *vfbs = 980 struct vmw_framebuffer_surface *vfbs =
981 container_of(framebuffer, typeof(*vfbs), base); 981 container_of(framebuffer, typeof(*vfbs), base);
982 struct vmw_stdu_dirty sdirty; 982 struct vmw_stdu_dirty sdirty;
983 struct vmw_validation_ctx ctx;
983 int ret; 984 int ret;
984 985
985 if (!srf) 986 if (!srf)
986 srf = &vfbs->surface->res; 987 srf = &vfbs->surface->res;
987 988
988 ret = vmw_kms_helper_resource_prepare(srf, true); 989 ret = vmw_kms_helper_resource_prepare(srf, true, &ctx);
989 if (ret) 990 if (ret)
990 return ret; 991 return ret;
991 992
@@ -1008,7 +1009,7 @@ int vmw_kms_stdu_surface_dirty(struct vmw_private *dev_priv,
1008 dest_x, dest_y, num_clips, inc, 1009 dest_x, dest_y, num_clips, inc,
1009 &sdirty.base); 1010 &sdirty.base);
1010out_finish: 1011out_finish:
1011 vmw_kms_helper_resource_finish(srf, out_fence); 1012 vmw_kms_helper_resource_finish(&ctx, out_fence);
1012 1013
1013 return ret; 1014 return ret;
1014} 1015}
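
The vmwgfx hunks above thread a small vmw_validation_ctx through the prepare/revert/finish helpers: prepare() records the resource and takes its own reference to the backing buffer, so the later calls operate on exactly the buffer that was validated even if res->backup changes in the meantime. A rough standalone sketch of that pattern, with hypothetical types rather than the vmwgfx API:

/*
 * Illustrative sketch of the prepare/finish validation-context pattern:
 * prepare() pins the buffer it validates, finish() releases that same
 * buffer. Hypothetical types, not the vmwgfx API.
 */
#include <stdio.h>
#include <stdlib.h>

struct buffer {
	int refcount;
};

struct resource {
	struct buffer *backup;		/* may be swapped by other paths */
};

struct validation_ctx {
	struct resource *res;
	struct buffer *buf;		/* pinned at prepare time */
};

static struct buffer *buf_get(struct buffer *buf)
{
	if (buf)
		buf->refcount++;
	return buf;
}

static void buf_put(struct buffer **buf)
{
	if (*buf && --(*buf)->refcount == 0)
		free(*buf);
	*buf = NULL;
}

static void resource_prepare(struct resource *res, struct validation_ctx *ctx)
{
	ctx->res = res;
	ctx->buf = buf_get(res->backup);	/* pin what we validate */
}

static void resource_finish(struct validation_ctx *ctx)
{
	/* fencing/flushing would use ctx->buf here, not ctx->res->backup */
	buf_put(&ctx->buf);			/* drop our reference */
}

int main(void)
{
	struct buffer *buf = calloc(1, sizeof(*buf));
	struct resource res;
	struct validation_ctx ctx;

	if (!buf)
		return 1;
	buf->refcount = 1;
	res.backup = buf;

	resource_prepare(&res, &ctx);
	res.backup = NULL;	/* even if the backup changes here ...         */
	resource_finish(&ctx);	/* ... finish still sees the buffer it pinned */

	buf_put(&buf);		/* drop the original reference */
	return 0;
}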
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 97b99500153d..83f9dd934a5d 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -250,10 +250,14 @@ void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan)
250{ 250{
251 int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num); 251 int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
252 struct ipu_prg *prg = ipu_chan->ipu->prg_priv; 252 struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
253 struct ipu_prg_channel *chan = &prg->chan[prg_chan]; 253 struct ipu_prg_channel *chan;
254 u32 val; 254 u32 val;
255 255
256 if (!chan->enabled || prg_chan < 0) 256 if (prg_chan < 0)
257 return;
258
259 chan = &prg->chan[prg_chan];
260 if (!chan->enabled)
257 return; 261 return;
258 262
259 pm_runtime_get_sync(prg->dev); 263 pm_runtime_get_sync(prg->dev);
@@ -280,13 +284,15 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
280{ 284{
281 int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num); 285 int prg_chan = ipu_prg_ipu_to_prg_chan(ipu_chan->num);
282 struct ipu_prg *prg = ipu_chan->ipu->prg_priv; 286 struct ipu_prg *prg = ipu_chan->ipu->prg_priv;
283 struct ipu_prg_channel *chan = &prg->chan[prg_chan]; 287 struct ipu_prg_channel *chan;
284 u32 val; 288 u32 val;
285 int ret; 289 int ret;
286 290
287 if (prg_chan < 0) 291 if (prg_chan < 0)
288 return prg_chan; 292 return prg_chan;
289 293
294 chan = &prg->chan[prg_chan];
295
290 if (chan->enabled) { 296 if (chan->enabled) {
291 ipu_pre_update(prg->pres[chan->used_pre], *eba); 297 ipu_pre_update(prg->pres[chan->used_pre], *eba);
292 return 0; 298 return 0;
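
The ipu-prg hunks above delay forming &prg->chan[prg_chan] until after the prg_chan < 0 check, so a "no PRG channel" return value is never used as an array index. A small standalone sketch of the same check-before-index pattern, with invented names:

#include <stdio.h>

#define NUM_CHAN 3

struct chan {
	int enabled;
};

struct ctrl {
	struct chan chan[NUM_CHAN];
};

/* Hypothetical mapping: returns a channel index, or -1 for "no channel". */
static int map_to_chan(int hw_id)
{
	return (hw_id >= 0 && hw_id < NUM_CHAN) ? hw_id : -1;
}

static void channel_disable(struct ctrl *ctrl, int hw_id)
{
	int idx = map_to_chan(hw_id);
	struct chan *ch;

	/* validate the index first; only then form the element pointer */
	if (idx < 0)
		return;

	ch = &ctrl->chan[idx];
	if (!ch->enabled)
		return;

	ch->enabled = 0;
}

int main(void)
{
	struct ctrl ctrl = { .chan = { { 1 }, { 1 }, { 0 } } };

	channel_disable(&ctrl, 1);	/* valid channel, gets disabled */
	channel_disable(&ctrl, 7);	/* no channel: bails out before indexing */
	printf("chan1 enabled = %d\n", ctrl.chan[1].enabled);
	return 0;
}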
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 50e071444a5c..8699bb969e7e 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -417,13 +417,24 @@ __hv_pkt_iter_next(struct vmbus_channel *channel,
417} 417}
418EXPORT_SYMBOL_GPL(__hv_pkt_iter_next); 418EXPORT_SYMBOL_GPL(__hv_pkt_iter_next);
419 419
420/* How many bytes were read in this iterator cycle */
421static u32 hv_pkt_iter_bytes_read(const struct hv_ring_buffer_info *rbi,
422 u32 start_read_index)
423{
424 if (rbi->priv_read_index >= start_read_index)
425 return rbi->priv_read_index - start_read_index;
426 else
427 return rbi->ring_datasize - start_read_index +
428 rbi->priv_read_index;
429}
430
420/* 431/*
421 * Update host ring buffer after iterating over packets. 432 * Update host ring buffer after iterating over packets.
422 */ 433 */
423void hv_pkt_iter_close(struct vmbus_channel *channel) 434void hv_pkt_iter_close(struct vmbus_channel *channel)
424{ 435{
425 struct hv_ring_buffer_info *rbi = &channel->inbound; 436 struct hv_ring_buffer_info *rbi = &channel->inbound;
426 u32 orig_write_sz = hv_get_bytes_to_write(rbi); 437 u32 curr_write_sz, pending_sz, bytes_read, start_read_index;
427 438
428 /* 439 /*
429 * Make sure all reads are done before we update the read index since 440 * Make sure all reads are done before we update the read index since
@@ -431,8 +442,12 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
431 * is updated. 442 * is updated.
432 */ 443 */
433 virt_rmb(); 444 virt_rmb();
445 start_read_index = rbi->ring_buffer->read_index;
434 rbi->ring_buffer->read_index = rbi->priv_read_index; 446 rbi->ring_buffer->read_index = rbi->priv_read_index;
435 447
448 if (!rbi->ring_buffer->feature_bits.feat_pending_send_sz)
449 return;
450
436 /* 451 /*
437 * Issue a full memory barrier before making the signaling decision. 452 * Issue a full memory barrier before making the signaling decision.
438 * Here is the reason for having this barrier: 453 * Here is the reason for having this barrier:
@@ -446,26 +461,29 @@ void hv_pkt_iter_close(struct vmbus_channel *channel)
446 */ 461 */
447 virt_mb(); 462 virt_mb();
448 463
449 /* If host has disabled notifications then skip */ 464 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz);
450 if (rbi->ring_buffer->interrupt_mask) 465 if (!pending_sz)
451 return; 466 return;
452 467
453 if (rbi->ring_buffer->feature_bits.feat_pending_send_sz) { 468 /*
454 u32 pending_sz = READ_ONCE(rbi->ring_buffer->pending_send_sz); 469 * Ensure the read of write_index in hv_get_bytes_to_write()
470 * happens after the read of pending_send_sz.
471 */
472 virt_rmb();
473 curr_write_sz = hv_get_bytes_to_write(rbi);
474 bytes_read = hv_pkt_iter_bytes_read(rbi, start_read_index);
455 475
456 /* 476 /*
457 * If there was space before we began iteration, 477 * If there was space before we began iteration,
458 * then host was not blocked. Also handles case where 478 * then host was not blocked.
459 * pending_sz is zero then host has nothing pending 479 */
460 * and does not need to be signaled.
461 */
462 if (orig_write_sz > pending_sz)
463 return;
464 480
465 /* If pending write will not fit, don't give false hope. */ 481 if (curr_write_sz - bytes_read > pending_sz)
466 if (hv_get_bytes_to_write(rbi) < pending_sz) 482 return;
467 return; 483
468 } 484 /* If pending write will not fit, don't give false hope. */
485 if (curr_write_sz <= pending_sz)
486 return;
469 487
470 vmbus_setevent(channel); 488 vmbus_setevent(channel);
471} 489}
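
The ring-buffer change above computes how many bytes the iterator consumed, allowing for the read index wrapping past the end of the ring, and signals the host only when that read is what made the pending write fit. A standalone sketch of the arithmetic (not the Hyper-V code; the helper names are made up):

#include <stdio.h>

typedef unsigned int u32;

/* Bytes consumed between @start and @now in a ring of @size data bytes. */
static u32 bytes_read(u32 size, u32 start, u32 now)
{
	if (now >= start)
		return now - start;
	return size - start + now;	/* the read index wrapped past the end */
}

/*
 * Signal the writer only if the pending write did not fit before this read
 * pass (curr_write_sz - consumed <= pending_sz) but does fit now
 * (curr_write_sz > pending_sz).
 */
static int should_signal(u32 curr_write_sz, u32 consumed, u32 pending_sz)
{
	if (!pending_sz)
		return 0;			/* writer asked for nothing */
	if (curr_write_sz - consumed > pending_sz)
		return 0;			/* writer was never blocked */
	if (curr_write_sz <= pending_sz)
		return 0;			/* still would not fit */
	return 1;
}

int main(void)
{
	u32 size = 4096;

	/* read index wrapped: started at 4000, now at 100 -> 196 bytes read */
	printf("consumed = %u\n", bytes_read(size, 4000, 100));

	/* 300 bytes free now, 196 of them freed by us, writer needs 200 */
	printf("signal = %d\n", should_signal(300, 196, 200));
	return 0;
}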
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 6fe995cf16a6..3e6fd5a8ac5b 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -920,6 +920,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
920int st_accel_common_probe(struct iio_dev *indio_dev) 920int st_accel_common_probe(struct iio_dev *indio_dev)
921{ 921{
922 struct st_sensor_data *adata = iio_priv(indio_dev); 922 struct st_sensor_data *adata = iio_priv(indio_dev);
923 struct st_sensors_platform_data *pdata =
924 (struct st_sensors_platform_data *)adata->dev->platform_data;
923 int irq = adata->get_irq_data_ready(indio_dev); 925 int irq = adata->get_irq_data_ready(indio_dev);
924 int err; 926 int err;
925 927
@@ -946,7 +948,10 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
946 &adata->sensor_settings->fs.fs_avl[0]; 948 &adata->sensor_settings->fs.fs_avl[0];
947 adata->odr = adata->sensor_settings->odr.odr_avl[0].hz; 949 adata->odr = adata->sensor_settings->odr.odr_avl[0].hz;
948 950
949 err = st_sensors_init_sensor(indio_dev, adata->dev->platform_data); 951 if (!pdata)
952 pdata = (struct st_sensors_platform_data *)&default_accel_pdata;
953
954 err = st_sensors_init_sensor(indio_dev, pdata);
950 if (err < 0) 955 if (err < 0)
951 goto st_accel_power_off; 956 goto st_accel_power_off;
952 957
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index 29fa7736d80c..ede955d9b2a4 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -462,8 +462,10 @@ static int meson_sar_adc_lock(struct iio_dev *indio_dev)
462 regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val); 462 regmap_read(priv->regmap, MESON_SAR_ADC_DELAY, &val);
463 } while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--); 463 } while (val & MESON_SAR_ADC_DELAY_BL30_BUSY && timeout--);
464 464
465 if (timeout < 0) 465 if (timeout < 0) {
466 mutex_unlock(&indio_dev->mlock);
466 return -ETIMEDOUT; 467 return -ETIMEDOUT;
468 }
467 } 469 }
468 470
469 return 0; 471 return 0;
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index daa026d6a94f..01422d11753c 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -54,7 +54,6 @@ struct stm32_dfsdm_adc {
54 struct stm32_dfsdm *dfsdm; 54 struct stm32_dfsdm *dfsdm;
55 const struct stm32_dfsdm_dev_data *dev_data; 55 const struct stm32_dfsdm_dev_data *dev_data;
56 unsigned int fl_id; 56 unsigned int fl_id;
57 unsigned int ch_id;
58 57
59 /* ADC specific */ 58 /* ADC specific */
60 unsigned int oversamp; 59 unsigned int oversamp;
@@ -384,7 +383,7 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
384{ 383{
385 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); 384 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
386 struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; 385 struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
387 struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id]; 386 struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
388 unsigned int sample_freq = adc->sample_freq; 387 unsigned int sample_freq = adc->sample_freq;
389 unsigned int spi_freq; 388 unsigned int spi_freq;
390 int ret; 389 int ret;
@@ -419,18 +418,20 @@ static ssize_t dfsdm_adc_audio_set_spiclk(struct iio_dev *indio_dev,
419 return len; 418 return len;
420} 419}
421 420
422static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc, bool dma) 421static int stm32_dfsdm_start_conv(struct stm32_dfsdm_adc *adc,
422 const struct iio_chan_spec *chan,
423 bool dma)
423{ 424{
424 struct regmap *regmap = adc->dfsdm->regmap; 425 struct regmap *regmap = adc->dfsdm->regmap;
425 int ret; 426 int ret;
426 unsigned int dma_en = 0, cont_en = 0; 427 unsigned int dma_en = 0, cont_en = 0;
427 428
428 ret = stm32_dfsdm_start_channel(adc->dfsdm, adc->ch_id); 429 ret = stm32_dfsdm_start_channel(adc->dfsdm, chan->channel);
429 if (ret < 0) 430 if (ret < 0)
430 return ret; 431 return ret;
431 432
432 ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id, 433 ret = stm32_dfsdm_filter_configure(adc->dfsdm, adc->fl_id,
433 adc->ch_id); 434 chan->channel);
434 if (ret < 0) 435 if (ret < 0)
435 goto stop_channels; 436 goto stop_channels;
436 437
@@ -464,12 +465,13 @@ stop_channels:
464 465
465 regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id), 466 regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
466 DFSDM_CR1_RCONT_MASK, 0); 467 DFSDM_CR1_RCONT_MASK, 0);
467 stm32_dfsdm_stop_channel(adc->dfsdm, adc->fl_id); 468 stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel);
468 469
469 return ret; 470 return ret;
470} 471}
471 472
472static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc) 473static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc,
474 const struct iio_chan_spec *chan)
473{ 475{
474 struct regmap *regmap = adc->dfsdm->regmap; 476 struct regmap *regmap = adc->dfsdm->regmap;
475 477
@@ -482,7 +484,7 @@ static void stm32_dfsdm_stop_conv(struct stm32_dfsdm_adc *adc)
482 regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id), 484 regmap_update_bits(regmap, DFSDM_CR1(adc->fl_id),
483 DFSDM_CR1_RCONT_MASK, 0); 485 DFSDM_CR1_RCONT_MASK, 0);
484 486
485 stm32_dfsdm_stop_channel(adc->dfsdm, adc->ch_id); 487 stm32_dfsdm_stop_channel(adc->dfsdm, chan->channel);
486} 488}
487 489
488static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev, 490static int stm32_dfsdm_set_watermark(struct iio_dev *indio_dev,
@@ -609,6 +611,7 @@ static int stm32_dfsdm_adc_dma_start(struct iio_dev *indio_dev)
609static int stm32_dfsdm_postenable(struct iio_dev *indio_dev) 611static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
610{ 612{
611 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); 613 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
614 const struct iio_chan_spec *chan = &indio_dev->channels[0];
612 int ret; 615 int ret;
613 616
614 /* Reset adc buffer index */ 617 /* Reset adc buffer index */
@@ -618,7 +621,7 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
618 if (ret < 0) 621 if (ret < 0)
619 return ret; 622 return ret;
620 623
621 ret = stm32_dfsdm_start_conv(adc, true); 624 ret = stm32_dfsdm_start_conv(adc, chan, true);
622 if (ret) { 625 if (ret) {
623 dev_err(&indio_dev->dev, "Can't start conversion\n"); 626 dev_err(&indio_dev->dev, "Can't start conversion\n");
624 goto stop_dfsdm; 627 goto stop_dfsdm;
@@ -635,7 +638,7 @@ static int stm32_dfsdm_postenable(struct iio_dev *indio_dev)
635 return 0; 638 return 0;
636 639
637err_stop_conv: 640err_stop_conv:
638 stm32_dfsdm_stop_conv(adc); 641 stm32_dfsdm_stop_conv(adc, chan);
639stop_dfsdm: 642stop_dfsdm:
640 stm32_dfsdm_stop_dfsdm(adc->dfsdm); 643 stm32_dfsdm_stop_dfsdm(adc->dfsdm);
641 644
@@ -645,11 +648,12 @@ stop_dfsdm:
645static int stm32_dfsdm_predisable(struct iio_dev *indio_dev) 648static int stm32_dfsdm_predisable(struct iio_dev *indio_dev)
646{ 649{
647 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); 650 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
651 const struct iio_chan_spec *chan = &indio_dev->channels[0];
648 652
649 if (adc->dma_chan) 653 if (adc->dma_chan)
650 dmaengine_terminate_all(adc->dma_chan); 654 dmaengine_terminate_all(adc->dma_chan);
651 655
652 stm32_dfsdm_stop_conv(adc); 656 stm32_dfsdm_stop_conv(adc, chan);
653 657
654 stm32_dfsdm_stop_dfsdm(adc->dfsdm); 658 stm32_dfsdm_stop_dfsdm(adc->dfsdm);
655 659
@@ -730,7 +734,7 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
730 if (ret < 0) 734 if (ret < 0)
731 goto stop_dfsdm; 735 goto stop_dfsdm;
732 736
733 ret = stm32_dfsdm_start_conv(adc, false); 737 ret = stm32_dfsdm_start_conv(adc, chan, false);
734 if (ret < 0) { 738 if (ret < 0) {
735 regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id), 739 regmap_update_bits(adc->dfsdm->regmap, DFSDM_CR2(adc->fl_id),
736 DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0)); 740 DFSDM_CR2_REOCIE_MASK, DFSDM_CR2_REOCIE(0));
@@ -751,7 +755,7 @@ static int stm32_dfsdm_single_conv(struct iio_dev *indio_dev,
751 else 755 else
752 ret = IIO_VAL_INT; 756 ret = IIO_VAL_INT;
753 757
754 stm32_dfsdm_stop_conv(adc); 758 stm32_dfsdm_stop_conv(adc, chan);
755 759
756stop_dfsdm: 760stop_dfsdm:
757 stm32_dfsdm_stop_dfsdm(adc->dfsdm); 761 stm32_dfsdm_stop_dfsdm(adc->dfsdm);
@@ -765,7 +769,7 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
765{ 769{
766 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev); 770 struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
767 struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id]; 771 struct stm32_dfsdm_filter *fl = &adc->dfsdm->fl_list[adc->fl_id];
768 struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[adc->ch_id]; 772 struct stm32_dfsdm_channel *ch = &adc->dfsdm->ch_list[chan->channel];
769 unsigned int spi_freq = adc->spi_freq; 773 unsigned int spi_freq = adc->spi_freq;
770 int ret = -EINVAL; 774 int ret = -EINVAL;
771 775
@@ -972,7 +976,6 @@ static int stm32_dfsdm_adc_chan_init_one(struct iio_dev *indio_dev,
972 } 976 }
973 ch->scan_type.realbits = 24; 977 ch->scan_type.realbits = 24;
974 ch->scan_type.storagebits = 32; 978 ch->scan_type.storagebits = 32;
975 adc->ch_id = ch->channel;
976 979
977 return stm32_dfsdm_chan_configure(adc->dfsdm, 980 return stm32_dfsdm_chan_configure(adc->dfsdm,
978 &adc->dfsdm->ch_list[ch->channel]); 981 &adc->dfsdm->ch_list[ch->channel]);
@@ -1001,7 +1004,7 @@ static int stm32_dfsdm_audio_init(struct iio_dev *indio_dev)
1001 } 1004 }
1002 ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ); 1005 ch->info_mask_separate = BIT(IIO_CHAN_INFO_SAMP_FREQ);
1003 1006
1004 d_ch = &adc->dfsdm->ch_list[adc->ch_id]; 1007 d_ch = &adc->dfsdm->ch_list[ch->channel];
1005 if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL) 1008 if (d_ch->src != DFSDM_CHANNEL_SPI_CLOCK_EXTERNAL)
1006 adc->spi_freq = adc->dfsdm->spi_master_freq; 1009 adc->spi_freq = adc->dfsdm->spi_master_freq;
1007 1010
@@ -1042,8 +1045,8 @@ static int stm32_dfsdm_adc_init(struct iio_dev *indio_dev)
1042 return -ENOMEM; 1045 return -ENOMEM;
1043 1046
1044 for (chan_idx = 0; chan_idx < num_ch; chan_idx++) { 1047 for (chan_idx = 0; chan_idx < num_ch; chan_idx++) {
1045 ch->scan_index = chan_idx; 1048 ch[chan_idx].scan_index = chan_idx;
1046 ret = stm32_dfsdm_adc_chan_init_one(indio_dev, ch); 1049 ret = stm32_dfsdm_adc_chan_init_one(indio_dev, &ch[chan_idx]);
1047 if (ret < 0) { 1050 if (ret < 0) {
1048 dev_err(&indio_dev->dev, "Channels init failed\n"); 1051 dev_err(&indio_dev->dev, "Channels init failed\n");
1049 return ret; 1052 return ret;
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index 6290332cfd3f..e50efdcc41ff 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -83,7 +83,7 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
83{ 83{
84 struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm); 84 struct dfsdm_priv *priv = container_of(dfsdm, struct dfsdm_priv, dfsdm);
85 struct device *dev = &priv->pdev->dev; 85 struct device *dev = &priv->pdev->dev;
86 unsigned int clk_div = priv->spi_clk_out_div; 86 unsigned int clk_div = priv->spi_clk_out_div, clk_src;
87 int ret; 87 int ret;
88 88
89 if (atomic_inc_return(&priv->n_active_ch) == 1) { 89 if (atomic_inc_return(&priv->n_active_ch) == 1) {
@@ -100,6 +100,14 @@ int stm32_dfsdm_start_dfsdm(struct stm32_dfsdm *dfsdm)
100 } 100 }
101 } 101 }
102 102
103 /* select clock source, e.g. 0 for "dfsdm" or 1 for "audio" */
104 clk_src = priv->aclk ? 1 : 0;
105 ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
106 DFSDM_CHCFGR1_CKOUTSRC_MASK,
107 DFSDM_CHCFGR1_CKOUTSRC(clk_src));
108 if (ret < 0)
109 goto disable_aclk;
110
103 /* Output the SPI CLKOUT (if clk_div == 0 clock is OFF) */ 111 /* Output the SPI CLKOUT (if clk_div == 0 clock is OFF) */
104 ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0), 112 ret = regmap_update_bits(dfsdm->regmap, DFSDM_CHCFGR1(0),
105 DFSDM_CHCFGR1_CKOUTDIV_MASK, 113 DFSDM_CHCFGR1_CKOUTDIV_MASK,
@@ -274,7 +282,7 @@ static int stm32_dfsdm_probe(struct platform_device *pdev)
274 282
275 dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm", 283 dfsdm->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dfsdm",
276 dfsdm->base, 284 dfsdm->base,
277 &stm32h7_dfsdm_regmap_cfg); 285 dev_data->regmap_cfg);
278 if (IS_ERR(dfsdm->regmap)) { 286 if (IS_ERR(dfsdm->regmap)) {
279 ret = PTR_ERR(dfsdm->regmap); 287 ret = PTR_ERR(dfsdm->regmap);
280 dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n", 288 dev_err(&pdev->dev, "%s: Failed to allocate regmap: %d\n",
diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
index fbe2431f5b81..1ea9f5513b02 100644
--- a/drivers/iio/chemical/ccs811.c
+++ b/drivers/iio/chemical/ccs811.c
@@ -133,6 +133,9 @@ static int ccs811_start_sensor_application(struct i2c_client *client)
133 if (ret < 0) 133 if (ret < 0)
134 return ret; 134 return ret;
135 135
136 if ((ret & CCS811_STATUS_FW_MODE_APPLICATION))
137 return 0;
138
136 if ((ret & CCS811_STATUS_APP_VALID_MASK) != 139 if ((ret & CCS811_STATUS_APP_VALID_MASK) !=
137 CCS811_STATUS_APP_VALID_LOADED) 140 CCS811_STATUS_APP_VALID_LOADED)
138 return -EIO; 141 return -EIO;
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 349e5c713c03..4ddb6cf7d401 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -640,7 +640,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
640 press_data->sensor_settings->drdy_irq.int2.addr)) 640 press_data->sensor_settings->drdy_irq.int2.addr))
641 pdata = (struct st_sensors_platform_data *)&default_press_pdata; 641 pdata = (struct st_sensors_platform_data *)&default_press_pdata;
642 642
643 err = st_sensors_init_sensor(indio_dev, press_data->dev->platform_data); 643 err = st_sensors_init_sensor(indio_dev, pdata);
644 if (err < 0) 644 if (err < 0)
645 goto st_press_power_off; 645 goto st_press_power_off;
646 646
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e66963ca58bd..a5367c5efbe7 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3069,7 +3069,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
3069 continue; 3069 continue;
3070 3070
3071 /* different dest port -> unique */ 3071 /* different dest port -> unique */
3072 if (!cma_any_port(cur_daddr) && 3072 if (!cma_any_port(daddr) &&
3073 !cma_any_port(cur_daddr) &&
3073 (dport != cur_dport)) 3074 (dport != cur_dport))
3074 continue; 3075 continue;
3075 3076
@@ -3080,7 +3081,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
3080 continue; 3081 continue;
3081 3082
3082 /* different dst address -> unique */ 3083 /* different dst address -> unique */
3083 if (!cma_any_addr(cur_daddr) && 3084 if (!cma_any_addr(daddr) &&
3085 !cma_any_addr(cur_daddr) &&
3084 cma_addr_cmp(daddr, cur_daddr)) 3086 cma_addr_cmp(daddr, cur_daddr))
3085 continue; 3087 continue;
3086 3088
@@ -3378,13 +3380,13 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
3378 } 3380 }
3379#endif 3381#endif
3380 } 3382 }
3383 daddr = cma_dst_addr(id_priv);
3384 daddr->sa_family = addr->sa_family;
3385
3381 ret = cma_get_port(id_priv); 3386 ret = cma_get_port(id_priv);
3382 if (ret) 3387 if (ret)
3383 goto err2; 3388 goto err2;
3384 3389
3385 daddr = cma_dst_addr(id_priv);
3386 daddr->sa_family = addr->sa_family;
3387
3388 return 0; 3390 return 0;
3389err2: 3391err2:
3390 if (id_priv->cma_dev) 3392 if (id_priv->cma_dev)
@@ -4173,6 +4175,9 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
4173 struct cma_multicast *mc; 4175 struct cma_multicast *mc;
4174 int ret; 4176 int ret;
4175 4177
4178 if (!id->device)
4179 return -EINVAL;
4180
4176 id_priv = container_of(id, struct rdma_id_private, id); 4181 id_priv = container_of(id, struct rdma_id_private, id);
4177 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && 4182 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
4178 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) 4183 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 3a9d0f5b5881..e5a1e7d81326 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -132,7 +132,7 @@ static inline struct ucma_context *_ucma_find_context(int id,
132 ctx = idr_find(&ctx_idr, id); 132 ctx = idr_find(&ctx_idr, id);
133 if (!ctx) 133 if (!ctx)
134 ctx = ERR_PTR(-ENOENT); 134 ctx = ERR_PTR(-ENOENT);
135 else if (ctx->file != file) 135 else if (ctx->file != file || !ctx->cm_id)
136 ctx = ERR_PTR(-EINVAL); 136 ctx = ERR_PTR(-EINVAL);
137 return ctx; 137 return ctx;
138} 138}
@@ -456,6 +456,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
456 struct rdma_ucm_create_id cmd; 456 struct rdma_ucm_create_id cmd;
457 struct rdma_ucm_create_id_resp resp; 457 struct rdma_ucm_create_id_resp resp;
458 struct ucma_context *ctx; 458 struct ucma_context *ctx;
459 struct rdma_cm_id *cm_id;
459 enum ib_qp_type qp_type; 460 enum ib_qp_type qp_type;
460 int ret; 461 int ret;
461 462
@@ -476,10 +477,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
476 return -ENOMEM; 477 return -ENOMEM;
477 478
478 ctx->uid = cmd.uid; 479 ctx->uid = cmd.uid;
479 ctx->cm_id = rdma_create_id(current->nsproxy->net_ns, 480 cm_id = rdma_create_id(current->nsproxy->net_ns,
480 ucma_event_handler, ctx, cmd.ps, qp_type); 481 ucma_event_handler, ctx, cmd.ps, qp_type);
481 if (IS_ERR(ctx->cm_id)) { 482 if (IS_ERR(cm_id)) {
482 ret = PTR_ERR(ctx->cm_id); 483 ret = PTR_ERR(cm_id);
483 goto err1; 484 goto err1;
484 } 485 }
485 486
@@ -489,14 +490,19 @@ static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
489 ret = -EFAULT; 490 ret = -EFAULT;
490 goto err2; 491 goto err2;
491 } 492 }
493
494 ctx->cm_id = cm_id;
492 return 0; 495 return 0;
493 496
494err2: 497err2:
495 rdma_destroy_id(ctx->cm_id); 498 rdma_destroy_id(cm_id);
496err1: 499err1:
497 mutex_lock(&mut); 500 mutex_lock(&mut);
498 idr_remove(&ctx_idr, ctx->id); 501 idr_remove(&ctx_idr, ctx->id);
499 mutex_unlock(&mut); 502 mutex_unlock(&mut);
503 mutex_lock(&file->mut);
504 list_del(&ctx->list);
505 mutex_unlock(&file->mut);
500 kfree(ctx); 506 kfree(ctx);
501 return ret; 507 return ret;
502} 508}
@@ -664,19 +670,23 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
664 int in_len, int out_len) 670 int in_len, int out_len)
665{ 671{
666 struct rdma_ucm_resolve_ip cmd; 672 struct rdma_ucm_resolve_ip cmd;
673 struct sockaddr *src, *dst;
667 struct ucma_context *ctx; 674 struct ucma_context *ctx;
668 int ret; 675 int ret;
669 676
670 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 677 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
671 return -EFAULT; 678 return -EFAULT;
672 679
680 src = (struct sockaddr *) &cmd.src_addr;
681 dst = (struct sockaddr *) &cmd.dst_addr;
682 if (!rdma_addr_size(src) || !rdma_addr_size(dst))
683 return -EINVAL;
684
673 ctx = ucma_get_ctx(file, cmd.id); 685 ctx = ucma_get_ctx(file, cmd.id);
674 if (IS_ERR(ctx)) 686 if (IS_ERR(ctx))
675 return PTR_ERR(ctx); 687 return PTR_ERR(ctx);
676 688
677 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, 689 ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
678 (struct sockaddr *) &cmd.dst_addr,
679 cmd.timeout_ms);
680 ucma_put_ctx(ctx); 690 ucma_put_ctx(ctx);
681 return ret; 691 return ret;
682} 692}
@@ -1349,7 +1359,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
1349 return -ENOSPC; 1359 return -ENOSPC;
1350 1360
1351 addr = (struct sockaddr *) &cmd->addr; 1361 addr = (struct sockaddr *) &cmd->addr;
1352 if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr))) 1362 if (cmd->addr_size != rdma_addr_size(addr))
1353 return -EINVAL; 1363 return -EINVAL;
1354 1364
1355 if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER) 1365 if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
@@ -1417,6 +1427,9 @@ static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
1417 join_cmd.uid = cmd.uid; 1427 join_cmd.uid = cmd.uid;
1418 join_cmd.id = cmd.id; 1428 join_cmd.id = cmd.id;
1419 join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr); 1429 join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
1430 if (!join_cmd.addr_size)
1431 return -EINVAL;
1432
1420 join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER; 1433 join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
1421 memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size); 1434 memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);
1422 1435
@@ -1432,6 +1445,9 @@ static ssize_t ucma_join_multicast(struct ucma_file *file,
1432 if (copy_from_user(&cmd, inbuf, sizeof(cmd))) 1445 if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
1433 return -EFAULT; 1446 return -EFAULT;
1434 1447
1448 if (!rdma_addr_size((struct sockaddr *)&cmd.addr))
1449 return -EINVAL;
1450
1435 return ucma_process_join(file, &cmd, out_len); 1451 return ucma_process_join(file, &cmd, out_len);
1436} 1452}
1437 1453
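
The ucma hunks above validate user-supplied addresses with rdma_addr_size() before they are passed any further, rejecting families the resolver does not handle. A generic userspace sketch of that validate-then-use ordering, which only assumes standard sockaddr types and is not the ucma implementation:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* Return the expected size for the address family, or 0 if unsupported. */
static size_t addr_size(const struct sockaddr *addr)
{
	switch (addr->sa_family) {
	case AF_INET:
		return sizeof(struct sockaddr_in);
	case AF_INET6:
		return sizeof(struct sockaddr_in6);
	default:
		return 0;
	}
}

/* Reject the request before the addresses are ever handed further down. */
static int resolve(const struct sockaddr *src, const struct sockaddr *dst)
{
	if (!addr_size(src) || !addr_size(dst))
		return -1;		/* -EINVAL in the hunks above */

	/* ... hand off to the resolver only after validation ... */
	return 0;
}

int main(void)
{
	struct sockaddr_in good = { .sin_family = AF_INET };
	struct sockaddr_storage bad;

	memset(&bad, 0, sizeof(bad));
	bad.ss_family = AF_UNSPEC;	/* family the resolver does not handle */

	printf("good/good -> %d\n",
	       resolve((struct sockaddr *)&good, (struct sockaddr *)&good));
	printf("good/bad  -> %d\n",
	       resolve((struct sockaddr *)&good, (struct sockaddr *)&bad));
	return 0;
}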
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 3eb7a8387116..96f76896488d 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -57,8 +57,8 @@
57#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M) 57#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M)
58#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G) 58#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G)
59 59
60#define BNXT_RE_MAX_MR_SIZE_LOW BIT(BNXT_RE_PAGE_SHIFT_1G) 60#define BNXT_RE_MAX_MR_SIZE_LOW BIT_ULL(BNXT_RE_PAGE_SHIFT_1G)
61#define BNXT_RE_MAX_MR_SIZE_HIGH BIT(39) 61#define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39)
62#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH 62#define BNXT_RE_MAX_MR_SIZE BNXT_RE_MAX_MR_SIZE_HIGH
63 63
64#define BNXT_RE_MAX_QPC_COUNT (64 * 1024) 64#define BNXT_RE_MAX_QPC_COUNT (64 * 1024)
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 0dd75f449872..8301d7e5fa8c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -3598,7 +3598,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
3598 int umem_pgs, page_shift, rc; 3598 int umem_pgs, page_shift, rc;
3599 3599
3600 if (length > BNXT_RE_MAX_MR_SIZE) { 3600 if (length > BNXT_RE_MAX_MR_SIZE) {
3601 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%ld\n", 3601 dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported:%lld\n",
3602 length, BNXT_RE_MAX_MR_SIZE); 3602 length, BNXT_RE_MAX_MR_SIZE);
3603 return ERR_PTR(-ENOMEM); 3603 return ERR_PTR(-ENOMEM);
3604 } 3604 }
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 06b42c880fd4..3a78faba8d91 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -243,7 +243,7 @@ static void bnxt_qplib_service_nq(unsigned long data)
243 u32 sw_cons, raw_cons; 243 u32 sw_cons, raw_cons;
244 u16 type; 244 u16 type;
245 int budget = nq->budget; 245 int budget = nq->budget;
246 u64 q_handle; 246 uintptr_t q_handle;
247 247
248 /* Service the NQ until empty */ 248 /* Service the NQ until empty */
249 raw_cons = hwq->cons; 249 raw_cons = hwq->cons;
@@ -526,7 +526,7 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
526 526
527 /* Configure the request */ 527 /* Configure the request */
528 req.dpi = cpu_to_le32(srq->dpi->dpi); 528 req.dpi = cpu_to_le32(srq->dpi->dpi);
529 req.srq_handle = cpu_to_le64(srq); 529 req.srq_handle = cpu_to_le64((uintptr_t)srq);
530 530
531 req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements); 531 req.srq_size = cpu_to_le16((u16)srq->hwq.max_elements);
532 pbl = &srq->hwq.pbl[PBL_LVL_0]; 532 pbl = &srq->hwq.pbl[PBL_LVL_0];
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 033b6af90de9..da091de4e69d 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4860,19 +4860,19 @@ static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
4860 return ib_register_device(&dev->ib_dev, NULL); 4860 return ib_register_device(&dev->ib_dev, NULL);
4861} 4861}
4862 4862
4863static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) 4863static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev)
4864{ 4864{
4865 ib_unregister_device(&dev->ib_dev); 4865 destroy_umrc_res(dev);
4866} 4866}
4867 4867
4868static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev) 4868static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
4869{ 4869{
4870 return create_umr_res(dev); 4870 ib_unregister_device(&dev->ib_dev);
4871} 4871}
4872 4872
4873static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev) 4873static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev)
4874{ 4874{
4875 destroy_umrc_res(dev); 4875 return create_umr_res(dev);
4876} 4876}
4877 4877
4878static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev) 4878static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev)
@@ -4982,12 +4982,15 @@ static const struct mlx5_ib_profile pf_profile = {
4982 STAGE_CREATE(MLX5_IB_STAGE_BFREG, 4982 STAGE_CREATE(MLX5_IB_STAGE_BFREG,
4983 mlx5_ib_stage_bfrag_init, 4983 mlx5_ib_stage_bfrag_init,
4984 mlx5_ib_stage_bfrag_cleanup), 4984 mlx5_ib_stage_bfrag_cleanup),
4985 STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
4986 NULL,
4987 mlx5_ib_stage_pre_ib_reg_umr_cleanup),
4985 STAGE_CREATE(MLX5_IB_STAGE_IB_REG, 4988 STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
4986 mlx5_ib_stage_ib_reg_init, 4989 mlx5_ib_stage_ib_reg_init,
4987 mlx5_ib_stage_ib_reg_cleanup), 4990 mlx5_ib_stage_ib_reg_cleanup),
4988 STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES, 4991 STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
4989 mlx5_ib_stage_umr_res_init, 4992 mlx5_ib_stage_post_ib_reg_umr_init,
4990 mlx5_ib_stage_umr_res_cleanup), 4993 NULL),
4991 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, 4994 STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP,
4992 mlx5_ib_stage_delay_drop_init, 4995 mlx5_ib_stage_delay_drop_init,
4993 mlx5_ib_stage_delay_drop_cleanup), 4996 mlx5_ib_stage_delay_drop_cleanup),
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 139385129973..a5272499b600 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -739,8 +739,9 @@ enum mlx5_ib_stages {
739 MLX5_IB_STAGE_CONG_DEBUGFS, 739 MLX5_IB_STAGE_CONG_DEBUGFS,
740 MLX5_IB_STAGE_UAR, 740 MLX5_IB_STAGE_UAR,
741 MLX5_IB_STAGE_BFREG, 741 MLX5_IB_STAGE_BFREG,
742 MLX5_IB_STAGE_PRE_IB_REG_UMR,
742 MLX5_IB_STAGE_IB_REG, 743 MLX5_IB_STAGE_IB_REG,
743 MLX5_IB_STAGE_UMR_RESOURCES, 744 MLX5_IB_STAGE_POST_IB_REG_UMR,
744 MLX5_IB_STAGE_DELAY_DROP, 745 MLX5_IB_STAGE_DELAY_DROP,
745 MLX5_IB_STAGE_CLASS_ATTR, 746 MLX5_IB_STAGE_CLASS_ATTR,
746 MLX5_IB_STAGE_MAX, 747 MLX5_IB_STAGE_MAX,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 1961c6a45437..c51c602f06d6 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -838,7 +838,8 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
838 *umem = ib_umem_get(pd->uobject->context, start, length, 838 *umem = ib_umem_get(pd->uobject->context, start, length,
839 access_flags, 0); 839 access_flags, 0);
840 err = PTR_ERR_OR_ZERO(*umem); 840 err = PTR_ERR_OR_ZERO(*umem);
841 if (err < 0) { 841 if (err) {
842 *umem = NULL;
842 mlx5_ib_err(dev, "umem get failed (%d)\n", err); 843 mlx5_ib_err(dev, "umem get failed (%d)\n", err);
843 return err; 844 return err;
844 } 845 }
@@ -1415,6 +1416,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1415 if (err) { 1416 if (err) {
1416 mlx5_ib_warn(dev, "Failed to rereg UMR\n"); 1417 mlx5_ib_warn(dev, "Failed to rereg UMR\n");
1417 ib_umem_release(mr->umem); 1418 ib_umem_release(mr->umem);
1419 mr->umem = NULL;
1418 clean_mr(dev, mr); 1420 clean_mr(dev, mr);
1419 return err; 1421 return err;
1420 } 1422 }
@@ -1498,14 +1500,11 @@ static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1498 u32 key = mr->mmkey.key; 1500 u32 key = mr->mmkey.key;
1499 1501
1500 err = destroy_mkey(dev, mr); 1502 err = destroy_mkey(dev, mr);
1501 kfree(mr);
1502 if (err) { 1503 if (err) {
1503 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n", 1504 mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
1504 key, err); 1505 key, err);
1505 return err; 1506 return err;
1506 } 1507 }
1507 } else {
1508 mlx5_mr_cache_free(dev, mr);
1509 } 1508 }
1510 1509
1511 return 0; 1510 return 0;
@@ -1548,6 +1547,11 @@ static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1548 atomic_sub(npages, &dev->mdev->priv.reg_pages); 1547 atomic_sub(npages, &dev->mdev->priv.reg_pages);
1549 } 1548 }
1550 1549
1550 if (!mr->allocated_from_cache)
1551 kfree(mr);
1552 else
1553 mlx5_mr_cache_free(dev, mr);
1554
1551 return 0; 1555 return 0;
1552} 1556}
1553 1557
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 36197fbac63a..a2e1aa86e133 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1161,7 +1161,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
1161 ib_umem_release(sq->ubuffer.umem); 1161 ib_umem_release(sq->ubuffer.umem);
1162} 1162}
1163 1163
1164static int get_rq_pas_size(void *qpc) 1164static size_t get_rq_pas_size(void *qpc)
1165{ 1165{
1166 u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12; 1166 u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
1167 u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride); 1167 u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
@@ -1177,7 +1177,8 @@ static int get_rq_pas_size(void *qpc)
1177} 1177}
1178 1178
1179static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev, 1179static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
1180 struct mlx5_ib_rq *rq, void *qpin) 1180 struct mlx5_ib_rq *rq, void *qpin,
1181 size_t qpinlen)
1181{ 1182{
1182 struct mlx5_ib_qp *mqp = rq->base.container_mibqp; 1183 struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
1183 __be64 *pas; 1184 __be64 *pas;
@@ -1186,9 +1187,12 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
1186 void *rqc; 1187 void *rqc;
1187 void *wq; 1188 void *wq;
1188 void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc); 1189 void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
1189 int inlen; 1190 size_t rq_pas_size = get_rq_pas_size(qpc);
1191 size_t inlen;
1190 int err; 1192 int err;
1191 u32 rq_pas_size = get_rq_pas_size(qpc); 1193
1194 if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
1195 return -EINVAL;
1192 1196
1193 inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size; 1197 inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
1194 in = kvzalloc(inlen, GFP_KERNEL); 1198 in = kvzalloc(inlen, GFP_KERNEL);
@@ -1277,7 +1281,7 @@ static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
1277} 1281}
1278 1282
1279static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, 1283static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1280 u32 *in, 1284 u32 *in, size_t inlen,
1281 struct ib_pd *pd) 1285 struct ib_pd *pd)
1282{ 1286{
1283 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp; 1287 struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
@@ -1309,7 +1313,7 @@ static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
1309 rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING; 1313 rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
1310 if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING) 1314 if (qp->flags & MLX5_IB_QP_PCI_WRITE_END_PADDING)
1311 rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING; 1315 rq->flags |= MLX5_IB_RQ_PCI_WRITE_END_PADDING;
1312 err = create_raw_packet_qp_rq(dev, rq, in); 1316 err = create_raw_packet_qp_rq(dev, rq, in, inlen);
1313 if (err) 1317 if (err)
1314 goto err_destroy_sq; 1318 goto err_destroy_sq;
1315 1319
@@ -1872,11 +1876,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
1872 } 1876 }
1873 } 1877 }
1874 1878
1879 if (inlen < 0) {
1880 err = -EINVAL;
1881 goto err;
1882 }
1883
1875 if (init_attr->qp_type == IB_QPT_RAW_PACKET || 1884 if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
1876 qp->flags & MLX5_IB_QP_UNDERLAY) { 1885 qp->flags & MLX5_IB_QP_UNDERLAY) {
1877 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr; 1886 qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
1878 raw_packet_qp_copy_info(qp, &qp->raw_packet_qp); 1887 raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
1879 err = create_raw_packet_qp(dev, qp, in, pd); 1888 err = create_raw_packet_qp(dev, qp, in, inlen, pd);
1880 } else { 1889 } else {
1881 err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen); 1890 err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
1882 } 1891 }
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 6d5fadad9090..3c7522d025f2 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -241,8 +241,8 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
241{ 241{
242 struct mlx5_ib_dev *dev = to_mdev(pd->device); 242 struct mlx5_ib_dev *dev = to_mdev(pd->device);
243 struct mlx5_ib_srq *srq; 243 struct mlx5_ib_srq *srq;
244 int desc_size; 244 size_t desc_size;
245 int buf_size; 245 size_t buf_size;
246 int err; 246 int err;
247 struct mlx5_srq_attr in = {0}; 247 struct mlx5_srq_attr in = {0};
248 __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); 248 __u32 max_srq_wqes = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
@@ -266,15 +266,18 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
266 266
267 desc_size = sizeof(struct mlx5_wqe_srq_next_seg) + 267 desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
268 srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg); 268 srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
269 if (desc_size == 0 || srq->msrq.max_gs > desc_size)
270 return ERR_PTR(-EINVAL);
269 desc_size = roundup_pow_of_two(desc_size); 271 desc_size = roundup_pow_of_two(desc_size);
270 desc_size = max_t(int, 32, desc_size); 272 desc_size = max_t(size_t, 32, desc_size);
273 if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
274 return ERR_PTR(-EINVAL);
271 srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) / 275 srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
272 sizeof(struct mlx5_wqe_data_seg); 276 sizeof(struct mlx5_wqe_data_seg);
273 srq->msrq.wqe_shift = ilog2(desc_size); 277 srq->msrq.wqe_shift = ilog2(desc_size);
274 buf_size = srq->msrq.max * desc_size; 278 buf_size = srq->msrq.max * desc_size;
275 mlx5_ib_dbg(dev, "desc_size 0x%x, req wr 0x%x, srq size 0x%x, max_gs 0x%x, max_avail_gather 0x%x\n", 279 if (buf_size < desc_size)
276 desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, 280 return ERR_PTR(-EINVAL);
277 srq->msrq.max_avail_gather);
278 in.type = init_attr->srq_type; 281 in.type = init_attr->srq_type;
279 282
280 if (pd->uobject) 283 if (pd->uobject)
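The srq.c hunk above switches desc_size and buf_size to size_t and rejects sizes whose intermediate products wrap. A minimal userspace sketch of that guard pattern follows; the names wqe_hdr_size, seg_size, max_gather and max_wqes are illustrative, not the driver's identifiers.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Round n up to the next power of two; 0 signals overflow. */
static size_t roundup_pow_of_two_sz(size_t n)
{
	size_t r = 1;

	while (r < n) {
		if (r > SIZE_MAX / 2)
			return 0;
		r <<= 1;
	}
	return r;
}

/*
 * Size a receive buffer while refusing any arithmetic step that wraps,
 * mirroring the guards added around desc_size/buf_size in the hunk above.
 */
static bool srq_buf_size(size_t wqe_hdr_size, size_t seg_size,
			 size_t max_gather, size_t max_wqes,
			 size_t *buf_size)
{
	size_t desc_size;

	if (max_gather && seg_size > (SIZE_MAX - wqe_hdr_size) / max_gather)
		return false;			/* desc_size would wrap */
	desc_size = wqe_hdr_size + max_gather * seg_size;

	desc_size = roundup_pow_of_two_sz(desc_size ? desc_size : 1);
	if (!desc_size)
		return false;			/* rounding overflowed */
	if (desc_size < 32)
		desc_size = 32;			/* minimum descriptor size */

	if (max_wqes && desc_size > SIZE_MAX / max_wqes)
		return false;			/* buf_size would wrap */
	*buf_size = max_wqes * desc_size;
	return true;
}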
diff --git a/drivers/infiniband/sw/rdmavt/mr.c b/drivers/infiniband/sw/rdmavt/mr.c
index 1b2e5362a3ff..cc429b567d0a 100644
--- a/drivers/infiniband/sw/rdmavt/mr.c
+++ b/drivers/infiniband/sw/rdmavt/mr.c
@@ -489,11 +489,13 @@ static int rvt_check_refs(struct rvt_mregion *mr, const char *t)
489 unsigned long timeout; 489 unsigned long timeout;
490 struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device); 490 struct rvt_dev_info *rdi = ib_to_rvt(mr->pd->device);
491 491
492 if (percpu_ref_is_zero(&mr->refcount)) 492 if (mr->lkey) {
493 return 0; 493 /* avoid dma mr */
494 /* avoid dma mr */
495 if (mr->lkey)
496 rvt_dereg_clean_qps(mr); 494 rvt_dereg_clean_qps(mr);
495 /* @mr was indexed on rcu protected @lkey_table */
496 synchronize_rcu();
497 }
498
497 timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ); 499 timeout = wait_for_completion_timeout(&mr->comp, 5 * HZ);
498 if (!timeout) { 500 if (!timeout) {
499 rvt_pr_err(rdi, 501 rvt_pr_err(rdi,
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 1d3056f53747..2cbb19cddbf8 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1412,7 +1412,7 @@ static struct irq_chip its_irq_chip = {
1412 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. 1412 * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations.
1413 */ 1413 */
1414#define IRQS_PER_CHUNK_SHIFT 5 1414#define IRQS_PER_CHUNK_SHIFT 5
1415#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) 1415#define IRQS_PER_CHUNK (1UL << IRQS_PER_CHUNK_SHIFT)
1416#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ 1416#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
1417 1417
1418static unsigned long *lpi_bitmap; 1418static unsigned long *lpi_bitmap;
@@ -2119,11 +2119,10 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
2119 2119
2120 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 2120 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
2121 /* 2121 /*
 2122 * At least one bit of EventID is being used, hence a minimum 2122 * We allocate at least one chunk worth of LPIs per device,
2123 * of two entries. No, the architecture doesn't let you 2123 * and thus that many ITEs. The device may require less though.
2124 * express an ITT with a single entry.
2125 */ 2124 */
2126 nr_ites = max(2UL, roundup_pow_of_two(nvecs)); 2125 nr_ites = max(IRQS_PER_CHUNK, roundup_pow_of_two(nvecs));
2127 sz = nr_ites * its->ite_size; 2126 sz = nr_ites * its->ite_size;
2128 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; 2127 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
2129 itt = kzalloc(sz, GFP_KERNEL); 2128 itt = kzalloc(sz, GFP_KERNEL);
@@ -2495,7 +2494,7 @@ static int its_vpe_set_affinity(struct irq_data *d,
2495 2494
2496static void its_vpe_schedule(struct its_vpe *vpe) 2495static void its_vpe_schedule(struct its_vpe *vpe)
2497{ 2496{
2498 void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); 2497 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2499 u64 val; 2498 u64 val;
2500 2499
2501 /* Schedule the VPE */ 2500 /* Schedule the VPE */
@@ -2527,7 +2526,7 @@ static void its_vpe_schedule(struct its_vpe *vpe)
2527 2526
2528static void its_vpe_deschedule(struct its_vpe *vpe) 2527static void its_vpe_deschedule(struct its_vpe *vpe)
2529{ 2528{
2530 void * __iomem vlpi_base = gic_data_rdist_vlpi_base(); 2529 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2531 u32 count = 1000000; /* 1s! */ 2530 u32 count = 1000000; /* 1s! */
2532 bool clean; 2531 bool clean;
2533 u64 val; 2532 u64 val;
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 675eda5ff2b8..4760307ab43f 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -28,20 +28,6 @@ struct gpcv2_irqchip_data {
28 28
29static struct gpcv2_irqchip_data *imx_gpcv2_instance; 29static struct gpcv2_irqchip_data *imx_gpcv2_instance;
30 30
31/*
32 * Interface for the low level wakeup code.
33 */
34u32 imx_gpcv2_get_wakeup_source(u32 **sources)
35{
36 if (!imx_gpcv2_instance)
37 return 0;
38
39 if (sources)
40 *sources = imx_gpcv2_instance->wakeup_sources;
41
42 return IMR_NUM;
43}
44
45static int gpcv2_wakeup_source_save(void) 31static int gpcv2_wakeup_source_save(void)
46{ 32{
47 struct gpcv2_irqchip_data *cd; 33 struct gpcv2_irqchip_data *cd;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3fde9e9faddd..a05a560d3cba 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -22,7 +22,6 @@
22#include <linux/time.h> 22#include <linux/time.h>
23#include <linux/workqueue.h> 23#include <linux/workqueue.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_dh.h> 25#include <scsi/scsi_dh.h>
27#include <linux/atomic.h> 26#include <linux/atomic.h>
28#include <linux/blk-mq.h> 27#include <linux/blk-mq.h>
@@ -223,6 +222,16 @@ static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
223 222
224 dm_table_set_type(ti->table, m->queue_mode); 223 dm_table_set_type(ti->table, m->queue_mode);
225 224
225 /*
226 * Init fields that are only used when a scsi_dh is attached
227 * - must do this unconditionally (really doesn't hurt non-SCSI uses)
228 */
229 set_bit(MPATHF_QUEUE_IO, &m->flags);
230 atomic_set(&m->pg_init_in_progress, 0);
231 atomic_set(&m->pg_init_count, 0);
232 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
233 init_waitqueue_head(&m->pg_init_wait);
234
226 return 0; 235 return 0;
227} 236}
228 237
@@ -331,7 +340,6 @@ static void __switch_pg(struct multipath *m, struct priority_group *pg)
331 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); 340 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
332 set_bit(MPATHF_QUEUE_IO, &m->flags); 341 set_bit(MPATHF_QUEUE_IO, &m->flags);
333 } else { 342 } else {
334 /* FIXME: not needed if no scsi_dh is attached */
335 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); 343 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
336 clear_bit(MPATHF_QUEUE_IO, &m->flags); 344 clear_bit(MPATHF_QUEUE_IO, &m->flags);
337 } 345 }
@@ -796,15 +804,14 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
796 return 0; 804 return 0;
797} 805}
798 806
799static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, char **error) 807static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
808 const char *attached_handler_name, char **error)
800{ 809{
801 struct request_queue *q = bdev_get_queue(bdev); 810 struct request_queue *q = bdev_get_queue(bdev);
802 const char *attached_handler_name;
803 int r; 811 int r;
804 812
805 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) { 813 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
806retain: 814retain:
807 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
808 if (attached_handler_name) { 815 if (attached_handler_name) {
809 /* 816 /*
810 * Clear any hw_handler_params associated with a 817 * Clear any hw_handler_params associated with a
@@ -823,16 +830,6 @@ retain:
823 */ 830 */
824 kfree(m->hw_handler_name); 831 kfree(m->hw_handler_name);
825 m->hw_handler_name = attached_handler_name; 832 m->hw_handler_name = attached_handler_name;
826
827 /*
828 * Init fields that are only used when a scsi_dh is attached
829 */
830 if (!test_and_set_bit(MPATHF_QUEUE_IO, &m->flags)) {
831 atomic_set(&m->pg_init_in_progress, 0);
832 atomic_set(&m->pg_init_count, 0);
833 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
834 init_waitqueue_head(&m->pg_init_wait);
835 }
836 } 833 }
837 } 834 }
838 835
@@ -868,7 +865,8 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
868 int r; 865 int r;
869 struct pgpath *p; 866 struct pgpath *p;
870 struct multipath *m = ti->private; 867 struct multipath *m = ti->private;
871 struct scsi_device *sdev; 868 struct request_queue *q;
869 const char *attached_handler_name;
872 870
873 /* we need at least a path arg */ 871 /* we need at least a path arg */
874 if (as->argc < 1) { 872 if (as->argc < 1) {
@@ -887,11 +885,11 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
887 goto bad; 885 goto bad;
888 } 886 }
889 887
890 sdev = scsi_device_from_queue(bdev_get_queue(p->path.dev->bdev)); 888 q = bdev_get_queue(p->path.dev->bdev);
891 if (sdev) { 889 attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
892 put_device(&sdev->sdev_gendev); 890 if (attached_handler_name) {
893 INIT_DELAYED_WORK(&p->activate_path, activate_path_work); 891 INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
894 r = setup_scsi_dh(p->path.dev->bdev, m, &ti->error); 892 r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
895 if (r) { 893 if (r) {
896 dm_put_device(ti, p->path.dev); 894 dm_put_device(ti, p->path.dev);
897 goto bad; 895 goto bad;
@@ -2022,8 +2020,9 @@ static int multipath_busy(struct dm_target *ti)
2022 *---------------------------------------------------------------*/ 2020 *---------------------------------------------------------------*/
2023static struct target_type multipath_target = { 2021static struct target_type multipath_target = {
2024 .name = "multipath", 2022 .name = "multipath",
2025 .version = {1, 12, 0}, 2023 .version = {1, 13, 0},
2026 .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE, 2024 .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
2025 DM_TARGET_PASSES_INTEGRITY,
2027 .module = THIS_MODULE, 2026 .module = THIS_MODULE,
2028 .ctr = multipath_ctr, 2027 .ctr = multipath_ctr,
2029 .dtr = multipath_dtr, 2028 .dtr = multipath_dtr,
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 372c074bb1b9..86c1a190d946 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -151,7 +151,7 @@ config DVB_MMAP
151 select VIDEOBUF2_VMALLOC 151 select VIDEOBUF2_VMALLOC
152 default n 152 default n
153 help 153 help
154 This option enables DVB experimental memory-mapped API, with 154 This option enables DVB experimental memory-mapped API, which
155 reduces the number of context switches to read DVB buffers, as 155 reduces the number of context switches to read DVB buffers, as
156 the buffers can use mmap() syscalls. 156 the buffers can use mmap() syscalls.
157 157
diff --git a/drivers/media/platform/tegra-cec/tegra_cec.c b/drivers/media/platform/tegra-cec/tegra_cec.c
index 92f93a880015..aba488cd0e64 100644
--- a/drivers/media/platform/tegra-cec/tegra_cec.c
+++ b/drivers/media/platform/tegra-cec/tegra_cec.c
@@ -172,16 +172,13 @@ static irqreturn_t tegra_cec_irq_handler(int irq, void *data)
172 } 172 }
173 } 173 }
174 174
175 if (status & (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN | 175 if (status & TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED) {
176 TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED |
177 TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED |
178 TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)) {
179 cec_write(cec, TEGRA_CEC_INT_STAT, 176 cec_write(cec, TEGRA_CEC_INT_STAT,
180 (TEGRA_CEC_INT_STAT_RX_REGISTER_OVERRUN | 177 TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED);
181 TEGRA_CEC_INT_STAT_RX_BUS_ANOMALY_DETECTED | 178 cec->rx_done = false;
182 TEGRA_CEC_INT_STAT_RX_START_BIT_DETECTED | 179 cec->rx_buf_cnt = 0;
183 TEGRA_CEC_INT_STAT_RX_BUS_ERROR_DETECTED)); 180 }
184 } else if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) { 181 if (status & TEGRA_CEC_INT_STAT_RX_REGISTER_FULL) {
185 u32 v; 182 u32 v;
186 183
187 cec_write(cec, TEGRA_CEC_INT_STAT, 184 cec_write(cec, TEGRA_CEC_INT_STAT,
@@ -255,7 +252,7 @@ static int tegra_cec_adap_enable(struct cec_adapter *adap, bool enable)
255 TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED | 252 TEGRA_CEC_INT_MASK_TX_BUS_ANOMALY_DETECTED |
256 TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED | 253 TEGRA_CEC_INT_MASK_TX_FRAME_TRANSMITTED |
257 TEGRA_CEC_INT_MASK_RX_REGISTER_FULL | 254 TEGRA_CEC_INT_MASK_RX_REGISTER_FULL |
258 TEGRA_CEC_INT_MASK_RX_REGISTER_OVERRUN); 255 TEGRA_CEC_INT_MASK_RX_START_BIT_DETECTED);
259 256
260 cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE); 257 cec_write(cec, TEGRA_CEC_HW_CONTROL, TEGRA_CEC_HWCTRL_TX_RX_MODE);
261 return 0; 258 return 0;
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 20135a5de748..2cfb963d9f37 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -72,6 +72,7 @@ MODULE_ALIAS("mmc:block");
72#define MMC_BLK_TIMEOUT_MS (10 * 1000) 72#define MMC_BLK_TIMEOUT_MS (10 * 1000)
73#define MMC_SANITIZE_REQ_TIMEOUT 240000 73#define MMC_SANITIZE_REQ_TIMEOUT 240000
74#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) 74#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
75#define MMC_EXTRACT_VALUE_FROM_ARG(x) ((x & 0x0000FF00) >> 8)
75 76
76#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \ 77#define mmc_req_rel_wr(req) ((req->cmd_flags & REQ_FUA) && \
77 (rq_data_dir(req) == WRITE)) 78 (rq_data_dir(req) == WRITE))
@@ -587,6 +588,24 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
587 } 588 }
588 589
589 /* 590 /*
591 * Make sure the cache of the PARTITION_CONFIG register and
592 * PARTITION_ACCESS bits is updated in case the ioctl ext_csd write
593 * changed it successfully.
594 */
595 if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_PART_CONFIG) &&
596 (cmd.opcode == MMC_SWITCH)) {
597 struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev);
598 u8 value = MMC_EXTRACT_VALUE_FROM_ARG(cmd.arg);
599
600 /*
601 * Update cache so the next mmc_blk_part_switch call operates
602 * on up-to-date data.
603 */
604 card->ext_csd.part_config = value;
605 main_md->part_curr = value & EXT_CSD_PART_CONFIG_ACC_MASK;
606 }
607
608 /*
590 * According to the SD specs, some commands require a delay after 609 * According to the SD specs, some commands require a delay after
591 * issuing the command. 610 * issuing the command.
592 */ 611 */
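The block.c hunk pairs the existing index macro with a value macro so the cached PARTITION_CONFIG byte can be refreshed after a user-issued MMC_SWITCH. A standalone sketch of the argument layout those macros assume; the macro names and the 0x48 value are illustrative, only the bit positions come from the definitions above.

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as the macros added above: byte 2 of the SWITCH
 * argument selects the EXT_CSD field, byte 1 carries the value. */
#define EXTRACT_INDEX_FROM_ARG(x)	(((x) & 0x00FF0000u) >> 16)
#define EXTRACT_VALUE_FROM_ARG(x)	(((x) & 0x0000FF00u) >> 8)

int main(void)
{
	/* Hypothetical SWITCH argument: write 0x48 to EXT_CSD byte 179
	 * (PARTITION_CONFIG); 179 == 0xB3. */
	uint32_t arg = (0xB3u << 16) | (0x48u << 8);

	printf("index=%u value=0x%02x\n",
	       (unsigned)EXTRACT_INDEX_FROM_ARG(arg),
	       (unsigned)EXTRACT_VALUE_FROM_ARG(arg));
	return 0;	/* prints: index=179 value=0x48 */
}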
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
index 79a5b985ccf5..9c821eedd156 100644
--- a/drivers/mmc/core/card.h
+++ b/drivers/mmc/core/card.h
@@ -82,6 +82,7 @@ struct mmc_fixup {
82#define CID_MANFID_APACER 0x27 82#define CID_MANFID_APACER 0x27
83#define CID_MANFID_KINGSTON 0x70 83#define CID_MANFID_KINGSTON 0x70
84#define CID_MANFID_HYNIX 0x90 84#define CID_MANFID_HYNIX 0x90
85#define CID_MANFID_NUMONYX 0xFE
85 86
86#define END_FIXUP { NULL } 87#define END_FIXUP { NULL }
87 88
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
index 75d317623852..5153577754f0 100644
--- a/drivers/mmc/core/quirks.h
+++ b/drivers/mmc/core/quirks.h
@@ -109,6 +109,12 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = {
109 */ 109 */
110 MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX, 110 MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_HYNIX,
111 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5), 111 0x014a, add_quirk, MMC_QUIRK_BROKEN_HPI, 5),
112 /*
113 * Certain Micron (Numonyx) eMMC 4.5 cards might get broken when HPI
114 * feature is used so disable the HPI feature for such buggy cards.
115 */
116 MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_NUMONYX,
117 0x014e, add_quirk, MMC_QUIRK_BROKEN_HPI, 6),
112 118
113 END_FIXUP 119 END_FIXUP
114}; 120};
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index fa41d9422d57..a84aa3f1ae85 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -165,9 +165,15 @@ static void dw_mci_exynos_set_clksel_timing(struct dw_mci *host, u32 timing)
165static int dw_mci_exynos_runtime_resume(struct device *dev) 165static int dw_mci_exynos_runtime_resume(struct device *dev)
166{ 166{
167 struct dw_mci *host = dev_get_drvdata(dev); 167 struct dw_mci *host = dev_get_drvdata(dev);
168 int ret;
169
170 ret = dw_mci_runtime_resume(dev);
171 if (ret)
172 return ret;
168 173
169 dw_mci_exynos_config_smu(host); 174 dw_mci_exynos_config_smu(host);
170 return dw_mci_runtime_resume(dev); 175
176 return ret;
171} 177}
172 178
173/** 179/**
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index d9b4acefed31..06d47414d0c1 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -413,7 +413,9 @@ static inline void dw_mci_set_cto(struct dw_mci *host)
413 cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; 413 cto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
414 if (cto_div == 0) 414 if (cto_div == 0)
415 cto_div = 1; 415 cto_div = 1;
416 cto_ms = DIV_ROUND_UP(MSEC_PER_SEC * cto_clks * cto_div, host->bus_hz); 416
417 cto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * cto_clks * cto_div,
418 host->bus_hz);
417 419
418 /* add a bit spare time */ 420 /* add a bit spare time */
419 cto_ms += 10; 421 cto_ms += 10;
@@ -562,6 +564,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
562 (sizeof(struct idmac_desc_64addr) * 564 (sizeof(struct idmac_desc_64addr) *
563 (i + 1))) >> 32; 565 (i + 1))) >> 32;
564 /* Initialize reserved and buffer size fields to "0" */ 566 /* Initialize reserved and buffer size fields to "0" */
567 p->des0 = 0;
565 p->des1 = 0; 568 p->des1 = 0;
566 p->des2 = 0; 569 p->des2 = 0;
567 p->des3 = 0; 570 p->des3 = 0;
@@ -584,6 +587,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
584 i++, p++) { 587 i++, p++) {
585 p->des3 = cpu_to_le32(host->sg_dma + 588 p->des3 = cpu_to_le32(host->sg_dma +
586 (sizeof(struct idmac_desc) * (i + 1))); 589 (sizeof(struct idmac_desc) * (i + 1)));
590 p->des0 = 0;
587 p->des1 = 0; 591 p->des1 = 0;
588 } 592 }
589 593
@@ -1799,8 +1803,8 @@ static bool dw_mci_reset(struct dw_mci *host)
1799 } 1803 }
1800 1804
1801 if (host->use_dma == TRANS_MODE_IDMAC) 1805 if (host->use_dma == TRANS_MODE_IDMAC)
1802 /* It is also recommended that we reset and reprogram idmac */ 1806 /* It is also required that we reinit idmac */
1803 dw_mci_idmac_reset(host); 1807 dw_mci_idmac_init(host);
1804 1808
1805 ret = true; 1809 ret = true;
1806 1810
@@ -1948,8 +1952,9 @@ static void dw_mci_set_drto(struct dw_mci *host)
1948 drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; 1952 drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2;
1949 if (drto_div == 0) 1953 if (drto_div == 0)
1950 drto_div = 1; 1954 drto_div = 1;
1951 drto_ms = DIV_ROUND_UP(MSEC_PER_SEC * drto_clks * drto_div, 1955
1952 host->bus_hz); 1956 drto_ms = DIV_ROUND_UP_ULL((u64)MSEC_PER_SEC * drto_clks * drto_div,
1957 host->bus_hz);
1953 1958
1954 /* add a bit spare time */ 1959 /* add a bit spare time */
1955 drto_ms += 10; 1960 drto_ms += 10;
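Both timeout hunks in dw_mmc.c move the millisecond conversion to 64-bit math because MSEC_PER_SEC * clks * div can exceed 32 bits before the division. A small sketch of the same computation, with illustrative clock values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MSEC_PER_SEC	1000ULL

/* Round-up division done in 64 bits, as the DIV_ROUND_UP_ULL change
 * above does, so the product cannot wrap in a 32-bit intermediate. */
static uint32_t timeout_ms(uint32_t clks, uint32_t div, uint32_t bus_hz)
{
	uint64_t num = MSEC_PER_SEC * clks * div;

	return (uint32_t)((num + bus_hz - 1) / bus_hz) + 10; /* spare time */
}

int main(void)
{
	/* Illustrative values: 1000 * 0xFFFFFF alone already exceeds
	 * 32 bits, so the old 32-bit DIV_ROUND_UP would have wrapped. */
	printf("%" PRIu32 " ms\n", timeout_ms(0xFFFFFF, 510, 50000000));
	return 0;
}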
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 4065da58789d..32321bd596d8 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -680,7 +680,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
680 host->hw_name = "ACPI"; 680 host->hw_name = "ACPI";
681 host->ops = &sdhci_acpi_ops_dflt; 681 host->ops = &sdhci_acpi_ops_dflt;
682 host->irq = platform_get_irq(pdev, 0); 682 host->irq = platform_get_irq(pdev, 0);
683 if (host->irq <= 0) { 683 if (host->irq < 0) {
684 err = -EINVAL; 684 err = -EINVAL;
685 goto err_free; 685 goto err_free;
686 } 686 }
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index de8c902059b8..7d80a8bb96fe 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -479,7 +479,7 @@ static int shrink_ecclayout(struct mtd_info *mtd,
479 for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) { 479 for (i = 0; i < MTD_MAX_ECCPOS_ENTRIES;) {
480 u32 eccpos; 480 u32 eccpos;
481 481
482 ret = mtd_ooblayout_ecc(mtd, section, &oobregion); 482 ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
483 if (ret < 0) { 483 if (ret < 0) {
484 if (ret != -ERANGE) 484 if (ret != -ERANGE)
485 return ret; 485 return ret;
@@ -526,7 +526,7 @@ static int get_oobinfo(struct mtd_info *mtd, struct nand_oobinfo *to)
526 for (i = 0; i < ARRAY_SIZE(to->eccpos);) { 526 for (i = 0; i < ARRAY_SIZE(to->eccpos);) {
527 u32 eccpos; 527 u32 eccpos;
528 528
529 ret = mtd_ooblayout_ecc(mtd, section, &oobregion); 529 ret = mtd_ooblayout_ecc(mtd, section++, &oobregion);
530 if (ret < 0) { 530 if (ret < 0) {
531 if (ret != -ERANGE) 531 if (ret != -ERANGE)
532 return ret; 532 return ret;
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index 4872a7ba6503..5a9c2f0020c2 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -173,14 +173,9 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
173 173
174/* returns nonzero if entire page is blank */ 174/* returns nonzero if entire page is blank */
175static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl, 175static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
176 u32 *eccstat, unsigned int bufnum) 176 u32 eccstat, unsigned int bufnum)
177{ 177{
178 u32 reg = eccstat[bufnum / 4]; 178 return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
179 int errors;
180
181 errors = (reg >> ((3 - bufnum % 4) * 8)) & 15;
182
183 return errors;
184} 179}
185 180
186/* 181/*
@@ -193,7 +188,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
193 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 188 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
194 struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; 189 struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
195 struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; 190 struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
196 u32 eccstat[4]; 191 u32 eccstat;
197 int i; 192 int i;
198 193
199 /* set the chip select for NAND Transaction */ 194 /* set the chip select for NAND Transaction */
@@ -228,19 +223,17 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
228 if (nctrl->eccread) { 223 if (nctrl->eccread) {
229 int errors; 224 int errors;
230 int bufnum = nctrl->page & priv->bufnum_mask; 225 int bufnum = nctrl->page & priv->bufnum_mask;
231 int sector = bufnum * chip->ecc.steps; 226 int sector_start = bufnum * chip->ecc.steps;
232 int sector_end = sector + chip->ecc.steps - 1; 227 int sector_end = sector_start + chip->ecc.steps - 1;
233 __be32 *eccstat_regs; 228 __be32 *eccstat_regs;
234 229
235 if (ctrl->version >= FSL_IFC_VERSION_2_0_0) 230 eccstat_regs = ifc->ifc_nand.nand_eccstat;
236 eccstat_regs = ifc->ifc_nand.v2_nand_eccstat; 231 eccstat = ifc_in32(&eccstat_regs[sector_start / 4]);
237 else
238 eccstat_regs = ifc->ifc_nand.v1_nand_eccstat;
239 232
240 for (i = sector / 4; i <= sector_end / 4; i++) 233 for (i = sector_start; i <= sector_end; i++) {
241 eccstat[i] = ifc_in32(&eccstat_regs[i]); 234 if (i != sector_start && !(i % 4))
235 eccstat = ifc_in32(&eccstat_regs[i / 4]);
242 236
243 for (i = sector; i <= sector_end; i++) {
244 errors = check_read_ecc(mtd, ctrl, eccstat, i); 237 errors = check_read_ecc(mtd, ctrl, eccstat, i);
245 238
246 if (errors == 15) { 239 if (errors == 15) {
@@ -626,6 +619,7 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
626 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 619 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
627 struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; 620 struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
628 u32 nand_fsr; 621 u32 nand_fsr;
622 int status;
629 623
630 /* Use READ_STATUS command, but wait for the device to be ready */ 624 /* Use READ_STATUS command, but wait for the device to be ready */
631 ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | 625 ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
@@ -640,12 +634,12 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
640 fsl_ifc_run_command(mtd); 634 fsl_ifc_run_command(mtd);
641 635
642 nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); 636 nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
643 637 status = nand_fsr >> 24;
644 /* 638 /*
645 * The chip always seems to report that it is 639 * The chip always seems to report that it is
646 * write-protected, even when it is not. 640 * write-protected, even when it is not.
647 */ 641 */
648 return nand_fsr | NAND_STATUS_WP; 642 return status | NAND_STATUS_WP;
649} 643}
650 644
651/* 645/*
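The reworked check_read_ecc() above treats each 32-bit ECCSTAT word as four per-buffer status bytes and re-reads the register every fourth sector. A self-contained sketch of that extraction, using a hypothetical register dump:

#include <stdint.h>
#include <stdio.h>

/* Each 32-bit status word packs four buffers, most significant byte
 * first; the low nibble of each byte is the error count and 15 means
 * uncorrectable, as in check_read_ecc() above. */
static unsigned int ecc_errors(uint32_t eccstat, unsigned int bufnum)
{
	return (eccstat >> ((3 - bufnum % 4) * 8)) & 15;
}

int main(void)
{
	/* Hypothetical register dump covering buffers 0..7. */
	const uint32_t regs[2] = { 0x0001020F, 0x00000000 };
	uint32_t eccstat = regs[0];
	unsigned int i;

	for (i = 0; i < 8; i++) {
		if (i && !(i % 4))	/* refill every fourth sector */
			eccstat = regs[i / 4];
		printf("sector %u: %u errors%s\n", i, ecc_errors(eccstat, i),
		       ecc_errors(eccstat, i) == 15 ? " (uncorrectable)" : "");
	}
	return 0;
}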
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index 1e37313054f3..6da69af103e6 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -390,37 +390,23 @@ static int cc770_get_berr_counter(const struct net_device *dev,
390 return 0; 390 return 0;
391} 391}
392 392
393static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) 393static void cc770_tx(struct net_device *dev, int mo)
394{ 394{
395 struct cc770_priv *priv = netdev_priv(dev); 395 struct cc770_priv *priv = netdev_priv(dev);
396 struct net_device_stats *stats = &dev->stats; 396 struct can_frame *cf = (struct can_frame *)priv->tx_skb->data;
397 struct can_frame *cf = (struct can_frame *)skb->data;
398 unsigned int mo = obj2msgobj(CC770_OBJ_TX);
399 u8 dlc, rtr; 397 u8 dlc, rtr;
400 u32 id; 398 u32 id;
401 int i; 399 int i;
402 400
403 if (can_dropped_invalid_skb(dev, skb))
404 return NETDEV_TX_OK;
405
406 if ((cc770_read_reg(priv,
407 msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
408 netdev_err(dev, "TX register is still occupied!\n");
409 return NETDEV_TX_BUSY;
410 }
411
412 netif_stop_queue(dev);
413
414 dlc = cf->can_dlc; 401 dlc = cf->can_dlc;
415 id = cf->can_id; 402 id = cf->can_id;
416 if (cf->can_id & CAN_RTR_FLAG) 403 rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR;
417 rtr = 0; 404
418 else 405 cc770_write_reg(priv, msgobj[mo].ctrl0,
419 rtr = MSGCFG_DIR; 406 MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
420 cc770_write_reg(priv, msgobj[mo].ctrl1, 407 cc770_write_reg(priv, msgobj[mo].ctrl1,
421 RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); 408 RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
422 cc770_write_reg(priv, msgobj[mo].ctrl0, 409
423 MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
424 if (id & CAN_EFF_FLAG) { 410 if (id & CAN_EFF_FLAG) {
425 id &= CAN_EFF_MASK; 411 id &= CAN_EFF_MASK;
426 cc770_write_reg(priv, msgobj[mo].config, 412 cc770_write_reg(priv, msgobj[mo].config,
@@ -439,22 +425,30 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
439 for (i = 0; i < dlc; i++) 425 for (i = 0; i < dlc; i++)
440 cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); 426 cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
441 427
442 /* Store echo skb before starting the transfer */
443 can_put_echo_skb(skb, dev, 0);
444
445 cc770_write_reg(priv, msgobj[mo].ctrl1, 428 cc770_write_reg(priv, msgobj[mo].ctrl1,
446 RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); 429 RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
430 cc770_write_reg(priv, msgobj[mo].ctrl0,
431 MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC);
432}
447 433
448 stats->tx_bytes += dlc; 434static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
435{
436 struct cc770_priv *priv = netdev_priv(dev);
437 unsigned int mo = obj2msgobj(CC770_OBJ_TX);
449 438
439 if (can_dropped_invalid_skb(dev, skb))
440 return NETDEV_TX_OK;
450 441
451 /* 442 netif_stop_queue(dev);
452 * HM: We had some cases of repeated IRQs so make sure the 443
453 * INT is acknowledged I know it's already further up, but 444 if ((cc770_read_reg(priv,
454 * doing again fixed the issue 445 msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
455 */ 446 netdev_err(dev, "TX register is still occupied!\n");
456 cc770_write_reg(priv, msgobj[mo].ctrl0, 447 return NETDEV_TX_BUSY;
457 MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); 448 }
449
450 priv->tx_skb = skb;
451 cc770_tx(dev, mo);
458 452
459 return NETDEV_TX_OK; 453 return NETDEV_TX_OK;
460} 454}
@@ -680,19 +674,46 @@ static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
680 struct cc770_priv *priv = netdev_priv(dev); 674 struct cc770_priv *priv = netdev_priv(dev);
681 struct net_device_stats *stats = &dev->stats; 675 struct net_device_stats *stats = &dev->stats;
682 unsigned int mo = obj2msgobj(o); 676 unsigned int mo = obj2msgobj(o);
677 struct can_frame *cf;
678 u8 ctrl1;
679
680 ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
683 681
684 /* Nothing more to send, switch off interrupts */
685 cc770_write_reg(priv, msgobj[mo].ctrl0, 682 cc770_write_reg(priv, msgobj[mo].ctrl0,
686 MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); 683 MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
687 /* 684 cc770_write_reg(priv, msgobj[mo].ctrl1,
688 * We had some cases of repeated IRQ so make sure the 685 RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES);
689 * INT is acknowledged 686
687 if (unlikely(!priv->tx_skb)) {
688 netdev_err(dev, "missing tx skb in tx interrupt\n");
689 return;
690 }
691
692 if (unlikely(ctrl1 & MSGLST_SET)) {
693 stats->rx_over_errors++;
694 stats->rx_errors++;
695 }
696
697 /* When the CC770 is sending an RTR message and it receives a regular
698 * message that matches the id of the RTR message, it will overwrite the
699 * outgoing message in the TX register. When this happens we must
700 * process the received message and try to transmit the outgoing skb
701 * again.
690 */ 702 */
691 cc770_write_reg(priv, msgobj[mo].ctrl0, 703 if (unlikely(ctrl1 & NEWDAT_SET)) {
692 MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES); 704 cc770_rx(dev, mo, ctrl1);
705 cc770_tx(dev, mo);
706 return;
707 }
693 708
709 cf = (struct can_frame *)priv->tx_skb->data;
710 stats->tx_bytes += cf->can_dlc;
694 stats->tx_packets++; 711 stats->tx_packets++;
712
713 can_put_echo_skb(priv->tx_skb, dev, 0);
695 can_get_echo_skb(dev, 0); 714 can_get_echo_skb(dev, 0);
715 priv->tx_skb = NULL;
716
696 netif_wake_queue(dev); 717 netif_wake_queue(dev);
697} 718}
698 719
@@ -804,6 +825,7 @@ struct net_device *alloc_cc770dev(int sizeof_priv)
804 priv->can.do_set_bittiming = cc770_set_bittiming; 825 priv->can.do_set_bittiming = cc770_set_bittiming;
805 priv->can.do_set_mode = cc770_set_mode; 826 priv->can.do_set_mode = cc770_set_mode;
806 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; 827 priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
828 priv->tx_skb = NULL;
807 829
808 memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); 830 memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
809 831
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
index a1739db98d91..95752e1d1283 100644
--- a/drivers/net/can/cc770/cc770.h
+++ b/drivers/net/can/cc770/cc770.h
@@ -193,6 +193,8 @@ struct cc770_priv {
193 u8 cpu_interface; /* CPU interface register */ 193 u8 cpu_interface; /* CPU interface register */
194 u8 clkout; /* Clock out register */ 194 u8 clkout; /* Clock out register */
195 u8 bus_config; /* Bus configuration register */ 195 u8 bus_config; /* Bus configuration register */
196
197 struct sk_buff *tx_skb;
196}; 198};
197 199
198struct net_device *alloc_cc770dev(int sizeof_priv); 200struct net_device *alloc_cc770dev(int sizeof_priv);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 2772d05ff11c..fedd927ba6ed 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -30,6 +30,7 @@
30#define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2) 30#define IFI_CANFD_STCMD_ERROR_ACTIVE BIT(2)
31#define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3) 31#define IFI_CANFD_STCMD_ERROR_PASSIVE BIT(3)
32#define IFI_CANFD_STCMD_BUSOFF BIT(4) 32#define IFI_CANFD_STCMD_BUSOFF BIT(4)
33#define IFI_CANFD_STCMD_ERROR_WARNING BIT(5)
33#define IFI_CANFD_STCMD_BUSMONITOR BIT(16) 34#define IFI_CANFD_STCMD_BUSMONITOR BIT(16)
34#define IFI_CANFD_STCMD_LOOPBACK BIT(18) 35#define IFI_CANFD_STCMD_LOOPBACK BIT(18)
35#define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24) 36#define IFI_CANFD_STCMD_DISABLE_CANFD BIT(24)
@@ -52,7 +53,10 @@
52#define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13) 53#define IFI_CANFD_TXSTCMD_OVERFLOW BIT(13)
53 54
54#define IFI_CANFD_INTERRUPT 0xc 55#define IFI_CANFD_INTERRUPT 0xc
56#define IFI_CANFD_INTERRUPT_ERROR_BUSOFF BIT(0)
55#define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1) 57#define IFI_CANFD_INTERRUPT_ERROR_WARNING BIT(1)
58#define IFI_CANFD_INTERRUPT_ERROR_STATE_CHG BIT(2)
59#define IFI_CANFD_INTERRUPT_ERROR_REC_TEC_INC BIT(3)
56#define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10) 60#define IFI_CANFD_INTERRUPT_ERROR_COUNTER BIT(10)
57#define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16) 61#define IFI_CANFD_INTERRUPT_TXFIFO_EMPTY BIT(16)
58#define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22) 62#define IFI_CANFD_INTERRUPT_TXFIFO_REMOVE BIT(22)
@@ -61,6 +65,10 @@
61#define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31)) 65#define IFI_CANFD_INTERRUPT_SET_IRQ ((u32)BIT(31))
62 66
63#define IFI_CANFD_IRQMASK 0x10 67#define IFI_CANFD_IRQMASK 0x10
68#define IFI_CANFD_IRQMASK_ERROR_BUSOFF BIT(0)
69#define IFI_CANFD_IRQMASK_ERROR_WARNING BIT(1)
70#define IFI_CANFD_IRQMASK_ERROR_STATE_CHG BIT(2)
71#define IFI_CANFD_IRQMASK_ERROR_REC_TEC_INC BIT(3)
64#define IFI_CANFD_IRQMASK_SET_ERR BIT(7) 72#define IFI_CANFD_IRQMASK_SET_ERR BIT(7)
65#define IFI_CANFD_IRQMASK_SET_TS BIT(15) 73#define IFI_CANFD_IRQMASK_SET_TS BIT(15)
66#define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16) 74#define IFI_CANFD_IRQMASK_TXFIFO_EMPTY BIT(16)
@@ -136,6 +144,8 @@
136#define IFI_CANFD_SYSCLOCK 0x50 144#define IFI_CANFD_SYSCLOCK 0x50
137 145
138#define IFI_CANFD_VER 0x54 146#define IFI_CANFD_VER 0x54
147#define IFI_CANFD_VER_REV_MASK 0xff
148#define IFI_CANFD_VER_REV_MIN_SUPPORTED 0x15
139 149
140#define IFI_CANFD_IP_ID 0x58 150#define IFI_CANFD_IP_ID 0x58
141#define IFI_CANFD_IP_ID_VALUE 0xD073CAFD 151#define IFI_CANFD_IP_ID_VALUE 0xD073CAFD
@@ -220,7 +230,10 @@ static void ifi_canfd_irq_enable(struct net_device *ndev, bool enable)
220 230
221 if (enable) { 231 if (enable) {
222 enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY | 232 enirq = IFI_CANFD_IRQMASK_TXFIFO_EMPTY |
223 IFI_CANFD_IRQMASK_RXFIFO_NEMPTY; 233 IFI_CANFD_IRQMASK_RXFIFO_NEMPTY |
234 IFI_CANFD_IRQMASK_ERROR_STATE_CHG |
235 IFI_CANFD_IRQMASK_ERROR_WARNING |
236 IFI_CANFD_IRQMASK_ERROR_BUSOFF;
224 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 237 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
225 enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER; 238 enirq |= IFI_CANFD_INTERRUPT_ERROR_COUNTER;
226 } 239 }
@@ -361,12 +374,13 @@ static int ifi_canfd_handle_lost_msg(struct net_device *ndev)
361 return 1; 374 return 1;
362} 375}
363 376
364static int ifi_canfd_handle_lec_err(struct net_device *ndev, const u32 errctr) 377static int ifi_canfd_handle_lec_err(struct net_device *ndev)
365{ 378{
366 struct ifi_canfd_priv *priv = netdev_priv(ndev); 379 struct ifi_canfd_priv *priv = netdev_priv(ndev);
367 struct net_device_stats *stats = &ndev->stats; 380 struct net_device_stats *stats = &ndev->stats;
368 struct can_frame *cf; 381 struct can_frame *cf;
369 struct sk_buff *skb; 382 struct sk_buff *skb;
383 u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR);
370 const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST | 384 const u32 errmask = IFI_CANFD_ERROR_CTR_OVERLOAD_FIRST |
371 IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST | 385 IFI_CANFD_ERROR_CTR_ACK_ERROR_FIRST |
372 IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST | 386 IFI_CANFD_ERROR_CTR_BIT0_ERROR_FIRST |
@@ -449,6 +463,11 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
449 463
450 switch (new_state) { 464 switch (new_state) {
451 case CAN_STATE_ERROR_ACTIVE: 465 case CAN_STATE_ERROR_ACTIVE:
466 /* error active state */
467 priv->can.can_stats.error_warning++;
468 priv->can.state = CAN_STATE_ERROR_ACTIVE;
469 break;
470 case CAN_STATE_ERROR_WARNING:
452 /* error warning state */ 471 /* error warning state */
453 priv->can.can_stats.error_warning++; 472 priv->can.can_stats.error_warning++;
454 priv->can.state = CAN_STATE_ERROR_WARNING; 473 priv->can.state = CAN_STATE_ERROR_WARNING;
@@ -477,7 +496,7 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
477 ifi_canfd_get_berr_counter(ndev, &bec); 496 ifi_canfd_get_berr_counter(ndev, &bec);
478 497
479 switch (new_state) { 498 switch (new_state) {
480 case CAN_STATE_ERROR_ACTIVE: 499 case CAN_STATE_ERROR_WARNING:
481 /* error warning state */ 500 /* error warning state */
482 cf->can_id |= CAN_ERR_CRTL; 501 cf->can_id |= CAN_ERR_CRTL;
483 cf->data[1] = (bec.txerr > bec.rxerr) ? 502 cf->data[1] = (bec.txerr > bec.rxerr) ?
@@ -510,22 +529,21 @@ static int ifi_canfd_handle_state_change(struct net_device *ndev,
510 return 1; 529 return 1;
511} 530}
512 531
513static int ifi_canfd_handle_state_errors(struct net_device *ndev, u32 stcmd) 532static int ifi_canfd_handle_state_errors(struct net_device *ndev)
514{ 533{
515 struct ifi_canfd_priv *priv = netdev_priv(ndev); 534 struct ifi_canfd_priv *priv = netdev_priv(ndev);
535 u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
516 int work_done = 0; 536 int work_done = 0;
517 u32 isr;
518 537
519 /* 538 if ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) &&
520 * The ErrWarn condition is a little special, since the bit is 539 (priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
521 * located in the INTERRUPT register instead of STCMD register. 540 netdev_dbg(ndev, "Error, entered active state\n");
522 */ 541 work_done += ifi_canfd_handle_state_change(ndev,
523 isr = readl(priv->base + IFI_CANFD_INTERRUPT); 542 CAN_STATE_ERROR_ACTIVE);
524 if ((isr & IFI_CANFD_INTERRUPT_ERROR_WARNING) && 543 }
544
545 if ((stcmd & IFI_CANFD_STCMD_ERROR_WARNING) &&
525 (priv->can.state != CAN_STATE_ERROR_WARNING)) { 546 (priv->can.state != CAN_STATE_ERROR_WARNING)) {
526 /* Clear the interrupt */
527 writel(IFI_CANFD_INTERRUPT_ERROR_WARNING,
528 priv->base + IFI_CANFD_INTERRUPT);
529 netdev_dbg(ndev, "Error, entered warning state\n"); 547 netdev_dbg(ndev, "Error, entered warning state\n");
530 work_done += ifi_canfd_handle_state_change(ndev, 548 work_done += ifi_canfd_handle_state_change(ndev,
531 CAN_STATE_ERROR_WARNING); 549 CAN_STATE_ERROR_WARNING);
@@ -552,18 +570,11 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
552{ 570{
553 struct net_device *ndev = napi->dev; 571 struct net_device *ndev = napi->dev;
554 struct ifi_canfd_priv *priv = netdev_priv(ndev); 572 struct ifi_canfd_priv *priv = netdev_priv(ndev);
555 const u32 stcmd_state_mask = IFI_CANFD_STCMD_ERROR_PASSIVE |
556 IFI_CANFD_STCMD_BUSOFF;
557 int work_done = 0;
558
559 u32 stcmd = readl(priv->base + IFI_CANFD_STCMD);
560 u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD); 573 u32 rxstcmd = readl(priv->base + IFI_CANFD_RXSTCMD);
561 u32 errctr = readl(priv->base + IFI_CANFD_ERROR_CTR); 574 int work_done = 0;
562 575
563 /* Handle bus state changes */ 576 /* Handle bus state changes */
564 if ((stcmd & stcmd_state_mask) || 577 work_done += ifi_canfd_handle_state_errors(ndev);
565 ((stcmd & IFI_CANFD_STCMD_ERROR_ACTIVE) == 0))
566 work_done += ifi_canfd_handle_state_errors(ndev, stcmd);
567 578
568 /* Handle lost messages on RX */ 579 /* Handle lost messages on RX */
569 if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW) 580 if (rxstcmd & IFI_CANFD_RXSTCMD_OVERFLOW)
@@ -571,7 +582,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
571 582
572 /* Handle lec errors on the bus */ 583 /* Handle lec errors on the bus */
573 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) 584 if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
574 work_done += ifi_canfd_handle_lec_err(ndev, errctr); 585 work_done += ifi_canfd_handle_lec_err(ndev);
575 586
576 /* Handle normal messages on RX */ 587 /* Handle normal messages on RX */
577 if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY)) 588 if (!(rxstcmd & IFI_CANFD_RXSTCMD_EMPTY))
@@ -592,12 +603,13 @@ static irqreturn_t ifi_canfd_isr(int irq, void *dev_id)
592 struct net_device_stats *stats = &ndev->stats; 603 struct net_device_stats *stats = &ndev->stats;
593 const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY | 604 const u32 rx_irq_mask = IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY |
594 IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER | 605 IFI_CANFD_INTERRUPT_RXFIFO_NEMPTY_PER |
606 IFI_CANFD_INTERRUPT_ERROR_COUNTER |
607 IFI_CANFD_INTERRUPT_ERROR_STATE_CHG |
595 IFI_CANFD_INTERRUPT_ERROR_WARNING | 608 IFI_CANFD_INTERRUPT_ERROR_WARNING |
596 IFI_CANFD_INTERRUPT_ERROR_COUNTER; 609 IFI_CANFD_INTERRUPT_ERROR_BUSOFF;
597 const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY | 610 const u32 tx_irq_mask = IFI_CANFD_INTERRUPT_TXFIFO_EMPTY |
598 IFI_CANFD_INTERRUPT_TXFIFO_REMOVE; 611 IFI_CANFD_INTERRUPT_TXFIFO_REMOVE;
599 const u32 clr_irq_mask = ~((u32)(IFI_CANFD_INTERRUPT_SET_IRQ | 612 const u32 clr_irq_mask = ~((u32)IFI_CANFD_INTERRUPT_SET_IRQ);
600 IFI_CANFD_INTERRUPT_ERROR_WARNING));
601 u32 isr; 613 u32 isr;
602 614
603 isr = readl(priv->base + IFI_CANFD_INTERRUPT); 615 isr = readl(priv->base + IFI_CANFD_INTERRUPT);
@@ -933,7 +945,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
933 struct resource *res; 945 struct resource *res;
934 void __iomem *addr; 946 void __iomem *addr;
935 int irq, ret; 947 int irq, ret;
936 u32 id; 948 u32 id, rev;
937 949
938 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 950 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
939 addr = devm_ioremap_resource(dev, res); 951 addr = devm_ioremap_resource(dev, res);
@@ -947,6 +959,13 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
947 return -EINVAL; 959 return -EINVAL;
948 } 960 }
949 961
962 rev = readl(addr + IFI_CANFD_VER) & IFI_CANFD_VER_REV_MASK;
963 if (rev < IFI_CANFD_VER_REV_MIN_SUPPORTED) {
964 dev_err(dev, "This block is too old (rev %i), minimum supported is rev %i\n",
965 rev, IFI_CANFD_VER_REV_MIN_SUPPORTED);
966 return -EINVAL;
967 }
968
950 ndev = alloc_candev(sizeof(*priv), 1); 969 ndev = alloc_candev(sizeof(*priv), 1);
951 if (!ndev) 970 if (!ndev)
952 return -ENOMEM; 971 return -ENOMEM;
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2594f7779c6f..b397a33f3d32 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -26,6 +26,7 @@
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/iopoll.h> 27#include <linux/iopoll.h>
28#include <linux/can/dev.h> 28#include <linux/can/dev.h>
29#include <linux/pinctrl/consumer.h>
29 30
30/* napi related */ 31/* napi related */
31#define M_CAN_NAPI_WEIGHT 64 32#define M_CAN_NAPI_WEIGHT 64
@@ -253,7 +254,7 @@ enum m_can_mram_cfg {
253 254
254/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */ 255/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
255#define RXFC_FWM_SHIFT 24 256#define RXFC_FWM_SHIFT 24
256#define RXFC_FWM_MASK (0x7f < RXFC_FWM_SHIFT) 257#define RXFC_FWM_MASK (0x7f << RXFC_FWM_SHIFT)
257#define RXFC_FS_SHIFT 16 258#define RXFC_FS_SHIFT 16
258#define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT) 259#define RXFC_FS_MASK (0x7f << RXFC_FS_SHIFT)
259 260
@@ -1700,6 +1701,8 @@ static __maybe_unused int m_can_suspend(struct device *dev)
1700 m_can_clk_stop(priv); 1701 m_can_clk_stop(priv);
1701 } 1702 }
1702 1703
1704 pinctrl_pm_select_sleep_state(dev);
1705
1703 priv->can.state = CAN_STATE_SLEEPING; 1706 priv->can.state = CAN_STATE_SLEEPING;
1704 1707
1705 return 0; 1708 return 0;
@@ -1710,6 +1713,8 @@ static __maybe_unused int m_can_resume(struct device *dev)
1710 struct net_device *ndev = dev_get_drvdata(dev); 1713 struct net_device *ndev = dev_get_drvdata(dev);
1711 struct m_can_priv *priv = netdev_priv(ndev); 1714 struct m_can_priv *priv = netdev_priv(ndev);
1712 1715
1716 pinctrl_pm_select_default_state(dev);
1717
1713 m_can_init_ram(priv); 1718 m_can_init_ram(priv);
1714 1719
1715 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1720 priv->can.state = CAN_STATE_ERROR_ACTIVE;
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 55513411a82e..ed8561d4a90f 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -262,7 +262,6 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
262 262
263 spin_lock_irqsave(&priv->echo_lock, flags); 263 spin_lock_irqsave(&priv->echo_lock, flags);
264 can_get_echo_skb(priv->ndev, msg->client); 264 can_get_echo_skb(priv->ndev, msg->client);
265 spin_unlock_irqrestore(&priv->echo_lock, flags);
266 265
267 /* count bytes of the echo instead of skb */ 266 /* count bytes of the echo instead of skb */
268 stats->tx_bytes += cf_len; 267 stats->tx_bytes += cf_len;
@@ -271,6 +270,7 @@ static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
271 /* restart tx queue (a slot is free) */ 270 /* restart tx queue (a slot is free) */
272 netif_wake_queue(priv->ndev); 271 netif_wake_queue(priv->ndev);
273 272
273 spin_unlock_irqrestore(&priv->echo_lock, flags);
274 return 0; 274 return 0;
275 } 275 }
276 276
@@ -333,7 +333,6 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
333 333
334 /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ 334 /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */
335 if (pucan_status_is_rx_barrier(msg)) { 335 if (pucan_status_is_rx_barrier(msg)) {
336 unsigned long flags;
337 336
338 if (priv->enable_tx_path) { 337 if (priv->enable_tx_path) {
339 int err = priv->enable_tx_path(priv); 338 int err = priv->enable_tx_path(priv);
@@ -342,16 +341,8 @@ static int pucan_handle_status(struct peak_canfd_priv *priv,
342 return err; 341 return err;
343 } 342 }
344 343
345 /* restart network queue only if echo skb array is free */ 344 /* start network queue (echo_skb array is empty) */
346 spin_lock_irqsave(&priv->echo_lock, flags); 345 netif_start_queue(ndev);
347
348 if (!priv->can.echo_skb[priv->echo_idx]) {
349 spin_unlock_irqrestore(&priv->echo_lock, flags);
350
351 netif_wake_queue(ndev);
352 } else {
353 spin_unlock_irqrestore(&priv->echo_lock, flags);
354 }
355 346
356 return 0; 347 return 0;
357 } 348 }
@@ -726,11 +717,6 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
726 */ 717 */
727 should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]); 718 should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]);
728 719
729 spin_unlock_irqrestore(&priv->echo_lock, flags);
730
731 /* write the skb on the interface */
732 priv->write_tx_msg(priv, msg);
733
734 /* stop network tx queue if not enough room to save one more msg too */ 720 /* stop network tx queue if not enough room to save one more msg too */
735 if (priv->can.ctrlmode & CAN_CTRLMODE_FD) 721 if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
736 should_stop_tx_queue |= (room_left < 722 should_stop_tx_queue |= (room_left <
@@ -742,6 +728,11 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
742 if (should_stop_tx_queue) 728 if (should_stop_tx_queue)
743 netif_stop_queue(ndev); 729 netif_stop_queue(ndev);
744 730
731 spin_unlock_irqrestore(&priv->echo_lock, flags);
732
733 /* write the skb on the interface */
734 priv->write_tx_msg(priv, msg);
735
745 return NETDEV_TX_OK; 736 return NETDEV_TX_OK;
746} 737}
747 738
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 788c3464a3b0..3c51a884db87 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -349,8 +349,12 @@ static irqreturn_t pciefd_irq_handler(int irq, void *arg)
349 priv->tx_pages_free++; 349 priv->tx_pages_free++;
350 spin_unlock_irqrestore(&priv->tx_lock, flags); 350 spin_unlock_irqrestore(&priv->tx_lock, flags);
351 351
352 /* wake producer up */ 352 /* wake producer up (only if enough room in echo_skb array) */
353 netif_wake_queue(priv->ucan.ndev); 353 spin_lock_irqsave(&priv->ucan.echo_lock, flags);
354 if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx])
355 netif_wake_queue(priv->ucan.ndev);
356
357 spin_unlock_irqrestore(&priv->ucan.echo_lock, flags);
354 } 358 }
355 359
356 /* re-enable Rx DMA transfer for this CAN */ 360 /* re-enable Rx DMA transfer for this CAN */
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index d040aeb45172..15c2a831edf1 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,7 +1,10 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o 2obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
3bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o 3bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
4obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o dsa_loop_bdinfo.o 4obj-$(CONFIG_NET_DSA_LOOP) += dsa_loop.o
5ifdef CONFIG_NET_DSA_LOOP
6obj-$(CONFIG_FIXED_PHY) += dsa_loop_bdinfo.o
7endif
5obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o 8obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
6obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o 9obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
7obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o 10obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index db830a1141d9..63e02a54d537 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -814,8 +814,8 @@ void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
814 unsigned int i; 814 unsigned int i;
815 815
816 for (i = 0; i < mib_size; i++) 816 for (i = 0; i < mib_size; i++)
817 memcpy(data + i * ETH_GSTRING_LEN, 817 strlcpy(data + i * ETH_GSTRING_LEN,
818 mibs[i].name, ETH_GSTRING_LEN); 818 mibs[i].name, ETH_GSTRING_LEN);
819} 819}
820EXPORT_SYMBOL(b53_get_strings); 820EXPORT_SYMBOL(b53_get_strings);
821 821
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index 29c3075bfb05..fdc673484add 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5config NET_VENDOR_8390 5config NET_VENDOR_8390
6 bool "National Semi-conductor 8390 devices" 6 bool "National Semiconductor 8390 devices"
7 default y 7 default y
8 depends on NET_VENDOR_NATSEMI 8 depends on NET_VENDOR_NATSEMI
9 ---help--- 9 ---help---
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 0b49f1aeebd3..fc7383106946 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -36,6 +36,8 @@
36#define AQ_CFG_TX_FRAME_MAX (16U * 1024U) 36#define AQ_CFG_TX_FRAME_MAX (16U * 1024U)
37#define AQ_CFG_RX_FRAME_MAX (4U * 1024U) 37#define AQ_CFG_RX_FRAME_MAX (4U * 1024U)
38 38
39#define AQ_CFG_TX_CLEAN_BUDGET 256U
40
39/* LRO */ 41/* LRO */
40#define AQ_CFG_IS_LRO_DEF 1U 42#define AQ_CFG_IS_LRO_DEF 1U
41 43
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index ebbaf63eaf47..c96a92118b8b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -247,6 +247,8 @@ void aq_nic_ndev_init(struct aq_nic_s *self)
247 self->ndev->hw_features |= aq_hw_caps->hw_features; 247 self->ndev->hw_features |= aq_hw_caps->hw_features;
248 self->ndev->features = aq_hw_caps->hw_features; 248 self->ndev->features = aq_hw_caps->hw_features;
249 self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; 249 self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
250 self->ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
251
250 self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; 252 self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
251 self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN; 253 self->ndev->max_mtu = aq_hw_caps->mtu - ETH_FCS_LEN - ETH_HLEN;
252 254
@@ -937,3 +939,23 @@ err_exit:
937out: 939out:
938 return err; 940 return err;
939} 941}
942
943void aq_nic_shutdown(struct aq_nic_s *self)
944{
945 int err = 0;
946
947 if (!self->ndev)
948 return;
949
950 rtnl_lock();
951
952 netif_device_detach(self->ndev);
953
954 err = aq_nic_stop(self);
955 if (err < 0)
956 goto err_exit;
957 aq_nic_deinit(self);
958
959err_exit:
960 rtnl_unlock();
961} \ No newline at end of file
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index d16b0f1a95aa..219b550d1665 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -118,5 +118,6 @@ struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
118u32 aq_nic_get_fw_version(struct aq_nic_s *self); 118u32 aq_nic_get_fw_version(struct aq_nic_s *self);
119int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); 119int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
120int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self); 120int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self);
121void aq_nic_shutdown(struct aq_nic_s *self);
121 122
122#endif /* AQ_NIC_H */ 123#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 87c4308b52a7..ecc6306f940f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -323,6 +323,20 @@ static void aq_pci_remove(struct pci_dev *pdev)
323 pci_disable_device(pdev); 323 pci_disable_device(pdev);
324} 324}
325 325
326static void aq_pci_shutdown(struct pci_dev *pdev)
327{
328 struct aq_nic_s *self = pci_get_drvdata(pdev);
329
330 aq_nic_shutdown(self);
331
332 pci_disable_device(pdev);
333
334 if (system_state == SYSTEM_POWER_OFF) {
335 pci_wake_from_d3(pdev, false);
336 pci_set_power_state(pdev, PCI_D3hot);
337 }
338}
339
326static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg) 340static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
327{ 341{
328 struct aq_nic_s *self = pci_get_drvdata(pdev); 342 struct aq_nic_s *self = pci_get_drvdata(pdev);
@@ -345,6 +359,7 @@ static struct pci_driver aq_pci_ops = {
345 .remove = aq_pci_remove, 359 .remove = aq_pci_remove,
346 .suspend = aq_pci_suspend, 360 .suspend = aq_pci_suspend,
347 .resume = aq_pci_resume, 361 .resume = aq_pci_resume,
362 .shutdown = aq_pci_shutdown,
348}; 363};
349 364
350module_pci_driver(aq_pci_ops); 365module_pci_driver(aq_pci_ops);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 0be6a11370bb..b5f1f62e8e25 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -136,11 +136,12 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)
136 netif_stop_subqueue(ndev, ring->idx); 136 netif_stop_subqueue(ndev, ring->idx);
137} 137}
138 138
139void aq_ring_tx_clean(struct aq_ring_s *self) 139bool aq_ring_tx_clean(struct aq_ring_s *self)
140{ 140{
141 struct device *dev = aq_nic_get_dev(self->aq_nic); 141 struct device *dev = aq_nic_get_dev(self->aq_nic);
142 unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET;
142 143
143 for (; self->sw_head != self->hw_head; 144 for (; self->sw_head != self->hw_head && budget--;
144 self->sw_head = aq_ring_next_dx(self, self->sw_head)) { 145 self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
145 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; 146 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
146 147
@@ -167,6 +168,8 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
167 buff->pa = 0U; 168 buff->pa = 0U;
168 buff->eop_index = 0xffffU; 169 buff->eop_index = 0xffffU;
169 } 170 }
171
172 return !!budget;
170} 173}
171 174
172#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) 175#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 965fae0fb6e0..ac1329f4051d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -153,7 +153,7 @@ void aq_ring_free(struct aq_ring_s *self);
153void aq_ring_update_queue_state(struct aq_ring_s *ring); 153void aq_ring_update_queue_state(struct aq_ring_s *ring);
154void aq_ring_queue_wake(struct aq_ring_s *ring); 154void aq_ring_queue_wake(struct aq_ring_s *ring);
155void aq_ring_queue_stop(struct aq_ring_s *ring); 155void aq_ring_queue_stop(struct aq_ring_s *ring);
156void aq_ring_tx_clean(struct aq_ring_s *self); 156bool aq_ring_tx_clean(struct aq_ring_s *self);
157int aq_ring_rx_clean(struct aq_ring_s *self, 157int aq_ring_rx_clean(struct aq_ring_s *self,
158 struct napi_struct *napi, 158 struct napi_struct *napi,
159 int *work_done, 159 int *work_done,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f890b8a5a862..d335c334fa56 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -35,12 +35,12 @@ struct aq_vec_s {
35static int aq_vec_poll(struct napi_struct *napi, int budget) 35static int aq_vec_poll(struct napi_struct *napi, int budget)
36{ 36{
37 struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi); 37 struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
38 unsigned int sw_tail_old = 0U;
38 struct aq_ring_s *ring = NULL; 39 struct aq_ring_s *ring = NULL;
40 bool was_tx_cleaned = true;
41 unsigned int i = 0U;
39 int work_done = 0; 42 int work_done = 0;
40 int err = 0; 43 int err = 0;
41 unsigned int i = 0U;
42 unsigned int sw_tail_old = 0U;
43 bool was_tx_cleaned = false;
44 44
45 if (!self) { 45 if (!self) {
46 err = -EINVAL; 46 err = -EINVAL;
@@ -57,9 +57,8 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
57 57
58 if (ring[AQ_VEC_TX_ID].sw_head != 58 if (ring[AQ_VEC_TX_ID].sw_head !=
59 ring[AQ_VEC_TX_ID].hw_head) { 59 ring[AQ_VEC_TX_ID].hw_head) {
60 aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); 60 was_tx_cleaned = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
61 aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]); 61 aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
62 was_tx_cleaned = true;
63 } 62 }
64 63
65 err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw, 64 err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
@@ -90,7 +89,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
90 } 89 }
91 } 90 }
92 91
93 if (was_tx_cleaned) 92 if (!was_tx_cleaned)
94 work_done = budget; 93 work_done = budget;
95 94
96 if (work_done < budget) { 95 if (work_done < budget) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 967f0fd07fcf..d3b847ec7465 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -21,6 +21,10 @@
21 21
22#define HW_ATL_UCP_0X370_REG 0x0370U 22#define HW_ATL_UCP_0X370_REG 0x0370U
23 23
24#define HW_ATL_MIF_CMD 0x0200U
25#define HW_ATL_MIF_ADDR 0x0208U
26#define HW_ATL_MIF_VAL 0x020CU
27
24#define HW_ATL_FW_SM_RAM 0x2U 28#define HW_ATL_FW_SM_RAM 0x2U
25#define HW_ATL_MPI_FW_VERSION 0x18 29#define HW_ATL_MPI_FW_VERSION 0x18
26#define HW_ATL_MPI_CONTROL_ADR 0x0368U 30#define HW_ATL_MPI_CONTROL_ADR 0x0368U
@@ -79,16 +83,15 @@ int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
79 83
80static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self) 84static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
81{ 85{
86 u32 gsr, val;
82 int k = 0; 87 int k = 0;
83 u32 gsr;
84 88
85 aq_hw_write_reg(self, 0x404, 0x40e1); 89 aq_hw_write_reg(self, 0x404, 0x40e1);
86 AQ_HW_SLEEP(50); 90 AQ_HW_SLEEP(50);
87 91
88 /* Cleanup SPI */ 92 /* Cleanup SPI */
89 aq_hw_write_reg(self, 0x534, 0xA0); 93 val = aq_hw_read_reg(self, 0x53C);
90 aq_hw_write_reg(self, 0x100, 0x9F); 94 aq_hw_write_reg(self, 0x53C, val | 0x10);
91 aq_hw_write_reg(self, 0x100, 0x809F);
92 95
93 gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR); 96 gsr = aq_hw_read_reg(self, HW_ATL_GLB_SOFT_RES_ADR);
94 aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000); 97 aq_hw_write_reg(self, HW_ATL_GLB_SOFT_RES_ADR, (gsr & 0xBFFF) | 0x8000);
@@ -97,7 +100,14 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
97 aq_hw_write_reg(self, 0x404, 0x80e0); 100 aq_hw_write_reg(self, 0x404, 0x80e0);
98 aq_hw_write_reg(self, 0x32a8, 0x0); 101 aq_hw_write_reg(self, 0x32a8, 0x0);
99 aq_hw_write_reg(self, 0x520, 0x1); 102 aq_hw_write_reg(self, 0x520, 0x1);
103
104 /* Reset SPI again because of possible interrupted SPI burst */
105 val = aq_hw_read_reg(self, 0x53C);
106 aq_hw_write_reg(self, 0x53C, val | 0x10);
100 AQ_HW_SLEEP(10); 107 AQ_HW_SLEEP(10);
108 /* Clear SPI reset state */
109 aq_hw_write_reg(self, 0x53C, val & ~0x10);
110
101 aq_hw_write_reg(self, 0x404, 0x180e0); 111 aq_hw_write_reg(self, 0x404, 0x180e0);
102 112
103 for (k = 0; k < 1000; k++) { 113 for (k = 0; k < 1000; k++) {
@@ -141,13 +151,15 @@ static int hw_atl_utils_soft_reset_flb(struct aq_hw_s *self)
141 aq_pr_err("FW kickstart failed\n"); 151 aq_pr_err("FW kickstart failed\n");
142 return -EIO; 152 return -EIO;
143 } 153 }
154 /* Old FW requires fixed delay after init */
155 AQ_HW_SLEEP(15);
144 156
145 return 0; 157 return 0;
146} 158}
147 159
148static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self) 160static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
149{ 161{
150 u32 gsr, rbl_status; 162 u32 gsr, val, rbl_status;
151 int k; 163 int k;
152 164
153 aq_hw_write_reg(self, 0x404, 0x40e1); 165 aq_hw_write_reg(self, 0x404, 0x40e1);
@@ -157,6 +169,10 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
157 /* Alter RBL status */ 169 /* Alter RBL status */
158 aq_hw_write_reg(self, 0x388, 0xDEAD); 170 aq_hw_write_reg(self, 0x388, 0xDEAD);
159 171
172 /* Cleanup SPI */
173 val = aq_hw_read_reg(self, 0x53C);
174 aq_hw_write_reg(self, 0x53C, val | 0x10);
175
160 /* Global software reset*/ 176 /* Global software reset*/
161 hw_atl_rx_rx_reg_res_dis_set(self, 0U); 177 hw_atl_rx_rx_reg_res_dis_set(self, 0U);
162 hw_atl_tx_tx_reg_res_dis_set(self, 0U); 178 hw_atl_tx_tx_reg_res_dis_set(self, 0U);
@@ -204,6 +220,8 @@ static int hw_atl_utils_soft_reset_rbl(struct aq_hw_s *self)
204 aq_pr_err("FW kickstart failed\n"); 220 aq_pr_err("FW kickstart failed\n");
205 return -EIO; 221 return -EIO;
206 } 222 }
223 /* Old FW requires fixed delay after init */
224 AQ_HW_SLEEP(15);
207 225
208 return 0; 226 return 0;
209} 227}
@@ -255,18 +273,22 @@ int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
255 } 273 }
256 } 274 }
257 275
258 aq_hw_write_reg(self, 0x00000208U, a); 276 aq_hw_write_reg(self, HW_ATL_MIF_ADDR, a);
259
260 for (++cnt; --cnt;) {
261 u32 i = 0U;
262 277
263 aq_hw_write_reg(self, 0x00000200U, 0x00008000U); 278 for (++cnt; --cnt && !err;) {
279 aq_hw_write_reg(self, HW_ATL_MIF_CMD, 0x00008000U);
264 280
265 for (i = 1024U; 281 if (IS_CHIP_FEATURE(REVISION_B1))
266 (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) { 282 AQ_HW_WAIT_FOR(a != aq_hw_read_reg(self,
267 } 283 HW_ATL_MIF_ADDR),
284 1, 1000U);
285 else
286 AQ_HW_WAIT_FOR(!(0x100 & aq_hw_read_reg(self,
287 HW_ATL_MIF_CMD)),
288 1, 1000U);
268 289
269 *(p++) = aq_hw_read_reg(self, 0x0000020CU); 290 *(p++) = aq_hw_read_reg(self, HW_ATL_MIF_VAL);
291 a += 4;
270 } 292 }
271 293
272 hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM); 294 hw_atl_reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
@@ -662,14 +684,18 @@ void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
662 u32 val = hw_atl_reg_glb_mif_id_get(self); 684 u32 val = hw_atl_reg_glb_mif_id_get(self);
663 u32 mif_rev = val & 0xFFU; 685 u32 mif_rev = val & 0xFFU;
664 686
665 if ((3U & mif_rev) == 1U) { 687 if ((0xFU & mif_rev) == 1U) {
666 chip_features |= 688 chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
667 HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
668 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | 689 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
669 HAL_ATLANTIC_UTILS_CHIP_MIPS; 690 HAL_ATLANTIC_UTILS_CHIP_MIPS;
670 } else if ((3U & mif_rev) == 2U) { 691 } else if ((0xFU & mif_rev) == 2U) {
671 chip_features |= 692 chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
672 HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 | 693 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
694 HAL_ATLANTIC_UTILS_CHIP_MIPS |
695 HAL_ATLANTIC_UTILS_CHIP_TPO2 |
696 HAL_ATLANTIC_UTILS_CHIP_RPF2;
697 } else if ((0xFU & mif_rev) == 0xAU) {
698 chip_features |= HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 |
673 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ | 699 HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
674 HAL_ATLANTIC_UTILS_CHIP_MIPS | 700 HAL_ATLANTIC_UTILS_CHIP_MIPS |
675 HAL_ATLANTIC_UTILS_CHIP_TPO2 | 701 HAL_ATLANTIC_UTILS_CHIP_TPO2 |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
index 2c690947910a..cd8f18f39c61 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -161,6 +161,7 @@ struct __packed hw_aq_atl_utils_mbox {
161#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U 161#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U
162#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U 162#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U
163#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U 163#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
164#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B1 0x04000000U
164 165
165#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \ 166#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
166 self->chip_features) 167 self->chip_features)
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
index 5265b937677b..a445de6837a6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/ver.h
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -13,7 +13,7 @@
13#define NIC_MAJOR_DRIVER_VERSION 2 13#define NIC_MAJOR_DRIVER_VERSION 2
14#define NIC_MINOR_DRIVER_VERSION 0 14#define NIC_MINOR_DRIVER_VERSION 0
15#define NIC_BUILD_DRIVER_VERSION 2 15#define NIC_BUILD_DRIVER_VERSION 2
16#define NIC_REVISION_DRIVER_VERSION 0 16#define NIC_REVISION_DRIVER_VERSION 1
17 17
18#define AQ_CFG_DRV_VERSION_SUFFIX "-kern" 18#define AQ_CFG_DRV_VERSION_SUFFIX "-kern"
19 19
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c
index 16f9bee992fe..0f6576802607 100644
--- a/drivers/net/ethernet/arc/emac_rockchip.c
+++ b/drivers/net/ethernet/arc/emac_rockchip.c
@@ -169,8 +169,10 @@ static int emac_rockchip_probe(struct platform_device *pdev)
169 /* Optional regulator for PHY */ 169 /* Optional regulator for PHY */
170 priv->regulator = devm_regulator_get_optional(dev, "phy"); 170 priv->regulator = devm_regulator_get_optional(dev, "phy");
171 if (IS_ERR(priv->regulator)) { 171 if (IS_ERR(priv->regulator)) {
172 if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) 172 if (PTR_ERR(priv->regulator) == -EPROBE_DEFER) {
173 return -EPROBE_DEFER; 173 err = -EPROBE_DEFER;
174 goto out_clk_disable;
175 }
174 dev_err(dev, "no regulator found\n"); 176 dev_err(dev, "no regulator found\n");
175 priv->regulator = NULL; 177 priv->regulator = NULL;
176 } 178 }
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index f15a8fc6dfc9..3fc549b88c43 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -855,10 +855,12 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
855static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv, 855static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
856 struct bcm_sysport_tx_ring *ring) 856 struct bcm_sysport_tx_ring *ring)
857{ 857{
858 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
859 unsigned int pkts_compl = 0, bytes_compl = 0; 858 unsigned int pkts_compl = 0, bytes_compl = 0;
860 struct net_device *ndev = priv->netdev; 859 struct net_device *ndev = priv->netdev;
860 unsigned int txbds_processed = 0;
861 struct bcm_sysport_cb *cb; 861 struct bcm_sysport_cb *cb;
862 unsigned int txbds_ready;
863 unsigned int c_index;
862 u32 hw_ind; 864 u32 hw_ind;
863 865
864 /* Clear status before servicing to reduce spurious interrupts */ 866 /* Clear status before servicing to reduce spurious interrupts */
@@ -871,29 +873,23 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
871 /* Compute how many descriptors have been processed since last call */ 873 /* Compute how many descriptors have been processed since last call */
872 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index)); 874 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
873 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK; 875 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
874 ring->p_index = (hw_ind & RING_PROD_INDEX_MASK); 876 txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;
875
876 last_c_index = ring->c_index;
877 num_tx_cbs = ring->size;
878
879 c_index &= (num_tx_cbs - 1);
880
881 if (c_index >= last_c_index)
882 last_tx_cn = c_index - last_c_index;
883 else
884 last_tx_cn = num_tx_cbs - last_c_index + c_index;
885 877
886 netif_dbg(priv, tx_done, ndev, 878 netif_dbg(priv, tx_done, ndev,
887 "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n", 879 "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
888 ring->index, c_index, last_tx_cn, last_c_index); 880 ring->index, ring->c_index, c_index, txbds_ready);
889 881
890 while (last_tx_cn-- > 0) { 882 while (txbds_processed < txbds_ready) {
891 cb = ring->cbs + last_c_index; 883 cb = &ring->cbs[ring->clean_index];
892 bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl); 884 bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);
893 885
894 ring->desc_count++; 886 ring->desc_count++;
895 last_c_index++; 887 txbds_processed++;
896 last_c_index &= (num_tx_cbs - 1); 888
889 if (likely(ring->clean_index < ring->size - 1))
890 ring->clean_index++;
891 else
892 ring->clean_index = 0;
897 } 893 }
898 894
899 u64_stats_update_begin(&priv->syncp); 895 u64_stats_update_begin(&priv->syncp);
@@ -1394,6 +1390,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1394 netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64); 1390 netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
1395 ring->index = index; 1391 ring->index = index;
1396 ring->size = size; 1392 ring->size = size;
1393 ring->clean_index = 0;
1397 ring->alloc_size = ring->size; 1394 ring->alloc_size = ring->size;
1398 ring->desc_cpu = p; 1395 ring->desc_cpu = p;
1399 ring->desc_count = ring->size; 1396 ring->desc_count = ring->size;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index f5a984c1c986..19c91c76e327 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -706,7 +706,7 @@ struct bcm_sysport_tx_ring {
706 unsigned int desc_count; /* Number of descriptors */ 706 unsigned int desc_count; /* Number of descriptors */
707 unsigned int curr_desc; /* Current descriptor */ 707 unsigned int curr_desc; /* Current descriptor */
708 unsigned int c_index; /* Last consumer index */ 708 unsigned int c_index; /* Last consumer index */
709 unsigned int p_index; /* Current producer index */ 709 unsigned int clean_index; /* Current clean index */
710 struct bcm_sysport_cb *cbs; /* Transmit control blocks */ 710 struct bcm_sysport_cb *cbs; /* Transmit control blocks */
711 struct dma_desc *desc_cpu; /* CPU view of the descriptor */ 711 struct dma_desc *desc_cpu; /* CPU view of the descriptor */
712 struct bcm_sysport_priv *priv; /* private context backpointer */ 712 struct bcm_sysport_priv *priv; /* private context backpointer */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 74fc9af4aadb..b8388e93520a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13913,7 +13913,7 @@ static void bnx2x_register_phc(struct bnx2x *bp)
13913 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev); 13913 bp->ptp_clock = ptp_clock_register(&bp->ptp_clock_info, &bp->pdev->dev);
13914 if (IS_ERR(bp->ptp_clock)) { 13914 if (IS_ERR(bp->ptp_clock)) {
13915 bp->ptp_clock = NULL; 13915 bp->ptp_clock = NULL;
13916 BNX2X_ERR("PTP clock registeration failed\n"); 13916 BNX2X_ERR("PTP clock registration failed\n");
13917 } 13917 }
13918} 13918}
13919 13919
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 1500243b9886..c7e5e6f09647 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1439,7 +1439,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
1439 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1439 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1440 u16 vlan_proto = tpa_info->metadata >> 1440 u16 vlan_proto = tpa_info->metadata >>
1441 RX_CMP_FLAGS2_METADATA_TPID_SFT; 1441 RX_CMP_FLAGS2_METADATA_TPID_SFT;
1442 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK; 1442 u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1443 1443
1444 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1444 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
1445 } 1445 }
@@ -1623,7 +1623,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
1623 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) && 1623 cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
1624 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) { 1624 (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1625 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); 1625 u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
1626 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK; 1626 u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
1627 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; 1627 u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
1628 1628
1629 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); 1629 __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
@@ -3847,6 +3847,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
3847 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; 3847 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
3848 struct hwrm_vnic_tpa_cfg_input req = {0}; 3848 struct hwrm_vnic_tpa_cfg_input req = {0};
3849 3849
3850 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
3851 return 0;
3852
3850 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); 3853 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
3851 3854
3852 if (tpa_flags) { 3855 if (tpa_flags) {
@@ -4558,18 +4561,17 @@ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
4558 return rc; 4561 return rc;
4559} 4562}
4560 4563
4561static int 4564static void
4562bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 4565__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
4563 int ring_grps, int cp_rings, int vnics) 4566 int tx_rings, int rx_rings, int ring_grps,
4567 int cp_rings, int vnics)
4564{ 4568{
4565 struct hwrm_func_cfg_input req = {0};
4566 u32 enables = 0; 4569 u32 enables = 0;
4567 int rc;
4568 4570
4569 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4571 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
4570 req.fid = cpu_to_le16(0xffff); 4572 req->fid = cpu_to_le16(0xffff);
4571 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 4573 enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4572 req.num_tx_rings = cpu_to_le16(tx_rings); 4574 req->num_tx_rings = cpu_to_le16(tx_rings);
4573 if (bp->flags & BNXT_FLAG_NEW_RM) { 4575 if (bp->flags & BNXT_FLAG_NEW_RM) {
4574 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0; 4576 enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
4575 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | 4577 enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
@@ -4578,16 +4580,53 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4578 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0; 4580 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
4579 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0; 4581 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
4580 4582
4581 req.num_rx_rings = cpu_to_le16(rx_rings); 4583 req->num_rx_rings = cpu_to_le16(rx_rings);
4582 req.num_hw_ring_grps = cpu_to_le16(ring_grps); 4584 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
4583 req.num_cmpl_rings = cpu_to_le16(cp_rings); 4585 req->num_cmpl_rings = cpu_to_le16(cp_rings);
4584 req.num_stat_ctxs = req.num_cmpl_rings; 4586 req->num_stat_ctxs = req->num_cmpl_rings;
4585 req.num_vnics = cpu_to_le16(vnics); 4587 req->num_vnics = cpu_to_le16(vnics);
4586 } 4588 }
4587 if (!enables) 4589 req->enables = cpu_to_le32(enables);
4590}
4591
4592static void
4593__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
4594 struct hwrm_func_vf_cfg_input *req, int tx_rings,
4595 int rx_rings, int ring_grps, int cp_rings,
4596 int vnics)
4597{
4598 u32 enables = 0;
4599
4600 bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
4601 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
4602 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
4603 enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4604 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
4605 enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
4606 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
4607
4608 req->num_tx_rings = cpu_to_le16(tx_rings);
4609 req->num_rx_rings = cpu_to_le16(rx_rings);
4610 req->num_hw_ring_grps = cpu_to_le16(ring_grps);
4611 req->num_cmpl_rings = cpu_to_le16(cp_rings);
4612 req->num_stat_ctxs = req->num_cmpl_rings;
4613 req->num_vnics = cpu_to_le16(vnics);
4614
4615 req->enables = cpu_to_le32(enables);
4616}
4617
4618static int
4619bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4620 int ring_grps, int cp_rings, int vnics)
4621{
4622 struct hwrm_func_cfg_input req = {0};
4623 int rc;
4624
4625 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4626 cp_rings, vnics);
4627 if (!req.enables)
4588 return 0; 4628 return 0;
4589 4629
4590 req.enables = cpu_to_le32(enables);
4591 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4630 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4592 if (rc) 4631 if (rc)
4593 return -ENOMEM; 4632 return -ENOMEM;
@@ -4604,7 +4643,6 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4604 int ring_grps, int cp_rings, int vnics) 4643 int ring_grps, int cp_rings, int vnics)
4605{ 4644{
4606 struct hwrm_func_vf_cfg_input req = {0}; 4645 struct hwrm_func_vf_cfg_input req = {0};
4607 u32 enables = 0;
4608 int rc; 4646 int rc;
4609 4647
4610 if (!(bp->flags & BNXT_FLAG_NEW_RM)) { 4648 if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
@@ -4612,22 +4650,8 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4612 return 0; 4650 return 0;
4613 } 4651 }
4614 4652
4615 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 4653 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4616 enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; 4654 cp_rings, vnics);
4617 enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
4618 enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4619 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
4620 enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
4621 enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
4622
4623 req.num_tx_rings = cpu_to_le16(tx_rings);
4624 req.num_rx_rings = cpu_to_le16(rx_rings);
4625 req.num_hw_ring_grps = cpu_to_le16(ring_grps);
4626 req.num_cmpl_rings = cpu_to_le16(cp_rings);
4627 req.num_stat_ctxs = req.num_cmpl_rings;
4628 req.num_vnics = cpu_to_le16(vnics);
4629
4630 req.enables = cpu_to_le32(enables);
4631 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4655 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4632 if (rc) 4656 if (rc)
4633 return -ENOMEM; 4657 return -ENOMEM;
@@ -4743,39 +4767,25 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
4743} 4767}
4744 4768
4745static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 4769static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4746 int ring_grps, int cp_rings) 4770 int ring_grps, int cp_rings, int vnics)
4747{ 4771{
4748 struct hwrm_func_vf_cfg_input req = {0}; 4772 struct hwrm_func_vf_cfg_input req = {0};
4749 u32 flags, enables; 4773 u32 flags;
4750 int rc; 4774 int rc;
4751 4775
4752 if (!(bp->flags & BNXT_FLAG_NEW_RM)) 4776 if (!(bp->flags & BNXT_FLAG_NEW_RM))
4753 return 0; 4777 return 0;
4754 4778
4755 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); 4779 __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4780 cp_rings, vnics);
4756 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | 4781 flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
4757 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | 4782 FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
4758 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 4783 FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
4759 FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | 4784 FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
4760 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 4785 FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
4761 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 4786 FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
4762 enables = FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS |
4763 FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
4764 FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4765 FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
4766 FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS |
4767 FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS;
4768 4787
4769 req.flags = cpu_to_le32(flags); 4788 req.flags = cpu_to_le32(flags);
4770 req.enables = cpu_to_le32(enables);
4771 req.num_tx_rings = cpu_to_le16(tx_rings);
4772 req.num_rx_rings = cpu_to_le16(rx_rings);
4773 req.num_cmpl_rings = cpu_to_le16(cp_rings);
4774 req.num_hw_ring_grps = cpu_to_le16(ring_grps);
4775 req.num_stat_ctxs = cpu_to_le16(cp_rings);
4776 req.num_vnics = cpu_to_le16(1);
4777 if (bp->flags & BNXT_FLAG_RFS)
4778 req.num_vnics = cpu_to_le16(rx_rings + 1);
4779 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4789 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4780 if (rc) 4790 if (rc)
4781 return -ENOMEM; 4791 return -ENOMEM;
@@ -4783,38 +4793,23 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4783} 4793}
4784 4794
4785static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, 4795static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4786 int ring_grps, int cp_rings) 4796 int ring_grps, int cp_rings, int vnics)
4787{ 4797{
4788 struct hwrm_func_cfg_input req = {0}; 4798 struct hwrm_func_cfg_input req = {0};
4789 u32 flags, enables; 4799 u32 flags;
4790 int rc; 4800 int rc;
4791 4801
4792 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); 4802 __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
4793 req.fid = cpu_to_le16(0xffff); 4803 cp_rings, vnics);
4794 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; 4804 flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
4795 enables = FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS; 4805 if (bp->flags & BNXT_FLAG_NEW_RM)
4796 req.num_tx_rings = cpu_to_le16(tx_rings);
4797 if (bp->flags & BNXT_FLAG_NEW_RM) {
4798 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | 4806 flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
4799 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | 4807 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
4800 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST | 4808 FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
4801 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST | 4809 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
4802 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST; 4810 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
4803 enables |= FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | 4811
4804 FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
4805 FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
4806 FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS |
4807 FUNC_CFG_REQ_ENABLES_NUM_VNICS;
4808 req.num_rx_rings = cpu_to_le16(rx_rings);
4809 req.num_cmpl_rings = cpu_to_le16(cp_rings);
4810 req.num_hw_ring_grps = cpu_to_le16(ring_grps);
4811 req.num_stat_ctxs = cpu_to_le16(cp_rings);
4812 req.num_vnics = cpu_to_le16(1);
4813 if (bp->flags & BNXT_FLAG_RFS)
4814 req.num_vnics = cpu_to_le16(rx_rings + 1);
4815 }
4816 req.flags = cpu_to_le32(flags); 4812 req.flags = cpu_to_le32(flags);
4817 req.enables = cpu_to_le32(enables);
4818 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 4813 rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4819 if (rc) 4814 if (rc)
4820 return -ENOMEM; 4815 return -ENOMEM;
@@ -4822,17 +4817,17 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4822} 4817}
4823 4818
4824static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, 4819static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
4825 int ring_grps, int cp_rings) 4820 int ring_grps, int cp_rings, int vnics)
4826{ 4821{
4827 if (bp->hwrm_spec_code < 0x10801) 4822 if (bp->hwrm_spec_code < 0x10801)
4828 return 0; 4823 return 0;
4829 4824
4830 if (BNXT_PF(bp)) 4825 if (BNXT_PF(bp))
4831 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, 4826 return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
4832 ring_grps, cp_rings); 4827 ring_grps, cp_rings, vnics);
4833 4828
4834 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, 4829 return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
4835 cp_rings); 4830 cp_rings, vnics);
4836} 4831}
4837 4832
4838static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal, 4833static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
@@ -5865,7 +5860,6 @@ static int bnxt_init_msix(struct bnxt *bp)
5865 if (rc) 5860 if (rc)
5866 goto msix_setup_exit; 5861 goto msix_setup_exit;
5867 5862
5868 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
5869 bp->cp_nr_rings = (min == 1) ? 5863 bp->cp_nr_rings = (min == 1) ?
5870 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : 5864 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
5871 bp->tx_nr_rings + bp->rx_nr_rings; 5865 bp->tx_nr_rings + bp->rx_nr_rings;
@@ -5897,7 +5891,6 @@ static int bnxt_init_inta(struct bnxt *bp)
5897 bp->rx_nr_rings = 1; 5891 bp->rx_nr_rings = 1;
5898 bp->tx_nr_rings = 1; 5892 bp->tx_nr_rings = 1;
5899 bp->cp_nr_rings = 1; 5893 bp->cp_nr_rings = 1;
5900 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
5901 bp->flags |= BNXT_FLAG_SHARED_RINGS; 5894 bp->flags |= BNXT_FLAG_SHARED_RINGS;
5902 bp->irq_tbl[0].vector = bp->pdev->irq; 5895 bp->irq_tbl[0].vector = bp->pdev->irq;
5903 return 0; 5896 return 0;
@@ -7531,7 +7524,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
7531 int max_rx, max_tx, tx_sets = 1; 7524 int max_rx, max_tx, tx_sets = 1;
7532 int tx_rings_needed; 7525 int tx_rings_needed;
7533 int rx_rings = rx; 7526 int rx_rings = rx;
7534 int cp, rc; 7527 int cp, vnics, rc;
7535 7528
7536 if (tcs) 7529 if (tcs)
7537 tx_sets = tcs; 7530 tx_sets = tcs;
@@ -7547,10 +7540,15 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
7547 if (max_tx < tx_rings_needed) 7540 if (max_tx < tx_rings_needed)
7548 return -ENOMEM; 7541 return -ENOMEM;
7549 7542
7543 vnics = 1;
7544 if (bp->flags & BNXT_FLAG_RFS)
7545 vnics += rx_rings;
7546
7550 if (bp->flags & BNXT_FLAG_AGG_RINGS) 7547 if (bp->flags & BNXT_FLAG_AGG_RINGS)
7551 rx_rings <<= 1; 7548 rx_rings <<= 1;
7552 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; 7549 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
7553 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp); 7550 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
7551 vnics);
7554} 7552}
7555 7553
7556static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) 7554static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
@@ -8437,13 +8435,20 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
8437 return 0; 8435 return 0;
8438 8436
8439 bnxt_hwrm_func_qcaps(bp); 8437 bnxt_hwrm_func_qcaps(bp);
8440 __bnxt_close_nic(bp, true, false); 8438
8439 if (netif_running(bp->dev))
8440 __bnxt_close_nic(bp, true, false);
8441
8441 bnxt_clear_int_mode(bp); 8442 bnxt_clear_int_mode(bp);
8442 rc = bnxt_init_int_mode(bp); 8443 rc = bnxt_init_int_mode(bp);
8443 if (rc) 8444
8444 dev_close(bp->dev); 8445 if (netif_running(bp->dev)) {
8445 else 8446 if (rc)
8446 rc = bnxt_open_nic(bp, true, false); 8447 dev_close(bp->dev);
8448 else
8449 rc = bnxt_open_nic(bp, true, false);
8450 }
8451
8447 return rc; 8452 return rc;
8448} 8453}
8449 8454
@@ -8664,6 +8669,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8664 if (rc) 8669 if (rc)
8665 goto init_err_pci_clean; 8670 goto init_err_pci_clean;
8666 8671
8672 /* No TC has been set yet and rings may have been trimmed due to
8673 * limited MSIX, so we re-initialize the TX rings per TC.
8674 */
8675 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
8676
8667 bnxt_get_wol_settings(bp); 8677 bnxt_get_wol_settings(bp);
8668 if (bp->flags & BNXT_FLAG_WOL_CAP) 8678 if (bp->flags & BNXT_FLAG_WOL_CAP)
8669 device_set_wakeup_enable(&pdev->dev, bp->wol); 8679 device_set_wakeup_enable(&pdev->dev, bp->wol);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 1989c470172c..5e3d62189cab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -189,6 +189,7 @@ struct rx_cmp_ext {
189 #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3) 189 #define RX_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
190 #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4) 190 #define RX_CMP_FLAGS2_META_FORMAT_VLAN (0x1 << 4)
191 __le32 rx_cmp_meta_data; 191 __le32 rx_cmp_meta_data;
192 #define RX_CMP_FLAGS2_METADATA_TCI_MASK 0xffff
192 #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff 193 #define RX_CMP_FLAGS2_METADATA_VID_MASK 0xfff
193 #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000 194 #define RX_CMP_FLAGS2_METADATA_TPID_MASK 0xffff0000
194 #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16 195 #define RX_CMP_FLAGS2_METADATA_TPID_SFT 16
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index fbe6e208e17b..65c2cee35766 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -349,6 +349,9 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
349 if (rc) 349 if (rc)
350 netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d", 350 netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
351 __func__, flow_handle, rc); 351 __func__, flow_handle, rc);
352
353 if (rc)
354 rc = -EIO;
352 return rc; 355 return rc;
353} 356}
354 357
@@ -484,13 +487,15 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
484 req.action_flags = cpu_to_le16(action_flags); 487 req.action_flags = cpu_to_le16(action_flags);
485 488
486 mutex_lock(&bp->hwrm_cmd_lock); 489 mutex_lock(&bp->hwrm_cmd_lock);
487
488 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 490 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
489 if (!rc) 491 if (!rc)
490 *flow_handle = resp->flow_handle; 492 *flow_handle = resp->flow_handle;
491
492 mutex_unlock(&bp->hwrm_cmd_lock); 493 mutex_unlock(&bp->hwrm_cmd_lock);
493 494
495 if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
496 rc = -ENOSPC;
497 else if (rc)
498 rc = -EIO;
494 return rc; 499 return rc;
495} 500}
496 501
@@ -561,6 +566,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
561 netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); 566 netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
562 mutex_unlock(&bp->hwrm_cmd_lock); 567 mutex_unlock(&bp->hwrm_cmd_lock);
563 568
569 if (rc)
570 rc = -EIO;
564 return rc; 571 return rc;
565} 572}
566 573
@@ -576,6 +583,9 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
576 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 583 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
577 if (rc) 584 if (rc)
578 netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); 585 netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
586
587 if (rc)
588 rc = -EIO;
579 return rc; 589 return rc;
580} 590}
581 591
@@ -624,6 +634,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
624 netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); 634 netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
625 mutex_unlock(&bp->hwrm_cmd_lock); 635 mutex_unlock(&bp->hwrm_cmd_lock);
626 636
637 if (rc)
638 rc = -EIO;
627 return rc; 639 return rc;
628} 640}
629 641
@@ -639,6 +651,9 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,
639 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); 651 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
640 if (rc) 652 if (rc)
641 netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); 653 netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
654
655 if (rc)
656 rc = -EIO;
642 return rc; 657 return rc;
643} 658}
644 659
@@ -1269,11 +1284,8 @@ static int bnxt_tc_del_flow(struct bnxt *bp,
1269 flow_node = rhashtable_lookup_fast(&tc_info->flow_table, 1284 flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1270 &tc_flow_cmd->cookie, 1285 &tc_flow_cmd->cookie,
1271 tc_info->flow_ht_params); 1286 tc_info->flow_ht_params);
1272 if (!flow_node) { 1287 if (!flow_node)
1273 netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx",
1274 tc_flow_cmd->cookie);
1275 return -EINVAL; 1288 return -EINVAL;
1276 }
1277 1289
1278 return __bnxt_tc_del_flow(bp, flow_node); 1290 return __bnxt_tc_del_flow(bp, flow_node);
1279} 1291}
@@ -1290,11 +1302,8 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp,
1290 flow_node = rhashtable_lookup_fast(&tc_info->flow_table, 1302 flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1291 &tc_flow_cmd->cookie, 1303 &tc_flow_cmd->cookie,
1292 tc_info->flow_ht_params); 1304 tc_info->flow_ht_params);
1293 if (!flow_node) { 1305 if (!flow_node)
1294 netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
1295 tc_flow_cmd->cookie);
1296 return -1; 1306 return -1;
1297 }
1298 1307
1299 flow = &flow_node->flow; 1308 flow = &flow_node->flow;
1300 curr_stats = &flow->stats; 1309 curr_stats = &flow->stats;
@@ -1344,8 +1353,10 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
1344 } else { 1353 } else {
1345 netdev_info(bp->dev, "error rc=%d", rc); 1354 netdev_info(bp->dev, "error rc=%d", rc);
1346 } 1355 }
1347
1348 mutex_unlock(&bp->hwrm_cmd_lock); 1356 mutex_unlock(&bp->hwrm_cmd_lock);
1357
1358 if (rc)
1359 rc = -EIO;
1349 return rc; 1360 return rc;
1350} 1361}
1351 1362
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index c1841db1b500..f2593978ae75 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -820,7 +820,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
820 820
821 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM); 821 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
822 822
823 usleep_range(10, 20); 823 udelay(10);
824 timeout_us -= (timeout_us > 10) ? 10 : timeout_us; 824 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
825 } 825 }
826 826
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 7b452e85de2a..61022b5f6743 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4970,7 +4970,6 @@ static void cxgb4_mgmt_setup(struct net_device *dev)
4970 /* Initialize the device structure. */ 4970 /* Initialize the device structure. */
4971 dev->netdev_ops = &cxgb4_mgmt_netdev_ops; 4971 dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
4972 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops; 4972 dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
4973 dev->needs_free_netdev = true;
4974} 4973}
4975 4974
4976static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs) 4975static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
@@ -5181,6 +5180,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5181 adapter->name = pci_name(pdev); 5180 adapter->name = pci_name(pdev);
5182 adapter->mbox = func; 5181 adapter->mbox = func;
5183 adapter->pf = func; 5182 adapter->pf = func;
5183 adapter->params.chip = chip;
5184 adapter->adap_idx = adap_idx;
5184 adapter->msg_enable = DFLT_MSG_ENABLE; 5185 adapter->msg_enable = DFLT_MSG_ENABLE;
5185 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) + 5186 adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
5186 (sizeof(struct mbox_cmd) * 5187 (sizeof(struct mbox_cmd) *
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 5eb999af2c40..bd3f6e4d1341 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -540,6 +540,7 @@ static int gmac_setup_txqs(struct net_device *netdev)
540 540
541 if (port->txq_dma_base & ~DMA_Q_BASE_MASK) { 541 if (port->txq_dma_base & ~DMA_Q_BASE_MASK) {
542 dev_warn(geth->dev, "TX queue base it not aligned\n"); 542 dev_warn(geth->dev, "TX queue base it not aligned\n");
543 kfree(skb_tab);
543 return -ENOMEM; 544 return -ENOMEM;
544 } 545 }
545 546
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 7caa8da48421..e4ec32a9ca15 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2008,7 +2008,6 @@ static inline int dpaa_xmit(struct dpaa_priv *priv,
2008 } 2008 }
2009 2009
2010 if (unlikely(err < 0)) { 2010 if (unlikely(err < 0)) {
2011 percpu_stats->tx_errors++;
2012 percpu_stats->tx_fifo_errors++; 2011 percpu_stats->tx_fifo_errors++;
2013 return err; 2012 return err;
2014 } 2013 }
@@ -2278,7 +2277,6 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
2278 vaddr = phys_to_virt(addr); 2277 vaddr = phys_to_virt(addr);
2279 prefetch(vaddr + qm_fd_get_offset(fd)); 2278 prefetch(vaddr + qm_fd_get_offset(fd));
2280 2279
2281 fd_format = qm_fd_get_format(fd);
2282 /* The only FD types that we may receive are contig and S/G */ 2280 /* The only FD types that we may receive are contig and S/G */
2283 WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg)); 2281 WARN_ON((fd_format != qm_fd_contig) && (fd_format != qm_fd_sg));
2284 2282
@@ -2311,8 +2309,10 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
2311 2309
2312 skb_len = skb->len; 2310 skb_len = skb->len;
2313 2311
2314 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) 2312 if (unlikely(netif_receive_skb(skb) == NET_RX_DROP)) {
2313 percpu_stats->rx_dropped++;
2315 return qman_cb_dqrr_consume; 2314 return qman_cb_dqrr_consume;
2315 }
2316 2316
2317 percpu_stats->rx_packets++; 2317 percpu_stats->rx_packets++;
2318 percpu_stats->rx_bytes += skb_len; 2318 percpu_stats->rx_bytes += skb_len;
@@ -2860,7 +2860,7 @@ static int dpaa_remove(struct platform_device *pdev)
2860 struct device *dev; 2860 struct device *dev;
2861 int err; 2861 int err;
2862 2862
2863 dev = &pdev->dev; 2863 dev = pdev->dev.parent;
2864 net_dev = dev_get_drvdata(dev); 2864 net_dev = dev_get_drvdata(dev);
2865 2865
2866 priv = netdev_priv(net_dev); 2866 priv = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 7a7f3a42b2aa..d4604bc8eb5b 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3600,6 +3600,8 @@ fec_drv_remove(struct platform_device *pdev)
3600 fec_enet_mii_remove(fep); 3600 fec_enet_mii_remove(fep);
3601 if (fep->reg_phy) 3601 if (fep->reg_phy)
3602 regulator_disable(fep->reg_phy); 3602 regulator_disable(fep->reg_phy);
3603 pm_runtime_put(&pdev->dev);
3604 pm_runtime_disable(&pdev->dev);
3603 if (of_phy_is_fixed_link(np)) 3605 if (of_phy_is_fixed_link(np))
3604 of_phy_deregister_fixed_link(np); 3606 of_phy_deregister_fixed_link(np);
3605 of_node_put(fep->phy_node); 3607 of_node_put(fep->phy_node);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index ea43b4974149..7af31ddd093f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1100,7 +1100,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
1100 set_bucket(dtsec->regs, bucket, true); 1100 set_bucket(dtsec->regs, bucket, true);
1101 1101
1102 /* Create element to be added to the driver hash table */ 1102 /* Create element to be added to the driver hash table */
1103 hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL); 1103 hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
1104 if (!hash_entry) 1104 if (!hash_entry)
1105 return -ENOMEM; 1105 return -ENOMEM;
1106 hash_entry->addr = addr; 1106 hash_entry->addr = addr;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 86944bc3b273..74bd260ca02a 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -666,7 +666,7 @@ static void hns_gmac_get_strings(u32 stringset, u8 *data)
666 666
667static int hns_gmac_get_sset_count(int stringset) 667static int hns_gmac_get_sset_count(int stringset)
668{ 668{
669 if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) 669 if (stringset == ETH_SS_STATS)
670 return ARRAY_SIZE(g_gmac_stats_string); 670 return ARRAY_SIZE(g_gmac_stats_string);
671 671
672 return 0; 672 return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index b62816c1574e..93e71e27401b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -422,7 +422,7 @@ void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
422 422
423int hns_ppe_get_sset_count(int stringset) 423int hns_ppe_get_sset_count(int stringset)
424{ 424{
425 if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) 425 if (stringset == ETH_SS_STATS)
426 return ETH_PPE_STATIC_NUM; 426 return ETH_PPE_STATIC_NUM;
427 return 0; 427 return 0;
428} 428}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 6f3570cfb501..e2e28532e4dc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -876,7 +876,7 @@ void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
876 */ 876 */
877int hns_rcb_get_ring_sset_count(int stringset) 877int hns_rcb_get_ring_sset_count(int stringset)
878{ 878{
879 if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS) 879 if (stringset == ETH_SS_STATS)
880 return HNS_RING_STATIC_REG_NUM; 880 return HNS_RING_STATIC_REG_NUM;
881 881
882 return 0; 882 return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 7ea7f8a4aa2a..2e14a3ae1d8b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -993,8 +993,10 @@ int hns_get_sset_count(struct net_device *netdev, int stringset)
993 cnt--; 993 cnt--;
994 994
995 return cnt; 995 return cnt;
996 } else { 996 } else if (stringset == ETH_SS_STATS) {
997 return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset)); 997 return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
998 } else {
999 return -EOPNOTSUPP;
998 } 1000 }
999} 1001}
1000 1002
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index afb7ebe20b24..824fd44e25f0 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -400,6 +400,10 @@
400#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ 400#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
401#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */ 401#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */
402#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ 402#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
403#define E1000_ICR_MDAC 0x00000200 /* MDIO Access Complete */
404#define E1000_ICR_SRPD 0x00010000 /* Small Receive Packet Detected */
405#define E1000_ICR_ACK 0x00020000 /* Receive ACK Frame Detected */
406#define E1000_ICR_MNG 0x00040000 /* Manageability Event Detected */
403#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ 407#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
404/* If this bit asserted, the driver should claim the interrupt */ 408/* If this bit asserted, the driver should claim the interrupt */
405#define E1000_ICR_INT_ASSERTED 0x80000000 409#define E1000_ICR_INT_ASSERTED 0x80000000
@@ -407,7 +411,7 @@
407#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ 411#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */
408#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ 412#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */
409#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ 413#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */
410#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ 414#define E1000_ICR_OTHER 0x01000000 /* Other Interrupt */
411 415
412/* PBA ECC Register */ 416/* PBA ECC Register */
413#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ 417#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */
@@ -431,12 +435,27 @@
431 E1000_IMS_RXSEQ | \ 435 E1000_IMS_RXSEQ | \
432 E1000_IMS_LSC) 436 E1000_IMS_LSC)
433 437
438/* These are all of the events related to the OTHER interrupt.
439 */
440#define IMS_OTHER_MASK ( \
441 E1000_IMS_LSC | \
442 E1000_IMS_RXO | \
443 E1000_IMS_MDAC | \
444 E1000_IMS_SRPD | \
445 E1000_IMS_ACK | \
446 E1000_IMS_MNG)
447
434/* Interrupt Mask Set */ 448/* Interrupt Mask Set */
435#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 449#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
436#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ 450#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
437#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ 451#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
438#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ 452#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
453#define E1000_IMS_RXO E1000_ICR_RXO /* Receiver Overrun */
439#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ 454#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
455#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO Access Complete */
456#define E1000_IMS_SRPD E1000_ICR_SRPD /* Small Receive Packet */
457#define E1000_IMS_ACK E1000_ICR_ACK /* Receive ACK Frame Detected */
458#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability Event */
440#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ 459#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */
441#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ 460#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */
442#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ 461#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 31277d3bb7dc..1dddfb7b2de6 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1367,9 +1367,6 @@ out:
1367 * Checks to see of the link status of the hardware has changed. If a 1367 * Checks to see of the link status of the hardware has changed. If a
1368 * change in link status has been detected, then we read the PHY registers 1368 * change in link status has been detected, then we read the PHY registers
1369 * to get the current speed/duplex if link exists. 1369 * to get the current speed/duplex if link exists.
1370 *
1371 * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
1372 * up).
1373 **/ 1370 **/
1374static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) 1371static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1375{ 1372{
@@ -1385,7 +1382,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1385 * Change or Rx Sequence Error interrupt. 1382 * Change or Rx Sequence Error interrupt.
1386 */ 1383 */
1387 if (!mac->get_link_status) 1384 if (!mac->get_link_status)
1388 return 1; 1385 return 0;
1386 mac->get_link_status = false;
1389 1387
1390 /* First we want to see if the MII Status Register reports 1388 /* First we want to see if the MII Status Register reports
1391 * link. If so, then we want to get the current speed/duplex 1389 * link. If so, then we want to get the current speed/duplex
@@ -1393,12 +1391,12 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1393 */ 1391 */
1394 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); 1392 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
1395 if (ret_val) 1393 if (ret_val)
1396 return ret_val; 1394 goto out;
1397 1395
1398 if (hw->mac.type == e1000_pchlan) { 1396 if (hw->mac.type == e1000_pchlan) {
1399 ret_val = e1000_k1_gig_workaround_hv(hw, link); 1397 ret_val = e1000_k1_gig_workaround_hv(hw, link);
1400 if (ret_val) 1398 if (ret_val)
1401 return ret_val; 1399 goto out;
1402 } 1400 }
1403 1401
1404 /* When connected at 10Mbps half-duplex, some parts are excessively 1402 /* When connected at 10Mbps half-duplex, some parts are excessively
@@ -1431,7 +1429,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1431 1429
1432 ret_val = hw->phy.ops.acquire(hw); 1430 ret_val = hw->phy.ops.acquire(hw);
1433 if (ret_val) 1431 if (ret_val)
1434 return ret_val; 1432 goto out;
1435 1433
1436 if (hw->mac.type == e1000_pch2lan) 1434 if (hw->mac.type == e1000_pch2lan)
1437 emi_addr = I82579_RX_CONFIG; 1435 emi_addr = I82579_RX_CONFIG;
@@ -1453,7 +1451,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1453 hw->phy.ops.release(hw); 1451 hw->phy.ops.release(hw);
1454 1452
1455 if (ret_val) 1453 if (ret_val)
1456 return ret_val; 1454 goto out;
1457 1455
1458 if (hw->mac.type >= e1000_pch_spt) { 1456 if (hw->mac.type >= e1000_pch_spt) {
1459 u16 data; 1457 u16 data;
@@ -1462,14 +1460,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1462 if (speed == SPEED_1000) { 1460 if (speed == SPEED_1000) {
1463 ret_val = hw->phy.ops.acquire(hw); 1461 ret_val = hw->phy.ops.acquire(hw);
1464 if (ret_val) 1462 if (ret_val)
1465 return ret_val; 1463 goto out;
1466 1464
1467 ret_val = e1e_rphy_locked(hw, 1465 ret_val = e1e_rphy_locked(hw,
1468 PHY_REG(776, 20), 1466 PHY_REG(776, 20),
1469 &data); 1467 &data);
1470 if (ret_val) { 1468 if (ret_val) {
1471 hw->phy.ops.release(hw); 1469 hw->phy.ops.release(hw);
1472 return ret_val; 1470 goto out;
1473 } 1471 }
1474 1472
1475 ptr_gap = (data & (0x3FF << 2)) >> 2; 1473 ptr_gap = (data & (0x3FF << 2)) >> 2;
@@ -1483,18 +1481,18 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1483 } 1481 }
1484 hw->phy.ops.release(hw); 1482 hw->phy.ops.release(hw);
1485 if (ret_val) 1483 if (ret_val)
1486 return ret_val; 1484 goto out;
1487 } else { 1485 } else {
1488 ret_val = hw->phy.ops.acquire(hw); 1486 ret_val = hw->phy.ops.acquire(hw);
1489 if (ret_val) 1487 if (ret_val)
1490 return ret_val; 1488 goto out;
1491 1489
1492 ret_val = e1e_wphy_locked(hw, 1490 ret_val = e1e_wphy_locked(hw,
1493 PHY_REG(776, 20), 1491 PHY_REG(776, 20),
1494 0xC023); 1492 0xC023);
1495 hw->phy.ops.release(hw); 1493 hw->phy.ops.release(hw);
1496 if (ret_val) 1494 if (ret_val)
1497 return ret_val; 1495 goto out;
1498 1496
1499 } 1497 }
1500 } 1498 }
@@ -1521,7 +1519,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1521 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { 1519 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
1522 ret_val = e1000_k1_workaround_lpt_lp(hw, link); 1520 ret_val = e1000_k1_workaround_lpt_lp(hw, link);
1523 if (ret_val) 1521 if (ret_val)
1524 return ret_val; 1522 goto out;
1525 } 1523 }
1526 if (hw->mac.type >= e1000_pch_lpt) { 1524 if (hw->mac.type >= e1000_pch_lpt) {
1527 /* Set platform power management values for 1525 /* Set platform power management values for
@@ -1529,7 +1527,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1529 */ 1527 */
1530 ret_val = e1000_platform_pm_pch_lpt(hw, link); 1528 ret_val = e1000_platform_pm_pch_lpt(hw, link);
1531 if (ret_val) 1529 if (ret_val)
1532 return ret_val; 1530 goto out;
1533 } 1531 }
1534 1532
1535 /* Clear link partner's EEE ability */ 1533 /* Clear link partner's EEE ability */
@@ -1552,9 +1550,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1552 } 1550 }
1553 1551
1554 if (!link) 1552 if (!link)
1555 return 0; /* No link detected */ 1553 goto out;
1556
1557 mac->get_link_status = false;
1558 1554
1559 switch (hw->mac.type) { 1555 switch (hw->mac.type) {
1560 case e1000_pch2lan: 1556 case e1000_pch2lan:
@@ -1616,12 +1612,14 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
1616 * different link partner. 1612 * different link partner.
1617 */ 1613 */
1618 ret_val = e1000e_config_fc_after_link_up(hw); 1614 ret_val = e1000e_config_fc_after_link_up(hw);
1619 if (ret_val) { 1615 if (ret_val)
1620 e_dbg("Error configuring flow control\n"); 1616 e_dbg("Error configuring flow control\n");
1621 return ret_val;
1622 }
1623 1617
1624 return 1; 1618 return ret_val;
1619
1620out:
1621 mac->get_link_status = true;
1622 return ret_val;
1625} 1623}
1626 1624
1627static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) 1625static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index f457c5703d0c..5bdc3a2d4fd7 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -410,9 +410,6 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
410 * Checks to see of the link status of the hardware has changed. If a 410 * Checks to see of the link status of the hardware has changed. If a
411 * change in link status has been detected, then we read the PHY registers 411 * change in link status has been detected, then we read the PHY registers
412 * to get the current speed/duplex if link exists. 412 * to get the current speed/duplex if link exists.
413 *
414 * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
415 * up).
416 **/ 413 **/
417s32 e1000e_check_for_copper_link(struct e1000_hw *hw) 414s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
418{ 415{
@@ -426,20 +423,16 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
426 * Change or Rx Sequence Error interrupt. 423 * Change or Rx Sequence Error interrupt.
427 */ 424 */
428 if (!mac->get_link_status) 425 if (!mac->get_link_status)
429 return 1; 426 return 0;
427 mac->get_link_status = false;
430 428
431 /* First we want to see if the MII Status Register reports 429 /* First we want to see if the MII Status Register reports
432 * link. If so, then we want to get the current speed/duplex 430 * link. If so, then we want to get the current speed/duplex
433 * of the PHY. 431 * of the PHY.
434 */ 432 */
435 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); 433 ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
436 if (ret_val) 434 if (ret_val || !link)
437 return ret_val; 435 goto out;
438
439 if (!link)
440 return 0; /* No link detected */
441
442 mac->get_link_status = false;
443 436
444 /* Check if there was DownShift, must be checked 437 /* Check if there was DownShift, must be checked
445 * immediately after link-up 438 * immediately after link-up
@@ -464,12 +457,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
464 * different link partner. 457 * different link partner.
465 */ 458 */
466 ret_val = e1000e_config_fc_after_link_up(hw); 459 ret_val = e1000e_config_fc_after_link_up(hw);
467 if (ret_val) { 460 if (ret_val)
468 e_dbg("Error configuring flow control\n"); 461 e_dbg("Error configuring flow control\n");
469 return ret_val;
470 }
471 462
472 return 1; 463 return ret_val;
464
465out:
466 mac->get_link_status = true;
467 return ret_val;
473} 468}
474 469
475/** 470/**
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 1298b69f990b..dc853b0863af 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1914,30 +1914,20 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
1914 struct net_device *netdev = data; 1914 struct net_device *netdev = data;
1915 struct e1000_adapter *adapter = netdev_priv(netdev); 1915 struct e1000_adapter *adapter = netdev_priv(netdev);
1916 struct e1000_hw *hw = &adapter->hw; 1916 struct e1000_hw *hw = &adapter->hw;
1917 u32 icr; 1917 u32 icr = er32(ICR);
1918 bool enable = true; 1918
1919 1919 if (icr & adapter->eiac_mask)
1920 icr = er32(ICR); 1920 ew32(ICS, (icr & adapter->eiac_mask));
1921 if (icr & E1000_ICR_RXO) { 1921
1922 ew32(ICR, E1000_ICR_RXO);
1923 enable = false;
1924 /* napi poll will re-enable Other, make sure it runs */
1925 if (napi_schedule_prep(&adapter->napi)) {
1926 adapter->total_rx_bytes = 0;
1927 adapter->total_rx_packets = 0;
1928 __napi_schedule(&adapter->napi);
1929 }
1930 }
1931 if (icr & E1000_ICR_LSC) { 1922 if (icr & E1000_ICR_LSC) {
1932 ew32(ICR, E1000_ICR_LSC);
1933 hw->mac.get_link_status = true; 1923 hw->mac.get_link_status = true;
1934 /* guard against interrupt when we're going down */ 1924 /* guard against interrupt when we're going down */
1935 if (!test_bit(__E1000_DOWN, &adapter->state)) 1925 if (!test_bit(__E1000_DOWN, &adapter->state))
1936 mod_timer(&adapter->watchdog_timer, jiffies + 1); 1926 mod_timer(&adapter->watchdog_timer, jiffies + 1);
1937 } 1927 }
1938 1928
1939 if (enable && !test_bit(__E1000_DOWN, &adapter->state)) 1929 if (!test_bit(__E1000_DOWN, &adapter->state))
1940 ew32(IMS, E1000_IMS_OTHER); 1930 ew32(IMS, E1000_IMS_OTHER | IMS_OTHER_MASK);
1941 1931
1942 return IRQ_HANDLED; 1932 return IRQ_HANDLED;
1943} 1933}
@@ -2040,7 +2030,6 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
2040 hw->hw_addr + E1000_EITR_82574(vector)); 2030 hw->hw_addr + E1000_EITR_82574(vector));
2041 else 2031 else
2042 writel(1, hw->hw_addr + E1000_EITR_82574(vector)); 2032 writel(1, hw->hw_addr + E1000_EITR_82574(vector));
2043 adapter->eiac_mask |= E1000_IMS_OTHER;
2044 2033
2045 /* Cause Tx interrupts on every write back */ 2034 /* Cause Tx interrupts on every write back */
2046 ivar |= BIT(31); 2035 ivar |= BIT(31);
@@ -2265,7 +2254,8 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
2265 2254
2266 if (adapter->msix_entries) { 2255 if (adapter->msix_entries) {
2267 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); 2256 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2268 ew32(IMS, adapter->eiac_mask | E1000_IMS_LSC); 2257 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER |
2258 IMS_OTHER_MASK);
2269 } else if (hw->mac.type >= e1000_pch_lpt) { 2259 } else if (hw->mac.type >= e1000_pch_lpt) {
2270 ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); 2260 ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
2271 } else { 2261 } else {
@@ -2333,8 +2323,8 @@ static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2333{ 2323{
2334 struct pci_dev *pdev = adapter->pdev; 2324 struct pci_dev *pdev = adapter->pdev;
2335 2325
2336 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, 2326 ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma,
2337 GFP_KERNEL); 2327 GFP_KERNEL);
2338 if (!ring->desc) 2328 if (!ring->desc)
2339 return -ENOMEM; 2329 return -ENOMEM;
2340 2330
@@ -2707,8 +2697,7 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
2707 napi_complete_done(napi, work_done); 2697 napi_complete_done(napi, work_done);
2708 if (!test_bit(__E1000_DOWN, &adapter->state)) { 2698 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2709 if (adapter->msix_entries) 2699 if (adapter->msix_entries)
2710 ew32(IMS, adapter->rx_ring->ims_val | 2700 ew32(IMS, adapter->rx_ring->ims_val);
2711 E1000_IMS_OTHER);
2712 else 2701 else
2713 e1000_irq_enable(adapter); 2702 e1000_irq_enable(adapter);
2714 } 2703 }
@@ -5101,7 +5090,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
5101 case e1000_media_type_copper: 5090 case e1000_media_type_copper:
5102 if (hw->mac.get_link_status) { 5091 if (hw->mac.get_link_status) {
5103 ret_val = hw->mac.ops.check_for_link(hw); 5092 ret_val = hw->mac.ops.check_for_link(hw);
5104 link_active = ret_val > 0; 5093 link_active = !hw->mac.get_link_status;
5105 } else { 5094 } else {
5106 link_active = true; 5095 link_active = true;
5107 } 5096 }
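
The three e1000e hunks above change the copper link-check contract: check_for_copper_link() no longer returns 1 for link-up, mac->get_link_status is cleared as soon as a check starts and set back to true on every early-exit path (the new "out:" label), and e1000e_has_link() derives link_active from that flag instead of from a positive return value. A minimal, self-contained sketch of the resulting pattern, using illustrative demo_* names rather than the real driver symbols:

        #include <stdbool.h>

        struct demo_hw {
                bool get_link_status;   /* true: link state unknown, must re-check */
                bool link_up;           /* stand-in for the real PHY status */
        };

        /* Stub for e1000e_phy_has_link_generic(); always succeeds here. */
        static int demo_phy_has_link(struct demo_hw *hw, bool *link)
        {
                *link = hw->link_up;
                return 0;
        }

        static int demo_check_for_link(struct demo_hw *hw)
        {
                bool link;
                int ret;

                if (!hw->get_link_status)
                        return 0;               /* state already known, nothing to do */
                hw->get_link_status = false;

                ret = demo_phy_has_link(hw, &link);
                if (ret || !link)
                        goto out;

                /* speed/duplex and flow-control setup would go here */
                return ret;

        out:
                hw->get_link_status = true;     /* error or no link: force a re-check */
                return ret;
        }

        /* Caller side, mirroring e1000e_has_link(): link is up iff the check
         * ran to completion and left the "re-check needed" flag cleared.
         */
        static bool demo_has_link(struct demo_hw *hw)
        {
                demo_check_for_link(hw);
                return !hw->get_link_status;
        }
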
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index b698fb481b2e..996dc099cd58 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -443,6 +443,17 @@ int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id)
443} 443}
444EXPORT_SYMBOL(mlxsw_afa_block_jump); 444EXPORT_SYMBOL(mlxsw_afa_block_jump);
445 445
446int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block)
447{
448 if (block->finished)
449 return -EINVAL;
450 mlxsw_afa_set_goto_set(block->cur_set,
451 MLXSW_AFA_SET_GOTO_BINDING_CMD_TERM, 0);
452 block->finished = true;
453 return 0;
454}
455EXPORT_SYMBOL(mlxsw_afa_block_terminate);
456
446static struct mlxsw_afa_fwd_entry * 457static struct mlxsw_afa_fwd_entry *
447mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port) 458mlxsw_afa_fwd_entry_create(struct mlxsw_afa *mlxsw_afa, u8 local_port)
448{ 459{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 43132293475c..b91f2b0829b0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -65,6 +65,7 @@ char *mlxsw_afa_block_first_set(struct mlxsw_afa_block *block);
65u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block); 65u32 mlxsw_afa_block_first_set_kvdl_index(struct mlxsw_afa_block *block);
66int mlxsw_afa_block_continue(struct mlxsw_afa_block *block); 66int mlxsw_afa_block_continue(struct mlxsw_afa_block *block);
67int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id); 67int mlxsw_afa_block_jump(struct mlxsw_afa_block *block, u16 group_id);
68int mlxsw_afa_block_terminate(struct mlxsw_afa_block *block);
68int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block); 69int mlxsw_afa_block_append_drop(struct mlxsw_afa_block *block);
69int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id); 70int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
70int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block, 71int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c7e941aecc2a..bf400c75fcc8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -655,13 +655,17 @@ static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
655} 655}
656 656
657static struct mlxsw_sp_span_inspected_port * 657static struct mlxsw_sp_span_inspected_port *
658mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port, 658mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_span_entry *span_entry,
659 struct mlxsw_sp_span_entry *span_entry) 659 enum mlxsw_sp_span_type type,
660 struct mlxsw_sp_port *port,
661 bool bind)
660{ 662{
661 struct mlxsw_sp_span_inspected_port *p; 663 struct mlxsw_sp_span_inspected_port *p;
662 664
663 list_for_each_entry(p, &span_entry->bound_ports_list, list) 665 list_for_each_entry(p, &span_entry->bound_ports_list, list)
664 if (port->local_port == p->local_port) 666 if (type == p->type &&
667 port->local_port == p->local_port &&
668 bind == p->bound)
665 return p; 669 return p;
666 return NULL; 670 return NULL;
667} 671}
@@ -691,8 +695,22 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
691 struct mlxsw_sp_span_inspected_port *inspected_port; 695 struct mlxsw_sp_span_inspected_port *inspected_port;
692 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; 696 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
693 char sbib_pl[MLXSW_REG_SBIB_LEN]; 697 char sbib_pl[MLXSW_REG_SBIB_LEN];
698 int i;
694 int err; 699 int err;
695 700
701 /* A given (source port, direction) can only be bound to one analyzer,
702 * so if a binding is requested, check for conflicts.
703 */
704 if (bind)
705 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
706 struct mlxsw_sp_span_entry *curr =
707 &mlxsw_sp->span.entries[i];
708
709 if (mlxsw_sp_span_entry_bound_port_find(curr, type,
710 port, bind))
711 return -EEXIST;
712 }
713
696 /* if it is an egress SPAN, bind a shared buffer to it */ 714 /* if it is an egress SPAN, bind a shared buffer to it */
697 if (type == MLXSW_SP_SPAN_EGRESS) { 715 if (type == MLXSW_SP_SPAN_EGRESS) {
698 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, 716 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
@@ -720,6 +738,7 @@ mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
720 } 738 }
721 inspected_port->local_port = port->local_port; 739 inspected_port->local_port = port->local_port;
722 inspected_port->type = type; 740 inspected_port->type = type;
741 inspected_port->bound = bind;
723 list_add_tail(&inspected_port->list, &span_entry->bound_ports_list); 742 list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
724 743
725 return 0; 744 return 0;
@@ -746,7 +765,8 @@ mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
746 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp; 765 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
747 char sbib_pl[MLXSW_REG_SBIB_LEN]; 766 char sbib_pl[MLXSW_REG_SBIB_LEN];
748 767
749 inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry); 768 inspected_port = mlxsw_sp_span_entry_bound_port_find(span_entry, type,
769 port, bind);
750 if (!inspected_port) 770 if (!inspected_port)
751 return; 771 return;
752 772
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4ec1ca3c96c8..92064db2ae44 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -120,6 +120,9 @@ struct mlxsw_sp_span_inspected_port {
120 struct list_head list; 120 struct list_head list;
121 enum mlxsw_sp_span_type type; 121 enum mlxsw_sp_span_type type;
122 u8 local_port; 122 u8 local_port;
123
124 /* Whether this is a directly bound mirror (port-to-port) or an ACL. */
125 bool bound;
123}; 126};
124 127
125struct mlxsw_sp_span_entry { 128struct mlxsw_sp_span_entry {
@@ -553,6 +556,7 @@ void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
553int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei); 556int mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei);
554int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei, 557int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
555 u16 group_id); 558 u16 group_id);
559int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei);
556int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei); 560int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei);
557int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei); 561int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei);
558int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp, 562int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 0897a5435cc2..92d90ed7207e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -528,6 +528,11 @@ int mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
528 return mlxsw_afa_block_jump(rulei->act_block, group_id); 528 return mlxsw_afa_block_jump(rulei->act_block, group_id);
529} 529}
530 530
531int mlxsw_sp_acl_rulei_act_terminate(struct mlxsw_sp_acl_rule_info *rulei)
532{
533 return mlxsw_afa_block_terminate(rulei->act_block);
534}
535
531int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei) 536int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
532{ 537{
533 return mlxsw_afa_block_append_drop(rulei->act_block); 538 return mlxsw_afa_block_append_drop(rulei->act_block);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 93728c694e6d..0a9adc5962fb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -385,13 +385,13 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
385 385
386static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = { 386static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
387 MLXSW_SP_CPU_PORT_SB_CM, 387 MLXSW_SP_CPU_PORT_SB_CM,
388 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
389 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
390 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
391 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
392 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
388 MLXSW_SP_CPU_PORT_SB_CM, 393 MLXSW_SP_CPU_PORT_SB_CM,
389 MLXSW_SP_CPU_PORT_SB_CM, 394 MLXSW_SP_SB_CM(MLXSW_PORT_MAX_MTU, 0, 0),
390 MLXSW_SP_CPU_PORT_SB_CM,
391 MLXSW_SP_CPU_PORT_SB_CM,
392 MLXSW_SP_CPU_PORT_SB_CM,
393 MLXSW_SP_CPU_PORT_SB_CM,
394 MLXSW_SP_SB_CM(10000, 0, 0),
395 MLXSW_SP_CPU_PORT_SB_CM, 395 MLXSW_SP_CPU_PORT_SB_CM,
396 MLXSW_SP_CPU_PORT_SB_CM, 396 MLXSW_SP_CPU_PORT_SB_CM,
397 MLXSW_SP_CPU_PORT_SB_CM, 397 MLXSW_SP_CPU_PORT_SB_CM,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 6ce00e28d4ea..89dbf569dff5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -65,7 +65,7 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
65 tcf_exts_to_list(exts, &actions); 65 tcf_exts_to_list(exts, &actions);
66 list_for_each_entry(a, &actions, list) { 66 list_for_each_entry(a, &actions, list) {
67 if (is_tcf_gact_ok(a)) { 67 if (is_tcf_gact_ok(a)) {
68 err = mlxsw_sp_acl_rulei_act_continue(rulei); 68 err = mlxsw_sp_acl_rulei_act_terminate(rulei);
69 if (err) 69 if (err)
70 return err; 70 return err;
71 } else if (is_tcf_gact_shot(a)) { 71 } else if (is_tcf_gact_shot(a)) {
diff --git a/drivers/net/ethernet/natsemi/Kconfig b/drivers/net/ethernet/natsemi/Kconfig
index a10ef50e4f12..017fb2322589 100644
--- a/drivers/net/ethernet/natsemi/Kconfig
+++ b/drivers/net/ethernet/natsemi/Kconfig
@@ -1,16 +1,16 @@
1# 1#
2# National Semi-conductor device configuration 2# National Semiconductor device configuration
3# 3#
4 4
5config NET_VENDOR_NATSEMI 5config NET_VENDOR_NATSEMI
6 bool "National Semi-conductor devices" 6 bool "National Semiconductor devices"
7 default y 7 default y
8 ---help--- 8 ---help---
9 If you have a network (Ethernet) card belonging to this class, say Y. 9 If you have a network (Ethernet) card belonging to this class, say Y.
10 10
11 Note that the answer to this question doesn't directly affect the 11 Note that the answer to this question doesn't directly affect the
12 kernel: saying N will just cause the configurator to skip all 12 kernel: saying N will just cause the configurator to skip all
13 the questions about National Semi-conductor devices. If you say Y, 13 the questions about National Semiconductor devices. If you say Y,
14 you will be asked for your specific card in the following questions. 14 you will be asked for your specific card in the following questions.
15 15
16if NET_VENDOR_NATSEMI 16if NET_VENDOR_NATSEMI
diff --git a/drivers/net/ethernet/natsemi/Makefile b/drivers/net/ethernet/natsemi/Makefile
index cc664977596e..a759aa09ef59 100644
--- a/drivers/net/ethernet/natsemi/Makefile
+++ b/drivers/net/ethernet/natsemi/Makefile
@@ -1,6 +1,6 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# 2#
3# Makefile for the National Semi-conductor Sonic devices. 3# Makefile for the National Semiconductor Sonic devices.
4# 4#
5 5
6obj-$(CONFIG_MACSONIC) += macsonic.o 6obj-$(CONFIG_MACSONIC) += macsonic.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 6f546e869d8d..00f41c145d4d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -2480,7 +2480,10 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
2480 if (rc) 2480 if (rc)
2481 return rc; 2481 return rc;
2482 2482
2483 /* Free Task CXT */ 2483 /* Free Task CXT ( Intentionally RoCE as task-id is shared between
2484 * RoCE and iWARP )
2485 */
2486 proto = PROTOCOLID_ROCE;
2484 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0, 2487 rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
2485 qed_cxt_get_proto_tid_count(p_hwfn, proto)); 2488 qed_cxt_get_proto_tid_count(p_hwfn, proto));
2486 if (rc) 2489 if (rc)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index ca4a81dc1ace..d5d02be72947 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1703,6 +1703,13 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1703 iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen); 1703 iph = (struct iphdr *)((u8 *)(ethh) + eth_hlen);
1704 1704
1705 if (eth_type == ETH_P_IP) { 1705 if (eth_type == ETH_P_IP) {
1706 if (iph->protocol != IPPROTO_TCP) {
1707 DP_NOTICE(p_hwfn,
1708 "Unexpected ip protocol on ll2 %x\n",
1709 iph->protocol);
1710 return -EINVAL;
1711 }
1712
1706 cm_info->local_ip[0] = ntohl(iph->daddr); 1713 cm_info->local_ip[0] = ntohl(iph->daddr);
1707 cm_info->remote_ip[0] = ntohl(iph->saddr); 1714 cm_info->remote_ip[0] = ntohl(iph->saddr);
1708 cm_info->ip_version = TCP_IPV4; 1715 cm_info->ip_version = TCP_IPV4;
@@ -1711,6 +1718,14 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn,
1711 *payload_len = ntohs(iph->tot_len) - ip_hlen; 1718 *payload_len = ntohs(iph->tot_len) - ip_hlen;
1712 } else if (eth_type == ETH_P_IPV6) { 1719 } else if (eth_type == ETH_P_IPV6) {
1713 ip6h = (struct ipv6hdr *)iph; 1720 ip6h = (struct ipv6hdr *)iph;
1721
1722 if (ip6h->nexthdr != IPPROTO_TCP) {
1723 DP_NOTICE(p_hwfn,
1724 "Unexpected ip protocol on ll2 %x\n",
1725 iph->protocol);
1726 return -EINVAL;
1727 }
1728
1714 for (i = 0; i < 4; i++) { 1729 for (i = 0; i < 4; i++) {
1715 cm_info->local_ip[i] = 1730 cm_info->local_ip[i] =
1716 ntohl(ip6h->daddr.in6_u.u6_addr32[i]); 1731 ntohl(ip6h->daddr.in6_u.u6_addr32[i]);
@@ -1928,8 +1943,8 @@ qed_iwarp_update_fpdu_length(struct qed_hwfn *p_hwfn,
1928 /* Missing lower byte is now available */ 1943 /* Missing lower byte is now available */
1929 mpa_len = fpdu->fpdu_length | *mpa_data; 1944 mpa_len = fpdu->fpdu_length | *mpa_data;
1930 fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len); 1945 fpdu->fpdu_length = QED_IWARP_FPDU_LEN_WITH_PAD(mpa_len);
1931 fpdu->mpa_frag_len = fpdu->fpdu_length;
1932 /* one byte of hdr */ 1946 /* one byte of hdr */
1947 fpdu->mpa_frag_len = 1;
1933 fpdu->incomplete_bytes = fpdu->fpdu_length - 1; 1948 fpdu->incomplete_bytes = fpdu->fpdu_length - 1;
1934 DP_VERBOSE(p_hwfn, 1949 DP_VERBOSE(p_hwfn,
1935 QED_MSG_RDMA, 1950 QED_MSG_RDMA,
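
The qed_iwarp_parse_rx_pkt() hunks above reject LL2 payloads whose L4 protocol is not TCP before any TCP header fields are read, in both the IPv4 and the IPv6 branch. Condensed into a single hypothetical helper (the name and standalone form are illustrative only; both branches in the driver cast the same header pointer):

        #include <linux/ip.h>
        #include <linux/ipv6.h>
        #include <linux/in.h>
        #include <linux/if_ether.h>
        #include <linux/errno.h>

        static int demo_ll2_l4_is_tcp(u16 eth_type, const struct iphdr *iph)
        {
                u8 l4_proto;

                if (eth_type == ETH_P_IP)
                        l4_proto = iph->protocol;
                else                                    /* ETH_P_IPV6 */
                        l4_proto = ((const struct ipv6hdr *)iph)->nexthdr;

                return l4_proto == IPPROTO_TCP ? 0 : -EINVAL;
        }
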
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 5d040b873137..a411f9c702a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -379,6 +379,7 @@ static void qed_rdma_free(struct qed_hwfn *p_hwfn)
379 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n"); 379 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "Freeing RDMA\n");
380 380
381 qed_rdma_free_reserved_lkey(p_hwfn); 381 qed_rdma_free_reserved_lkey(p_hwfn);
382 qed_cxt_free_proto_ilt(p_hwfn, p_hwfn->p_rdma_info->proto);
382 qed_rdma_resc_free(p_hwfn); 383 qed_rdma_resc_free(p_hwfn);
383} 384}
384 385
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 2db70eabddfe..a01e7d6e5442 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -288,7 +288,7 @@ int __init qede_init(void)
288 } 288 }
289 289
290 /* Must register notifier before pci ops, since we might miss 290 /* Must register notifier before pci ops, since we might miss
291 * interface rename after pci probe and netdev registeration. 291 * interface rename after pci probe and netdev registration.
292 */ 292 */
293 ret = register_netdevice_notifier(&qede_netdev_notifier); 293 ret = register_netdevice_notifier(&qede_netdev_notifier);
294 if (ret) { 294 if (ret) {
@@ -988,7 +988,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
988 if (rc) 988 if (rc)
989 goto err3; 989 goto err3;
990 990
991 /* Prepare the lock prior to the registeration of the netdev, 991 /* Prepare the lock prior to the registration of the netdev,
992 * as once it's registered we might reach flows requiring it 992 * as once it's registered we might reach flows requiring it
993 * [it's even possible to reach a flow needing it directly 993 * [it's even possible to reach a flow needing it directly
994 * from there, although it's unlikely]. 994 * from there, although it's unlikely].
@@ -2067,8 +2067,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
2067 link_params.link_up = true; 2067 link_params.link_up = true;
2068 edev->ops->common->set_link(edev->cdev, &link_params); 2068 edev->ops->common->set_link(edev->cdev, &link_params);
2069 2069
2070 qede_rdma_dev_event_open(edev);
2071
2072 edev->state = QEDE_STATE_OPEN; 2070 edev->state = QEDE_STATE_OPEN;
2073 2071
2074 DP_INFO(edev, "Ending successfully qede load\n"); 2072 DP_INFO(edev, "Ending successfully qede load\n");
@@ -2169,12 +2167,14 @@ static void qede_link_update(void *dev, struct qed_link_output *link)
2169 DP_NOTICE(edev, "Link is up\n"); 2167 DP_NOTICE(edev, "Link is up\n");
2170 netif_tx_start_all_queues(edev->ndev); 2168 netif_tx_start_all_queues(edev->ndev);
2171 netif_carrier_on(edev->ndev); 2169 netif_carrier_on(edev->ndev);
2170 qede_rdma_dev_event_open(edev);
2172 } 2171 }
2173 } else { 2172 } else {
2174 if (netif_carrier_ok(edev->ndev)) { 2173 if (netif_carrier_ok(edev->ndev)) {
2175 DP_NOTICE(edev, "Link is down\n"); 2174 DP_NOTICE(edev, "Link is down\n");
2176 netif_tx_disable(edev->ndev); 2175 netif_tx_disable(edev->ndev);
2177 netif_carrier_off(edev->ndev); 2176 netif_carrier_off(edev->ndev);
2177 qede_rdma_dev_event_close(edev);
2178 } 2178 }
2179 } 2179 }
2180} 2180}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 9b2280badaf7..02adb513f475 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -485,7 +485,7 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
485 ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev); 485 ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
486 if (IS_ERR(ptp->clock)) { 486 if (IS_ERR(ptp->clock)) {
487 rc = -EINVAL; 487 rc = -EINVAL;
488 DP_ERR(edev, "PTP clock registeration failed\n"); 488 DP_ERR(edev, "PTP clock registration failed\n");
489 goto err2; 489 goto err2;
490 } 490 }
491 491
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 9cbb27263742..d5a32b7c7dc5 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -1194,9 +1194,9 @@ void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q)
1194 while (tx_q->tpd.consume_idx != hw_consume_idx) { 1194 while (tx_q->tpd.consume_idx != hw_consume_idx) {
1195 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx); 1195 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx);
1196 if (tpbuf->dma_addr) { 1196 if (tpbuf->dma_addr) {
1197 dma_unmap_single(adpt->netdev->dev.parent, 1197 dma_unmap_page(adpt->netdev->dev.parent,
1198 tpbuf->dma_addr, tpbuf->length, 1198 tpbuf->dma_addr, tpbuf->length,
1199 DMA_TO_DEVICE); 1199 DMA_TO_DEVICE);
1200 tpbuf->dma_addr = 0; 1200 tpbuf->dma_addr = 0;
1201 } 1201 }
1202 1202
@@ -1353,9 +1353,11 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
1353 1353
1354 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); 1354 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
1355 tpbuf->length = mapped_len; 1355 tpbuf->length = mapped_len;
1356 tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, 1356 tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
1357 skb->data, tpbuf->length, 1357 virt_to_page(skb->data),
1358 DMA_TO_DEVICE); 1358 offset_in_page(skb->data),
1359 tpbuf->length,
1360 DMA_TO_DEVICE);
1359 ret = dma_mapping_error(adpt->netdev->dev.parent, 1361 ret = dma_mapping_error(adpt->netdev->dev.parent,
1360 tpbuf->dma_addr); 1362 tpbuf->dma_addr);
1361 if (ret) 1363 if (ret)
@@ -1371,9 +1373,12 @@ static void emac_tx_fill_tpd(struct emac_adapter *adpt,
1371 if (mapped_len < len) { 1373 if (mapped_len < len) {
1372 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); 1374 tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx);
1373 tpbuf->length = len - mapped_len; 1375 tpbuf->length = len - mapped_len;
1374 tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, 1376 tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent,
1375 skb->data + mapped_len, 1377 virt_to_page(skb->data +
1376 tpbuf->length, DMA_TO_DEVICE); 1378 mapped_len),
1379 offset_in_page(skb->data +
1380 mapped_len),
1381 tpbuf->length, DMA_TO_DEVICE);
1377 ret = dma_mapping_error(adpt->netdev->dev.parent, 1382 ret = dma_mapping_error(adpt->netdev->dev.parent,
1378 tpbuf->dma_addr); 1383 tpbuf->dma_addr);
1379 if (ret) 1384 if (ret)
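
The emac hunks switch the TX head and tail buffer mappings from dma_map_single() on skb->data to dma_map_page() with virt_to_page()/offset_in_page(), paired with dma_unmap_page() on completion. A standalone sketch of that mapping pattern (the demo_* wrappers, device, buffer and length are placeholders, not driver code):

        #include <linux/dma-mapping.h>
        #include <linux/mm.h>

        static dma_addr_t demo_map_tx_buf(struct device *dev, void *buf, size_t len)
        {
                dma_addr_t addr;

                addr = dma_map_page(dev, virt_to_page(buf), offset_in_page(buf),
                                    len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, addr))
                        return 0;       /* mirrors the driver's "0 means not mapped" */

                return addr;
        }

        /* Completion side must use the matching page-based unmap call: */
        static void demo_unmap_tx_buf(struct device *dev, dma_addr_t addr, size_t len)
        {
                if (addr)
                        dma_unmap_page(dev, addr, len, DMA_TO_DEVICE);
        }
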
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 012fb66eed8d..f0afb88d7bc2 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2335,14 +2335,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
2335 pdata = netdev_priv(dev); 2335 pdata = netdev_priv(dev);
2336 BUG_ON(!pdata); 2336 BUG_ON(!pdata);
2337 BUG_ON(!pdata->ioaddr); 2337 BUG_ON(!pdata->ioaddr);
2338 WARN_ON(dev->phydev);
2339 2338
2340 SMSC_TRACE(pdata, ifdown, "Stopping driver"); 2339 SMSC_TRACE(pdata, ifdown, "Stopping driver");
2341 2340
2341 unregister_netdev(dev);
2342
2342 mdiobus_unregister(pdata->mii_bus); 2343 mdiobus_unregister(pdata->mii_bus);
2343 mdiobus_free(pdata->mii_bus); 2344 mdiobus_free(pdata->mii_bus);
2344 2345
2345 unregister_netdev(dev);
2346 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 2346 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2347 "smsc911x-memory"); 2347 "smsc911x-memory");
2348 if (!res) 2348 if (!res)
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index 111e7ca9df56..f5c5984afefb 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1295,7 +1295,7 @@ static int ave_open(struct net_device *ndev)
1295 val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16); 1295 val |= AVE_IIRQC_EN0 | (AVE_INTM_COUNT << 16);
1296 writel(val, priv->base + AVE_IIRQC); 1296 writel(val, priv->base + AVE_IIRQC);
1297 1297
1298 val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX; 1298 val = AVE_GI_RXIINT | AVE_GI_RXOVF | AVE_GI_TX | AVE_GI_RXDROP;
1299 ave_irq_restore(ndev, val); 1299 ave_irq_restore(ndev, val);
1300 1300
1301 napi_enable(&priv->napi_rx); 1301 napi_enable(&priv->napi_rx);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 63d3d6b215f3..a94f50442613 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -312,7 +312,7 @@ static struct vnet *vnet_new(const u64 *local_mac,
312 dev->ethtool_ops = &vnet_ethtool_ops; 312 dev->ethtool_ops = &vnet_ethtool_ops;
313 dev->watchdog_timeo = VNET_TX_TIMEOUT; 313 dev->watchdog_timeo = VNET_TX_TIMEOUT;
314 314
315 dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_GSO_SOFTWARE | 315 dev->hw_features = NETIF_F_TSO | NETIF_F_GSO | NETIF_F_ALL_TSO |
316 NETIF_F_HW_CSUM | NETIF_F_SG; 316 NETIF_F_HW_CSUM | NETIF_F_SG;
317 dev->features = dev->hw_features; 317 dev->features = dev->hw_features;
318 318
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1b1b78fdc138..b2b30c9df037 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1014,7 +1014,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
1014 /* set speed_in input in case RMII mode is used in 100Mbps */ 1014 /* set speed_in input in case RMII mode is used in 100Mbps */
1015 if (phy->speed == 100) 1015 if (phy->speed == 100)
1016 mac_control |= BIT(15); 1016 mac_control |= BIT(15);
1017 else if (phy->speed == 10) 1017 /* in band mode only works in 10Mbps RGMII mode */
1018 else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
1018 mac_control |= BIT(18); /* In Band mode */ 1019 mac_control |= BIT(18); /* In Band mode */
1019 1020
1020 if (priv->rx_pause) 1021 if (priv->rx_pause)
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 0db3bd1ea06f..32861036c3fc 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -173,6 +173,7 @@ struct rndis_device {
173 struct list_head req_list; 173 struct list_head req_list;
174 174
175 struct work_struct mcast_work; 175 struct work_struct mcast_work;
176 u32 filter;
176 177
177 bool link_state; /* 0 - link up, 1 - link down */ 178 bool link_state; /* 0 - link up, 1 - link down */
178 179
@@ -211,7 +212,6 @@ void netvsc_channel_cb(void *context);
211int netvsc_poll(struct napi_struct *napi, int budget); 212int netvsc_poll(struct napi_struct *napi, int budget);
212 213
213void rndis_set_subchannel(struct work_struct *w); 214void rndis_set_subchannel(struct work_struct *w);
214bool rndis_filter_opened(const struct netvsc_device *nvdev);
215int rndis_filter_open(struct netvsc_device *nvdev); 215int rndis_filter_open(struct netvsc_device *nvdev);
216int rndis_filter_close(struct netvsc_device *nvdev); 216int rndis_filter_close(struct netvsc_device *nvdev);
217struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, 217struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 0265d703eb03..7472172823f3 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -90,6 +90,11 @@ static void free_netvsc_device(struct rcu_head *head)
90 = container_of(head, struct netvsc_device, rcu); 90 = container_of(head, struct netvsc_device, rcu);
91 int i; 91 int i;
92 92
93 kfree(nvdev->extension);
94 vfree(nvdev->recv_buf);
95 vfree(nvdev->send_buf);
96 kfree(nvdev->send_section_map);
97
93 for (i = 0; i < VRSS_CHANNEL_MAX; i++) 98 for (i = 0; i < VRSS_CHANNEL_MAX; i++)
94 vfree(nvdev->chan_table[i].mrc.slots); 99 vfree(nvdev->chan_table[i].mrc.slots);
95 100
@@ -211,12 +216,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
211 net_device->recv_buf_gpadl_handle = 0; 216 net_device->recv_buf_gpadl_handle = 0;
212 } 217 }
213 218
214 if (net_device->recv_buf) {
215 /* Free up the receive buffer */
216 vfree(net_device->recv_buf);
217 net_device->recv_buf = NULL;
218 }
219
220 if (net_device->send_buf_gpadl_handle) { 219 if (net_device->send_buf_gpadl_handle) {
221 ret = vmbus_teardown_gpadl(device->channel, 220 ret = vmbus_teardown_gpadl(device->channel,
222 net_device->send_buf_gpadl_handle); 221 net_device->send_buf_gpadl_handle);
@@ -231,12 +230,6 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
231 } 230 }
232 net_device->send_buf_gpadl_handle = 0; 231 net_device->send_buf_gpadl_handle = 0;
233 } 232 }
234 if (net_device->send_buf) {
235 /* Free up the send buffer */
236 vfree(net_device->send_buf);
237 net_device->send_buf = NULL;
238 }
239 kfree(net_device->send_section_map);
240} 233}
241 234
242int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx) 235int netvsc_alloc_recv_comp_ring(struct netvsc_device *net_device, u32 q_idx)
@@ -562,26 +555,29 @@ void netvsc_device_remove(struct hv_device *device)
562 = rtnl_dereference(net_device_ctx->nvdev); 555 = rtnl_dereference(net_device_ctx->nvdev);
563 int i; 556 int i;
564 557
565 cancel_work_sync(&net_device->subchan_work);
566
567 netvsc_revoke_buf(device, net_device); 558 netvsc_revoke_buf(device, net_device);
568 559
569 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL); 560 RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);
570 561
562 /* And disassociate NAPI context from device */
563 for (i = 0; i < net_device->num_chn; i++)
564 netif_napi_del(&net_device->chan_table[i].napi);
565
571 /* 566 /*
572 * At this point, no one should be accessing net_device 567 * At this point, no one should be accessing net_device
573 * except in here 568 * except in here
574 */ 569 */
575 netdev_dbg(ndev, "net device safe to remove\n"); 570 netdev_dbg(ndev, "net device safe to remove\n");
576 571
572 /* older versions require that buffer be revoked before close */
573 if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4)
574 netvsc_teardown_gpadl(device, net_device);
575
577 /* Now, we can close the channel safely */ 576 /* Now, we can close the channel safely */
578 vmbus_close(device->channel); 577 vmbus_close(device->channel);
579 578
580 netvsc_teardown_gpadl(device, net_device); 579 if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4)
581 580 netvsc_teardown_gpadl(device, net_device);
582 /* And dissassociate NAPI context from device */
583 for (i = 0; i < net_device->num_chn; i++)
584 netif_napi_del(&net_device->chan_table[i].napi);
585 581
586 /* Release all resources */ 582 /* Release all resources */
587 free_netvsc_device_rcu(net_device); 583 free_netvsc_device_rcu(net_device);
@@ -645,14 +641,18 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
645 queue_sends = 641 queue_sends =
646 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends); 642 atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
647 643
648 if (net_device->destroy && queue_sends == 0) 644 if (unlikely(net_device->destroy)) {
649 wake_up(&net_device->wait_drain); 645 if (queue_sends == 0)
646 wake_up(&net_device->wait_drain);
647 } else {
648 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
650 649
651 if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) && 650 if (netif_tx_queue_stopped(txq) &&
652 (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER || 651 (hv_ringbuf_avail_percent(&channel->outbound) > RING_AVAIL_PERCENT_HIWATER ||
653 queue_sends < 1)) { 652 queue_sends < 1)) {
654 netif_tx_wake_queue(netdev_get_tx_queue(ndev, q_idx)); 653 netif_tx_wake_queue(txq);
655 ndev_ctx->eth_stats.wake_queue++; 654 ndev_ctx->eth_stats.wake_queue++;
655 }
656 } 656 }
657} 657}
658 658
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cdb78eefab67..f28c85d212ce 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -46,7 +46,10 @@
46 46
47#include "hyperv_net.h" 47#include "hyperv_net.h"
48 48
49#define RING_SIZE_MIN 64 49#define RING_SIZE_MIN 64
50#define RETRY_US_LO 5000
51#define RETRY_US_HI 10000
52#define RETRY_MAX 2000 /* >10 sec */
50 53
51#define LINKCHANGE_INT (2 * HZ) 54#define LINKCHANGE_INT (2 * HZ)
52#define VF_TAKEOVER_INT (HZ / 10) 55#define VF_TAKEOVER_INT (HZ / 10)
@@ -89,15 +92,20 @@ static void netvsc_change_rx_flags(struct net_device *net, int change)
89static void netvsc_set_rx_mode(struct net_device *net) 92static void netvsc_set_rx_mode(struct net_device *net)
90{ 93{
91 struct net_device_context *ndev_ctx = netdev_priv(net); 94 struct net_device_context *ndev_ctx = netdev_priv(net);
92 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); 95 struct net_device *vf_netdev;
93 struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev); 96 struct netvsc_device *nvdev;
94 97
98 rcu_read_lock();
99 vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
95 if (vf_netdev) { 100 if (vf_netdev) {
96 dev_uc_sync(vf_netdev, net); 101 dev_uc_sync(vf_netdev, net);
97 dev_mc_sync(vf_netdev, net); 102 dev_mc_sync(vf_netdev, net);
98 } 103 }
99 104
100 rndis_filter_update(nvdev); 105 nvdev = rcu_dereference(ndev_ctx->nvdev);
106 if (nvdev)
107 rndis_filter_update(nvdev);
108 rcu_read_unlock();
101} 109}
102 110
103static int netvsc_open(struct net_device *net) 111static int netvsc_open(struct net_device *net)
@@ -118,10 +126,8 @@ static int netvsc_open(struct net_device *net)
118 } 126 }
119 127
120 rdev = nvdev->extension; 128 rdev = nvdev->extension;
121 if (!rdev->link_state) { 129 if (!rdev->link_state)
122 netif_carrier_on(net); 130 netif_carrier_on(net);
123 netif_tx_wake_all_queues(net);
124 }
125 131
126 if (vf_netdev) { 132 if (vf_netdev) {
127 /* Setting synthetic device up transparently sets 133 /* Setting synthetic device up transparently sets
@@ -137,36 +143,25 @@ static int netvsc_open(struct net_device *net)
137 return 0; 143 return 0;
138} 144}
139 145
140static int netvsc_close(struct net_device *net) 146static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
141{ 147{
142 struct net_device_context *net_device_ctx = netdev_priv(net); 148 unsigned int retry = 0;
143 struct net_device *vf_netdev 149 int i;
144 = rtnl_dereference(net_device_ctx->vf_netdev);
145 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
146 int ret = 0;
147 u32 aread, i, msec = 10, retry = 0, retry_max = 20;
148 struct vmbus_channel *chn;
149
150 netif_tx_disable(net);
151
152 /* No need to close rndis filter if it is removed already */
153 if (!nvdev)
154 goto out;
155
156 ret = rndis_filter_close(nvdev);
157 if (ret != 0) {
158 netdev_err(net, "unable to close device (ret %d).\n", ret);
159 return ret;
160 }
161 150
162 /* Ensure pending bytes in ring are read */ 151 /* Ensure pending bytes in ring are read */
163 while (true) { 152 for (;;) {
164 aread = 0; 153 u32 aread = 0;
154
165 for (i = 0; i < nvdev->num_chn; i++) { 155 for (i = 0; i < nvdev->num_chn; i++) {
166 chn = nvdev->chan_table[i].channel; 156 struct vmbus_channel *chn
157 = nvdev->chan_table[i].channel;
158
167 if (!chn) 159 if (!chn)
168 continue; 160 continue;
169 161
162 /* make sure receive not running now */
163 napi_synchronize(&nvdev->chan_table[i].napi);
164
170 aread = hv_get_bytes_to_read(&chn->inbound); 165 aread = hv_get_bytes_to_read(&chn->inbound);
171 if (aread) 166 if (aread)
172 break; 167 break;
@@ -176,22 +171,40 @@ static int netvsc_close(struct net_device *net)
176 break; 171 break;
177 } 172 }
178 173
179 retry++; 174 if (aread == 0)
180 if (retry > retry_max || aread == 0) 175 return 0;
181 break;
182 176
183 msleep(msec); 177 if (++retry > RETRY_MAX)
178 return -ETIMEDOUT;
184 179
185 if (msec < 1000) 180 usleep_range(RETRY_US_LO, RETRY_US_HI);
186 msec *= 2;
187 } 181 }
182}
188 183
189 if (aread) { 184static int netvsc_close(struct net_device *net)
190 netdev_err(net, "Ring buffer not empty after closing rndis\n"); 185{
191 ret = -ETIMEDOUT; 186 struct net_device_context *net_device_ctx = netdev_priv(net);
187 struct net_device *vf_netdev
188 = rtnl_dereference(net_device_ctx->vf_netdev);
189 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
190 int ret;
191
192 netif_tx_disable(net);
193
194 /* No need to close rndis filter if it is removed already */
195 if (!nvdev)
196 return 0;
197
198 ret = rndis_filter_close(nvdev);
199 if (ret != 0) {
200 netdev_err(net, "unable to close device (ret %d).\n", ret);
201 return ret;
192 } 202 }
193 203
194out: 204 ret = netvsc_wait_until_empty(nvdev);
205 if (ret)
206 netdev_err(net, "Ring buffer not empty after closing rndis\n");
207
195 if (vf_netdev) 208 if (vf_netdev)
196 dev_close(vf_netdev); 209 dev_close(vf_netdev);
197 210
@@ -840,16 +853,81 @@ static void netvsc_get_channels(struct net_device *net,
840 } 853 }
841} 854}
842 855
856static int netvsc_detach(struct net_device *ndev,
857 struct netvsc_device *nvdev)
858{
859 struct net_device_context *ndev_ctx = netdev_priv(ndev);
860 struct hv_device *hdev = ndev_ctx->device_ctx;
861 int ret;
862
863 /* Don't try continuing to try and setup sub channels */
864 if (cancel_work_sync(&nvdev->subchan_work))
865 nvdev->num_chn = 1;
866
867 /* If device was up (receiving) then shutdown */
868 if (netif_running(ndev)) {
869 netif_tx_disable(ndev);
870
871 ret = rndis_filter_close(nvdev);
872 if (ret) {
873 netdev_err(ndev,
874 "unable to close device (ret %d).\n", ret);
875 return ret;
876 }
877
878 ret = netvsc_wait_until_empty(nvdev);
879 if (ret) {
880 netdev_err(ndev,
881 "Ring buffer not empty after closing rndis\n");
882 return ret;
883 }
884 }
885
886 netif_device_detach(ndev);
887
888 rndis_filter_device_remove(hdev, nvdev);
889
890 return 0;
891}
892
893static int netvsc_attach(struct net_device *ndev,
894 struct netvsc_device_info *dev_info)
895{
896 struct net_device_context *ndev_ctx = netdev_priv(ndev);
897 struct hv_device *hdev = ndev_ctx->device_ctx;
898 struct netvsc_device *nvdev;
899 struct rndis_device *rdev;
900 int ret;
901
902 nvdev = rndis_filter_device_add(hdev, dev_info);
903 if (IS_ERR(nvdev))
904 return PTR_ERR(nvdev);
905
906 /* Note: enable and attach happen when sub-channels setup */
907
908 netif_carrier_off(ndev);
909
910 if (netif_running(ndev)) {
911 ret = rndis_filter_open(nvdev);
912 if (ret)
913 return ret;
914
915 rdev = nvdev->extension;
916 if (!rdev->link_state)
917 netif_carrier_on(ndev);
918 }
919
920 return 0;
921}
922
843static int netvsc_set_channels(struct net_device *net, 923static int netvsc_set_channels(struct net_device *net,
844 struct ethtool_channels *channels) 924 struct ethtool_channels *channels)
845{ 925{
846 struct net_device_context *net_device_ctx = netdev_priv(net); 926 struct net_device_context *net_device_ctx = netdev_priv(net);
847 struct hv_device *dev = net_device_ctx->device_ctx;
848 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 927 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
849 unsigned int orig, count = channels->combined_count; 928 unsigned int orig, count = channels->combined_count;
850 struct netvsc_device_info device_info; 929 struct netvsc_device_info device_info;
851 bool was_opened; 930 int ret;
852 int ret = 0;
853 931
854 /* We do not support separate count for rx, tx, or other */ 932 /* We do not support separate count for rx, tx, or other */
855 if (count == 0 || 933 if (count == 0 ||
@@ -866,9 +944,6 @@ static int netvsc_set_channels(struct net_device *net,
866 return -EINVAL; 944 return -EINVAL;
867 945
868 orig = nvdev->num_chn; 946 orig = nvdev->num_chn;
869 was_opened = rndis_filter_opened(nvdev);
870 if (was_opened)
871 rndis_filter_close(nvdev);
872 947
873 memset(&device_info, 0, sizeof(device_info)); 948 memset(&device_info, 0, sizeof(device_info));
874 device_info.num_chn = count; 949 device_info.num_chn = count;
@@ -877,28 +952,17 @@ static int netvsc_set_channels(struct net_device *net,
877 device_info.recv_sections = nvdev->recv_section_cnt; 952 device_info.recv_sections = nvdev->recv_section_cnt;
878 device_info.recv_section_size = nvdev->recv_section_size; 953 device_info.recv_section_size = nvdev->recv_section_size;
879 954
880 rndis_filter_device_remove(dev, nvdev); 955 ret = netvsc_detach(net, nvdev);
956 if (ret)
957 return ret;
881 958
882 nvdev = rndis_filter_device_add(dev, &device_info); 959 ret = netvsc_attach(net, &device_info);
883 if (IS_ERR(nvdev)) { 960 if (ret) {
884 ret = PTR_ERR(nvdev);
885 device_info.num_chn = orig; 961 device_info.num_chn = orig;
886 nvdev = rndis_filter_device_add(dev, &device_info); 962 if (netvsc_attach(net, &device_info))
887 963 netdev_err(net, "restoring channel setting failed\n");
888 if (IS_ERR(nvdev)) {
889 netdev_err(net, "restoring channel setting failed: %ld\n",
890 PTR_ERR(nvdev));
891 return ret;
892 }
893 } 964 }
894 965
895 if (was_opened)
896 rndis_filter_open(nvdev);
897
898 /* We may have missed link change notifications */
899 net_device_ctx->last_reconfig = 0;
900 schedule_delayed_work(&net_device_ctx->dwork, 0);
901
902 return ret; 966 return ret;
903} 967}
904 968
@@ -964,10 +1028,8 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
964 struct net_device_context *ndevctx = netdev_priv(ndev); 1028 struct net_device_context *ndevctx = netdev_priv(ndev);
965 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev); 1029 struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
966 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1030 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
967 struct hv_device *hdev = ndevctx->device_ctx;
968 int orig_mtu = ndev->mtu; 1031 int orig_mtu = ndev->mtu;
969 struct netvsc_device_info device_info; 1032 struct netvsc_device_info device_info;
970 bool was_opened;
971 int ret = 0; 1033 int ret = 0;
972 1034
973 if (!nvdev || nvdev->destroy) 1035 if (!nvdev || nvdev->destroy)
@@ -980,11 +1042,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
980 return ret; 1042 return ret;
981 } 1043 }
982 1044
983 netif_device_detach(ndev);
984 was_opened = rndis_filter_opened(nvdev);
985 if (was_opened)
986 rndis_filter_close(nvdev);
987
988 memset(&device_info, 0, sizeof(device_info)); 1045 memset(&device_info, 0, sizeof(device_info));
989 device_info.num_chn = nvdev->num_chn; 1046 device_info.num_chn = nvdev->num_chn;
990 device_info.send_sections = nvdev->send_section_cnt; 1047 device_info.send_sections = nvdev->send_section_cnt;
@@ -992,35 +1049,27 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
992 device_info.recv_sections = nvdev->recv_section_cnt; 1049 device_info.recv_sections = nvdev->recv_section_cnt;
993 device_info.recv_section_size = nvdev->recv_section_size; 1050 device_info.recv_section_size = nvdev->recv_section_size;
994 1051
995 rndis_filter_device_remove(hdev, nvdev); 1052 ret = netvsc_detach(ndev, nvdev);
1053 if (ret)
1054 goto rollback_vf;
996 1055
997 ndev->mtu = mtu; 1056 ndev->mtu = mtu;
998 1057
999 nvdev = rndis_filter_device_add(hdev, &device_info); 1058 ret = netvsc_attach(ndev, &device_info);
1000 if (IS_ERR(nvdev)) { 1059 if (ret)
1001 ret = PTR_ERR(nvdev); 1060 goto rollback;
1002
1003 /* Attempt rollback to original MTU */
1004 ndev->mtu = orig_mtu;
1005 nvdev = rndis_filter_device_add(hdev, &device_info);
1006
1007 if (vf_netdev)
1008 dev_set_mtu(vf_netdev, orig_mtu);
1009
1010 if (IS_ERR(nvdev)) {
1011 netdev_err(ndev, "restoring mtu failed: %ld\n",
1012 PTR_ERR(nvdev));
1013 return ret;
1014 }
1015 }
1016 1061
1017 if (was_opened) 1062 return 0;
1018 rndis_filter_open(nvdev);
1019 1063
1020 netif_device_attach(ndev); 1064rollback:
1065 /* Attempt rollback to original MTU */
1066 ndev->mtu = orig_mtu;
1021 1067
1022 /* We may have missed link change notifications */ 1068 if (netvsc_attach(ndev, &device_info))
1023 schedule_delayed_work(&ndevctx->dwork, 0); 1069 netdev_err(ndev, "restoring mtu failed\n");
1070rollback_vf:
1071 if (vf_netdev)
1072 dev_set_mtu(vf_netdev, orig_mtu);
1024 1073
1025 return ret; 1074 return ret;
1026} 1075}
@@ -1526,11 +1575,9 @@ static int netvsc_set_ringparam(struct net_device *ndev,
1526{ 1575{
1527 struct net_device_context *ndevctx = netdev_priv(ndev); 1576 struct net_device_context *ndevctx = netdev_priv(ndev);
1528 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev); 1577 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1529 struct hv_device *hdev = ndevctx->device_ctx;
1530 struct netvsc_device_info device_info; 1578 struct netvsc_device_info device_info;
1531 struct ethtool_ringparam orig; 1579 struct ethtool_ringparam orig;
1532 u32 new_tx, new_rx; 1580 u32 new_tx, new_rx;
1533 bool was_opened;
1534 int ret = 0; 1581 int ret = 0;
1535 1582
1536 if (!nvdev || nvdev->destroy) 1583 if (!nvdev || nvdev->destroy)
@@ -1555,34 +1602,18 @@ static int netvsc_set_ringparam(struct net_device *ndev,
1555 device_info.recv_sections = new_rx; 1602 device_info.recv_sections = new_rx;
1556 device_info.recv_section_size = nvdev->recv_section_size; 1603 device_info.recv_section_size = nvdev->recv_section_size;
1557 1604
1558 netif_device_detach(ndev); 1605 ret = netvsc_detach(ndev, nvdev);
1559 was_opened = rndis_filter_opened(nvdev); 1606 if (ret)
1560 if (was_opened) 1607 return ret;
1561 rndis_filter_close(nvdev);
1562
1563 rndis_filter_device_remove(hdev, nvdev);
1564
1565 nvdev = rndis_filter_device_add(hdev, &device_info);
1566 if (IS_ERR(nvdev)) {
1567 ret = PTR_ERR(nvdev);
1568 1608
1609 ret = netvsc_attach(ndev, &device_info);
1610 if (ret) {
1569 device_info.send_sections = orig.tx_pending; 1611 device_info.send_sections = orig.tx_pending;
1570 device_info.recv_sections = orig.rx_pending; 1612 device_info.recv_sections = orig.rx_pending;
1571 nvdev = rndis_filter_device_add(hdev, &device_info);
1572 if (IS_ERR(nvdev)) {
1573 netdev_err(ndev, "restoring ringparam failed: %ld\n",
1574 PTR_ERR(nvdev));
1575 return ret;
1576 }
1577 }
1578
1579 if (was_opened)
1580 rndis_filter_open(nvdev);
1581 netif_device_attach(ndev);
1582 1613
1583 /* We may have missed link change notifications */ 1614 if (netvsc_attach(ndev, &device_info))
1584 ndevctx->last_reconfig = 0; 1615 netdev_err(ndev, "restoring ringparam failed");
1585 schedule_delayed_work(&ndevctx->dwork, 0); 1616 }
1586 1617
1587 return ret; 1618 return ret;
1588} 1619}
@@ -1846,8 +1877,12 @@ static void __netvsc_vf_setup(struct net_device *ndev,
1846 1877
1847 /* set multicast etc flags on VF */ 1878 /* set multicast etc flags on VF */
1848 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); 1879 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
1880
1881 /* sync address list from ndev to VF */
1882 netif_addr_lock_bh(ndev);
1849 dev_uc_sync(vf_netdev, ndev); 1883 dev_uc_sync(vf_netdev, ndev);
1850 dev_mc_sync(vf_netdev, ndev); 1884 dev_mc_sync(vf_netdev, ndev);
1885 netif_addr_unlock_bh(ndev);
1851 1886
1852 if (netif_running(ndev)) { 1887 if (netif_running(ndev)) {
1853 ret = dev_open(vf_netdev); 1888 ret = dev_open(vf_netdev);
@@ -2063,8 +2098,8 @@ no_net:
2063static int netvsc_remove(struct hv_device *dev) 2098static int netvsc_remove(struct hv_device *dev)
2064{ 2099{
2065 struct net_device_context *ndev_ctx; 2100 struct net_device_context *ndev_ctx;
2066 struct net_device *vf_netdev; 2101 struct net_device *vf_netdev, *net;
2067 struct net_device *net; 2102 struct netvsc_device *nvdev;
2068 2103
2069 net = hv_get_drvdata(dev); 2104 net = hv_get_drvdata(dev);
2070 if (net == NULL) { 2105 if (net == NULL) {
@@ -2074,10 +2109,14 @@ static int netvsc_remove(struct hv_device *dev)
2074 2109
2075 ndev_ctx = netdev_priv(net); 2110 ndev_ctx = netdev_priv(net);
2076 2111
2077 netif_device_detach(net);
2078
2079 cancel_delayed_work_sync(&ndev_ctx->dwork); 2112 cancel_delayed_work_sync(&ndev_ctx->dwork);
2080 2113
2114 rcu_read_lock();
2115 nvdev = rcu_dereference(ndev_ctx->nvdev);
2116
2117 if (nvdev)
2118 cancel_work_sync(&nvdev->subchan_work);
2119
2081 /* 2120 /*
2082 * Call to the vsc driver to let it know that the device is being 2121 * Call to the vsc driver to let it know that the device is being
2083 * removed. Also blocks mtu and channel changes. 2122 * removed. Also blocks mtu and channel changes.
@@ -2087,11 +2126,13 @@ static int netvsc_remove(struct hv_device *dev)
2087 if (vf_netdev) 2126 if (vf_netdev)
2088 netvsc_unregister_vf(vf_netdev); 2127 netvsc_unregister_vf(vf_netdev);
2089 2128
2129 if (nvdev)
2130 rndis_filter_device_remove(dev, nvdev);
2131
2090 unregister_netdevice(net); 2132 unregister_netdevice(net);
2091 2133
2092 rndis_filter_device_remove(dev,
2093 rtnl_dereference(ndev_ctx->nvdev));
2094 rtnl_unlock(); 2134 rtnl_unlock();
2135 rcu_read_unlock();
2095 2136
2096 hv_set_drvdata(dev, NULL); 2137 hv_set_drvdata(dev, NULL);
2097 2138
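
netvsc_drv.c replaces the open-coded remove/add sequences in the channel-count, MTU and ring-size paths with netvsc_detach()/netvsc_attach(), so every reconfiguration follows the same detach, apply, attach, roll-back-on-failure shape, and netvsc_wait_until_empty() bounds the ring drain with usleep_range() instead of a doubling msleep(). A sketch of the resulting reconfigure flow, where the demo_* wrappers stand in for the real helpers:

        #include <linux/netdevice.h>

        struct demo_cfg { unsigned int num_chn; };      /* illustrative settings blob */

        static int demo_detach(struct net_device *ndev)                 /* netvsc_detach() stand-in */
        {
                return 0;
        }

        static int demo_attach(struct net_device *ndev, struct demo_cfg *cfg) /* netvsc_attach() stand-in */
        {
                return 0;
        }

        static int demo_reconfigure(struct net_device *ndev,
                                    struct demo_cfg *new_cfg,
                                    struct demo_cfg *old_cfg)
        {
                int ret;

                ret = demo_detach(ndev);        /* close, drain rings, remove RNDIS device */
                if (ret)
                        return ret;

                ret = demo_attach(ndev, new_cfg);
                if (ret) {
                        /* best-effort rollback to the previous configuration */
                        if (demo_attach(ndev, old_cfg))
                                netdev_err(ndev, "restoring settings failed\n");
                }

                return ret;
        }
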
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 8927c483c217..a6ec41c399d6 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -264,13 +264,23 @@ static void rndis_set_link_state(struct rndis_device *rdev,
264 } 264 }
265} 265}
266 266
267static void rndis_filter_receive_response(struct rndis_device *dev, 267static void rndis_filter_receive_response(struct net_device *ndev,
268 struct rndis_message *resp) 268 struct netvsc_device *nvdev,
269 const struct rndis_message *resp)
269{ 270{
271 struct rndis_device *dev = nvdev->extension;
270 struct rndis_request *request = NULL; 272 struct rndis_request *request = NULL;
271 bool found = false; 273 bool found = false;
272 unsigned long flags; 274 unsigned long flags;
273 struct net_device *ndev = dev->ndev; 275
276 /* This should never happen, it means a control message
277 * response was received after the device was removed.
278 */
279 if (dev->state == RNDIS_DEV_UNINITIALIZED) {
280 netdev_err(ndev,
281 "got rndis message uninitialized\n");
282 return;
283 }
274 284
275 spin_lock_irqsave(&dev->request_lock, flags); 285 spin_lock_irqsave(&dev->request_lock, flags);
276 list_for_each_entry(request, &dev->req_list, list_ent) { 286 list_for_each_entry(request, &dev->req_list, list_ent) {
@@ -352,7 +362,6 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
352 362
353static int rndis_filter_receive_data(struct net_device *ndev, 363static int rndis_filter_receive_data(struct net_device *ndev,
354 struct netvsc_device *nvdev, 364 struct netvsc_device *nvdev,
355 struct rndis_device *dev,
356 struct rndis_message *msg, 365 struct rndis_message *msg,
357 struct vmbus_channel *channel, 366 struct vmbus_channel *channel,
358 void *data, u32 data_buflen) 367 void *data, u32 data_buflen)
@@ -372,7 +381,7 @@ static int rndis_filter_receive_data(struct net_device *ndev,
372 * should be the data packet size plus the trailer padding size 381 * should be the data packet size plus the trailer padding size
373 */ 382 */
374 if (unlikely(data_buflen < rndis_pkt->data_len)) { 383 if (unlikely(data_buflen < rndis_pkt->data_len)) {
375 netdev_err(dev->ndev, "rndis message buffer " 384 netdev_err(ndev, "rndis message buffer "
376 "overflow detected (got %u, min %u)" 385 "overflow detected (got %u, min %u)"
377 "...dropping this message!\n", 386 "...dropping this message!\n",
378 data_buflen, rndis_pkt->data_len); 387 data_buflen, rndis_pkt->data_len);
@@ -400,35 +409,20 @@ int rndis_filter_receive(struct net_device *ndev,
400 void *data, u32 buflen) 409 void *data, u32 buflen)
401{ 410{
402 struct net_device_context *net_device_ctx = netdev_priv(ndev); 411 struct net_device_context *net_device_ctx = netdev_priv(ndev);
403 struct rndis_device *rndis_dev = net_dev->extension;
404 struct rndis_message *rndis_msg = data; 412 struct rndis_message *rndis_msg = data;
405 413
406 /* Make sure the rndis device state is initialized */
407 if (unlikely(!rndis_dev)) {
408 netif_dbg(net_device_ctx, rx_err, ndev,
409 "got rndis message but no rndis device!\n");
410 return NVSP_STAT_FAIL;
411 }
412
413 if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) {
414 netif_dbg(net_device_ctx, rx_err, ndev,
415 "got rndis message uninitialized\n");
416 return NVSP_STAT_FAIL;
417 }
418
419 if (netif_msg_rx_status(net_device_ctx)) 414 if (netif_msg_rx_status(net_device_ctx))
420 dump_rndis_message(ndev, rndis_msg); 415 dump_rndis_message(ndev, rndis_msg);
421 416
422 switch (rndis_msg->ndis_msg_type) { 417 switch (rndis_msg->ndis_msg_type) {
423 case RNDIS_MSG_PACKET: 418 case RNDIS_MSG_PACKET:
424 return rndis_filter_receive_data(ndev, net_dev, 419 return rndis_filter_receive_data(ndev, net_dev, rndis_msg,
425 rndis_dev, rndis_msg,
426 channel, data, buflen); 420 channel, data, buflen);
427 case RNDIS_MSG_INIT_C: 421 case RNDIS_MSG_INIT_C:
428 case RNDIS_MSG_QUERY_C: 422 case RNDIS_MSG_QUERY_C:
429 case RNDIS_MSG_SET_C: 423 case RNDIS_MSG_SET_C:
430 /* completion msgs */ 424 /* completion msgs */
431 rndis_filter_receive_response(rndis_dev, rndis_msg); 425 rndis_filter_receive_response(ndev, net_dev, rndis_msg);
432 break; 426 break;
433 427
434 case RNDIS_MSG_INDICATE: 428 case RNDIS_MSG_INDICATE:
@@ -825,13 +819,15 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
825 struct rndis_set_request *set; 819 struct rndis_set_request *set;
826 int ret; 820 int ret;
827 821
822 if (dev->filter == new_filter)
823 return 0;
824
828 request = get_rndis_request(dev, RNDIS_MSG_SET, 825 request = get_rndis_request(dev, RNDIS_MSG_SET,
829 RNDIS_MESSAGE_SIZE(struct rndis_set_request) + 826 RNDIS_MESSAGE_SIZE(struct rndis_set_request) +
830 sizeof(u32)); 827 sizeof(u32));
831 if (!request) 828 if (!request)
832 return -ENOMEM; 829 return -ENOMEM;
833 830
834
835 /* Setup the rndis set */ 831 /* Setup the rndis set */
836 set = &request->request_msg.msg.set_req; 832 set = &request->request_msg.msg.set_req;
837 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER; 833 set->oid = RNDIS_OID_GEN_CURRENT_PACKET_FILTER;
@@ -842,8 +838,10 @@ static int rndis_filter_set_packet_filter(struct rndis_device *dev,
842 &new_filter, sizeof(u32)); 838 &new_filter, sizeof(u32));
843 839
844 ret = rndis_filter_send_request(dev, request); 840 ret = rndis_filter_send_request(dev, request);
845 if (ret == 0) 841 if (ret == 0) {
846 wait_for_completion(&request->wait_event); 842 wait_for_completion(&request->wait_event);
843 dev->filter = new_filter;
844 }
847 845
848 put_rndis_request(dev, request); 846 put_rndis_request(dev, request);
849 847
@@ -861,9 +859,9 @@ static void rndis_set_multicast(struct work_struct *w)
861 filter = NDIS_PACKET_TYPE_PROMISCUOUS; 859 filter = NDIS_PACKET_TYPE_PROMISCUOUS;
862 } else { 860 } else {
863 if (flags & IFF_ALLMULTI) 861 if (flags & IFF_ALLMULTI)
864 flags |= NDIS_PACKET_TYPE_ALL_MULTICAST; 862 filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
865 if (flags & IFF_BROADCAST) 863 if (flags & IFF_BROADCAST)
866 flags |= NDIS_PACKET_TYPE_BROADCAST; 864 filter |= NDIS_PACKET_TYPE_BROADCAST;
867 } 865 }
868 866
869 rndis_filter_set_packet_filter(rdev, filter); 867 rndis_filter_set_packet_filter(rdev, filter);
@@ -1120,6 +1118,7 @@ void rndis_set_subchannel(struct work_struct *w)
1120 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) 1118 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1121 ndev_ctx->tx_table[i] = i % nvdev->num_chn; 1119 ndev_ctx->tx_table[i] = i % nvdev->num_chn;
1122 1120
1121 netif_device_attach(ndev);
1123 rtnl_unlock(); 1122 rtnl_unlock();
1124 return; 1123 return;
1125 1124
@@ -1130,6 +1129,8 @@ failed:
1130 1129
1131 nvdev->max_chn = 1; 1130 nvdev->max_chn = 1;
1132 nvdev->num_chn = 1; 1131 nvdev->num_chn = 1;
1132
1133 netif_device_attach(ndev);
1133unlock: 1134unlock:
1134 rtnl_unlock(); 1135 rtnl_unlock();
1135} 1136}
@@ -1332,6 +1333,10 @@ out:
1332 net_device->num_chn = 1; 1333 net_device->num_chn = 1;
1333 } 1334 }
1334 1335
1336 /* No sub channels, device is ready */
1337 if (net_device->num_chn == 1)
1338 netif_device_attach(net);
1339
1335 return net_device; 1340 return net_device;
1336 1341
1337err_dev_remv: 1342err_dev_remv:
@@ -1344,16 +1349,12 @@ void rndis_filter_device_remove(struct hv_device *dev,
1344{ 1349{
1345 struct rndis_device *rndis_dev = net_dev->extension; 1350 struct rndis_device *rndis_dev = net_dev->extension;
1346 1351
1347 /* Don't try and setup sub channels if about to halt */
1348 cancel_work_sync(&net_dev->subchan_work);
1349
1350 /* Halt and release the rndis device */ 1352 /* Halt and release the rndis device */
1351 rndis_filter_halt_device(rndis_dev); 1353 rndis_filter_halt_device(rndis_dev);
1352 1354
1353 net_dev->extension = NULL; 1355 net_dev->extension = NULL;
1354 1356
1355 netvsc_device_remove(dev); 1357 netvsc_device_remove(dev);
1356 kfree(rndis_dev);
1357} 1358}
1358 1359
1359int rndis_filter_open(struct netvsc_device *nvdev) 1360int rndis_filter_open(struct netvsc_device *nvdev)
@@ -1371,10 +1372,3 @@ int rndis_filter_close(struct netvsc_device *nvdev)
1371 1372
1372 return rndis_filter_close_device(nvdev->extension); 1373 return rndis_filter_close_device(nvdev->extension);
1373} 1374}
1374
1375bool rndis_filter_opened(const struct netvsc_device *nvdev)
1376{
1377 const struct rndis_device *dev = nvdev->extension;
1378
1379 return dev->state == RNDIS_DEV_DATAINITIALIZED;
1380}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 7de88b33d5b9..9cbb0c8a896a 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
3277 3277
3278 err = netdev_upper_dev_link(real_dev, dev, extack); 3278 err = netdev_upper_dev_link(real_dev, dev, extack);
3279 if (err < 0) 3279 if (err < 0)
3280 goto unregister; 3280 goto put_dev;
3281 3281
3282 /* need to be already registered so that ->init has run and 3282 /* need to be already registered so that ->init has run and
3283 * the MAC addr is set 3283 * the MAC addr is set
@@ -3316,7 +3316,8 @@ del_dev:
3316 macsec_del_dev(macsec); 3316 macsec_del_dev(macsec);
3317unlink: 3317unlink:
3318 netdev_upper_dev_unlink(real_dev, dev); 3318 netdev_upper_dev_unlink(real_dev, dev);
3319unregister: 3319put_dev:
3320 dev_put(real_dev);
3320 unregister_netdevice(dev); 3321 unregister_netdevice(dev);
3321 return err; 3322 return err;
3322} 3323}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 8fc02d9db3d0..725f4b4afc6d 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1036,7 +1036,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
1036 lowerdev_features &= (features | ~NETIF_F_LRO); 1036 lowerdev_features &= (features | ~NETIF_F_LRO);
1037 features = netdev_increment_features(lowerdev_features, features, mask); 1037 features = netdev_increment_features(lowerdev_features, features, mask);
1038 features |= ALWAYS_ON_FEATURES; 1038 features |= ALWAYS_ON_FEATURES;
1039 features &= ~NETIF_F_NETNS_LOCAL; 1039 features &= (ALWAYS_ON_FEATURES | MACVLAN_FEATURES);
1040 1040
1041 return features; 1041 return features;
1042} 1042}
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 171010eb4d9c..5ad130c3da43 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -341,8 +341,8 @@ void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
341 unsigned int i; 341 unsigned int i;
342 342
343 for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++) 343 for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
344 memcpy(data + i * ETH_GSTRING_LEN, 344 strlcpy(data + i * ETH_GSTRING_LEN,
345 bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN); 345 bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
346} 346}
347EXPORT_SYMBOL_GPL(bcm_phy_get_strings); 347EXPORT_SYMBOL_GPL(bcm_phy_get_strings);
348 348
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 22d9bc9c33a4..0e0978d8a0eb 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1452,8 +1452,8 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data)
1452 int i; 1452 int i;
1453 1453
1454 for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) { 1454 for (i = 0; i < ARRAY_SIZE(marvell_hw_stats); i++) {
1455 memcpy(data + i * ETH_GSTRING_LEN, 1455 strlcpy(data + i * ETH_GSTRING_LEN,
1456 marvell_hw_stats[i].string, ETH_GSTRING_LEN); 1456 marvell_hw_stats[i].string, ETH_GSTRING_LEN);
1457 } 1457 }
1458} 1458}
1459 1459
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 0f45310300f6..f41b224a9cdb 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -635,25 +635,6 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev)
635 return 0; 635 return 0;
636} 636}
637 637
638/* This routine returns -1 as an indication to the caller that the
639 * Micrel ksz9021 10/100/1000 PHY does not support standard IEEE
640 * MMD extended PHY registers.
641 */
642static int
643ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum)
644{
645 return -1;
646}
647
648/* This routine does nothing since the Micrel ksz9021 does not support
649 * standard IEEE MMD extended PHY registers.
650 */
651static int
652ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int devad, u16 regnum, u16 val)
653{
654 return -1;
655}
656
657static int kszphy_get_sset_count(struct phy_device *phydev) 638static int kszphy_get_sset_count(struct phy_device *phydev)
658{ 639{
659 return ARRAY_SIZE(kszphy_hw_stats); 640 return ARRAY_SIZE(kszphy_hw_stats);
@@ -664,8 +645,8 @@ static void kszphy_get_strings(struct phy_device *phydev, u8 *data)
664 int i; 645 int i;
665 646
666 for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) { 647 for (i = 0; i < ARRAY_SIZE(kszphy_hw_stats); i++) {
667 memcpy(data + i * ETH_GSTRING_LEN, 648 strlcpy(data + i * ETH_GSTRING_LEN,
668 kszphy_hw_stats[i].string, ETH_GSTRING_LEN); 649 kszphy_hw_stats[i].string, ETH_GSTRING_LEN);
669 } 650 }
670} 651}
671 652
@@ -946,8 +927,8 @@ static struct phy_driver ksphy_driver[] = {
946 .get_stats = kszphy_get_stats, 927 .get_stats = kszphy_get_stats,
947 .suspend = genphy_suspend, 928 .suspend = genphy_suspend,
948 .resume = genphy_resume, 929 .resume = genphy_resume,
949 .read_mmd = ksz9021_rd_mmd_phyreg, 930 .read_mmd = genphy_read_mmd_unsupported,
950 .write_mmd = ksz9021_wr_mmd_phyreg, 931 .write_mmd = genphy_write_mmd_unsupported,
951}, { 932}, {
952 .phy_id = PHY_ID_KSZ9031, 933 .phy_id = PHY_ID_KSZ9031,
953 .phy_id_mask = MICREL_PHY_ID_MASK, 934 .phy_id_mask = MICREL_PHY_ID_MASK,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index a6f924fee584..9aabfa1a455a 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -618,6 +618,77 @@ static void phy_error(struct phy_device *phydev)
618} 618}
619 619
620/** 620/**
621 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
622 * @phydev: target phy_device struct
623 */
624static int phy_disable_interrupts(struct phy_device *phydev)
625{
626 int err;
627
628 /* Disable PHY interrupts */
629 err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
630 if (err)
631 goto phy_err;
632
633 /* Clear the interrupt */
634 err = phy_clear_interrupt(phydev);
635 if (err)
636 goto phy_err;
637
638 return 0;
639
640phy_err:
641 phy_error(phydev);
642
643 return err;
644}
645
646/**
647 * phy_change - Called by the phy_interrupt to handle PHY changes
648 * @phydev: phy_device struct that interrupted
649 */
650static irqreturn_t phy_change(struct phy_device *phydev)
651{
652 if (phy_interrupt_is_valid(phydev)) {
653 if (phydev->drv->did_interrupt &&
654 !phydev->drv->did_interrupt(phydev))
655 return IRQ_NONE;
656
657 if (phydev->state == PHY_HALTED)
658 if (phy_disable_interrupts(phydev))
659 goto phy_err;
660 }
661
662 mutex_lock(&phydev->lock);
663 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
664 phydev->state = PHY_CHANGELINK;
665 mutex_unlock(&phydev->lock);
666
667 /* reschedule state queue work to run as soon as possible */
668 phy_trigger_machine(phydev, true);
669
670 if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
671 goto phy_err;
672 return IRQ_HANDLED;
673
674phy_err:
675 phy_error(phydev);
676 return IRQ_NONE;
677}
678
679/**
680 * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
681 * @work: work_struct that describes the work to be done
682 */
683void phy_change_work(struct work_struct *work)
684{
685 struct phy_device *phydev =
686 container_of(work, struct phy_device, phy_queue);
687
688 phy_change(phydev);
689}
690
691/**
621 * phy_interrupt - PHY interrupt handler 692 * phy_interrupt - PHY interrupt handler
622 * @irq: interrupt line 693 * @irq: interrupt line
623 * @phy_dat: phy_device pointer 694 * @phy_dat: phy_device pointer
@@ -632,9 +703,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
632 if (PHY_HALTED == phydev->state) 703 if (PHY_HALTED == phydev->state)
633 return IRQ_NONE; /* It can't be ours. */ 704 return IRQ_NONE; /* It can't be ours. */
634 705
635 phy_change(phydev); 706 return phy_change(phydev);
636
637 return IRQ_HANDLED;
638} 707}
639 708
640/** 709/**
@@ -652,32 +721,6 @@ static int phy_enable_interrupts(struct phy_device *phydev)
652} 721}
653 722
654/** 723/**
655 * phy_disable_interrupts - Disable the PHY interrupts from the PHY side
656 * @phydev: target phy_device struct
657 */
658static int phy_disable_interrupts(struct phy_device *phydev)
659{
660 int err;
661
662 /* Disable PHY interrupts */
663 err = phy_config_interrupt(phydev, PHY_INTERRUPT_DISABLED);
664 if (err)
665 goto phy_err;
666
667 /* Clear the interrupt */
668 err = phy_clear_interrupt(phydev);
669 if (err)
670 goto phy_err;
671
672 return 0;
673
674phy_err:
675 phy_error(phydev);
676
677 return err;
678}
679
680/**
681 * phy_start_interrupts - request and enable interrupts for a PHY device 724 * phy_start_interrupts - request and enable interrupts for a PHY device
682 * @phydev: target phy_device struct 725 * @phydev: target phy_device struct
683 * 726 *
@@ -720,50 +763,6 @@ int phy_stop_interrupts(struct phy_device *phydev)
720EXPORT_SYMBOL(phy_stop_interrupts); 763EXPORT_SYMBOL(phy_stop_interrupts);
721 764
722/** 765/**
723 * phy_change - Called by the phy_interrupt to handle PHY changes
724 * @phydev: phy_device struct that interrupted
725 */
726void phy_change(struct phy_device *phydev)
727{
728 if (phy_interrupt_is_valid(phydev)) {
729 if (phydev->drv->did_interrupt &&
730 !phydev->drv->did_interrupt(phydev))
731 return;
732
733 if (phydev->state == PHY_HALTED)
734 if (phy_disable_interrupts(phydev))
735 goto phy_err;
736 }
737
738 mutex_lock(&phydev->lock);
739 if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state))
740 phydev->state = PHY_CHANGELINK;
741 mutex_unlock(&phydev->lock);
742
743 /* reschedule state queue work to run as soon as possible */
744 phy_trigger_machine(phydev, true);
745
746 if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev))
747 goto phy_err;
748 return;
749
750phy_err:
751 phy_error(phydev);
752}
753
754/**
755 * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes
756 * @work: work_struct that describes the work to be done
757 */
758void phy_change_work(struct work_struct *work)
759{
760 struct phy_device *phydev =
761 container_of(work, struct phy_device, phy_queue);
762
763 phy_change(phydev);
764}
765
766/**
767 * phy_stop - Bring down the PHY link, and stop checking the status 766 * phy_stop - Bring down the PHY link, and stop checking the status
768 * @phydev: target phy_device struct 767 * @phydev: target phy_device struct
769 */ 768 */
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 478405e544cc..74664a6c0cdc 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1012,10 +1012,17 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
1012 err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj, 1012 err = sysfs_create_link(&phydev->mdio.dev.kobj, &dev->dev.kobj,
1013 "attached_dev"); 1013 "attached_dev");
1014 if (!err) { 1014 if (!err) {
1015 err = sysfs_create_link(&dev->dev.kobj, &phydev->mdio.dev.kobj, 1015 err = sysfs_create_link_nowarn(&dev->dev.kobj,
1016 "phydev"); 1016 &phydev->mdio.dev.kobj,
1017 if (err) 1017 "phydev");
1018 goto error; 1018 if (err) {
1019 dev_err(&dev->dev, "could not add device link to %s err %d\n",
1020 kobject_name(&phydev->mdio.dev.kobj),
1021 err);
1022 /* non-fatal - some net drivers can use one netdevice
1023 * with more than one phy
1024 */
1025 }
1019 1026
1020 phydev->sysfs_links = true; 1027 phydev->sysfs_links = true;
1021 } 1028 }
@@ -1666,6 +1673,23 @@ int genphy_config_init(struct phy_device *phydev)
1666} 1673}
1667EXPORT_SYMBOL(genphy_config_init); 1674EXPORT_SYMBOL(genphy_config_init);
1668 1675
1676/* This is used for the phy device which doesn't support the MMD extended
1677 * register access, but it does have side effects when we are trying to access
1678 * the MMD register via the indirect method.
1679 */
1680int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, u16 regnum)
1681{
1682 return -EOPNOTSUPP;
1683}
1684EXPORT_SYMBOL(genphy_read_mmd_unsupported);
1685
1686int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
1687 u16 regnum, u16 val)
1688{
1689 return -EOPNOTSUPP;
1690}
1691EXPORT_SYMBOL(genphy_write_mmd_unsupported);
1692
1669int genphy_suspend(struct phy_device *phydev) 1693int genphy_suspend(struct phy_device *phydev)
1670{ 1694{
1671 return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN); 1695 return phy_set_bits(phydev, MII_BMCR, BMCR_PDOWN);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index ee3ca4a2f12b..9f48ecf9c627 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -172,6 +172,8 @@ static struct phy_driver realtek_drvs[] = {
172 .flags = PHY_HAS_INTERRUPT, 172 .flags = PHY_HAS_INTERRUPT,
173 .ack_interrupt = &rtl821x_ack_interrupt, 173 .ack_interrupt = &rtl821x_ack_interrupt,
174 .config_intr = &rtl8211b_config_intr, 174 .config_intr = &rtl8211b_config_intr,
175 .read_mmd = &genphy_read_mmd_unsupported,
176 .write_mmd = &genphy_write_mmd_unsupported,
175 }, { 177 }, {
176 .phy_id = 0x001cc914, 178 .phy_id = 0x001cc914,
177 .name = "RTL8211DN Gigabit Ethernet", 179 .name = "RTL8211DN Gigabit Ethernet",
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fa2a9bdd1866..da1937832c99 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -257,7 +257,7 @@ struct ppp_net {
257/* Prototypes. */ 257/* Prototypes. */
258static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, 258static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
259 struct file *file, unsigned int cmd, unsigned long arg); 259 struct file *file, unsigned int cmd, unsigned long arg);
260static void ppp_xmit_process(struct ppp *ppp); 260static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb);
261static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb); 261static void ppp_send_frame(struct ppp *ppp, struct sk_buff *skb);
262static void ppp_push(struct ppp *ppp); 262static void ppp_push(struct ppp *ppp);
263static void ppp_channel_push(struct channel *pch); 263static void ppp_channel_push(struct channel *pch);
@@ -513,13 +513,12 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
513 goto out; 513 goto out;
514 } 514 }
515 515
516 skb_queue_tail(&pf->xq, skb);
517
518 switch (pf->kind) { 516 switch (pf->kind) {
519 case INTERFACE: 517 case INTERFACE:
520 ppp_xmit_process(PF_TO_PPP(pf)); 518 ppp_xmit_process(PF_TO_PPP(pf), skb);
521 break; 519 break;
522 case CHANNEL: 520 case CHANNEL:
521 skb_queue_tail(&pf->xq, skb);
523 ppp_channel_push(PF_TO_CHANNEL(pf)); 522 ppp_channel_push(PF_TO_CHANNEL(pf));
524 break; 523 break;
525 } 524 }
@@ -1267,8 +1266,8 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
1267 put_unaligned_be16(proto, pp); 1266 put_unaligned_be16(proto, pp);
1268 1267
1269 skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev))); 1268 skb_scrub_packet(skb, !net_eq(ppp->ppp_net, dev_net(dev)));
1270 skb_queue_tail(&ppp->file.xq, skb); 1269 ppp_xmit_process(ppp, skb);
1271 ppp_xmit_process(ppp); 1270
1272 return NETDEV_TX_OK; 1271 return NETDEV_TX_OK;
1273 1272
1274 outf: 1273 outf:
@@ -1420,13 +1419,14 @@ static void ppp_setup(struct net_device *dev)
1420 */ 1419 */
1421 1420
1422/* Called to do any work queued up on the transmit side that can now be done */ 1421/* Called to do any work queued up on the transmit side that can now be done */
1423static void __ppp_xmit_process(struct ppp *ppp) 1422static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1424{ 1423{
1425 struct sk_buff *skb;
1426
1427 ppp_xmit_lock(ppp); 1424 ppp_xmit_lock(ppp);
1428 if (!ppp->closing) { 1425 if (!ppp->closing) {
1429 ppp_push(ppp); 1426 ppp_push(ppp);
1427
1428 if (skb)
1429 skb_queue_tail(&ppp->file.xq, skb);
1430 while (!ppp->xmit_pending && 1430 while (!ppp->xmit_pending &&
1431 (skb = skb_dequeue(&ppp->file.xq))) 1431 (skb = skb_dequeue(&ppp->file.xq)))
1432 ppp_send_frame(ppp, skb); 1432 ppp_send_frame(ppp, skb);
@@ -1440,7 +1440,7 @@ static void __ppp_xmit_process(struct ppp *ppp)
1440 ppp_xmit_unlock(ppp); 1440 ppp_xmit_unlock(ppp);
1441} 1441}
1442 1442
1443static void ppp_xmit_process(struct ppp *ppp) 1443static void ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
1444{ 1444{
1445 local_bh_disable(); 1445 local_bh_disable();
1446 1446
@@ -1448,7 +1448,7 @@ static void ppp_xmit_process(struct ppp *ppp)
1448 goto err; 1448 goto err;
1449 1449
1450 (*this_cpu_ptr(ppp->xmit_recursion))++; 1450 (*this_cpu_ptr(ppp->xmit_recursion))++;
1451 __ppp_xmit_process(ppp); 1451 __ppp_xmit_process(ppp, skb);
1452 (*this_cpu_ptr(ppp->xmit_recursion))--; 1452 (*this_cpu_ptr(ppp->xmit_recursion))--;
1453 1453
1454 local_bh_enable(); 1454 local_bh_enable();
@@ -1458,6 +1458,8 @@ static void ppp_xmit_process(struct ppp *ppp)
1458err: 1458err:
1459 local_bh_enable(); 1459 local_bh_enable();
1460 1460
1461 kfree_skb(skb);
1462
1461 if (net_ratelimit()) 1463 if (net_ratelimit())
1462 netdev_err(ppp->dev, "recursion detected\n"); 1464 netdev_err(ppp->dev, "recursion detected\n");
1463} 1465}
@@ -1942,7 +1944,7 @@ static void __ppp_channel_push(struct channel *pch)
1942 if (skb_queue_empty(&pch->file.xq)) { 1944 if (skb_queue_empty(&pch->file.xq)) {
1943 ppp = pch->ppp; 1945 ppp = pch->ppp;
1944 if (ppp) 1946 if (ppp)
1945 __ppp_xmit_process(ppp); 1947 __ppp_xmit_process(ppp, NULL);
1946 } 1948 }
1947} 1949}
1948 1950
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a468439969df..56c701b73c12 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2395,7 +2395,7 @@ send_done:
2395 if (!nlh) { 2395 if (!nlh) {
2396 err = __send_and_alloc_skb(&skb, team, portid, send_func); 2396 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2397 if (err) 2397 if (err)
2398 goto errout; 2398 return err;
2399 goto send_done; 2399 goto send_done;
2400 } 2400 }
2401 2401
@@ -2681,7 +2681,7 @@ send_done:
2681 if (!nlh) { 2681 if (!nlh) {
2682 err = __send_and_alloc_skb(&skb, team, portid, send_func); 2682 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2683 if (err) 2683 if (err)
2684 goto errout; 2684 return err;
2685 goto send_done; 2685 goto send_done;
2686 } 2686 }
2687 2687
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7433bb2e4451..28cfa642e39a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -655,7 +655,7 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
655 return tun; 655 return tun;
656} 656}
657 657
658static void tun_ptr_free(void *ptr) 658void tun_ptr_free(void *ptr)
659{ 659{
660 if (!ptr) 660 if (!ptr)
661 return; 661 return;
@@ -667,6 +667,7 @@ static void tun_ptr_free(void *ptr)
667 __skb_array_destroy_skb(ptr); 667 __skb_array_destroy_skb(ptr);
668 } 668 }
669} 669}
670EXPORT_SYMBOL_GPL(tun_ptr_free);
670 671
671static void tun_queue_purge(struct tun_file *tfile) 672static void tun_queue_purge(struct tun_file *tfile)
672{ 673{
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 8a22ff67b026..d9eea8cfe6cb 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -315,6 +315,7 @@ static void __usbnet_status_stop_force(struct usbnet *dev)
315void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb) 315void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
316{ 316{
317 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); 317 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
318 unsigned long flags;
318 int status; 319 int status;
319 320
320 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) { 321 if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
@@ -326,10 +327,10 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
326 if (skb->protocol == 0) 327 if (skb->protocol == 0)
327 skb->protocol = eth_type_trans (skb, dev->net); 328 skb->protocol = eth_type_trans (skb, dev->net);
328 329
329 u64_stats_update_begin(&stats64->syncp); 330 flags = u64_stats_update_begin_irqsave(&stats64->syncp);
330 stats64->rx_packets++; 331 stats64->rx_packets++;
331 stats64->rx_bytes += skb->len; 332 stats64->rx_bytes += skb->len;
332 u64_stats_update_end(&stats64->syncp); 333 u64_stats_update_end_irqrestore(&stats64->syncp, flags);
333 334
334 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n", 335 netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
335 skb->len + sizeof (struct ethhdr), skb->protocol); 336 skb->len + sizeof (struct ethhdr), skb->protocol);
@@ -1248,11 +1249,12 @@ static void tx_complete (struct urb *urb)
1248 1249
1249 if (urb->status == 0) { 1250 if (urb->status == 0) {
1250 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64); 1251 struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
1252 unsigned long flags;
1251 1253
1252 u64_stats_update_begin(&stats64->syncp); 1254 flags = u64_stats_update_begin_irqsave(&stats64->syncp);
1253 stats64->tx_packets += entry->packets; 1255 stats64->tx_packets += entry->packets;
1254 stats64->tx_bytes += entry->length; 1256 stats64->tx_bytes += entry->length;
1255 u64_stats_update_end(&stats64->syncp); 1257 u64_stats_update_end_irqrestore(&stats64->syncp, flags);
1256 } else { 1258 } else {
1257 dev->net->stats.tx_errors++; 1259 dev->net->stats.tx_errors++;
1258 1260
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 8b39c160743d..e04937f44f33 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -977,6 +977,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
977{ 977{
978 int ret; 978 int ret;
979 u32 count; 979 u32 count;
980 int num_pkts;
981 int tx_num_deferred;
980 unsigned long flags; 982 unsigned long flags;
981 struct vmxnet3_tx_ctx ctx; 983 struct vmxnet3_tx_ctx ctx;
982 union Vmxnet3_GenericDesc *gdesc; 984 union Vmxnet3_GenericDesc *gdesc;
@@ -1075,12 +1077,12 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1075#else 1077#else
1076 gdesc = ctx.sop_txd; 1078 gdesc = ctx.sop_txd;
1077#endif 1079#endif
1080 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred);
1078 if (ctx.mss) { 1081 if (ctx.mss) {
1079 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size; 1082 gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
1080 gdesc->txd.om = VMXNET3_OM_TSO; 1083 gdesc->txd.om = VMXNET3_OM_TSO;
1081 gdesc->txd.msscof = ctx.mss; 1084 gdesc->txd.msscof = ctx.mss;
1082 le32_add_cpu(&tq->shared->txNumDeferred, (skb->len - 1085 num_pkts = (skb->len - gdesc->txd.hlen + ctx.mss - 1) / ctx.mss;
1083 gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
1084 } else { 1086 } else {
1085 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1087 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1086 gdesc->txd.hlen = ctx.eth_ip_hdr_size; 1088 gdesc->txd.hlen = ctx.eth_ip_hdr_size;
@@ -1091,8 +1093,10 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1091 gdesc->txd.om = 0; 1093 gdesc->txd.om = 0;
1092 gdesc->txd.msscof = 0; 1094 gdesc->txd.msscof = 0;
1093 } 1095 }
1094 le32_add_cpu(&tq->shared->txNumDeferred, 1); 1096 num_pkts = 1;
1095 } 1097 }
1098 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts);
1099 tx_num_deferred += num_pkts;
1096 1100
1097 if (skb_vlan_tag_present(skb)) { 1101 if (skb_vlan_tag_present(skb)) {
1098 gdesc->txd.ti = 1; 1102 gdesc->txd.ti = 1;
@@ -1118,8 +1122,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1118 1122
1119 spin_unlock_irqrestore(&tq->tx_lock, flags); 1123 spin_unlock_irqrestore(&tq->tx_lock, flags);
1120 1124
1121 if (le32_to_cpu(tq->shared->txNumDeferred) >= 1125 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) {
1122 le32_to_cpu(tq->shared->txThreshold)) {
1123 tq->shared->txNumDeferred = 0; 1126 tq->shared->txNumDeferred = 0;
1124 VMXNET3_WRITE_BAR0_REG(adapter, 1127 VMXNET3_WRITE_BAR0_REG(adapter,
1125 VMXNET3_REG_TXPROD + tq->qid * 8, 1128 VMXNET3_REG_TXPROD + tq->qid * 8,
@@ -1470,7 +1473,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1470 vmxnet3_rx_csum(adapter, skb, 1473 vmxnet3_rx_csum(adapter, skb,
1471 (union Vmxnet3_GenericDesc *)rcd); 1474 (union Vmxnet3_GenericDesc *)rcd);
1472 skb->protocol = eth_type_trans(skb, adapter->netdev); 1475 skb->protocol = eth_type_trans(skb, adapter->netdev);
1473 if (!rcd->tcp || !adapter->lro) 1476 if (!rcd->tcp ||
1477 !(adapter->netdev->features & NETIF_F_LRO))
1474 goto not_lro; 1478 goto not_lro;
1475 1479
1476 if (segCnt != 0 && mss != 0) { 1480 if (segCnt != 0 && mss != 0) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 5ba222920e80..59ec34052a65 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.11.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040b00 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040d00
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
@@ -342,9 +342,6 @@ struct vmxnet3_adapter {
342 u8 __iomem *hw_addr1; /* for BAR 1 */ 342 u8 __iomem *hw_addr1; /* for BAR 1 */
343 u8 version; 343 u8 version;
344 344
345 bool rxcsum;
346 bool lro;
347
348#ifdef VMXNET3_RSS 345#ifdef VMXNET3_RSS
349 struct UPT1_RSSConf *rss_conf; 346 struct UPT1_RSSConf *rss_conf;
350 bool rss; 347 bool rss;
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index e89e5ef2c2a4..f246e9ed4a81 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -729,6 +729,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
729 ieee80211_hw_set(hw, SPECTRUM_MGMT); 729 ieee80211_hw_set(hw, SPECTRUM_MGMT);
730 ieee80211_hw_set(hw, SIGNAL_DBM); 730 ieee80211_hw_set(hw, SIGNAL_DBM);
731 ieee80211_hw_set(hw, AMPDU_AGGREGATION); 731 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
732 ieee80211_hw_set(hw, DOESNT_SUPPORT_QOS_NDP);
732 733
733 if (ath9k_ps_enable) 734 if (ath9k_ps_enable)
734 ieee80211_hw_set(hw, SUPPORTS_PS); 735 ieee80211_hw_set(hw, SUPPORTS_PS);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
index df8a1ecb9924..232dcbb83311 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.h
@@ -181,6 +181,7 @@ enum brcmf_netif_stop_reason {
181 * @netif_stop_lock: spinlock for update netif_stop from multiple sources. 181 * @netif_stop_lock: spinlock for update netif_stop from multiple sources.
182 * @pend_8021x_cnt: tracks outstanding number of 802.1x frames. 182 * @pend_8021x_cnt: tracks outstanding number of 802.1x frames.
183 * @pend_8021x_wait: used for signalling change in count. 183 * @pend_8021x_wait: used for signalling change in count.
184 * @fwil_fwerr: flag indicating fwil layer should return firmware error codes.
184 */ 185 */
185struct brcmf_if { 186struct brcmf_if {
186 struct brcmf_pub *drvr; 187 struct brcmf_pub *drvr;
@@ -198,6 +199,7 @@ struct brcmf_if {
198 wait_queue_head_t pend_8021x_wait; 199 wait_queue_head_t pend_8021x_wait;
199 struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES]; 200 struct in6_addr ipv6_addr_tbl[NDOL_MAX_ENTRIES];
200 u8 ipv6addr_idx; 201 u8 ipv6addr_idx;
202 bool fwil_fwerr;
201}; 203};
202 204
203int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp); 205int brcmf_netdev_wait_pend8021x(struct brcmf_if *ifp);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
index 47de35a33853..bede7b7fd996 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/feature.c
@@ -104,6 +104,9 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
104 u32 data; 104 u32 data;
105 int err; 105 int err;
106 106
107 /* we need to know firmware error */
108 ifp->fwil_fwerr = true;
109
107 err = brcmf_fil_iovar_int_get(ifp, name, &data); 110 err = brcmf_fil_iovar_int_get(ifp, name, &data);
108 if (err == 0) { 111 if (err == 0) {
109 brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); 112 brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
@@ -112,6 +115,8 @@ static void brcmf_feat_iovar_int_get(struct brcmf_if *ifp,
112 brcmf_dbg(TRACE, "%s feature check failed: %d\n", 115 brcmf_dbg(TRACE, "%s feature check failed: %d\n",
113 brcmf_feat_names[id], err); 116 brcmf_feat_names[id], err);
114 } 117 }
118
119 ifp->fwil_fwerr = false;
115} 120}
116 121
117static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp, 122static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
@@ -120,6 +125,9 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
120{ 125{
121 int err; 126 int err;
122 127
128 /* we need to know firmware error */
129 ifp->fwil_fwerr = true;
130
123 err = brcmf_fil_iovar_data_set(ifp, name, data, len); 131 err = brcmf_fil_iovar_data_set(ifp, name, data, len);
124 if (err != -BRCMF_FW_UNSUPPORTED) { 132 if (err != -BRCMF_FW_UNSUPPORTED) {
125 brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]); 133 brcmf_dbg(INFO, "enabling feature: %s\n", brcmf_feat_names[id]);
@@ -128,6 +136,8 @@ static void brcmf_feat_iovar_data_set(struct brcmf_if *ifp,
128 brcmf_dbg(TRACE, "%s feature check failed: %d\n", 136 brcmf_dbg(TRACE, "%s feature check failed: %d\n",
129 brcmf_feat_names[id], err); 137 brcmf_feat_names[id], err);
130 } 138 }
139
140 ifp->fwil_fwerr = false;
131} 141}
132 142
133#define MAX_CAPS_BUFFER_SIZE 512 143#define MAX_CAPS_BUFFER_SIZE 512
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
index f2cfdd3b2bf1..fc5751116d99 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.c
@@ -131,6 +131,9 @@ brcmf_fil_cmd_data(struct brcmf_if *ifp, u32 cmd, void *data, u32 len, bool set)
131 brcmf_fil_get_errstr((u32)(-fwerr)), fwerr); 131 brcmf_fil_get_errstr((u32)(-fwerr)), fwerr);
132 err = -EBADE; 132 err = -EBADE;
133 } 133 }
134 if (ifp->fwil_fwerr)
135 return fwerr;
136
134 return err; 137 return err;
135} 138}
136 139
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 2ee54133efa1..82064e909784 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -462,25 +462,23 @@ static int brcmf_p2p_set_firmware(struct brcmf_if *ifp, u8 *p2p_mac)
462 * @dev_addr: optional device address. 462 * @dev_addr: optional device address.
463 * 463 *
464 * P2P needs mac addresses for P2P device and interface. If no device 464 * P2P needs mac addresses for P2P device and interface. If no device
465 * address is specified, these are derived from the primary net device, ie. 465 * address is specified, these are derived from a random ethernet
466 * the permanent ethernet address of the device. 466 * address.
467 */ 467 */
468static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr) 468static void brcmf_p2p_generate_bss_mac(struct brcmf_p2p_info *p2p, u8 *dev_addr)
469{ 469{
470 struct brcmf_if *pri_ifp = p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif->ifp; 470 bool random_addr = false;
471 bool local_admin = false;
472 471
473 if (!dev_addr || is_zero_ether_addr(dev_addr)) { 472 if (!dev_addr || is_zero_ether_addr(dev_addr))
474 dev_addr = pri_ifp->mac_addr; 473 random_addr = true;
475 local_admin = true;
476 }
477 474
478 /* Generate the P2P Device Address. This consists of the device's 475 /* Generate the P2P Device Address obtaining a random ethernet
479 * primary MAC address with the locally administered bit set. 476 * address with the locally administered bit set.
480 */ 477 */
481 memcpy(p2p->dev_addr, dev_addr, ETH_ALEN); 478 if (random_addr)
482 if (local_admin) 479 eth_random_addr(p2p->dev_addr);
483 p2p->dev_addr[0] |= 0x02; 480 else
481 memcpy(p2p->dev_addr, dev_addr, ETH_ALEN);
484 482
485 /* Generate the P2P Interface Address. If the discovery and connection 483 /* Generate the P2P Interface Address. If the discovery and connection
486 * BSSCFGs need to simultaneously co-exist, then this address must be 484 * BSSCFGs need to simultaneously co-exist, then this address must be
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index c5f2ddf9b0fe..e5a2fc738ac3 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -91,7 +91,6 @@ config IWLWIFI_BCAST_FILTERING
91config IWLWIFI_PCIE_RTPM 91config IWLWIFI_PCIE_RTPM
92 bool "Enable runtime power management mode for PCIe devices" 92 bool "Enable runtime power management mode for PCIe devices"
93 depends on IWLMVM && PM && EXPERT 93 depends on IWLMVM && PM && EXPERT
94 default false
95 help 94 help
96 Say Y here to enable runtime power management for PCIe 95 Say Y here to enable runtime power management for PCIe
97 devices. If enabled, the device will go into low power mode 96 devices. If enabled, the device will go into low power mode
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index 3721a3ed358b..f824bebceb06 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -211,7 +211,7 @@ enum {
211 * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end 211 * @TE_V2_NOTIF_HOST_FRAG_END:request/receive notification on frag end
212 * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use. 212 * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
213 * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use. 213 * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
214 * @T2_V2_START_IMMEDIATELY: start time event immediately 214 * @TE_V2_START_IMMEDIATELY: start time event immediately
215 * @TE_V2_DEP_OTHER: depends on another time event 215 * @TE_V2_DEP_OTHER: depends on another time event
216 * @TE_V2_DEP_TSF: depends on a specific time 216 * @TE_V2_DEP_TSF: depends on a specific time
217 * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC 217 * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of tha same MAC
@@ -230,7 +230,7 @@ enum iwl_time_event_policy {
230 TE_V2_NOTIF_HOST_FRAG_END = BIT(5), 230 TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
231 TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6), 231 TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
232 TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7), 232 TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
233 T2_V2_START_IMMEDIATELY = BIT(11), 233 TE_V2_START_IMMEDIATELY = BIT(11),
234 234
235 /* placement characteristics */ 235 /* placement characteristics */
236 TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS), 236 TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 67aefc8fc9ac..7bd704a3e640 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -8,6 +8,7 @@
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 36 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
37 * Copyright(c) 2018 Intel Corporation
36 * All rights reserved. 38 * All rights reserved.
37 * 39 *
38 * Redistribution and use in source and binary forms, with or without 40 * Redistribution and use in source and binary forms, with or without
@@ -942,7 +944,6 @@ dump_trans_data:
942 944
943out: 945out:
944 iwl_fw_free_dump_desc(fwrt); 946 iwl_fw_free_dump_desc(fwrt);
945 fwrt->dump.trig = NULL;
946 clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status); 947 clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
947 IWL_DEBUG_INFO(fwrt, "WRT dump done\n"); 948 IWL_DEBUG_INFO(fwrt, "WRT dump done\n");
948} 949}
@@ -1112,6 +1113,14 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
1112 fwrt->ops->dump_start(fwrt->ops_ctx)) 1113 fwrt->ops->dump_start(fwrt->ops_ctx))
1113 return; 1114 return;
1114 1115
1116 if (fwrt->ops && fwrt->ops->fw_running &&
1117 !fwrt->ops->fw_running(fwrt->ops_ctx)) {
1118 IWL_ERR(fwrt, "Firmware not running - cannot dump error\n");
1119 iwl_fw_free_dump_desc(fwrt);
1120 clear_bit(IWL_FWRT_STATUS_DUMPING, &fwrt->status);
1121 goto out;
1122 }
1123
1115 if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { 1124 if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
1116 /* stop recording */ 1125 /* stop recording */
1117 iwl_fw_dbg_stop_recording(fwrt); 1126 iwl_fw_dbg_stop_recording(fwrt);
@@ -1145,7 +1154,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work)
1145 iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl); 1154 iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, out_ctrl);
1146 } 1155 }
1147 } 1156 }
1148 1157out:
1149 if (fwrt->ops && fwrt->ops->dump_end) 1158 if (fwrt->ops && fwrt->ops->dump_end)
1150 fwrt->ops->dump_end(fwrt->ops_ctx); 1159 fwrt->ops->dump_end(fwrt->ops_ctx);
1151} 1160}
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
index 223fb77a3aa9..72259bff9922 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
@@ -8,6 +8,7 @@
8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -33,6 +34,7 @@
33 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. 34 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
34 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 35 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
35 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH 36 * Copyright(c) 2015 - 2017 Intel Deutschland GmbH
37 * Copyright(c) 2018 Intel Corporation
36 * All rights reserved. 38 * All rights reserved.
37 * 39 *
38 * Redistribution and use in source and binary forms, with or without 40 * Redistribution and use in source and binary forms, with or without
@@ -91,6 +93,7 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt)
91 if (fwrt->dump.desc != &iwl_dump_desc_assert) 93 if (fwrt->dump.desc != &iwl_dump_desc_assert)
92 kfree(fwrt->dump.desc); 94 kfree(fwrt->dump.desc);
93 fwrt->dump.desc = NULL; 95 fwrt->dump.desc = NULL;
96 fwrt->dump.trig = NULL;
94} 97}
95 98
96void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); 99void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
index e57ff92a68ae..3da468d2cc92 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.h
@@ -75,6 +75,20 @@ static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt)
75 cancel_delayed_work_sync(&fwrt->timestamp.wk); 75 cancel_delayed_work_sync(&fwrt->timestamp.wk);
76} 76}
77 77
78static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt)
79{
80 cancel_delayed_work_sync(&fwrt->timestamp.wk);
81}
82
83static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt)
84{
85 if (!fwrt->timestamp.delay)
86 return;
87
88 schedule_delayed_work(&fwrt->timestamp.wk,
89 round_jiffies_relative(fwrt->timestamp.delay));
90}
91
78#else 92#else
79static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt, 93static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
80 struct dentry *dbgfs_dir) 94 struct dentry *dbgfs_dir)
@@ -84,4 +98,8 @@ static inline int iwl_fwrt_dbgfs_register(struct iwl_fw_runtime *fwrt,
84 98
85static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {} 99static inline void iwl_fw_cancel_timestamp(struct iwl_fw_runtime *fwrt) {}
86 100
101static inline void iwl_fw_suspend_timestamp(struct iwl_fw_runtime *fwrt) {}
102
103static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {}
104
87#endif /* CONFIG_IWLWIFI_DEBUGFS */ 105#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index c39fe84bb4c4..2efac307909e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -77,8 +77,14 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
77} 77}
78IWL_EXPORT_SYMBOL(iwl_fw_runtime_init); 78IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
79 79
80void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt) 80void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt)
81{ 81{
82 iwl_fw_cancel_timestamp(fwrt); 82 iwl_fw_suspend_timestamp(fwrt);
83} 83}
84IWL_EXPORT_SYMBOL(iwl_fw_runtime_exit); 84IWL_EXPORT_SYMBOL(iwl_fw_runtime_suspend);
85
86void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt)
87{
88 iwl_fw_resume_timestamp(fwrt);
89}
90IWL_EXPORT_SYMBOL(iwl_fw_runtime_resume);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index e25c049f980f..3fb940ebd74a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -6,6 +6,7 @@
6 * GPL LICENSE SUMMARY 6 * GPL LICENSE SUMMARY
7 * 7 *
8 * Copyright(c) 2017 Intel Deutschland GmbH 8 * Copyright(c) 2017 Intel Deutschland GmbH
9 * Copyright(c) 2018 Intel Corporation
9 * 10 *
10 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as 12 * it under the terms of version 2 of the GNU General Public License as
@@ -26,6 +27,7 @@
26 * BSD LICENSE 27 * BSD LICENSE
27 * 28 *
28 * Copyright(c) 2017 Intel Deutschland GmbH 29 * Copyright(c) 2017 Intel Deutschland GmbH
30 * Copyright(c) 2018 Intel Corporation
29 * All rights reserved. 31 * All rights reserved.
30 * 32 *
31 * Redistribution and use in source and binary forms, with or without 33 * Redistribution and use in source and binary forms, with or without
@@ -68,6 +70,7 @@
68struct iwl_fw_runtime_ops { 70struct iwl_fw_runtime_ops {
69 int (*dump_start)(void *ctx); 71 int (*dump_start)(void *ctx);
70 void (*dump_end)(void *ctx); 72 void (*dump_end)(void *ctx);
73 bool (*fw_running)(void *ctx);
71}; 74};
72 75
73#define MAX_NUM_LMAC 2 76#define MAX_NUM_LMAC 2
@@ -150,6 +153,10 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
150 153
151void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt); 154void iwl_fw_runtime_exit(struct iwl_fw_runtime *fwrt);
152 155
156void iwl_fw_runtime_suspend(struct iwl_fw_runtime *fwrt);
157
158void iwl_fw_runtime_resume(struct iwl_fw_runtime *fwrt);
159
153static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt, 160static inline void iwl_fw_set_current_image(struct iwl_fw_runtime *fwrt,
154 enum iwl_ucode_type cur_fw_img) 161 enum iwl_ucode_type cur_fw_img)
155{ 162{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 0e6cf39285f4..2efe9b099556 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1098,6 +1098,8 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
1098 /* make sure the d0i3 exit work is not pending */ 1098 /* make sure the d0i3 exit work is not pending */
1099 flush_work(&mvm->d0i3_exit_work); 1099 flush_work(&mvm->d0i3_exit_work);
1100 1100
1101 iwl_fw_runtime_suspend(&mvm->fwrt);
1102
1101 ret = iwl_trans_suspend(trans); 1103 ret = iwl_trans_suspend(trans);
1102 if (ret) 1104 if (ret)
1103 return ret; 1105 return ret;
@@ -2012,6 +2014,8 @@ int iwl_mvm_resume(struct ieee80211_hw *hw)
2012 2014
2013 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; 2015 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
2014 2016
2017 iwl_fw_runtime_resume(&mvm->fwrt);
2018
2015 return ret; 2019 return ret;
2016} 2020}
2017 2021
@@ -2038,6 +2042,8 @@ static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
2038 2042
2039 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; 2043 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
2040 2044
2045 iwl_fw_runtime_suspend(&mvm->fwrt);
2046
2041 /* start pseudo D3 */ 2047 /* start pseudo D3 */
2042 rtnl_lock(); 2048 rtnl_lock();
2043 err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true); 2049 err = __iwl_mvm_suspend(mvm->hw, mvm->hw->wiphy->wowlan_config, true);
@@ -2098,6 +2104,8 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
2098 __iwl_mvm_resume(mvm, true); 2104 __iwl_mvm_resume(mvm, true);
2099 rtnl_unlock(); 2105 rtnl_unlock();
2100 2106
2107 iwl_fw_runtime_resume(&mvm->fwrt);
2108
2101 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; 2109 mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
2102 2110
2103 iwl_abort_notification_waits(&mvm->notif_wait); 2111 iwl_abort_notification_waits(&mvm->notif_wait);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index a7892c1254a2..9c436d8d001d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -8,6 +8,7 @@
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 37 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 38 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
39 * Copyright(c) 2018 Intel Corporation
38 * All rights reserved. 40 * All rights reserved.
39 * 41 *
40 * Redistribution and use in source and binary forms, with or without 42 * Redistribution and use in source and binary forms, with or without
@@ -1281,9 +1283,6 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm,
1281{ 1283{
1282 int ret; 1284 int ret;
1283 1285
1284 if (!iwl_mvm_firmware_running(mvm))
1285 return -EIO;
1286
1287 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE); 1286 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PRPH_WRITE);
1288 if (ret) 1287 if (ret)
1289 return ret; 1288 return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 2f22e14e00fe..8ba16fc24e3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -438,7 +438,8 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
438 } 438 }
439 439
440 /* Allocate the CAB queue for softAP and GO interfaces */ 440 /* Allocate the CAB queue for softAP and GO interfaces */
441 if (vif->type == NL80211_IFTYPE_AP) { 441 if (vif->type == NL80211_IFTYPE_AP ||
442 vif->type == NL80211_IFTYPE_ADHOC) {
442 /* 443 /*
443 * For TVQM this will be overwritten later with the FW assigned 444 * For TVQM this will be overwritten later with the FW assigned
444 * queue value (when queue is enabled). 445 * queue value (when queue is enabled).
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 8aed40a8bc38..ebf511150f4d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -8,6 +8,7 @@
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -2106,15 +2107,40 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
2106 if (ret) 2107 if (ret)
2107 goto out_remove; 2108 goto out_remove;
2108 2109
2109 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2110 /*
2110 if (ret) 2111 * This is not very nice, but the simplest:
2111 goto out_unbind; 2112 * For older FWs adding the mcast sta before the bcast station may
2112 2113 * cause assert 0x2b00.
2113 /* Send the bcast station. At this stage the TBTT and DTIM time events 2114 * This is fixed in later FW so make the order of removal depend on
2114 * are added and applied to the scheduler */ 2115 * the TLV
2115 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2116 */
2116 if (ret) 2117 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2117 goto out_rm_mcast; 2118 ret = iwl_mvm_add_mcast_sta(mvm, vif);
2119 if (ret)
2120 goto out_unbind;
2121 /*
2122 * Send the bcast station. At this stage the TBTT and DTIM time
2123 * events are added and applied to the scheduler
2124 */
2125 ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2126 if (ret) {
2127 iwl_mvm_rm_mcast_sta(mvm, vif);
2128 goto out_unbind;
2129 }
2130 } else {
2131 /*
2132 * Send the bcast station. At this stage the TBTT and DTIM time
2133 * events are added and applied to the scheduler
2134 */
2135		ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2136 if (ret)
2137 goto out_unbind;
2138		ret = iwl_mvm_add_mcast_sta(mvm, vif);
2139 if (ret) {
2140 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2141 goto out_unbind;
2142 }
2143 }
2118 2144
2119 /* must be set before quota calculations */ 2145 /* must be set before quota calculations */
2120 mvmvif->ap_ibss_active = true; 2146 mvmvif->ap_ibss_active = true;
@@ -2144,7 +2170,6 @@ out_quota_failed:
2144 iwl_mvm_power_update_mac(mvm); 2170 iwl_mvm_power_update_mac(mvm);
2145 mvmvif->ap_ibss_active = false; 2171 mvmvif->ap_ibss_active = false;
2146 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2172 iwl_mvm_send_rm_bcast_sta(mvm, vif);
2147out_rm_mcast:
2148 iwl_mvm_rm_mcast_sta(mvm, vif); 2173 iwl_mvm_rm_mcast_sta(mvm, vif);
2149out_unbind: 2174out_unbind:
2150 iwl_mvm_binding_remove_vif(mvm, vif); 2175 iwl_mvm_binding_remove_vif(mvm, vif);
@@ -2682,6 +2707,10 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
2682 2707
2683 /* enable beacon filtering */ 2708 /* enable beacon filtering */
2684 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 2709 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
2710
2711 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
2712 false);
2713
2685 ret = 0; 2714 ret = 0;
2686 } else if (old_state == IEEE80211_STA_AUTHORIZED && 2715 } else if (old_state == IEEE80211_STA_AUTHORIZED &&
2687 new_state == IEEE80211_STA_ASSOC) { 2716 new_state == IEEE80211_STA_ASSOC) {
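The hunk above makes the order of adding the multicast and broadcast stations depend on the IWL_UCODE_TLV_API_STA_TYPE capability: on older firmware adding the mcast station before the bcast one can trigger assert 0x2b00, so the bcast station goes first there, while firmware advertising the TLV gets the mcast station first; in both paths the first station is rolled back if the second add fails. A minimal userspace sketch of just that control flow, assuming stand-in functions (add_bcast, add_mcast, rm_bcast, rm_mcast and the has_sta_type_tlv flag are placeholders, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the driver calls; each add returns 0 on success. */
static int add_bcast(void) { return 0; }
static int add_mcast(void) { return 0; }
static void rm_bcast(void) { }
static void rm_mcast(void) { }

/*
 * Capability-dependent ordering: new firmware gets mcast first, old
 * firmware gets bcast first; the first add is undone when the second
 * one fails.
 */
static int start_ap(bool has_sta_type_tlv)
{
	int ret;

	if (has_sta_type_tlv) {
		ret = add_mcast();
		if (ret)
			return ret;
		ret = add_bcast();
		if (ret) {
			rm_mcast();
			return ret;
		}
	} else {
		ret = add_bcast();
		if (ret)
			return ret;
		ret = add_mcast();
		if (ret) {
			rm_bcast();
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	printf("new fw: %d, old fw: %d\n", start_ap(true), start_ap(false));
	return 0;
}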
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 2d28e0804218..89ff02d7c876 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -90,6 +90,7 @@
90#include "fw/runtime.h" 90#include "fw/runtime.h"
91#include "fw/dbg.h" 91#include "fw/dbg.h"
92#include "fw/acpi.h" 92#include "fw/acpi.h"
93#include "fw/debugfs.h"
93 94
94#define IWL_MVM_MAX_ADDRESSES 5 95#define IWL_MVM_MAX_ADDRESSES 5
95/* RSSI offset for WkP */ 96/* RSSI offset for WkP */
@@ -1783,6 +1784,7 @@ static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm)
1783 1784
1784static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) 1785static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm)
1785{ 1786{
1787 iwl_fw_cancel_timestamp(&mvm->fwrt);
1786 iwl_free_fw_paging(&mvm->fwrt); 1788 iwl_free_fw_paging(&mvm->fwrt);
1787 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); 1789 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
1788 iwl_fw_dump_conf_clear(&mvm->fwrt); 1790 iwl_fw_dump_conf_clear(&mvm->fwrt);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 5d525a0023dc..ab7fb5aad984 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -8,6 +8,7 @@
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
11 * 12 *
12 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of version 2 of the GNU General Public License as 14 * it under the terms of version 2 of the GNU General Public License as
@@ -35,6 +36,7 @@
35 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. 36 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 37 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
37 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH 38 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
39 * Copyright(c) 2018 Intel Corporation
38 * All rights reserved. 40 * All rights reserved.
39 * 41 *
40 * Redistribution and use in source and binary forms, with or without 42 * Redistribution and use in source and binary forms, with or without
@@ -552,9 +554,15 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)
552 iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT); 554 iwl_mvm_unref(mvm, IWL_MVM_REF_FW_DBG_COLLECT);
553} 555}
554 556
557static bool iwl_mvm_fwrt_fw_running(void *ctx)
558{
559 return iwl_mvm_firmware_running(ctx);
560}
561
555static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { 562static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
556 .dump_start = iwl_mvm_fwrt_dump_start, 563 .dump_start = iwl_mvm_fwrt_dump_start,
557 .dump_end = iwl_mvm_fwrt_dump_end, 564 .dump_end = iwl_mvm_fwrt_dump_end,
565 .fw_running = iwl_mvm_fwrt_fw_running,
558}; 566};
559 567
560static struct iwl_op_mode * 568static struct iwl_op_mode *
@@ -802,7 +810,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
802 iwl_mvm_leds_exit(mvm); 810 iwl_mvm_leds_exit(mvm);
803 iwl_mvm_thermal_exit(mvm); 811 iwl_mvm_thermal_exit(mvm);
804 out_free: 812 out_free:
805 iwl_fw_runtime_exit(&mvm->fwrt);
806 iwl_fw_flush_dump(&mvm->fwrt); 813 iwl_fw_flush_dump(&mvm->fwrt);
807 814
808 if (iwlmvm_mod_params.init_dbg) 815 if (iwlmvm_mod_params.init_dbg)
@@ -843,7 +850,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
843#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS) 850#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_IWLWIFI_DEBUGFS)
844 kfree(mvm->d3_resume_sram); 851 kfree(mvm->d3_resume_sram);
845#endif 852#endif
846 iwl_fw_runtime_exit(&mvm->fwrt);
847 iwl_trans_op_mode_leave(mvm->trans); 853 iwl_trans_op_mode_leave(mvm->trans);
848 854
849 iwl_phy_db_free(mvm->phy_db); 855 iwl_phy_db_free(mvm->phy_db);
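The ops.c hunks register a third firmware-runtime callback, .fw_running, so callers such as the debugfs handler above no longer open-code the iwl_mvm_firmware_running() check (which is why that test was dropped from debugfs.c). A minimal sketch of that callback-table pattern, assuming invented structure and function names rather than the iwlwifi ones:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical mirror of a runtime-ops table with a fw_running hook. */
struct runtime_ops {
	int  (*dump_start)(void *ctx);
	void (*dump_end)(void *ctx);
	bool (*fw_running)(void *ctx);
};

struct op_mode {
	bool firmware_running;
};

static int  op_dump_start(void *ctx) { (void)ctx; return 0; }
static void op_dump_end(void *ctx)   { (void)ctx; }
static bool op_fw_running(void *ctx)
{
	return ((struct op_mode *)ctx)->firmware_running;
}

static const struct runtime_ops ops = {
	.dump_start = op_dump_start,
	.dump_end   = op_dump_end,
	.fw_running = op_fw_running,
};

/* A debugfs-style caller only needs the ops table, not op-mode internals. */
static int collect_if_running(const struct runtime_ops *o, void *ctx)
{
	if (!o->fw_running(ctx))
		return -5; /* stands in for -EIO */
	return 0;
}

int main(void)
{
	struct op_mode mode = { .firmware_running = true };

	printf("%d\n", collect_if_running(&ops, &mode));
	return 0;
}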
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index 60abb0084ee5..47f4c7a1d80d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -2684,7 +2684,8 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
2684 struct ieee80211_sta *sta, 2684 struct ieee80211_sta *sta,
2685 struct iwl_lq_sta *lq_sta, 2685 struct iwl_lq_sta *lq_sta,
2686 enum nl80211_band band, 2686 enum nl80211_band band,
2687 struct rs_rate *rate) 2687 struct rs_rate *rate,
2688 bool init)
2688{ 2689{
2689 int i, nentries; 2690 int i, nentries;
2690 unsigned long active_rate; 2691 unsigned long active_rate;
@@ -2738,14 +2739,25 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
2738 */ 2739 */
2739 if (sta->vht_cap.vht_supported && 2740 if (sta->vht_cap.vht_supported &&
2740 best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { 2741 best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
2741 switch (sta->bandwidth) { 2742 /*
2742 case IEEE80211_STA_RX_BW_160: 2743 * In AP mode, when a new station associates, rs is initialized
2743 case IEEE80211_STA_RX_BW_80: 2744 * immediately upon association completion, before the phy
2744 case IEEE80211_STA_RX_BW_40: 2745 * context is updated with the association parameters, so the
2746 * sta bandwidth might be wider than the phy context allows.
2747 * To avoid this issue, always initialize rs with 20mhz
2748 * bandwidth rate, and after authorization, when the phy context
2749 * is already up-to-date, re-init rs with the correct bw.
2750 */
2751 u32 bw = init ? RATE_MCS_CHAN_WIDTH_20 : rs_bw_from_sta_bw(sta);
2752
2753 switch (bw) {
2754 case RATE_MCS_CHAN_WIDTH_40:
2755 case RATE_MCS_CHAN_WIDTH_80:
2756 case RATE_MCS_CHAN_WIDTH_160:
2745 initial_rates = rs_optimal_rates_vht; 2757 initial_rates = rs_optimal_rates_vht;
2746 nentries = ARRAY_SIZE(rs_optimal_rates_vht); 2758 nentries = ARRAY_SIZE(rs_optimal_rates_vht);
2747 break; 2759 break;
2748 case IEEE80211_STA_RX_BW_20: 2760 case RATE_MCS_CHAN_WIDTH_20:
2749 initial_rates = rs_optimal_rates_vht_20mhz; 2761 initial_rates = rs_optimal_rates_vht_20mhz;
2750 nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz); 2762 nentries = ARRAY_SIZE(rs_optimal_rates_vht_20mhz);
2751 break; 2763 break;
@@ -2756,7 +2768,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
2756 2768
2757 active_rate = lq_sta->active_siso_rate; 2769 active_rate = lq_sta->active_siso_rate;
2758 rate->type = LQ_VHT_SISO; 2770 rate->type = LQ_VHT_SISO;
2759 rate->bw = rs_bw_from_sta_bw(sta); 2771 rate->bw = bw;
2760 } else if (sta->ht_cap.ht_supported && 2772 } else if (sta->ht_cap.ht_supported &&
2761 best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) { 2773 best_rssi > IWL_RS_LOW_RSSI_THRESHOLD) {
2762 initial_rates = rs_optimal_rates_ht; 2774 initial_rates = rs_optimal_rates_ht;
@@ -2839,7 +2851,7 @@ static void rs_initialize_lq(struct iwl_mvm *mvm,
2839 tbl = &(lq_sta->lq_info[active_tbl]); 2851 tbl = &(lq_sta->lq_info[active_tbl]);
2840 rate = &tbl->rate; 2852 rate = &tbl->rate;
2841 2853
2842 rs_get_initial_rate(mvm, sta, lq_sta, band, rate); 2854 rs_get_initial_rate(mvm, sta, lq_sta, band, rate, init);
2843 rs_init_optimal_rate(mvm, sta, lq_sta); 2855 rs_init_optimal_rate(mvm, sta, lq_sta);
2844 2856
2845 WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B, 2857 WARN_ONCE(rate->ant != ANT_A && rate->ant != ANT_B,
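The new comment in rs_get_initial_rate() explains why rate scaling is first initialised at 20 MHz and only re-initialised with the station's real bandwidth after authorization, once the phy context matches the association parameters. A small sketch of that selection, assuming made-up width constants in place of RATE_MCS_CHAN_WIDTH_*:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-ins for the channel-width codes used by the driver. */
enum chan_width { WIDTH_20, WIDTH_40, WIDTH_80, WIDTH_160 };

/* What the station advertises (would come from sta->bandwidth). */
static enum chan_width sta_bandwidth(void)
{
	return WIDTH_80;
}

/*
 * The init pass always uses 20 MHz because the phy context may not yet
 * reflect the association parameters; the post-authorization pass uses
 * the station's real bandwidth.
 */
static enum chan_width initial_rate_width(bool init)
{
	return init ? WIDTH_20 : sta_bandwidth();
}

int main(void)
{
	printf("init pass: %d, after auth: %d\n",
	       initial_rate_width(true), initial_rate_width(false));
	return 0;
}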
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index a3f7c1bf3cc8..580de5851fc7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -71,6 +71,7 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
71 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 71 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
72 struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb); 72 struct ieee80211_rx_status *stats = IEEE80211_SKB_RXCB(skb);
73 struct iwl_mvm_key_pn *ptk_pn; 73 struct iwl_mvm_key_pn *ptk_pn;
74 int res;
74 u8 tid, keyidx; 75 u8 tid, keyidx;
75 u8 pn[IEEE80211_CCMP_PN_LEN]; 76 u8 pn[IEEE80211_CCMP_PN_LEN];
76 u8 *extiv; 77 u8 *extiv;
@@ -127,12 +128,13 @@ static inline int iwl_mvm_check_pn(struct iwl_mvm *mvm, struct sk_buff *skb,
127 pn[4] = extiv[1]; 128 pn[4] = extiv[1];
128 pn[5] = extiv[0]; 129 pn[5] = extiv[0];
129 130
130 if (memcmp(pn, ptk_pn->q[queue].pn[tid], 131 res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
131 IEEE80211_CCMP_PN_LEN) <= 0) 132 if (res < 0)
133 return -1;
134 if (!res && !(stats->flag & RX_FLAG_ALLOW_SAME_PN))
132 return -1; 135 return -1;
133 136
134 if (!(stats->flag & RX_FLAG_AMSDU_MORE)) 137 memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
135 memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
136 stats->flag |= RX_FLAG_PN_VALIDATED; 138 stats->flag |= RX_FLAG_PN_VALIDATED;
137 139
138 return 0; 140 return 0;
@@ -314,28 +316,21 @@ static void iwl_mvm_rx_csum(struct ieee80211_sta *sta,
314} 316}
315 317
316/* 318/*
317 * returns true if a packet outside BA session is a duplicate and 319 * returns true if a packet is a duplicate and should be dropped.
318 * should be dropped 320 * Updates AMSDU PN tracking info
319 */ 321 */
320static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue, 322static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
321 struct ieee80211_rx_status *rx_status, 323 struct ieee80211_rx_status *rx_status,
322 struct ieee80211_hdr *hdr, 324 struct ieee80211_hdr *hdr,
323 struct iwl_rx_mpdu_desc *desc) 325 struct iwl_rx_mpdu_desc *desc)
324{ 326{
325 struct iwl_mvm_sta *mvm_sta; 327 struct iwl_mvm_sta *mvm_sta;
326 struct iwl_mvm_rxq_dup_data *dup_data; 328 struct iwl_mvm_rxq_dup_data *dup_data;
327 u8 baid, tid, sub_frame_idx; 329 u8 tid, sub_frame_idx;
328 330
329 if (WARN_ON(IS_ERR_OR_NULL(sta))) 331 if (WARN_ON(IS_ERR_OR_NULL(sta)))
330 return false; 332 return false;
331 333
332 baid = (le32_to_cpu(desc->reorder_data) &
333 IWL_RX_MPDU_REORDER_BAID_MASK) >>
334 IWL_RX_MPDU_REORDER_BAID_SHIFT;
335
336 if (baid != IWL_RX_REORDER_DATA_INVALID_BAID)
337 return false;
338
339 mvm_sta = iwl_mvm_sta_from_mac80211(sta); 334 mvm_sta = iwl_mvm_sta_from_mac80211(sta);
340 dup_data = &mvm_sta->dup_data[queue]; 335 dup_data = &mvm_sta->dup_data[queue];
341 336
@@ -365,6 +360,12 @@ static bool iwl_mvm_is_nonagg_dup(struct ieee80211_sta *sta, int queue,
365 dup_data->last_sub_frame[tid] >= sub_frame_idx)) 360 dup_data->last_sub_frame[tid] >= sub_frame_idx))
366 return true; 361 return true;
367 362
363 /* Allow same PN as the first subframe for following sub frames */
364 if (dup_data->last_seq[tid] == hdr->seq_ctrl &&
365 sub_frame_idx > dup_data->last_sub_frame[tid] &&
366 desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU)
367 rx_status->flag |= RX_FLAG_ALLOW_SAME_PN;
368
368 dup_data->last_seq[tid] = hdr->seq_ctrl; 369 dup_data->last_seq[tid] = hdr->seq_ctrl;
369 dup_data->last_sub_frame[tid] = sub_frame_idx; 370 dup_data->last_sub_frame[tid] = sub_frame_idx;
370 371
@@ -971,7 +972,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
971 if (ieee80211_is_data(hdr->frame_control)) 972 if (ieee80211_is_data(hdr->frame_control))
972 iwl_mvm_rx_csum(sta, skb, desc); 973 iwl_mvm_rx_csum(sta, skb, desc);
973 974
974 if (iwl_mvm_is_nonagg_dup(sta, queue, rx_status, hdr, desc)) { 975 if (iwl_mvm_is_dup(sta, queue, rx_status, hdr, desc)) {
975 kfree_skb(skb); 976 kfree_skb(skb);
976 goto out; 977 goto out;
977 } 978 }
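The rxmq.c hunks rework the CCMP/GCMP packet-number replay check: a smaller PN is rejected, an equal PN is accepted only when the frame was flagged as a later A-MSDU subframe (RX_FLAG_ALLOW_SAME_PN), and the stored PN is then updated unconditionally. A standalone sketch of that comparison, assuming a single PN slot instead of the driver's per-queue, per-TID state:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PN_LEN 6 /* same length as IEEE80211_CCMP_PN_LEN */

/*
 * Returns 0 if the packet passes the replay check, -1 otherwise.
 * allow_same_pn models RX_FLAG_ALLOW_SAME_PN, set for non-first
 * subframes of an A-MSDU, which legitimately reuse the PN.
 * Simplified: one global PN slot instead of per-queue/per-TID state.
 */
static int check_pn(unsigned char last_pn[PN_LEN],
		    const unsigned char pn[PN_LEN], bool allow_same_pn)
{
	int res = memcmp(pn, last_pn, PN_LEN);

	if (res < 0)
		return -1;
	if (res == 0 && !allow_same_pn)
		return -1;

	memcpy(last_pn, pn, PN_LEN);
	return 0;
}

int main(void)
{
	unsigned char last[PN_LEN] = { 0, 0, 0, 0, 0, 1 };
	unsigned char same[PN_LEN] = { 0, 0, 0, 0, 0, 1 };
	unsigned char next[PN_LEN] = { 0, 0, 0, 0, 0, 2 };

	printf("replayed: %d\n", check_pn(last, same, false)); /* -1 */
	printf("a-msdu:   %d\n", check_pn(last, same, true));  /*  0 */
	printf("newer:    %d\n", check_pn(last, next, false)); /*  0 */
	return 0;
}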
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 6b2674e02606..630e23cb0ffb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -2039,7 +2039,7 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2039 struct iwl_trans_txq_scd_cfg cfg = { 2039 struct iwl_trans_txq_scd_cfg cfg = {
2040 .fifo = IWL_MVM_TX_FIFO_MCAST, 2040 .fifo = IWL_MVM_TX_FIFO_MCAST,
2041 .sta_id = msta->sta_id, 2041 .sta_id = msta->sta_id,
2042 .tid = IWL_MAX_TID_COUNT, 2042 .tid = 0,
2043 .aggregate = false, 2043 .aggregate = false,
2044 .frame_limit = IWL_FRAME_LIMIT, 2044 .frame_limit = IWL_FRAME_LIMIT,
2045 }; 2045 };
@@ -2053,6 +2053,17 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2053 return -ENOTSUPP; 2053 return -ENOTSUPP;
2054 2054
2055 /* 2055 /*
2056 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2057 * invalid, so make sure we use the queue we want.
2058 * Note that this is done here as we want to avoid making DQA
2059 * changes in mac80211 layer.
2060 */
2061 if (vif->type == NL80211_IFTYPE_ADHOC) {
2062 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2063 mvmvif->cab_queue = vif->cab_queue;
2064 }
2065
2066 /*
2056 * While in previous FWs we had to exclude cab queue from TFD queue 2067 * While in previous FWs we had to exclude cab queue from TFD queue
2057 * mask, now it is needed as any other queue. 2068 * mask, now it is needed as any other queue.
2058 */ 2069 */
@@ -2079,24 +2090,13 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2079 if (iwl_mvm_has_new_tx_api(mvm)) { 2090 if (iwl_mvm_has_new_tx_api(mvm)) {
2080 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue, 2091 int queue = iwl_mvm_tvqm_enable_txq(mvm, vif->cab_queue,
2081 msta->sta_id, 2092 msta->sta_id,
2082 IWL_MAX_TID_COUNT, 2093 0,
2083 timeout); 2094 timeout);
2084 mvmvif->cab_queue = queue; 2095 mvmvif->cab_queue = queue;
2085 } else if (!fw_has_api(&mvm->fw->ucode_capa, 2096 } else if (!fw_has_api(&mvm->fw->ucode_capa,
2086 IWL_UCODE_TLV_API_STA_TYPE)) { 2097 IWL_UCODE_TLV_API_STA_TYPE))
2087 /*
2088 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2089 * invalid, so make sure we use the queue we want.
2090 * Note that this is done here as we want to avoid making DQA
2091 * changes in mac80211 layer.
2092 */
2093 if (vif->type == NL80211_IFTYPE_ADHOC) {
2094 vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2095 mvmvif->cab_queue = vif->cab_queue;
2096 }
2097 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, 2098 iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0,
2098 &cfg, timeout); 2099 &cfg, timeout);
2099 }
2100 2100
2101 return 0; 2101 return 0;
2102} 2102}
@@ -2115,7 +2115,7 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2115 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0); 2115 iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);
2116 2116
2117 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue, 2117 iwl_mvm_disable_txq(mvm, mvmvif->cab_queue, vif->cab_queue,
2118 IWL_MAX_TID_COUNT, 0); 2118 0, 0);
2119 2119
2120 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id); 2120 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2121 if (ret) 2121 if (ret)
@@ -3170,8 +3170,9 @@ static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
3170 int ret, size; 3170 int ret, size;
3171 u32 status; 3171 u32 status;
3172 3172
3173 /* This is a valid situation for GTK removal */
3173 if (sta_id == IWL_MVM_INVALID_STA) 3174 if (sta_id == IWL_MVM_INVALID_STA)
3174 return -EINVAL; 3175 return 0;
3175 3176
3176 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & 3177 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
3177 STA_KEY_FLG_KEYID_MSK); 3178 STA_KEY_FLG_KEYID_MSK);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 200ab50ec86b..acb217e666db 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -616,7 +616,7 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
616 time_cmd.repeat = 1; 616 time_cmd.repeat = 1;
617 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | 617 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
618 TE_V2_NOTIF_HOST_EVENT_END | 618 TE_V2_NOTIF_HOST_EVENT_END |
619 T2_V2_START_IMMEDIATELY); 619 TE_V2_START_IMMEDIATELY);
620 620
621 if (!wait_for_notif) { 621 if (!wait_for_notif) {
622 iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 622 iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
@@ -803,7 +803,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
803 time_cmd.repeat = 1; 803 time_cmd.repeat = 1;
804 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | 804 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
805 TE_V2_NOTIF_HOST_EVENT_END | 805 TE_V2_NOTIF_HOST_EVENT_END |
806 T2_V2_START_IMMEDIATELY); 806 TE_V2_START_IMMEDIATELY);
807 807
808 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 808 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
809} 809}
@@ -913,6 +913,8 @@ int iwl_mvm_schedule_csa_period(struct iwl_mvm *mvm,
913 time_cmd.interval = cpu_to_le32(1); 913 time_cmd.interval = cpu_to_le32(1);
914 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START | 914 time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
915 TE_V2_ABSENCE); 915 TE_V2_ABSENCE);
916 if (!apply_time)
917 time_cmd.policy |= cpu_to_le16(TE_V2_START_IMMEDIATELY);
916 918
917 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd); 919 return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
918} 920}
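The time-event.c hunks correct a misspelled flag (T2_V2_START_IMMEDIATELY becomes TE_V2_START_IMMEDIATELY) and, for CSA scheduling, request an immediate start only when no apply time was supplied. A tiny sketch of that policy-mask construction, assuming invented bit values:

#include <stdint.h>
#include <stdio.h>

/* Invented bit positions; the real values live in the firmware API headers. */
#define TE_NOTIF_HOST_EVENT_START (1u << 0)
#define TE_NOTIF_HOST_EVENT_END   (1u << 1)
#define TE_ABSENCE                (1u << 2)
#define TE_START_IMMEDIATELY      (1u << 3)

/* CSA period: start immediately only when no apply_time was provided. */
static uint16_t csa_policy(uint32_t apply_time)
{
	uint16_t policy = TE_NOTIF_HOST_EVENT_START | TE_ABSENCE;

	if (!apply_time)
		policy |= TE_START_IMMEDIATELY;
	return policy;
}

int main(void)
{
	printf("scheduled: 0x%x, immediate: 0x%x\n",
	       csa_policy(1234), csa_policy(0));
	return 0;
}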
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index dda77b327c98..af6dfceab6b8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -419,11 +419,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
419{ 419{
420 struct ieee80211_key_conf *keyconf = info->control.hw_key; 420 struct ieee80211_key_conf *keyconf = info->control.hw_key;
421 u8 *crypto_hdr = skb_frag->data + hdrlen; 421 u8 *crypto_hdr = skb_frag->data + hdrlen;
422 enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
422 u64 pn; 423 u64 pn;
423 424
424 switch (keyconf->cipher) { 425 switch (keyconf->cipher) {
425 case WLAN_CIPHER_SUITE_CCMP: 426 case WLAN_CIPHER_SUITE_CCMP:
426 case WLAN_CIPHER_SUITE_CCMP_256:
427 iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd); 427 iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
428 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); 428 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
429 break; 429 break;
@@ -447,13 +447,16 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
447 break; 447 break;
448 case WLAN_CIPHER_SUITE_GCMP: 448 case WLAN_CIPHER_SUITE_GCMP:
449 case WLAN_CIPHER_SUITE_GCMP_256: 449 case WLAN_CIPHER_SUITE_GCMP_256:
450 type = TX_CMD_SEC_GCMP;
451 /* Fall through */
452 case WLAN_CIPHER_SUITE_CCMP_256:
450 /* TODO: Taking the key from the table might introduce a race 453 /* TODO: Taking the key from the table might introduce a race
451 * when PTK rekeying is done, having an old packets with a PN 454 * when PTK rekeying is done, having an old packets with a PN
452 * based on the old key but the message encrypted with a new 455 * based on the old key but the message encrypted with a new
453 * one. 456 * one.
454 * Need to handle this. 457 * Need to handle this.
455 */ 458 */
456 tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TX_CMD_SEC_KEY_FROM_TABLE; 459 tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
457 tx_cmd->key[0] = keyconf->hw_key_idx; 460 tx_cmd->key[0] = keyconf->hw_key_idx;
458 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr); 461 iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
459 break; 462 break;
@@ -645,7 +648,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
645 if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || 648 if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
646 info.control.vif->type == NL80211_IFTYPE_AP || 649 info.control.vif->type == NL80211_IFTYPE_AP ||
647 info.control.vif->type == NL80211_IFTYPE_ADHOC) { 650 info.control.vif->type == NL80211_IFTYPE_ADHOC) {
648 sta_id = mvmvif->bcast_sta.sta_id; 651 if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
652 sta_id = mvmvif->bcast_sta.sta_id;
653 else
654 sta_id = mvmvif->mcast_sta.sta_id;
655
649 queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, 656 queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
650 hdr->frame_control); 657 hdr->frame_control);
651 if (queue < 0) 658 if (queue < 0)
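In tx.c, CCMP-256 moves out of the plain CCMP case and joins GCMP/GCMP-256, so the key comes from the firmware key table while the security-control type still distinguishes CCM from GCM via the variable set before the fall-through. A compact sketch of that mapping, assuming placeholder constants rather than the real TX_CMD_SEC_* values:

#include <stdint.h>
#include <stdio.h>

/* Placeholder cipher ids and security-control bits. */
enum cipher { CCMP, CCMP_256, GCMP, GCMP_256 };

#define SEC_CCM            0x01
#define SEC_GCM            0x02
#define SEC_KEY_FROM_TABLE 0x08

static uint8_t sec_ctl_for(enum cipher c)
{
	uint8_t type = SEC_CCM;

	switch (c) {
	case CCMP:
		return SEC_CCM;			/* key carried in the TX command */
	case GCMP:
	case GCMP_256:
		type = SEC_GCM;
		/* fall through */
	case CCMP_256:
		return type | SEC_KEY_FROM_TABLE; /* key looked up by index */
	default:
		return 0;
	}
}

int main(void)
{
	printf("ccmp=0x%x ccmp256=0x%x gcmp=0x%x\n",
	       sec_ctl_for(CCMP), sec_ctl_for(CCMP_256), sec_ctl_for(GCMP));
	return 0;
}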
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 6d0a907d5ba5..fabae0f60683 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -147,7 +147,7 @@ static void iwl_pcie_gen2_tfd_unmap(struct iwl_trans *trans,
147 /* Sanity check on number of chunks */ 147 /* Sanity check on number of chunks */
148 num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd); 148 num_tbs = iwl_pcie_gen2_get_num_tbs(trans, tfd);
149 149
150 if (num_tbs >= trans_pcie->max_tbs) { 150 if (num_tbs > trans_pcie->max_tbs) {
151 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 151 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
152 return; 152 return;
153 } 153 }
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 3f85713c41dc..1a566287993d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -378,7 +378,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
378 /* Sanity check on number of chunks */ 378 /* Sanity check on number of chunks */
379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd); 379 num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);
380 380
381 if (num_tbs >= trans_pcie->max_tbs) { 381 if (num_tbs > trans_pcie->max_tbs) {
382 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 382 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
383 /* @todo issue fatal error, it is quite serious situation */ 383 /* @todo issue fatal error, it is quite serious situation */
384 return; 384 return;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6e0af815f25e..35b21f8152bb 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2727,6 +2727,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
2727 mutex_init(&data->mutex); 2727 mutex_init(&data->mutex);
2728 2728
2729 data->netgroup = hwsim_net_get_netgroup(net); 2729 data->netgroup = hwsim_net_get_netgroup(net);
2730 data->wmediumd = hwsim_net_get_wmediumd(net);
2730 2731
2731 /* Enable frame retransmissions for lossy channels */ 2732 /* Enable frame retransmissions for lossy channels */
2732 hw->max_rates = 4; 2733 hw->max_rates = 4;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
index f9ccd13c79f9..e7bbbc95cdb1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/hw.c
@@ -1125,7 +1125,8 @@ static void _rtl8723be_enable_aspm_back_door(struct ieee80211_hw *hw)
1125 1125
1126 /* Configuration Space offset 0x70f BIT7 is used to control L0S */ 1126 /* Configuration Space offset 0x70f BIT7 is used to control L0S */
1127 tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f); 1127 tmp8 = _rtl8723be_dbi_read(rtlpriv, 0x70f);
1128 _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7)); 1128 _rtl8723be_dbi_write(rtlpriv, 0x70f, tmp8 | BIT(7) |
1129 ASPM_L1_LATENCY << 3);
1129 1130
1130 /* Configuration Space offset 0x719 Bit3 is for L1 1131 /* Configuration Space offset 0x719 Bit3 is for L1
1131 * BIT4 is for clock request 1132 * BIT4 is for clock request
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 345acca576b3..1bd7b3734751 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -278,8 +278,6 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
278 disk->queue = q; 278 disk->queue = q;
279 disk->flags = GENHD_FL_EXT_DEVT; 279 disk->flags = GENHD_FL_EXT_DEVT;
280 nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name); 280 nvdimm_namespace_disk_name(&nsblk->common, disk->disk_name);
281 set_capacity(disk, 0);
282 device_add_disk(dev, disk);
283 281
284 if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk)) 282 if (devm_add_action_or_reset(dev, nd_blk_release_disk, disk))
285 return -ENOMEM; 283 return -ENOMEM;
@@ -292,6 +290,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk)
292 } 290 }
293 291
294 set_capacity(disk, available_disk_size >> SECTOR_SHIFT); 292 set_capacity(disk, available_disk_size >> SECTOR_SHIFT);
293 device_add_disk(dev, disk);
295 revalidate_disk(disk); 294 revalidate_disk(disk);
296 return 0; 295 return 0;
297} 296}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 2ef544f10ec8..4b95ac513de2 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1545,8 +1545,6 @@ static int btt_blk_init(struct btt *btt)
1545 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue); 1545 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue);
1546 btt->btt_queue->queuedata = btt; 1546 btt->btt_queue->queuedata = btt;
1547 1547
1548 set_capacity(btt->btt_disk, 0);
1549 device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
1550 if (btt_meta_size(btt)) { 1548 if (btt_meta_size(btt)) {
1551 int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt)); 1549 int rc = nd_integrity_init(btt->btt_disk, btt_meta_size(btt));
1552 1550
@@ -1558,6 +1556,7 @@ static int btt_blk_init(struct btt *btt)
1558 } 1556 }
1559 } 1557 }
1560 set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9); 1558 set_capacity(btt->btt_disk, btt->nlba * btt->sector_size >> 9);
1559 device_add_disk(&btt->nd_btt->dev, btt->btt_disk);
1561 btt->nd_btt->size = btt->nlba * (u64)btt->sector_size; 1560 btt->nd_btt->size = btt->nlba * (u64)btt->sector_size;
1562 revalidate_disk(btt->btt_disk); 1561 revalidate_disk(btt->btt_disk);
1563 1562
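Both nvdimm hunks move device_add_disk() after set_capacity(), so the disk is published only once its size is final instead of appearing with zero capacity and being resized later. A schematic sketch of that init-then-publish ordering, assuming stand-in functions rather than the kernel block API:

#include <stdio.h>

struct disk { unsigned long long capacity; int published; };

static void size_disk(struct disk *d, unsigned long long sectors)
{
	d->capacity = sectors;
}

/* Publishing makes the disk visible; its size should already be final. */
static void publish_disk(struct disk *d)
{
	d->published = 1;
	printf("published with %llu sectors\n", d->capacity);
}

int main(void)
{
	struct disk d = { 0, 0 };

	size_disk(&d, 1ull << 20);	/* size first ... */
	publish_disk(&d);		/* ... then publish */
	return 0;
}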
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index f5c4e8c6e29d..2f4d18752c97 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -304,7 +304,7 @@ static const struct attribute_group *nd_pfn_attribute_groups[] = {
304struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn, 304struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
305 struct nd_namespace_common *ndns) 305 struct nd_namespace_common *ndns)
306{ 306{
307 struct device *dev = &nd_pfn->dev; 307 struct device *dev;
308 308
309 if (!nd_pfn) 309 if (!nd_pfn)
310 return NULL; 310 return NULL;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index e6d01911e092..1593e1806b16 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -532,11 +532,13 @@ static ssize_t persistence_domain_show(struct device *dev,
532 struct device_attribute *attr, char *buf) 532 struct device_attribute *attr, char *buf)
533{ 533{
534 struct nd_region *nd_region = to_nd_region(dev); 534 struct nd_region *nd_region = to_nd_region(dev);
535 unsigned long flags = nd_region->flags;
536 535
537 return sprintf(buf, "%s%s\n", 536 if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
538 flags & BIT(ND_REGION_PERSIST_CACHE) ? "cpu_cache " : "", 537 return sprintf(buf, "cpu_cache\n");
539 flags & BIT(ND_REGION_PERSIST_MEMCTRL) ? "memory_controller " : ""); 538 else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
539 return sprintf(buf, "memory_controller\n");
540 else
541 return sprintf(buf, "\n");
540} 542}
541static DEVICE_ATTR_RO(persistence_domain); 543static DEVICE_ATTR_RO(persistence_domain);
542 544
@@ -593,6 +595,13 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
593 return 0; 595 return 0;
594 } 596 }
595 597
598 if (a == &dev_attr_persistence_domain.attr) {
599 if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
600 | BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
601 return 0;
602 return a->mode;
603 }
604
596 if (a != &dev_attr_set_cookie.attr 605 if (a != &dev_attr_set_cookie.attr
597 && a != &dev_attr_available_size.attr) 606 && a != &dev_attr_available_size.attr)
598 return a->mode; 607 return a->mode;
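The region_devs.c change makes persistence_domain report a single value, preferring cpu_cache over memory_controller and printing an empty line otherwise, and hides the attribute when neither persistence bit is set. A userspace sketch of that selection, assuming invented flag bits and using sprintf in place of the sysfs show callback:

#include <stdio.h>

/* Invented bit values; the kernel uses the ND_REGION_PERSIST_* flag bits. */
#define REGION_PERSIST_CACHE   (1ul << 0)
#define REGION_PERSIST_MEMCTRL (1ul << 1)

/* Mirrors the show() logic: one domain string, cpu_cache preferred. */
static int persistence_domain_show(unsigned long flags, char *buf)
{
	if (flags & REGION_PERSIST_CACHE)
		return sprintf(buf, "cpu_cache\n");
	else if (flags & REGION_PERSIST_MEMCTRL)
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}

/* Mirrors region_visible(): hide the attribute when neither bit is set. */
static int persistence_domain_visible(unsigned long flags)
{
	return (flags & (REGION_PERSIST_CACHE | REGION_PERSIST_MEMCTRL)) != 0;
}

int main(void)
{
	char buf[64];

	persistence_domain_show(REGION_PERSIST_CACHE | REGION_PERSIST_MEMCTRL, buf);
	printf("both set -> %s", buf);
	printf("no bits visible = %d\n", persistence_domain_visible(0));
	return 0;
}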
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 8b14bd326d4a..46d47bd6ca1f 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3908,6 +3908,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9230,
3908 quirk_dma_func1_alias); 3908 quirk_dma_func1_alias);
3909DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642, 3909DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0642,
3910 quirk_dma_func1_alias); 3910 quirk_dma_func1_alias);
3911DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TTI, 0x0645,
3912 quirk_dma_func1_alias);
3911/* https://bugs.gentoo.org/show_bug.cgi?id=497630 */ 3913/* https://bugs.gentoo.org/show_bug.cgi?id=497630 */
3912DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON, 3914DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JMICRON,
3913 PCI_DEVICE_ID_JMICRON_JMB388_ESD, 3915 PCI_DEVICE_ID_JMICRON_JMB388_ESD,
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs.c b/drivers/phy/qualcomm/phy-qcom-ufs.c
index c5ff4525edef..c5493ea51282 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs.c
+++ b/drivers/phy/qualcomm/phy-qcom-ufs.c
@@ -675,3 +675,8 @@ int ufs_qcom_phy_power_off(struct phy *generic_phy)
675 return 0; 675 return 0;
676} 676}
677EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off); 677EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
678
679MODULE_AUTHOR("Yaniv Gardi <ygardi@codeaurora.org>");
680MODULE_AUTHOR("Vivek Gautam <vivek.gautam@codeaurora.org>");
681MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY");
682MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
index c32399faff57..90c274490181 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
@@ -124,7 +124,7 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
124 EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c), 124 EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c),
125}; 125};
126 126
127const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = { 127static const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {
128 { 128 {
129 /* pin-controller instance 0 data */ 129 /* pin-controller instance 0 data */
130 .pin_banks = s5pv210_pin_bank, 130 .pin_banks = s5pv210_pin_bank,
@@ -137,6 +137,11 @@ const struct samsung_pin_ctrl s5pv210_pin_ctrl[] __initconst = {
137 }, 137 },
138}; 138};
139 139
140const struct samsung_pinctrl_of_match_data s5pv210_of_data __initconst = {
141 .ctrl = s5pv210_pin_ctrl,
142 .num_ctrl = ARRAY_SIZE(s5pv210_pin_ctrl),
143};
144
140/* Pad retention control code for accessing PMU regmap */ 145/* Pad retention control code for accessing PMU regmap */
141static atomic_t exynos_shared_retention_refcnt; 146static atomic_t exynos_shared_retention_refcnt;
142 147
@@ -199,7 +204,7 @@ static const struct samsung_retention_data exynos3250_retention_data __initconst
199 * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes 204 * Samsung pinctrl driver data for Exynos3250 SoC. Exynos3250 SoC includes
200 * two gpio/pin-mux/pinconfig controllers. 205 * two gpio/pin-mux/pinconfig controllers.
201 */ 206 */
202const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = { 207static const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {
203 { 208 {
204 /* pin-controller instance 0 data */ 209 /* pin-controller instance 0 data */
205 .pin_banks = exynos3250_pin_banks0, 210 .pin_banks = exynos3250_pin_banks0,
@@ -220,6 +225,11 @@ const struct samsung_pin_ctrl exynos3250_pin_ctrl[] __initconst = {
220 }, 225 },
221}; 226};
222 227
228const struct samsung_pinctrl_of_match_data exynos3250_of_data __initconst = {
229 .ctrl = exynos3250_pin_ctrl,
230 .num_ctrl = ARRAY_SIZE(exynos3250_pin_ctrl),
231};
232
223/* pin banks of exynos4210 pin-controller 0 */ 233/* pin banks of exynos4210 pin-controller 0 */
224static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = { 234static const struct samsung_pin_bank_data exynos4210_pin_banks0[] __initconst = {
225 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 235 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -303,7 +313,7 @@ static const struct samsung_retention_data exynos4_audio_retention_data __initco
303 * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes 313 * Samsung pinctrl driver data for Exynos4210 SoC. Exynos4210 SoC includes
304 * three gpio/pin-mux/pinconfig controllers. 314 * three gpio/pin-mux/pinconfig controllers.
305 */ 315 */
306const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = { 316static const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {
307 { 317 {
308 /* pin-controller instance 0 data */ 318 /* pin-controller instance 0 data */
309 .pin_banks = exynos4210_pin_banks0, 319 .pin_banks = exynos4210_pin_banks0,
@@ -329,6 +339,11 @@ const struct samsung_pin_ctrl exynos4210_pin_ctrl[] __initconst = {
329 }, 339 },
330}; 340};
331 341
342const struct samsung_pinctrl_of_match_data exynos4210_of_data __initconst = {
343 .ctrl = exynos4210_pin_ctrl,
344 .num_ctrl = ARRAY_SIZE(exynos4210_pin_ctrl),
345};
346
332/* pin banks of exynos4x12 pin-controller 0 */ 347/* pin banks of exynos4x12 pin-controller 0 */
333static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = { 348static const struct samsung_pin_bank_data exynos4x12_pin_banks0[] __initconst = {
334 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 349 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -391,7 +406,7 @@ static const struct samsung_pin_bank_data exynos4x12_pin_banks3[] __initconst =
391 * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes 406 * Samsung pinctrl driver data for Exynos4x12 SoC. Exynos4x12 SoC includes
392 * four gpio/pin-mux/pinconfig controllers. 407 * four gpio/pin-mux/pinconfig controllers.
393 */ 408 */
394const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = { 409static const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
395 { 410 {
396 /* pin-controller instance 0 data */ 411 /* pin-controller instance 0 data */
397 .pin_banks = exynos4x12_pin_banks0, 412 .pin_banks = exynos4x12_pin_banks0,
@@ -427,6 +442,11 @@ const struct samsung_pin_ctrl exynos4x12_pin_ctrl[] __initconst = {
427 }, 442 },
428}; 443};
429 444
445const struct samsung_pinctrl_of_match_data exynos4x12_of_data __initconst = {
446 .ctrl = exynos4x12_pin_ctrl,
447 .num_ctrl = ARRAY_SIZE(exynos4x12_pin_ctrl),
448};
449
430/* pin banks of exynos5250 pin-controller 0 */ 450/* pin banks of exynos5250 pin-controller 0 */
431static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = { 451static const struct samsung_pin_bank_data exynos5250_pin_banks0[] __initconst = {
432 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 452 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -487,7 +507,7 @@ static const struct samsung_pin_bank_data exynos5250_pin_banks3[] __initconst =
487 * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes 507 * Samsung pinctrl driver data for Exynos5250 SoC. Exynos5250 SoC includes
488 * four gpio/pin-mux/pinconfig controllers. 508 * four gpio/pin-mux/pinconfig controllers.
489 */ 509 */
490const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = { 510static const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
491 { 511 {
492 /* pin-controller instance 0 data */ 512 /* pin-controller instance 0 data */
493 .pin_banks = exynos5250_pin_banks0, 513 .pin_banks = exynos5250_pin_banks0,
@@ -523,6 +543,11 @@ const struct samsung_pin_ctrl exynos5250_pin_ctrl[] __initconst = {
523 }, 543 },
524}; 544};
525 545
546const struct samsung_pinctrl_of_match_data exynos5250_of_data __initconst = {
547 .ctrl = exynos5250_pin_ctrl,
548 .num_ctrl = ARRAY_SIZE(exynos5250_pin_ctrl),
549};
550
526/* pin banks of exynos5260 pin-controller 0 */ 551/* pin banks of exynos5260 pin-controller 0 */
527static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = { 552static const struct samsung_pin_bank_data exynos5260_pin_banks0[] __initconst = {
528 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00), 553 EXYNOS_PIN_BANK_EINTG(4, 0x000, "gpa0", 0x00),
@@ -567,7 +592,7 @@ static const struct samsung_pin_bank_data exynos5260_pin_banks2[] __initconst =
567 * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes 592 * Samsung pinctrl driver data for Exynos5260 SoC. Exynos5260 SoC includes
568 * three gpio/pin-mux/pinconfig controllers. 593 * three gpio/pin-mux/pinconfig controllers.
569 */ 594 */
570const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = { 595static const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = {
571 { 596 {
572 /* pin-controller instance 0 data */ 597 /* pin-controller instance 0 data */
573 .pin_banks = exynos5260_pin_banks0, 598 .pin_banks = exynos5260_pin_banks0,
@@ -587,6 +612,11 @@ const struct samsung_pin_ctrl exynos5260_pin_ctrl[] __initconst = {
587 }, 612 },
588}; 613};
589 614
615const struct samsung_pinctrl_of_match_data exynos5260_of_data __initconst = {
616 .ctrl = exynos5260_pin_ctrl,
617 .num_ctrl = ARRAY_SIZE(exynos5260_pin_ctrl),
618};
619
590/* pin banks of exynos5410 pin-controller 0 */ 620/* pin banks of exynos5410 pin-controller 0 */
591static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = { 621static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst = {
592 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00), 622 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
@@ -657,7 +687,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks3[] __initconst =
657 * Samsung pinctrl driver data for Exynos5410 SoC. Exynos5410 SoC includes 687 * Samsung pinctrl driver data for Exynos5410 SoC. Exynos5410 SoC includes
658 * four gpio/pin-mux/pinconfig controllers. 688 * four gpio/pin-mux/pinconfig controllers.
659 */ 689 */
660const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = { 690static const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = {
661 { 691 {
662 /* pin-controller instance 0 data */ 692 /* pin-controller instance 0 data */
663 .pin_banks = exynos5410_pin_banks0, 693 .pin_banks = exynos5410_pin_banks0,
@@ -690,6 +720,11 @@ const struct samsung_pin_ctrl exynos5410_pin_ctrl[] __initconst = {
690 }, 720 },
691}; 721};
692 722
723const struct samsung_pinctrl_of_match_data exynos5410_of_data __initconst = {
724 .ctrl = exynos5410_pin_ctrl,
725 .num_ctrl = ARRAY_SIZE(exynos5410_pin_ctrl),
726};
727
693/* pin banks of exynos5420 pin-controller 0 */ 728/* pin banks of exynos5420 pin-controller 0 */
694static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = { 729static const struct samsung_pin_bank_data exynos5420_pin_banks0[] __initconst = {
695 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00), 730 EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpy7", 0x00),
@@ -774,7 +809,7 @@ static const struct samsung_retention_data exynos5420_retention_data __initconst
774 * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes 809 * Samsung pinctrl driver data for Exynos5420 SoC. Exynos5420 SoC includes
775 * four gpio/pin-mux/pinconfig controllers. 810 * four gpio/pin-mux/pinconfig controllers.
776 */ 811 */
777const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = { 812static const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
778 { 813 {
779 /* pin-controller instance 0 data */ 814 /* pin-controller instance 0 data */
780 .pin_banks = exynos5420_pin_banks0, 815 .pin_banks = exynos5420_pin_banks0,
@@ -808,3 +843,8 @@ const struct samsung_pin_ctrl exynos5420_pin_ctrl[] __initconst = {
808 .retention_data = &exynos4_audio_retention_data, 843 .retention_data = &exynos4_audio_retention_data,
809 }, 844 },
810}; 845};
846
847const struct samsung_pinctrl_of_match_data exynos5420_of_data __initconst = {
848 .ctrl = exynos5420_pin_ctrl,
849 .num_ctrl = ARRAY_SIZE(exynos5420_pin_ctrl),
850};
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index fc8f7833bec0..71c9d1d9f345 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -175,7 +175,7 @@ static const struct samsung_retention_data exynos5433_fsys_retention_data __init
175 * Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes 175 * Samsung pinctrl driver data for Exynos5433 SoC. Exynos5433 SoC includes
176 * ten gpio/pin-mux/pinconfig controllers. 176 * ten gpio/pin-mux/pinconfig controllers.
177 */ 177 */
178const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = { 178static const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = {
179 { 179 {
180 /* pin-controller instance 0 data */ 180 /* pin-controller instance 0 data */
181 .pin_banks = exynos5433_pin_banks0, 181 .pin_banks = exynos5433_pin_banks0,
@@ -260,6 +260,11 @@ const struct samsung_pin_ctrl exynos5433_pin_ctrl[] __initconst = {
260 }, 260 },
261}; 261};
262 262
263const struct samsung_pinctrl_of_match_data exynos5433_of_data __initconst = {
264 .ctrl = exynos5433_pin_ctrl,
265 .num_ctrl = ARRAY_SIZE(exynos5433_pin_ctrl),
266};
267
263/* pin banks of exynos7 pin-controller - ALIVE */ 268/* pin banks of exynos7 pin-controller - ALIVE */
264static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = { 269static const struct samsung_pin_bank_data exynos7_pin_banks0[] __initconst = {
265 EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00), 270 EXYNOS_PIN_BANK_EINTW(8, 0x000, "gpa0", 0x00),
@@ -339,7 +344,7 @@ static const struct samsung_pin_bank_data exynos7_pin_banks9[] __initconst = {
339 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04), 344 EXYNOS_PIN_BANK_EINTG(4, 0x020, "gpz1", 0x04),
340}; 345};
341 346
342const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = { 347static const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = {
343 { 348 {
344 /* pin-controller instance 0 Alive data */ 349 /* pin-controller instance 0 Alive data */
345 .pin_banks = exynos7_pin_banks0, 350 .pin_banks = exynos7_pin_banks0,
@@ -392,3 +397,8 @@ const struct samsung_pin_ctrl exynos7_pin_ctrl[] __initconst = {
392 .eint_gpio_init = exynos_eint_gpio_init, 397 .eint_gpio_init = exynos_eint_gpio_init,
393 }, 398 },
394}; 399};
400
401const struct samsung_pinctrl_of_match_data exynos7_of_data __initconst = {
402 .ctrl = exynos7_pin_ctrl,
403 .num_ctrl = ARRAY_SIZE(exynos7_pin_ctrl),
404};
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 10187cb0e9b9..7e824e4d20f4 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -565,7 +565,7 @@ static const struct samsung_pin_bank_data s3c2412_pin_banks[] __initconst = {
565 PIN_BANK_2BIT(13, 0x080, "gpj"), 565 PIN_BANK_2BIT(13, 0x080, "gpj"),
566}; 566};
567 567
568const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = { 568static const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = {
569 { 569 {
570 .pin_banks = s3c2412_pin_banks, 570 .pin_banks = s3c2412_pin_banks,
571 .nr_banks = ARRAY_SIZE(s3c2412_pin_banks), 571 .nr_banks = ARRAY_SIZE(s3c2412_pin_banks),
@@ -573,6 +573,11 @@ const struct samsung_pin_ctrl s3c2412_pin_ctrl[] __initconst = {
573 }, 573 },
574}; 574};
575 575
576const struct samsung_pinctrl_of_match_data s3c2412_of_data __initconst = {
577 .ctrl = s3c2412_pin_ctrl,
578 .num_ctrl = ARRAY_SIZE(s3c2412_pin_ctrl),
579};
580
576static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = { 581static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = {
577 PIN_BANK_A(27, 0x000, "gpa"), 582 PIN_BANK_A(27, 0x000, "gpa"),
578 PIN_BANK_2BIT(11, 0x010, "gpb"), 583 PIN_BANK_2BIT(11, 0x010, "gpb"),
@@ -587,7 +592,7 @@ static const struct samsung_pin_bank_data s3c2416_pin_banks[] __initconst = {
587 PIN_BANK_2BIT(2, 0x100, "gpm"), 592 PIN_BANK_2BIT(2, 0x100, "gpm"),
588}; 593};
589 594
590const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = { 595static const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = {
591 { 596 {
592 .pin_banks = s3c2416_pin_banks, 597 .pin_banks = s3c2416_pin_banks,
593 .nr_banks = ARRAY_SIZE(s3c2416_pin_banks), 598 .nr_banks = ARRAY_SIZE(s3c2416_pin_banks),
@@ -595,6 +600,11 @@ const struct samsung_pin_ctrl s3c2416_pin_ctrl[] __initconst = {
595 }, 600 },
596}; 601};
597 602
603const struct samsung_pinctrl_of_match_data s3c2416_of_data __initconst = {
604 .ctrl = s3c2416_pin_ctrl,
605 .num_ctrl = ARRAY_SIZE(s3c2416_pin_ctrl),
606};
607
598static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = { 608static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = {
599 PIN_BANK_A(25, 0x000, "gpa"), 609 PIN_BANK_A(25, 0x000, "gpa"),
600 PIN_BANK_2BIT(11, 0x010, "gpb"), 610 PIN_BANK_2BIT(11, 0x010, "gpb"),
@@ -607,7 +617,7 @@ static const struct samsung_pin_bank_data s3c2440_pin_banks[] __initconst = {
607 PIN_BANK_2BIT(13, 0x0d0, "gpj"), 617 PIN_BANK_2BIT(13, 0x0d0, "gpj"),
608}; 618};
609 619
610const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = { 620static const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = {
611 { 621 {
612 .pin_banks = s3c2440_pin_banks, 622 .pin_banks = s3c2440_pin_banks,
613 .nr_banks = ARRAY_SIZE(s3c2440_pin_banks), 623 .nr_banks = ARRAY_SIZE(s3c2440_pin_banks),
@@ -615,6 +625,11 @@ const struct samsung_pin_ctrl s3c2440_pin_ctrl[] __initconst = {
615 }, 625 },
616}; 626};
617 627
628const struct samsung_pinctrl_of_match_data s3c2440_of_data __initconst = {
629 .ctrl = s3c2440_pin_ctrl,
630 .num_ctrl = ARRAY_SIZE(s3c2440_pin_ctrl),
631};
632
618static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = { 633static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = {
619 PIN_BANK_A(28, 0x000, "gpa"), 634 PIN_BANK_A(28, 0x000, "gpa"),
620 PIN_BANK_2BIT(11, 0x010, "gpb"), 635 PIN_BANK_2BIT(11, 0x010, "gpb"),
@@ -630,10 +645,15 @@ static const struct samsung_pin_bank_data s3c2450_pin_banks[] __initconst = {
630 PIN_BANK_2BIT(2, 0x100, "gpm"), 645 PIN_BANK_2BIT(2, 0x100, "gpm"),
631}; 646};
632 647
633const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = { 648static const struct samsung_pin_ctrl s3c2450_pin_ctrl[] __initconst = {
634 { 649 {
635 .pin_banks = s3c2450_pin_banks, 650 .pin_banks = s3c2450_pin_banks,
636 .nr_banks = ARRAY_SIZE(s3c2450_pin_banks), 651 .nr_banks = ARRAY_SIZE(s3c2450_pin_banks),
637 .eint_wkup_init = s3c24xx_eint_init, 652 .eint_wkup_init = s3c24xx_eint_init,
638 }, 653 },
639}; 654};
655
656const struct samsung_pinctrl_of_match_data s3c2450_of_data __initconst = {
657 .ctrl = s3c2450_pin_ctrl,
658 .num_ctrl = ARRAY_SIZE(s3c2450_pin_ctrl),
659};
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index 679628ac4b31..288e6567ceb1 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -789,7 +789,7 @@ static const struct samsung_pin_bank_data s3c64xx_pin_banks0[] __initconst = {
789 * Samsung pinctrl driver data for S3C64xx SoC. S3C64xx SoC includes 789 * Samsung pinctrl driver data for S3C64xx SoC. S3C64xx SoC includes
790 * one gpio/pin-mux/pinconfig controller. 790 * one gpio/pin-mux/pinconfig controller.
791 */ 791 */
792const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = { 792static const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = {
793 { 793 {
794 /* pin-controller instance 1 data */ 794 /* pin-controller instance 1 data */
795 .pin_banks = s3c64xx_pin_banks0, 795 .pin_banks = s3c64xx_pin_banks0,
@@ -798,3 +798,8 @@ const struct samsung_pin_ctrl s3c64xx_pin_ctrl[] __initconst = {
798 .eint_wkup_init = s3c64xx_eint_eint0_init, 798 .eint_wkup_init = s3c64xx_eint_eint0_init,
799 }, 799 },
800}; 800};
801
802const struct samsung_pinctrl_of_match_data s3c64xx_of_data __initconst = {
803 .ctrl = s3c64xx_pin_ctrl,
804 .num_ctrl = ARRAY_SIZE(s3c64xx_pin_ctrl),
805};
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index da58e4554137..336e88d7bdb9 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -942,12 +942,33 @@ static int samsung_gpiolib_register(struct platform_device *pdev,
942 return 0; 942 return 0;
943} 943}
944 944
945static const struct samsung_pin_ctrl *
946samsung_pinctrl_get_soc_data_for_of_alias(struct platform_device *pdev)
947{
948 struct device_node *node = pdev->dev.of_node;
949 const struct samsung_pinctrl_of_match_data *of_data;
950 int id;
951
952 id = of_alias_get_id(node, "pinctrl");
953 if (id < 0) {
954 dev_err(&pdev->dev, "failed to get alias id\n");
955 return NULL;
956 }
957
958 of_data = of_device_get_match_data(&pdev->dev);
959 if (id >= of_data->num_ctrl) {
960 dev_err(&pdev->dev, "invalid alias id %d\n", id);
961 return NULL;
962 }
963
964 return &(of_data->ctrl[id]);
965}
966
945/* retrieve the soc specific data */ 967/* retrieve the soc specific data */
946static const struct samsung_pin_ctrl * 968static const struct samsung_pin_ctrl *
947samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d, 969samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
948 struct platform_device *pdev) 970 struct platform_device *pdev)
949{ 971{
950 int id;
951 struct device_node *node = pdev->dev.of_node; 972 struct device_node *node = pdev->dev.of_node;
952 struct device_node *np; 973 struct device_node *np;
953 const struct samsung_pin_bank_data *bdata; 974 const struct samsung_pin_bank_data *bdata;
@@ -957,13 +978,9 @@ samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
957 void __iomem *virt_base[SAMSUNG_PINCTRL_NUM_RESOURCES]; 978 void __iomem *virt_base[SAMSUNG_PINCTRL_NUM_RESOURCES];
958 unsigned int i; 979 unsigned int i;
959 980
960 id = of_alias_get_id(node, "pinctrl"); 981 ctrl = samsung_pinctrl_get_soc_data_for_of_alias(pdev);
961 if (id < 0) { 982 if (!ctrl)
962 dev_err(&pdev->dev, "failed to get alias id\n");
963 return ERR_PTR(-ENOENT); 983 return ERR_PTR(-ENOENT);
964 }
965 ctrl = of_device_get_match_data(&pdev->dev);
966 ctrl += id;
967 984
968 d->suspend = ctrl->suspend; 985 d->suspend = ctrl->suspend;
969 d->resume = ctrl->resume; 986 d->resume = ctrl->resume;
@@ -1188,41 +1205,41 @@ static int __maybe_unused samsung_pinctrl_resume(struct device *dev)
1188static const struct of_device_id samsung_pinctrl_dt_match[] = { 1205static const struct of_device_id samsung_pinctrl_dt_match[] = {
1189#ifdef CONFIG_PINCTRL_EXYNOS_ARM 1206#ifdef CONFIG_PINCTRL_EXYNOS_ARM
1190 { .compatible = "samsung,exynos3250-pinctrl", 1207 { .compatible = "samsung,exynos3250-pinctrl",
1191 .data = exynos3250_pin_ctrl }, 1208 .data = &exynos3250_of_data },
1192 { .compatible = "samsung,exynos4210-pinctrl", 1209 { .compatible = "samsung,exynos4210-pinctrl",
1193 .data = exynos4210_pin_ctrl }, 1210 .data = &exynos4210_of_data },
1194 { .compatible = "samsung,exynos4x12-pinctrl", 1211 { .compatible = "samsung,exynos4x12-pinctrl",
1195 .data = exynos4x12_pin_ctrl }, 1212 .data = &exynos4x12_of_data },
1196 { .compatible = "samsung,exynos5250-pinctrl", 1213 { .compatible = "samsung,exynos5250-pinctrl",
1197 .data = exynos5250_pin_ctrl }, 1214 .data = &exynos5250_of_data },
1198 { .compatible = "samsung,exynos5260-pinctrl", 1215 { .compatible = "samsung,exynos5260-pinctrl",
1199 .data = exynos5260_pin_ctrl }, 1216 .data = &exynos5260_of_data },
1200 { .compatible = "samsung,exynos5410-pinctrl", 1217 { .compatible = "samsung,exynos5410-pinctrl",
1201 .data = exynos5410_pin_ctrl }, 1218 .data = &exynos5410_of_data },
1202 { .compatible = "samsung,exynos5420-pinctrl", 1219 { .compatible = "samsung,exynos5420-pinctrl",
1203 .data = exynos5420_pin_ctrl }, 1220 .data = &exynos5420_of_data },
1204 { .compatible = "samsung,s5pv210-pinctrl", 1221 { .compatible = "samsung,s5pv210-pinctrl",
1205 .data = s5pv210_pin_ctrl }, 1222 .data = &s5pv210_of_data },
1206#endif 1223#endif
1207#ifdef CONFIG_PINCTRL_EXYNOS_ARM64 1224#ifdef CONFIG_PINCTRL_EXYNOS_ARM64
1208 { .compatible = "samsung,exynos5433-pinctrl", 1225 { .compatible = "samsung,exynos5433-pinctrl",
1209 .data = exynos5433_pin_ctrl }, 1226 .data = &exynos5433_of_data },
1210 { .compatible = "samsung,exynos7-pinctrl", 1227 { .compatible = "samsung,exynos7-pinctrl",
1211 .data = exynos7_pin_ctrl }, 1228 .data = &exynos7_of_data },
1212#endif 1229#endif
1213#ifdef CONFIG_PINCTRL_S3C64XX 1230#ifdef CONFIG_PINCTRL_S3C64XX
1214 { .compatible = "samsung,s3c64xx-pinctrl", 1231 { .compatible = "samsung,s3c64xx-pinctrl",
1215 .data = s3c64xx_pin_ctrl }, 1232 .data = &s3c64xx_of_data },
1216#endif 1233#endif
1217#ifdef CONFIG_PINCTRL_S3C24XX 1234#ifdef CONFIG_PINCTRL_S3C24XX
1218 { .compatible = "samsung,s3c2412-pinctrl", 1235 { .compatible = "samsung,s3c2412-pinctrl",
1219 .data = s3c2412_pin_ctrl }, 1236 .data = &s3c2412_of_data },
1220 { .compatible = "samsung,s3c2416-pinctrl", 1237 { .compatible = "samsung,s3c2416-pinctrl",
1221 .data = s3c2416_pin_ctrl }, 1238 .data = &s3c2416_of_data },
1222 { .compatible = "samsung,s3c2440-pinctrl", 1239 { .compatible = "samsung,s3c2440-pinctrl",
1223 .data = s3c2440_pin_ctrl }, 1240 .data = &s3c2440_of_data },
1224 { .compatible = "samsung,s3c2450-pinctrl", 1241 { .compatible = "samsung,s3c2450-pinctrl",
1225 .data = s3c2450_pin_ctrl }, 1242 .data = &s3c2450_of_data },
1226#endif 1243#endif
1227 {}, 1244 {},
1228}; 1245};
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index e204f609823b..f0cda9424dfe 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -282,6 +282,16 @@ struct samsung_pinctrl_drv_data {
282}; 282};
283 283
284/** 284/**
285 * struct samsung_pinctrl_of_match_data: OF match device specific configuration data.
286 * @ctrl: array of pin controller data.
287 * @num_ctrl: size of array @ctrl.
288 */
289struct samsung_pinctrl_of_match_data {
290 const struct samsung_pin_ctrl *ctrl;
291 unsigned int num_ctrl;
292};
293
294/**
285 * struct samsung_pin_group: represent group of pins of a pinmux function. 295 * struct samsung_pin_group: represent group of pins of a pinmux function.
286 * @name: name of the pin group, used to lookup the group. 296 * @name: name of the pin group, used to lookup the group.
287 * @pins: the pins included in this group. 297 * @pins: the pins included in this group.
@@ -309,20 +319,20 @@ struct samsung_pmx_func {
309}; 319};
310 320
311/* list of all exported SoC specific data */ 321/* list of all exported SoC specific data */
312extern const struct samsung_pin_ctrl exynos3250_pin_ctrl[]; 322extern const struct samsung_pinctrl_of_match_data exynos3250_of_data;
313extern const struct samsung_pin_ctrl exynos4210_pin_ctrl[]; 323extern const struct samsung_pinctrl_of_match_data exynos4210_of_data;
314extern const struct samsung_pin_ctrl exynos4x12_pin_ctrl[]; 324extern const struct samsung_pinctrl_of_match_data exynos4x12_of_data;
315extern const struct samsung_pin_ctrl exynos5250_pin_ctrl[]; 325extern const struct samsung_pinctrl_of_match_data exynos5250_of_data;
316extern const struct samsung_pin_ctrl exynos5260_pin_ctrl[]; 326extern const struct samsung_pinctrl_of_match_data exynos5260_of_data;
317extern const struct samsung_pin_ctrl exynos5410_pin_ctrl[]; 327extern const struct samsung_pinctrl_of_match_data exynos5410_of_data;
318extern const struct samsung_pin_ctrl exynos5420_pin_ctrl[]; 328extern const struct samsung_pinctrl_of_match_data exynos5420_of_data;
319extern const struct samsung_pin_ctrl exynos5433_pin_ctrl[]; 329extern const struct samsung_pinctrl_of_match_data exynos5433_of_data;
320extern const struct samsung_pin_ctrl exynos7_pin_ctrl[]; 330extern const struct samsung_pinctrl_of_match_data exynos7_of_data;
321extern const struct samsung_pin_ctrl s3c64xx_pin_ctrl[]; 331extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data;
322extern const struct samsung_pin_ctrl s3c2412_pin_ctrl[]; 332extern const struct samsung_pinctrl_of_match_data s3c2412_of_data;
323extern const struct samsung_pin_ctrl s3c2416_pin_ctrl[]; 333extern const struct samsung_pinctrl_of_match_data s3c2416_of_data;
324extern const struct samsung_pin_ctrl s3c2440_pin_ctrl[]; 334extern const struct samsung_pinctrl_of_match_data s3c2440_of_data;
325extern const struct samsung_pin_ctrl s3c2450_pin_ctrl[]; 335extern const struct samsung_pinctrl_of_match_data s3c2450_of_data;
326extern const struct samsung_pin_ctrl s5pv210_pin_ctrl[]; 336extern const struct samsung_pinctrl_of_match_data s5pv210_of_data;
327 337
328#endif /* __PINCTRL_SAMSUNG_H */ 338#endif /* __PINCTRL_SAMSUNG_H */
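Taken together, the two samsung hunks above stop treating the OF match data as a bare array indexed by the "pinctrl" alias and instead wrap it in samsung_pinctrl_of_match_data, which carries the array length so the probe path can reject an out-of-range alias. A minimal sketch of the resulting lookup, illustrative only — my_of_match_data and my_ctrl are hypothetical stand-ins for the samsung types, and struct my_ctrl (the per-controller data) is assumed to be defined elsewhere:

	struct my_of_match_data {
		const struct my_ctrl *ctrl;	/* per-controller array */
		unsigned int num_ctrl;		/* its length */
	};

	static const struct my_ctrl *my_get_ctrl(struct platform_device *pdev)
	{
		const struct my_of_match_data *match;
		int id;

		match = of_device_get_match_data(&pdev->dev);
		id = of_alias_get_id(pdev->dev.of_node, "pinctrl");
		if (id < 0 || id >= match->num_ctrl)
			return NULL;	/* out-of-range alias no longer walks past the array */
		return &match->ctrl[id];
	}

The bounds check is exactly what the old "ctrl += id" pointer arithmetic could not provide.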
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
index 18aeee592fdc..35951e7b89d2 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7795.c
@@ -1538,7 +1538,6 @@ static const struct sh_pfc_pin pinmux_pins[] = {
1538 SH_PFC_PIN_NAMED_CFG('B', 18, AVB_TD1, CFG_FLAGS), 1538 SH_PFC_PIN_NAMED_CFG('B', 18, AVB_TD1, CFG_FLAGS),
1539 SH_PFC_PIN_NAMED_CFG('B', 19, AVB_RXC, CFG_FLAGS), 1539 SH_PFC_PIN_NAMED_CFG('B', 19, AVB_RXC, CFG_FLAGS),
1540 SH_PFC_PIN_NAMED_CFG('C', 1, PRESETOUT#, CFG_FLAGS), 1540 SH_PFC_PIN_NAMED_CFG('C', 1, PRESETOUT#, CFG_FLAGS),
1541 SH_PFC_PIN_NAMED_CFG('F', 1, CLKOUT, CFG_FLAGS),
1542 SH_PFC_PIN_NAMED_CFG('H', 37, MLB_REF, CFG_FLAGS), 1541 SH_PFC_PIN_NAMED_CFG('H', 37, MLB_REF, CFG_FLAGS),
1543 SH_PFC_PIN_NAMED_CFG('V', 3, QSPI1_SPCLK, CFG_FLAGS), 1542 SH_PFC_PIN_NAMED_CFG('V', 3, QSPI1_SPCLK, CFG_FLAGS),
1544 SH_PFC_PIN_NAMED_CFG('V', 5, QSPI1_SSL, CFG_FLAGS), 1543 SH_PFC_PIN_NAMED_CFG('V', 5, QSPI1_SSL, CFG_FLAGS),
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d10ffe51da24..51ebc5a6053f 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -106,13 +106,14 @@ config ASUS_LAPTOP
106 If you have an ACPI-compatible ASUS laptop, say Y or M here. 106 If you have an ACPI-compatible ASUS laptop, say Y or M here.
107 107
108# 108#
109# If the DELL_SMBIOS_SMM feature is enabled, the DELL_SMBIOS driver 109# The DELL_SMBIOS driver depends on ACPI_WMI and/or DCDBAS if those
110# becomes dependent on the DCDBAS driver. The "depends" line prevents a 110# backends are selected. The "depends" line prevents a configuration
111# configuration where DELL_SMBIOS=y while DCDBAS=m. 111# where DELL_SMBIOS=y while either of those dependencies =m.
112# 112#
113config DELL_SMBIOS 113config DELL_SMBIOS
114 tristate "Dell SMBIOS driver" 114 tristate "Dell SMBIOS driver"
115 depends on DCDBAS || DCDBAS=n 115 depends on DCDBAS || DCDBAS=n
116 depends on ACPI_WMI || ACPI_WMI=n
116 ---help--- 117 ---help---
117 This provides support for the Dell SMBIOS calling interface. 118 This provides support for the Dell SMBIOS calling interface.
118 If you have a Dell computer you should enable this option. 119 If you have a Dell computer you should enable this option.
diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell-smbios-base.c
index 5bcf8a18f785..2485c80a9fdd 100644
--- a/drivers/platform/x86/dell-smbios-base.c
+++ b/drivers/platform/x86/dell-smbios-base.c
@@ -637,7 +637,7 @@ static void __exit dell_smbios_exit(void)
637 mutex_unlock(&smbios_mutex); 637 mutex_unlock(&smbios_mutex);
638} 638}
639 639
640subsys_initcall(dell_smbios_init); 640module_init(dell_smbios_init);
641module_exit(dell_smbios_exit); 641module_exit(dell_smbios_exit);
642 642
643MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>"); 643MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 2c9927430d85..8d102195a392 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -714,7 +714,7 @@ static int __init dell_wmi_init(void)
714 714
715 return wmi_driver_register(&dell_wmi_driver); 715 return wmi_driver_register(&dell_wmi_driver);
716} 716}
717module_init(dell_wmi_init); 717late_initcall(dell_wmi_init);
718 718
719static void __exit dell_wmi_exit(void) 719static void __exit dell_wmi_exit(void)
720{ 720{
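The two Dell hunks adjust initialization order rather than logic: dell-smbios-base drops from subsys_initcall() back to module_init(), and dell-wmi moves from module_init() to late_initcall(), which — in built-in configurations, where initcalls run strictly by level — keeps the WMI consumer initializing after the SMBIOS backends have registered. A hedged sketch of that idiom with hypothetical provider/consumer names (as modules, load order is instead handled by symbol dependencies):

	static int __init provider_init(void)
	{
		return register_backend();	/* device level: module_init() when built in */
	}
	module_init(provider_init);

	static int __init consumer_init(void)
	{
		return probe_against_backend();	/* late_initcall: runs after all device-level initcalls */
	}
	late_initcall(consumer_init);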
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c8b308cfabf1..3653bea38470 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -527,8 +527,7 @@ static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
527 queue == card->qdio.no_in_queues - 1; 527 queue == card->qdio.no_in_queues - 1;
528} 528}
529 529
530 530static int __qeth_issue_next_read(struct qeth_card *card)
531static int qeth_issue_next_read(struct qeth_card *card)
532{ 531{
533 int rc; 532 int rc;
534 struct qeth_cmd_buffer *iob; 533 struct qeth_cmd_buffer *iob;
@@ -559,6 +558,17 @@ static int qeth_issue_next_read(struct qeth_card *card)
559 return rc; 558 return rc;
560} 559}
561 560
561static int qeth_issue_next_read(struct qeth_card *card)
562{
563 int ret;
564
565 spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
566 ret = __qeth_issue_next_read(card);
567 spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
568
569 return ret;
570}
571
562static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card) 572static struct qeth_reply *qeth_alloc_reply(struct qeth_card *card)
563{ 573{
564 struct qeth_reply *reply; 574 struct qeth_reply *reply;
@@ -960,7 +970,7 @@ void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
960 spin_lock_irqsave(&card->thread_mask_lock, flags); 970 spin_lock_irqsave(&card->thread_mask_lock, flags);
961 card->thread_running_mask &= ~thread; 971 card->thread_running_mask &= ~thread;
962 spin_unlock_irqrestore(&card->thread_mask_lock, flags); 972 spin_unlock_irqrestore(&card->thread_mask_lock, flags);
963 wake_up(&card->wait_q); 973 wake_up_all(&card->wait_q);
964} 974}
965EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit); 975EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
966 976
@@ -1164,6 +1174,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1164 } 1174 }
1165 rc = qeth_get_problem(cdev, irb); 1175 rc = qeth_get_problem(cdev, irb);
1166 if (rc) { 1176 if (rc) {
1177 card->read_or_write_problem = 1;
1167 qeth_clear_ipacmd_list(card); 1178 qeth_clear_ipacmd_list(card);
1168 qeth_schedule_recovery(card); 1179 qeth_schedule_recovery(card);
1169 goto out; 1180 goto out;
@@ -1182,7 +1193,7 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
1182 return; 1193 return;
1183 if (channel == &card->read && 1194 if (channel == &card->read &&
1184 channel->state == CH_STATE_UP) 1195 channel->state == CH_STATE_UP)
1185 qeth_issue_next_read(card); 1196 __qeth_issue_next_read(card);
1186 1197
1187 iob = channel->iob; 1198 iob = channel->iob;
1188 index = channel->buf_no; 1199 index = channel->buf_no;
@@ -5087,8 +5098,6 @@ static void qeth_core_free_card(struct qeth_card *card)
5087 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *)); 5098 QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
5088 qeth_clean_channel(&card->read); 5099 qeth_clean_channel(&card->read);
5089 qeth_clean_channel(&card->write); 5100 qeth_clean_channel(&card->write);
5090 if (card->dev)
5091 free_netdev(card->dev);
5092 qeth_free_qdio_buffers(card); 5101 qeth_free_qdio_buffers(card);
5093 unregister_service_level(&card->qeth_service_level); 5102 unregister_service_level(&card->qeth_service_level);
5094 kfree(card); 5103 kfree(card);
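The qeth change above is the usual locked-wrapper split: __qeth_issue_next_read() now assumes the ccw device lock is already held (so the IRQ handler, which holds it, calls it directly), while qeth_issue_next_read() takes and releases the lock for everyone else. The same discipline reduced to a sketch with hypothetical names:

	/* Caller must hold card->lock. */
	static int __issue_next_read(struct my_card *card)
	{
		return start_read(card);
	}

	static int issue_next_read(struct my_card *card)
	{
		int ret;

		spin_lock_irq(&card->lock);
		ret = __issue_next_read(card);
		spin_unlock_irq(&card->lock);

		return ret;
	}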
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 7f236440483f..5ef4c978ad19 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -915,8 +915,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
915 qeth_l2_set_offline(cgdev); 915 qeth_l2_set_offline(cgdev);
916 916
917 if (card->dev) { 917 if (card->dev) {
918 netif_napi_del(&card->napi);
919 unregister_netdev(card->dev); 918 unregister_netdev(card->dev);
919 free_netdev(card->dev);
920 card->dev = NULL; 920 card->dev = NULL;
921 } 921 }
922 return; 922 return;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 962a04b68dd2..b6b12220da71 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2865,8 +2865,8 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
2865 qeth_l3_set_offline(cgdev); 2865 qeth_l3_set_offline(cgdev);
2866 2866
2867 if (card->dev) { 2867 if (card->dev) {
2868 netif_napi_del(&card->napi);
2869 unregister_netdev(card->dev); 2868 unregister_netdev(card->dev);
2869 free_netdev(card->dev);
2870 card->dev = NULL; 2870 card->dev = NULL;
2871 } 2871 }
2872 2872
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 6de9681ace82..ceab5e5c41c2 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -223,6 +223,7 @@ out_done:
223static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) 223static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
224{ 224{
225 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host); 225 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
226 struct domain_device *dev = cmd_to_domain_dev(cmd);
226 struct sas_task *task = TO_SAS_TASK(cmd); 227 struct sas_task *task = TO_SAS_TASK(cmd);
227 228
228 /* At this point, we only get called following an actual abort 229 /* At this point, we only get called following an actual abort
@@ -231,6 +232,14 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
231 */ 232 */
232 sas_end_task(cmd, task); 233 sas_end_task(cmd, task);
233 234
235 if (dev_is_sata(dev)) {
236 /* defer commands to libata so that libata EH can
237 * handle ata qcs correctly
238 */
239 list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
240 return;
241 }
242
234 /* now finish the command and move it on to the error 243 /* now finish the command and move it on to the error
235 * handler done list, this also takes it off the 244 * handler done list, this also takes it off the
236 * error handler pending list. 245 * error handler pending list.
@@ -238,22 +247,6 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
238 scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q); 247 scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
239} 248}
240 249
241static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
242{
243 struct domain_device *dev = cmd_to_domain_dev(cmd);
244 struct sas_ha_struct *ha = dev->port->ha;
245 struct sas_task *task = TO_SAS_TASK(cmd);
246
247 if (!dev_is_sata(dev)) {
248 sas_eh_finish_cmd(cmd);
249 return;
250 }
251
252 /* report the timeout to libata */
253 sas_end_task(cmd, task);
254 list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
255}
256
257static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd) 250static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
258{ 251{
259 struct scsi_cmnd *cmd, *n; 252 struct scsi_cmnd *cmd, *n;
@@ -261,7 +254,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd
261 list_for_each_entry_safe(cmd, n, error_q, eh_entry) { 254 list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
262 if (cmd->device->sdev_target == my_cmd->device->sdev_target && 255 if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
263 cmd->device->lun == my_cmd->device->lun) 256 cmd->device->lun == my_cmd->device->lun)
264 sas_eh_defer_cmd(cmd); 257 sas_eh_finish_cmd(cmd);
265 } 258 }
266} 259}
267 260
@@ -631,12 +624,12 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
631 case TASK_IS_DONE: 624 case TASK_IS_DONE:
632 SAS_DPRINTK("%s: task 0x%p is done\n", __func__, 625 SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
633 task); 626 task);
634 sas_eh_defer_cmd(cmd); 627 sas_eh_finish_cmd(cmd);
635 continue; 628 continue;
636 case TASK_IS_ABORTED: 629 case TASK_IS_ABORTED:
637 SAS_DPRINTK("%s: task 0x%p is aborted\n", 630 SAS_DPRINTK("%s: task 0x%p is aborted\n",
638 __func__, task); 631 __func__, task);
639 sas_eh_defer_cmd(cmd); 632 sas_eh_finish_cmd(cmd);
640 continue; 633 continue;
641 case TASK_IS_AT_LU: 634 case TASK_IS_AT_LU:
642 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); 635 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
@@ -647,7 +640,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
647 "recovered\n", 640 "recovered\n",
648 SAS_ADDR(task->dev), 641 SAS_ADDR(task->dev),
649 cmd->device->lun); 642 cmd->device->lun);
650 sas_eh_defer_cmd(cmd); 643 sas_eh_finish_cmd(cmd);
651 sas_scsi_clear_queue_lu(work_q, cmd); 644 sas_scsi_clear_queue_lu(work_q, cmd);
652 goto Again; 645 goto Again;
653 } 646 }
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index c2ea13c7e37e..a1cb0236c550 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -10558,7 +10558,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
10558 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), 10558 snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
10559 "fw_event_%s%d", ioc->driver_name, ioc->id); 10559 "fw_event_%s%d", ioc->driver_name, ioc->id);
10560 ioc->firmware_event_thread = alloc_ordered_workqueue( 10560 ioc->firmware_event_thread = alloc_ordered_workqueue(
10561 ioc->firmware_event_name, WQ_MEM_RECLAIM); 10561 ioc->firmware_event_name, 0);
10562 if (!ioc->firmware_event_thread) { 10562 if (!ioc->firmware_event_thread) {
10563 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", 10563 pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
10564 ioc->name, __FILE__, __LINE__, __func__); 10564 ioc->name, __FILE__, __LINE__, __func__);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 00329dda6179..8d7fab3cd01d 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1719,7 +1719,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
1719 1719
1720 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map); 1720 set_bit(ea->fcport->loop_id, vha->hw->loop_id_map);
1721 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); 1721 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
1722 ea->fcport->loop_id = FC_NO_LOOP_ID;
1723 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset; 1722 ea->fcport->chip_reset = vha->hw->base_qpair->chip_reset;
1724 ea->fcport->logout_on_delete = 1; 1723 ea->fcport->logout_on_delete = 1;
1725 ea->fcport->send_els_logo = 0; 1724 ea->fcport->send_els_logo = 0;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 285911e81728..5c5dcca4d1da 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -454,7 +454,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
454 ha->req_q_map[0] = req; 454 ha->req_q_map[0] = req;
455 set_bit(0, ha->rsp_qid_map); 455 set_bit(0, ha->rsp_qid_map);
456 set_bit(0, ha->req_qid_map); 456 set_bit(0, ha->req_qid_map);
457 return 1; 457 return 0;
458 458
459fail_qpair_map: 459fail_qpair_map:
460 kfree(ha->base_qpair); 460 kfree(ha->base_qpair);
@@ -471,6 +471,9 @@ fail_req_map:
471 471
472static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req) 472static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
473{ 473{
474 if (!ha->req_q_map)
475 return;
476
474 if (IS_QLAFX00(ha)) { 477 if (IS_QLAFX00(ha)) {
475 if (req && req->ring_fx00) 478 if (req && req->ring_fx00)
476 dma_free_coherent(&ha->pdev->dev, 479 dma_free_coherent(&ha->pdev->dev,
@@ -481,14 +484,17 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
481 (req->length + 1) * sizeof(request_t), 484 (req->length + 1) * sizeof(request_t),
482 req->ring, req->dma); 485 req->ring, req->dma);
483 486
484 if (req) 487 if (req) {
485 kfree(req->outstanding_cmds); 488 kfree(req->outstanding_cmds);
486 489 kfree(req);
487 kfree(req); 490 }
488} 491}
489 492
490static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp) 493static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
491{ 494{
495 if (!ha->rsp_q_map)
496 return;
497
492 if (IS_QLAFX00(ha)) { 498 if (IS_QLAFX00(ha)) {
493 if (rsp && rsp->ring) 499 if (rsp && rsp->ring)
494 dma_free_coherent(&ha->pdev->dev, 500 dma_free_coherent(&ha->pdev->dev,
@@ -499,7 +505,8 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
499 (rsp->length + 1) * sizeof(response_t), 505 (rsp->length + 1) * sizeof(response_t),
500 rsp->ring, rsp->dma); 506 rsp->ring, rsp->dma);
501 } 507 }
502 kfree(rsp); 508 if (rsp)
509 kfree(rsp);
503} 510}
504 511
505static void qla2x00_free_queues(struct qla_hw_data *ha) 512static void qla2x00_free_queues(struct qla_hw_data *ha)
@@ -1723,6 +1730,8 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
1723 struct qla_tgt_cmd *cmd; 1730 struct qla_tgt_cmd *cmd;
1724 uint8_t trace = 0; 1731 uint8_t trace = 0;
1725 1732
1733 if (!ha->req_q_map)
1734 return;
1726 spin_lock_irqsave(qp->qp_lock_ptr, flags); 1735 spin_lock_irqsave(qp->qp_lock_ptr, flags);
1727 req = qp->req; 1736 req = qp->req;
1728 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { 1737 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
@@ -3095,14 +3104,14 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3095 /* Set up the irqs */ 3104 /* Set up the irqs */
3096 ret = qla2x00_request_irqs(ha, rsp); 3105 ret = qla2x00_request_irqs(ha, rsp);
3097 if (ret) 3106 if (ret)
3098 goto probe_hw_failed; 3107 goto probe_failed;
3099 3108
3100 /* Alloc arrays of request and response ring ptrs */ 3109 /* Alloc arrays of request and response ring ptrs */
3101 if (!qla2x00_alloc_queues(ha, req, rsp)) { 3110 if (qla2x00_alloc_queues(ha, req, rsp)) {
3102 ql_log(ql_log_fatal, base_vha, 0x003d, 3111 ql_log(ql_log_fatal, base_vha, 0x003d,
3103 "Failed to allocate memory for queue pointers..." 3112 "Failed to allocate memory for queue pointers..."
3104 "aborting.\n"); 3113 "aborting.\n");
3105 goto probe_init_failed; 3114 goto probe_failed;
3106 } 3115 }
3107 3116
3108 if (ha->mqenable && shost_use_blk_mq(host)) { 3117 if (ha->mqenable && shost_use_blk_mq(host)) {
@@ -3387,15 +3396,6 @@ skip_dpc:
3387 3396
3388 return 0; 3397 return 0;
3389 3398
3390probe_init_failed:
3391 qla2x00_free_req_que(ha, req);
3392 ha->req_q_map[0] = NULL;
3393 clear_bit(0, ha->req_qid_map);
3394 qla2x00_free_rsp_que(ha, rsp);
3395 ha->rsp_q_map[0] = NULL;
3396 clear_bit(0, ha->rsp_qid_map);
3397 ha->max_req_queues = ha->max_rsp_queues = 0;
3398
3399probe_failed: 3399probe_failed:
3400 if (base_vha->timer_active) 3400 if (base_vha->timer_active)
3401 qla2x00_stop_timer(base_vha); 3401 qla2x00_stop_timer(base_vha);
@@ -4508,11 +4508,17 @@ qla2x00_mem_free(struct qla_hw_data *ha)
4508 if (ha->init_cb) 4508 if (ha->init_cb)
4509 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 4509 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
4510 ha->init_cb, ha->init_cb_dma); 4510 ha->init_cb, ha->init_cb_dma);
4511 vfree(ha->optrom_buffer); 4511
4512 kfree(ha->nvram); 4512 if (ha->optrom_buffer)
4513 kfree(ha->npiv_info); 4513 vfree(ha->optrom_buffer);
4514 kfree(ha->swl); 4514 if (ha->nvram)
4515 kfree(ha->loop_id_map); 4515 kfree(ha->nvram);
4516 if (ha->npiv_info)
4517 kfree(ha->npiv_info);
4518 if (ha->swl)
4519 kfree(ha->swl);
4520 if (ha->loop_id_map)
4521 kfree(ha->loop_id_map);
4516 4522
4517 ha->srb_mempool = NULL; 4523 ha->srb_mempool = NULL;
4518 ha->ctx_mempool = NULL; 4524 ha->ctx_mempool = NULL;
@@ -4528,6 +4534,15 @@ qla2x00_mem_free(struct qla_hw_data *ha)
4528 ha->ex_init_cb_dma = 0; 4534 ha->ex_init_cb_dma = 0;
4529 ha->async_pd = NULL; 4535 ha->async_pd = NULL;
4530 ha->async_pd_dma = 0; 4536 ha->async_pd_dma = 0;
4537 ha->loop_id_map = NULL;
4538 ha->npiv_info = NULL;
4539 ha->optrom_buffer = NULL;
4540 ha->swl = NULL;
4541 ha->nvram = NULL;
4542 ha->mctp_dump = NULL;
4543 ha->dcbx_tlv = NULL;
4544 ha->xgmac_data = NULL;
4545 ha->sfp_data = NULL;
4531 4546
4532 ha->s_dma_pool = NULL; 4547 ha->s_dma_pool = NULL;
4533 ha->dl_dma_pool = NULL; 4548 ha->dl_dma_pool = NULL;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bff21e636ddd..3541caf3fceb 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2595,6 +2595,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
2595 int res; 2595 int res;
2596 struct scsi_device *sdp = sdkp->device; 2596 struct scsi_device *sdp = sdkp->device;
2597 struct scsi_mode_data data; 2597 struct scsi_mode_data data;
2598 int disk_ro = get_disk_ro(sdkp->disk);
2598 int old_wp = sdkp->write_prot; 2599 int old_wp = sdkp->write_prot;
2599 2600
2600 set_disk_ro(sdkp->disk, 0); 2601 set_disk_ro(sdkp->disk, 0);
@@ -2635,7 +2636,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
2635 "Test WP failed, assume Write Enabled\n"); 2636 "Test WP failed, assume Write Enabled\n");
2636 } else { 2637 } else {
2637 sdkp->write_prot = ((data.device_specific & 0x80) != 0); 2638 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
2638 set_disk_ro(sdkp->disk, sdkp->write_prot); 2639 set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
2639 if (sdkp->first_scan || old_wp != sdkp->write_prot) { 2640 if (sdkp->first_scan || old_wp != sdkp->write_prot) {
2640 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n", 2641 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
2641 sdkp->write_prot ? "on" : "off"); 2642 sdkp->write_prot ? "on" : "off");
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index 6c348a211ebb..89cf4498f535 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -403,7 +403,7 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
403 */ 403 */
404static int sd_zbc_check_zone_size(struct scsi_disk *sdkp) 404static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
405{ 405{
406 u64 zone_blocks; 406 u64 zone_blocks = 0;
407 sector_t block = 0; 407 sector_t block = 0;
408 unsigned char *buf; 408 unsigned char *buf;
409 unsigned char *rec; 409 unsigned char *rec;
@@ -421,10 +421,8 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
421 421
422 /* Do a report zone to get the same field */ 422 /* Do a report zone to get the same field */
423 ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0); 423 ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
424 if (ret) { 424 if (ret)
425 zone_blocks = 0; 425 goto out_free;
426 goto out;
427 }
428 426
429 same = buf[4] & 0x0f; 427 same = buf[4] & 0x0f;
430 if (same > 0) { 428 if (same > 0) {
@@ -464,7 +462,7 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
464 ret = sd_zbc_report_zones(sdkp, buf, 462 ret = sd_zbc_report_zones(sdkp, buf,
465 SD_ZBC_BUF_SIZE, block); 463 SD_ZBC_BUF_SIZE, block);
466 if (ret) 464 if (ret)
467 return ret; 465 goto out_free;
468 } 466 }
469 467
470 } while (block < sdkp->capacity); 468 } while (block < sdkp->capacity);
@@ -472,35 +470,32 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
472 zone_blocks = sdkp->zone_blocks; 470 zone_blocks = sdkp->zone_blocks;
473 471
474out: 472out:
475 kfree(buf);
476
477 if (!zone_blocks) { 473 if (!zone_blocks) {
478 if (sdkp->first_scan) 474 if (sdkp->first_scan)
479 sd_printk(KERN_NOTICE, sdkp, 475 sd_printk(KERN_NOTICE, sdkp,
480 "Devices with non constant zone " 476 "Devices with non constant zone "
481 "size are not supported\n"); 477 "size are not supported\n");
482 return -ENODEV; 478 ret = -ENODEV;
483 } 479 } else if (!is_power_of_2(zone_blocks)) {
484
485 if (!is_power_of_2(zone_blocks)) {
486 if (sdkp->first_scan) 480 if (sdkp->first_scan)
487 sd_printk(KERN_NOTICE, sdkp, 481 sd_printk(KERN_NOTICE, sdkp,
488 "Devices with non power of 2 zone " 482 "Devices with non power of 2 zone "
489 "size are not supported\n"); 483 "size are not supported\n");
490 return -ENODEV; 484 ret = -ENODEV;
491 } 485 } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
492
493 if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
494 if (sdkp->first_scan) 486 if (sdkp->first_scan)
495 sd_printk(KERN_NOTICE, sdkp, 487 sd_printk(KERN_NOTICE, sdkp,
496 "Zone size too large\n"); 488 "Zone size too large\n");
497 return -ENODEV; 489 ret = -ENODEV;
490 } else {
491 sdkp->zone_blocks = zone_blocks;
492 sdkp->zone_shift = ilog2(zone_blocks);
498 } 493 }
499 494
500 sdkp->zone_blocks = zone_blocks; 495out_free:
501 sdkp->zone_shift = ilog2(zone_blocks); 496 kfree(buf);
502 497
503 return 0; 498 return ret;
504} 499}
505 500
506/** 501/**
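The sd_zbc rework above mostly converges every exit of sd_zbc_check_zone_size() onto a single cleanup label so buf is freed on every path (the mid-loop early return previously skipped the kfree()). The shape it ends up with, sketched as a fragment with hypothetical helpers (report_zones, validate_zone_size):

	buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = report_zones(buf);
	if (ret)
		goto out_free;		/* error paths no longer bypass kfree() */

	ret = validate_zone_size(buf);

out_free:
	kfree(buf);
	return ret;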
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index e4f5bb056fd2..ba3cfa8e279b 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2443,39 +2443,21 @@ struct cgr_comp {
2443 struct completion completion; 2443 struct completion completion;
2444}; 2444};
2445 2445
2446static int qman_delete_cgr_thread(void *p) 2446static void qman_delete_cgr_smp_call(void *p)
2447{ 2447{
2448 struct cgr_comp *cgr_comp = (struct cgr_comp *)p; 2448 qman_delete_cgr((struct qman_cgr *)p);
2449 int ret;
2450
2451 ret = qman_delete_cgr(cgr_comp->cgr);
2452 complete(&cgr_comp->completion);
2453
2454 return ret;
2455} 2449}
2456 2450
2457void qman_delete_cgr_safe(struct qman_cgr *cgr) 2451void qman_delete_cgr_safe(struct qman_cgr *cgr)
2458{ 2452{
2459 struct task_struct *thread;
2460 struct cgr_comp cgr_comp;
2461
2462 preempt_disable(); 2453 preempt_disable();
2463 if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) { 2454 if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
2464 init_completion(&cgr_comp.completion); 2455 smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
2465 cgr_comp.cgr = cgr; 2456 qman_delete_cgr_smp_call, cgr, true);
2466 thread = kthread_create(qman_delete_cgr_thread, &cgr_comp,
2467 "cgr_del");
2468
2469 if (IS_ERR(thread))
2470 goto out;
2471
2472 kthread_bind(thread, qman_cgr_cpus[cgr->cgrid]);
2473 wake_up_process(thread);
2474 wait_for_completion(&cgr_comp.completion);
2475 preempt_enable(); 2457 preempt_enable();
2476 return; 2458 return;
2477 } 2459 }
2478out: 2460
2479 qman_delete_cgr(cgr); 2461 qman_delete_cgr(cgr);
2480 preempt_enable(); 2462 preempt_enable();
2481} 2463}
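The qman change swaps a create/bind/wait kthread dance for smp_call_function_single(), which runs the handler synchronously on the CPU that owns the CGR. Reduced to the bare pattern (resource and owner names hypothetical):

	static void delete_on_owner(void *p)
	{
		delete_resource(p);	/* executes on the owning CPU */
	}

	static void my_delete_safe(struct my_res *res)
	{
		preempt_disable();
		if (res->owner_cpu != smp_processor_id()) {
			/* wait=true: returns only after the remote CPU ran the callback */
			smp_call_function_single(res->owner_cpu, delete_on_owner,
						 res, true);
			preempt_enable();
			return;
		}
		delete_resource(res);
		preempt_enable();
	}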
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 6dbba5aff191..86580b6df33d 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -326,24 +326,23 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
326 mutex_lock(&ashmem_mutex); 326 mutex_lock(&ashmem_mutex);
327 327
328 if (asma->size == 0) { 328 if (asma->size == 0) {
329 ret = -EINVAL; 329 mutex_unlock(&ashmem_mutex);
330 goto out; 330 return -EINVAL;
331 } 331 }
332 332
333 if (!asma->file) { 333 if (!asma->file) {
334 ret = -EBADF; 334 mutex_unlock(&ashmem_mutex);
335 goto out; 335 return -EBADF;
336 } 336 }
337 337
338 mutex_unlock(&ashmem_mutex);
339
338 ret = vfs_llseek(asma->file, offset, origin); 340 ret = vfs_llseek(asma->file, offset, origin);
339 if (ret < 0) 341 if (ret < 0)
340 goto out; 342 return ret;
341 343
342 /** Copy f_pos from backing file, since f_ops->llseek() sets it */ 344 /** Copy f_pos from backing file, since f_ops->llseek() sets it */
343 file->f_pos = asma->file->f_pos; 345 file->f_pos = asma->file->f_pos;
344
345out:
346 mutex_unlock(&ashmem_mutex);
347 return ret; 346 return ret;
348} 347}
349 348
@@ -702,16 +701,14 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
702 size_t pgstart, pgend; 701 size_t pgstart, pgend;
703 int ret = -EINVAL; 702 int ret = -EINVAL;
704 703
704 if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
705 return -EFAULT;
706
705 mutex_lock(&ashmem_mutex); 707 mutex_lock(&ashmem_mutex);
706 708
707 if (unlikely(!asma->file)) 709 if (unlikely(!asma->file))
708 goto out_unlock; 710 goto out_unlock;
709 711
710 if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) {
711 ret = -EFAULT;
712 goto out_unlock;
713 }
714
715 /* per custom, you can pass zero for len to mean "everything onward" */ 712 /* per custom, you can pass zero for len to mean "everything onward" */
716 if (!pin.len) 713 if (!pin.len)
717 pin.len = PAGE_ALIGN(asma->size) - pin.offset; 714 pin.len = PAGE_ALIGN(asma->size) - pin.offset;
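Both ashmem hunks follow the same rule: do not hold ashmem_mutex across operations that can take mm/fs locks of their own (vfs_llseek(), copy_from_user()). The pin/unpin change in particular is the standard "copy the user buffer first, then take the lock" ordering, since a fault during the copy could take mmap_sem and invert lock order against the mmap path. Sketched with placeholder names:

	struct my_request req;

	/* May fault and take mmap_sem -- do it before taking the mutex. */
	if (copy_from_user(&req, uptr, sizeof(req)))
		return -EFAULT;

	mutex_lock(&state_mutex);
	ret = apply_request(&req);
	mutex_unlock(&state_mutex);
	return ret;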
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index e618a87521a3..9d733471ca2e 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -475,8 +475,7 @@ unsigned int comedi_nsamples_left(struct comedi_subdevice *s,
475 struct comedi_cmd *cmd = &async->cmd; 475 struct comedi_cmd *cmd = &async->cmd;
476 476
477 if (cmd->stop_src == TRIG_COUNT) { 477 if (cmd->stop_src == TRIG_COUNT) {
478 unsigned int nscans = nsamples / cmd->scan_end_arg; 478 unsigned int scans_left = __comedi_nscans_left(s, cmd->stop_arg);
479 unsigned int scans_left = __comedi_nscans_left(s, nscans);
480 unsigned int scan_pos = 479 unsigned int scan_pos =
481 comedi_bytes_to_samples(s, async->scan_progress); 480 comedi_bytes_to_samples(s, async->scan_progress);
482 unsigned long long samples_left = 0; 481 unsigned long long samples_left = 0;
diff --git a/drivers/staging/ncpfs/ncplib_kernel.c b/drivers/staging/ncpfs/ncplib_kernel.c
index 804adfebba2f..3e047eb4cc7c 100644
--- a/drivers/staging/ncpfs/ncplib_kernel.c
+++ b/drivers/staging/ncpfs/ncplib_kernel.c
@@ -981,6 +981,10 @@ ncp_read_kernel(struct ncp_server *server, const char *file_id,
981 goto out; 981 goto out;
982 } 982 }
983 *bytes_read = ncp_reply_be16(server, 0); 983 *bytes_read = ncp_reply_be16(server, 0);
984 if (*bytes_read > to_read) {
985 result = -EINVAL;
986 goto out;
987 }
984 source = ncp_reply_data(server, 2 + (offset & 1)); 988 source = ncp_reply_data(server, 2 + (offset & 1));
985 989
986 memcpy(target, source, *bytes_read); 990 memcpy(target, source, *bytes_read);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 5c0e59e8fe46..cbe98bc2b998 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2180,6 +2180,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
2180 } 2180 }
2181 if (tty_hung_up_p(file)) 2181 if (tty_hung_up_p(file))
2182 break; 2182 break;
2183 /*
2184 * Abort readers for ttys which never actually
2185 * get hung up. See __tty_hangup().
2186 */
2187 if (test_bit(TTY_HUPPING, &tty->flags))
2188 break;
2183 if (!timeout) 2189 if (!timeout)
2184 break; 2190 break;
2185 if (file->f_flags & O_NONBLOCK) { 2191 if (file->f_flags & O_NONBLOCK) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 54adf8d56350..a93f77ab3da0 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -3387,11 +3387,9 @@ static int serial_pci_is_class_communication(struct pci_dev *dev)
3387 /* 3387 /*
3388 * If it is not a communications device or the programming 3388 * If it is not a communications device or the programming
3389 * interface is greater than 6, give up. 3389 * interface is greater than 6, give up.
3390 *
3391 * (Should we try to make guesses for multiport serial devices
3392 * later?)
3393 */ 3390 */
3394 if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) && 3391 if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) &&
3392 ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MULTISERIAL) &&
3395 ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) || 3393 ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) ||
3396 (dev->class & 0xff) > 6) 3394 (dev->class & 0xff) > 6)
3397 return -ENODEV; 3395 return -ENODEV;
@@ -3428,6 +3426,12 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
3428{ 3426{
3429 int num_iomem, num_port, first_port = -1, i; 3427 int num_iomem, num_port, first_port = -1, i;
3430 3428
3429 /*
3430 * Should we try to make guesses for multiport serial devices later?
3431 */
3432 if ((dev->class >> 8) == PCI_CLASS_COMMUNICATION_MULTISERIAL)
3433 return -ENODEV;
3434
3431 num_iomem = num_port = 0; 3435 num_iomem = num_port = 0;
3432 for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) { 3436 for (i = 0; i < PCI_NUM_BAR_RESOURCES; i++) {
3433 if (pci_resource_flags(dev, i) & IORESOURCE_IO) { 3437 if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
@@ -4699,6 +4703,17 @@ static const struct pci_device_id serial_pci_tbl[] = {
4699 PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */ 4703 PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
4700 pbn_b2_4_115200 }, 4704 pbn_b2_4_115200 },
4701 /* 4705 /*
4706 * BrainBoxes UC-260
4707 */
4708 { PCI_VENDOR_ID_INTASHIELD, 0x0D21,
4709 PCI_ANY_ID, PCI_ANY_ID,
4710 PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
4711 pbn_b2_4_115200 },
4712 { PCI_VENDOR_ID_INTASHIELD, 0x0E34,
4713 PCI_ANY_ID, PCI_ANY_ID,
4714 PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
4715 pbn_b2_4_115200 },
4716 /*
4702 * Perle PCI-RAS cards 4717 * Perle PCI-RAS cards
4703 */ 4718 */
4704 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, 4719 { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index df46a9e88c34..e287fe8f10fc 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1734,6 +1734,7 @@ static void atmel_get_ip_name(struct uart_port *port)
1734 switch (version) { 1734 switch (version) {
1735 case 0x302: 1735 case 0x302:
1736 case 0x10213: 1736 case 0x10213:
1737 case 0x10302:
1737 dev_dbg(port->dev, "This version is usart\n"); 1738 dev_dbg(port->dev, "This version is usart\n");
1738 atmel_port->has_frac_baudrate = true; 1739 atmel_port->has_frac_baudrate = true;
1739 atmel_port->has_hw_timer = true; 1740 atmel_port->has_hw_timer = true;
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index 870e84fb6e39..a24278380fec 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -245,11 +245,12 @@ int __init of_setup_earlycon(const struct earlycon_id *match,
245 } 245 }
246 port->mapbase = addr; 246 port->mapbase = addr;
247 port->uartclk = BASE_BAUD * 16; 247 port->uartclk = BASE_BAUD * 16;
248 port->membase = earlycon_map(port->mapbase, SZ_4K);
249 248
250 val = of_get_flat_dt_prop(node, "reg-offset", NULL); 249 val = of_get_flat_dt_prop(node, "reg-offset", NULL);
251 if (val) 250 if (val)
252 port->mapbase += be32_to_cpu(*val); 251 port->mapbase += be32_to_cpu(*val);
252 port->membase = earlycon_map(port->mapbase, SZ_4K);
253
253 val = of_get_flat_dt_prop(node, "reg-shift", NULL); 254 val = of_get_flat_dt_prop(node, "reg-shift", NULL);
254 if (val) 255 if (val)
255 port->regshift = be32_to_cpu(*val); 256 port->regshift = be32_to_cpu(*val);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 1d7ca382bc12..a33c685af990 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2093,7 +2093,7 @@ static int serial_imx_probe(struct platform_device *pdev)
2093 uart_get_rs485_mode(&pdev->dev, &sport->port.rs485); 2093 uart_get_rs485_mode(&pdev->dev, &sport->port.rs485);
2094 2094
2095 if (sport->port.rs485.flags & SER_RS485_ENABLED && 2095 if (sport->port.rs485.flags & SER_RS485_ENABLED &&
2096 (!sport->have_rtscts || !sport->have_rtsgpio)) 2096 (!sport->have_rtscts && !sport->have_rtsgpio))
2097 dev_err(&pdev->dev, "no RTS control, disabling rs485\n"); 2097 dev_err(&pdev->dev, "no RTS control, disabling rs485\n");
2098 2098
2099 imx_rs485_config(&sport->port, &sport->port.rs485); 2099 imx_rs485_config(&sport->port, &sport->port.rs485);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index c8dde56b532b..35b9201db3b4 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1144,6 +1144,8 @@ static int uart_do_autoconfig(struct tty_struct *tty,struct uart_state *state)
1144 uport->ops->config_port(uport, flags); 1144 uport->ops->config_port(uport, flags);
1145 1145
1146 ret = uart_startup(tty, state, 1); 1146 ret = uart_startup(tty, state, 1);
1147 if (ret == 0)
1148 tty_port_set_initialized(port, true);
1147 if (ret > 0) 1149 if (ret > 0)
1148 ret = 0; 1150 ret = 0;
1149 } 1151 }
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 7257c078e155..44adf9db38f8 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -885,6 +885,8 @@ static void sci_receive_chars(struct uart_port *port)
885 /* Tell the rest of the system the news. New characters! */ 885 /* Tell the rest of the system the news. New characters! */
886 tty_flip_buffer_push(tport); 886 tty_flip_buffer_push(tport);
887 } else { 887 } else {
888 /* TTY buffers full; read from RX reg to prevent lockup */
889 serial_port_in(port, SCxRDR);
888 serial_port_in(port, SCxSR); /* dummy read */ 890 serial_port_in(port, SCxSR); /* dummy read */
889 sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port)); 891 sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
890 } 892 }
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index eb9133b472f4..63114ea35ec1 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -586,6 +586,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
586 return; 586 return;
587 } 587 }
588 588
589 /*
590 * Some console devices aren't actually hung up for technical and
591 * historical reasons, which can lead to indefinite interruptible
592 * sleep in n_tty_read(). The following explicitly tells
593 * n_tty_read() to abort readers.
594 */
595 set_bit(TTY_HUPPING, &tty->flags);
596
589 /* inuse_filps is protected by the single tty lock, 597 /* inuse_filps is protected by the single tty lock,
590 this really needs to change if we want to flush the 598 this really needs to change if we want to flush the
591 workqueue with the lock held */ 599 workqueue with the lock held */
@@ -640,6 +648,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
640 * from the ldisc side, which is now guaranteed. 648 * from the ldisc side, which is now guaranteed.
641 */ 649 */
642 set_bit(TTY_HUPPED, &tty->flags); 650 set_bit(TTY_HUPPED, &tty->flags);
651 clear_bit(TTY_HUPPING, &tty->flags);
643 tty_unlock(tty); 652 tty_unlock(tty);
644 653
645 if (f) 654 if (f)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 88b902c525d7..b4e57c5a8bba 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1727,7 +1727,7 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
1727 default_attr(vc); 1727 default_attr(vc);
1728 update_attr(vc); 1728 update_attr(vc);
1729 1729
1730 vc->vc_tab_stop[0] = 0x01010100; 1730 vc->vc_tab_stop[0] =
1731 vc->vc_tab_stop[1] = 1731 vc->vc_tab_stop[1] =
1732 vc->vc_tab_stop[2] = 1732 vc->vc_tab_stop[2] =
1733 vc->vc_tab_stop[3] = 1733 vc->vc_tab_stop[3] =
@@ -1771,7 +1771,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1771 vc->vc_pos -= (vc->vc_x << 1); 1771 vc->vc_pos -= (vc->vc_x << 1);
1772 while (vc->vc_x < vc->vc_cols - 1) { 1772 while (vc->vc_x < vc->vc_cols - 1) {
1773 vc->vc_x++; 1773 vc->vc_x++;
1774 if (vc->vc_tab_stop[vc->vc_x >> 5] & (1 << (vc->vc_x & 31))) 1774 if (vc->vc_tab_stop[7 & (vc->vc_x >> 5)] & (1 << (vc->vc_x & 31)))
1775 break; 1775 break;
1776 } 1776 }
1777 vc->vc_pos += (vc->vc_x << 1); 1777 vc->vc_pos += (vc->vc_x << 1);
@@ -1831,7 +1831,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
1831 lf(vc); 1831 lf(vc);
1832 return; 1832 return;
1833 case 'H': 1833 case 'H':
1834 vc->vc_tab_stop[vc->vc_x >> 5] |= (1 << (vc->vc_x & 31)); 1834 vc->vc_tab_stop[7 & (vc->vc_x >> 5)] |= (1 << (vc->vc_x & 31));
1835 return; 1835 return;
1836 case 'Z': 1836 case 'Z':
1837 respond_ID(tty); 1837 respond_ID(tty);
@@ -2024,7 +2024,7 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
2024 return; 2024 return;
2025 case 'g': 2025 case 'g':
2026 if (!vc->vc_par[0]) 2026 if (!vc->vc_par[0])
2027 vc->vc_tab_stop[vc->vc_x >> 5] &= ~(1 << (vc->vc_x & 31)); 2027 vc->vc_tab_stop[7 & (vc->vc_x >> 5)] &= ~(1 << (vc->vc_x & 31));
2028 else if (vc->vc_par[0] == 3) { 2028 else if (vc->vc_par[0] == 3) {
2029 vc->vc_tab_stop[0] = 2029 vc->vc_tab_stop[0] =
2030 vc->vc_tab_stop[1] = 2030 vc->vc_tab_stop[1] =
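The vt.c hunks bound the vc_tab_stop[] word index: as used here the array covers 256 columns of tab-stop bits in 8 words, and masking the index with 7 keeps a cursor column beyond 255 from writing outside the array. The arithmetic, spelled out as an illustrative fragment:

	/* 8 words x 32 bits = 256 tab-stop flags */
	unsigned int word = 7 & (col >> 5);	/* clamp the word index to 0..7 */

	tab_stop[word] |= 1u << (col & 31);	/* very wide columns alias back into the array instead of overflowing */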
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index c64cf6c4a83d..0c11d40a12bc 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -151,6 +151,10 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
151 151
152 ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout); 152 ret = usb_internal_control_msg(dev, pipe, dr, data, size, timeout);
153 153
154 /* Linger a bit, prior to the next control message. */
155 if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
156 msleep(200);
157
154 kfree(dr); 158 kfree(dr);
155 159
156 return ret; 160 return ret;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f4a548471f0f..54b019e267c5 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -230,7 +230,8 @@ static const struct usb_device_id usb_quirk_list[] = {
230 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, 230 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
231 231
232 /* Corsair Strafe RGB */ 232 /* Corsair Strafe RGB */
233 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, 233 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
234 USB_QUIRK_DELAY_CTRL_MSG },
234 235
235 /* Corsair K70 LUX */ 236 /* Corsair K70 LUX */
236 { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT }, 237 { USB_DEVICE(0x1b1c, 0x1b36), .driver_info = USB_QUIRK_DELAY_INIT },
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 03fd20f0b496..c4a47496d2fb 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -137,7 +137,7 @@ static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg)
137 p->activate_stm_fs_transceiver = true; 137 p->activate_stm_fs_transceiver = true;
138} 138}
139 139
140static void dwc2_set_stm32f7xx_hsotg_params(struct dwc2_hsotg *hsotg) 140static void dwc2_set_stm32f7_hsotg_params(struct dwc2_hsotg *hsotg)
141{ 141{
142 struct dwc2_core_params *p = &hsotg->params; 142 struct dwc2_core_params *p = &hsotg->params;
143 143
@@ -164,8 +164,8 @@ const struct of_device_id dwc2_of_match_table[] = {
164 { .compatible = "st,stm32f4x9-fsotg", 164 { .compatible = "st,stm32f4x9-fsotg",
165 .data = dwc2_set_stm32f4x9_fsotg_params }, 165 .data = dwc2_set_stm32f4x9_fsotg_params },
166 { .compatible = "st,stm32f4x9-hsotg" }, 166 { .compatible = "st,stm32f4x9-hsotg" },
167 { .compatible = "st,stm32f7xx-hsotg", 167 { .compatible = "st,stm32f7-hsotg",
168 .data = dwc2_set_stm32f7xx_hsotg_params }, 168 .data = dwc2_set_stm32f7_hsotg_params },
169 {}, 169 {},
170}; 170};
171MODULE_DEVICE_TABLE(of, dwc2_of_match_table); 171MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index f1d838a4acd6..e94bf91cc58a 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -175,7 +175,7 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
175 dwc->desired_dr_role = mode; 175 dwc->desired_dr_role = mode;
176 spin_unlock_irqrestore(&dwc->lock, flags); 176 spin_unlock_irqrestore(&dwc->lock, flags);
177 177
178 queue_work(system_power_efficient_wq, &dwc->drd_work); 178 queue_work(system_freezable_wq, &dwc->drd_work);
179} 179}
180 180
181u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type) 181u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index c2592d883f67..d2428a9e8900 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1538,7 +1538,6 @@ ffs_fs_kill_sb(struct super_block *sb)
1538 if (sb->s_fs_info) { 1538 if (sb->s_fs_info) {
1539 ffs_release_dev(sb->s_fs_info); 1539 ffs_release_dev(sb->s_fs_info);
1540 ffs_data_closed(sb->s_fs_info); 1540 ffs_data_closed(sb->s_fs_info);
1541 ffs_data_put(sb->s_fs_info);
1542 } 1541 }
1543} 1542}
1544 1543
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 84f88fa411cd..d088c340e4d0 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -447,7 +447,8 @@ static int ohci_init (struct ohci_hcd *ohci)
447 struct usb_hcd *hcd = ohci_to_hcd(ohci); 447 struct usb_hcd *hcd = ohci_to_hcd(ohci);
448 448
449 /* Accept arbitrarily long scatter-gather lists */ 449 /* Accept arbitrarily long scatter-gather lists */
450 hcd->self.sg_tablesize = ~0; 450 if (!(hcd->driver->flags & HCD_LOCAL_MEM))
451 hcd->self.sg_tablesize = ~0;
451 452
452 if (distrust_firmware) 453 if (distrust_firmware)
453 ohci->flags |= OHCI_QUIRK_HUB_POWER; 454 ohci->flags |= OHCI_QUIRK_HUB_POWER;
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index a1ab8acf39ba..c359bae7b754 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -328,13 +328,14 @@ dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
328int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req, 328int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
329 gfp_t gfp_flags) 329 gfp_t gfp_flags)
330{ 330{
331 unsigned long flags;
331 struct xhci_dbc *dbc = dep->dbc; 332 struct xhci_dbc *dbc = dep->dbc;
332 int ret = -ESHUTDOWN; 333 int ret = -ESHUTDOWN;
333 334
334 spin_lock(&dbc->lock); 335 spin_lock_irqsave(&dbc->lock, flags);
335 if (dbc->state == DS_CONFIGURED) 336 if (dbc->state == DS_CONFIGURED)
336 ret = dbc_ep_do_queue(dep, req); 337 ret = dbc_ep_do_queue(dep, req);
337 spin_unlock(&dbc->lock); 338 spin_unlock_irqrestore(&dbc->lock, flags);
338 339
339 mod_delayed_work(system_wq, &dbc->event_work, 0); 340 mod_delayed_work(system_wq, &dbc->event_work, 0);
340 341
@@ -521,15 +522,16 @@ static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
521static int xhci_dbc_start(struct xhci_hcd *xhci) 522static int xhci_dbc_start(struct xhci_hcd *xhci)
522{ 523{
523 int ret; 524 int ret;
525 unsigned long flags;
524 struct xhci_dbc *dbc = xhci->dbc; 526 struct xhci_dbc *dbc = xhci->dbc;
525 527
526 WARN_ON(!dbc); 528 WARN_ON(!dbc);
527 529
528 pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller); 530 pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);
529 531
530 spin_lock(&dbc->lock); 532 spin_lock_irqsave(&dbc->lock, flags);
531 ret = xhci_do_dbc_start(xhci); 533 ret = xhci_do_dbc_start(xhci);
532 spin_unlock(&dbc->lock); 534 spin_unlock_irqrestore(&dbc->lock, flags);
533 535
534 if (ret) { 536 if (ret) {
535 pm_runtime_put(xhci_to_hcd(xhci)->self.controller); 537 pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
@@ -541,6 +543,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci)
541 543
542static void xhci_dbc_stop(struct xhci_hcd *xhci) 544static void xhci_dbc_stop(struct xhci_hcd *xhci)
543{ 545{
546 unsigned long flags;
544 struct xhci_dbc *dbc = xhci->dbc; 547 struct xhci_dbc *dbc = xhci->dbc;
545 struct dbc_port *port = &dbc->port; 548 struct dbc_port *port = &dbc->port;
546 549
@@ -551,9 +554,9 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
551 if (port->registered) 554 if (port->registered)
552 xhci_dbc_tty_unregister_device(xhci); 555 xhci_dbc_tty_unregister_device(xhci);
553 556
554 spin_lock(&dbc->lock); 557 spin_lock_irqsave(&dbc->lock, flags);
555 xhci_do_dbc_stop(xhci); 558 xhci_do_dbc_stop(xhci);
556 spin_unlock(&dbc->lock); 559 spin_unlock_irqrestore(&dbc->lock, flags);
557 560
558 pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); 561 pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
559} 562}
@@ -779,14 +782,15 @@ static void xhci_dbc_handle_events(struct work_struct *work)
779 int ret; 782 int ret;
780 enum evtreturn evtr; 783 enum evtreturn evtr;
781 struct xhci_dbc *dbc; 784 struct xhci_dbc *dbc;
785 unsigned long flags;
782 struct xhci_hcd *xhci; 786 struct xhci_hcd *xhci;
783 787
784 dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work); 788 dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
785 xhci = dbc->xhci; 789 xhci = dbc->xhci;
786 790
787 spin_lock(&dbc->lock); 791 spin_lock_irqsave(&dbc->lock, flags);
788 evtr = xhci_dbc_do_handle_events(dbc); 792 evtr = xhci_dbc_do_handle_events(dbc);
789 spin_unlock(&dbc->lock); 793 spin_unlock_irqrestore(&dbc->lock, flags);
790 794
791 switch (evtr) { 795 switch (evtr) {
792 case EVT_GSER: 796 case EVT_GSER:
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index 8d47b6fbf973..75f0b92694ba 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -92,21 +92,23 @@ static void dbc_start_rx(struct dbc_port *port)
92static void 92static void
93dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req) 93dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)
94{ 94{
95 unsigned long flags;
95 struct xhci_dbc *dbc = xhci->dbc; 96 struct xhci_dbc *dbc = xhci->dbc;
96 struct dbc_port *port = &dbc->port; 97 struct dbc_port *port = &dbc->port;
97 98
98 spin_lock(&port->port_lock); 99 spin_lock_irqsave(&port->port_lock, flags);
99 list_add_tail(&req->list_pool, &port->read_queue); 100 list_add_tail(&req->list_pool, &port->read_queue);
100 tasklet_schedule(&port->push); 101 tasklet_schedule(&port->push);
101 spin_unlock(&port->port_lock); 102 spin_unlock_irqrestore(&port->port_lock, flags);
102} 103}
103 104
104static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req) 105static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
105{ 106{
107 unsigned long flags;
106 struct xhci_dbc *dbc = xhci->dbc; 108 struct xhci_dbc *dbc = xhci->dbc;
107 struct dbc_port *port = &dbc->port; 109 struct dbc_port *port = &dbc->port;
108 110
109 spin_lock(&port->port_lock); 111 spin_lock_irqsave(&port->port_lock, flags);
110 list_add(&req->list_pool, &port->write_pool); 112 list_add(&req->list_pool, &port->write_pool);
111 switch (req->status) { 113 switch (req->status) {
112 case 0: 114 case 0:
@@ -119,7 +121,7 @@ static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
119 req->status); 121 req->status);
120 break; 122 break;
121 } 123 }
122 spin_unlock(&port->port_lock); 124 spin_unlock_irqrestore(&port->port_lock, flags);
123} 125}
124 126
125static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req) 127static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req)
@@ -327,12 +329,13 @@ static void dbc_rx_push(unsigned long _port)
327{ 329{
328 struct dbc_request *req; 330 struct dbc_request *req;
329 struct tty_struct *tty; 331 struct tty_struct *tty;
332 unsigned long flags;
330 bool do_push = false; 333 bool do_push = false;
331 bool disconnect = false; 334 bool disconnect = false;
332 struct dbc_port *port = (void *)_port; 335 struct dbc_port *port = (void *)_port;
333 struct list_head *queue = &port->read_queue; 336 struct list_head *queue = &port->read_queue;
334 337
335 spin_lock_irq(&port->port_lock); 338 spin_lock_irqsave(&port->port_lock, flags);
336 tty = port->port.tty; 339 tty = port->port.tty;
337 while (!list_empty(queue)) { 340 while (!list_empty(queue)) {
338 req = list_first_entry(queue, struct dbc_request, list_pool); 341 req = list_first_entry(queue, struct dbc_request, list_pool);
@@ -392,16 +395,17 @@ static void dbc_rx_push(unsigned long _port)
392 if (!disconnect) 395 if (!disconnect)
393 dbc_start_rx(port); 396 dbc_start_rx(port);
394 397
395 spin_unlock_irq(&port->port_lock); 398 spin_unlock_irqrestore(&port->port_lock, flags);
396} 399}
397 400
398static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty) 401static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
399{ 402{
403 unsigned long flags;
400 struct dbc_port *port = container_of(_port, struct dbc_port, port); 404 struct dbc_port *port = container_of(_port, struct dbc_port, port);
401 405
402 spin_lock_irq(&port->port_lock); 406 spin_lock_irqsave(&port->port_lock, flags);
403 dbc_start_rx(port); 407 dbc_start_rx(port);
404 spin_unlock_irq(&port->port_lock); 408 spin_unlock_irqrestore(&port->port_lock, flags);
405 409
406 return 0; 410 return 0;
407} 411}
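All of the xhci-dbgcap/xhci-dbgtty hunks make the same substitution: plain spin_lock()/spin_lock_irq() become the irqsave variants, so the DbC locks can be taken from contexts where interrupts may already be disabled, and the unlock restores whatever interrupt state the caller had. The pattern as a fragment (names hypothetical):

	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);	/* safe whether or not IRQs were already off */
	list_add_tail(&req->list, &port->queue);
	spin_unlock_irqrestore(&port->lock, flags);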
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5262fa571a5d..d9f831b67e57 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -126,6 +126,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
126 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) 126 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
127 xhci->quirks |= XHCI_AMD_PLL_FIX; 127 xhci->quirks |= XHCI_AMD_PLL_FIX;
128 128
129 if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb)
130 xhci->quirks |= XHCI_SUSPEND_DELAY;
131
129 if (pdev->vendor == PCI_VENDOR_ID_AMD) 132 if (pdev->vendor == PCI_VENDOR_ID_AMD)
130 xhci->quirks |= XHCI_TRUST_TX_LENGTH; 133 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
131 134
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 6f038306c14d..6652e2d5bd2e 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -360,7 +360,6 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
360{ 360{
361 struct usb_hcd *hcd = dev_get_drvdata(dev); 361 struct usb_hcd *hcd = dev_get_drvdata(dev);
362 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 362 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
363 int ret;
364 363
365 /* 364 /*
366 * xhci_suspend() needs `do_wakeup` to know whether host is allowed 365 * xhci_suspend() needs `do_wakeup` to know whether host is allowed
@@ -370,12 +369,7 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
370 * reconsider this when xhci_plat_suspend enlarges its scope, e.g., 369 * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
371 * also applies to runtime suspend. 370 * also applies to runtime suspend.
372 */ 371 */
373 ret = xhci_suspend(xhci, device_may_wakeup(dev)); 372 return xhci_suspend(xhci, device_may_wakeup(dev));
374
375 if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk))
376 clk_disable_unprepare(xhci->clk);
377
378 return ret;
379} 373}
380 374
381static int __maybe_unused xhci_plat_resume(struct device *dev) 375static int __maybe_unused xhci_plat_resume(struct device *dev)
@@ -384,9 +378,6 @@ static int __maybe_unused xhci_plat_resume(struct device *dev)
384 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 378 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
385 int ret; 379 int ret;
386 380
387 if (!device_may_wakeup(dev) && !IS_ERR(xhci->clk))
388 clk_prepare_enable(xhci->clk);
389
390 ret = xhci_priv_resume_quirk(hcd); 381 ret = xhci_priv_resume_quirk(hcd);
391 if (ret) 382 if (ret)
392 return ret; 383 return ret;
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index f0b559660007..f33ffc2bc4ed 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -83,6 +83,10 @@ static const struct soc_device_attribute rcar_quirks_match[] = {
83 .soc_id = "r8a7796", 83 .soc_id = "r8a7796",
84 .data = (void *)RCAR_XHCI_FIRMWARE_V3, 84 .data = (void *)RCAR_XHCI_FIRMWARE_V3,
85 }, 85 },
86 {
87 .soc_id = "r8a77965",
88 .data = (void *)RCAR_XHCI_FIRMWARE_V3,
89 },
86 { /* sentinel */ }, 90 { /* sentinel */ },
87}; 91};
88 92
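
The new r8a77965 entry above extends a soc_device_attribute table; soc_device_match() returns the first matching entry, whose ->data field selects the firmware. A rough sketch of the lookup (the ->data values here are placeholders, not the real firmware codes):

#include <linux/sys_soc.h>

static const struct soc_device_attribute example_fw_matches[] = {
	{ .soc_id = "r8a7796",  .data = (void *)3 },
	{ .soc_id = "r8a77965", .data = (void *)3 },
	{ /* sentinel */ },
};

static unsigned long example_firmware_variant(void)
{
	const struct soc_device_attribute *attr;

	attr = soc_device_match(example_fw_matches);
	return attr ? (unsigned long)attr->data : 0;	/* 0 = default firmware */
}
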
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 25d4b748a56f..5d37700ae4b0 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -877,6 +877,9 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
877 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); 877 clear_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
878 del_timer_sync(&xhci->shared_hcd->rh_timer); 878 del_timer_sync(&xhci->shared_hcd->rh_timer);
879 879
880 if (xhci->quirks & XHCI_SUSPEND_DELAY)
881 usleep_range(1000, 1500);
882
880 spin_lock_irq(&xhci->lock); 883 spin_lock_irq(&xhci->lock);
881 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 884 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
882 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags); 885 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
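
Taken together, the xhci-pci.c hunk earlier and the xhci.c hunk here add a per-device quirk bit and act on it before halting the host; the bit itself is declared in the xhci.h hunk that follows. A sketch of the flag pattern (struct and bit position are illustrative only):

#include <linux/delay.h>

#define EXAMPLE_SUSPEND_DELAY	(1 << 30)

struct example_hcd {
	unsigned int quirks;
};

static void example_suspend(struct example_hcd *hcd)
{
	/* Some controllers need a short settle time before the halt sequence. */
	if (hcd->quirks & EXAMPLE_SUSPEND_DELAY)
		usleep_range(1000, 1500);

	/* ... stop the host and save its state ... */
}
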
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index e4d7d3d06a75..866e141d4972 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -718,11 +718,12 @@ struct xhci_ep_ctx {
718/* bits 10:14 are Max Primary Streams */ 718/* bits 10:14 are Max Primary Streams */
719/* bit 15 is Linear Stream Array */ 719/* bit 15 is Linear Stream Array */
720/* Interval - period between requests to an endpoint - 125u increments. */ 720/* Interval - period between requests to an endpoint - 125u increments. */
721#define EP_INTERVAL(p) (((p) & 0xff) << 16) 721#define EP_INTERVAL(p) (((p) & 0xff) << 16)
722#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff)) 722#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
723#define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff) 723#define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff)
724#define EP_MAXPSTREAMS_MASK (0x1f << 10) 724#define EP_MAXPSTREAMS_MASK (0x1f << 10)
725#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) 725#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
726#define CTX_TO_EP_MAXPSTREAMS(p) (((p) & EP_MAXPSTREAMS_MASK) >> 10)
726/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ 727/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
727#define EP_HAS_LSA (1 << 15) 728#define EP_HAS_LSA (1 << 15)
728/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */ 729/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */
@@ -1825,6 +1826,7 @@ struct xhci_hcd {
1825#define XHCI_U2_DISABLE_WAKE (1 << 27) 1826#define XHCI_U2_DISABLE_WAKE (1 << 27)
1826#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) 1827#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
1827#define XHCI_HW_LPM_DISABLE (1 << 29) 1828#define XHCI_HW_LPM_DISABLE (1 << 29)
1829#define XHCI_SUSPEND_DELAY (1 << 30)
1828 1830
1829 unsigned int num_active_eps; 1831 unsigned int num_active_eps;
1830 unsigned int limit_active_eps; 1832 unsigned int limit_active_eps;
@@ -2549,21 +2551,22 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq,
2549 u8 burst; 2551 u8 burst;
2550 u8 cerr; 2552 u8 cerr;
2551 u8 mult; 2553 u8 mult;
2552 u8 lsa; 2554
2553 u8 hid; 2555 bool lsa;
2556 bool hid;
2554 2557
2555 esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 | 2558 esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
2556 CTX_TO_MAX_ESIT_PAYLOAD(tx_info); 2559 CTX_TO_MAX_ESIT_PAYLOAD(tx_info);
2557 2560
2558 ep_state = info & EP_STATE_MASK; 2561 ep_state = info & EP_STATE_MASK;
2559 max_pstr = info & EP_MAXPSTREAMS_MASK; 2562 max_pstr = CTX_TO_EP_MAXPSTREAMS(info);
2560 interval = CTX_TO_EP_INTERVAL(info); 2563 interval = CTX_TO_EP_INTERVAL(info);
2561 mult = CTX_TO_EP_MULT(info) + 1; 2564 mult = CTX_TO_EP_MULT(info) + 1;
2562 lsa = info & EP_HAS_LSA; 2565 lsa = !!(info & EP_HAS_LSA);
2563 2566
2564 cerr = (info2 & (3 << 1)) >> 1; 2567 cerr = (info2 & (3 << 1)) >> 1;
2565 ep_type = CTX_TO_EP_TYPE(info2); 2568 ep_type = CTX_TO_EP_TYPE(info2);
2566 hid = info2 & (1 << 7); 2569 hid = !!(info2 & (1 << 7));
2567 burst = CTX_TO_MAX_BURST(info2); 2570 burst = CTX_TO_MAX_BURST(info2);
2568 maxp = MAX_PACKET_DECODED(info2); 2571 maxp = MAX_PACKET_DECODED(info2);
2569 2572
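
The xhci.h hunk adds CTX_TO_EP_MAXPSTREAMS so the decoder prints the shifted field value instead of the raw masked bits, and turns single-bit flags into bool via a double negation. A generic sketch of the pack/unpack pair (shift and mask are illustrative):

#define EXAMPLE_FIELD_SHIFT	10
#define EXAMPLE_FIELD_MASK	(0x1f << EXAMPLE_FIELD_SHIFT)
#define TO_EXAMPLE_FIELD(p)	(((p) << EXAMPLE_FIELD_SHIFT) & EXAMPLE_FIELD_MASK)
#define FROM_EXAMPLE_FIELD(p)	(((p) & EXAMPLE_FIELD_MASK) >> EXAMPLE_FIELD_SHIFT)

/* Single-bit flags become proper booleans with !!, e.g.: */
/*	bool lsa = !!(info & (1 << 15));                    */
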
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index f5e1bb5e5217..984f7e12a6a5 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -85,6 +85,8 @@ struct mon_reader_text {
85 85
86 wait_queue_head_t wait; 86 wait_queue_head_t wait;
87 int printf_size; 87 int printf_size;
88 size_t printf_offset;
89 size_t printf_togo;
88 char *printf_buf; 90 char *printf_buf;
89 struct mutex printf_lock; 91 struct mutex printf_lock;
90 92
@@ -376,75 +378,103 @@ err_alloc:
376 return rc; 378 return rc;
377} 379}
378 380
379/* 381static ssize_t mon_text_copy_to_user(struct mon_reader_text *rp,
380 * For simplicity, we read one record in one system call and throw out 382 char __user * const buf, const size_t nbytes)
381 * what does not fit. This means that the following does not work: 383{
382 * dd if=/dbg/usbmon/0t bs=10 384 const size_t togo = min(nbytes, rp->printf_togo);
383 * Also, we do not allow seeks and do not bother advancing the offset. 385
384 */ 386 if (copy_to_user(buf, &rp->printf_buf[rp->printf_offset], togo))
387 return -EFAULT;
388 rp->printf_togo -= togo;
389 rp->printf_offset += togo;
390 return togo;
391}
392
393/* ppos is not advanced since the llseek operation is not permitted. */
385static ssize_t mon_text_read_t(struct file *file, char __user *buf, 394static ssize_t mon_text_read_t(struct file *file, char __user *buf,
386 size_t nbytes, loff_t *ppos) 395 size_t nbytes, loff_t *ppos)
387{ 396{
388 struct mon_reader_text *rp = file->private_data; 397 struct mon_reader_text *rp = file->private_data;
389 struct mon_event_text *ep; 398 struct mon_event_text *ep;
390 struct mon_text_ptr ptr; 399 struct mon_text_ptr ptr;
400 ssize_t ret;
391 401
392 ep = mon_text_read_wait(rp, file);
393 if (IS_ERR(ep))
394 return PTR_ERR(ep);
395 mutex_lock(&rp->printf_lock); 402 mutex_lock(&rp->printf_lock);
396 ptr.cnt = 0; 403
397 ptr.pbuf = rp->printf_buf; 404 if (rp->printf_togo == 0) {
398 ptr.limit = rp->printf_size; 405
399 406 ep = mon_text_read_wait(rp, file);
400 mon_text_read_head_t(rp, &ptr, ep); 407 if (IS_ERR(ep)) {
401 mon_text_read_statset(rp, &ptr, ep); 408 mutex_unlock(&rp->printf_lock);
402 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt, 409 return PTR_ERR(ep);
403 " %d", ep->length); 410 }
404 mon_text_read_data(rp, &ptr, ep); 411 ptr.cnt = 0;
405 412 ptr.pbuf = rp->printf_buf;
406 if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) 413 ptr.limit = rp->printf_size;
407 ptr.cnt = -EFAULT; 414
415 mon_text_read_head_t(rp, &ptr, ep);
416 mon_text_read_statset(rp, &ptr, ep);
417 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
418 " %d", ep->length);
419 mon_text_read_data(rp, &ptr, ep);
420
421 rp->printf_togo = ptr.cnt;
422 rp->printf_offset = 0;
423
424 kmem_cache_free(rp->e_slab, ep);
425 }
426
427 ret = mon_text_copy_to_user(rp, buf, nbytes);
408 mutex_unlock(&rp->printf_lock); 428 mutex_unlock(&rp->printf_lock);
409 kmem_cache_free(rp->e_slab, ep); 429 return ret;
410 return ptr.cnt;
411} 430}
412 431
432/* ppos is not advanced since the llseek operation is not permitted. */
413static ssize_t mon_text_read_u(struct file *file, char __user *buf, 433static ssize_t mon_text_read_u(struct file *file, char __user *buf,
414 size_t nbytes, loff_t *ppos) 434 size_t nbytes, loff_t *ppos)
415{ 435{
416 struct mon_reader_text *rp = file->private_data; 436 struct mon_reader_text *rp = file->private_data;
417 struct mon_event_text *ep; 437 struct mon_event_text *ep;
418 struct mon_text_ptr ptr; 438 struct mon_text_ptr ptr;
439 ssize_t ret;
419 440
420 ep = mon_text_read_wait(rp, file);
421 if (IS_ERR(ep))
422 return PTR_ERR(ep);
423 mutex_lock(&rp->printf_lock); 441 mutex_lock(&rp->printf_lock);
424 ptr.cnt = 0;
425 ptr.pbuf = rp->printf_buf;
426 ptr.limit = rp->printf_size;
427 442
428 mon_text_read_head_u(rp, &ptr, ep); 443 if (rp->printf_togo == 0) {
429 if (ep->type == 'E') { 444
430 mon_text_read_statset(rp, &ptr, ep); 445 ep = mon_text_read_wait(rp, file);
431 } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) { 446 if (IS_ERR(ep)) {
432 mon_text_read_isostat(rp, &ptr, ep); 447 mutex_unlock(&rp->printf_lock);
433 mon_text_read_isodesc(rp, &ptr, ep); 448 return PTR_ERR(ep);
434 } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) { 449 }
435 mon_text_read_intstat(rp, &ptr, ep); 450 ptr.cnt = 0;
436 } else { 451 ptr.pbuf = rp->printf_buf;
437 mon_text_read_statset(rp, &ptr, ep); 452 ptr.limit = rp->printf_size;
453
454 mon_text_read_head_u(rp, &ptr, ep);
455 if (ep->type == 'E') {
456 mon_text_read_statset(rp, &ptr, ep);
457 } else if (ep->xfertype == USB_ENDPOINT_XFER_ISOC) {
458 mon_text_read_isostat(rp, &ptr, ep);
459 mon_text_read_isodesc(rp, &ptr, ep);
460 } else if (ep->xfertype == USB_ENDPOINT_XFER_INT) {
461 mon_text_read_intstat(rp, &ptr, ep);
462 } else {
463 mon_text_read_statset(rp, &ptr, ep);
464 }
465 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
466 " %d", ep->length);
467 mon_text_read_data(rp, &ptr, ep);
468
469 rp->printf_togo = ptr.cnt;
470 rp->printf_offset = 0;
471
472 kmem_cache_free(rp->e_slab, ep);
438 } 473 }
439 ptr.cnt += snprintf(ptr.pbuf + ptr.cnt, ptr.limit - ptr.cnt,
440 " %d", ep->length);
441 mon_text_read_data(rp, &ptr, ep);
442 474
443 if (copy_to_user(buf, rp->printf_buf, ptr.cnt)) 475 ret = mon_text_copy_to_user(rp, buf, nbytes);
444 ptr.cnt = -EFAULT;
445 mutex_unlock(&rp->printf_lock); 476 mutex_unlock(&rp->printf_lock);
446 kmem_cache_free(rp->e_slab, ep); 477 return ret;
447 return ptr.cnt;
448} 478}
449 479
450static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp, 480static struct mon_event_text *mon_text_read_wait(struct mon_reader_text *rp,
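
The mon_text.c rework above formats a record once and then hands it out piecemeal, so short reads no longer drop the remainder of a record. A condensed sketch of the bookkeeping (hypothetical reader type, not the monitor's own structures):

#include <linux/mutex.h>
#include <linux/uaccess.h>

struct example_reader {
	struct mutex lock;
	char *buf;		/* formatted record */
	size_t offset;		/* next byte to hand out */
	size_t togo;		/* bytes still buffered */
};

static ssize_t example_copy_to_user(struct example_reader *r,
				    char __user *ubuf, size_t nbytes)
{
	size_t n = min(nbytes, r->togo);

	if (copy_to_user(ubuf, r->buf + r->offset, n))
		return -EFAULT;
	r->offset += n;
	r->togo -= n;
	return n;		/* 0 once the record is fully consumed */
}
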
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index eef4ad578b31..4d723077be2b 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1756,6 +1756,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1756 int vbus; 1756 int vbus;
1757 u8 devctl; 1757 u8 devctl;
1758 1758
1759 pm_runtime_get_sync(dev);
1759 spin_lock_irqsave(&musb->lock, flags); 1760 spin_lock_irqsave(&musb->lock, flags);
1760 val = musb->a_wait_bcon; 1761 val = musb->a_wait_bcon;
1761 vbus = musb_platform_get_vbus_status(musb); 1762 vbus = musb_platform_get_vbus_status(musb);
@@ -1769,6 +1770,7 @@ vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
1769 vbus = 0; 1770 vbus = 0;
1770 } 1771 }
1771 spin_unlock_irqrestore(&musb->lock, flags); 1772 spin_unlock_irqrestore(&musb->lock, flags);
1773 pm_runtime_put_sync(dev);
1772 1774
1773 return sprintf(buf, "Vbus %s, timeout %lu msec\n", 1775 return sprintf(buf, "Vbus %s, timeout %lu msec\n",
1774 vbus ? "on" : "off", val); 1776 vbus ? "on" : "off", val);
@@ -2471,11 +2473,11 @@ static int musb_remove(struct platform_device *pdev)
2471 musb_disable_interrupts(musb); 2473 musb_disable_interrupts(musb);
2472 musb_writeb(musb->mregs, MUSB_DEVCTL, 0); 2474 musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
2473 spin_unlock_irqrestore(&musb->lock, flags); 2475 spin_unlock_irqrestore(&musb->lock, flags);
2476 musb_platform_exit(musb);
2474 2477
2475 pm_runtime_dont_use_autosuspend(musb->controller); 2478 pm_runtime_dont_use_autosuspend(musb->controller);
2476 pm_runtime_put_sync(musb->controller); 2479 pm_runtime_put_sync(musb->controller);
2477 pm_runtime_disable(musb->controller); 2480 pm_runtime_disable(musb->controller);
2478 musb_platform_exit(musb);
2479 musb_phy_callback = NULL; 2481 musb_phy_callback = NULL;
2480 if (musb->dma_controller) 2482 if (musb->dma_controller)
2481 musb_dma_controller_destroy(musb->dma_controller); 2483 musb_dma_controller_destroy(musb->dma_controller);
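
The vbus_show() hunks bracket the register access with a runtime-PM reference so the controller is powered while its registers are read. A sketch of the convention (example_read_status() stands in for the real register access):

#include <linux/pm_runtime.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	unsigned int val;

	pm_runtime_get_sync(dev);	/* wake the device before touching it */
	val = example_read_status(dev);	/* hypothetical register read */
	pm_runtime_put_sync(dev);

	return sprintf(buf, "%u\n", val);
}
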
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 3b1b9695177a..6034c39b67d1 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -1076,7 +1076,7 @@ static int uas_post_reset(struct usb_interface *intf)
1076 return 0; 1076 return 0;
1077 1077
1078 err = uas_configure_endpoints(devinfo); 1078 err = uas_configure_endpoints(devinfo);
1079 if (err && err != ENODEV) 1079 if (err && err != -ENODEV)
1080 shost_printk(KERN_ERR, shost, 1080 shost_printk(KERN_ERR, shost,
1081 "%s: alloc streams error %d after reset", 1081 "%s: alloc streams error %d after reset",
1082 __func__, err); 1082 __func__, err);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 264af199aec8..747d3a9596d9 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2118,6 +2118,13 @@ UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
2118 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2118 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2119 US_FL_BROKEN_FUA ), 2119 US_FL_BROKEN_FUA ),
2120 2120
2121/* Reported by Teijo Kinnunen <teijo.kinnunen@code-q.fi> */
2122UNUSUAL_DEV( 0x152d, 0x2567, 0x0117, 0x0117,
2123 "JMicron",
2124 "USB to ATA/ATAPI Bridge",
2125 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2126 US_FL_BROKEN_FUA ),
2127
2121/* Reported-by George Cherian <george.cherian@cavium.com> */ 2128/* Reported-by George Cherian <george.cherian@cavium.com> */
2122UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999, 2129UNUSUAL_DEV(0x152d, 0x9561, 0x0000, 0x9999,
2123 "JMicron", 2130 "JMicron",
diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c
index 9ce4756adad6..dcd8ef085b30 100644
--- a/drivers/usb/typec/fusb302/fusb302.c
+++ b/drivers/usb/typec/fusb302/fusb302.c
@@ -1857,7 +1857,8 @@ static int fusb302_probe(struct i2c_client *client,
1857 chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev); 1857 chip->tcpm_port = tcpm_register_port(&client->dev, &chip->tcpc_dev);
1858 if (IS_ERR(chip->tcpm_port)) { 1858 if (IS_ERR(chip->tcpm_port)) {
1859 ret = PTR_ERR(chip->tcpm_port); 1859 ret = PTR_ERR(chip->tcpm_port);
1860 dev_err(dev, "cannot register tcpm port, ret=%d", ret); 1860 if (ret != -EPROBE_DEFER)
1861 dev_err(dev, "cannot register tcpm port, ret=%d", ret);
1861 goto destroy_workqueue; 1862 goto destroy_workqueue;
1862 } 1863 }
1863 1864
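
The fusb302 change above follows the usual probe-deferral convention: -EPROBE_DEFER is an expected, transient result during boot, so only other errors get logged. Sketch with hypothetical registration call and handle type:

#include <linux/device.h>
#include <linux/err.h>

struct example_port;						/* hypothetical handle */
struct example_port *example_register_port(struct device *dev);	/* hypothetical */

static int example_probe(struct device *dev)
{
	struct example_port *port;
	int ret;

	port = example_register_port(dev);
	if (IS_ERR(port)) {
		ret = PTR_ERR(port);
		/* Deferral is retried later; only log real failures. */
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "cannot register port, ret=%d\n", ret);
		return ret;
	}
	return 0;
}
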
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index f4d563ee7690..8b637a4b474b 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -252,9 +252,6 @@ struct tcpm_port {
252 unsigned int nr_src_pdo; 252 unsigned int nr_src_pdo;
253 u32 snk_pdo[PDO_MAX_OBJECTS]; 253 u32 snk_pdo[PDO_MAX_OBJECTS];
254 unsigned int nr_snk_pdo; 254 unsigned int nr_snk_pdo;
255 unsigned int nr_fixed; /* number of fixed sink PDOs */
256 unsigned int nr_var; /* number of variable sink PDOs */
257 unsigned int nr_batt; /* number of battery sink PDOs */
258 u32 snk_vdo[VDO_MAX_OBJECTS]; 255 u32 snk_vdo[VDO_MAX_OBJECTS];
259 unsigned int nr_snk_vdo; 256 unsigned int nr_snk_vdo;
260 257
@@ -1770,90 +1767,39 @@ static int tcpm_pd_check_request(struct tcpm_port *port)
1770 return 0; 1767 return 0;
1771} 1768}
1772 1769
1773#define min_power(x, y) min(pdo_max_power(x), pdo_max_power(y)) 1770static int tcpm_pd_select_pdo(struct tcpm_port *port)
1774#define min_current(x, y) min(pdo_max_current(x), pdo_max_current(y))
1775
1776static int tcpm_pd_select_pdo(struct tcpm_port *port, int *sink_pdo,
1777 int *src_pdo)
1778{ 1771{
1779 unsigned int i, j, max_mw = 0, max_mv = 0, mw = 0, mv = 0, ma = 0; 1772 unsigned int i, max_mw = 0, max_mv = 0;
1780 int ret = -EINVAL; 1773 int ret = -EINVAL;
1781 1774
1782 /* 1775 /*
1783 * Select the source PDO providing the most power which has a 1776 * Select the source PDO providing the most power while staying within
1784 * matching sink cap. 1777 * the board's voltage limits. Prefer PDO providing exp
1785 */ 1778 */
1786 for (i = 0; i < port->nr_source_caps; i++) { 1779 for (i = 0; i < port->nr_source_caps; i++) {
1787 u32 pdo = port->source_caps[i]; 1780 u32 pdo = port->source_caps[i];
1788 enum pd_pdo_type type = pdo_type(pdo); 1781 enum pd_pdo_type type = pdo_type(pdo);
1782 unsigned int mv, ma, mw;
1789 1783
1790 if (type == PDO_TYPE_FIXED) { 1784 if (type == PDO_TYPE_FIXED)
1791 for (j = 0; j < port->nr_fixed; j++) { 1785 mv = pdo_fixed_voltage(pdo);
1792 if (pdo_fixed_voltage(pdo) == 1786 else
1793 pdo_fixed_voltage(port->snk_pdo[j])) { 1787 mv = pdo_min_voltage(pdo);
1794 ma = min_current(pdo, port->snk_pdo[j]); 1788
1795 mv = pdo_fixed_voltage(pdo); 1789 if (type == PDO_TYPE_BATT) {
1796 mw = ma * mv / 1000; 1790 mw = pdo_max_power(pdo);
1797 if (mw > max_mw || 1791 } else {
1798 (mw == max_mw && mv > max_mv)) { 1792 ma = min(pdo_max_current(pdo),
1799 ret = 0; 1793 port->max_snk_ma);
1800 *src_pdo = i; 1794 mw = ma * mv / 1000;
1801 *sink_pdo = j; 1795 }
1802 max_mw = mw; 1796
1803 max_mv = mv; 1797 /* Prefer higher voltages if available */
1804 } 1798 if ((mw > max_mw || (mw == max_mw && mv > max_mv)) &&
1805 /* There could only be one fixed pdo 1799 mv <= port->max_snk_mv) {
1806 * at a specific voltage level. 1800 ret = i;
1807 * So breaking here. 1801 max_mw = mw;
1808 */ 1802 max_mv = mv;
1809 break;
1810 }
1811 }
1812 } else if (type == PDO_TYPE_BATT) {
1813 for (j = port->nr_fixed;
1814 j < port->nr_fixed +
1815 port->nr_batt;
1816 j++) {
1817 if (pdo_min_voltage(pdo) >=
1818 pdo_min_voltage(port->snk_pdo[j]) &&
1819 pdo_max_voltage(pdo) <=
1820 pdo_max_voltage(port->snk_pdo[j])) {
1821 mw = min_power(pdo, port->snk_pdo[j]);
1822 mv = pdo_min_voltage(pdo);
1823 if (mw > max_mw ||
1824 (mw == max_mw && mv > max_mv)) {
1825 ret = 0;
1826 *src_pdo = i;
1827 *sink_pdo = j;
1828 max_mw = mw;
1829 max_mv = mv;
1830 }
1831 }
1832 }
1833 } else if (type == PDO_TYPE_VAR) {
1834 for (j = port->nr_fixed +
1835 port->nr_batt;
1836 j < port->nr_fixed +
1837 port->nr_batt +
1838 port->nr_var;
1839 j++) {
1840 if (pdo_min_voltage(pdo) >=
1841 pdo_min_voltage(port->snk_pdo[j]) &&
1842 pdo_max_voltage(pdo) <=
1843 pdo_max_voltage(port->snk_pdo[j])) {
1844 ma = min_current(pdo, port->snk_pdo[j]);
1845 mv = pdo_min_voltage(pdo);
1846 mw = ma * mv / 1000;
1847 if (mw > max_mw ||
1848 (mw == max_mw && mv > max_mv)) {
1849 ret = 0;
1850 *src_pdo = i;
1851 *sink_pdo = j;
1852 max_mw = mw;
1853 max_mv = mv;
1854 }
1855 }
1856 }
1857 } 1803 }
1858 } 1804 }
1859 1805
@@ -1865,14 +1811,13 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1865 unsigned int mv, ma, mw, flags; 1811 unsigned int mv, ma, mw, flags;
1866 unsigned int max_ma, max_mw; 1812 unsigned int max_ma, max_mw;
1867 enum pd_pdo_type type; 1813 enum pd_pdo_type type;
1868 int src_pdo_index, snk_pdo_index; 1814 int index;
1869 u32 pdo, matching_snk_pdo; 1815 u32 pdo;
1870 1816
1871 if (tcpm_pd_select_pdo(port, &snk_pdo_index, &src_pdo_index) < 0) 1817 index = tcpm_pd_select_pdo(port);
1818 if (index < 0)
1872 return -EINVAL; 1819 return -EINVAL;
1873 1820 pdo = port->source_caps[index];
1874 pdo = port->source_caps[src_pdo_index];
1875 matching_snk_pdo = port->snk_pdo[snk_pdo_index];
1876 type = pdo_type(pdo); 1821 type = pdo_type(pdo);
1877 1822
1878 if (type == PDO_TYPE_FIXED) 1823 if (type == PDO_TYPE_FIXED)
@@ -1880,28 +1825,26 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1880 else 1825 else
1881 mv = pdo_min_voltage(pdo); 1826 mv = pdo_min_voltage(pdo);
1882 1827
1883 /* Select maximum available current within the sink pdo's limit */ 1828 /* Select maximum available current within the board's power limit */
1884 if (type == PDO_TYPE_BATT) { 1829 if (type == PDO_TYPE_BATT) {
1885 mw = min_power(pdo, matching_snk_pdo); 1830 mw = pdo_max_power(pdo);
1886 ma = 1000 * mw / mv; 1831 ma = 1000 * min(mw, port->max_snk_mw) / mv;
1887 } else { 1832 } else {
1888 ma = min_current(pdo, matching_snk_pdo); 1833 ma = min(pdo_max_current(pdo),
1889 mw = ma * mv / 1000; 1834 1000 * port->max_snk_mw / mv);
1890 } 1835 }
1836 ma = min(ma, port->max_snk_ma);
1891 1837
1892 flags = RDO_USB_COMM | RDO_NO_SUSPEND; 1838 flags = RDO_USB_COMM | RDO_NO_SUSPEND;
1893 1839
1894 /* Set mismatch bit if offered power is less than operating power */ 1840 /* Set mismatch bit if offered power is less than operating power */
1841 mw = ma * mv / 1000;
1895 max_ma = ma; 1842 max_ma = ma;
1896 max_mw = mw; 1843 max_mw = mw;
1897 if (mw < port->operating_snk_mw) { 1844 if (mw < port->operating_snk_mw) {
1898 flags |= RDO_CAP_MISMATCH; 1845 flags |= RDO_CAP_MISMATCH;
1899 if (type == PDO_TYPE_BATT && 1846 max_mw = port->operating_snk_mw;
1900 (pdo_max_power(matching_snk_pdo) > pdo_max_power(pdo))) 1847 max_ma = max_mw * 1000 / mv;
1901 max_mw = pdo_max_power(matching_snk_pdo);
1902 else if (pdo_max_current(matching_snk_pdo) >
1903 pdo_max_current(pdo))
1904 max_ma = pdo_max_current(matching_snk_pdo);
1905 } 1848 }
1906 1849
1907 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d", 1850 tcpm_log(port, "cc=%d cc1=%d cc2=%d vbus=%d vconn=%s polarity=%d",
@@ -1910,16 +1853,16 @@ static int tcpm_pd_build_request(struct tcpm_port *port, u32 *rdo)
1910 port->polarity); 1853 port->polarity);
1911 1854
1912 if (type == PDO_TYPE_BATT) { 1855 if (type == PDO_TYPE_BATT) {
1913 *rdo = RDO_BATT(src_pdo_index + 1, mw, max_mw, flags); 1856 *rdo = RDO_BATT(index + 1, mw, max_mw, flags);
1914 1857
1915 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s", 1858 tcpm_log(port, "Requesting PDO %d: %u mV, %u mW%s",
1916 src_pdo_index, mv, mw, 1859 index, mv, mw,
1917 flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); 1860 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1918 } else { 1861 } else {
1919 *rdo = RDO_FIXED(src_pdo_index + 1, ma, max_ma, flags); 1862 *rdo = RDO_FIXED(index + 1, ma, max_ma, flags);
1920 1863
1921 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s", 1864 tcpm_log(port, "Requesting PDO %d: %u mV, %u mA%s",
1922 src_pdo_index, mv, ma, 1865 index, mv, ma,
1923 flags & RDO_CAP_MISMATCH ? " [mismatch]" : ""); 1866 flags & RDO_CAP_MISMATCH ? " [mismatch]" : "");
1924 } 1867 }
1925 1868
@@ -3650,19 +3593,6 @@ int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
3650} 3593}
3651EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities); 3594EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
3652 3595
3653static int nr_type_pdos(const u32 *pdo, unsigned int nr_pdo,
3654 enum pd_pdo_type type)
3655{
3656 int count = 0;
3657 int i;
3658
3659 for (i = 0; i < nr_pdo; i++) {
3660 if (pdo_type(pdo[i]) == type)
3661 count++;
3662 }
3663 return count;
3664}
3665
3666struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) 3596struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
3667{ 3597{
3668 struct tcpm_port *port; 3598 struct tcpm_port *port;
@@ -3708,15 +3638,6 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
3708 tcpc->config->nr_src_pdo); 3638 tcpc->config->nr_src_pdo);
3709 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo, 3639 port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, tcpc->config->snk_pdo,
3710 tcpc->config->nr_snk_pdo); 3640 tcpc->config->nr_snk_pdo);
3711 port->nr_fixed = nr_type_pdos(port->snk_pdo,
3712 port->nr_snk_pdo,
3713 PDO_TYPE_FIXED);
3714 port->nr_var = nr_type_pdos(port->snk_pdo,
3715 port->nr_snk_pdo,
3716 PDO_TYPE_VAR);
3717 port->nr_batt = nr_type_pdos(port->snk_pdo,
3718 port->nr_snk_pdo,
3719 PDO_TYPE_BATT);
3720 port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo, 3641 port->nr_snk_vdo = tcpm_copy_vdos(port->snk_vdo, tcpc->config->snk_vdo,
3721 tcpc->config->nr_snk_vdo); 3642 tcpc->config->nr_snk_vdo);
3722 3643
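
The tcpm.c hunks revert to the simpler source-PDO selection rule: pick the offer yielding the most power within the board's voltage and current limits, preferring the higher voltage on a tie. A standalone sketch of that rule for current-limited offers (arrays of millivolt/milliamp values stand in for parsed PDOs):

static int example_select_pdo(const unsigned int *mv, const unsigned int *ma,
			      int n, unsigned int max_snk_mv,
			      unsigned int max_snk_ma)
{
	unsigned int best_mw = 0, best_mv = 0;
	int i, best = -1;

	for (i = 0; i < n; i++) {
		unsigned int cur_ma = ma[i] < max_snk_ma ? ma[i] : max_snk_ma;
		unsigned int mw = cur_ma * mv[i] / 1000;

		if (mv[i] > max_snk_mv)
			continue;	/* stay within the board's voltage limit */
		if (mw > best_mw || (mw == best_mw && mv[i] > best_mv)) {
			best = i;
			best_mw = mw;
			best_mv = mv[i];
		}
	}

	return best;	/* index of the chosen offer, or -1 if none fits */
}
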
diff --git a/drivers/usb/usbip/vudc_sysfs.c b/drivers/usb/usbip/vudc_sysfs.c
index d86f72bbbb91..6dcd3ff655c3 100644
--- a/drivers/usb/usbip/vudc_sysfs.c
+++ b/drivers/usb/usbip/vudc_sysfs.c
@@ -105,10 +105,14 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
105 if (rv != 0) 105 if (rv != 0)
106 return -EINVAL; 106 return -EINVAL;
107 107
108 if (!udc) {
109 dev_err(dev, "no device");
110 return -ENODEV;
111 }
108 spin_lock_irqsave(&udc->lock, flags); 112 spin_lock_irqsave(&udc->lock, flags);
109 /* Don't export what we don't have */ 113 /* Don't export what we don't have */
110 if (!udc || !udc->driver || !udc->pullup) { 114 if (!udc->driver || !udc->pullup) {
111 dev_err(dev, "no device or gadget not bound"); 115 dev_err(dev, "gadget not bound");
112 ret = -ENODEV; 116 ret = -ENODEV;
113 goto unlock; 117 goto unlock;
114 } 118 }
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index b0f759476900..8a1508a8e481 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -207,9 +207,6 @@ static bool vfio_pci_nointx(struct pci_dev *pdev)
207 } 207 }
208 } 208 }
209 209
210 if (!pdev->irq)
211 return true;
212
213 return false; 210 return false;
214} 211}
215 212
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 610cba276d47..8139bc70ad7d 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -170,7 +170,7 @@ static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
170 if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) { 170 if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
171 ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head, 171 ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
172 vhost_net_buf_get_size(rxq), 172 vhost_net_buf_get_size(rxq),
173 __skb_array_destroy_skb); 173 tun_ptr_free);
174 rxq->head = rxq->tail = 0; 174 rxq->head = rxq->tail = 0;
175 } 175 }
176} 176}
@@ -948,6 +948,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
948 n->vqs[i].done_idx = 0; 948 n->vqs[i].done_idx = 0;
949 n->vqs[i].vhost_hlen = 0; 949 n->vqs[i].vhost_hlen = 0;
950 n->vqs[i].sock_hlen = 0; 950 n->vqs[i].sock_hlen = 0;
951 n->vqs[i].rx_ring = NULL;
951 vhost_net_buf_init(&n->vqs[i].rxq); 952 vhost_net_buf_init(&n->vqs[i].rxq);
952 } 953 }
953 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); 954 vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
@@ -972,6 +973,7 @@ static struct socket *vhost_net_stop_vq(struct vhost_net *n,
972 vhost_net_disable_vq(n, vq); 973 vhost_net_disable_vq(n, vq);
973 vq->private_data = NULL; 974 vq->private_data = NULL;
974 vhost_net_buf_unproduce(nvq); 975 vhost_net_buf_unproduce(nvq);
976 nvq->rx_ring = NULL;
975 mutex_unlock(&vq->mutex); 977 mutex_unlock(&vq->mutex);
976 return sock; 978 return sock;
977} 979}
@@ -1161,14 +1163,14 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
1161 vhost_net_disable_vq(n, vq); 1163 vhost_net_disable_vq(n, vq);
1162 vq->private_data = sock; 1164 vq->private_data = sock;
1163 vhost_net_buf_unproduce(nvq); 1165 vhost_net_buf_unproduce(nvq);
1164 if (index == VHOST_NET_VQ_RX)
1165 nvq->rx_ring = get_tap_ptr_ring(fd);
1166 r = vhost_vq_init_access(vq); 1166 r = vhost_vq_init_access(vq);
1167 if (r) 1167 if (r)
1168 goto err_used; 1168 goto err_used;
1169 r = vhost_net_enable_vq(n, vq); 1169 r = vhost_net_enable_vq(n, vq);
1170 if (r) 1170 if (r)
1171 goto err_used; 1171 goto err_used;
1172 if (index == VHOST_NET_VQ_RX)
1173 nvq->rx_ring = get_tap_ptr_ring(fd);
1172 1174
1173 oldubufs = nvq->ubufs; 1175 oldubufs = nvq->ubufs;
1174 nvq->ubufs = ubufs; 1176 nvq->ubufs = ubufs;
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 6d1fbda0f461..0da9943d405f 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -392,7 +392,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
392 392
393 memset(&r, 0, sizeof(r)); 393 memset(&r, 0, sizeof(r));
394 r.start = gas->address; 394 r.start = gas->address;
395 r.end = r.start + gas->access_width; 395 r.end = r.start + gas->access_width - 1;
396 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { 396 if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
397 r.flags = IORESOURCE_MEM; 397 r.flags = IORESOURCE_MEM;
398 } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) { 398 } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
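
The wdat_wdt fix above corrects an off-by-one: struct resource uses an inclusive end address, so a region of `size` bytes ends at start + size - 1. A minimal sketch of filling such a resource:

#include <linux/ioport.h>
#include <linux/string.h>

static void example_fill_resource(struct resource *r, u64 start, u64 size)
{
	memset(r, 0, sizeof(*r));
	r->start = start;
	r->end = start + size - 1;	/* inclusive end, hence the -1 */
	r->flags = IORESOURCE_MEM;
}
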