Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_watchdog.c | 2
-rw-r--r--  drivers/acpi/apei/ghes.c | 16
-rw-r--r--  drivers/acpi/arm64/iort.c | 35
-rw-r--r--  drivers/acpi/property.c | 24
-rw-r--r--  drivers/android/binder.c | 8
-rw-r--r--  drivers/android/binder_alloc.c | 18
-rw-r--r--  drivers/ata/ahci.c | 5
-rw-r--r--  drivers/ata/ata_piix.c | 1
-rw-r--r--  drivers/ata/libata-core.c | 20
-rw-r--r--  drivers/auxdisplay/charlcd.c | 11
-rw-r--r--  drivers/auxdisplay/panel.c | 11
-rw-r--r--  drivers/base/arch_topology.c | 12
-rw-r--r--  drivers/base/dma-coherent.c | 19
-rw-r--r--  drivers/base/platform.c | 3
-rw-r--r--  drivers/base/power/main.c | 9
-rw-r--r--  drivers/base/power/opp/core.c | 7
-rw-r--r--  drivers/base/power/qos.c | 10
-rw-r--r--  drivers/block/Kconfig | 2
-rw-r--r--  drivers/block/brd.c | 2
-rw-r--r--  drivers/block/loop.h | 6
-rw-r--r--  drivers/block/nbd.c | 12
-rw-r--r--  drivers/block/zram/zram_drv.c | 36
-rw-r--r--  drivers/char/tpm/tpm-interface.c | 10
-rw-r--r--  drivers/char/tpm/tpm.h | 9
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 2
-rw-r--r--  drivers/char/tpm/tpm_crb.c | 2
-rw-r--r--  drivers/char/tpm/tpm_ibmvtpm.c | 98
-rw-r--r--  drivers/char/tpm/tpm_infineon.c | 6
-rw-r--r--  drivers/char/tpm/tpm_tis_core.c | 8
-rw-r--r--  drivers/clk/clk-bulk.c | 1
-rw-r--r--  drivers/clk/rockchip/clk-rk3128.c | 12
-rw-r--r--  drivers/clk/samsung/clk-exynos4.c | 15
-rw-r--r--  drivers/clocksource/numachip.c | 2
-rw-r--r--  drivers/clocksource/timer-integrator-ap.c | 4
-rw-r--r--  drivers/cpufreq/cpufreq-dt-platdev.c | 16
-rw-r--r--  drivers/cpufreq/ti-cpufreq.c | 2
-rw-r--r--  drivers/cpuidle/cpuidle-arm.c | 6
-rw-r--r--  drivers/crypto/caam/Kconfig | 5
-rw-r--r--  drivers/crypto/caam/ctrl.c | 19
-rw-r--r--  drivers/crypto/caam/regs.h | 59
-rw-r--r--  drivers/crypto/inside-secure/safexcel_cipher.c | 2
-rw-r--r--  drivers/crypto/inside-secure/safexcel_hash.c | 2
-rw-r--r--  drivers/crypto/talitos.c | 9
-rw-r--r--  drivers/fpga/altera-cvp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 189
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 1
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 36
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 1
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 20
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.h | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 113
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 5
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 7
-rw-r--r--  drivers/gpu/drm/i915/intel_csr.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_dpio_phy.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_modes.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 41
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi.h | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c | 19
-rw-r--r--  drivers/gpu/drm/tegra/trace.h | 2
-rw-r--r--  drivers/hid/hid-ids.h | 2
-rw-r--r--  drivers/hid/hid-multitouch.c | 7
-rw-r--r--  drivers/hid/hid-rmi.c | 13
-rw-r--r--  drivers/hid/hidraw.c | 2
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 3
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hid/wacom_sys.c | 7
-rw-r--r--  drivers/hid/wacom_wac.c | 110
-rw-r--r--  drivers/hv/channel_mgmt.c | 4
-rw-r--r--  drivers/hv/hv_fcopy.c | 4
-rw-r--r--  drivers/hwmon/xgene-hwmon.c | 19
-rw-r--r--  drivers/hwtracing/intel_th/pci.c | 10
-rw-r--r--  drivers/hwtracing/stm/core.c | 2
-rw-r--r--  drivers/i2c/busses/Kconfig | 1
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-img-scb.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-sprd.c | 1
-rw-r--r--  drivers/i2c/busses/i2c-stm32f7.c | 17
-rw-r--r--  drivers/ide/ide-probe.c | 1
-rw-r--r--  drivers/ide/ide-scan-pci.c | 13
-rw-r--r--  drivers/ide/setup-pci.c | 63
-rw-r--r--  drivers/infiniband/core/iwpm_msg.c | 8
-rw-r--r--  drivers/infiniband/core/iwpm_util.c | 5
-rw-r--r--  drivers/infiniband/core/security.c | 4
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 14
-rw-r--r--  drivers/infiniband/core/verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/bnxt_re.h | 14
-rw-r--r--  drivers/infiniband/hw/bnxt_re/ib_verbs.c | 107
-rw-r--r--  drivers/infiniband/hw/bnxt_re/main.c | 28
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.c | 4
-rw-r--r--  drivers/infiniband/hw/bnxt_re/qplib_rcfw.h | 3
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 9
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c | 101
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.h | 3
-rw-r--r--  drivers/infiniband/hw/hfi1/eprom.c | 20
-rw-r--r--  drivers/infiniband/hw/hfi1/file_ops.c | 41
-rw-r--r--  drivers/infiniband/hw/hfi1/pcie.c | 50
-rw-r--r--  drivers/infiniband/hw/hfi1/platform.c | 4
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw.h | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.c | 154
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_cm.h | 5
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_ctrl.c | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_main.c | 39
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_p.h | 2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_puda.c | 11
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_utils.c | 6
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c | 14
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 20
-rw-r--r--  drivers/infiniband/hw/mlx5/mem.c | 47
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 27
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c | 4
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c | 3
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h | 2
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.c | 12
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma.h | 31
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c | 16
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_ib.c | 13
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_main.c | 15
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_vlan.c | 30
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c | 2
-rw-r--r--  drivers/input/ff-core.c | 13
-rw-r--r--  drivers/input/misc/uinput.c | 57
-rw-r--r--  drivers/input/mouse/elan_i2c_i2c.c | 2
-rw-r--r--  drivers/iommu/Kconfig | 5
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 8
-rw-r--r--  drivers/iommu/dmar.c | 2
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c | 2
-rw-r--r--  drivers/iommu/mtk_iommu.c | 3
-rw-r--r--  drivers/iommu/of_iommu.c | 5
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 8
-rw-r--r--  drivers/irqchip/irq-gic-v4.c | 12
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 19
-rw-r--r--  drivers/isdn/i4l/isdn_ppp.c | 37
-rw-r--r--  drivers/leds/leds-as3645a.c | 29
-rw-r--r--  drivers/md/bcache/closure.c | 4
-rw-r--r--  drivers/md/dm-core.h | 1
-rw-r--r--  drivers/md/dm-crypt.c | 5
-rw-r--r--  drivers/md/dm-ioctl.c | 37
-rw-r--r--  drivers/md/dm-raid.c | 13
-rw-r--r--  drivers/md/dm.c | 10
-rw-r--r--  drivers/md/md.c | 72
-rw-r--r--  drivers/md/md.h | 1
-rw-r--r--  drivers/md/raid5.c | 20
-rw-r--r--  drivers/media/rc/ir-sharp-decoder.c | 2
-rw-r--r--  drivers/misc/cxl/cxllib.c | 13
-rw-r--r--  drivers/mmc/core/block.c | 3
-rw-r--r--  drivers/mmc/core/mmc.c | 36
-rw-r--r--  drivers/mmc/core/queue.c | 120
-rw-r--r--  drivers/mmc/core/queue.h | 6
-rw-r--r--  drivers/mmc/host/Kconfig | 2
-rw-r--r--  drivers/mmc/host/cavium-thunderx.c | 6
-rw-r--r--  drivers/mmc/host/cavium.c | 2
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c | 26
-rw-r--r--  drivers/mmc/host/pxamci.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-pci-core.c | 15
-rw-r--r--  drivers/mmc/host/sdhci-xenon.c | 24
-rw-r--r--  drivers/mmc/host/sdhci-xenon.h | 1
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c | 47
-rw-r--r--  drivers/mtd/mtdpart.c | 8
-rw-r--r--  drivers/mtd/nand/atmel/pmecc.c | 2
-rw-r--r--  drivers/mtd/nand/lpc32xx_mlc.c | 3
-rw-r--r--  drivers/mtd/nand/nand_base.c | 3
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c | 45
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 6
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 145
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c | 53
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h | 10
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c | 8
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 52
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c | 43
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 184
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 41
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c | 23
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 91
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 18
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-ethtool.c | 30
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.c | 25
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 3
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.h | 3
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 5
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 5
-rw-r--r--  drivers/net/ethernet/rocker/rocker_tlv.h | 48
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 112
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 7
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 1
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 2
-rw-r--r--  drivers/net/hyperv/netvsc.c | 7
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 8
-rw-r--r--  drivers/net/phy/Kconfig | 18
-rw-r--r--  drivers/net/phy/phy.c | 3
-rw-r--r--  drivers/net/phy/phy_device.c | 2
-rw-r--r--  drivers/net/phy/xilinx_gmii2rgmii.c | 2
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 2
-rw-r--r--  drivers/net/tun.c | 8
-rw-r--r--  drivers/net/usb/cdc_ether.c | 21
-rw-r--r--  drivers/net/usb/lan78xx.c | 34
-rw-r--r--  drivers/net/usb/r8152.c | 2
-rw-r--r--  drivers/net/usb/rndis_host.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c | 7
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 37
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/d3.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 62
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rs.c | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 8
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 10
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 9
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.h | 3
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/event.c | 2
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c | 9
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h | 2
-rw-r--r--  drivers/nvdimm/namespace_devs.c | 9
-rw-r--r--  drivers/nvme/host/core.c | 11
-rw-r--r--  drivers/nvme/host/fabrics.c | 18
-rw-r--r--  drivers/nvme/host/fc.c | 21
-rw-r--r--  drivers/nvme/host/pci.c | 48
-rw-r--r--  drivers/nvme/host/rdma.c | 9
-rw-r--r--  drivers/nvme/target/core.c | 9
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c | 9
-rw-r--r--  drivers/nvme/target/fc.c | 24
-rw-r--r--  drivers/nvme/target/fcloop.c | 104
-rw-r--r--  drivers/nvme/target/nvmet.h | 1
-rw-r--r--  drivers/nvmem/core.c | 3
-rw-r--r--  drivers/pci/endpoint/functions/pci-epf-test.c | 13
-rw-r--r--  drivers/pci/pci-sysfs.c | 11
-rw-r--r--  drivers/perf/arm_pmu_acpi.c | 1
-rw-r--r--  drivers/platform/x86/fujitsu-laptop.c | 10
-rw-r--r--  drivers/rapidio/devices/tsi721.c | 7
-rw-r--r--  drivers/rapidio/rio-access.c | 40
-rw-r--r--  drivers/reset/Kconfig | 9
-rw-r--r--  drivers/reset/Makefile | 2
-rw-r--r--  drivers/reset/reset-hsdk.c (renamed from drivers/reset/reset-hsdk-v1.c) | 44
-rw-r--r--  drivers/s390/block/dasd.c | 12
-rw-r--r--  drivers/s390/block/scm_blk.c | 6
-rw-r--r--  drivers/s390/cio/device.c | 12
-rw-r--r--  drivers/s390/cio/device.h | 1
-rw-r--r--  drivers/s390/cio/device_fsm.c | 12
-rw-r--r--  drivers/s390/cio/io_sch.h | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 12
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 5
-rw-r--r--  drivers/scsi/aacraid/linit.c | 20
-rw-r--r--  drivers/scsi/aacraid/src.c | 2
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 6
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 2
-rw-r--r--  drivers/scsi/libiscsi.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 1
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 2
-rw-r--r--  drivers/scsi/scsi_error.c | 3
-rw-r--r--  drivers/scsi/scsi_scan.c | 3
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 16
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 18
-rw-r--r--  drivers/scsi/sd.c | 37
-rw-r--r--  drivers/scsi/sg.c | 64
-rw-r--r--  drivers/tty/mxser.c | 16
-rw-r--r--  drivers/tty/serial/bcm63xx_uart.c | 5
-rw-r--r--  drivers/tty/serial/fsl_lpuart.c | 40
-rw-r--r--  drivers/tty/serial/sccnxp.c | 13
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 4
-rw-r--r--  drivers/usb/core/config.c | 16
-rw-r--r--  drivers/usb/core/devio.c | 11
-rw-r--r--  drivers/usb/core/hub.c | 2
-rw-r--r--  drivers/usb/core/message.c | 4
-rw-r--r--  drivers/usb/dwc3/dwc3-of-simple.c | 1
-rw-r--r--  drivers/usb/dwc3/ep0.c | 7
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 17
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c | 27
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.h | 14
-rw-r--r--  drivers/usb/gadget/function/f_printer.c | 7
-rw-r--r--  drivers/usb/gadget/function/u_fs.h | 1
-rw-r--r--  drivers/usb/gadget/legacy/inode.c | 46
-rw-r--r--  drivers/usb/gadget/legacy/mass_storage.c | 26
-rw-r--r--  drivers/usb/gadget/udc/Kconfig | 1
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.c | 4
-rw-r--r--  drivers/usb/gadget/udc/core.c | 3
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c | 65
-rw-r--r--  drivers/usb/gadget/udc/renesas_usb3.c | 19
-rw-r--r--  drivers/usb/host/pci-quirks.c | 10
-rw-r--r--  drivers/usb/host/xhci-hub.c | 14
-rw-r--r--  drivers/usb/host/xhci-pci.c | 12
-rw-r--r--  drivers/usb/host/xhci-plat.c | 16
-rw-r--r--  drivers/usb/host/xhci.c | 3
-rw-r--r--  drivers/usb/host/xhci.h | 10
-rw-r--r--  drivers/usb/renesas_usbhs/fifo.c | 21
-rw-r--r--  drivers/usb/storage/transport.c | 14
-rw-r--r--  drivers/usb/storage/uas-detect.h | 15
-rw-r--r--  drivers/usb/storage/uas.c | 10
-rw-r--r--  drivers/usb/storage/unusual_devs.h | 7
-rw-r--r--  drivers/uwb/hwa-rc.c | 2
-rw-r--r--  drivers/uwb/uwbd.c | 12
-rw-r--r--  drivers/xen/xen-pciback/conf_space_header.c | 11
-rw-r--r--  drivers/xen/xenbus/xenbus_client.c | 130
340 files changed, 3599 insertions(+), 2091 deletions(-)
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index bf22c29d2517..11b113f8e367 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -66,7 +66,7 @@ void __init acpi_watchdog_init(void)
 	for (i = 0; i < wdat->entries; i++) {
 		const struct acpi_generic_address *gas;
 		struct resource_entry *rentry;
-		struct resource res;
+		struct resource res = {};
 		bool found;
 
 		gas = &entries[i].register_region;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 077f9bad6f44..3c3a37b8503b 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -743,17 +743,19 @@ static int ghes_proc(struct ghes *ghes)
 	}
 	ghes_do_proc(ghes, ghes->estatus);
 
+out:
+	ghes_clear_estatus(ghes);
+
+	if (rc == -ENOENT)
+		return rc;
+
 	/*
 	 * GHESv2 type HEST entries introduce support for error acknowledgment,
 	 * so only acknowledge the error if this support is present.
 	 */
-	if (is_hest_type_generic_v2(ghes)) {
-		rc = ghes_ack_error(ghes->generic_v2);
-		if (rc)
-			return rc;
-	}
-out:
-	ghes_clear_estatus(ghes);
+	if (is_hest_type_generic_v2(ghes))
+		return ghes_ack_error(ghes->generic_v2);
+
 	return rc;
 }
 
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 9565d572f8dd..de56394dd161 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1178,12 +1178,44 @@ dev_put:
 	return ret;
 }
 
+static bool __init iort_enable_acs(struct acpi_iort_node *iort_node)
+{
+	if (iort_node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
+		struct acpi_iort_node *parent;
+		struct acpi_iort_id_mapping *map;
+		int i;
+
+		map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, iort_node,
+				   iort_node->mapping_offset);
+
+		for (i = 0; i < iort_node->mapping_count; i++, map++) {
+			if (!map->output_reference)
+				continue;
+
+			parent = ACPI_ADD_PTR(struct acpi_iort_node,
+					iort_table, map->output_reference);
+			/*
+			 * If we detect a RC->SMMU mapping, make sure
+			 * we enable ACS on the system.
+			 */
+			if ((parent->type == ACPI_IORT_NODE_SMMU) ||
+				(parent->type == ACPI_IORT_NODE_SMMU_V3)) {
+				pci_request_acs();
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
 static void __init iort_init_platform_devices(void)
 {
 	struct acpi_iort_node *iort_node, *iort_end;
 	struct acpi_table_iort *iort;
 	struct fwnode_handle *fwnode;
 	int i, ret;
+	bool acs_enabled = false;
 
 	/*
 	 * iort_table and iort both point to the start of IORT table, but
@@ -1203,6 +1235,9 @@ static void __init iort_init_platform_devices(void)
 			return;
 		}
 
+		if (!acs_enabled)
+			acs_enabled = iort_enable_acs(iort_node);
+
 		if ((iort_node->type == ACPI_IORT_NODE_SMMU) ||
 			(iort_node->type == ACPI_IORT_NODE_SMMU_V3)) {
 
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index c1c216163de3..3fb8ff513461 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -908,11 +908,12 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
 					    struct fwnode_handle *child)
 {
 	const struct acpi_device *adev = to_acpi_device_node(fwnode);
-	struct acpi_device *child_adev = NULL;
 	const struct list_head *head;
 	struct list_head *next;
 
 	if (!child || is_acpi_device_node(child)) {
+		struct acpi_device *child_adev;
+
 		if (adev)
 			head = &adev->children;
 		else
@@ -922,8 +923,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
 			goto nondev;
 
 		if (child) {
-			child_adev = to_acpi_device_node(child);
-			next = child_adev->node.next;
+			adev = to_acpi_device_node(child);
+			next = adev->node.next;
 			if (next == head) {
 				child = NULL;
 				goto nondev;
@@ -941,8 +942,8 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
 		const struct acpi_data_node *data = to_acpi_data_node(fwnode);
 		struct acpi_data_node *dn;
 
-		if (child_adev)
-			head = &child_adev->data.subnodes;
+		if (adev)
+			head = &adev->data.subnodes;
 		else if (data)
 			head = &data->data.subnodes;
 		else
@@ -1293,3 +1294,16 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
 DECLARE_ACPI_FWNODE_OPS(acpi_device_fwnode_ops);
 DECLARE_ACPI_FWNODE_OPS(acpi_data_fwnode_ops);
 const struct fwnode_operations acpi_static_fwnode_ops;
+
+bool is_acpi_device_node(const struct fwnode_handle *fwnode)
+{
+	return !IS_ERR_OR_NULL(fwnode) &&
+		fwnode->ops == &acpi_device_fwnode_ops;
+}
+EXPORT_SYMBOL(is_acpi_device_node);
+
+bool is_acpi_data_node(const struct fwnode_handle *fwnode)
+{
+	return !IS_ERR_OR_NULL(fwnode) && fwnode->ops == &acpi_data_fwnode_ops;
+}
+EXPORT_SYMBOL(is_acpi_data_node);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index d055b3f2a207..ab34239a76ee 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2217,7 +2217,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 				  debug_id, (u64)fda->num_fds);
 			continue;
 		}
-		fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+		fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
 		for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
 			task_close_fd(proc, fd_array[fd_index]);
 	} break;
@@ -2326,7 +2326,6 @@ static int binder_translate_handle(struct flat_binder_object *fp,
 			  (u64)node->ptr);
 		binder_node_unlock(node);
 	} else {
-		int ret;
 		struct binder_ref_data dest_rdata;
 
 		binder_node_unlock(node);
@@ -2442,7 +2441,7 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
 	 */
 	parent_buffer = parent->buffer -
 		binder_alloc_get_user_buffer_offset(&target_proc->alloc);
-	fd_array = (u32 *)(parent_buffer + fda->parent_offset);
+	fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
 	if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
 		binder_user_error("%d:%d parent offset not aligned correctly.\n",
 				  proc->pid, thread->pid);
@@ -2508,7 +2507,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
 				  proc->pid, thread->pid);
 		return -EINVAL;
 	}
-	parent_buffer = (u8 *)(parent->buffer -
+	parent_buffer = (u8 *)((uintptr_t)parent->buffer -
 			binder_alloc_get_user_buffer_offset(
 			&target_proc->alloc));
 	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;
@@ -3083,6 +3082,7 @@ static void binder_transaction(struct binder_proc *proc,
 err_dead_proc_or_thread:
 	return_error = BR_DEAD_REPLY;
 	return_error_line = __LINE__;
+	binder_dequeue_work(proc, tcomplete);
 err_translate_failed:
 err_bad_object_type:
 err_bad_offset:
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 8fe165844e47..064f5e31ec55 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -913,6 +913,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	struct binder_alloc *alloc;
 	uintptr_t page_addr;
 	size_t index;
+	struct vm_area_struct *vma;
 
 	alloc = page->alloc;
 	if (!mutex_trylock(&alloc->mutex))
@@ -923,16 +924,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	index = page - alloc->pages;
 	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
-	if (alloc->vma) {
+	vma = alloc->vma;
+	if (vma) {
 		mm = get_task_mm(alloc->tsk);
 		if (!mm)
 			goto err_get_task_mm_failed;
 		if (!down_write_trylock(&mm->mmap_sem))
 			goto err_down_write_mmap_sem_failed;
+	}
+
+	list_lru_isolate(lru, item);
+	spin_unlock(lock);
 
+	if (vma) {
 		trace_binder_unmap_user_start(alloc, index);
 
-		zap_page_range(alloc->vma,
+		zap_page_range(vma,
 			       page_addr + alloc->user_buffer_offset,
 			       PAGE_SIZE);
 
@@ -950,13 +957,12 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 	trace_binder_unmap_kernel_end(alloc, index);
 
-	list_lru_isolate(lru, item);
-
+	spin_lock(lock);
 	mutex_unlock(&alloc->mutex);
-	return LRU_REMOVED;
+	return LRU_REMOVED_RETRY;
 
 err_down_write_mmap_sem_failed:
-	mmput(mm);
+	mmput_async(mm);
 err_get_task_mm_failed:
 err_page_already_freed:
 	mutex_unlock(&alloc->mutex);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index cb9b0e9090e3..9f78bb03bb76 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -621,8 +621,11 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
 static int ahci_pci_reset_controller(struct ata_host *host)
 {
 	struct pci_dev *pdev = to_pci_dev(host->dev);
+	int rc;
 
-	ahci_reset_controller(host);
+	rc = ahci_reset_controller(host);
+	if (rc)
+		return rc;
 
 	if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
 		struct ahci_host_priv *hpriv = host->private_data;
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 8401c3b5be92..b702c20fbc2b 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -492,6 +492,7 @@ static const struct ich_laptop ich_laptop[] = {
 	{ 0x27DF, 0x152D, 0x0778 },	/* ICH7 on unknown Intel */
 	{ 0x24CA, 0x1025, 0x0061 },	/* ICH4 on ACER Aspire 2023WLMi */
 	{ 0x24CA, 0x1025, 0x003d },	/* ICH4 on ACER TM290 */
+	{ 0x24CA, 0x10CF, 0x11AB },	/* ICH4M on Fujitsu-Siemens Lifebook S6120 */
 	{ 0x266F, 0x1025, 0x0066 },	/* ICH6 on ACER Aspire 1694WLMi */
 	{ 0x2653, 0x1043, 0x82D8 },	/* ICH6M on Asus Eee 701 */
 	{ 0x27df, 0x104d, 0x900e },	/* ICH7 on Sony TZ-90 */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1945a8ea2099..ee4c1ec9dca0 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3234,19 +3234,19 @@ static const struct ata_timing ata_timing[] = {
 };
 
 #define ENOUGH(v, unit)	(((v)-1)/(unit)+1)
-#define EZ(v, unit)	((v)?ENOUGH(v, unit):0)
+#define EZ(v, unit)	((v)?ENOUGH(((v) * 1000), unit):0)
 
 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
 {
-	q->setup	= EZ(t->setup      * 1000,  T);
-	q->act8b	= EZ(t->act8b      * 1000,  T);
-	q->rec8b	= EZ(t->rec8b      * 1000,  T);
-	q->cyc8b	= EZ(t->cyc8b      * 1000,  T);
-	q->active	= EZ(t->active     * 1000,  T);
-	q->recover	= EZ(t->recover    * 1000,  T);
-	q->dmack_hold	= EZ(t->dmack_hold * 1000,  T);
-	q->cycle	= EZ(t->cycle      * 1000,  T);
-	q->udma		= EZ(t->udma       * 1000, UT);
+	q->setup	= EZ(t->setup,      T);
+	q->act8b	= EZ(t->act8b,      T);
+	q->rec8b	= EZ(t->rec8b,      T);
+	q->cyc8b	= EZ(t->cyc8b,      T);
+	q->active	= EZ(t->active,     T);
+	q->recover	= EZ(t->recover,    T);
+	q->dmack_hold	= EZ(t->dmack_hold, T);
+	q->cycle	= EZ(t->cycle,      T);
+	q->udma		= EZ(t->udma,      UT);
 }
 
 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
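A quick numeric sanity check of the EZ()/ENOUGH() change above. This is a standalone user-space sketch, and the timing values in it are illustrative, not taken from the ata_timing table: ENOUGH() is a ceiling division, and the patch moves the "* 1000" scaling inside EZ() so callers pass the raw timing value.

#include <stdio.h>

/* Ceiling division, as in libata: ENOUGH(v, unit) == ceil(v / unit) */
#define ENOUGH(v, unit)	(((v)-1)/(unit)+1)
/* Post-patch form: scale by 1000 inside the macro; zero passes through */
#define EZ(v, unit)	((v)?ENOUGH(((v) * 1000), unit):0)

int main(void)
{
	/* Illustrative numbers only: quantize a 25-unit value against a
	 * 30-unit clock period after the x1000 scaling. */
	printf("%d\n", EZ(25, 30));	/* ceil(25000/30) = 834 */
	printf("%d\n", EZ(0, 30));	/* zero stays zero: 0 */
	return 0;
}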
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index cfeb049a01ef..642afd88870b 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -647,18 +647,25 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
 static int charlcd_open(struct inode *inode, struct file *file)
 {
 	struct charlcd_priv *priv = to_priv(the_charlcd);
+	int ret;
 
+	ret = -EBUSY;
 	if (!atomic_dec_and_test(&charlcd_available))
-		return -EBUSY;	/* open only once at a time */
+		goto fail;	/* open only once at a time */
 
+	ret = -EPERM;
 	if (file->f_mode & FMODE_READ)	/* device is write-only */
-		return -EPERM;
+		goto fail;
 
 	if (priv->must_clear) {
 		charlcd_clear_display(&priv->lcd);
 		priv->must_clear = false;
 	}
 	return nonseekable_open(inode, file);
+
+fail:
+	atomic_inc(&charlcd_available);
+	return ret;
 }
 
 static int charlcd_release(struct inode *inode, struct file *file)
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index df126dcdaf18..6911acd896d9 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1105,14 +1105,21 @@ static ssize_t keypad_read(struct file *file,
 
 static int keypad_open(struct inode *inode, struct file *file)
 {
+	int ret;
+
+	ret = -EBUSY;
 	if (!atomic_dec_and_test(&keypad_available))
-		return -EBUSY;	/* open only once at a time */
+		goto fail;	/* open only once at a time */
 
+	ret = -EPERM;
 	if (file->f_mode & FMODE_WRITE)	/* device is read-only */
-		return -EPERM;
+		goto fail;
 
 	keypad_buflen = 0;	/* flush the buffer on opening */
 	return 0;
+fail:
+	atomic_inc(&keypad_available);
+	return ret;
 }
 
 static int keypad_release(struct inode *inode, struct file *file)
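Both auxdisplay hunks above fix the same leak: an early error return left the open-once counter decremented, so the device reported -EBUSY forever after. A self-contained sketch of the corrected pattern (plain ints stand in for the kernel's atomic_t, and -16/-1 stand in for -EBUSY/-EPERM):

#include <stdio.h>

/* The counter starts at 1; the first opener takes it to 0, and every
 * failure path must increment it back or the gate stays closed. */
static int available = 1;

static int open_once(int want_write, int is_read_only)
{
	int ret = -16;				/* -EBUSY */
	if (--available != 0)
		goto fail;			/* open only once at a time */

	ret = -1;				/* -EPERM */
	if (want_write && is_read_only)
		goto fail;

	return 0;
fail:
	available++;	/* restore the counter, as the patches now do */
	return ret;
}

int main(void)
{
	printf("%d\n", open_once(1, 1));	/* -1: EPERM, gate restored */
	printf("%d\n", open_once(0, 1));	/* 0: open succeeds */
	printf("%d\n", open_once(0, 1));	/* -16: EBUSY, already open */
	return 0;
}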
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 41be9ff7d70a..6df7d6676a48 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -166,11 +166,11 @@ bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu)
 }
 
 #ifdef CONFIG_CPU_FREQ
-static cpumask_var_t cpus_to_visit;
-static void parsing_done_workfn(struct work_struct *work);
-static DECLARE_WORK(parsing_done_work, parsing_done_workfn);
+static cpumask_var_t cpus_to_visit __initdata;
+static void __init parsing_done_workfn(struct work_struct *work);
+static __initdata DECLARE_WORK(parsing_done_work, parsing_done_workfn);
 
-static int
+static int __init
 init_cpu_capacity_callback(struct notifier_block *nb,
 			   unsigned long val,
 			   void *data)
@@ -206,7 +206,7 @@ init_cpu_capacity_callback(struct notifier_block *nb,
 	return 0;
 }
 
-static struct notifier_block init_cpu_capacity_notifier = {
+static struct notifier_block init_cpu_capacity_notifier __initdata = {
 	.notifier_call = init_cpu_capacity_callback,
 };
 
@@ -232,7 +232,7 @@ static int __init register_cpufreq_notifier(void)
 }
 core_initcall(register_cpufreq_notifier);
 
-static void parsing_done_workfn(struct work_struct *work)
+static void __init parsing_done_workfn(struct work_struct *work)
 {
 	cpufreq_unregister_notifier(&init_cpu_capacity_notifier,
 				    CPUFREQ_POLICY_NOTIFIER);
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index a39b2166b145..744f64f43454 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -348,16 +348,15 @@ static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
 	struct dma_coherent_mem *mem = rmem->priv;
 	int ret;
 
-	if (!mem)
-		return -ENODEV;
-
-	ret = dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
-				       DMA_MEMORY_EXCLUSIVE, &mem);
-
-	if (ret) {
-		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
-			&rmem->base, (unsigned long)rmem->size / SZ_1M);
-		return ret;
+	if (!mem) {
+		ret = dma_init_coherent_memory(rmem->base, rmem->base,
+					       rmem->size,
+					       DMA_MEMORY_EXCLUSIVE, &mem);
+		if (ret) {
+			pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
+			       &rmem->base, (unsigned long)rmem->size / SZ_1M);
+			return ret;
+		}
 	}
 	mem->use_dev_dma_pfn_offset = true;
 	rmem->priv = mem;
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index d1bd99271066..9045c5f3734e 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -868,7 +868,8 @@ static ssize_t driver_override_store(struct device *dev,
 	struct platform_device *pdev = to_platform_device(dev);
 	char *driver_override, *old, *cp;
 
-	if (count > PATH_MAX)
+	/* We need to keep extra room for a newline */
+	if (count >= (PAGE_SIZE - 1))
 		return -EINVAL;
 
 	driver_override = kstrndup(buf, count, GFP_KERNEL);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index ea1732ed7a9d..770b1539a083 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1860,10 +1860,13 @@ void device_pm_check_callbacks(struct device *dev)
 {
 	spin_lock_irq(&dev->power.lock);
 	dev->power.no_pm_callbacks =
-		(!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
-		(!dev->class || pm_ops_is_empty(dev->class->pm)) &&
+		(!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
+		 !dev->bus->suspend && !dev->bus->resume)) &&
+		(!dev->class || (pm_ops_is_empty(dev->class->pm) &&
+		 !dev->class->suspend && !dev->class->resume)) &&
 		(!dev->type || pm_ops_is_empty(dev->type->pm)) &&
 		(!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
-		(!dev->driver || pm_ops_is_empty(dev->driver->pm));
+		(!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
+		 !dev->driver->suspend && !dev->driver->resume));
 	spin_unlock_irq(&dev->power.lock);
 }
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index a8cc14fd8ae4..a6de32530693 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -1581,6 +1581,9 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
 
 	opp->available = availability_req;
 
+	dev_pm_opp_get(opp);
+	mutex_unlock(&opp_table->lock);
+
 	/* Notify the change of the OPP availability */
 	if (availability_req)
 		blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE,
@@ -1589,8 +1592,12 @@ static int _opp_set_availability(struct device *dev, unsigned long freq,
 		blocking_notifier_call_chain(&opp_table->head,
 					     OPP_EVENT_DISABLE, opp);
 
+	dev_pm_opp_put(opp);
+	goto put_table;
+
 unlock:
 	mutex_unlock(&opp_table->lock);
+put_table:
 	dev_pm_opp_put_opp_table(opp_table);
 	return r;
 }
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index f850daeffba4..277d43a83f53 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -277,11 +277,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 
-static bool dev_pm_qos_invalid_request(struct device *dev,
-				       struct dev_pm_qos_request *req)
+static bool dev_pm_qos_invalid_req_type(struct device *dev,
+					enum dev_pm_qos_req_type type)
 {
-	return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
-			&& !dev->power.set_latency_tolerance);
+	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
+	       !dev->power.set_latency_tolerance;
 }
 
 static int __dev_pm_qos_add_request(struct device *dev,
@@ -290,7 +290,7 @@ static int __dev_pm_qos_add_request(struct device *dev,
 {
 	int ret = 0;
 
-	if (!dev || dev_pm_qos_invalid_request(dev, req))
+	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
 		return -EINVAL;
 
 	if (WARN(dev_pm_qos_request_active(req),
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 4a438b8abe27..2dfe99b328f8 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,7 +17,7 @@ if BLK_DEV
 
 config BLK_DEV_NULL_BLK
 	tristate "Null test block driver"
-	depends on CONFIGFS_FS
+	select CONFIGFS_FS
 
 config BLK_DEV_FD
 	tristate "Normal floppy disk support"
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index bbd0d186cfc0..2d7178f7754e 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -342,7 +342,7 @@ static long __brd_direct_access(struct brd_device *brd, pgoff_t pgoff,
 
 	if (!brd)
 		return -ENODEV;
-	page = brd_insert_page(brd, PFN_PHYS(pgoff) / 512);
+	page = brd_insert_page(brd, (sector_t)pgoff << PAGE_SECTORS_SHIFT);
 	if (!page)
 		return -ENOSPC;
 	*kaddr = page_address(page);
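The brd change above replaces byte-address arithmetic with a direct page-to-sector shift. A hedged illustration of why the widening cast matters, assuming 4 KiB pages and 512-byte sectors (so PAGE_SECTORS_SHIFT is 3) and a 32-bit intermediate where the old-style byte address can wrap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assumed constants for a 4 KiB page / 512 B sector system */
	const unsigned PAGE_SHIFT = 12;
	const unsigned PAGE_SECTORS_SHIFT = PAGE_SHIFT - 9;	/* 3 */

	uint32_t pgoff = 0x100000;	/* page index of a 4 GiB offset */

	/* Old-style arithmetic done in 32 bits: the intermediate byte
	 * address (pgoff << 12) wraps to 0, so the sector is wrong. */
	uint32_t bad = (pgoff << PAGE_SHIFT) / 512;

	/* Patched form: widen first, then shift pages straight to sectors. */
	uint64_t good = (uint64_t)pgoff << PAGE_SECTORS_SHIFT;

	printf("bad=%u good=%llu\n", bad, (unsigned long long)good);
	return 0;
}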
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index f68c1d50802f..1f3956702993 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -67,10 +67,8 @@ struct loop_device {
 struct loop_cmd {
 	struct kthread_work work;
 	struct request *rq;
-	union {
-		bool use_aio; /* use AIO interface to handle I/O */
-		atomic_t ref; /* only for aio */
-	};
+	bool use_aio; /* use AIO interface to handle I/O */
+	atomic_t ref; /* only for aio */
 	long ret;
 	struct kiocb iocb;
 	struct bio_vec *bvec;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2aa87cbdede0..883dfebd3014 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -820,9 +820,13 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 	 * appropriate.
 	 */
 	ret = nbd_handle_cmd(cmd, hctx->queue_num);
+	if (ret < 0)
+		ret = BLK_STS_IOERR;
+	else if (!ret)
+		ret = BLK_STS_OK;
 	complete(&cmd->send_complete);
 
-	return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
+	return ret;
 }
 
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
@@ -1194,6 +1198,12 @@ static int nbd_ioctl(struct block_device *bdev, fmode_t mode,
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
+	/* The block layer will pass back some non-nbd ioctls in case we have
+	 * special handling for them, but we don't so just return an error.
+	 */
+	if (_IOC_TYPE(cmd) != 0xab)
+		return -EINVAL;
+
 	mutex_lock(&nbd->config_lock);
 
 	/* Don't allow ioctl operations on a nbd device that was created with
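The new nbd guard keys on the ioctl "type" byte: nbd commands are built as _IO(0xab, ...) in <linux/nbd.h>, and _IOC_TYPE() recovers that byte from the encoded command. A minimal sketch of the decoding — the macros are restated locally and simplified (the kernel's real definitions in <asm/ioctl.h> also encode size and direction bits, which are zero for plain _IO commands):

#include <stdio.h>

/* Simplified Linux ioctl encoding: low 8 bits are the command number,
 * the next 8 bits are the "type" byte the patch filters on. */
#define _IOC_TYPESHIFT	8
#define _IO(type, nr)	(((type) << _IOC_TYPESHIFT) | (nr))
#define _IOC_TYPE(cmd)	(((cmd) >> _IOC_TYPESHIFT) & 0xff)

#define NBD_SET_SOCK	_IO(0xab, 0)	/* as defined in <linux/nbd.h> */
#define BLKROSET	_IO(0x12, 93)	/* a block-layer ioctl, type 0x12 */

int main(void)
{
	printf("%#x\n", _IOC_TYPE(NBD_SET_SOCK));	/* 0xab: accepted */
	printf("%#x\n", _IOC_TYPE(BLKROSET));		/* 0x12: now -EINVAL */
	return 0;
}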
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 2981c27d3aae..f149d3e61234 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -766,27 +766,6 @@ static void zram_slot_unlock(struct zram *zram, u32 index)
 	bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
 }
 
-static bool zram_same_page_read(struct zram *zram, u32 index,
-				struct page *page,
-				unsigned int offset, unsigned int len)
-{
-	zram_slot_lock(zram, index);
-	if (unlikely(!zram_get_handle(zram, index) ||
-			zram_test_flag(zram, index, ZRAM_SAME))) {
-		void *mem;
-
-		zram_slot_unlock(zram, index);
-		mem = kmap_atomic(page);
-		zram_fill_page(mem + offset, len,
-				zram_get_element(zram, index));
-		kunmap_atomic(mem);
-		return true;
-	}
-	zram_slot_unlock(zram, index);
-
-	return false;
-}
-
 static void zram_meta_free(struct zram *zram, u64 disksize)
 {
 	size_t num_pages = disksize >> PAGE_SHIFT;
@@ -884,11 +863,20 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 		zram_slot_unlock(zram, index);
 	}
 
-	if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
-		return 0;
-
 	zram_slot_lock(zram, index);
 	handle = zram_get_handle(zram, index);
+	if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) {
+		unsigned long value;
+		void *mem;
+
+		value = handle ? zram_get_element(zram, index) : 0;
+		mem = kmap_atomic(page);
+		zram_fill_page(mem, PAGE_SIZE, value);
+		kunmap_atomic(mem);
+		zram_slot_unlock(zram, index);
+		return 0;
+	}
+
 	size = zram_get_obj_size(zram, index);
 
 	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index fe597e6c55c4..1d6729be4cd6 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -455,7 +455,7 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space,
 			goto out;
 		}
 
-		msleep(TPM_TIMEOUT);	/* CHECK */
+		tpm_msleep(TPM_TIMEOUT);
 		rmb();
 	} while (time_before(jiffies, stop));
 
@@ -970,7 +970,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
 			dev_info(
 			    &chip->dev, HW_ERR
 			    "TPM command timed out during continue self test");
-			msleep(delay_msec);
+			tpm_msleep(delay_msec);
 			continue;
 		}
 
@@ -985,7 +985,7 @@ int tpm_do_selftest(struct tpm_chip *chip)
 		}
 		if (rc != TPM_WARN_DOING_SELFTEST)
 			return rc;
-		msleep(delay_msec);
+		tpm_msleep(delay_msec);
 	} while (--loops > 0);
 
 	return rc;
@@ -1085,7 +1085,7 @@ again:
 		}
 	} else {
 		do {
-			msleep(TPM_TIMEOUT);
+			tpm_msleep(TPM_TIMEOUT);
 			status = chip->ops->status(chip);
 			if ((status & mask) == mask)
 				return 0;
@@ -1150,7 +1150,7 @@ int tpm_pm_suspend(struct device *dev)
 		 */
 		if (rc != TPM_WARN_RETRY)
 			break;
-		msleep(TPM_TIMEOUT_RETRY);
+		tpm_msleep(TPM_TIMEOUT_RETRY);
 	}
 
 	if (rc)
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 04fbff2edbf3..2d5466a72e40 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -50,7 +50,8 @@ enum tpm_const {
 
 enum tpm_timeout {
 	TPM_TIMEOUT = 5,	/* msecs */
-	TPM_TIMEOUT_RETRY = 100 /* msecs */
+	TPM_TIMEOUT_RETRY = 100, /* msecs */
+	TPM_TIMEOUT_RANGE_US = 300	/* usecs */
 };
 
 /* TPM addresses */
@@ -527,6 +528,12 @@ int tpm_pm_resume(struct device *dev);
 int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
 		      wait_queue_head_t *queue, bool check_cancel);
 
+static inline void tpm_msleep(unsigned int delay_msec)
+{
+	usleep_range(delay_msec * 1000,
+		     (delay_msec * 1000) + TPM_TIMEOUT_RANGE_US);
+};
+
 struct tpm_chip *tpm_chip_find_get(int chip_num);
 __must_check int tpm_try_get_ops(struct tpm_chip *chip);
 void tpm_put_ops(struct tpm_chip *chip);
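The tpm_msleep() helper above (used by all the msleep() conversions in this series) trades msleep()'s coarse jiffy rounding for a bounded usleep_range() window. A worked check of the windows it requests for the two timeouts defined in this header:

#include <stdio.h>

enum tpm_timeout {
	TPM_TIMEOUT = 5,		/* msecs */
	TPM_TIMEOUT_RETRY = 100,	/* msecs */
	TPM_TIMEOUT_RANGE_US = 300	/* usecs */
};

int main(void)
{
	/* The [min, max] window tpm_msleep() hands to usleep_range() */
	unsigned int delays[] = { TPM_TIMEOUT, TPM_TIMEOUT_RETRY };
	for (unsigned i = 0; i < 2; i++) {
		unsigned int min = delays[i] * 1000;
		unsigned int max = min + TPM_TIMEOUT_RANGE_US;
		printf("%u ms -> usleep_range(%u, %u)\n", delays[i], min, max);
	}
	return 0;
}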
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index f7f34b2aa981..e1a41b788f08 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -899,7 +899,7 @@ static int tpm2_do_selftest(struct tpm_chip *chip)
 		if (rc != TPM2_RC_TESTING)
 			break;
 
-		msleep(delay_msec);
+		tpm_msleep(delay_msec);
 	}
 
 	return rc;
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index a4ac63a21d8a..8f0a98dea327 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -665,7 +665,7 @@ static const struct dev_pm_ops crb_pm = {
 	SET_RUNTIME_PM_OPS(crb_pm_runtime_suspend, crb_pm_runtime_resume, NULL)
 };
 
-static struct acpi_device_id crb_device_ids[] = {
+static const struct acpi_device_id crb_device_ids[] = {
 	{"MSFT0101", 0},
 	{"", 0},
 };
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index f01d083eced2..25f6e2665385 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -32,26 +32,70 @@
 
 static const char tpm_ibmvtpm_driver_name[] = "tpm_ibmvtpm";
 
-static struct vio_device_id tpm_ibmvtpm_device_table[] = {
+static const struct vio_device_id tpm_ibmvtpm_device_table[] = {
 	{ "IBM,vtpm", "IBM,vtpm"},
 	{ "", "" }
 };
 MODULE_DEVICE_TABLE(vio, tpm_ibmvtpm_device_table);
 
 /**
+ *
+ * ibmvtpm_send_crq_word - Send a CRQ request
+ * @vdev:	vio device struct
+ * @w1:		pre-constructed first word of tpm crq (second word is reserved)
+ *
+ * Return:
+ *	0 - Success
+ *	Non-zero - Failure
+ */
+static int ibmvtpm_send_crq_word(struct vio_dev *vdev, u64 w1)
+{
+	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, 0);
+}
+
+/**
+ *
  * ibmvtpm_send_crq - Send a CRQ request
  *
  * @vdev:	vio device struct
- * @w1:		first word
- * @w2:		second word
+ * @valid:	Valid field
+ * @msg:	Type field
+ * @len:	Length field
+ * @data:	Data field
+ *
+ * The ibmvtpm crq is defined as follows:
+ *
+ * Byte  |  0  |  1  |  2  |  3  |  4  |  5  |  6  |  7
+ * -----------------------------------------------------------------------
+ * Word0 | Valid | Type |    Length    |            Data
+ * -----------------------------------------------------------------------
+ * Word1 |                      Reserved
+ * -----------------------------------------------------------------------
+ *
+ * Which matches the following structure (on bigendian host):
+ *
+ * struct ibmvtpm_crq {
+ *         u8 valid;
+ *         u8 msg;
+ *         __be16 len;
+ *         __be32 data;
+ *         __be64 reserved;
+ * } __attribute__((packed, aligned(8)));
+ *
+ * However, the value is passed in a register so just compute the numeric value
+ * to load into the register avoiding byteswap altogether. Endian only affects
+ * memory loads and stores - registers are internally represented the same.
  *
  * Return:
- *	0 -Sucess
+ *	0 (H_SUCCESS) - Success
  *	Non-zero - Failure
  */
-static int ibmvtpm_send_crq(struct vio_dev *vdev, u64 w1, u64 w2)
+static int ibmvtpm_send_crq(struct vio_dev *vdev,
+		u8 valid, u8 msg, u16 len, u32 data)
 {
-	return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, w1, w2);
+	u64 w1 = ((u64)valid << 56) | ((u64)msg << 48) | ((u64)len << 32) |
+		(u64)data;
+	return ibmvtpm_send_crq_word(vdev, w1);
 }
 
 /**
@@ -109,8 +153,6 @@ static int tpm_ibmvtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
 static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 {
 	struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
-	struct ibmvtpm_crq crq;
-	__be64 *word = (__be64 *)&crq;
 	int rc, sig;
 
 	if (!ibmvtpm->rtce_buf) {
@@ -137,10 +179,6 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 	spin_lock(&ibmvtpm->rtce_lock);
 	ibmvtpm->res_len = 0;
 	memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count);
-	crq.valid = (u8)IBMVTPM_VALID_CMD;
-	crq.msg = (u8)VTPM_TPM_COMMAND;
-	crq.len = cpu_to_be16(count);
-	crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle);
 
 	/*
 	 * set the processing flag before the Hcall, since we may get the
@@ -148,8 +186,9 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
 	 */
 	ibmvtpm->tpm_processing_cmd = true;
 
-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]),
-			      be64_to_cpu(word[1]));
+	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+			IBMVTPM_VALID_CMD, VTPM_TPM_COMMAND,
+			count, ibmvtpm->rtce_dma_handle);
 	if (rc != H_SUCCESS) {
 		dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
 		rc = 0;
@@ -182,15 +221,10 @@ static u8 tpm_ibmvtpm_status(struct tpm_chip *chip)
  */
 static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
 {
-	struct ibmvtpm_crq crq;
-	u64 *buf = (u64 *) &crq;
 	int rc;
 
-	crq.valid = (u8)IBMVTPM_VALID_CMD;
-	crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;
-
-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
-			      cpu_to_be64(buf[1]));
+	rc = ibmvtpm_send_crq(ibmvtpm->vdev,
+			IBMVTPM_VALID_CMD, VTPM_GET_RTCE_BUFFER_SIZE, 0, 0);
 	if (rc != H_SUCCESS)
 		dev_err(ibmvtpm->dev,
 			"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
@@ -210,15 +244,10 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
210 */ 244 */
211static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm) 245static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
212{ 246{
213 struct ibmvtpm_crq crq;
214 u64 *buf = (u64 *) &crq;
215 int rc; 247 int rc;
216 248
217 crq.valid = (u8)IBMVTPM_VALID_CMD; 249 rc = ibmvtpm_send_crq(ibmvtpm->vdev,
218 crq.msg = (u8)VTPM_GET_VERSION; 250 IBMVTPM_VALID_CMD, VTPM_GET_VERSION, 0, 0);
219
220 rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
221 cpu_to_be64(buf[1]));
222 if (rc != H_SUCCESS) 251 if (rc != H_SUCCESS)
223 dev_err(ibmvtpm->dev, 252 dev_err(ibmvtpm->dev,
224 "ibmvtpm_crq_get_version failed rc=%d\n", rc); 253 "ibmvtpm_crq_get_version failed rc=%d\n", rc);
@@ -238,7 +267,7 @@ static int ibmvtpm_crq_send_init_complete(struct ibmvtpm_dev *ibmvtpm)
 {
 	int rc;
 
-	rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_COMP_CMD, 0);
+	rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_COMP_CMD);
 	if (rc != H_SUCCESS)
 		dev_err(ibmvtpm->dev,
 			"ibmvtpm_crq_send_init_complete failed rc=%d\n", rc);
@@ -258,7 +287,7 @@ static int ibmvtpm_crq_send_init(struct ibmvtpm_dev *ibmvtpm)
258{ 287{
259 int rc; 288 int rc;
260 289
261 rc = ibmvtpm_send_crq(ibmvtpm->vdev, INIT_CRQ_CMD, 0); 290 rc = ibmvtpm_send_crq_word(ibmvtpm->vdev, INIT_CRQ_CMD);
262 if (rc != H_SUCCESS) 291 if (rc != H_SUCCESS)
263 dev_err(ibmvtpm->dev, 292 dev_err(ibmvtpm->dev,
264 "ibmvtpm_crq_send_init failed rc=%d\n", rc); 293 "ibmvtpm_crq_send_init failed rc=%d\n", rc);
@@ -340,15 +369,10 @@ static int tpm_ibmvtpm_suspend(struct device *dev)
340{ 369{
341 struct tpm_chip *chip = dev_get_drvdata(dev); 370 struct tpm_chip *chip = dev_get_drvdata(dev);
342 struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev); 371 struct ibmvtpm_dev *ibmvtpm = dev_get_drvdata(&chip->dev);
343 struct ibmvtpm_crq crq;
344 u64 *buf = (u64 *) &crq;
345 int rc = 0; 372 int rc = 0;
346 373
347 crq.valid = (u8)IBMVTPM_VALID_CMD; 374 rc = ibmvtpm_send_crq(ibmvtpm->vdev,
348 crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND; 375 IBMVTPM_VALID_CMD, VTPM_PREPARE_TO_SUSPEND, 0, 0);
349
350 rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
351 cpu_to_be64(buf[1]));
352 if (rc != H_SUCCESS) 376 if (rc != H_SUCCESS)
353 dev_err(ibmvtpm->dev, 377 dev_err(ibmvtpm->dev,
354 "tpm_ibmvtpm_suspend failed rc=%d\n", rc); 378 "tpm_ibmvtpm_suspend failed rc=%d\n", rc);
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 3b1b9f9322d5..d8f10047fbba 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -191,7 +191,7 @@ static int wait(struct tpm_chip *chip, int wait_for_bit)
191 /* check the status-register if wait_for_bit is set */ 191 /* check the status-register if wait_for_bit is set */
192 if (status & 1 << wait_for_bit) 192 if (status & 1 << wait_for_bit)
193 break; 193 break;
194 msleep(TPM_MSLEEP_TIME); 194 tpm_msleep(TPM_MSLEEP_TIME);
195 } 195 }
196 if (i == TPM_MAX_TRIES) { /* timeout occurs */ 196 if (i == TPM_MAX_TRIES) { /* timeout occurs */
197 if (wait_for_bit == STAT_XFE) 197 if (wait_for_bit == STAT_XFE)
@@ -226,7 +226,7 @@ static void tpm_wtx(struct tpm_chip *chip)
226 wait_and_send(chip, TPM_CTRL_WTX); 226 wait_and_send(chip, TPM_CTRL_WTX);
227 wait_and_send(chip, 0x00); 227 wait_and_send(chip, 0x00);
228 wait_and_send(chip, 0x00); 228 wait_and_send(chip, 0x00);
229 msleep(TPM_WTX_MSLEEP_TIME); 229 tpm_msleep(TPM_WTX_MSLEEP_TIME);
230} 230}
231 231
232static void tpm_wtx_abort(struct tpm_chip *chip) 232static void tpm_wtx_abort(struct tpm_chip *chip)
@@ -237,7 +237,7 @@ static void tpm_wtx_abort(struct tpm_chip *chip)
237 wait_and_send(chip, 0x00); 237 wait_and_send(chip, 0x00);
238 wait_and_send(chip, 0x00); 238 wait_and_send(chip, 0x00);
239 number_of_wtx = 0; 239 number_of_wtx = 0;
240 msleep(TPM_WTX_MSLEEP_TIME); 240 tpm_msleep(TPM_WTX_MSLEEP_TIME);
241} 241}
242 242
243static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count) 243static int tpm_inf_recv(struct tpm_chip *chip, u8 * buf, size_t count)
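
This commit and the tpm_tis_core.c one below make the same substitution: msleep() becomes tpm_msleep(). The helper lives in tpm.h (also touched in this series); a plausible sketch, assuming the usual usleep_range() wrapper and a slack constant named TPM_TIMEOUT_RANGE_US:

    /* Hedged sketch: msleep() rounds short delays up to whole jiffies
     * (often 10-20 ms on HZ=100/250 kernels), while usleep_range() tracks
     * millisecond requests much more closely, so TPM polling loops sleep
     * for roughly the time they ask for.
     */
    static inline void tpm_msleep(unsigned int delay_msec)
    {
            usleep_range(delay_msec * 1000,
                         (delay_msec * 1000) + TPM_TIMEOUT_RANGE_US);
    }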
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index b617b2eeb080..63bc6c3b949e 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -51,7 +51,7 @@ static int wait_startup(struct tpm_chip *chip, int l)
51 51
52 if (access & TPM_ACCESS_VALID) 52 if (access & TPM_ACCESS_VALID)
53 return 0; 53 return 0;
54 msleep(TPM_TIMEOUT); 54 tpm_msleep(TPM_TIMEOUT);
55 } while (time_before(jiffies, stop)); 55 } while (time_before(jiffies, stop));
56 return -1; 56 return -1;
57} 57}
@@ -117,7 +117,7 @@ again:
117 do { 117 do {
118 if (check_locality(chip, l)) 118 if (check_locality(chip, l))
119 return l; 119 return l;
120 msleep(TPM_TIMEOUT); 120 tpm_msleep(TPM_TIMEOUT);
121 } while (time_before(jiffies, stop)); 121 } while (time_before(jiffies, stop));
122 } 122 }
123 return -1; 123 return -1;
@@ -164,7 +164,7 @@ static int get_burstcount(struct tpm_chip *chip)
164 burstcnt = (value >> 8) & 0xFFFF; 164 burstcnt = (value >> 8) & 0xFFFF;
165 if (burstcnt) 165 if (burstcnt)
166 return burstcnt; 166 return burstcnt;
167 msleep(TPM_TIMEOUT); 167 tpm_msleep(TPM_TIMEOUT);
168 } while (time_before(jiffies, stop)); 168 } while (time_before(jiffies, stop));
169 return -EBUSY; 169 return -EBUSY;
170} 170}
@@ -396,7 +396,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
396 priv->irq = irq; 396 priv->irq = irq;
397 chip->flags |= TPM_CHIP_FLAG_IRQ; 397 chip->flags |= TPM_CHIP_FLAG_IRQ;
398 if (!priv->irq_tested) 398 if (!priv->irq_tested)
399 msleep(1); 399 tpm_msleep(1);
400 if (!priv->irq_tested) 400 if (!priv->irq_tested)
401 disable_interrupts(chip); 401 disable_interrupts(chip);
402 priv->irq_tested = true; 402 priv->irq_tested = true;
diff --git a/drivers/clk/clk-bulk.c b/drivers/clk/clk-bulk.c
index c834f5abfc49..4c10456f8a32 100644
--- a/drivers/clk/clk-bulk.c
+++ b/drivers/clk/clk-bulk.c
@@ -105,6 +105,7 @@ err:
105 105
106 return ret; 106 return ret;
107} 107}
108EXPORT_SYMBOL_GPL(clk_bulk_prepare);
108 109
109#endif /* CONFIG_HAVE_CLK_PREPARE */ 110#endif /* CONFIG_HAVE_CLK_PREPARE */
110 111
diff --git a/drivers/clk/rockchip/clk-rk3128.c b/drivers/clk/rockchip/clk-rk3128.c
index 62d7854e4b87..5970a50671b9 100644
--- a/drivers/clk/rockchip/clk-rk3128.c
+++ b/drivers/clk/rockchip/clk-rk3128.c
@@ -315,13 +315,13 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
315 RK2928_CLKGATE_CON(10), 8, GFLAGS), 315 RK2928_CLKGATE_CON(10), 8, GFLAGS),
316 316
317 GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0, 317 GATE(SCLK_PVTM_CORE, "clk_pvtm_core", "xin24m", 0,
318 RK2928_CLKGATE_CON(10), 8, GFLAGS), 318 RK2928_CLKGATE_CON(10), 0, GFLAGS),
319 GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0, 319 GATE(SCLK_PVTM_GPU, "clk_pvtm_gpu", "xin24m", 0,
320 RK2928_CLKGATE_CON(10), 8, GFLAGS), 320 RK2928_CLKGATE_CON(10), 1, GFLAGS),
321 GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0, 321 GATE(SCLK_PVTM_FUNC, "clk_pvtm_func", "xin24m", 0,
322 RK2928_CLKGATE_CON(10), 8, GFLAGS), 322 RK2928_CLKGATE_CON(10), 2, GFLAGS),
323 GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED, 323 GATE(SCLK_MIPI_24M, "clk_mipi_24m", "xin24m", CLK_IGNORE_UNUSED,
324 RK2928_CLKGATE_CON(10), 8, GFLAGS), 324 RK2928_CLKGATE_CON(2), 15, GFLAGS),
325 325
326 COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0, 326 COMPOSITE(SCLK_SDMMC, "sclk_sdmmc0", mux_mmc_src_p, 0,
327 RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS, 327 RK2928_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
@@ -541,7 +541,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
541 GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS), 541 GATE(0, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
542 GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS), 542 GATE(0, "pclk_mipiphy", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 0, GFLAGS),
543 543
544 GATE(0, "pclk_pmu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 2, GFLAGS), 544 GATE(0, "pclk_pmu", "pclk_pmu_pre", 0, RK2928_CLKGATE_CON(9), 2, GFLAGS),
545 GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS), 545 GATE(0, "pclk_pmu_niu", "pclk_pmu_pre", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(9), 3, GFLAGS),
546 546
547 /* PD_MMC */ 547 /* PD_MMC */
@@ -577,6 +577,8 @@ static const char *const rk3128_critical_clocks[] __initconst = {
577 "aclk_peri", 577 "aclk_peri",
578 "hclk_peri", 578 "hclk_peri",
579 "pclk_peri", 579 "pclk_peri",
580 "pclk_pmu",
581 "sclk_timer5",
580}; 582};
581 583
582static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np) 584static struct rockchip_clk_provider *__init rk3128_common_clk_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index e40b77583c47..d8d3cb67b402 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -294,6 +294,18 @@ static const struct samsung_clk_reg_dump src_mask_suspend_e4210[] = {
294#define PLL_ENABLED (1 << 31) 294#define PLL_ENABLED (1 << 31)
295#define PLL_LOCKED (1 << 29) 295#define PLL_LOCKED (1 << 29)
296 296
297static void exynos4_clk_enable_pll(u32 reg)
298{
299 u32 pll_con = readl(reg_base + reg);
300 pll_con |= PLL_ENABLED;
301 writel(pll_con, reg_base + reg);
302
303 while (!(pll_con & PLL_LOCKED)) {
304 cpu_relax();
305 pll_con = readl(reg_base + reg);
306 }
307}
308
297static void exynos4_clk_wait_for_pll(u32 reg) 309static void exynos4_clk_wait_for_pll(u32 reg)
298{ 310{
299 u32 pll_con; 311 u32 pll_con;
@@ -315,6 +327,9 @@ static int exynos4_clk_suspend(void)
315 samsung_clk_save(reg_base, exynos4_save_pll, 327 samsung_clk_save(reg_base, exynos4_save_pll,
316 ARRAY_SIZE(exynos4_clk_pll_regs)); 328 ARRAY_SIZE(exynos4_clk_pll_regs));
317 329
330 exynos4_clk_enable_pll(EPLL_CON0);
331 exynos4_clk_enable_pll(VPLL_CON0);
332
318 if (exynos4_soc == EXYNOS4210) { 333 if (exynos4_soc == EXYNOS4210) {
319 samsung_clk_save(reg_base, exynos4_save_soc, 334 samsung_clk_save(reg_base, exynos4_save_soc,
320 ARRAY_SIZE(exynos4210_clk_save)); 335 ARRAY_SIZE(exynos4210_clk_save));
diff --git a/drivers/clocksource/numachip.c b/drivers/clocksource/numachip.c
index 6a20dc8b253f..9a7d7f0f23fe 100644
--- a/drivers/clocksource/numachip.c
+++ b/drivers/clocksource/numachip.c
@@ -43,7 +43,7 @@ static int numachip2_set_next_event(unsigned long delta, struct clock_event_devi
43 return 0; 43 return 0;
44} 44}
45 45
46static struct clock_event_device numachip2_clockevent = { 46static const struct clock_event_device numachip2_clockevent __initconst = {
47 .name = "numachip2", 47 .name = "numachip2",
48 .rating = 400, 48 .rating = 400,
49 .set_next_event = numachip2_set_next_event, 49 .set_next_event = numachip2_set_next_event,
diff --git a/drivers/clocksource/timer-integrator-ap.c b/drivers/clocksource/timer-integrator-ap.c
index 2ff64d9d4fb3..62d24690ba02 100644
--- a/drivers/clocksource/timer-integrator-ap.c
+++ b/drivers/clocksource/timer-integrator-ap.c
@@ -36,8 +36,8 @@ static u64 notrace integrator_read_sched_clock(void)
36 return -readl(sched_clk_base + TIMER_VALUE); 36 return -readl(sched_clk_base + TIMER_VALUE);
37} 37}
38 38
39static int integrator_clocksource_init(unsigned long inrate, 39static int __init integrator_clocksource_init(unsigned long inrate,
40 void __iomem *base) 40 void __iomem *base)
41{ 41{
42 u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC; 42 u32 ctrl = TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC;
43 unsigned long rate = inrate; 43 unsigned long rate = inrate;
diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c
index a020da7940d6..a753c50e9e41 100644
--- a/drivers/cpufreq/cpufreq-dt-platdev.c
+++ b/drivers/cpufreq/cpufreq-dt-platdev.c
@@ -106,6 +106,22 @@ static const struct of_device_id whitelist[] __initconst = {
106 * platforms using "operating-points-v2" property. 106 * platforms using "operating-points-v2" property.
107 */ 107 */
108static const struct of_device_id blacklist[] __initconst = { 108static const struct of_device_id blacklist[] __initconst = {
109 { .compatible = "calxeda,highbank", },
110 { .compatible = "calxeda,ecx-2000", },
111
112 { .compatible = "marvell,armadaxp", },
113
114 { .compatible = "nvidia,tegra124", },
115
116 { .compatible = "st,stih407", },
117 { .compatible = "st,stih410", },
118
119 { .compatible = "sigma,tango4", },
120
121 { .compatible = "ti,am33xx", },
122 { .compatible = "ti,am43", },
123 { .compatible = "ti,dra7", },
124
109 { } 125 { }
110}; 126};
111 127
diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c
index b29cd3398463..4bf47de6101f 100644
--- a/drivers/cpufreq/ti-cpufreq.c
+++ b/drivers/cpufreq/ti-cpufreq.c
@@ -190,7 +190,7 @@ static int ti_cpufreq_setup_syscon_register(struct ti_cpufreq_data *opp_data)
190 190
191static const struct of_device_id ti_cpufreq_of_match[] = { 191static const struct of_device_id ti_cpufreq_of_match[] = {
192 { .compatible = "ti,am33xx", .data = &am3x_soc_data, }, 192 { .compatible = "ti,am33xx", .data = &am3x_soc_data, },
193 { .compatible = "ti,am4372", .data = &am4x_soc_data, }, 193 { .compatible = "ti,am43", .data = &am4x_soc_data, },
194 { .compatible = "ti,dra7", .data = &dra7_soc_data }, 194 { .compatible = "ti,dra7", .data = &dra7_soc_data },
195 {}, 195 {},
196}; 196};
diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c
index 7080c384ad5d..52a75053ee03 100644
--- a/drivers/cpuidle/cpuidle-arm.c
+++ b/drivers/cpuidle/cpuidle-arm.c
@@ -104,13 +104,13 @@ static int __init arm_idle_init(void)
104 ret = dt_init_idle_driver(drv, arm_idle_state_match, 1); 104 ret = dt_init_idle_driver(drv, arm_idle_state_match, 1);
105 if (ret <= 0) { 105 if (ret <= 0) {
106 ret = ret ? : -ENODEV; 106 ret = ret ? : -ENODEV;
107 goto out_fail; 107 goto init_fail;
108 } 108 }
109 109
110 ret = cpuidle_register_driver(drv); 110 ret = cpuidle_register_driver(drv);
111 if (ret) { 111 if (ret) {
112 pr_err("Failed to register cpuidle driver\n"); 112 pr_err("Failed to register cpuidle driver\n");
113 goto out_fail; 113 goto init_fail;
114 } 114 }
115 115
116 /* 116 /*
@@ -149,6 +149,8 @@ static int __init arm_idle_init(void)
149 } 149 }
150 150
151 return 0; 151 return 0;
152init_fail:
153 kfree(drv);
152out_fail: 154out_fail:
153 while (--cpu >= 0) { 155 while (--cpu >= 0) {
154 dev = per_cpu(cpuidle_devices, cpu); 156 dev = per_cpu(cpuidle_devices, cpu);
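
The new init_fail label frees the driver copy that was allocated but never successfully registered, then execution falls through into the pre-existing out_fail unwind. The generic shape of this goto-ladder idiom, as a self-contained illustration (all names here are hypothetical):

    int setup_example(void)
    {
            void *a, *b;
            int ret;

            a = kmalloc(64, GFP_KERNEL);            /* step 1 */
            if (!a)
                    return -ENOMEM;

            b = kmalloc(64, GFP_KERNEL);            /* step 2 */
            if (!b) {
                    ret = -ENOMEM;
                    goto err_free_a;                /* undo step 1 only */
            }

            ret = register_thing(b);                /* step 3, hypothetical */
            if (ret)
                    goto err_free_b;                /* undo steps 2, then 1 */

            return 0;

    err_free_b:
            kfree(b);
    err_free_a:
            kfree(a);
            return ret;
    }

Labels appear in reverse order of acquisition, so each failure point releases exactly what exists at that point, which is the property the added kfree(drv) restores here.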
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index e36aeacd7635..1eb852765469 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -1,6 +1,7 @@
1config CRYPTO_DEV_FSL_CAAM 1config CRYPTO_DEV_FSL_CAAM
2 tristate "Freescale CAAM-Multicore driver backend" 2 tristate "Freescale CAAM-Multicore driver backend"
3 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE 3 depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
4 select SOC_BUS
4 help 5 help
5 Enables the driver module for Freescale's Cryptographic Accelerator 6 Enables the driver module for Freescale's Cryptographic Accelerator
6 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4). 7 and Assurance Module (CAAM), also known as the SEC version 4 (SEC4).
@@ -141,10 +142,6 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
141 To compile this as a module, choose M here: the module 142 To compile this as a module, choose M here: the module
142 will be called caamrng. 143 will be called caamrng.
143 144
144config CRYPTO_DEV_FSL_CAAM_IMX
145 def_bool SOC_IMX6 || SOC_IMX7D
146 depends on CRYPTO_DEV_FSL_CAAM
147
148config CRYPTO_DEV_FSL_CAAM_DEBUG 145config CRYPTO_DEV_FSL_CAAM_DEBUG
149 bool "Enable debug output in CAAM driver" 146 bool "Enable debug output in CAAM driver"
150 depends on CRYPTO_DEV_FSL_CAAM 147 depends on CRYPTO_DEV_FSL_CAAM
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index dacb53fb690e..027e121c6f70 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -7,6 +7,7 @@
7#include <linux/device.h> 7#include <linux/device.h>
8#include <linux/of_address.h> 8#include <linux/of_address.h>
9#include <linux/of_irq.h> 9#include <linux/of_irq.h>
10#include <linux/sys_soc.h>
10 11
11#include "compat.h" 12#include "compat.h"
12#include "regs.h" 13#include "regs.h"
@@ -19,6 +20,8 @@ bool caam_little_end;
19EXPORT_SYMBOL(caam_little_end); 20EXPORT_SYMBOL(caam_little_end);
20bool caam_dpaa2; 21bool caam_dpaa2;
21EXPORT_SYMBOL(caam_dpaa2); 22EXPORT_SYMBOL(caam_dpaa2);
23bool caam_imx;
24EXPORT_SYMBOL(caam_imx);
22 25
23#ifdef CONFIG_CAAM_QI 26#ifdef CONFIG_CAAM_QI
24#include "qi.h" 27#include "qi.h"
@@ -28,19 +31,11 @@ EXPORT_SYMBOL(caam_dpaa2);
28 * i.MX targets tend to have clock control subsystems that can 31 * i.MX targets tend to have clock control subsystems that can
29 * enable/disable clocking to our device. 32 * enable/disable clocking to our device.
30 */ 33 */
31#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
32static inline struct clk *caam_drv_identify_clk(struct device *dev, 34static inline struct clk *caam_drv_identify_clk(struct device *dev,
33 char *clk_name) 35 char *clk_name)
34{ 36{
35 return devm_clk_get(dev, clk_name); 37 return caam_imx ? devm_clk_get(dev, clk_name) : NULL;
36} 38}
37#else
38static inline struct clk *caam_drv_identify_clk(struct device *dev,
39 char *clk_name)
40{
41 return NULL;
42}
43#endif
44 39
45/* 40/*
46 * Descriptor to instantiate RNG State Handle 0 in normal mode and 41 * Descriptor to instantiate RNG State Handle 0 in normal mode and
@@ -430,6 +425,10 @@ static int caam_probe(struct platform_device *pdev)
430{ 425{
431 int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN; 426 int ret, ring, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
432 u64 caam_id; 427 u64 caam_id;
428 static const struct soc_device_attribute imx_soc[] = {
429 {.family = "Freescale i.MX"},
430 {},
431 };
433 struct device *dev; 432 struct device *dev;
434 struct device_node *nprop, *np; 433 struct device_node *nprop, *np;
435 struct caam_ctrl __iomem *ctrl; 434 struct caam_ctrl __iomem *ctrl;
@@ -451,6 +450,8 @@ static int caam_probe(struct platform_device *pdev)
451 dev_set_drvdata(dev, ctrlpriv); 450 dev_set_drvdata(dev, ctrlpriv);
452 nprop = pdev->dev.of_node; 451 nprop = pdev->dev.of_node;
453 452
453 caam_imx = (bool)soc_device_match(imx_soc);
454
454 /* Enable clocking */ 455 /* Enable clocking */
455 clk = caam_drv_identify_clk(&pdev->dev, "ipg"); 456 clk = caam_drv_identify_clk(&pdev->dev, "ipg");
456 if (IS_ERR(clk)) { 457 if (IS_ERR(clk)) {
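
soc_device_match() returns a pointer to the first matching soc_device_attribute entry, or NULL, so the (bool) conversion records "running on an i.MX SoC" once at probe time; the Kconfig hunk above selects SOC_BUS so the symbol is always available. Minimal usage sketch of the same call:

    static const struct soc_device_attribute imx_soc[] = {
            { .family = "Freescale i.MX" },
            { /* sentinel */ }
    };

    /* NULL when the running SoC's bus attributes don't match */
    if (soc_device_match(imx_soc))
            caam_imx = true;

Detecting the SoC at runtime is what lets one multiplatform kernel carry both i.MX and non-i.MX CAAM behaviour, replacing the deleted compile-time CRYPTO_DEV_FSL_CAAM_IMX switch.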
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index 2b5efff9ec3c..17cfd23a38fa 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -67,6 +67,7 @@
67 */ 67 */
68 68
69extern bool caam_little_end; 69extern bool caam_little_end;
70extern bool caam_imx;
70 71
71#define caam_to_cpu(len) \ 72#define caam_to_cpu(len) \
72static inline u##len caam##len ## _to_cpu(u##len val) \ 73static inline u##len caam##len ## _to_cpu(u##len val) \
@@ -154,13 +155,10 @@ static inline u64 rd_reg64(void __iomem *reg)
154#else /* CONFIG_64BIT */ 155#else /* CONFIG_64BIT */
155static inline void wr_reg64(void __iomem *reg, u64 data) 156static inline void wr_reg64(void __iomem *reg, u64 data)
156{ 157{
157#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX 158 if (!caam_imx && caam_little_end) {
158 if (caam_little_end) {
159 wr_reg32((u32 __iomem *)(reg) + 1, data >> 32); 159 wr_reg32((u32 __iomem *)(reg) + 1, data >> 32);
160 wr_reg32((u32 __iomem *)(reg), data); 160 wr_reg32((u32 __iomem *)(reg), data);
161 } else 161 } else {
162#endif
163 {
164 wr_reg32((u32 __iomem *)(reg), data >> 32); 162 wr_reg32((u32 __iomem *)(reg), data >> 32);
165 wr_reg32((u32 __iomem *)(reg) + 1, data); 163 wr_reg32((u32 __iomem *)(reg) + 1, data);
166 } 164 }
@@ -168,41 +166,40 @@ static inline void wr_reg64(void __iomem *reg, u64 data)
168 166
169static inline u64 rd_reg64(void __iomem *reg) 167static inline u64 rd_reg64(void __iomem *reg)
170{ 168{
171#ifndef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX 169 if (!caam_imx && caam_little_end)
172 if (caam_little_end)
173 return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 | 170 return ((u64)rd_reg32((u32 __iomem *)(reg) + 1) << 32 |
174 (u64)rd_reg32((u32 __iomem *)(reg))); 171 (u64)rd_reg32((u32 __iomem *)(reg)));
175 else 172
176#endif 173 return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 |
177 return ((u64)rd_reg32((u32 __iomem *)(reg)) << 32 | 174 (u64)rd_reg32((u32 __iomem *)(reg) + 1));
178 (u64)rd_reg32((u32 __iomem *)(reg) + 1));
179} 175}
180#endif /* CONFIG_64BIT */ 176#endif /* CONFIG_64BIT */
181 177
178static inline u64 cpu_to_caam_dma64(dma_addr_t value)
179{
180 if (caam_imx)
181 return (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) |
182 (u64)cpu_to_caam32(upper_32_bits(value)));
183
184 return cpu_to_caam64(value);
185}
186
187static inline u64 caam_dma64_to_cpu(u64 value)
188{
189 if (caam_imx)
190 return (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) |
191 (u64)caam32_to_cpu(upper_32_bits(value)));
192
193 return caam64_to_cpu(value);
194}
195
182#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 196#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
183#ifdef CONFIG_SOC_IMX7D 197#define cpu_to_caam_dma(value) cpu_to_caam_dma64(value)
184#define cpu_to_caam_dma(value) \ 198#define caam_dma_to_cpu(value) caam_dma64_to_cpu(value)
185 (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
186 (u64)cpu_to_caam32(upper_32_bits(value)))
187#define caam_dma_to_cpu(value) \
188 (((u64)caam32_to_cpu(lower_32_bits(value)) << 32) | \
189 (u64)caam32_to_cpu(upper_32_bits(value)))
190#else
191#define cpu_to_caam_dma(value) cpu_to_caam64(value)
192#define caam_dma_to_cpu(value) caam64_to_cpu(value)
193#endif /* CONFIG_SOC_IMX7D */
194#else 199#else
195#define cpu_to_caam_dma(value) cpu_to_caam32(value) 200#define cpu_to_caam_dma(value) cpu_to_caam32(value)
196#define caam_dma_to_cpu(value) caam32_to_cpu(value) 201#define caam_dma_to_cpu(value) caam32_to_cpu(value)
197#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */ 202#endif /* CONFIG_ARCH_DMA_ADDR_T_64BIT */
198
199#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_IMX
200#define cpu_to_caam_dma64(value) \
201 (((u64)cpu_to_caam32(lower_32_bits(value)) << 32) | \
202 (u64)cpu_to_caam32(upper_32_bits(value)))
203#else
204#define cpu_to_caam_dma64(value) cpu_to_caam64(value)
205#endif
206 203
207/* 204/*
208 * jr_outentry 205 * jr_outentry
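
A worked example of the new cpu_to_caam_dma64() path. Assume a little-endian CPU and a little-endian CAAM register map (caam_little_end set; the wr_reg64() change above implies this word-swapped layout for i.MX), so cpu_to_caam32() is a no-op and only the two 32-bit halves trade places:

    /* Worked example under the assumptions stated above:
     *
     *   dma_addr_t value = 0x0000000180000000ULL;
     *
     *   upper_32_bits(value) == 0x00000001
     *   lower_32_bits(value) == 0x80000000
     *
     *   i.MX path:
     *     cpu_to_caam_dma64(value)
     *       == ((u64)0x80000000 << 32) | 0x00000001
     *       == 0x8000000000000001     (halves swapped, bytes untouched)
     *
     *   big-endian CAAM elsewhere:
     *     cpu_to_caam64(value) == cpu_to_be64(value)
     *       == 0x0000008001000000     (full byte swap on an LE CPU)
     */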
diff --git a/drivers/crypto/inside-secure/safexcel_cipher.c b/drivers/crypto/inside-secure/safexcel_cipher.c
index d2207ac5ba19..5438552bc6d7 100644
--- a/drivers/crypto/inside-secure/safexcel_cipher.c
+++ b/drivers/crypto/inside-secure/safexcel_cipher.c
@@ -386,7 +386,7 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm)
386 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm); 386 struct safexcel_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
387 struct safexcel_crypto_priv *priv = ctx->priv; 387 struct safexcel_crypto_priv *priv = ctx->priv;
388 struct skcipher_request req; 388 struct skcipher_request req;
389 struct safexcel_inv_result result = { 0 }; 389 struct safexcel_inv_result result = {};
390 int ring = ctx->base.ring; 390 int ring = ctx->base.ring;
391 391
392 memset(&req, 0, sizeof(struct skcipher_request)); 392 memset(&req, 0, sizeof(struct skcipher_request));
diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
index 3f819399cd95..3980f946874f 100644
--- a/drivers/crypto/inside-secure/safexcel_hash.c
+++ b/drivers/crypto/inside-secure/safexcel_hash.c
@@ -419,7 +419,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
419 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm); 419 struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
420 struct safexcel_crypto_priv *priv = ctx->priv; 420 struct safexcel_crypto_priv *priv = ctx->priv;
421 struct ahash_request req; 421 struct ahash_request req;
422 struct safexcel_inv_result result = { 0 }; 422 struct safexcel_inv_result result = {};
423 int ring = ctx->base.ring; 423 int ring = ctx->base.ring;
424 424
425 memset(&req, 0, sizeof(struct ahash_request)); 425 memset(&req, 0, sizeof(struct ahash_request));
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 79791c690858..dff88838dce7 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1756,9 +1756,9 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1756 req_ctx->swinit = 0; 1756 req_ctx->swinit = 0;
1757 } else { 1757 } else {
1758 desc->ptr[1] = zero_entry; 1758 desc->ptr[1] = zero_entry;
1759 /* Indicate next op is not the first. */
1760 req_ctx->first = 0;
1761 } 1759 }
1760 /* Indicate next op is not the first. */
1761 req_ctx->first = 0;
1762 1762
1763 /* HMAC key */ 1763 /* HMAC key */
1764 if (ctx->keylen) 1764 if (ctx->keylen)
@@ -1769,7 +1769,7 @@ static int common_nonsnoop_hash(struct talitos_edesc *edesc,
1769 1769
1770 sg_count = edesc->src_nents ?: 1; 1770 sg_count = edesc->src_nents ?: 1;
1771 if (is_sec1 && sg_count > 1) 1771 if (is_sec1 && sg_count > 1)
1772 sg_copy_to_buffer(areq->src, sg_count, edesc->buf, length); 1772 sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
1773 else 1773 else
1774 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count, 1774 sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
1775 DMA_TO_DEVICE); 1775 DMA_TO_DEVICE);
@@ -3057,7 +3057,8 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3057 t_alg->algt.alg.hash.final = ahash_final; 3057 t_alg->algt.alg.hash.final = ahash_final;
3058 t_alg->algt.alg.hash.finup = ahash_finup; 3058 t_alg->algt.alg.hash.finup = ahash_finup;
3059 t_alg->algt.alg.hash.digest = ahash_digest; 3059 t_alg->algt.alg.hash.digest = ahash_digest;
3060 t_alg->algt.alg.hash.setkey = ahash_setkey; 3060 if (!strncmp(alg->cra_name, "hmac", 4))
3061 t_alg->algt.alg.hash.setkey = ahash_setkey;
3061 t_alg->algt.alg.hash.import = ahash_import; 3062 t_alg->algt.alg.hash.import = ahash_import;
3062 t_alg->algt.alg.hash.export = ahash_export; 3063 t_alg->algt.alg.hash.export = ahash_export;
3063 3064
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 08629ee69d11..00e73d28077c 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -361,12 +361,12 @@ static const struct fpga_manager_ops altera_cvp_ops = {
361 .write_complete = altera_cvp_write_complete, 361 .write_complete = altera_cvp_write_complete,
362}; 362};
363 363
364static ssize_t show_chkcfg(struct device_driver *dev, char *buf) 364static ssize_t chkcfg_show(struct device_driver *dev, char *buf)
365{ 365{
366 return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg); 366 return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
367} 367}
368 368
369static ssize_t store_chkcfg(struct device_driver *drv, const char *buf, 369static ssize_t chkcfg_store(struct device_driver *drv, const char *buf,
370 size_t count) 370 size_t count)
371{ 371{
372 int ret; 372 int ret;
@@ -378,7 +378,7 @@ static ssize_t store_chkcfg(struct device_driver *drv, const char *buf,
378 return count; 378 return count;
379} 379}
380 380
381static DRIVER_ATTR(chkcfg, 0600, show_chkcfg, store_chkcfg); 381static DRIVER_ATTR_RW(chkcfg);
382 382
383static int altera_cvp_probe(struct pci_dev *pdev, 383static int altera_cvp_probe(struct pci_dev *pdev,
384 const struct pci_device_id *dev_id); 384 const struct pci_device_id *dev_id);
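
The rename to chkcfg_show()/chkcfg_store() is what makes DRIVER_ATTR_RW() usable: the macro derives the handler names from the attribute name. Roughly, per include/linux/device.h (sketch; note this also moves the mode from the old explicit 0600 to the macro's 0644):

    #define DRIVER_ATTR_RW(_name) \
            struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)

    /* ... where __ATTR_RW(_name) expands along the lines of ... */
    __ATTR(_name, 0644, _name##_show, _name##_store)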
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
index d228f5a99044..dbbe986f90f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
@@ -636,7 +636,194 @@ static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
636 NUM_BANKS(ADDR_SURF_2_BANK); 636 NUM_BANKS(ADDR_SURF_2_BANK);
637 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) 637 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
638 WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]); 638 WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
639 } else if (adev->asic_type == CHIP_OLAND || adev->asic_type == CHIP_HAINAN) { 639 } else if (adev->asic_type == CHIP_OLAND) {
640 tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
641 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
642 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
643 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
644 NUM_BANKS(ADDR_SURF_16_BANK) |
645 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
646 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
647 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
648 tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
649 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
650 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
651 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
652 NUM_BANKS(ADDR_SURF_16_BANK) |
653 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
654 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
655 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
656 tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
657 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
658 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
659 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
660 NUM_BANKS(ADDR_SURF_16_BANK) |
661 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
662 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
663 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
664 tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
665 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
666 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
667 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
668 NUM_BANKS(ADDR_SURF_16_BANK) |
669 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
670 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
671 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
672 tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
673 ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
674 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
675 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
676 NUM_BANKS(ADDR_SURF_16_BANK) |
677 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
678 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
679 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
680 tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
681 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
682 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
683 TILE_SPLIT(split_equal_to_row_size) |
684 NUM_BANKS(ADDR_SURF_16_BANK) |
685 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
686 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
687 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
688 tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
689 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
690 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
691 TILE_SPLIT(split_equal_to_row_size) |
692 NUM_BANKS(ADDR_SURF_16_BANK) |
693 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
694 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
695 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
696 tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
697 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
698 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
699 TILE_SPLIT(split_equal_to_row_size) |
700 NUM_BANKS(ADDR_SURF_16_BANK) |
701 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
702 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
703 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
704 tilemode[8] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
705 ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
706 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
707 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
708 NUM_BANKS(ADDR_SURF_16_BANK) |
709 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
710 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
711 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
712 tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
713 ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
714 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
715 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
716 NUM_BANKS(ADDR_SURF_16_BANK) |
717 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
718 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
719 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
720 tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
721 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
722 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
723 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
724 NUM_BANKS(ADDR_SURF_16_BANK) |
725 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
726 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
727 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
728 tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
729 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
730 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
731 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
732 NUM_BANKS(ADDR_SURF_16_BANK) |
733 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
734 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
735 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
736 tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
737 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
738 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
739 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
740 NUM_BANKS(ADDR_SURF_16_BANK) |
741 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
742 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
743 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
744 tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
745 ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
746 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
747 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
748 NUM_BANKS(ADDR_SURF_16_BANK) |
749 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
750 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
751 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
752 tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
753 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
754 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
755 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
756 NUM_BANKS(ADDR_SURF_16_BANK) |
757 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
758 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
759 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
760 tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
761 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
762 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
763 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
764 NUM_BANKS(ADDR_SURF_16_BANK) |
765 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
766 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
767 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
768 tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
769 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
770 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
771 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
772 NUM_BANKS(ADDR_SURF_16_BANK) |
773 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
774 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
775 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
776 tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
777 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
778 PIPE_CONFIG(ADDR_SURF_P4_8x16) |
779 TILE_SPLIT(split_equal_to_row_size) |
780 NUM_BANKS(ADDR_SURF_16_BANK) |
781 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
782 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
783 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
784 tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
785 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
786 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
787 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
788 NUM_BANKS(ADDR_SURF_16_BANK) |
789 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
790 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
791 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
792 tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
793 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
794 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
795 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
796 NUM_BANKS(ADDR_SURF_16_BANK) |
797 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
798 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
799 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
800 tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
801 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
802 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
803 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
804 NUM_BANKS(ADDR_SURF_16_BANK) |
805 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
806 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
807 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
808 tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
809 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
810 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
811 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
812 NUM_BANKS(ADDR_SURF_16_BANK) |
813 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
814 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
815 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
816 tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
817 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
818 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
819 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
820 NUM_BANKS(ADDR_SURF_8_BANK) |
821 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
822 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
823 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1);
824 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
825 WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
826 } else if (adev->asic_type == CHIP_HAINAN) {
640 tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | 827 tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
641 ARRAY_MODE(ARRAY_2D_TILED_THIN1) | 828 ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
642 PIPE_CONFIG(ADDR_SURF_P2) | 829 PIPE_CONFIG(ADDR_SURF_P2) |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index e4a8c2e52cb2..660b3fbade41 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -892,6 +892,8 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
892 int err = 0; 892 int err = 0;
893 893
894 dev = kfd_device_by_id(args->gpu_id); 894 dev = kfd_device_by_id(args->gpu_id);
895 if (!dev)
896 return -EINVAL;
895 897
896 dev->kfd2kgd->get_tile_config(dev->kgd, &config); 898 dev->kfd2kgd->get_tile_config(dev->kgd, &config);
897 899
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 5979158c3f7b..944abfad39c1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -292,7 +292,10 @@ static int create_signal_event(struct file *devkfd,
292 struct kfd_event *ev) 292 struct kfd_event *ev)
293{ 293{
294 if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) { 294 if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
295 pr_warn("Signal event wasn't created because limit was reached\n"); 295 if (!p->signal_event_limit_reached) {
296 pr_warn("Signal event wasn't created because limit was reached\n");
297 p->signal_event_limit_reached = true;
298 }
296 return -ENOMEM; 299 return -ENOMEM;
297 } 300 }
298 301
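
The new per-process flag throttles the warning to once per KFD process. For contrast, the printk-once family would cap it at once per boot instead; an illustrative alternative, not what the patch does:

    if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
            /* fires at most once for the whole system uptime */
            pr_warn_once("Signal event wasn't created because limit was reached\n");
            return -ENOMEM;
    }

Per-process state is the better fit here because each process has its own event limit, so every offender still gets exactly one diagnostic.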
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 681b639f5133..ed71ad40e8f7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -183,8 +183,8 @@ static void uninitialize(struct kernel_queue *kq)
183{ 183{
184 if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) 184 if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
185 kq->mqd->destroy_mqd(kq->mqd, 185 kq->mqd->destroy_mqd(kq->mqd,
186 NULL, 186 kq->queue->mqd,
187 false, 187 KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
188 QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS, 188 QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
189 kq->queue->pipe, 189 kq->queue->pipe,
190 kq->queue->queue); 190 kq->queue->queue);
@@ -210,6 +210,11 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
210 uint32_t wptr, rptr; 210 uint32_t wptr, rptr;
211 unsigned int *queue_address; 211 unsigned int *queue_address;
212 212
213 /* When rptr == wptr, the buffer is empty.
214 * When rptr == wptr + 1, the buffer is full.
215 * It is always rptr that advances to the position of wptr, rather than
216 * the opposite. So we can only use up to queue_size_dwords - 1 dwords.
217 */
213 rptr = *kq->rptr_kernel; 218 rptr = *kq->rptr_kernel;
214 wptr = *kq->wptr_kernel; 219 wptr = *kq->wptr_kernel;
215 queue_address = (unsigned int *)kq->pq_kernel_addr; 220 queue_address = (unsigned int *)kq->pq_kernel_addr;
@@ -219,11 +224,10 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
219 pr_debug("wptr: %d\n", wptr); 224 pr_debug("wptr: %d\n", wptr);
220 pr_debug("queue_address 0x%p\n", queue_address); 225 pr_debug("queue_address 0x%p\n", queue_address);
221 226
222 available_size = (rptr - 1 - wptr + queue_size_dwords) % 227 available_size = (rptr + queue_size_dwords - 1 - wptr) %
223 queue_size_dwords; 228 queue_size_dwords;
224 229
225 if (packet_size_in_dwords >= queue_size_dwords || 230 if (packet_size_in_dwords > available_size) {
226 packet_size_in_dwords >= available_size) {
227 /* 231 /*
228 * make sure calling functions know 232 * make sure calling functions know
229 * acquire_packet_buffer() failed 233 * acquire_packet_buffer() failed
@@ -233,6 +237,14 @@ static int acquire_packet_buffer(struct kernel_queue *kq,
233 } 237 }
234 238
235 if (wptr + packet_size_in_dwords >= queue_size_dwords) { 239 if (wptr + packet_size_in_dwords >= queue_size_dwords) {
240 /* make sure after rolling back to position 0, there is
241 * still enough space.
242 */
243 if (packet_size_in_dwords >= rptr) {
244 *buffer_ptr = NULL;
245 return -ENOMEM;
246 }
247 /* fill nops, roll back and start at position 0 */
236 while (wptr > 0) { 248 while (wptr > 0) {
237 queue_address[wptr] = kq->nop_packet; 249 queue_address[wptr] = kq->nop_packet;
238 wptr = (wptr + 1) % queue_size_dwords; 250 wptr = (wptr + 1) % queue_size_dwords;
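
A worked example of the corrected ring-buffer accounting, using a hypothetical queue_size_dwords of 8:

    /* rptr == wptr means empty, and wptr may never advance onto rptr, so
     * at most queue_size_dwords - 1 slots are ever usable.
     *
     *   rptr = 2, wptr = 6, queue_size_dwords = 8:
     *     available_size = (2 + 8 - 1 - 6) % 8 = 3    (slots 6, 7, 0)
     *
     * Reordering to (rptr + size - 1 - wptr) keeps every intermediate
     * value non-negative; the old order transiently wrapped the unsigned
     * arithmetic when rptr was 0 (harmless in the end, but needlessly
     * hard to reason about). Relaxing ">=" to ">" lets a packet fill all
     * three free slots instead of at most two.
     *
     * Wraparound: with wptr = 6 and a 3-dword packet, 6 + 3 >= 8, so the
     * tail is padded with NOPs and writing restarts at slot 0, which is
     * legal only while packet_size_in_dwords < rptr; the new check
     * returns -ENOMEM otherwise.
     */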
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index b397ec726400..b87e96cee5fa 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -521,6 +521,7 @@ struct kfd_process {
521 struct list_head signal_event_pages; 521 struct list_head signal_event_pages;
522 u32 next_nonsignal_event_id; 522 u32 next_nonsignal_event_id;
523 size_t signal_event_count; 523 size_t signal_event_count;
524 bool signal_event_limit_reached;
524}; 525};
525 526
526/** 527/**
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 1cae95e2b13a..03bec765b03d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -143,7 +143,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
143 int num_queues = 0; 143 int num_queues = 0;
144 struct queue *cur; 144 struct queue *cur;
145 145
146 memset(&q_properties, 0, sizeof(struct queue_properties));
147 memcpy(&q_properties, properties, sizeof(struct queue_properties)); 146 memcpy(&q_properties, properties, sizeof(struct queue_properties));
148 q = NULL; 147 q = NULL;
149 kq = NULL; 148 kq = NULL;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 5a634594a6ce..57881167ccd2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -551,12 +551,15 @@ static const struct etnaviv_gem_ops etnaviv_gem_shmem_ops = {
551void etnaviv_gem_free_object(struct drm_gem_object *obj) 551void etnaviv_gem_free_object(struct drm_gem_object *obj)
552{ 552{
553 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj); 553 struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
554 struct etnaviv_drm_private *priv = obj->dev->dev_private;
554 struct etnaviv_vram_mapping *mapping, *tmp; 555 struct etnaviv_vram_mapping *mapping, *tmp;
555 556
556 /* object should not be active */ 557 /* object should not be active */
557 WARN_ON(is_active(etnaviv_obj)); 558 WARN_ON(is_active(etnaviv_obj));
558 559
560 mutex_lock(&priv->gem_lock);
559 list_del(&etnaviv_obj->gem_node); 561 list_del(&etnaviv_obj->gem_node);
562 mutex_unlock(&priv->gem_lock);
560 563
561 list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list, 564 list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
562 obj_node) { 565 obj_node) {
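
list_del() must run under the same lock as every walker of the list. An illustrative reader, assuming the driver-global list is priv->gem_list guarded by gem_lock as in this hunk:

    /* concurrent reader elsewhere in the driver (sketch) */
    mutex_lock(&priv->gem_lock);
    list_for_each_entry(etnaviv_obj, &priv->gem_list, gem_node)
            inspect(etnaviv_obj);                   /* hypothetical */
    mutex_unlock(&priv->gem_lock);

An unlocked list_del(&etnaviv_obj->gem_node) in the free path can unlink, and poison, exactly the node such a reader is about to follow.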
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 026ef4e02f85..46dfe0737f43 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -445,8 +445,10 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
445 cmdbuf->user_size = ALIGN(args->stream_size, 8); 445 cmdbuf->user_size = ALIGN(args->stream_size, 8);
446 446
447 ret = etnaviv_gpu_submit(gpu, submit, cmdbuf); 447 ret = etnaviv_gpu_submit(gpu, submit, cmdbuf);
448 if (ret == 0) 448 if (ret)
449 cmdbuf = NULL; 449 goto out;
450
451 cmdbuf = NULL;
450 452
451 if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) { 453 if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
452 /* 454 /*
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 730b8d9db187..6be5b53c3b27 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -14,6 +14,7 @@
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/component.h> 15#include <linux/component.h>
16#include <linux/iopoll.h> 16#include <linux/iopoll.h>
17#include <linux/irq.h>
17#include <linux/mfd/syscon.h> 18#include <linux/mfd/syscon.h>
18#include <linux/of_device.h> 19#include <linux/of_device.h>
19#include <linux/of_gpio.h> 20#include <linux/of_gpio.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index b1f7299600f0..e651a58c18cf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -168,23 +168,19 @@ static struct drm_driver exynos_drm_driver = {
168static int exynos_drm_suspend(struct device *dev) 168static int exynos_drm_suspend(struct device *dev)
169{ 169{
170 struct drm_device *drm_dev = dev_get_drvdata(dev); 170 struct drm_device *drm_dev = dev_get_drvdata(dev);
171 struct drm_connector *connector; 171 struct exynos_drm_private *private = drm_dev->dev_private;
172 struct drm_connector_list_iter conn_iter;
173 172
174 if (pm_runtime_suspended(dev) || !drm_dev) 173 if (pm_runtime_suspended(dev) || !drm_dev)
175 return 0; 174 return 0;
176 175
177 drm_connector_list_iter_begin(drm_dev, &conn_iter); 176 drm_kms_helper_poll_disable(drm_dev);
178 drm_for_each_connector_iter(connector, &conn_iter) { 177 exynos_drm_fbdev_suspend(drm_dev);
179 int old_dpms = connector->dpms; 178 private->suspend_state = drm_atomic_helper_suspend(drm_dev);
180 179 if (IS_ERR(private->suspend_state)) {
181 if (connector->funcs->dpms) 180 exynos_drm_fbdev_resume(drm_dev);
182 connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); 181 drm_kms_helper_poll_enable(drm_dev);
183 182 return PTR_ERR(private->suspend_state);
184 /* Set the old mode back to the connector for resume */
185 connector->dpms = old_dpms;
186 } 183 }
187 drm_connector_list_iter_end(&conn_iter);
188 184
189 return 0; 185 return 0;
190} 186}
@@ -192,22 +188,14 @@ static int exynos_drm_suspend(struct device *dev)
192static int exynos_drm_resume(struct device *dev) 188static int exynos_drm_resume(struct device *dev)
193{ 189{
194 struct drm_device *drm_dev = dev_get_drvdata(dev); 190 struct drm_device *drm_dev = dev_get_drvdata(dev);
195 struct drm_connector *connector; 191 struct exynos_drm_private *private = drm_dev->dev_private;
196 struct drm_connector_list_iter conn_iter;
197 192
198 if (pm_runtime_suspended(dev) || !drm_dev) 193 if (pm_runtime_suspended(dev) || !drm_dev)
199 return 0; 194 return 0;
200 195
201 drm_connector_list_iter_begin(drm_dev, &conn_iter); 196 drm_atomic_helper_resume(drm_dev, private->suspend_state);
202 drm_for_each_connector_iter(connector, &conn_iter) { 197 exynos_drm_fbdev_resume(drm_dev);
203 if (connector->funcs->dpms) { 198 drm_kms_helper_poll_enable(drm_dev);
204 int dpms = connector->dpms;
205
206 connector->dpms = DRM_MODE_DPMS_OFF;
207 connector->funcs->dpms(connector, dpms);
208 }
209 }
210 drm_connector_list_iter_end(&conn_iter);
211 199
212 return 0; 200 return 0;
213} 201}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index cf131c2aa23e..f8bae4cb4823 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -202,6 +202,7 @@ struct drm_exynos_file_private {
202 */ 202 */
203struct exynos_drm_private { 203struct exynos_drm_private {
204 struct drm_fb_helper *fb_helper; 204 struct drm_fb_helper *fb_helper;
205 struct drm_atomic_state *suspend_state;
205 206
206 struct device *dma_dev; 207 struct device *dma_dev;
207 void *mapping; 208 void *mapping;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index c3a068409b48..dfb66ecf417b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -18,6 +18,8 @@
18#include <drm/drm_crtc_helper.h> 18#include <drm/drm_crtc_helper.h>
19#include <drm/exynos_drm.h> 19#include <drm/exynos_drm.h>
20 20
21#include <linux/console.h>
22
21#include "exynos_drm_drv.h" 23#include "exynos_drm_drv.h"
22#include "exynos_drm_fb.h" 24#include "exynos_drm_fb.h"
23#include "exynos_drm_fbdev.h" 25#include "exynos_drm_fbdev.h"
@@ -285,3 +287,21 @@ void exynos_drm_output_poll_changed(struct drm_device *dev)
285 287
286 drm_fb_helper_hotplug_event(fb_helper); 288 drm_fb_helper_hotplug_event(fb_helper);
287} 289}
290
291void exynos_drm_fbdev_suspend(struct drm_device *dev)
292{
293 struct exynos_drm_private *private = dev->dev_private;
294
295 console_lock();
296 drm_fb_helper_set_suspend(private->fb_helper, 1);
297 console_unlock();
298}
299
300void exynos_drm_fbdev_resume(struct drm_device *dev)
301{
302 struct exynos_drm_private *private = dev->dev_private;
303
304 console_lock();
305 drm_fb_helper_set_suspend(private->fb_helper, 0);
306 console_unlock();
307}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
index 330eef87f718..645d1bb7f665 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.h
@@ -21,6 +21,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev);
21void exynos_drm_fbdev_fini(struct drm_device *dev); 21void exynos_drm_fbdev_fini(struct drm_device *dev);
22void exynos_drm_fbdev_restore_mode(struct drm_device *dev); 22void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
23void exynos_drm_output_poll_changed(struct drm_device *dev); 23void exynos_drm_output_poll_changed(struct drm_device *dev);
24void exynos_drm_fbdev_suspend(struct drm_device *drm);
25void exynos_drm_fbdev_resume(struct drm_device *drm);
24 26
25#else 27#else
26 28
@@ -39,6 +41,14 @@ static inline void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
39 41
40#define exynos_drm_output_poll_changed (NULL) 42#define exynos_drm_output_poll_changed (NULL)
41 43
44static inline void exynos_drm_fbdev_suspend(struct drm_device *drm)
45{
46}
47
48static inline void exynos_drm_fbdev_resume(struct drm_device *drm)
49{
50}
51
42#endif 52#endif
43 53
44#endif 54#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 214fa5e51963..0109ff40b1db 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -944,22 +944,27 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
944 struct drm_device *dev = encoder->dev; 944 struct drm_device *dev = encoder->dev;
945 struct drm_connector *connector; 945 struct drm_connector *connector;
946 struct drm_display_mode *m; 946 struct drm_display_mode *m;
947 struct drm_connector_list_iter conn_iter;
947 int mode_ok; 948 int mode_ok;
948 949
949 drm_mode_set_crtcinfo(adjusted_mode, 0); 950 drm_mode_set_crtcinfo(adjusted_mode, 0);
950 951
951 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 952 drm_connector_list_iter_begin(dev, &conn_iter);
953 drm_for_each_connector_iter(connector, &conn_iter) {
952 if (connector->encoder == encoder) 954 if (connector->encoder == encoder)
953 break; 955 break;
954 } 956 }
957 if (connector)
958 drm_connector_get(connector);
959 drm_connector_list_iter_end(&conn_iter);
955 960
956 if (connector->encoder != encoder) 961 if (!connector)
957 return true; 962 return true;
958 963
959 mode_ok = hdmi_mode_valid(connector, adjusted_mode); 964 mode_ok = hdmi_mode_valid(connector, adjusted_mode);
960 965
961 if (mode_ok == MODE_OK) 966 if (mode_ok == MODE_OK)
962 return true; 967 goto cleanup;
963 968
964 /* 969 /*
965 * Find the most suitable mode and copy it to adjusted_mode. 970 * Find the most suitable mode and copy it to adjusted_mode.
@@ -979,6 +984,9 @@ static bool hdmi_mode_fixup(struct drm_encoder *encoder,
979 } 984 }
980 } 985 }
981 986
987cleanup:
988 drm_connector_put(connector);
989
982 return true; 990 return true;
983} 991}
984 992
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 40af17ec6312..ff3154fe6588 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -197,78 +197,65 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
197static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset, 197static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
198 void *p_data, unsigned int bytes) 198 void *p_data, unsigned int bytes)
199{ 199{
200 unsigned int bar_index =
201 (rounddown(offset, 8) % PCI_BASE_ADDRESS_0) / 8;
202 u32 new = *(u32 *)(p_data); 200 u32 new = *(u32 *)(p_data);
203 bool lo = IS_ALIGNED(offset, 8); 201 bool lo = IS_ALIGNED(offset, 8);
204 u64 size; 202 u64 size;
205 int ret = 0; 203 int ret = 0;
206 bool mmio_enabled = 204 bool mmio_enabled =
207 vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY; 205 vgpu_cfg_space(vgpu)[PCI_COMMAND] & PCI_COMMAND_MEMORY;
206 struct intel_vgpu_pci_bar *bars = vgpu->cfg_space.bar;
208 207
209 if (WARN_ON(bar_index >= INTEL_GVT_PCI_BAR_MAX)) 208 /*
210 return -EINVAL; 209 * Power-up software can determine how much address
211 210 * space the device requires by writing a value of
211 * all 1's to the register and then reading the value
212 * back. The device will return 0's in all don't-care
213 * address bits.
214 */
212 if (new == 0xffffffff) { 215 if (new == 0xffffffff) {
213 /* 216 switch (offset) {
214 * Power-up software can determine how much address 217 case PCI_BASE_ADDRESS_0:
215 * space the device requires by writing a value of 218 case PCI_BASE_ADDRESS_1:
216 * all 1's to the register and then reading the value 219 size = ~(bars[INTEL_GVT_PCI_BAR_GTTMMIO].size -1);
217 * back. The device will return 0's in all don't-care 220 intel_vgpu_write_pci_bar(vgpu, offset,
218 * address bits. 221 size >> (lo ? 0 : 32), lo);
219 */ 222 /*
220 size = vgpu->cfg_space.bar[bar_index].size; 223 * Untrap the BAR, since guest hasn't configured a
221 if (lo) { 224 * valid GPA
222 new = rounddown(new, size);
223 } else {
224 u32 val = vgpu_cfg_space(vgpu)[rounddown(offset, 8)];
225 /* for 32bit mode bar it returns all-0 in upper 32
226 * bit, for 64bit mode bar it will calculate the
227 * size with lower 32bit and return the corresponding
228 * value
229 */ 225 */
230 if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
231 new &= (~(size-1)) >> 32;
232 else
233 new = 0;
234 }
235 /*
236 * Unmapp & untrap the BAR, since guest hasn't configured a
237 * valid GPA
238 */
239 switch (bar_index) {
240 case INTEL_GVT_PCI_BAR_GTTMMIO:
241 ret = trap_gttmmio(vgpu, false); 226 ret = trap_gttmmio(vgpu, false);
242 break; 227 break;
243 case INTEL_GVT_PCI_BAR_APERTURE: 228 case PCI_BASE_ADDRESS_2:
229 case PCI_BASE_ADDRESS_3:
230 size = ~(bars[INTEL_GVT_PCI_BAR_APERTURE].size -1);
231 intel_vgpu_write_pci_bar(vgpu, offset,
232 size >> (lo ? 0 : 32), lo);
244 ret = map_aperture(vgpu, false); 233 ret = map_aperture(vgpu, false);
245 break; 234 break;
235 default:
236 /* Unimplemented BARs */
237 intel_vgpu_write_pci_bar(vgpu, offset, 0x0, false);
246 } 238 }
247 intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
248 } else { 239 } else {
249 /* 240 switch (offset) {
250 * Unmapp & untrap the old BAR first, since guest has 241 case PCI_BASE_ADDRESS_0:
251 * re-configured the BAR 242 case PCI_BASE_ADDRESS_1:
252 */ 243 /*
253 switch (bar_index) { 244 * Untrap the old BAR first, since guest has
254 case INTEL_GVT_PCI_BAR_GTTMMIO: 245 * re-configured the BAR
255 ret = trap_gttmmio(vgpu, false); 246 */
247 trap_gttmmio(vgpu, false);
248 intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
249 ret = trap_gttmmio(vgpu, mmio_enabled);
256 break; 250 break;
257 case INTEL_GVT_PCI_BAR_APERTURE: 251 case PCI_BASE_ADDRESS_2:
258 ret = map_aperture(vgpu, false); 252 case PCI_BASE_ADDRESS_3:
253 map_aperture(vgpu, false);
254 intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
255 ret = map_aperture(vgpu, mmio_enabled);
259 break; 256 break;
260 } 257 default:
261 intel_vgpu_write_pci_bar(vgpu, offset, new, lo); 258 intel_vgpu_write_pci_bar(vgpu, offset, new, lo);
262 /* Track the new BAR */
263 if (mmio_enabled) {
264 switch (bar_index) {
265 case INTEL_GVT_PCI_BAR_GTTMMIO:
266 ret = trap_gttmmio(vgpu, true);
267 break;
268 case INTEL_GVT_PCI_BAR_APERTURE:
269 ret = map_aperture(vgpu, true);
270 break;
271 }
272 } 259 }
273 } 260 }
274 return ret; 261 return ret;
@@ -299,10 +286,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
299 } 286 }
300 287
301 switch (rounddown(offset, 4)) { 288 switch (rounddown(offset, 4)) {
302 case PCI_BASE_ADDRESS_0: 289 case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
303 case PCI_BASE_ADDRESS_1:
304 case PCI_BASE_ADDRESS_2:
305 case PCI_BASE_ADDRESS_3:
306 if (WARN_ON(!IS_ALIGNED(offset, 4))) 290 if (WARN_ON(!IS_ALIGNED(offset, 4)))
307 return -EINVAL; 291 return -EINVAL;
308 return emulate_pci_bar_write(vgpu, offset, p_data, bytes); 292 return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
@@ -344,7 +328,6 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
344 struct intel_gvt *gvt = vgpu->gvt; 328 struct intel_gvt *gvt = vgpu->gvt;
345 const struct intel_gvt_device_info *info = &gvt->device_info; 329 const struct intel_gvt_device_info *info = &gvt->device_info;
346 u16 *gmch_ctl; 330 u16 *gmch_ctl;
347 int i;
348 331
349 memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, 332 memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
350 info->cfg_space_size); 333 info->cfg_space_size);
@@ -371,13 +354,13 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
371 */ 354 */
372 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); 355 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
373 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); 356 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
357 memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_4, 0, 8);
374 memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); 358 memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
375 359
376 for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { 360 vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
377 vgpu->cfg_space.bar[i].size = pci_resource_len( 361 pci_resource_len(gvt->dev_priv->drm.pdev, 0);
378 gvt->dev_priv->drm.pdev, i * 2); 362 vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
379 vgpu->cfg_space.bar[i].tracked = false; 363 pci_resource_len(gvt->dev_priv->drm.pdev, 2);
380 }
381} 364}
382 365
383/** 366/**
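
The hunks above rework how vGPU config writes emulate PCI BAR sizing: the guest writes all-1s to a BAR and reads back a mask whose zero bits are the don't-care address bits. A minimal standalone sketch of that arithmetic, assuming a simplified flag-free BAR layout and hypothetical names (this is not the GVT-g API):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * After an all-1s write, a BAR reads back ~(size - 1), split across
     * two dwords when the BAR is 64-bit. Real BARs also keep their low
     * type/prefetch flag bits, omitted here for clarity.
     */
    static uint32_t bar_sizing_read(uint64_t bar_size, int hi_dword)
    {
        uint64_t mask = ~(bar_size - 1);    /* bar_size is a power of two */

        return hi_dword ? (uint32_t)(mask >> 32) : (uint32_t)mask;
    }

    int main(void)
    {
        /* a 16 MB 64-bit memory BAR */
        printf("lo = 0x%08x\n", bar_sizing_read(16u << 20, 0)); /* 0xff000000 */
        printf("hi = 0x%08x\n", bar_sizing_read(16u << 20, 1)); /* 0xffffffff */
        return 0;
    }

This mirrors new-side lines 230-232 above, where the aperture size mask is computed once and the low or high half is returned depending on which dword the guest wrote.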
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index e21ce9c18b6e..b63893eeca73 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -839,7 +839,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
839 pipe); 839 pipe);
840 int position; 840 int position;
841 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 841 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
842 bool in_vbl = true;
843 unsigned long irqflags; 842 unsigned long irqflags;
844 843
845 if (WARN_ON(!mode->crtc_clock)) { 844 if (WARN_ON(!mode->crtc_clock)) {
@@ -922,8 +921,6 @@ static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
922 921
923 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 922 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
924 923
925 in_vbl = position >= vbl_start && position < vbl_end;
926
927 /* 924 /*
928 * While in vblank, position will be negative 925 * While in vblank, position will be negative
929 * counting up towards 0 at vbl_end. And outside 926 * counting up towards 0 at vbl_end. And outside
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index d805b6e6fe71..27743be5b768 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -606,11 +606,6 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder,
606 connector->encoder->base.id, 606 connector->encoder->base.id,
607 connector->encoder->name); 607 connector->encoder->name);
608 608
609 /* ELD Conn_Type */
610 connector->eld[5] &= ~(3 << 2);
611 if (intel_crtc_has_dp_encoder(crtc_state))
612 connector->eld[5] |= (1 << 2);
613
614 connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; 609 connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
615 610
616 if (dev_priv->display.audio_codec_enable) 611 if (dev_priv->display.audio_codec_enable)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 183e87e8ea31..00c6aee0a9a1 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1163,6 +1163,13 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port,
1163 is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0; 1163 is_hdmi = is_dvi && (child->common.device_type & DEVICE_TYPE_NOT_HDMI_OUTPUT) == 0;
1164 is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR); 1164 is_edp = is_dp && (child->common.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
1165 1165
1166 if (port == PORT_A && is_dvi) {
1167 DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
1168 is_hdmi ? "/HDMI" : "");
1169 is_dvi = false;
1170 is_hdmi = false;
1171 }
1172
1166 info->supports_dvi = is_dvi; 1173 info->supports_dvi = is_dvi;
1167 info->supports_hdmi = is_hdmi; 1174 info->supports_hdmi = is_hdmi;
1168 info->supports_dp = is_dp; 1175 info->supports_dp = is_dp;
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 965988f79a55..92c1f8e166dc 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -216,7 +216,7 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
216 216
217 mask = DC_STATE_DEBUG_MASK_MEMORY_UP; 217 mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
218 218
219 if (IS_BROXTON(dev_priv)) 219 if (IS_GEN9_LP(dev_priv))
220 mask |= DC_STATE_DEBUG_MASK_CORES; 220 mask |= DC_STATE_DEBUG_MASK_CORES;
221 221
222 /* The below bit doesn't need to be cleared ever afterwards */ 222 /* The below bit doesn't need to be cleared ever afterwards */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 4b4fd1f8110b..476681d5940c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1655,7 +1655,8 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
1655out: 1655out:
1656 if (ret && IS_GEN9_LP(dev_priv)) { 1656 if (ret && IS_GEN9_LP(dev_priv)) {
1657 tmp = I915_READ(BXT_PHY_CTL(port)); 1657 tmp = I915_READ(BXT_PHY_CTL(port));
1658 if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK | 1658 if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
1659 BXT_PHY_LANE_POWERDOWN_ACK |
1659 BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) 1660 BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
1660 DRM_ERROR("Port %c enabled but PHY powered down? " 1661 DRM_ERROR("Port %c enabled but PHY powered down? "
1661 "(PHY_CTL %08x)\n", port_name(port), tmp); 1662 "(PHY_CTL %08x)\n", port_name(port), tmp);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f17275519484..64f7b51ed97c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12359,7 +12359,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
12359 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 12359 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
12360 struct drm_crtc *crtc; 12360 struct drm_crtc *crtc;
12361 struct intel_crtc_state *intel_cstate; 12361 struct intel_crtc_state *intel_cstate;
12362 bool hw_check = intel_state->modeset;
12363 u64 put_domains[I915_MAX_PIPES] = {}; 12362 u64 put_domains[I915_MAX_PIPES] = {};
12364 unsigned crtc_vblank_mask = 0; 12363 unsigned crtc_vblank_mask = 0;
12365 int i; 12364 int i;
@@ -12376,7 +12375,6 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
12376 12375
12377 if (needs_modeset(new_crtc_state) || 12376 if (needs_modeset(new_crtc_state) ||
12378 to_intel_crtc_state(new_crtc_state)->update_pipe) { 12377 to_intel_crtc_state(new_crtc_state)->update_pipe) {
12379 hw_check = true;
12380 12378
12381 put_domains[to_intel_crtc(crtc)->pipe] = 12379 put_domains[to_intel_crtc(crtc)->pipe] =
12382 modeset_get_crtc_power_domains(crtc, 12380 modeset_get_crtc_power_domains(crtc,
@@ -14030,7 +14028,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
14030 14028
14031 if (mode_cmd->handles[i] != mode_cmd->handles[0]) { 14029 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
14032 DRM_DEBUG_KMS("bad plane %d handle\n", i); 14030 DRM_DEBUG_KMS("bad plane %d handle\n", i);
14033 return -EINVAL; 14031 goto err;
14034 } 14032 }
14035 14033
14036 stride_alignment = intel_fb_stride_alignment(fb, i); 14034 stride_alignment = intel_fb_stride_alignment(fb, i);
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
index 09b670929786..de38d014ed39 100644
--- a/drivers/gpu/drm/i915/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -208,12 +208,6 @@ static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
208 }, 208 },
209}; 209};
210 210
211static u32 bxt_phy_port_mask(const struct bxt_ddi_phy_info *phy_info)
212{
213 return (phy_info->dual_channel * BIT(phy_info->channel[DPIO_CH1].port)) |
214 BIT(phy_info->channel[DPIO_CH0].port);
215}
216
217static const struct bxt_ddi_phy_info * 211static const struct bxt_ddi_phy_info *
218bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count) 212bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
219{ 213{
@@ -313,7 +307,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
313 enum dpio_phy phy) 307 enum dpio_phy phy)
314{ 308{
315 const struct bxt_ddi_phy_info *phy_info; 309 const struct bxt_ddi_phy_info *phy_info;
316 enum port port;
317 310
318 phy_info = bxt_get_phy_info(dev_priv, phy); 311 phy_info = bxt_get_phy_info(dev_priv, phy);
319 312
@@ -335,19 +328,6 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
335 return false; 328 return false;
336 } 329 }
337 330
338 for_each_port_masked(port, bxt_phy_port_mask(phy_info)) {
339 u32 tmp = I915_READ(BXT_PHY_CTL(port));
340
341 if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
342 DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
343 "for port %c powered down "
344 "(PHY_CTL %08x)\n",
345 phy, port_name(port), tmp);
346
347 return false;
348 }
349 }
350
351 return true; 331 return true;
352} 332}
353 333
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index f0c11aec5ea5..7442891762be 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -892,8 +892,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
892 struct intel_crtc_state *old_crtc_state, 892 struct intel_crtc_state *old_crtc_state,
893 struct drm_connector_state *old_conn_state) 893 struct drm_connector_state *old_conn_state)
894{ 894{
895 struct drm_device *dev = encoder->base.dev;
896 struct drm_i915_private *dev_priv = dev->dev_private;
897 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 895 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
898 enum port port; 896 enum port port;
899 897
@@ -903,15 +901,6 @@ static void intel_dsi_disable(struct intel_encoder *encoder,
903 intel_panel_disable_backlight(old_conn_state); 901 intel_panel_disable_backlight(old_conn_state);
904 902
905 /* 903 /*
906 * Disable Device ready before the port shutdown in order
907 * to avoid split screen
908 */
909 if (IS_BROXTON(dev_priv)) {
910 for_each_dsi_port(port, intel_dsi->ports)
911 I915_WRITE(MIPI_DEVICE_READY(port), 0);
912 }
913
914 /*
915 * According to the spec we should send SHUTDOWN before 904 * According to the spec we should send SHUTDOWN before
916 * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing 905 * MIPI_SEQ_DISPLAY_OFF only for v3+ VBTs, but field testing
917 * has shown that the v3 sequence works for v2 VBTs too 906 * has shown that the v3 sequence works for v2 VBTs too
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 951e834dd274..28a778b785ac 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -30,6 +30,21 @@
30#include "intel_drv.h" 30#include "intel_drv.h"
31#include "i915_drv.h" 31#include "i915_drv.h"
32 32
33static void intel_connector_update_eld_conn_type(struct drm_connector *connector)
34{
35 u8 conn_type;
36
37 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
38 connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
39 conn_type = DRM_ELD_CONN_TYPE_DP;
40 } else {
41 conn_type = DRM_ELD_CONN_TYPE_HDMI;
42 }
43
44 connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] &= ~DRM_ELD_CONN_TYPE_MASK;
45 connector->eld[DRM_ELD_SAD_COUNT_CONN_TYPE] |= conn_type;
46}
47
33/** 48/**
34 * intel_connector_update_modes - update connector from edid 49 * intel_connector_update_modes - update connector from edid
35 * @connector: DRM connector device to use 50 * @connector: DRM connector device to use
@@ -44,6 +59,8 @@ int intel_connector_update_modes(struct drm_connector *connector,
44 ret = drm_add_edid_modes(connector, edid); 59 ret = drm_add_edid_modes(connector, edid);
45 drm_edid_to_eld(connector, edid); 60 drm_edid_to_eld(connector, edid);
46 61
62 intel_connector_update_eld_conn_type(connector);
63
47 return ret; 64 return ret;
48} 65}
49 66
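
The helper added above rewrites the connection-type field that lives in bits 3:2 of ELD byte 5, replacing the open-coded version removed from intel_audio.c. A standalone sketch of that masking; the macro values mirror the DRM_ELD_* definitions in <drm/drm_edid.h> of this period, copied as plain macros so the sketch compiles outside the kernel:

    #include <stdint.h>
    #include <stdio.h>

    #define ELD_SAD_COUNT_CONN_TYPE 5          /* DRM_ELD_SAD_COUNT_CONN_TYPE */
    #define ELD_CONN_TYPE_MASK      (3 << 2)   /* DRM_ELD_CONN_TYPE_MASK */
    #define ELD_CONN_TYPE_HDMI      (0 << 2)
    #define ELD_CONN_TYPE_DP        (1 << 2)

    /* Clear bits 3:2 of ELD byte 5, then set the connection type. */
    static void eld_set_conn_type(uint8_t *eld, int is_dp)
    {
        eld[ELD_SAD_COUNT_CONN_TYPE] &= ~ELD_CONN_TYPE_MASK;
        eld[ELD_SAD_COUNT_CONN_TYPE] |= is_dp ? ELD_CONN_TYPE_DP
                                              : ELD_CONN_TYPE_HDMI;
    }

    int main(void)
    {
        uint8_t eld[128] = { [5] = 0x4f };   /* SAD count set, stale type bits */

        eld_set_conn_type(eld, 1);
        printf("byte 5 = 0x%02x\n", eld[5]); /* 0x47: count kept, DP type set */
        return 0;
    }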
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index a17b1de7d7e0..3b1c5d783ee7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -1699,6 +1699,8 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
1699 if (!panel->backlight.max) 1699 if (!panel->backlight.max)
1700 return -ENODEV; 1700 return -ENODEV;
1701 1701
1702 panel->backlight.min = get_backlight_min_vbt(connector);
1703
1702 val = bxt_get_backlight(connector); 1704 val = bxt_get_backlight(connector);
1703 val = intel_panel_compute_brightness(connector, val); 1705 val = intel_panel_compute_brightness(connector, val);
1704 panel->backlight.level = clamp(val, panel->backlight.min, 1706 panel->backlight.level = clamp(val, panel->backlight.min,
@@ -1735,6 +1737,8 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
1735 if (!panel->backlight.max) 1737 if (!panel->backlight.max)
1736 return -ENODEV; 1738 return -ENODEV;
1737 1739
1740 panel->backlight.min = get_backlight_min_vbt(connector);
1741
1738 val = bxt_get_backlight(connector); 1742 val = bxt_get_backlight(connector);
1739 val = intel_panel_compute_brightness(connector, val); 1743 val = intel_panel_compute_brightness(connector, val);
1740 panel->backlight.level = clamp(val, panel->backlight.min, 1744 panel->backlight.level = clamp(val, panel->backlight.min,
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index b66d8e136aa3..b3a087cb0860 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2782,6 +2782,9 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
2782 2782
2783 /* 6. Enable DBUF */ 2783 /* 6. Enable DBUF */
2784 gen9_dbuf_enable(dev_priv); 2784 gen9_dbuf_enable(dev_priv);
2785
2786 if (resume && dev_priv->csr.dmc_payload)
2787 intel_csr_load_program(dev_priv);
2785} 2788}
2786 2789
2787#undef CNL_PROCMON_IDX 2790#undef CNL_PROCMON_IDX
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 14c5613b4388..afbf50d0c08f 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -509,23 +509,25 @@ static void qxl_primary_atomic_update(struct drm_plane *plane,
509 .y2 = qfb->base.height 509 .y2 = qfb->base.height
510 }; 510 };
511 511
512 if (!old_state->fb) { 512 if (old_state->fb) {
513 qxl_io_log(qdev, 513 qfb_old = to_qxl_framebuffer(old_state->fb);
514 "create primary fb: %dx%d,%d,%d\n", 514 bo_old = gem_to_qxl_bo(qfb_old->obj);
515 bo->surf.width, bo->surf.height, 515 } else {
516 bo->surf.stride, bo->surf.format); 516 bo_old = NULL;
517 }
517 518
518 qxl_io_create_primary(qdev, 0, bo); 519 if (bo == bo_old)
519 bo->is_primary = true;
520 return; 520 return;
521 521
522 } else { 522 if (bo_old && bo_old->is_primary) {
523 qfb_old = to_qxl_framebuffer(old_state->fb); 523 qxl_io_destroy_primary(qdev);
524 bo_old = gem_to_qxl_bo(qfb_old->obj);
525 bo_old->is_primary = false; 524 bo_old->is_primary = false;
526 } 525 }
527 526
528 bo->is_primary = true; 527 if (!bo->is_primary) {
528 qxl_io_create_primary(qdev, 0, bo);
529 bo->is_primary = true;
530 }
529 qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1); 531 qxl_draw_dirty_fb(qdev, qfb, bo, 0, 0, &norect, 1, 1);
530} 532}
531 533
@@ -534,13 +536,15 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane,
534{ 536{
535 struct qxl_device *qdev = plane->dev->dev_private; 537 struct qxl_device *qdev = plane->dev->dev_private;
536 538
537 if (old_state->fb) 539 if (old_state->fb) {
538 { struct qxl_framebuffer *qfb = 540 struct qxl_framebuffer *qfb =
539 to_qxl_framebuffer(old_state->fb); 541 to_qxl_framebuffer(old_state->fb);
540 struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj); 542 struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj);
541 543
542 qxl_io_destroy_primary(qdev); 544 if (bo->is_primary) {
543 bo->is_primary = false; 545 qxl_io_destroy_primary(qdev);
546 bo->is_primary = false;
547 }
544 } 548 }
545} 549}
546 550
@@ -698,14 +702,15 @@ static void qxl_plane_cleanup_fb(struct drm_plane *plane,
698 struct drm_gem_object *obj; 702 struct drm_gem_object *obj;
699 struct qxl_bo *user_bo; 703 struct qxl_bo *user_bo;
700 704
701 if (!plane->state->fb) { 705 if (!old_state->fb) {
702 /* we never executed prepare_fb, so there's nothing to 706 /*
707 * we never executed prepare_fb, so there's nothing to
703 * unpin. 708 * unpin.
704 */ 709 */
705 return; 710 return;
706 } 711 }
707 712
708 obj = to_qxl_framebuffer(plane->state->fb)->obj; 713 obj = to_qxl_framebuffer(old_state->fb)->obj;
709 user_bo = gem_to_qxl_bo(obj); 714 user_bo = gem_to_qxl_bo(obj);
710 qxl_bo_unpin(user_bo); 715 qxl_bo_unpin(user_bo);
711} 716}
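
The net effect of the qxl hunks is to make primary-surface handling idempotent: create only when the buffer is not primary yet, destroy only when it is, and bail out early when the same buffer is re-committed. A toy model of that bookkeeping, with stubs standing in for the qxl_io_* calls (not the driver API):

    #include <stdbool.h>
    #include <stdio.h>

    struct bo { bool is_primary; };

    static void io_create_primary(struct bo *bo) { (void)bo; puts("create"); }
    static void io_destroy_primary(void) { puts("destroy"); }

    static void flip_primary(struct bo *bo, struct bo *bo_old)
    {
        if (bo == bo_old)                   /* same buffer: nothing to do */
            return;
        if (bo_old && bo_old->is_primary) {
            io_destroy_primary();           /* tear down the old surface once */
            bo_old->is_primary = false;
        }
        if (!bo->is_primary) {
            io_create_primary(bo);          /* create the new surface once */
            bo->is_primary = true;
        }
    }

    int main(void)
    {
        struct bo a = { false }, b = { false };

        flip_primary(&a, NULL);  /* create */
        flip_primary(&a, &a);    /* no-op */
        flip_primary(&b, &a);    /* destroy + create */
        return 0;
    }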
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 997131d58c7f..ffc10cadcf34 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1663,7 +1663,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1663 radeon_agp_suspend(rdev); 1663 radeon_agp_suspend(rdev);
1664 1664
1665 pci_save_state(dev->pdev); 1665 pci_save_state(dev->pdev);
1666 if (freeze && rdev->family >= CHIP_CEDAR) { 1666 if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
1667 rdev->asic->asic_reset(rdev, true); 1667 rdev->asic->asic_reset(rdev, true);
1668 pci_restore_state(dev->pdev); 1668 pci_restore_state(dev->pdev);
1669 } else if (suspend) { 1669 } else if (suspend) {
diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig
index 06f05302ee75..882d85db9053 100644
--- a/drivers/gpu/drm/sun4i/Kconfig
+++ b/drivers/gpu/drm/sun4i/Kconfig
@@ -26,7 +26,7 @@ config DRM_SUN4I_HDMI_CEC
26 bool "Allwinner A10 HDMI CEC Support" 26 bool "Allwinner A10 HDMI CEC Support"
27 depends on DRM_SUN4I_HDMI 27 depends on DRM_SUN4I_HDMI
28 select CEC_CORE 28 select CEC_CORE
29 depends on CEC_PIN 29 select CEC_PIN
30 help 30 help
31 Choose this option if you have an Allwinner SoC with an HDMI 31 Choose this option if you have an Allwinner SoC with an HDMI
32 controller and want to use CEC. 32 controller and want to use CEC.
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi.h b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
index 1457750988da..a1f8cba251a2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi.h
@@ -15,7 +15,7 @@
15#include <drm/drm_connector.h> 15#include <drm/drm_connector.h>
16#include <drm/drm_encoder.h> 16#include <drm/drm_encoder.h>
17 17
18#include <media/cec.h> 18#include <media/cec-pin.h>
19 19
20#define SUN4I_HDMI_CTRL_REG 0x004 20#define SUN4I_HDMI_CTRL_REG 0x004
21#define SUN4I_HDMI_CTRL_ENABLE BIT(31) 21#define SUN4I_HDMI_CTRL_ENABLE BIT(31)
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 9ea6cd5a1370..3cf1a6932fac 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -302,26 +302,29 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
302 hdmi->mod_clk = devm_clk_get(dev, "mod"); 302 hdmi->mod_clk = devm_clk_get(dev, "mod");
303 if (IS_ERR(hdmi->mod_clk)) { 303 if (IS_ERR(hdmi->mod_clk)) {
304 dev_err(dev, "Couldn't get the HDMI mod clock\n"); 304 dev_err(dev, "Couldn't get the HDMI mod clock\n");
305 return PTR_ERR(hdmi->mod_clk); 305 ret = PTR_ERR(hdmi->mod_clk);
306 goto err_disable_bus_clk;
306 } 307 }
307 clk_prepare_enable(hdmi->mod_clk); 308 clk_prepare_enable(hdmi->mod_clk);
308 309
309 hdmi->pll0_clk = devm_clk_get(dev, "pll-0"); 310 hdmi->pll0_clk = devm_clk_get(dev, "pll-0");
310 if (IS_ERR(hdmi->pll0_clk)) { 311 if (IS_ERR(hdmi->pll0_clk)) {
311 dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n"); 312 dev_err(dev, "Couldn't get the HDMI PLL 0 clock\n");
312 return PTR_ERR(hdmi->pll0_clk); 313 ret = PTR_ERR(hdmi->pll0_clk);
314 goto err_disable_mod_clk;
313 } 315 }
314 316
315 hdmi->pll1_clk = devm_clk_get(dev, "pll-1"); 317 hdmi->pll1_clk = devm_clk_get(dev, "pll-1");
316 if (IS_ERR(hdmi->pll1_clk)) { 318 if (IS_ERR(hdmi->pll1_clk)) {
317 dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n"); 319 dev_err(dev, "Couldn't get the HDMI PLL 1 clock\n");
318 return PTR_ERR(hdmi->pll1_clk); 320 ret = PTR_ERR(hdmi->pll1_clk);
321 goto err_disable_mod_clk;
319 } 322 }
320 323
321 ret = sun4i_tmds_create(hdmi); 324 ret = sun4i_tmds_create(hdmi);
322 if (ret) { 325 if (ret) {
323 dev_err(dev, "Couldn't create the TMDS clock\n"); 326 dev_err(dev, "Couldn't create the TMDS clock\n");
324 return ret; 327 goto err_disable_mod_clk;
325 } 328 }
326 329
327 writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG); 330 writel(SUN4I_HDMI_CTRL_ENABLE, hdmi->base + SUN4I_HDMI_CTRL_REG);
@@ -362,7 +365,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
362 ret = sun4i_hdmi_i2c_create(dev, hdmi); 365 ret = sun4i_hdmi_i2c_create(dev, hdmi);
363 if (ret) { 366 if (ret) {
364 dev_err(dev, "Couldn't create the HDMI I2C adapter\n"); 367 dev_err(dev, "Couldn't create the HDMI I2C adapter\n");
365 return ret; 368 goto err_disable_mod_clk;
366 } 369 }
367 370
368 drm_encoder_helper_add(&hdmi->encoder, 371 drm_encoder_helper_add(&hdmi->encoder,
@@ -422,6 +425,10 @@ err_cleanup_connector:
422 drm_encoder_cleanup(&hdmi->encoder); 425 drm_encoder_cleanup(&hdmi->encoder);
423err_del_i2c_adapter: 426err_del_i2c_adapter:
424 i2c_del_adapter(hdmi->i2c); 427 i2c_del_adapter(hdmi->i2c);
428err_disable_mod_clk:
429 clk_disable_unprepare(hdmi->mod_clk);
430err_disable_bus_clk:
431 clk_disable_unprepare(hdmi->bus_clk);
425 return ret; 432 return ret;
426} 433}
427 434
@@ -434,6 +441,8 @@ static void sun4i_hdmi_unbind(struct device *dev, struct device *master,
434 drm_connector_cleanup(&hdmi->connector); 441 drm_connector_cleanup(&hdmi->connector);
435 drm_encoder_cleanup(&hdmi->encoder); 442 drm_encoder_cleanup(&hdmi->encoder);
436 i2c_del_adapter(hdmi->i2c); 443 i2c_del_adapter(hdmi->i2c);
444 clk_disable_unprepare(hdmi->mod_clk);
445 clk_disable_unprepare(hdmi->bus_clk);
437} 446}
438 447
439static const struct component_ops sun4i_hdmi_ops = { 448static const struct component_ops sun4i_hdmi_ops = {
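
The sun4i hunks replace bare returns with a goto ladder so every clock enabled before a failure is disabled again, in reverse order. The shape of that pattern, reduced to a compilable skeleton (the clk_* names here are local stubs, not the kernel clk API):

    #include <stdio.h>

    static int clk_prepare_enable(const char *clk)
    {
        printf("enable %s\n", clk);
        return 0;
    }

    static void clk_disable_unprepare(const char *clk)
    {
        printf("disable %s\n", clk);
    }

    static int tmds_create(void) { return -1; }  /* force the error path */

    static int hdmi_bind(void)
    {
        int ret;

        ret = clk_prepare_enable("bus");
        if (ret)
            return ret;
        ret = clk_prepare_enable("mod");
        if (ret)
            goto err_disable_bus_clk;

        ret = tmds_create();
        if (ret)
            goto err_disable_mod_clk;
        return 0;

    err_disable_mod_clk:
        clk_disable_unprepare("mod");    /* undo in reverse order */
    err_disable_bus_clk:
        clk_disable_unprepare("bus");
        return ret;
    }

    int main(void) { return hdmi_bind() ? 1 : 0; }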
diff --git a/drivers/gpu/drm/tegra/trace.h b/drivers/gpu/drm/tegra/trace.h
index e9b7cdad5c4c..5a1ab4046e92 100644
--- a/drivers/gpu/drm/tegra/trace.h
+++ b/drivers/gpu/drm/tegra/trace.h
@@ -63,6 +63,6 @@ DEFINE_EVENT(register_access, sor_readl,
63 63
64/* This part must be outside protection */ 64/* This part must be outside protection */
65#undef TRACE_INCLUDE_PATH 65#undef TRACE_INCLUDE_PATH
66#define TRACE_INCLUDE_PATH . 66#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/tegra
67#define TRACE_INCLUDE_FILE trace 67#define TRACE_INCLUDE_FILE trace
68#include <trace/define_trace.h> 68#include <trace/define_trace.h>
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b397a14ab970..a98919199858 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -533,6 +533,7 @@
533#define USB_VENDOR_ID_IDEACOM 0x1cb6 533#define USB_VENDOR_ID_IDEACOM 0x1cb6
534#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650 534#define USB_DEVICE_ID_IDEACOM_IDC6650 0x6650
535#define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651 535#define USB_DEVICE_ID_IDEACOM_IDC6651 0x6651
536#define USB_DEVICE_ID_IDEACOM_IDC6680 0x6680
536 537
537#define USB_VENDOR_ID_ILITEK 0x222a 538#define USB_VENDOR_ID_ILITEK 0x222a
538#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001 539#define USB_DEVICE_ID_ILITEK_MULTITOUCH 0x0001
@@ -660,6 +661,7 @@
660#define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048 661#define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048
661#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 662#define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067
662#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 663#define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
664#define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
663 665
664#define USB_VENDOR_ID_LG 0x1fd2 666#define USB_VENDOR_ID_LG 0x1fd2
665#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064 667#define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 440b999304a5..9e8c4d2ba11d 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -930,6 +930,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
930 field->application != HID_DG_PEN && 930 field->application != HID_DG_PEN &&
931 field->application != HID_DG_TOUCHPAD && 931 field->application != HID_DG_TOUCHPAD &&
932 field->application != HID_GD_KEYBOARD && 932 field->application != HID_GD_KEYBOARD &&
933 field->application != HID_GD_SYSTEM_CONTROL &&
933 field->application != HID_CP_CONSUMER_CONTROL && 934 field->application != HID_CP_CONSUMER_CONTROL &&
934 field->application != HID_GD_WIRELESS_RADIO_CTLS && 935 field->application != HID_GD_WIRELESS_RADIO_CTLS &&
935 !(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS && 936 !(field->application == HID_VD_ASUS_CUSTOM_MEDIA_KEYS &&
@@ -1419,6 +1420,12 @@ static const struct hid_device_id mt_devices[] = {
1419 USB_VENDOR_ID_ALPS_JP, 1420 USB_VENDOR_ID_ALPS_JP,
1420 HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) }, 1421 HID_DEVICE_ID_ALPS_U1_DUAL_3BTN_PTP) },
1421 1422
1423 /* Lenovo X1 TAB Gen 2 */
1424 { .driver_data = MT_CLS_WIN_8_DUAL,
1425 HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
1426 USB_VENDOR_ID_LENOVO,
1427 USB_DEVICE_ID_LENOVO_X1_TAB) },
1428
1422 /* Anton devices */ 1429 /* Anton devices */
1423 { .driver_data = MT_CLS_EXPORT_ALL_INPUTS, 1430 { .driver_data = MT_CLS_EXPORT_ALL_INPUTS,
1424 MT_USB_DEVICE(USB_VENDOR_ID_ANTON, 1431 MT_USB_DEVICE(USB_VENDOR_ID_ANTON,
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 5b40c2614599..ef241d66562e 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -436,17 +436,24 @@ static int rmi_post_resume(struct hid_device *hdev)
436 if (!(data->device_flags & RMI_DEVICE)) 436 if (!(data->device_flags & RMI_DEVICE))
437 return 0; 437 return 0;
438 438
439 ret = rmi_reset_attn_mode(hdev); 439 /* Make sure the HID device is ready to receive events */
440 ret = hid_hw_open(hdev);
440 if (ret) 441 if (ret)
441 return ret; 442 return ret;
442 443
444 ret = rmi_reset_attn_mode(hdev);
445 if (ret)
446 goto out;
447
443 ret = rmi_driver_resume(rmi_dev, false); 448 ret = rmi_driver_resume(rmi_dev, false);
444 if (ret) { 449 if (ret) {
445 hid_warn(hdev, "Failed to resume device: %d\n", ret); 450 hid_warn(hdev, "Failed to resume device: %d\n", ret);
446 return ret; 451 goto out;
447 } 452 }
448 453
449 return 0; 454out:
455 hid_hw_close(hdev);
456 return ret;
450} 457}
451#endif /* CONFIG_PM */ 458#endif /* CONFIG_PM */
452 459
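
The hid-rmi hunk brackets the resume sequence with hid_hw_open()/hid_hw_close() and funnels every exit through one label, so the close always pairs with the open even when a step fails. In miniature, with stub transport functions:

    #include <stdio.h>

    static int hw_open(void) { puts("open"); return 0; }
    static void hw_close(void) { puts("close"); }
    static int reset_attn_mode(void) { return 0; }
    static int driver_resume(void) { return -5; }  /* simulate a failure */

    static int post_resume(void)
    {
        int ret = hw_open();    /* make the device ready for events */

        if (ret)
            return ret;

        ret = reset_attn_mode();
        if (ret)
            goto out;

        ret = driver_resume();
        if (ret)
            fprintf(stderr, "failed to resume: %d\n", ret);
    out:
        hw_close();             /* always paired with the open above */
        return ret;
    }

    int main(void) { return post_resume() ? 1 : 0; }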
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index ec530454e6f6..5fbe0f81ab2e 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -337,8 +337,8 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
337 kfree(hidraw); 337 kfree(hidraw);
338 } else { 338 } else {
339 /* close device for last reader */ 339 /* close device for last reader */
340 hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
341 hid_hw_close(hidraw->hid); 340 hid_hw_close(hidraw->hid);
341 hid_hw_power(hidraw->hid, PM_HINT_NORMAL);
342 } 342 }
343 } 343 }
344} 344}
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 77396145d2d0..9145c2129a96 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -543,7 +543,8 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
543{ 543{
544 /* the worst case is computed from the set_report command with a 544 /* the worst case is computed from the set_report command with a
545 * reportID > 15 and the maximum report length */ 545 * reportID > 15 and the maximum report length */
546 int args_len = sizeof(__u8) + /* optional ReportID byte */ 546 int args_len = sizeof(__u8) + /* ReportID */
547 sizeof(__u8) + /* optional ReportID byte */
547 sizeof(__u16) + /* data register */ 548 sizeof(__u16) + /* data register */
548 sizeof(__u16) + /* size of the report */ 549 sizeof(__u16) + /* size of the report */
549 report_size; /* report */ 550 report_size; /* report */
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index a83fa76655b9..f489a5cfcb48 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -99,6 +99,7 @@ static const struct hid_blacklist {
99 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL }, 99 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
100 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL }, 100 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
101 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, 101 { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
102 { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT },
102 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL }, 103 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
103 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL }, 104 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
104 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET }, 105 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS, HID_QUIRK_NOGET },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index e82a696a1d07..906e654fb0ba 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -668,8 +668,10 @@ static struct wacom_hdev_data *wacom_get_hdev_data(struct hid_device *hdev)
668 668
669 /* Try to find an already-probed interface from the same device */ 669 /* Try to find an already-probed interface from the same device */
670 list_for_each_entry(data, &wacom_udev_list, list) { 670 list_for_each_entry(data, &wacom_udev_list, list) {
671 if (compare_device_paths(hdev, data->dev, '/')) 671 if (compare_device_paths(hdev, data->dev, '/')) {
672 kref_get(&data->kref);
672 return data; 673 return data;
674 }
673 } 675 }
674 676
675 /* Fallback to finding devices that appear to be "siblings" */ 677 /* Fallback to finding devices that appear to be "siblings" */
@@ -766,6 +768,9 @@ static int wacom_led_control(struct wacom *wacom)
766 if (!wacom->led.groups) 768 if (!wacom->led.groups)
767 return -ENOTSUPP; 769 return -ENOTSUPP;
768 770
771 if (wacom->wacom_wac.features.type == REMOTE)
772 return -ENOTSUPP;
773
769 if (wacom->wacom_wac.pid) { /* wireless connected */ 774 if (wacom->wacom_wac.pid) { /* wireless connected */
770 report_id = WAC_CMD_WL_LED_CONTROL; 775 report_id = WAC_CMD_WL_LED_CONTROL;
771 buf_size = 13; 776 buf_size = 13;
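
The one-line wacom_get_hdev_data() change applies a classic refcount rule: every pointer returned from a shared-object lookup must own a reference, or the first release frees state still in use elsewhere. A toy kref illustrates why (modeled loosely on <linux/kref.h>):

    #include <stdio.h>

    struct kref { int count; };

    static void kref_get(struct kref *k) { k->count++; }

    static void kref_put(struct kref *k)
    {
        if (--k->count == 0)
            puts("freed");
    }

    struct hdev_data { struct kref kref; };

    static struct hdev_data shared = { { 1 } };  /* one ref: the creator */

    static struct hdev_data *lookup(void)
    {
        kref_get(&shared.kref);  /* the fix: count the new user */
        return &shared;
    }

    int main(void)
    {
        struct hdev_data *a = lookup(), *b = lookup();

        kref_put(&a->kref);       /* not freed: b still holds a ref */
        kref_put(&b->kref);
        kref_put(&shared.kref);   /* creator drops the last ref: freed */
        return 0;
    }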
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index bb17d7bbefd3..aa692e28b2cd 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -567,8 +567,8 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
567 keys = data[9] & 0x07; 567 keys = data[9] & 0x07;
568 } 568 }
569 } else { 569 } else {
570 buttons = ((data[6] & 0x10) << 10) | 570 buttons = ((data[6] & 0x10) << 5) |
571 ((data[5] & 0x10) << 9) | 571 ((data[5] & 0x10) << 4) |
572 ((data[6] & 0x0F) << 4) | 572 ((data[6] & 0x0F) << 4) |
573 (data[5] & 0x0F); 573 (data[5] & 0x0F);
574 } 574 }
@@ -1227,11 +1227,17 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
1227 continue; 1227 continue;
1228 1228
1229 if (range) { 1229 if (range) {
1230 /* Fix rotation alignment: userspace expects zero at left */
1231 int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]);
1232 rotation += 1800/4;
1233 if (rotation > 899)
1234 rotation -= 1800;
1235
1230 input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1])); 1236 input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
1231 input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3])); 1237 input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
1232 input_report_abs(pen_input, ABS_TILT_X, frame[7]); 1238 input_report_abs(pen_input, ABS_TILT_X, (char)frame[7]);
1233 input_report_abs(pen_input, ABS_TILT_Y, frame[8]); 1239 input_report_abs(pen_input, ABS_TILT_Y, (char)frame[8]);
1234 input_report_abs(pen_input, ABS_Z, get_unaligned_le16(&frame[9])); 1240 input_report_abs(pen_input, ABS_Z, rotation);
1235 input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11])); 1241 input_report_abs(pen_input, ABS_WHEEL, get_unaligned_le16(&frame[11]));
1236 } 1242 }
1237 input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5])); 1243 input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
@@ -1319,12 +1325,19 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
1319 unsigned char *data = wacom->data; 1325 unsigned char *data = wacom->data;
1320 1326
1321 int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01); 1327 int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
1322 int ring = data[285]; 1328 int ring = data[285] & 0x7F;
1323 int prox = buttons | (ring & 0x80); 1329 bool ringstatus = data[285] & 0x80;
1330 bool prox = buttons || ringstatus;
1331
1332 /* Fix touchring data: userspace expects 0 at left and increasing clockwise */
1333 ring = 71 - ring;
1334 ring += 3*72/16;
1335 if (ring > 71)
1336 ring -= 72;
1324 1337
1325 wacom_report_numbered_buttons(pad_input, 9, buttons); 1338 wacom_report_numbered_buttons(pad_input, 9, buttons);
1326 1339
1327 input_report_abs(pad_input, ABS_WHEEL, (ring & 0x80) ? (ring & 0x7f) : 0); 1340 input_report_abs(pad_input, ABS_WHEEL, ringstatus ? ring : 0);
1328 1341
1329 input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0); 1342 input_report_key(pad_input, wacom->tool[1], prox ? 1 : 0);
1330 input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0); 1343 input_report_abs(pad_input, ABS_MISC, prox ? PAD_DEVICE_ID : 0);
@@ -1616,6 +1629,20 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
1616 return 0; 1629 return 0;
1617} 1630}
1618 1631
1632static int wacom_offset_rotation(struct input_dev *input, struct hid_usage *usage,
1633 int value, int num, int denom)
1634{
1635 struct input_absinfo *abs = &input->absinfo[usage->code];
1636 int range = (abs->maximum - abs->minimum + 1);
1637
1638 value += num*range/denom;
1639 if (value > abs->maximum)
1640 value -= range;
1641 else if (value < abs->minimum)
1642 value += range;
1643 return value;
1644}
1645
1619int wacom_equivalent_usage(int usage) 1646int wacom_equivalent_usage(int usage)
1620{ 1647{
1621 if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) { 1648 if ((usage & HID_USAGE_PAGE) == WACOM_HID_UP_WACOMDIGITIZER) {
@@ -1898,6 +1925,7 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
1898 unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); 1925 unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
1899 int i; 1926 int i;
1900 bool is_touch_on = value; 1927 bool is_touch_on = value;
1928 bool do_report = false;
1901 1929
1902 /* 1930 /*
1903 * Avoid reporting this event and setting inrange_state if this usage 1931 * Avoid reporting this event and setting inrange_state if this usage
@@ -1912,6 +1940,29 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
1912 } 1940 }
1913 1941
1914 switch (equivalent_usage) { 1942 switch (equivalent_usage) {
1943 case WACOM_HID_WD_TOUCHRING:
1944 /*
1945 * Userspace expects touchrings to increase in value with
1946 * clockwise gestures and have their zero point at the
1947 * tablet's left. HID events "should" be clockwise-
1948 * increasing and zero at top, though the MobileStudio
1949 * Pro and 2nd-gen Intuos Pro don't do this...
1950 */
1951 if (hdev->vendor == 0x56a &&
1952 (hdev->product == 0x34d || hdev->product == 0x34e || /* MobileStudio Pro */
1953 hdev->product == 0x357 || hdev->product == 0x358)) { /* Intuos Pro 2 */
1954 value = (field->logical_maximum - value);
1955
1956 if (hdev->product == 0x357 || hdev->product == 0x358)
1957 value = wacom_offset_rotation(input, usage, value, 3, 16);
1958 else if (hdev->product == 0x34d || hdev->product == 0x34e)
1959 value = wacom_offset_rotation(input, usage, value, 1, 2);
1960 }
1961 else {
1962 value = wacom_offset_rotation(input, usage, value, 1, 4);
1963 }
1964 do_report = true;
1965 break;
1915 case WACOM_HID_WD_TOUCHRINGSTATUS: 1966 case WACOM_HID_WD_TOUCHRINGSTATUS:
1916 if (!value) 1967 if (!value)
1917 input_event(input, usage->type, usage->code, 0); 1968 input_event(input, usage->type, usage->code, 0);
@@ -1945,10 +1996,14 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
1945 value, i); 1996 value, i);
1946 /* fall through*/ 1997 /* fall through*/
1947 default: 1998 default:
1999 do_report = true;
2000 break;
2001 }
2002
2003 if (do_report) {
1948 input_event(input, usage->type, usage->code, value); 2004 input_event(input, usage->type, usage->code, value);
1949 if (value) 2005 if (value)
1950 wacom_wac->hid_data.pad_input_event_flag = true; 2006 wacom_wac->hid_data.pad_input_event_flag = true;
1951 break;
1952 } 2007 }
1953} 2008}
1954 2009
@@ -2086,22 +2141,34 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
2086 wacom_wac->hid_data.tipswitch |= value; 2141 wacom_wac->hid_data.tipswitch |= value;
2087 return; 2142 return;
2088 case HID_DG_TOOLSERIALNUMBER: 2143 case HID_DG_TOOLSERIALNUMBER:
2089 wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL); 2144 if (value) {
2090 wacom_wac->serial[0] |= (__u32)value; 2145 wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
2146 wacom_wac->serial[0] |= (__u32)value;
2147 }
2091 return; 2148 return;
2149 case HID_DG_TWIST:
2150 /*
2151 * Userspace expects pen twist to have its zero point when
2152 * the buttons/finger is on the tablet's left. HID values
2153 * are zero when buttons are toward the top.
2154 */
2155 value = wacom_offset_rotation(input, usage, value, 1, 4);
2156 break;
2092 case WACOM_HID_WD_SENSE: 2157 case WACOM_HID_WD_SENSE:
2093 wacom_wac->hid_data.sense_state = value; 2158 wacom_wac->hid_data.sense_state = value;
2094 return; 2159 return;
2095 case WACOM_HID_WD_SERIALHI: 2160 case WACOM_HID_WD_SERIALHI:
2096 wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF); 2161 if (value) {
2097 wacom_wac->serial[0] |= ((__u64)value) << 32; 2162 wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
2098 /* 2163 wacom_wac->serial[0] |= ((__u64)value) << 32;
2099 * Non-USI EMR devices may contain additional tool type 2164 /*
2100 * information here. See WACOM_HID_WD_TOOLTYPE case for 2165 * Non-USI EMR devices may contain additional tool type
2101 * more details. 2166 * information here. See WACOM_HID_WD_TOOLTYPE case for
2102 */ 2167 * more details.
2103 if (value >> 20 == 1) { 2168 */
2104 wacom_wac->id[0] |= value & 0xFFFFF; 2169 if (value >> 20 == 1) {
2170 wacom_wac->id[0] |= value & 0xFFFFF;
2171 }
2105 } 2172 }
2106 return; 2173 return;
2107 case WACOM_HID_WD_TOOLTYPE: 2174 case WACOM_HID_WD_TOOLTYPE:
@@ -2205,7 +2272,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
2205 input_report_key(input, wacom_wac->tool[0], prox); 2272 input_report_key(input, wacom_wac->tool[0], prox);
2206 if (wacom_wac->serial[0]) { 2273 if (wacom_wac->serial[0]) {
2207 input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]); 2274 input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]);
2208 input_report_abs(input, ABS_MISC, id); 2275 input_report_abs(input, ABS_MISC, prox ? id : 0);
2209 } 2276 }
2210 2277
2211 wacom_wac->hid_data.tipswitch = false; 2278 wacom_wac->hid_data.tipswitch = false;
@@ -2216,6 +2283,7 @@ static void wacom_wac_pen_report(struct hid_device *hdev,
2216 if (!prox) { 2283 if (!prox) {
2217 wacom_wac->tool[0] = 0; 2284 wacom_wac->tool[0] = 0;
2218 wacom_wac->id[0] = 0; 2285 wacom_wac->id[0] = 0;
2286 wacom_wac->serial[0] = 0;
2219 } 2287 }
2220} 2288}
2221 2289
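
Both the Bluetooth pad path and wacom_offset_rotation() above apply the same trick: shift a circular value by a fraction of its range, then wrap it back into [min, max]. Run on the 2nd-gen Intuos Pro touchring (0..71, mirrored, rotated by 3/16 of a turn), a standalone sketch looks like:

    #include <stdio.h>

    static int offset_rotation(int value, int min, int max, int num, int denom)
    {
        int range = max - min + 1;

        value += num * range / denom;   /* rotate by num/denom of a turn */
        if (value > max)
            value -= range;             /* wrap around the circle */
        else if (value < min)
            value += range;
        return value;
    }

    int main(void)
    {
        int raw;

        for (raw = 0; raw < 72; raw += 24) {
            int v = 71 - raw;           /* mirror to clockwise-increasing */
            printf("%2d -> %2d\n", raw, offset_rotation(v, 0, 71, 3, 16));
        }
        return 0;
    }

With range 72 and num/denom = 3/16 the offset is 216/16 = 13 (truncated), matching the ring += 3*72/16 line in the Bluetooth pad hunk.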
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 060df71c2e8b..bcbb031f7263 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -936,14 +936,10 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
936 936
937void vmbus_hvsock_device_unregister(struct vmbus_channel *channel) 937void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
938{ 938{
939 mutex_lock(&vmbus_connection.channel_mutex);
940
941 BUG_ON(!is_hvsock_channel(channel)); 939 BUG_ON(!is_hvsock_channel(channel));
942 940
943 channel->rescind = true; 941 channel->rescind = true;
944 vmbus_device_unregister(channel->device_obj); 942 vmbus_device_unregister(channel->device_obj);
945
946 mutex_unlock(&vmbus_connection.channel_mutex);
947} 943}
948EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister); 944EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
949 945
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index daa75bd41f86..2364281d8593 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -170,6 +170,10 @@ static void fcopy_send_data(struct work_struct *dummy)
170 out_src = smsg_out; 170 out_src = smsg_out;
171 break; 171 break;
172 172
173 case WRITE_TO_FILE:
174 out_src = fcopy_transaction.fcopy_msg;
175 out_len = sizeof(struct hv_do_fcopy);
176 break;
173 default: 177 default:
174 out_src = fcopy_transaction.fcopy_msg; 178 out_src = fcopy_transaction.fcopy_msg;
175 out_len = fcopy_transaction.recv_len; 179 out_len = fcopy_transaction.recv_len;
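
The fcopy fix caps the copy for WRITE_TO_FILE at the fixed header size instead of the full received length, since the daemon only consumes the fixed part. Abstracted, with a toy struct standing in for the real hv_do_fcopy layout:

    #include <stddef.h>
    #include <stdio.h>

    struct do_fcopy {                 /* toy stand-in for hv_do_fcopy */
        unsigned long long offset;
        unsigned int size;
        unsigned char data[6144];
    };

    enum fcopy_op { START_FILE_COPY, WRITE_TO_FILE, COMPLETE_FCOPY };

    static size_t out_len_for(enum fcopy_op op, size_t recv_len)
    {
        switch (op) {
        case WRITE_TO_FILE:
            return sizeof(struct do_fcopy);  /* fixed-size payload */
        default:
            return recv_len;                 /* pass through as received */
        }
    }

    int main(void)
    {
        printf("%zu vs %zu\n", out_len_for(WRITE_TO_FILE, 9000),
               out_len_for(COMPLETE_FCOPY, 9000));
        return 0;
    }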
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 9c0dbb8191ad..e1be61095532 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -630,7 +630,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
630 sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE, 630 sizeof(struct slimpro_resp_msg) * ASYNC_MSG_FIFO_SIZE,
631 GFP_KERNEL); 631 GFP_KERNEL);
632 if (rc) 632 if (rc)
633 goto out_mbox_free; 633 return -ENOMEM;
634 634
635 INIT_WORK(&ctx->workq, xgene_hwmon_evt_work); 635 INIT_WORK(&ctx->workq, xgene_hwmon_evt_work);
636 636
@@ -646,7 +646,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
646 if (IS_ERR(ctx->mbox_chan)) { 646 if (IS_ERR(ctx->mbox_chan)) {
647 dev_err(&pdev->dev, 647 dev_err(&pdev->dev,
648 "SLIMpro mailbox channel request failed\n"); 648 "SLIMpro mailbox channel request failed\n");
649 return -ENODEV; 649 rc = -ENODEV;
650 goto out_mbox_free;
650 } 651 }
651 } else { 652 } else {
652 struct acpi_pcct_hw_reduced *cppc_ss; 653 struct acpi_pcct_hw_reduced *cppc_ss;
@@ -654,7 +655,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
654 if (device_property_read_u32(&pdev->dev, "pcc-channel", 655 if (device_property_read_u32(&pdev->dev, "pcc-channel",
655 &ctx->mbox_idx)) { 656 &ctx->mbox_idx)) {
656 dev_err(&pdev->dev, "no pcc-channel property\n"); 657 dev_err(&pdev->dev, "no pcc-channel property\n");
657 return -ENODEV; 658 rc = -ENODEV;
659 goto out_mbox_free;
658 } 660 }
659 661
660 cl->rx_callback = xgene_hwmon_pcc_rx_cb; 662 cl->rx_callback = xgene_hwmon_pcc_rx_cb;
@@ -662,7 +664,8 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
662 if (IS_ERR(ctx->mbox_chan)) { 664 if (IS_ERR(ctx->mbox_chan)) {
663 dev_err(&pdev->dev, 665 dev_err(&pdev->dev,
664 "PPC channel request failed\n"); 666 "PPC channel request failed\n");
665 return -ENODEV; 667 rc = -ENODEV;
668 goto out_mbox_free;
666 } 669 }
667 670
668 /* 671 /*
@@ -675,13 +678,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
675 if (!cppc_ss) { 678 if (!cppc_ss) {
676 dev_err(&pdev->dev, "PPC subspace not found\n"); 679 dev_err(&pdev->dev, "PPC subspace not found\n");
677 rc = -ENODEV; 680 rc = -ENODEV;
678 goto out_mbox_free; 681 goto out;
679 } 682 }
680 683
681 if (!ctx->mbox_chan->mbox->txdone_irq) { 684 if (!ctx->mbox_chan->mbox->txdone_irq) {
682 dev_err(&pdev->dev, "PCC IRQ not supported\n"); 685 dev_err(&pdev->dev, "PCC IRQ not supported\n");
683 rc = -ENODEV; 686 rc = -ENODEV;
684 goto out_mbox_free; 687 goto out;
685 } 688 }
686 689
687 /* 690 /*
@@ -696,14 +699,14 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
696 } else { 699 } else {
697 dev_err(&pdev->dev, "Failed to get PCC comm region\n"); 700 dev_err(&pdev->dev, "Failed to get PCC comm region\n");
698 rc = -ENODEV; 701 rc = -ENODEV;
699 goto out_mbox_free; 702 goto out;
700 } 703 }
701 704
702 if (!ctx->pcc_comm_addr) { 705 if (!ctx->pcc_comm_addr) {
703 dev_err(&pdev->dev, 706 dev_err(&pdev->dev,
704 "Failed to ioremap PCC comm region\n"); 707 "Failed to ioremap PCC comm region\n");
705 rc = -ENOMEM; 708 rc = -ENOMEM;
706 goto out_mbox_free; 709 goto out;
707 } 710 }
708 711
709 /* 712 /*
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index bc9cebc30526..c2a2ce8ee541 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -144,6 +144,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
144 .driver_data = (kernel_ulong_t)0, 144 .driver_data = (kernel_ulong_t)0,
145 }, 145 },
146 { 146 {
147 /* Lewisburg PCH */
148 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa1a6),
149 .driver_data = (kernel_ulong_t)0,
150 },
151 {
147 /* Gemini Lake */ 152 /* Gemini Lake */
148 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e), 153 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x318e),
149 .driver_data = (kernel_ulong_t)&intel_th_2x, 154 .driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -158,6 +163,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
158 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6), 163 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
159 .driver_data = (kernel_ulong_t)&intel_th_2x, 164 .driver_data = (kernel_ulong_t)&intel_th_2x,
160 }, 165 },
166 {
167 /* Cedar Fork PCH */
168 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
169 .driver_data = (kernel_ulong_t)&intel_th_2x,
170 },
161 { 0 }, 171 { 0 },
162}; 172};
163 173
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 9414900575d8..f129869e05a9 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -1119,7 +1119,7 @@ void stm_source_unregister_device(struct stm_source_data *data)
1119 1119
1120 stm_source_link_drop(src); 1120 stm_source_link_drop(src);
1121 1121
1122 device_destroy(&stm_source_class, src->dev.devt); 1122 device_unregister(&src->dev);
1123} 1123}
1124EXPORT_SYMBOL_GPL(stm_source_unregister_device); 1124EXPORT_SYMBOL_GPL(stm_source_unregister_device);
1125 1125
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c06dce2c1da7..45a3f3ca29b3 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -131,6 +131,7 @@ config I2C_I801
131 Gemini Lake (SOC) 131 Gemini Lake (SOC)
132 Cannon Lake-H (PCH) 132 Cannon Lake-H (PCH)
133 Cannon Lake-LP (PCH) 133 Cannon Lake-LP (PCH)
134 Cedar Fork (PCH)
134 135
135 This driver can also be built as a module. If so, the module 136 This driver can also be built as a module. If so, the module
136 will be called i2c-i801. 137 will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index e114e4e00d29..9e12a53ef7b8 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -68,6 +68,7 @@
68 * Gemini Lake (SOC) 0x31d4 32 hard yes yes yes 68 * Gemini Lake (SOC) 0x31d4 32 hard yes yes yes
69 * Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes 69 * Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes
70 * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes 70 * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
71 * Cedar Fork (PCH) 0x18df 32 hard yes yes yes
71 * 72 *
72 * Features supported by this driver: 73 * Features supported by this driver:
73 * Software PEC no 74 * Software PEC no
@@ -204,6 +205,7 @@
204 205
205/* Older devices have their ID defined in <linux/pci_ids.h> */ 206/* Older devices have their ID defined in <linux/pci_ids.h> */
206#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 207#define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12
208#define PCI_DEVICE_ID_INTEL_CDF_SMBUS 0x18df
207#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df 209#define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df
208#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 210#define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22
209#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 211#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22
@@ -1025,6 +1027,7 @@ static const struct pci_device_id i801_ids[] = {
1025 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) }, 1027 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
1026 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) }, 1028 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) },
1027 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, 1029 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
1030 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) },
1028 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, 1031 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
1029 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, 1032 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
1030 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, 1033 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
@@ -1513,6 +1516,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1513 case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS: 1516 case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS:
1514 case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS: 1517 case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
1515 case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS: 1518 case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
1519 case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
1516 case PCI_DEVICE_ID_INTEL_DNV_SMBUS: 1520 case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
1517 case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: 1521 case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
1518 priv->features |= FEATURE_I2C_BLOCK_READ; 1522 priv->features |= FEATURE_I2C_BLOCK_READ;
diff --git a/drivers/i2c/busses/i2c-img-scb.c b/drivers/i2c/busses/i2c-img-scb.c
index 84fb35f6837f..eb1d91b986fd 100644
--- a/drivers/i2c/busses/i2c-img-scb.c
+++ b/drivers/i2c/busses/i2c-img-scb.c
@@ -1459,6 +1459,6 @@ static struct platform_driver img_scb_i2c_driver = {
1459}; 1459};
1460module_platform_driver(img_scb_i2c_driver); 1460module_platform_driver(img_scb_i2c_driver);
1461 1461
1462MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>"); 1462MODULE_AUTHOR("James Hogan <jhogan@kernel.org>");
1463MODULE_DESCRIPTION("IMG host I2C driver"); 1463MODULE_DESCRIPTION("IMG host I2C driver");
1464MODULE_LICENSE("GPL v2"); 1464MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 22e08ae1704f..25fcc3c1e32b 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -627,6 +627,7 @@ static const struct dev_pm_ops sprd_i2c_pm_ops = {
627 627
628static const struct of_device_id sprd_i2c_of_match[] = { 628static const struct of_device_id sprd_i2c_of_match[] = {
629 { .compatible = "sprd,sc9860-i2c", }, 629 { .compatible = "sprd,sc9860-i2c", },
630 {},
630}; 631};
631 632
632static struct platform_driver sprd_i2c_driver = { 633static struct platform_driver sprd_i2c_driver = {
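
The one-line sprd fix adds the sentinel entry that of_device_id matching relies on: the matcher walks the table until it hits an entry with an empty compatible string, so a missing {} terminator runs off the end of the array. A minimal model of that walk:

    #include <stdio.h>
    #include <string.h>

    struct of_device_id { char compatible[32]; };

    static const struct of_device_id sprd_match[] = {
        { .compatible = "sprd,sc9860-i2c" },
        { /* sentinel: terminates the walk */ },
    };

    static const struct of_device_id *of_match(const char *compat)
    {
        const struct of_device_id *id;

        for (id = sprd_match; id->compatible[0]; id++)
            if (!strcmp(id->compatible, compat))
                return id;
        return NULL;
    }

    int main(void)
    {
        printf("%s\n", of_match("sprd,sc9860-i2c") ? "matched" : "no match");
        return 0;
    }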
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 47c67b0ca896..d4a6e9c2e9aa 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -215,7 +215,7 @@ struct stm32f7_i2c_dev {
215 unsigned int msg_num; 215 unsigned int msg_num;
216 unsigned int msg_id; 216 unsigned int msg_id;
217 struct stm32f7_i2c_msg f7_msg; 217 struct stm32f7_i2c_msg f7_msg;
218 struct stm32f7_i2c_setup *setup; 218 struct stm32f7_i2c_setup setup;
219 struct stm32f7_i2c_timings timing; 219 struct stm32f7_i2c_timings timing;
220}; 220};
221 221
@@ -265,7 +265,7 @@ static struct stm32f7_i2c_spec i2c_specs[] = {
265 }, 265 },
266}; 266};
267 267
268struct stm32f7_i2c_setup stm32f7_setup = { 268static const struct stm32f7_i2c_setup stm32f7_setup = {
269 .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT, 269 .rise_time = STM32F7_I2C_RISE_TIME_DEFAULT,
270 .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT, 270 .fall_time = STM32F7_I2C_FALL_TIME_DEFAULT,
271 .dnf = STM32F7_I2C_DNF_DEFAULT, 271 .dnf = STM32F7_I2C_DNF_DEFAULT,
@@ -537,7 +537,7 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
537 writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR); 537 writel_relaxed(timing, i2c_dev->base + STM32F7_I2C_TIMINGR);
538 538
539 /* Enable I2C */ 539 /* Enable I2C */
540 if (i2c_dev->setup->analog_filter) 540 if (i2c_dev->setup.analog_filter)
541 stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, 541 stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
542 STM32F7_I2C_CR1_ANFOFF); 542 STM32F7_I2C_CR1_ANFOFF);
543 else 543 else
@@ -887,22 +887,19 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
887 } 887 }
888 888
889 setup = of_device_get_match_data(&pdev->dev); 889 setup = of_device_get_match_data(&pdev->dev);
890 i2c_dev->setup->rise_time = setup->rise_time; 890 i2c_dev->setup = *setup;
891 i2c_dev->setup->fall_time = setup->fall_time;
892 i2c_dev->setup->dnf = setup->dnf;
893 i2c_dev->setup->analog_filter = setup->analog_filter;
894 891
895 ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns", 892 ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-rising-time-ns",
896 &rise_time); 893 &rise_time);
897 if (!ret) 894 if (!ret)
898 i2c_dev->setup->rise_time = rise_time; 895 i2c_dev->setup.rise_time = rise_time;
899 896
900 ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns", 897 ret = device_property_read_u32(i2c_dev->dev, "i2c-scl-falling-time-ns",
901 &fall_time); 898 &fall_time);
902 if (!ret) 899 if (!ret)
903 i2c_dev->setup->fall_time = fall_time; 900 i2c_dev->setup.fall_time = fall_time;
904 901
905 ret = stm32f7_i2c_setup_timing(i2c_dev, i2c_dev->setup); 902 ret = stm32f7_i2c_setup_timing(i2c_dev, &i2c_dev->setup);
906 if (ret) 903 if (ret)
907 goto clk_free; 904 goto clk_free;
908 905
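
The stm32f7 bug in miniature: setup was a pointer that was never pointed at storage, and probe wrote through it. Embedding the struct by value and copying the const match data in gives the writes somewhere to land and keeps the template read-only. A sketch with illustrative names only:

    #include <stdio.h>

    struct i2c_setup { int rise_ns; int fall_ns; };

    static const struct i2c_setup stm32f7_setup = { 25, 10 };  /* match data */

    struct dev_priv {
        struct i2c_setup setup;   /* by value, not a dangling pointer */
    };

    static void probe(struct dev_priv *priv, int rise_override)
    {
        priv->setup = stm32f7_setup;              /* start from the template */
        if (rise_override)
            priv->setup.rise_ns = rise_override;  /* e.g. a DT override */
    }

    int main(void)
    {
        struct dev_priv priv;

        probe(&priv, 100);
        printf("rise=%d fall=%d\n", priv.setup.rise_ns, priv.setup.fall_ns);
        return 0;
    }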
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 01b2adfd8226..eaf39e5db08b 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1451,6 +1451,7 @@ int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
1451 if (hwif_init(hwif) == 0) { 1451 if (hwif_init(hwif) == 0) {
1452 printk(KERN_INFO "%s: failed to initialize IDE " 1452 printk(KERN_INFO "%s: failed to initialize IDE "
1453 "interface\n", hwif->name); 1453 "interface\n", hwif->name);
1454 device_unregister(hwif->portdev);
1454 device_unregister(&hwif->gendev); 1455 device_unregister(&hwif->gendev);
1455 ide_disable_port(hwif); 1456 ide_disable_port(hwif);
1456 continue; 1457 continue;
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
index 86aa88aeb3a6..acf874800ca4 100644
--- a/drivers/ide/ide-scan-pci.c
+++ b/drivers/ide/ide-scan-pci.c
@@ -56,6 +56,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
56{ 56{
57 struct list_head *l; 57 struct list_head *l;
58 struct pci_driver *d; 58 struct pci_driver *d;
59 int ret;
59 60
60 list_for_each(l, &ide_pci_drivers) { 61 list_for_each(l, &ide_pci_drivers) {
61 d = list_entry(l, struct pci_driver, node); 62 d = list_entry(l, struct pci_driver, node);
@@ -63,10 +64,14 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
63 const struct pci_device_id *id = 64 const struct pci_device_id *id =
64 pci_match_id(d->id_table, dev); 65 pci_match_id(d->id_table, dev);
65 66
66 if (id != NULL && d->probe(dev, id) >= 0) { 67 if (id != NULL) {
67 dev->driver = d; 68 pci_assign_irq(dev);
68 pci_dev_get(dev); 69 ret = d->probe(dev, id);
69 return 1; 70 if (ret >= 0) {
71 dev->driver = d;
72 pci_dev_get(dev);
73 return 1;
74 }
70 } 75 }
71 } 76 }
72 } 77 }
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 112d2fe1bcdb..fdc8e813170c 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -179,6 +179,7 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
179/** 179/**
180 * ide_pci_enable - do PCI enables 180 * ide_pci_enable - do PCI enables
181 * @dev: PCI device 181 * @dev: PCI device
182 * @bars: PCI BARs mask
182 * @d: IDE port info 183 * @d: IDE port info
183 * 184 *
184 * Enable the IDE PCI device. We attempt to enable the device in full 185 * Enable the IDE PCI device. We attempt to enable the device in full
@@ -189,9 +190,10 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
189 * Returns zero on success or an error code 190 * Returns zero on success or an error code
190 */ 191 */
191 192
192static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d) 193static int ide_pci_enable(struct pci_dev *dev, int bars,
194 const struct ide_port_info *d)
193{ 195{
194 int ret, bars; 196 int ret;
195 197
196 if (pci_enable_device(dev)) { 198 if (pci_enable_device(dev)) {
197 ret = pci_enable_device_io(dev); 199 ret = pci_enable_device_io(dev);
@@ -216,18 +218,6 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
216 goto out; 218 goto out;
217 } 219 }
218 220
219 if (d->host_flags & IDE_HFLAG_SINGLE)
220 bars = (1 << 2) - 1;
221 else
222 bars = (1 << 4) - 1;
223
224 if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
225 if (d->host_flags & IDE_HFLAG_CS5520)
226 bars |= (1 << 2);
227 else
228 bars |= (1 << 4);
229 }
230
231 ret = pci_request_selected_regions(dev, bars, d->name); 221 ret = pci_request_selected_regions(dev, bars, d->name);
232 if (ret < 0) 222 if (ret < 0)
233 printk(KERN_ERR "%s %s: can't reserve resources\n", 223 printk(KERN_ERR "%s %s: can't reserve resources\n",
@@ -403,6 +393,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
403/** 393/**
404 * ide_setup_pci_controller - set up IDE PCI 394 * ide_setup_pci_controller - set up IDE PCI
405 * @dev: PCI device 395 * @dev: PCI device
396 * @bars: PCI BARs mask
406 * @d: IDE port info 397 * @d: IDE port info
407 * @noisy: verbose flag 398 * @noisy: verbose flag
408 * 399 *
@@ -411,7 +402,7 @@ int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
411 * and enables it if need be 402 * and enables it if need be
412 */ 403 */
413 404
414static int ide_setup_pci_controller(struct pci_dev *dev, 405static int ide_setup_pci_controller(struct pci_dev *dev, int bars,
415 const struct ide_port_info *d, int noisy) 406 const struct ide_port_info *d, int noisy)
416{ 407{
417 int ret; 408 int ret;
@@ -420,7 +411,7 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
420 if (noisy) 411 if (noisy)
421 ide_setup_pci_noise(dev, d); 412 ide_setup_pci_noise(dev, d);
422 413
423 ret = ide_pci_enable(dev, d); 414 ret = ide_pci_enable(dev, bars, d);
424 if (ret < 0) 415 if (ret < 0)
425 goto out; 416 goto out;
426 417
@@ -428,16 +419,20 @@ static int ide_setup_pci_controller(struct pci_dev *dev,
428 if (ret < 0) { 419 if (ret < 0) {
429 printk(KERN_ERR "%s %s: error accessing PCI regs\n", 420 printk(KERN_ERR "%s %s: error accessing PCI regs\n",
430 d->name, pci_name(dev)); 421 d->name, pci_name(dev));
431 goto out; 422 goto out_free_bars;
432 } 423 }
433 if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */ 424 if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */
434 ret = ide_pci_configure(dev, d); 425 ret = ide_pci_configure(dev, d);
435 if (ret < 0) 426 if (ret < 0)
436 goto out; 427 goto out_free_bars;
437 printk(KERN_INFO "%s %s: device enabled (Linux)\n", 428 printk(KERN_INFO "%s %s: device enabled (Linux)\n",
438 d->name, pci_name(dev)); 429 d->name, pci_name(dev));
439 } 430 }
440 431
432 goto out;
433
434out_free_bars:
435 pci_release_selected_regions(dev, bars);
441out: 436out:
442 return ret; 437 return ret;
443} 438}
@@ -540,13 +535,28 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
540{ 535{
541 struct pci_dev *pdev[] = { dev1, dev2 }; 536 struct pci_dev *pdev[] = { dev1, dev2 };
542 struct ide_host *host; 537 struct ide_host *host;
543 int ret, i, n_ports = dev2 ? 4 : 2; 538 int ret, i, n_ports = dev2 ? 4 : 2, bars;
544 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL }; 539 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
545 540
541 if (d->host_flags & IDE_HFLAG_SINGLE)
542 bars = (1 << 2) - 1;
543 else
544 bars = (1 << 4) - 1;
545
546 if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
547 if (d->host_flags & IDE_HFLAG_CS5520)
548 bars |= (1 << 2);
549 else
550 bars |= (1 << 4);
551 }
552
546 for (i = 0; i < n_ports / 2; i++) { 553 for (i = 0; i < n_ports / 2; i++) {
547 ret = ide_setup_pci_controller(pdev[i], d, !i); 554 ret = ide_setup_pci_controller(pdev[i], bars, d, !i);
548 if (ret < 0) 555 if (ret < 0) {
556 if (i == 1)
557 pci_release_selected_regions(pdev[0], bars);
549 goto out; 558 goto out;
559 }
550 560
551 ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]); 561 ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]);
552 } 562 }
@@ -554,7 +564,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
554 host = ide_host_alloc(d, hws, n_ports); 564 host = ide_host_alloc(d, hws, n_ports);
555 if (host == NULL) { 565 if (host == NULL) {
556 ret = -ENOMEM; 566 ret = -ENOMEM;
557 goto out; 567 goto out_free_bars;
558 } 568 }
559 569
560 host->dev[0] = &dev1->dev; 570 host->dev[0] = &dev1->dev;
@@ -576,7 +586,7 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
576 * do_ide_setup_pci_device() on the first device! 586 * do_ide_setup_pci_device() on the first device!
577 */ 587 */
578 if (ret < 0) 588 if (ret < 0)
579 goto out; 589 goto out_free_bars;
580 590
581 /* fixup IRQ */ 591 /* fixup IRQ */
582 if (ide_pci_is_in_compatibility_mode(pdev[i])) { 592 if (ide_pci_is_in_compatibility_mode(pdev[i])) {
@@ -589,6 +599,13 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
589 ret = ide_host_register(host, d, hws); 599 ret = ide_host_register(host, d, hws);
590 if (ret) 600 if (ret)
591 ide_host_free(host); 601 ide_host_free(host);
602 else
603 goto out;
604
605out_free_bars:
606 i = n_ports / 2;
607 while (i--)
608 pci_release_selected_regions(pdev[i], bars);
592out: 609out:
593 return ret; 610 return ret;
594} 611}
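
The net effect of this file's changes: the BAR mask is computed once in ide_pci_init_two(), threaded down through ide_setup_pci_controller() and ide_pci_enable(), and the regions are released with pci_release_selected_regions() on every failure path, so no error exit leaks them. A standalone sketch of the mask computation; the flag values are redefined here for the sketch.

#include <stdio.h>

#define HFLAG_SINGLE	(1u << 0)
#define HFLAG_NO_DMA	(1u << 1)
#define HFLAG_CS5520	(1u << 2)

static unsigned ide_bars(unsigned host_flags)
{
	unsigned bars;

	if (host_flags & HFLAG_SINGLE)
		bars = (1 << 2) - 1;	/* BARs 0-1: one port */
	else
		bars = (1 << 4) - 1;	/* BARs 0-3: two ports */

	if (!(host_flags & HFLAG_NO_DMA)) {
		if (host_flags & HFLAG_CS5520)
			bars |= 1 << 2;	/* CS5520 keeps DMA regs in BAR 2 */
		else
			bars |= 1 << 4;	/* bus-master DMA in BAR 4 */
	}
	return bars;
}

int main(void)
{
	printf("mask=%#x\n", ide_bars(0));		/* 0x1f */
	printf("mask=%#x\n", ide_bars(HFLAG_SINGLE));	/* 0x13 */
	return 0;
}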
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index 30825bb9b8e9..8861c052155a 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -100,6 +100,8 @@ int iwpm_register_pid(struct iwpm_dev_data *pm_msg, u8 nl_client)
100 if (ret) 100 if (ret)
101 goto pid_query_error; 101 goto pid_query_error;
102 102
103 nlmsg_end(skb, nlh);
104
103 pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n", 105 pr_debug("%s: Multicasting a nlmsg (dev = %s ifname = %s iwpm = %s)\n",
104 __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name); 106 __func__, pm_msg->dev_name, pm_msg->if_name, iwpm_ulib_name);
105 107
@@ -170,6 +172,8 @@ int iwpm_add_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
170 &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR); 172 &pm_msg->loc_addr, IWPM_NLA_MANAGE_ADDR);
171 if (ret) 173 if (ret)
172 goto add_mapping_error; 174 goto add_mapping_error;
175
176 nlmsg_end(skb, nlh);
173 nlmsg_request->req_buffer = pm_msg; 177 nlmsg_request->req_buffer = pm_msg;
174 178
175 ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); 179 ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -246,6 +250,8 @@ int iwpm_add_and_query_mapping(struct iwpm_sa_data *pm_msg, u8 nl_client)
246 &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR); 250 &pm_msg->rem_addr, IWPM_NLA_QUERY_REMOTE_ADDR);
247 if (ret) 251 if (ret)
248 goto query_mapping_error; 252 goto query_mapping_error;
253
254 nlmsg_end(skb, nlh);
249 nlmsg_request->req_buffer = pm_msg; 255 nlmsg_request->req_buffer = pm_msg;
250 256
251 ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); 257 ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
@@ -308,6 +314,8 @@ int iwpm_remove_mapping(struct sockaddr_storage *local_addr, u8 nl_client)
308 if (ret) 314 if (ret)
309 goto remove_mapping_error; 315 goto remove_mapping_error;
310 316
317 nlmsg_end(skb, nlh);
318
311 ret = rdma_nl_unicast_wait(skb, iwpm_user_pid); 319 ret = rdma_nl_unicast_wait(skb, iwpm_user_pid);
312 if (ret) { 320 if (ret) {
313 skb = NULL; /* skb is freed in the netlink send-op handling */ 321 skb = NULL; /* skb is freed in the netlink send-op handling */
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index c81c55942626..3c4faadb8cdd 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -597,6 +597,9 @@ static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
597 &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM); 597 &mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
598 if (ret) 598 if (ret)
599 goto mapinfo_num_error; 599 goto mapinfo_num_error;
600
601 nlmsg_end(skb, nlh);
602
600 ret = rdma_nl_unicast(skb, iwpm_pid); 603 ret = rdma_nl_unicast(skb, iwpm_pid);
601 if (ret) { 604 if (ret) {
602 skb = NULL; 605 skb = NULL;
@@ -678,6 +681,8 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
678 if (ret) 681 if (ret)
679 goto send_mapping_info_unlock; 682 goto send_mapping_info_unlock;
680 683
684 nlmsg_end(skb, nlh);
685
681 iwpm_print_sockaddr(&map_info->local_sockaddr, 686 iwpm_print_sockaddr(&map_info->local_sockaddr,
682 "send_mapping_info: Local sockaddr:"); 687 "send_mapping_info: Local sockaddr:");
683 iwpm_print_sockaddr(&map_info->mapped_sockaddr, 688 iwpm_print_sockaddr(&map_info->mapped_sockaddr,
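
Both iwpm files gain nlmsg_end() calls for the same reason: a netlink message carries its total length in nlmsg_len, and that field is only correct once all attributes have been appended, so the message must be finalized before the unicast/multicast send. A userspace sketch of the idea using the raw NLMSG_* macros; the buffer layout and payload are illustrative.

#include <linux/netlink.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		char body[64];
	} msg;
	const char payload[] = "example attribute bytes";

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_flags = NLM_F_REQUEST;
	msg.nlh.nlmsg_len = NLMSG_LENGTH(0);	/* header only, so far */

	/* append the payload, then finalize the length; this last
	 * fix-up is what the added nlmsg_end() calls perform */
	memcpy(NLMSG_DATA(&msg.nlh), payload, sizeof(payload));
	msg.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(payload));

	printf("final nlmsg_len=%u\n", msg.nlh.nlmsg_len);
	return 0;
}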
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 70ad19c4c73e..88bdafb297f5 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -432,8 +432,10 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
432 atomic_set(&qp->qp_sec->error_list_count, 0); 432 atomic_set(&qp->qp_sec->error_list_count, 0);
433 init_completion(&qp->qp_sec->error_complete); 433 init_completion(&qp->qp_sec->error_complete);
434 ret = security_ib_alloc_security(&qp->qp_sec->security); 434 ret = security_ib_alloc_security(&qp->qp_sec->security);
435 if (ret) 435 if (ret) {
436 kfree(qp->qp_sec); 436 kfree(qp->qp_sec);
437 qp->qp_sec = NULL;
438 }
437 439
438 return ret; 440 return ret;
439} 441}
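
The fix above clears qp->qp_sec after freeing it, so later error handling sees a NULL pointer instead of a dangling one. A self-contained sketch of the free-then-NULL pattern:

#include <stdlib.h>

struct qp {
	void *sec;
};

static void destroy(struct qp *qp)
{
	free(qp->sec);		/* free(NULL) is a safe no-op */
	qp->sec = NULL;		/* later paths skip or re-free harmlessly */
}

int main(void)
{
	struct qp qp = { .sec = malloc(32) };

	destroy(&qp);
	destroy(&qp);	/* harmless thanks to the cleared pointer */
	return 0;
}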
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 4ab30d832ac5..52a2cf2d83aa 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3869,15 +3869,15 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3869 resp.raw_packet_caps = attr.raw_packet_caps; 3869 resp.raw_packet_caps = attr.raw_packet_caps;
3870 resp.response_length += sizeof(resp.raw_packet_caps); 3870 resp.response_length += sizeof(resp.raw_packet_caps);
3871 3871
3872 if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps)) 3872 if (ucore->outlen < resp.response_length + sizeof(resp.tm_caps))
3873 goto end; 3873 goto end;
3874 3874
3875 resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size; 3875 resp.tm_caps.max_rndv_hdr_size = attr.tm_caps.max_rndv_hdr_size;
3876 resp.xrq_caps.max_num_tags = attr.xrq_caps.max_num_tags; 3876 resp.tm_caps.max_num_tags = attr.tm_caps.max_num_tags;
3877 resp.xrq_caps.max_ops = attr.xrq_caps.max_ops; 3877 resp.tm_caps.max_ops = attr.tm_caps.max_ops;
3878 resp.xrq_caps.max_sge = attr.xrq_caps.max_sge; 3878 resp.tm_caps.max_sge = attr.tm_caps.max_sge;
3879 resp.xrq_caps.flags = attr.xrq_caps.flags; 3879 resp.tm_caps.flags = attr.tm_caps.flags;
3880 resp.response_length += sizeof(resp.xrq_caps); 3880 resp.response_length += sizeof(resp.tm_caps);
3881end: 3881end:
3882 err = ib_copy_to_udata(ucore, &resp, resp.response_length); 3882 err = ib_copy_to_udata(ucore, &resp, resp.response_length);
3883 return err; 3883 return err;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index ee9e27dc799b..de57d6c11a25 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1646,7 +1646,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
1646 */ 1646 */
1647 if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { 1647 if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
1648 if (attr.qp_state >= IB_QPS_INIT) { 1648 if (attr.qp_state >= IB_QPS_INIT) {
1649 if (qp->device->get_link_layer(qp->device, attr.port_num) != 1649 if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
1650 IB_LINK_LAYER_INFINIBAND) 1650 IB_LINK_LAYER_INFINIBAND)
1651 return true; 1651 return true;
1652 goto lid_check; 1652 goto lid_check;
@@ -1655,7 +1655,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
1655 1655
1656 /* Can't get a quick answer, iterate over all ports */ 1656 /* Can't get a quick answer, iterate over all ports */
1657 for (port = 0; port < qp->device->phys_port_cnt; port++) 1657 for (port = 0; port < qp->device->phys_port_cnt; port++)
1658 if (qp->device->get_link_layer(qp->device, port) != 1658 if (rdma_port_get_link_layer(qp->device, port) !=
1659 IB_LINK_LAYER_INFINIBAND) 1659 IB_LINK_LAYER_INFINIBAND)
1660 num_eth_ports++; 1660 num_eth_ports++;
1661 1661
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index b3ad37fec578..ecbac91b2e14 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -93,11 +93,13 @@ struct bnxt_re_dev {
93 struct ib_device ibdev; 93 struct ib_device ibdev;
94 struct list_head list; 94 struct list_head list;
95 unsigned long flags; 95 unsigned long flags;
96#define BNXT_RE_FLAG_NETDEV_REGISTERED 0 96#define BNXT_RE_FLAG_NETDEV_REGISTERED 0
97#define BNXT_RE_FLAG_IBDEV_REGISTERED 1 97#define BNXT_RE_FLAG_IBDEV_REGISTERED 1
98#define BNXT_RE_FLAG_GOT_MSIX 2 98#define BNXT_RE_FLAG_GOT_MSIX 2
99#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 8 99#define BNXT_RE_FLAG_HAVE_L2_REF 3
100#define BNXT_RE_FLAG_QOS_WORK_REG 16 100#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
101#define BNXT_RE_FLAG_QOS_WORK_REG 5
102#define BNXT_RE_FLAG_TASK_IN_PROG 6
101 struct net_device *netdev; 103 struct net_device *netdev;
102 unsigned int version, major, minor; 104 unsigned int version, major, minor;
103 struct bnxt_en_dev *en_dev; 105 struct bnxt_en_dev *en_dev;
@@ -108,6 +110,8 @@ struct bnxt_re_dev {
108 110
109 struct delayed_work worker; 111 struct delayed_work worker;
110 u8 cur_prio_map; 112 u8 cur_prio_map;
113 u8 active_speed;
114 u8 active_width;
111 115
112 /* FP Notification Queue (CQ & SRQ) */ 116 /* FP Notification Queue (CQ & SRQ) */
113 struct tasklet_struct nq_task; 117 struct tasklet_struct nq_task;
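
The flag renumbering matters because these values are consumed by set_bit()/test_bit(), which take a bit position, not a mask: the old values 8 and 16 silently addressed bits 8 and 16 rather than the intended third and fourth flags. A userspace equivalent of the semantics:

#include <stdio.h>

#define FLAG_RCFW_CHANNEL_EN	4	/* bit position, as in the fixed header */

int main(void)
{
	unsigned long flags = 0;

	flags |= 1UL << FLAG_RCFW_CHANNEL_EN;	/* ~set_bit(4, &flags) */
	printf("%d\n",				/* ~test_bit(): prints 1 */
	       !!(flags & (1UL << FLAG_RCFW_CHANNEL_EN)));
	return 0;
}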
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 01eee15bbd65..0d89621d9fe8 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -259,14 +259,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u8 port_num,
259 port_attr->sm_sl = 0; 259 port_attr->sm_sl = 0;
260 port_attr->subnet_timeout = 0; 260 port_attr->subnet_timeout = 0;
261 port_attr->init_type_reply = 0; 261 port_attr->init_type_reply = 0;
262 /* call the underlying netdev's ethtool hooks to query speed settings 262 port_attr->active_speed = rdev->active_speed;
263 * for which we acquire rtnl_lock _only_ if it's registered with 263 port_attr->active_width = rdev->active_width;
264 * IB stack to avoid race in the NETDEV_UNREG path 264
265 */
266 if (test_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags))
267 if (ib_get_eth_speed(ibdev, port_num, &port_attr->active_speed,
268 &port_attr->active_width))
269 return -EINVAL;
270 return 0; 265 return 0;
271} 266}
272 267
@@ -319,6 +314,7 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
319 struct bnxt_re_gid_ctx *ctx, **ctx_tbl; 314 struct bnxt_re_gid_ctx *ctx, **ctx_tbl;
320 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); 315 struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
321 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl; 316 struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
317 struct bnxt_qplib_gid *gid_to_del;
322 318
323 /* Delete the entry from the hardware */ 319 /* Delete the entry from the hardware */
324 ctx = *context; 320 ctx = *context;
@@ -328,11 +324,25 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num,
328 if (sgid_tbl && sgid_tbl->active) { 324 if (sgid_tbl && sgid_tbl->active) {
329 if (ctx->idx >= sgid_tbl->max) 325 if (ctx->idx >= sgid_tbl->max)
330 return -EINVAL; 326 return -EINVAL;
327 gid_to_del = &sgid_tbl->tbl[ctx->idx];
328 /* DEL_GID is called in WQ context(netdevice_event_work_handler)
329 * or via the ib_unregister_device path. In the former case QP1
330 * may not be destroyed yet, in which case just return as FW
 331 * needs that entry to be present and will fail its deletion.
332 * We could get invoked again after QP1 is destroyed OR get an
333 * ADD_GID call with a different GID value for the same index
334 * where we issue MODIFY_GID cmd to update the GID entry -- TBD
335 */
336 if (ctx->idx == 0 &&
337 rdma_link_local_addr((struct in6_addr *)gid_to_del) &&
338 ctx->refcnt == 1 && rdev->qp1_sqp) {
339 dev_dbg(rdev_to_dev(rdev),
340 "Trying to delete GID0 while QP1 is alive\n");
341 return -EFAULT;
342 }
331 ctx->refcnt--; 343 ctx->refcnt--;
332 if (!ctx->refcnt) { 344 if (!ctx->refcnt) {
333 rc = bnxt_qplib_del_sgid(sgid_tbl, 345 rc = bnxt_qplib_del_sgid(sgid_tbl, gid_to_del, true);
334 &sgid_tbl->tbl[ctx->idx],
335 true);
336 if (rc) { 346 if (rc) {
337 dev_err(rdev_to_dev(rdev), 347 dev_err(rdev_to_dev(rdev),
338 "Failed to remove GID: %#x", rc); 348 "Failed to remove GID: %#x", rc);
@@ -816,6 +826,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
816 826
817 kfree(rdev->sqp_ah); 827 kfree(rdev->sqp_ah);
818 kfree(rdev->qp1_sqp); 828 kfree(rdev->qp1_sqp);
829 rdev->qp1_sqp = NULL;
830 rdev->sqp_ah = NULL;
819 } 831 }
820 832
821 if (!IS_ERR_OR_NULL(qp->rumem)) 833 if (!IS_ERR_OR_NULL(qp->rumem))
@@ -1436,11 +1448,14 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1436 qp->qplib_qp.modify_flags |= 1448 qp->qplib_qp.modify_flags |=
1437 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; 1449 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1438 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu); 1450 qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
1451 qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
1439 } else if (qp_attr->qp_state == IB_QPS_RTR) { 1452 } else if (qp_attr->qp_state == IB_QPS_RTR) {
1440 qp->qplib_qp.modify_flags |= 1453 qp->qplib_qp.modify_flags |=
1441 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU; 1454 CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
1442 qp->qplib_qp.path_mtu = 1455 qp->qplib_qp.path_mtu =
1443 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu)); 1456 __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
1457 qp->qplib_qp.mtu =
1458 ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
1444 } 1459 }
1445 1460
1446 if (qp_attr_mask & IB_QP_TIMEOUT) { 1461 if (qp_attr_mask & IB_QP_TIMEOUT) {
@@ -1551,43 +1566,46 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1551{ 1566{
1552 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); 1567 struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
1553 struct bnxt_re_dev *rdev = qp->rdev; 1568 struct bnxt_re_dev *rdev = qp->rdev;
1554 struct bnxt_qplib_qp qplib_qp; 1569 struct bnxt_qplib_qp *qplib_qp;
1555 int rc; 1570 int rc;
1556 1571
1557 memset(&qplib_qp, 0, sizeof(struct bnxt_qplib_qp)); 1572 qplib_qp = kzalloc(sizeof(*qplib_qp), GFP_KERNEL);
1558 qplib_qp.id = qp->qplib_qp.id; 1573 if (!qplib_qp)
1559 qplib_qp.ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index; 1574 return -ENOMEM;
1575
1576 qplib_qp->id = qp->qplib_qp.id;
1577 qplib_qp->ah.host_sgid_index = qp->qplib_qp.ah.host_sgid_index;
1560 1578
1561 rc = bnxt_qplib_query_qp(&rdev->qplib_res, &qplib_qp); 1579 rc = bnxt_qplib_query_qp(&rdev->qplib_res, qplib_qp);
1562 if (rc) { 1580 if (rc) {
1563 dev_err(rdev_to_dev(rdev), "Failed to query HW QP"); 1581 dev_err(rdev_to_dev(rdev), "Failed to query HW QP");
1564 return rc; 1582 goto out;
1565 } 1583 }
1566 qp_attr->qp_state = __to_ib_qp_state(qplib_qp.state); 1584 qp_attr->qp_state = __to_ib_qp_state(qplib_qp->state);
1567 qp_attr->en_sqd_async_notify = qplib_qp.en_sqd_async_notify ? 1 : 0; 1585 qp_attr->en_sqd_async_notify = qplib_qp->en_sqd_async_notify ? 1 : 0;
1568 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp.access); 1586 qp_attr->qp_access_flags = __to_ib_access_flags(qplib_qp->access);
1569 qp_attr->pkey_index = qplib_qp.pkey_index; 1587 qp_attr->pkey_index = qplib_qp->pkey_index;
1570 qp_attr->qkey = qplib_qp.qkey; 1588 qp_attr->qkey = qplib_qp->qkey;
1571 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; 1589 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1572 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp.ah.flow_label, 1590 rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
1573 qplib_qp.ah.host_sgid_index, 1591 qplib_qp->ah.host_sgid_index,
1574 qplib_qp.ah.hop_limit, 1592 qplib_qp->ah.hop_limit,
1575 qplib_qp.ah.traffic_class); 1593 qplib_qp->ah.traffic_class);
1576 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp.ah.dgid.data); 1594 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, qplib_qp->ah.dgid.data);
1577 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp.ah.sl); 1595 rdma_ah_set_sl(&qp_attr->ah_attr, qplib_qp->ah.sl);
1578 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp.ah.dmac); 1596 ether_addr_copy(qp_attr->ah_attr.roce.dmac, qplib_qp->ah.dmac);
1579 qp_attr->path_mtu = __to_ib_mtu(qplib_qp.path_mtu); 1597 qp_attr->path_mtu = __to_ib_mtu(qplib_qp->path_mtu);
1580 qp_attr->timeout = qplib_qp.timeout; 1598 qp_attr->timeout = qplib_qp->timeout;
1581 qp_attr->retry_cnt = qplib_qp.retry_cnt; 1599 qp_attr->retry_cnt = qplib_qp->retry_cnt;
1582 qp_attr->rnr_retry = qplib_qp.rnr_retry; 1600 qp_attr->rnr_retry = qplib_qp->rnr_retry;
1583 qp_attr->min_rnr_timer = qplib_qp.min_rnr_timer; 1601 qp_attr->min_rnr_timer = qplib_qp->min_rnr_timer;
1584 qp_attr->rq_psn = qplib_qp.rq.psn; 1602 qp_attr->rq_psn = qplib_qp->rq.psn;
1585 qp_attr->max_rd_atomic = qplib_qp.max_rd_atomic; 1603 qp_attr->max_rd_atomic = qplib_qp->max_rd_atomic;
1586 qp_attr->sq_psn = qplib_qp.sq.psn; 1604 qp_attr->sq_psn = qplib_qp->sq.psn;
1587 qp_attr->max_dest_rd_atomic = qplib_qp.max_dest_rd_atomic; 1605 qp_attr->max_dest_rd_atomic = qplib_qp->max_dest_rd_atomic;
1588 qp_init_attr->sq_sig_type = qplib_qp.sig_type ? IB_SIGNAL_ALL_WR : 1606 qp_init_attr->sq_sig_type = qplib_qp->sig_type ? IB_SIGNAL_ALL_WR :
1589 IB_SIGNAL_REQ_WR; 1607 IB_SIGNAL_REQ_WR;
1590 qp_attr->dest_qp_num = qplib_qp.dest_qpn; 1608 qp_attr->dest_qp_num = qplib_qp->dest_qpn;
1591 1609
1592 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe; 1610 qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
1593 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge; 1611 qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
@@ -1596,7 +1614,9 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
1596 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data; 1614 qp_attr->cap.max_inline_data = qp->qplib_qp.max_inline_data;
1597 qp_init_attr->cap = qp_attr->cap; 1615 qp_init_attr->cap = qp_attr->cap;
1598 1616
1599 return 0; 1617out:
1618 kfree(qplib_qp);
1619 return rc;
1600} 1620}
1601 1621
 1602/* Routine for sending QP1 packets for RoCE V1 and V2 1622/* Routine for sending QP1 packets for RoCE V1 and V2
@@ -1908,6 +1928,7 @@ static int bnxt_re_build_atomic_wqe(struct ib_send_wr *wr,
1908 switch (wr->opcode) { 1928 switch (wr->opcode) {
1909 case IB_WR_ATOMIC_CMP_AND_SWP: 1929 case IB_WR_ATOMIC_CMP_AND_SWP:
1910 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP; 1930 wqe->type = BNXT_QPLIB_SWQE_TYPE_ATOMIC_CMP_AND_SWP;
1931 wqe->atomic.cmp_data = atomic_wr(wr)->compare_add;
1911 wqe->atomic.swap_data = atomic_wr(wr)->swap; 1932 wqe->atomic.swap_data = atomic_wr(wr)->swap;
1912 break; 1933 break;
1913 case IB_WR_ATOMIC_FETCH_AND_ADD: 1934 case IB_WR_ATOMIC_FETCH_AND_ADD:
@@ -3062,7 +3083,7 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
3062 return rc; 3083 return rc;
3063 } 3084 }
3064 3085
3065 if (mr->npages && mr->pages) { 3086 if (mr->pages) {
3066 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, 3087 rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
3067 &mr->qplib_frpl); 3088 &mr->qplib_frpl);
3068 kfree(mr->pages); 3089 kfree(mr->pages);
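
The query-QP rewrite moves struct bnxt_qplib_qp off the kernel stack (kernel stacks are small and fixed-size) onto the heap, with a single out: label freeing it on every path. A sketch of the shape, with calloc/free standing in for kzalloc/kfree and the 4 KiB blob as an assumed stand-in size:

#include <stdlib.h>

struct big_qp_state {
	char blob[4096];	/* stand-in for struct bnxt_qplib_qp */
};

static int query(void)
{
	struct big_qp_state *st = calloc(1, sizeof(*st));	/* ~kzalloc */
	int rc = 0;

	if (!st)
		return -1;	/* -ENOMEM in the driver */

	/* ... fill st; on error set rc and fall through ... */

	free(st);	/* single cleanup point, like the new "out:" label */
	return rc;
}

int main(void)
{
	return query() ? 1 : 0;
}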
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 82d1cbc27aee..e7450ea92aa9 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -1161,6 +1161,8 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
1161 } 1161 }
1162 } 1162 }
1163 set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags); 1163 set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
1164 ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
1165 &rdev->active_width);
1164 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE); 1166 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_PORT_ACTIVE);
1165 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE); 1167 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, IB_EVENT_GID_CHANGE);
1166 1168
@@ -1255,10 +1257,14 @@ static void bnxt_re_task(struct work_struct *work)
1255 else if (netif_carrier_ok(rdev->netdev)) 1257 else if (netif_carrier_ok(rdev->netdev))
1256 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1, 1258 bnxt_re_dispatch_event(&rdev->ibdev, NULL, 1,
1257 IB_EVENT_PORT_ACTIVE); 1259 IB_EVENT_PORT_ACTIVE);
1260 ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
1261 &rdev->active_width);
1258 break; 1262 break;
1259 default: 1263 default:
1260 break; 1264 break;
1261 } 1265 }
1266 smp_mb__before_atomic();
1267 clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
1262 kfree(re_work); 1268 kfree(re_work);
1263} 1269}
1264 1270
@@ -1317,6 +1323,11 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
1317 break; 1323 break;
1318 1324
1319 case NETDEV_UNREGISTER: 1325 case NETDEV_UNREGISTER:
1326 /* netdev notifier will call NETDEV_UNREGISTER again later since
1327 * we are still holding the reference to the netdev
1328 */
1329 if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags))
1330 goto exit;
1320 bnxt_re_ib_unreg(rdev, false); 1331 bnxt_re_ib_unreg(rdev, false);
1321 bnxt_re_remove_one(rdev); 1332 bnxt_re_remove_one(rdev);
1322 bnxt_re_dev_unreg(rdev); 1333 bnxt_re_dev_unreg(rdev);
@@ -1335,6 +1346,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
1335 re_work->vlan_dev = (real_dev == netdev ? 1346 re_work->vlan_dev = (real_dev == netdev ?
1336 NULL : netdev); 1347 NULL : netdev);
1337 INIT_WORK(&re_work->work, bnxt_re_task); 1348 INIT_WORK(&re_work->work, bnxt_re_task);
1349 set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
1338 queue_work(bnxt_re_wq, &re_work->work); 1350 queue_work(bnxt_re_wq, &re_work->work);
1339 } 1351 }
1340 } 1352 }
@@ -1375,6 +1387,22 @@ err_netdev:
1375 1387
1376static void __exit bnxt_re_mod_exit(void) 1388static void __exit bnxt_re_mod_exit(void)
1377{ 1389{
1390 struct bnxt_re_dev *rdev;
1391 LIST_HEAD(to_be_deleted);
1392
1393 mutex_lock(&bnxt_re_dev_lock);
1394 /* Free all adapter allocated resources */
1395 if (!list_empty(&bnxt_re_dev_list))
1396 list_splice_init(&bnxt_re_dev_list, &to_be_deleted);
1397 mutex_unlock(&bnxt_re_dev_lock);
1398
1399 list_for_each_entry(rdev, &to_be_deleted, list) {
1400 dev_info(rdev_to_dev(rdev), "Unregistering Device");
1401 bnxt_re_dev_stop(rdev);
1402 bnxt_re_ib_unreg(rdev, true);
1403 bnxt_re_remove_one(rdev);
1404 bnxt_re_dev_unreg(rdev);
1405 }
1378 unregister_netdevice_notifier(&bnxt_re_netdev_notifier); 1406 unregister_netdevice_notifier(&bnxt_re_netdev_notifier);
1379 if (bnxt_re_wq) 1407 if (bnxt_re_wq)
1380 destroy_workqueue(bnxt_re_wq); 1408 destroy_workqueue(bnxt_re_wq);
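
bnxt_re_mod_exit() now detaches the whole device list under bnxt_re_dev_lock with one list_splice_init() and tears the entries down after dropping the lock, so the potentially sleeping unregister calls never run with the mutex held. A single-threaded sketch of the splice-then-walk shape (the lock is elided, list type is illustrative):

#include <stdio.h>

struct node { struct node *next; int id; };

static struct node c = { 0, 3 }, b = { &c, 2 }, a = { &b, 1 };
static struct node *dev_list = &a;

int main(void)
{
	struct node *to_delete, *n;

	/* "lock"; splice: steal the list head in O(1) */
	to_delete = dev_list;
	dev_list = NULL;
	/* "unlock"; now free to sleep per entry */

	for (n = to_delete; n; n = n->next)
		printf("unregistering device %d\n", n->id);
	return 0;
}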
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 391bb7006e8f..2bdb1562bd21 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -107,6 +107,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
107 return -EINVAL; 107 return -EINVAL;
108 } 108 }
109 109
110 if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
111 return -ETIMEDOUT;
112
110 /* Cmdq are in 16-byte units, each request can consume 1 or more 113 /* Cmdq are in 16-byte units, each request can consume 1 or more
111 * cmdqe 114 * cmdqe
112 */ 115 */
@@ -226,6 +229,7 @@ int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
226 /* timed out */ 229 /* timed out */
227 dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec", 230 dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
228 cookie, opcode, RCFW_CMD_WAIT_TIME_MS); 231 cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
232 set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
229 return rc; 233 return rc;
230 } 234 }
231 235
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 0ed312f17c8d..85b16da287f9 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -162,8 +162,9 @@ struct bnxt_qplib_rcfw {
162 unsigned long *cmdq_bitmap; 162 unsigned long *cmdq_bitmap;
163 u32 bmap_size; 163 u32 bmap_size;
164 unsigned long flags; 164 unsigned long flags;
165#define FIRMWARE_INITIALIZED_FLAG 1 165#define FIRMWARE_INITIALIZED_FLAG BIT(0)
166#define FIRMWARE_FIRST_FLAG BIT(31) 166#define FIRMWARE_FIRST_FLAG BIT(31)
167#define FIRMWARE_TIMED_OUT BIT(3)
167 wait_queue_head_t waitq; 168 wait_queue_head_t waitq;
168 int (*aeq_handler)(struct bnxt_qplib_rcfw *, 169 int (*aeq_handler)(struct bnxt_qplib_rcfw *,
169 struct creq_func_event *); 170 struct creq_func_event *);
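
The rcfw change adds a fail-fast latch: once one command times out, FIRMWARE_TIMED_OUT stays set and every later submission returns -ETIMEDOUT immediately instead of queueing behind unresponsive firmware. A compact model of the behaviour:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool fw_timed_out;

static int send_cmd(bool simulate_timeout)
{
	if (fw_timed_out)
		return -ETIMEDOUT;	/* fail fast, no hardware access */
	if (simulate_timeout) {
		fw_timed_out = true;	/* latch, as the patch does on timeout */
		return -ETIMEDOUT;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", send_cmd(false));	/* 0 */
	printf("%d\n", send_cmd(true));		/* -ETIMEDOUT, latches */
	printf("%d\n", send_cmd(false));	/* still -ETIMEDOUT */
	return 0;
}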
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index ceaa2fa54d32..daf7a56e5d7e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2333,9 +2333,14 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2333 unsigned int stid = GET_TID(rpl); 2333 unsigned int stid = GET_TID(rpl);
2334 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); 2334 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
2335 2335
2336 if (!ep) {
2337 pr_debug("%s stid %d lookup failure!\n", __func__, stid);
2338 goto out;
2339 }
2336 pr_debug("%s ep %p\n", __func__, ep); 2340 pr_debug("%s ep %p\n", __func__, ep);
2337 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 2341 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
2338 c4iw_put_ep(&ep->com); 2342 c4iw_put_ep(&ep->com);
2343out:
2339 return 0; 2344 return 0;
2340} 2345}
2341 2346
@@ -2594,9 +2599,9 @@ fail:
2594 c4iw_put_ep(&child_ep->com); 2599 c4iw_put_ep(&child_ep->com);
2595reject: 2600reject:
2596 reject_cr(dev, hwtid, skb); 2601 reject_cr(dev, hwtid, skb);
2602out:
2597 if (parent_ep) 2603 if (parent_ep)
2598 c4iw_put_ep(&parent_ep->com); 2604 c4iw_put_ep(&parent_ep->com);
2599out:
2600 return 0; 2605 return 0;
2601} 2606}
2602 2607
@@ -3457,7 +3462,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
3457 cm_id->provider_data = ep; 3462 cm_id->provider_data = ep;
3458 goto out; 3463 goto out;
3459 } 3464 }
3460 3465 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
3461 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3466 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3462 ep->com.local_addr.ss_family); 3467 ep->com.local_addr.ss_family);
3463fail2: 3468fail2:
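
The close_listsrv_rpl() fix guards a hardware-tag lookup: get_ep_from_stid() can return NULL (for example, a late reply after teardown), so the handler must bail out before dereferencing. A small sketch of the guard, with lookup and ep as illustrative names:

#include <stdio.h>

struct ep { int refcnt; };

static struct ep *lookup(int stid) { (void)stid; return 0; }

static int handle_reply(int stid)
{
	struct ep *ep = lookup(stid);

	if (!ep) {
		fprintf(stderr, "stid %d lookup failure\n", stid);
		return 0;	/* reply consumed, nothing to wake */
	}
	ep->refcnt--;	/* safe: ep is known non-NULL here */
	return 0;
}

int main(void) { return handle_reply(42); }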
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index b2ed4b9cda6e..0be42787759f 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1066,6 +1066,8 @@ static int read_idle_sma(struct hfi1_devdata *dd, u64 *data);
1066static int thermal_init(struct hfi1_devdata *dd); 1066static int thermal_init(struct hfi1_devdata *dd);
1067 1067
1068static void update_statusp(struct hfi1_pportdata *ppd, u32 state); 1068static void update_statusp(struct hfi1_pportdata *ppd, u32 state);
1069static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
1070 int msecs);
1069static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state, 1071static int wait_logical_linkstate(struct hfi1_pportdata *ppd, u32 state,
1070 int msecs); 1072 int msecs);
1071static void log_state_transition(struct hfi1_pportdata *ppd, u32 state); 1073static void log_state_transition(struct hfi1_pportdata *ppd, u32 state);
@@ -8238,6 +8240,7 @@ static irqreturn_t general_interrupt(int irq, void *data)
8238 u64 regs[CCE_NUM_INT_CSRS]; 8240 u64 regs[CCE_NUM_INT_CSRS];
8239 u32 bit; 8241 u32 bit;
8240 int i; 8242 int i;
8243 irqreturn_t handled = IRQ_NONE;
8241 8244
8242 this_cpu_inc(*dd->int_counter); 8245 this_cpu_inc(*dd->int_counter);
8243 8246
@@ -8258,9 +8261,10 @@ static irqreturn_t general_interrupt(int irq, void *data)
8258 for_each_set_bit(bit, (unsigned long *)&regs[0], 8261 for_each_set_bit(bit, (unsigned long *)&regs[0],
8259 CCE_NUM_INT_CSRS * 64) { 8262 CCE_NUM_INT_CSRS * 64) {
8260 is_interrupt(dd, bit); 8263 is_interrupt(dd, bit);
8264 handled = IRQ_HANDLED;
8261 } 8265 }
8262 8266
8263 return IRQ_HANDLED; 8267 return handled;
8264} 8268}
8265 8269
8266static irqreturn_t sdma_interrupt(int irq, void *data) 8270static irqreturn_t sdma_interrupt(int irq, void *data)
@@ -9413,7 +9417,7 @@ static void set_qsfp_int_n(struct hfi1_pportdata *ppd, u8 enable)
9413 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask); 9417 write_csr(dd, dd->hfi1_id ? ASIC_QSFP2_MASK : ASIC_QSFP1_MASK, mask);
9414} 9418}
9415 9419
9416void reset_qsfp(struct hfi1_pportdata *ppd) 9420int reset_qsfp(struct hfi1_pportdata *ppd)
9417{ 9421{
9418 struct hfi1_devdata *dd = ppd->dd; 9422 struct hfi1_devdata *dd = ppd->dd;
9419 u64 mask, qsfp_mask; 9423 u64 mask, qsfp_mask;
@@ -9443,6 +9447,13 @@ void reset_qsfp(struct hfi1_pportdata *ppd)
9443 * for alarms and warnings 9447 * for alarms and warnings
9444 */ 9448 */
9445 set_qsfp_int_n(ppd, 1); 9449 set_qsfp_int_n(ppd, 1);
9450
9451 /*
9452 * After the reset, AOC transmitters are enabled by default. They need
9453 * to be turned off to complete the QSFP setup before they can be
9454 * enabled again.
9455 */
9456 return set_qsfp_tx(ppd, 0);
9446} 9457}
9447 9458
9448static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd, 9459static int handle_qsfp_error_conditions(struct hfi1_pportdata *ppd,
@@ -10305,6 +10316,7 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10305{ 10316{
10306 struct hfi1_devdata *dd = ppd->dd; 10317 struct hfi1_devdata *dd = ppd->dd;
10307 u32 previous_state; 10318 u32 previous_state;
10319 int offline_state_ret;
10308 int ret; 10320 int ret;
10309 10321
10310 update_lcb_cache(dd); 10322 update_lcb_cache(dd);
@@ -10326,28 +10338,11 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10326 ppd->offline_disabled_reason = 10338 ppd->offline_disabled_reason =
10327 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT); 10339 HFI1_ODR_MASK(OPA_LINKDOWN_REASON_TRANSIENT);
10328 10340
10329 /* 10341 offline_state_ret = wait_phys_link_offline_substates(ppd, 10000);
10330 * Wait for offline transition. It can take a while for 10342 if (offline_state_ret < 0)
10331 * the link to go down. 10343 return offline_state_ret;
10332 */
10333 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 10000);
10334 if (ret < 0)
10335 return ret;
10336
10337 /*
10338 * Now in charge of LCB - must be after the physical state is
10339 * offline.quiet and before host_link_state is changed.
10340 */
10341 set_host_lcb_access(dd);
10342 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10343
10344 /* make sure the logical state is also down */
10345 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10346 if (ret)
10347 force_logical_link_state_down(ppd);
10348
10349 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10350 10344
10345 /* Disabling AOC transmitters */
10351 if (ppd->port_type == PORT_TYPE_QSFP && 10346 if (ppd->port_type == PORT_TYPE_QSFP &&
10352 ppd->qsfp_info.limiting_active && 10347 ppd->qsfp_info.limiting_active &&
10353 qsfp_mod_present(ppd)) { 10348 qsfp_mod_present(ppd)) {
@@ -10365,6 +10360,30 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10365 } 10360 }
10366 10361
10367 /* 10362 /*
10363 * Wait for the offline.Quiet transition if it hasn't happened yet. It
10364 * can take a while for the link to go down.
10365 */
10366 if (offline_state_ret != PLS_OFFLINE_QUIET) {
10367 ret = wait_physical_linkstate(ppd, PLS_OFFLINE, 30000);
10368 if (ret < 0)
10369 return ret;
10370 }
10371
10372 /*
10373 * Now in charge of LCB - must be after the physical state is
10374 * offline.quiet and before host_link_state is changed.
10375 */
10376 set_host_lcb_access(dd);
10377 write_csr(dd, DC_LCB_ERR_EN, ~0ull); /* watch LCB errors */
10378
10379 /* make sure the logical state is also down */
10380 ret = wait_logical_linkstate(ppd, IB_PORT_DOWN, 1000);
10381 if (ret)
10382 force_logical_link_state_down(ppd);
10383
10384 ppd->host_link_state = HLS_LINK_COOLDOWN; /* LCB access allowed */
10385
10386 /*
10368 * The LNI has a mandatory wait time after the physical state 10387 * The LNI has a mandatory wait time after the physical state
10369 * moves to Offline.Quiet. The wait time may be different 10388 * moves to Offline.Quiet. The wait time may be different
10370 * depending on how the link went down. The 8051 firmware 10389 * depending on how the link went down. The 8051 firmware
@@ -10396,6 +10415,9 @@ static int goto_offline(struct hfi1_pportdata *ppd, u8 rem_reason)
10396 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) { 10415 & (HLS_DN_POLL | HLS_VERIFY_CAP | HLS_GOING_UP)) {
10397 /* went down while attempting link up */ 10416 /* went down while attempting link up */
10398 check_lni_states(ppd); 10417 check_lni_states(ppd);
10418
10419 /* The QSFP doesn't need to be reset on LNI failure */
10420 ppd->qsfp_info.reset_needed = 0;
10399 } 10421 }
10400 10422
10401 /* the active link width (downgrade) is 0 on link down */ 10423 /* the active link width (downgrade) is 0 on link down */
@@ -12804,6 +12826,39 @@ static int wait_physical_linkstate(struct hfi1_pportdata *ppd, u32 state,
12804 return 0; 12826 return 0;
12805} 12827}
12806 12828
12829/*
 12830 * wait_phys_link_offline_substates - wait for any offline substate
12831 * @ppd: port device
12832 * @msecs: the number of milliseconds to wait
12833 *
12834 * Wait up to msecs milliseconds for any offline physical link
12835 * state change to occur.
 12836 * Returns the reached offline physical state on success, otherwise -ETIMEDOUT.
12837 */
12838static int wait_phys_link_offline_substates(struct hfi1_pportdata *ppd,
12839 int msecs)
12840{
12841 u32 read_state;
12842 unsigned long timeout;
12843
12844 timeout = jiffies + msecs_to_jiffies(msecs);
12845 while (1) {
12846 read_state = read_physical_state(ppd->dd);
12847 if ((read_state & 0xF0) == PLS_OFFLINE)
12848 break;
12849 if (time_after(jiffies, timeout)) {
12850 dd_dev_err(ppd->dd,
12851 "timeout waiting for phy link offline.quiet substates. Read state 0x%x, %dms\n",
12852 read_state, msecs);
12853 return -ETIMEDOUT;
12854 }
12855 usleep_range(1950, 2050); /* sleep 2ms-ish */
12856 }
12857
12858 log_state_transition(ppd, read_state);
12859 return read_state;
12860}
12861
12807#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ 12862#define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \
12808(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) 12863(r &= ~SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK)
12809 12864
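
The new helper is a bounded polling loop: read the physical-state register, accept any state whose upper nibble is PLS_OFFLINE, and give up after a deadline. A userspace sketch of that loop shape, with read_state simulated and the driver's ~2 ms usleep_range() noted as a comment:

#include <errno.h>
#include <stdio.h>
#include <time.h>

#define PLS_OFFLINE 0x90

static unsigned read_state(void)
{
	static unsigned calls;
	return ++calls > 3 ? 0x95 : 0x20;	/* goes offline on 4th read */
}

static int wait_offline(int msecs)
{
	struct timespec start, now;
	unsigned state;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		state = read_state();
		if ((state & 0xF0) == PLS_OFFLINE)
			return (int)state;	/* any offline substate */
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > msecs)
			return -ETIMEDOUT;
		/* the driver sleeps ~2ms here (usleep_range) */
	}
}

int main(void)
{
	printf("state=%#x\n", wait_offline(10000));	/* 0x95 */
	return 0;
}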
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h
index b8345a60a0fb..50b8645d0b87 100644
--- a/drivers/infiniband/hw/hfi1/chip.h
+++ b/drivers/infiniband/hw/hfi1/chip.h
@@ -204,6 +204,7 @@
204#define PLS_OFFLINE_READY_TO_QUIET_LT 0x92 204#define PLS_OFFLINE_READY_TO_QUIET_LT 0x92
205#define PLS_OFFLINE_REPORT_FAILURE 0x93 205#define PLS_OFFLINE_REPORT_FAILURE 0x93
206#define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94 206#define PLS_OFFLINE_READY_TO_QUIET_BCC 0x94
207#define PLS_OFFLINE_QUIET_DURATION 0x95
207#define PLS_POLLING 0x20 208#define PLS_POLLING 0x20
208#define PLS_POLLING_QUIET 0x20 209#define PLS_POLLING_QUIET 0x20
209#define PLS_POLLING_ACTIVE 0x21 210#define PLS_POLLING_ACTIVE 0x21
@@ -722,7 +723,7 @@ void handle_link_downgrade(struct work_struct *work);
722void handle_link_bounce(struct work_struct *work); 723void handle_link_bounce(struct work_struct *work);
723void handle_start_link(struct work_struct *work); 724void handle_start_link(struct work_struct *work);
724void handle_sma_message(struct work_struct *work); 725void handle_sma_message(struct work_struct *work);
725void reset_qsfp(struct hfi1_pportdata *ppd); 726int reset_qsfp(struct hfi1_pportdata *ppd);
726void qsfp_event(struct work_struct *work); 727void qsfp_event(struct work_struct *work);
727void start_freeze_handling(struct hfi1_pportdata *ppd, int flags); 728void start_freeze_handling(struct hfi1_pportdata *ppd, int flags);
728int send_idle_sma(struct hfi1_devdata *dd, u64 message); 729int send_idle_sma(struct hfi1_devdata *dd, u64 message);
diff --git a/drivers/infiniband/hw/hfi1/eprom.c b/drivers/infiniband/hw/hfi1/eprom.c
index d46b17107901..1613af1c58d9 100644
--- a/drivers/infiniband/hw/hfi1/eprom.c
+++ b/drivers/infiniband/hw/hfi1/eprom.c
@@ -204,7 +204,10 @@ done_asic:
204 return ret; 204 return ret;
205} 205}
206 206
207/* magic character sequence that trails an image */ 207/* magic character sequence that begins an image */
208#define IMAGE_START_MAGIC "APO="
209
210/* magic character sequence that might trail an image */
208#define IMAGE_TRAIL_MAGIC "egamiAPO" 211#define IMAGE_TRAIL_MAGIC "egamiAPO"
209 212
210/* EPROM file types */ 213/* EPROM file types */
@@ -250,6 +253,7 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
250{ 253{
251 void *buffer; 254 void *buffer;
252 void *p; 255 void *p;
256 u32 length;
253 int ret; 257 int ret;
254 258
255 buffer = kmalloc(P1_SIZE, GFP_KERNEL); 259 buffer = kmalloc(P1_SIZE, GFP_KERNEL);
@@ -262,15 +266,21 @@ static int read_partition_platform_config(struct hfi1_devdata *dd, void **data,
262 return ret; 266 return ret;
263 } 267 }
264 268
265 /* scan for image magic that may trail the actual data */ 269 /* config partition is valid only if it starts with IMAGE_START_MAGIC */
266 p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE); 270 if (memcmp(buffer, IMAGE_START_MAGIC, strlen(IMAGE_START_MAGIC))) {
267 if (!p) {
268 kfree(buffer); 271 kfree(buffer);
269 return -ENOENT; 272 return -ENOENT;
270 } 273 }
271 274
275 /* scan for image magic that may trail the actual data */
276 p = strnstr(buffer, IMAGE_TRAIL_MAGIC, P1_SIZE);
277 if (p)
278 length = p - buffer;
279 else
280 length = P1_SIZE;
281
272 *data = buffer; 282 *data = buffer;
273 *size = p - buffer; 283 *size = length;
274 return 0; 284 return 0;
275} 285}
276 286
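
The eprom fix validates the partition by its leading "APO=" magic and treats the trailing "egamiAPO" marker as optional, using it only to cap the reported length. A self-contained sketch, with memmem standing in for the kernel's strnstr:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

#define START "APO="
#define TRAIL "egamiAPO"

static long image_len(const char *buf, size_t size)
{
	const char *p;

	/* valid only if the partition *starts* with the magic */
	if (memcmp(buf, START, strlen(START)))
		return -1;		/* -ENOENT in the driver */

	/* trailing magic is optional; when present it caps the length */
	p = memmem(buf, size, TRAIL, strlen(TRAIL));
	return p ? p - buf : (long)size;
}

int main(void)
{
	const char img[] = "APO=payloadegamiAPO";

	printf("%ld\n", image_len(img, sizeof(img) - 1));	/* 11 */
	return 0;
}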
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index 2bc89260235a..d9a1e9893136 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -930,15 +930,8 @@ static int assign_ctxt(struct hfi1_filedata *fd, struct hfi1_user_info *uinfo)
930 switch (ret) { 930 switch (ret) {
931 case 0: 931 case 0:
932 ret = setup_base_ctxt(fd, uctxt); 932 ret = setup_base_ctxt(fd, uctxt);
933 if (uctxt->subctxt_cnt) { 933 if (ret)
934 /* 934 deallocate_ctxt(uctxt);
935 * Base context is done (successfully or not), notify
936 * anybody using a sub-context that is waiting for
937 * this completion.
938 */
939 clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
940 wake_up(&uctxt->wait);
941 }
942 break; 935 break;
943 case 1: 936 case 1:
944 ret = complete_subctxt(fd); 937 ret = complete_subctxt(fd);
@@ -1305,25 +1298,25 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
1305 /* Now allocate the RcvHdr queue and eager buffers. */ 1298 /* Now allocate the RcvHdr queue and eager buffers. */
1306 ret = hfi1_create_rcvhdrq(dd, uctxt); 1299 ret = hfi1_create_rcvhdrq(dd, uctxt);
1307 if (ret) 1300 if (ret)
1308 return ret; 1301 goto done;
1309 1302
1310 ret = hfi1_setup_eagerbufs(uctxt); 1303 ret = hfi1_setup_eagerbufs(uctxt);
1311 if (ret) 1304 if (ret)
1312 goto setup_failed; 1305 goto done;
1313 1306
1314 /* If sub-contexts are enabled, do the appropriate setup */ 1307 /* If sub-contexts are enabled, do the appropriate setup */
1315 if (uctxt->subctxt_cnt) 1308 if (uctxt->subctxt_cnt)
1316 ret = setup_subctxt(uctxt); 1309 ret = setup_subctxt(uctxt);
1317 if (ret) 1310 if (ret)
1318 goto setup_failed; 1311 goto done;
1319 1312
1320 ret = hfi1_alloc_ctxt_rcv_groups(uctxt); 1313 ret = hfi1_alloc_ctxt_rcv_groups(uctxt);
1321 if (ret) 1314 if (ret)
1322 goto setup_failed; 1315 goto done;
1323 1316
1324 ret = init_user_ctxt(fd, uctxt); 1317 ret = init_user_ctxt(fd, uctxt);
1325 if (ret) 1318 if (ret)
1326 goto setup_failed; 1319 goto done;
1327 1320
1328 user_init(uctxt); 1321 user_init(uctxt);
1329 1322
@@ -1331,12 +1324,22 @@ static int setup_base_ctxt(struct hfi1_filedata *fd,
1331 fd->uctxt = uctxt; 1324 fd->uctxt = uctxt;
1332 hfi1_rcd_get(uctxt); 1325 hfi1_rcd_get(uctxt);
1333 1326
1334 return 0; 1327done:
1328 if (uctxt->subctxt_cnt) {
1329 /*
1330 * On error, set the failed bit so sub-contexts will clean up
1331 * correctly.
1332 */
1333 if (ret)
1334 set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags);
1335 1335
1336setup_failed: 1336 /*
1337 /* Set the failed bit so sub-context init can do the right thing */ 1337 * Base context is done (successfully or not), notify anybody
1338 set_bit(HFI1_CTXT_BASE_FAILED, &uctxt->event_flags); 1338 * using a sub-context that is waiting for this completion.
1339 deallocate_ctxt(uctxt); 1339 */
1340 clear_bit(HFI1_CTXT_BASE_UNINIT, &uctxt->event_flags);
1341 wake_up(&uctxt->wait);
1342 }
1340 1343
1341 return ret; 1344 return ret;
1342} 1345}
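
The file_ops.c rework funnels every setup failure into one done: label that marks HFI1_CTXT_BASE_FAILED on error and always clears BASE_UNINIT and wakes sub-context waiters, closing the earlier path where cleanup ran without the wake-up. A compressed sketch of that single-exit shape, with step() as an illustrative stand-in for the setup calls:

#include <stdbool.h>
#include <stdio.h>

static bool base_failed;

static int step(int n) { return n == 3 ? -1 : 0; }	/* step 3 fails */

static int setup(void)
{
	int ret = 0, i;

	for (i = 1; i <= 4; i++) {
		ret = step(i);
		if (ret)
			goto done;
	}
done:
	if (ret)
		base_failed = true;	/* ~HFI1_CTXT_BASE_FAILED */
	/* ~clear_bit(BASE_UNINIT) + wake_up(): runs on success too */
	printf("done, ret=%d failed=%d\n", ret, (int)base_failed);
	return ret;
}

int main(void) { return setup() ? 1 : 0; }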
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c
index 82447b7cdda1..09e50fd2a08f 100644
--- a/drivers/infiniband/hw/hfi1/pcie.c
+++ b/drivers/infiniband/hw/hfi1/pcie.c
@@ -68,7 +68,7 @@
68/* 68/*
69 * Code to adjust PCIe capabilities. 69 * Code to adjust PCIe capabilities.
70 */ 70 */
71static int tune_pcie_caps(struct hfi1_devdata *); 71static void tune_pcie_caps(struct hfi1_devdata *);
72 72
73/* 73/*
74 * Do all the common PCIe setup and initialization. 74 * Do all the common PCIe setup and initialization.
@@ -351,7 +351,7 @@ int pcie_speeds(struct hfi1_devdata *dd)
351 */ 351 */
352int request_msix(struct hfi1_devdata *dd, u32 msireq) 352int request_msix(struct hfi1_devdata *dd, u32 msireq)
353{ 353{
354 int nvec, ret; 354 int nvec;
355 355
356 nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq, 356 nvec = pci_alloc_irq_vectors(dd->pcidev, 1, msireq,
357 PCI_IRQ_MSIX | PCI_IRQ_LEGACY); 357 PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
@@ -360,12 +360,7 @@ int request_msix(struct hfi1_devdata *dd, u32 msireq)
360 return nvec; 360 return nvec;
361 } 361 }
362 362
363 ret = tune_pcie_caps(dd); 363 tune_pcie_caps(dd);
364 if (ret) {
365 dd_dev_err(dd, "tune_pcie_caps() failed: %d\n", ret);
366 pci_free_irq_vectors(dd->pcidev);
367 return ret;
368 }
369 364
370 /* check for legacy IRQ */ 365 /* check for legacy IRQ */
371 if (nvec == 1 && !dd->pcidev->msix_enabled) 366 if (nvec == 1 && !dd->pcidev->msix_enabled)
@@ -502,7 +497,7 @@ uint aspm_mode = ASPM_MODE_DISABLED;
502module_param_named(aspm, aspm_mode, uint, S_IRUGO); 497module_param_named(aspm, aspm_mode, uint, S_IRUGO);
503MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic"); 498MODULE_PARM_DESC(aspm, "PCIe ASPM: 0: disable, 1: enable, 2: dynamic");
504 499
505static int tune_pcie_caps(struct hfi1_devdata *dd) 500static void tune_pcie_caps(struct hfi1_devdata *dd)
506{ 501{
507 struct pci_dev *parent; 502 struct pci_dev *parent;
508 u16 rc_mpss, rc_mps, ep_mpss, ep_mps; 503 u16 rc_mpss, rc_mps, ep_mpss, ep_mps;
@@ -513,22 +508,14 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
513 * Turn on extended tags in DevCtl in case the BIOS has turned it off 508 * Turn on extended tags in DevCtl in case the BIOS has turned it off
514 * to improve WFR SDMA bandwidth 509 * to improve WFR SDMA bandwidth
515 */ 510 */
516 ret = pcie_capability_read_word(dd->pcidev, 511 ret = pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &ectl);
517 PCI_EXP_DEVCTL, &ectl); 512 if ((!ret) && !(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
518 if (ret) {
519 dd_dev_err(dd, "Unable to read from PCI config\n");
520 return ret;
521 }
522
523 if (!(ectl & PCI_EXP_DEVCTL_EXT_TAG)) {
524 dd_dev_info(dd, "Enabling PCIe extended tags\n"); 513 dd_dev_info(dd, "Enabling PCIe extended tags\n");
525 ectl |= PCI_EXP_DEVCTL_EXT_TAG; 514 ectl |= PCI_EXP_DEVCTL_EXT_TAG;
526 ret = pcie_capability_write_word(dd->pcidev, 515 ret = pcie_capability_write_word(dd->pcidev,
527 PCI_EXP_DEVCTL, ectl); 516 PCI_EXP_DEVCTL, ectl);
528 if (ret) { 517 if (ret)
529 dd_dev_err(dd, "Unable to write to PCI config\n"); 518 dd_dev_info(dd, "Unable to write to PCI config\n");
530 return ret;
531 }
532 } 519 }
533 /* Find out supported and configured values for parent (root) */ 520 /* Find out supported and configured values for parent (root) */
534 parent = dd->pcidev->bus->self; 521 parent = dd->pcidev->bus->self;
@@ -536,15 +523,22 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
536 * The driver cannot perform the tuning if it does not have 523 * The driver cannot perform the tuning if it does not have
537 * access to the upstream component. 524 * access to the upstream component.
538 */ 525 */
539 if (!parent) 526 if (!parent) {
540 return -EINVAL; 527 dd_dev_info(dd, "Parent not found\n");
528 return;
529 }
541 if (!pci_is_root_bus(parent->bus)) { 530 if (!pci_is_root_bus(parent->bus)) {
542 dd_dev_info(dd, "Parent not root\n"); 531 dd_dev_info(dd, "Parent not root\n");
543 return -EINVAL; 532 return;
533 }
534 if (!pci_is_pcie(parent)) {
535 dd_dev_info(dd, "Parent is not PCI Express capable\n");
536 return;
537 }
538 if (!pci_is_pcie(dd->pcidev)) {
539 dd_dev_info(dd, "PCI device is not PCI Express capable\n");
540 return;
544 } 541 }
545
546 if (!pci_is_pcie(parent) || !pci_is_pcie(dd->pcidev))
547 return -EINVAL;
548 rc_mpss = parent->pcie_mpss; 542 rc_mpss = parent->pcie_mpss;
549 rc_mps = ffs(pcie_get_mps(parent)) - 8; 543 rc_mps = ffs(pcie_get_mps(parent)) - 8;
550 /* Find out supported and configured values for endpoint (us) */ 544 /* Find out supported and configured values for endpoint (us) */
@@ -590,8 +584,6 @@ static int tune_pcie_caps(struct hfi1_devdata *dd)
590 ep_mrrs = max_mrrs; 584 ep_mrrs = max_mrrs;
591 pcie_set_readrq(dd->pcidev, ep_mrrs); 585 pcie_set_readrq(dd->pcidev, ep_mrrs);
592 } 586 }
593
594 return 0;
595} 587}
596 588
597/* End of PCIe capability tuning */ 589/* End of PCIe capability tuning */
diff --git a/drivers/infiniband/hw/hfi1/platform.c b/drivers/infiniband/hw/hfi1/platform.c
index a8af96d2b1b0..d486355880cb 100644
--- a/drivers/infiniband/hw/hfi1/platform.c
+++ b/drivers/infiniband/hw/hfi1/platform.c
@@ -790,7 +790,9 @@ static int tune_active_qsfp(struct hfi1_pportdata *ppd, u32 *ptr_tx_preset,
790 * reuse of stale settings established in our previous pass through. 790 * reuse of stale settings established in our previous pass through.
791 */ 791 */
792 if (ppd->qsfp_info.reset_needed) { 792 if (ppd->qsfp_info.reset_needed) {
793 reset_qsfp(ppd); 793 ret = reset_qsfp(ppd);
794 if (ret)
795 return ret;
794 refresh_qsfp_cache(ppd, &ppd->qsfp_info); 796 refresh_qsfp_cache(ppd, &ppd->qsfp_info);
795 } else { 797 } else {
796 ppd->qsfp_info.reset_needed = 1; 798 ppd->qsfp_info.reset_needed = 1;
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 9b1566468744..a65e4cbdce2f 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -201,7 +201,6 @@ enum init_completion_state {
201 CEQ_CREATED, 201 CEQ_CREATED,
202 ILQ_CREATED, 202 ILQ_CREATED,
203 IEQ_CREATED, 203 IEQ_CREATED,
204 INET_NOTIFIER,
205 IP_ADDR_REGISTERED, 204 IP_ADDR_REGISTERED,
206 RDMA_DEV_REGISTERED 205 RDMA_DEV_REGISTERED
207}; 206};
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
index 14f36ba4e5be..5230dd3c938c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
@@ -1504,23 +1504,40 @@ static void i40iw_add_hte_node(struct i40iw_cm_core *cm_core,
1504} 1504}
1505 1505
1506/** 1506/**
1507 * listen_port_in_use - determine if port is in use 1507 * i40iw_port_in_use - determine if port is in use
1508 * @port: Listen port number 1508 * @port: port number
1509 * @active_side: flag for listener side vs active side
1509 */ 1510 */
1510static bool i40iw_listen_port_in_use(struct i40iw_cm_core *cm_core, u16 port) 1511static bool i40iw_port_in_use(struct i40iw_cm_core *cm_core, u16 port, bool active_side)
1511{ 1512{
1512 struct i40iw_cm_listener *listen_node; 1513 struct i40iw_cm_listener *listen_node;
1514 struct i40iw_cm_node *cm_node;
1513 unsigned long flags; 1515 unsigned long flags;
1514 bool ret = false; 1516 bool ret = false;
1515 1517
1516 spin_lock_irqsave(&cm_core->listen_list_lock, flags); 1518 if (active_side) {
1517 list_for_each_entry(listen_node, &cm_core->listen_nodes, list) { 1519 /* search connected node list */
1518 if (listen_node->loc_port == port) { 1520 spin_lock_irqsave(&cm_core->ht_lock, flags);
1519 ret = true; 1521 list_for_each_entry(cm_node, &cm_core->connected_nodes, list) {
1520 break; 1522 if (cm_node->loc_port == port) {
1523 ret = true;
1524 break;
1525 }
1526 }
1527 if (!ret)
1528 clear_bit(port, cm_core->active_side_ports);
1529 spin_unlock_irqrestore(&cm_core->ht_lock, flags);
1530 } else {
1531 spin_lock_irqsave(&cm_core->listen_list_lock, flags);
1532 list_for_each_entry(listen_node, &cm_core->listen_nodes, list) {
1533 if (listen_node->loc_port == port) {
1534 ret = true;
1535 break;
1536 }
1521 } 1537 }
1538 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1522 } 1539 }
1523 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); 1540
1524 return ret; 1541 return ret;
1525} 1542}
1526 1543
@@ -1868,7 +1885,7 @@ static int i40iw_dec_refcnt_listen(struct i40iw_cm_core *cm_core,
1868 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags); 1885 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
1869 1886
1870 if (listener->iwdev) { 1887 if (listener->iwdev) {
1871 if (apbvt_del && !i40iw_listen_port_in_use(cm_core, listener->loc_port)) 1888 if (apbvt_del && !i40iw_port_in_use(cm_core, listener->loc_port, false))
1872 i40iw_manage_apbvt(listener->iwdev, 1889 i40iw_manage_apbvt(listener->iwdev,
1873 listener->loc_port, 1890 listener->loc_port,
1874 I40IW_MANAGE_APBVT_DEL); 1891 I40IW_MANAGE_APBVT_DEL);
@@ -2247,21 +2264,21 @@ static void i40iw_rem_ref_cm_node(struct i40iw_cm_node *cm_node)
2247 if (cm_node->listener) { 2264 if (cm_node->listener) {
2248 i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true); 2265 i40iw_dec_refcnt_listen(cm_core, cm_node->listener, 0, true);
2249 } else { 2266 } else {
2250 if (!i40iw_listen_port_in_use(cm_core, cm_node->loc_port) && 2267 if (!i40iw_port_in_use(cm_core, cm_node->loc_port, true) && cm_node->apbvt_set) {
2251 cm_node->apbvt_set) {
2252 i40iw_manage_apbvt(cm_node->iwdev, 2268 i40iw_manage_apbvt(cm_node->iwdev,
2253 cm_node->loc_port, 2269 cm_node->loc_port,
2254 I40IW_MANAGE_APBVT_DEL); 2270 I40IW_MANAGE_APBVT_DEL);
2255 i40iw_get_addr_info(cm_node, &nfo); 2271 cm_node->apbvt_set = 0;
2256 if (cm_node->qhash_set) { 2272 }
2257 i40iw_manage_qhash(cm_node->iwdev, 2273 i40iw_get_addr_info(cm_node, &nfo);
2258 &nfo, 2274 if (cm_node->qhash_set) {
2259 I40IW_QHASH_TYPE_TCP_ESTABLISHED, 2275 i40iw_manage_qhash(cm_node->iwdev,
2260 I40IW_QHASH_MANAGE_TYPE_DELETE, 2276 &nfo,
2261 NULL, 2277 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
2262 false); 2278 I40IW_QHASH_MANAGE_TYPE_DELETE,
2263 cm_node->qhash_set = 0; 2279 NULL,
2264 } 2280 false);
2281 cm_node->qhash_set = 0;
2265 } 2282 }
2266 } 2283 }
2267 2284
@@ -3255,7 +3272,8 @@ static void i40iw_init_tcp_ctx(struct i40iw_cm_node *cm_node,
3255 tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss)); 3272 tcp_info->snd_mss = cpu_to_le32(((u32)cm_node->tcp_cntxt.mss));
3256 if (cm_node->vlan_id < VLAN_TAG_PRESENT) { 3273 if (cm_node->vlan_id < VLAN_TAG_PRESENT) {
3257 tcp_info->insert_vlan_tag = true; 3274 tcp_info->insert_vlan_tag = true;
3258 tcp_info->vlan_tag = cpu_to_le16(cm_node->vlan_id); 3275 tcp_info->vlan_tag = cpu_to_le16(((u16)cm_node->user_pri << I40IW_VLAN_PRIO_SHIFT) |
3276 cm_node->vlan_id);
3259 } 3277 }
3260 if (cm_node->ipv4) { 3278 if (cm_node->ipv4) {
3261 tcp_info->src_port = cpu_to_le16(cm_node->loc_port); 3279 tcp_info->src_port = cpu_to_le16(cm_node->loc_port);
@@ -3737,10 +3755,8 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3737 struct sockaddr_in *raddr; 3755 struct sockaddr_in *raddr;
3738 struct sockaddr_in6 *laddr6; 3756 struct sockaddr_in6 *laddr6;
3739 struct sockaddr_in6 *raddr6; 3757 struct sockaddr_in6 *raddr6;
3740 bool qhash_set = false; 3758 int ret = 0;
3741 int apbvt_set = 0; 3759 unsigned long flags;
3742 int err = 0;
3743 enum i40iw_status_code status;
3744 3760
3745 ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn); 3761 ibqp = i40iw_get_qp(cm_id->device, conn_param->qpn);
3746 if (!ibqp) 3762 if (!ibqp)
@@ -3789,32 +3805,6 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3789 cm_info.user_pri = rt_tos2priority(cm_id->tos); 3805 cm_info.user_pri = rt_tos2priority(cm_id->tos);
3790 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n", 3806 i40iw_debug(&iwdev->sc_dev, I40IW_DEBUG_DCB, "%s TOS:[%d] UP:[%d]\n",
3791 __func__, cm_id->tos, cm_info.user_pri); 3807 __func__, cm_id->tos, cm_info.user_pri);
3792 if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
3793 (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
3794 raddr6->sin6_addr.in6_u.u6_addr32,
3795 sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
3796 status = i40iw_manage_qhash(iwdev,
3797 &cm_info,
3798 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3799 I40IW_QHASH_MANAGE_TYPE_ADD,
3800 NULL,
3801 true);
3802 if (status)
3803 return -EINVAL;
3804 qhash_set = true;
3805 }
3806 status = i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD);
3807 if (status) {
3808 i40iw_manage_qhash(iwdev,
3809 &cm_info,
3810 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3811 I40IW_QHASH_MANAGE_TYPE_DELETE,
3812 NULL,
3813 false);
3814 return -EINVAL;
3815 }
3816
3817 apbvt_set = 1;
3818 cm_id->add_ref(cm_id); 3808 cm_id->add_ref(cm_id);
3819 cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev, 3809 cm_node = i40iw_create_cm_node(&iwdev->cm_core, iwdev,
3820 conn_param->private_data_len, 3810 conn_param->private_data_len,
@@ -3822,17 +3812,40 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3822 &cm_info); 3812 &cm_info);
3823 3813
3824 if (IS_ERR(cm_node)) { 3814 if (IS_ERR(cm_node)) {
3825 err = PTR_ERR(cm_node); 3815 ret = PTR_ERR(cm_node);
3826 goto err_out; 3816 cm_id->rem_ref(cm_id);
3817 return ret;
3818 }
3819
3820 if ((cm_info.ipv4 && (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr)) ||
3821 (!cm_info.ipv4 && memcmp(laddr6->sin6_addr.in6_u.u6_addr32,
3822 raddr6->sin6_addr.in6_u.u6_addr32,
3823 sizeof(laddr6->sin6_addr.in6_u.u6_addr32)))) {
3824 if (i40iw_manage_qhash(iwdev, &cm_info, I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3825 I40IW_QHASH_MANAGE_TYPE_ADD, NULL, true)) {
3826 ret = -EINVAL;
3827 goto err;
3828 }
3829 cm_node->qhash_set = true;
3827 } 3830 }
3828 3831
3832 spin_lock_irqsave(&iwdev->cm_core.ht_lock, flags);
3833 if (!test_and_set_bit(cm_info.loc_port, iwdev->cm_core.active_side_ports)) {
3834 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
3835 if (i40iw_manage_apbvt(iwdev, cm_info.loc_port, I40IW_MANAGE_APBVT_ADD)) {
3836 ret = -EINVAL;
3837 goto err;
3838 }
3839 } else {
3840 spin_unlock_irqrestore(&iwdev->cm_core.ht_lock, flags);
3841 }
3842
3843 cm_node->apbvt_set = true;
3829 i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord); 3844 i40iw_record_ird_ord(cm_node, (u16)conn_param->ird, (u16)conn_param->ord);
3830 if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO && 3845 if (cm_node->send_rdma0_op == SEND_RDMA_READ_ZERO &&
3831 !cm_node->ord_size) 3846 !cm_node->ord_size)
3832 cm_node->ord_size = 1; 3847 cm_node->ord_size = 1;
3833 3848
3834 cm_node->apbvt_set = apbvt_set;
3835 cm_node->qhash_set = qhash_set;
3836 iwqp->cm_node = cm_node; 3849 iwqp->cm_node = cm_node;
3837 cm_node->iwqp = iwqp; 3850 cm_node->iwqp = iwqp;
3838 iwqp->cm_id = cm_id; 3851 iwqp->cm_id = cm_id;
@@ -3840,11 +3853,9 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3840 3853
3841 if (cm_node->state != I40IW_CM_STATE_OFFLOADED) { 3854 if (cm_node->state != I40IW_CM_STATE_OFFLOADED) {
3842 cm_node->state = I40IW_CM_STATE_SYN_SENT; 3855 cm_node->state = I40IW_CM_STATE_SYN_SENT;
3843 err = i40iw_send_syn(cm_node, 0); 3856 ret = i40iw_send_syn(cm_node, 0);
3844 if (err) { 3857 if (ret)
3845 i40iw_rem_ref_cm_node(cm_node); 3858 goto err;
3846 goto err_out;
3847 }
3848 } 3859 }
3849 3860
3850 i40iw_debug(cm_node->dev, 3861 i40iw_debug(cm_node->dev,
@@ -3853,9 +3864,10 @@ int i40iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3853 cm_node->rem_port, 3864 cm_node->rem_port,
3854 cm_node, 3865 cm_node,
3855 cm_node->cm_id); 3866 cm_node->cm_id);
3867
3856 return 0; 3868 return 0;
3857 3869
3858err_out: 3870err:
3859 if (cm_info.ipv4) 3871 if (cm_info.ipv4)
3860 i40iw_debug(&iwdev->sc_dev, 3872 i40iw_debug(&iwdev->sc_dev,
3861 I40IW_DEBUG_CM, 3873 I40IW_DEBUG_CM,
@@ -3867,22 +3879,10 @@ err_out:
3867 "Api - connect() FAILED: dest addr=%pI6", 3879 "Api - connect() FAILED: dest addr=%pI6",
3868 cm_info.rem_addr); 3880 cm_info.rem_addr);
3869 3881
3870 if (qhash_set) 3882 i40iw_rem_ref_cm_node(cm_node);
3871 i40iw_manage_qhash(iwdev,
3872 &cm_info,
3873 I40IW_QHASH_TYPE_TCP_ESTABLISHED,
3874 I40IW_QHASH_MANAGE_TYPE_DELETE,
3875 NULL,
3876 false);
3877
3878 if (apbvt_set && !i40iw_listen_port_in_use(&iwdev->cm_core,
3879 cm_info.loc_port))
3880 i40iw_manage_apbvt(iwdev,
3881 cm_info.loc_port,
3882 I40IW_MANAGE_APBVT_DEL);
3883 cm_id->rem_ref(cm_id); 3883 cm_id->rem_ref(cm_id);
3884 iwdev->cm_core.stats_connect_errs++; 3884 iwdev->cm_core.stats_connect_errs++;
3885 return err; 3885 return ret;
3886} 3886}
3887 3887
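
The i40iw_connect() rework above creates the cm_node before adding the qhash and APBVT entries, so the single err: label can unwind everything through i40iw_rem_ref_cm_node(); the old flow tracked qhash_set/apbvt_set in locals and undid each step by hand. The test_and_set_bit() under ht_lock also makes the APBVT add first-user-only per port and serializes against the clear_bit() in i40iw_port_in_use(). The idiom, with program_apbvt() as a stand-in for i40iw_manage_apbvt():

    /* Only the first active-side user of a port programs the filter;
     * later connects find the bit already set and skip the command.
     */
    if (!test_and_set_bit(cm_info.loc_port, core->active_side_ports)) {
            if (program_apbvt(iwdev, cm_info.loc_port, APBVT_ADD)) {
                    ret = -EINVAL;
                    goto err;       /* rem_ref path tears down qhash too */
            }
    }
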
3888/** 3888/**
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.h b/drivers/infiniband/hw/i40iw/i40iw_cm.h
index 2e52e38ffcf3..45abef76295b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_cm.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_cm.h
@@ -71,6 +71,9 @@
71#define I40IW_HW_IRD_SETTING_32 32 71#define I40IW_HW_IRD_SETTING_32 32
72#define I40IW_HW_IRD_SETTING_64 64 72#define I40IW_HW_IRD_SETTING_64 64
73 73
74#define MAX_PORTS 65536
75#define I40IW_VLAN_PRIO_SHIFT 13
76
74enum ietf_mpa_flags { 77enum ietf_mpa_flags {
75 IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */ 78 IETF_MPA_FLAGS_MARKERS = 0x80, /* receive Markers */
76 IETF_MPA_FLAGS_CRC = 0x40, /* receive CRC */ 79 IETF_MPA_FLAGS_CRC = 0x40, /* receive CRC */
@@ -411,6 +414,8 @@ struct i40iw_cm_core {
411 spinlock_t ht_lock; /* manage hash table */ 414 spinlock_t ht_lock; /* manage hash table */
412 spinlock_t listen_list_lock; /* listen list */ 415 spinlock_t listen_list_lock; /* listen list */
413 416
417 unsigned long active_side_ports[BITS_TO_LONGS(MAX_PORTS)];
418
414 u64 stats_nodes_created; 419 u64 stats_nodes_created;
415 u64 stats_nodes_destroyed; 420 u64 stats_nodes_destroyed;
416 u64 stats_listen_created; 421 u64 stats_listen_created;
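
active_side_ports is one bit per possible TCP source port; BITS_TO_LONGS() rounds the bit count up to whole unsigned longs, so the array costs 65536 / 64 = 1024 longs (8 KiB) per cm_core on a 64-bit build. In isolation:

    #include <linux/bitops.h>

    #define MAX_PORTS 65536

    /* 65536 bits -> 1024 unsigned longs (8 KiB) on 64-bit kernels */
    static unsigned long ports[BITS_TO_LONGS(MAX_PORTS)];
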
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
index d1f5345f04f0..42ca5346777d 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c
@@ -48,7 +48,7 @@
48 * @wqe: cqp wqe for header 48 * @wqe: cqp wqe for header
49 * @header: header for the cqp wqe 49 * @header: header for the cqp wqe
50 */ 50 */
51static inline void i40iw_insert_wqe_hdr(u64 *wqe, u64 header) 51void i40iw_insert_wqe_hdr(u64 *wqe, u64 header)
52{ 52{
53 wmb(); /* make sure WQE is populated before polarity is set */ 53 wmb(); /* make sure WQE is populated before polarity is set */
54 set_64bit_val(wqe, 24, header); 54 set_64bit_val(wqe, 24, header);
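
Dropping the static above lets the PUDA code reuse i40iw_insert_wqe_hdr() (the declaration is added to i40iw_p.h below) instead of open-coding the barrier. The wmb() matters because the 64-bit word at byte offset 24 carries the valid/polarity bit: hardware may fetch the WQE as soon as that bit flips, so every other word must be globally visible first. The pattern, reduced to its shape:

    /* Publish a descriptor: body words first, valid bit last. */
    static void publish_wqe(u64 *wqe, u64 header)
    {
            wmb();             /* order body stores before the header */
            wqe[3] = header;   /* byte offset 24, holds the valid bit */
    }
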
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c
index cc742c3132c6..27590ae21881 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_main.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_main.c
@@ -99,8 +99,6 @@ static struct notifier_block i40iw_net_notifier = {
99 .notifier_call = i40iw_net_event 99 .notifier_call = i40iw_net_event
100}; 100};
101 101
102static atomic_t i40iw_notifiers_registered;
103
104/** 102/**
105 * i40iw_find_i40e_handler - find a handler given a client info 103 * i40iw_find_i40e_handler - find a handler given a client info
106 * @ldev: pointer to a client info 104 * @ldev: pointer to a client info
@@ -1376,11 +1374,20 @@ error:
1376 */ 1374 */
1377static void i40iw_register_notifiers(void) 1375static void i40iw_register_notifiers(void)
1378{ 1376{
1379 if (atomic_inc_return(&i40iw_notifiers_registered) == 1) { 1377 register_inetaddr_notifier(&i40iw_inetaddr_notifier);
1380 register_inetaddr_notifier(&i40iw_inetaddr_notifier); 1378 register_inet6addr_notifier(&i40iw_inetaddr6_notifier);
1381 register_inet6addr_notifier(&i40iw_inetaddr6_notifier); 1379 register_netevent_notifier(&i40iw_net_notifier);
1382 register_netevent_notifier(&i40iw_net_notifier); 1380}
1383 } 1381
1382/**
1383 * i40iw_unregister_notifiers - unregister tcp ip notifiers
1384 */
1385
1386static void i40iw_unregister_notifiers(void)
1387{
1388 unregister_netevent_notifier(&i40iw_net_notifier);
1389 unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
1390 unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
1384} 1391}
1385 1392
1386/** 1393/**
@@ -1400,6 +1407,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev,
1400 u32 i; 1407 u32 i;
1401 u32 size; 1408 u32 size;
1402 1409
1410 if (!ldev->msix_count) {
1411 i40iw_pr_err("No MSI-X vectors\n");
1412 return I40IW_ERR_CONFIG;
1413 }
1414
1403 iwdev->msix_count = ldev->msix_count; 1415 iwdev->msix_count = ldev->msix_count;
1404 1416
1405 size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count; 1417 size = sizeof(struct i40iw_msix_vector) * iwdev->msix_count;
@@ -1462,12 +1474,6 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev)
1462 if (!iwdev->reset) 1474 if (!iwdev->reset)
1463 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); 1475 i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx);
1464 /* fallthrough */ 1476 /* fallthrough */
1465 case INET_NOTIFIER:
1466 if (!atomic_dec_return(&i40iw_notifiers_registered)) {
1467 unregister_netevent_notifier(&i40iw_net_notifier);
1468 unregister_inetaddr_notifier(&i40iw_inetaddr_notifier);
1469 unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier);
1470 }
1471 /* fallthrough */ 1477 /* fallthrough */
1472 case PBLE_CHUNK_MEM: 1478 case PBLE_CHUNK_MEM:
1473 i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); 1479 i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc);
@@ -1550,7 +1556,7 @@ static enum i40iw_status_code i40iw_setup_init_state(struct i40iw_handler *hdl,
1550 1556
1551 status = i40iw_save_msix_info(iwdev, ldev); 1557 status = i40iw_save_msix_info(iwdev, ldev);
1552 if (status) 1558 if (status)
1553 goto exit; 1559 return status;
1554 iwdev->hw.dev_context = (void *)ldev->pcidev; 1560 iwdev->hw.dev_context = (void *)ldev->pcidev;
1555 iwdev->hw.hw_addr = ldev->hw_addr; 1561 iwdev->hw.hw_addr = ldev->hw_addr;
1556 status = i40iw_allocate_dma_mem(&iwdev->hw, 1562 status = i40iw_allocate_dma_mem(&iwdev->hw,
@@ -1667,8 +1673,6 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client)
1667 break; 1673 break;
1668 iwdev->init_state = PBLE_CHUNK_MEM; 1674 iwdev->init_state = PBLE_CHUNK_MEM;
1669 iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM); 1675 iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM);
1670 i40iw_register_notifiers();
1671 iwdev->init_state = INET_NOTIFIER;
1672 status = i40iw_add_mac_ip(iwdev); 1676 status = i40iw_add_mac_ip(iwdev);
1673 if (status) 1677 if (status)
1674 break; 1678 break;
@@ -2018,6 +2022,8 @@ static int __init i40iw_init_module(void)
2018 i40iw_client.type = I40E_CLIENT_IWARP; 2022 i40iw_client.type = I40E_CLIENT_IWARP;
2019 spin_lock_init(&i40iw_handler_lock); 2023 spin_lock_init(&i40iw_handler_lock);
2020 ret = i40e_register_client(&i40iw_client); 2024 ret = i40e_register_client(&i40iw_client);
2025 i40iw_register_notifiers();
2026
2021 return ret; 2027 return ret;
2022} 2028}
2023 2029
@@ -2029,6 +2035,7 @@ static int __init i40iw_init_module(void)
2029 */ 2035 */
2030static void __exit i40iw_exit_module(void) 2036static void __exit i40iw_exit_module(void)
2031{ 2037{
2038 i40iw_unregister_notifiers();
2032 i40e_unregister_client(&i40iw_client); 2039 i40e_unregister_client(&i40iw_client);
2033} 2040}
2034 2041
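
Registering the inetaddr/inet6addr/netevent notifiers once at module load, and unregistering them at module exit, removes the i40iw_notifiers_registered refcount and the whole INET_NOTIFIER unwind state: the notifiers now outlive any single device, and the callbacks (updated in i40iw_utils.c below) simply return for devices that are not yet at IP_ADDR_REGISTERED or are closing. The resulting lifetime, schematically:

    static int __init example_init(void)
    {
            register_inetaddr_notifier(&example_nb);  /* per module */
            return 0;
    }

    static void __exit example_exit(void)
    {
            unregister_inetaddr_notifier(&example_nb);
    }
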
diff --git a/drivers/infiniband/hw/i40iw/i40iw_p.h b/drivers/infiniband/hw/i40iw/i40iw_p.h
index e217a1259f57..5498ad01c280 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_p.h
+++ b/drivers/infiniband/hw/i40iw/i40iw_p.h
@@ -59,6 +59,8 @@ enum i40iw_status_code i40iw_sc_mr_fast_register(struct i40iw_sc_qp *qp,
59 struct i40iw_fast_reg_stag_info *info, 59 struct i40iw_fast_reg_stag_info *info,
60 bool post_sq); 60 bool post_sq);
61 61
62void i40iw_insert_wqe_hdr(u64 *wqe, u64 header);
63
62/* HMC/FPM functions */ 64/* HMC/FPM functions */
63enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev, 65enum i40iw_status_code i40iw_sc_init_iw_hmc(struct i40iw_sc_dev *dev,
64 u8 hmc_fn_id); 66 u8 hmc_fn_id);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
index c2cab20c4bc5..59f70676f0e0 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
@@ -123,12 +123,11 @@ static void i40iw_puda_post_recvbuf(struct i40iw_puda_rsrc *rsrc, u32 wqe_idx,
123 get_64bit_val(wqe, 24, &offset24); 123 get_64bit_val(wqe, 24, &offset24);
124 124
125 offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID); 125 offset24 = (offset24) ? 0 : LS_64(1, I40IWQPSQ_VALID);
126 set_64bit_val(wqe, 24, offset24);
127 126
128 set_64bit_val(wqe, 0, buf->mem.pa); 127 set_64bit_val(wqe, 0, buf->mem.pa);
129 set_64bit_val(wqe, 8, 128 set_64bit_val(wqe, 8,
130 LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN)); 129 LS_64(buf->mem.size, I40IWQPSQ_FRAG_LEN));
131 set_64bit_val(wqe, 24, offset24); 130 i40iw_insert_wqe_hdr(wqe, offset24);
132} 131}
133 132
134/** 133/**
@@ -409,9 +408,7 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp,
409 set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN)); 408 set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN));
410 set_64bit_val(wqe, 16, header[0]); 409 set_64bit_val(wqe, 16, header[0]);
411 410
412 /* Ensure all data is written before writing valid bit */ 411 i40iw_insert_wqe_hdr(wqe, header[1]);
413 wmb();
414 set_64bit_val(wqe, 24, header[1]);
415 412
416 i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32); 413 i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32);
417 i40iw_qp_post_wr(&qp->qp_uk); 414 i40iw_qp_post_wr(&qp->qp_uk);
@@ -539,7 +536,7 @@ static enum i40iw_status_code i40iw_puda_qp_wqe(struct i40iw_sc_dev *dev, struct
539 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) | 536 LS_64(2, I40IW_CQPSQ_QP_NEXTIWSTATE) |
540 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); 537 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
541 538
542 set_64bit_val(wqe, 24, header); 539 i40iw_insert_wqe_hdr(wqe, header);
543 540
544 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32); 541 i40iw_debug_buf(cqp->dev, I40IW_DEBUG_PUDA, "PUDA CQE", wqe, 32);
545 i40iw_sc_cqp_post_sq(cqp); 542 i40iw_sc_cqp_post_sq(cqp);
@@ -655,7 +652,7 @@ static enum i40iw_status_code i40iw_puda_cq_wqe(struct i40iw_sc_dev *dev, struct
655 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) | 652 LS_64(1, I40IW_CQPSQ_CQ_ENCEQEMASK) |
656 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) | 653 LS_64(1, I40IW_CQPSQ_CQ_CEQIDVALID) |
657 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID); 654 LS_64(cqp->polarity, I40IW_CQPSQ_WQEVALID);
658 set_64bit_val(wqe, 24, header); 655 i40iw_insert_wqe_hdr(wqe, header);
659 656
660 i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE", 657 i40iw_debug_buf(dev, I40IW_DEBUG_PUDA, "PUDA CQE",
661 wqe, I40IW_CQP_WQE_SIZE * 8); 658 wqe, I40IW_CQP_WQE_SIZE * 8);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index 62f1f45b8737..e52dbbb4165e 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -160,7 +160,7 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
160 return NOTIFY_DONE; 160 return NOTIFY_DONE;
161 161
162 iwdev = &hdl->device; 162 iwdev = &hdl->device;
163 if (iwdev->init_state < INET_NOTIFIER) 163 if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
164 return NOTIFY_DONE; 164 return NOTIFY_DONE;
165 165
166 netdev = iwdev->ldev->netdev; 166 netdev = iwdev->ldev->netdev;
@@ -217,7 +217,7 @@ int i40iw_inet6addr_event(struct notifier_block *notifier,
217 return NOTIFY_DONE; 217 return NOTIFY_DONE;
218 218
219 iwdev = &hdl->device; 219 iwdev = &hdl->device;
220 if (iwdev->init_state < INET_NOTIFIER) 220 if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
221 return NOTIFY_DONE; 221 return NOTIFY_DONE;
222 222
223 netdev = iwdev->ldev->netdev; 223 netdev = iwdev->ldev->netdev;
@@ -266,7 +266,7 @@ int i40iw_net_event(struct notifier_block *notifier, unsigned long event, void *
266 if (!iwhdl) 266 if (!iwhdl)
267 return NOTIFY_DONE; 267 return NOTIFY_DONE;
268 iwdev = &iwhdl->device; 268 iwdev = &iwhdl->device;
269 if (iwdev->init_state < INET_NOTIFIER) 269 if (iwdev->init_state < IP_ADDR_REGISTERED || iwdev->closing)
270 return NOTIFY_DONE; 270 return NOTIFY_DONE;
271 p = (__be32 *)neigh->primary_key; 271 p = (__be32 *)neigh->primary_key;
272 i40iw_copy_ip_ntohl(local_ipaddr, p); 272 i40iw_copy_ip_ntohl(local_ipaddr, p);
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 1aa411034a27..62be0a41ad0b 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -826,12 +826,14 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
826 attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE; 826 attr->cap.max_inline_data = I40IW_MAX_INLINE_DATA_SIZE;
827 attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; 827 attr->cap.max_send_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
828 attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT; 828 attr->cap.max_recv_sge = I40IW_MAX_WQ_FRAGMENT_COUNT;
829 attr->port_num = 1;
829 init_attr->event_handler = iwqp->ibqp.event_handler; 830 init_attr->event_handler = iwqp->ibqp.event_handler;
830 init_attr->qp_context = iwqp->ibqp.qp_context; 831 init_attr->qp_context = iwqp->ibqp.qp_context;
831 init_attr->send_cq = iwqp->ibqp.send_cq; 832 init_attr->send_cq = iwqp->ibqp.send_cq;
832 init_attr->recv_cq = iwqp->ibqp.recv_cq; 833 init_attr->recv_cq = iwqp->ibqp.recv_cq;
833 init_attr->srq = iwqp->ibqp.srq; 834 init_attr->srq = iwqp->ibqp.srq;
834 init_attr->cap = attr->cap; 835 init_attr->cap = attr->cap;
836 init_attr->port_num = 1;
835 return 0; 837 return 0;
836} 838}
837 839
@@ -1027,7 +1029,19 @@ int i40iw_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1027 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED; 1029 iwqp->hw_tcp_state = I40IW_TCP_STATE_CLOSED;
1028 iwqp->last_aeq = I40IW_AE_RESET_SENT; 1030 iwqp->last_aeq = I40IW_AE_RESET_SENT;
1029 spin_unlock_irqrestore(&iwqp->lock, flags); 1031 spin_unlock_irqrestore(&iwqp->lock, flags);
1032 i40iw_cm_disconn(iwqp);
1030 } 1033 }
1034 } else {
1035 spin_lock_irqsave(&iwqp->lock, flags);
1036 if (iwqp->cm_id) {
1037 if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
1038 iwqp->cm_id->add_ref(iwqp->cm_id);
1039 i40iw_schedule_cm_timer(iwqp->cm_node,
1040 (struct i40iw_puda_buf *)iwqp,
1041 I40IW_TIMER_TYPE_CLOSE, 1, 0);
1042 }
1043 }
1044 spin_unlock_irqrestore(&iwqp->lock, flags);
1031 } 1045 }
1032 } 1046 }
1033 return 0; 1047 return 0;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index ab3c562d5ba7..552f7bd4ecc3 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -778,13 +778,13 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
778 } 778 }
779 779
780 if (MLX5_CAP_GEN(mdev, tag_matching)) { 780 if (MLX5_CAP_GEN(mdev, tag_matching)) {
781 props->xrq_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE; 781 props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
782 props->xrq_caps.max_num_tags = 782 props->tm_caps.max_num_tags =
783 (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1; 783 (1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
784 props->xrq_caps.flags = IB_TM_CAP_RC; 784 props->tm_caps.flags = IB_TM_CAP_RC;
785 props->xrq_caps.max_ops = 785 props->tm_caps.max_ops =
786 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz); 786 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
787 props->xrq_caps.max_sge = MLX5_TM_MAX_SGE; 787 props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
788 } 788 }
789 789
790 if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) { 790 if (field_avail(typeof(resp), cqe_comp_caps, uhw->outlen)) {
@@ -3837,11 +3837,13 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
3837 if (!dbg) 3837 if (!dbg)
3838 return -ENOMEM; 3838 return -ENOMEM;
3839 3839
3840 dev->delay_drop.dbg = dbg;
3841
3840 dbg->dir_debugfs = 3842 dbg->dir_debugfs =
3841 debugfs_create_dir("delay_drop", 3843 debugfs_create_dir("delay_drop",
3842 dev->mdev->priv.dbg_root); 3844 dev->mdev->priv.dbg_root);
3843 if (!dbg->dir_debugfs) 3845 if (!dbg->dir_debugfs)
3844 return -ENOMEM; 3846 goto out_debugfs;
3845 3847
3846 dbg->events_cnt_debugfs = 3848 dbg->events_cnt_debugfs =
3847 debugfs_create_atomic_t("num_timeout_events", 0400, 3849 debugfs_create_atomic_t("num_timeout_events", 0400,
@@ -3865,8 +3867,6 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
3865 if (!dbg->timeout_debugfs) 3867 if (!dbg->timeout_debugfs)
3866 goto out_debugfs; 3868 goto out_debugfs;
3867 3869
3868 dev->delay_drop.dbg = dbg;
3869
3870 return 0; 3870 return 0;
3871 3871
3872out_debugfs: 3872out_debugfs:
@@ -4174,9 +4174,9 @@ err_bfreg:
4174err_uar_page: 4174err_uar_page:
4175 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); 4175 mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
4176 4176
4177err_cnt:
4178 mlx5_ib_cleanup_cong_debugfs(dev);
4179err_cong: 4177err_cong:
4178 mlx5_ib_cleanup_cong_debugfs(dev);
4179err_cnt:
4180 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) 4180 if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
4181 mlx5_ib_dealloc_counters(dev); 4181 mlx5_ib_dealloc_counters(dev);
4182 4182
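
Two distinct fixes in this file. Setting dev->delay_drop.dbg before the first debugfs_create call means the shared out_debugfs: cleanup, which presumably looks the pointer up through dev, works even when creation fails partway. Swapping err_cnt and err_cong restores the rule that unwind labels run in reverse init order; schematically, with hypothetical init/cleanup steps:

    if (init_counters(dev))
            goto out;
    if (init_cong_debugfs(dev))
            goto err_counters;      /* cong failed: undo counters only */
    if (init_uar(dev))
            goto err_cong;          /* undo cong, fall through to counters */
    return 0;

    err_cong:
            cleanup_cong_debugfs(dev);
    err_counters:
            cleanup_counters(dev);
    out:
            return -ENOMEM;
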
diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c
index 914f212e7ef6..f3dbd75a0a96 100644
--- a/drivers/infiniband/hw/mlx5/mem.c
+++ b/drivers/infiniband/hw/mlx5/mem.c
@@ -50,13 +50,9 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
50{ 50{
51 unsigned long tmp; 51 unsigned long tmp;
52 unsigned long m; 52 unsigned long m;
53 int i, k; 53 u64 base = ~0, p = 0;
54 u64 base = 0; 54 u64 len, pfn;
55 int p = 0; 55 int i = 0;
56 int skip;
57 int mask;
58 u64 len;
59 u64 pfn;
60 struct scatterlist *sg; 56 struct scatterlist *sg;
61 int entry; 57 int entry;
62 unsigned long page_shift = umem->page_shift; 58 unsigned long page_shift = umem->page_shift;
@@ -76,33 +72,24 @@ void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
76 m = find_first_bit(&tmp, BITS_PER_LONG); 72 m = find_first_bit(&tmp, BITS_PER_LONG);
77 if (max_page_shift) 73 if (max_page_shift)
78 m = min_t(unsigned long, max_page_shift - page_shift, m); 74 m = min_t(unsigned long, max_page_shift - page_shift, m);
79 skip = 1 << m; 75
80 mask = skip - 1;
81 i = 0;
82 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { 76 for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
83 len = sg_dma_len(sg) >> page_shift; 77 len = sg_dma_len(sg) >> page_shift;
84 pfn = sg_dma_address(sg) >> page_shift; 78 pfn = sg_dma_address(sg) >> page_shift;
85 for (k = 0; k < len; k++) { 79 if (base + p != pfn) {
86 if (!(i & mask)) { 80 /* If either the offset or the new
87 tmp = (unsigned long)pfn; 81 * base are unaligned update m
88 m = min_t(unsigned long, m, find_first_bit(&tmp, BITS_PER_LONG)); 82 */
89 skip = 1 << m; 83 tmp = (unsigned long)(pfn | p);
90 mask = skip - 1; 84 if (!IS_ALIGNED(tmp, 1 << m))
91 base = pfn; 85 m = find_first_bit(&tmp, BITS_PER_LONG);
92 p = 0; 86
93 } else { 87 base = pfn;
94 if (base + p != pfn) { 88 p = 0;
95 tmp = (unsigned long)p;
96 m = find_first_bit(&tmp, BITS_PER_LONG);
97 skip = 1 << m;
98 mask = skip - 1;
99 base = pfn;
100 p = 0;
101 }
102 }
103 p++;
104 i++;
105 } 89 }
90
91 p += len;
92 i += len;
106 } 93 }
107 94
108 if (i) { 95 if (i) {
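
The mlx5_ib_cont_pages() rewrite costs one iteration per scatterlist entry rather than one per page: base + p is the pfn the next entry would start at if the mapping were still contiguous, and on a discontinuity the alignment of pfn | p caps the usable block size, with m the log2 of pages per block. A worked case:

    /* Entries covering pfns [0x400..0x40f], then a jump to 0x800:
     * at the jump p = 0x10 and pfn = 0x800, so tmp = 0x810 and
     * find_first_bit(&tmp, BITS_PER_LONG) = 4, i.e. blocks of at most
     * 2^4 pages (64 KiB with 4 KiB pages) remain usable.
     */
    unsigned long tmp = (unsigned long)(pfn | p);

    if (!IS_ALIGNED(tmp, 1UL << m))
            m = find_first_bit(&tmp, BITS_PER_LONG);
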
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 0e2789d9bb4d..37bbc543847a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -47,7 +47,8 @@ enum {
47 47
48#define MLX5_UMR_ALIGN 2048 48#define MLX5_UMR_ALIGN 2048
49 49
50static int clean_mr(struct mlx5_ib_mr *mr); 50static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
51static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
51static int mr_cache_max_order(struct mlx5_ib_dev *dev); 52static int mr_cache_max_order(struct mlx5_ib_dev *dev);
52static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr); 53static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
53 54
@@ -1270,8 +1271,9 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1270 1271
1271 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift, 1272 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
1272 update_xlt_flags); 1273 update_xlt_flags);
1274
1273 if (err) { 1275 if (err) {
1274 mlx5_ib_dereg_mr(&mr->ibmr); 1276 dereg_mr(dev, mr);
1275 return ERR_PTR(err); 1277 return ERR_PTR(err);
1276 } 1278 }
1277 } 1279 }
@@ -1356,7 +1358,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1356 err = mr_umem_get(pd, addr, len, access_flags, &mr->umem, 1358 err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
1357 &npages, &page_shift, &ncont, &order); 1359 &npages, &page_shift, &ncont, &order);
1358 if (err < 0) { 1360 if (err < 0) {
1359 clean_mr(mr); 1361 clean_mr(dev, mr);
1360 return err; 1362 return err;
1361 } 1363 }
1362 } 1364 }
@@ -1410,7 +1412,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
1410 if (err) { 1412 if (err) {
1411 mlx5_ib_warn(dev, "Failed to rereg UMR\n"); 1413 mlx5_ib_warn(dev, "Failed to rereg UMR\n");
1412 ib_umem_release(mr->umem); 1414 ib_umem_release(mr->umem);
1413 clean_mr(mr); 1415 clean_mr(dev, mr);
1414 return err; 1416 return err;
1415 } 1417 }
1416 } 1418 }
@@ -1469,9 +1471,8 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1469 } 1471 }
1470} 1472}
1471 1473
1472static int clean_mr(struct mlx5_ib_mr *mr) 1474static int clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1473{ 1475{
1474 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1475 int allocated_from_cache = mr->allocated_from_cache; 1476 int allocated_from_cache = mr->allocated_from_cache;
1476 int err; 1477 int err;
1477 1478
@@ -1507,10 +1508,8 @@ static int clean_mr(struct mlx5_ib_mr *mr)
1507 return 0; 1508 return 0;
1508} 1509}
1509 1510
1510int mlx5_ib_dereg_mr(struct ib_mr *ibmr) 1511static int dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1511{ 1512{
1512 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1513 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1514 int npages = mr->npages; 1513 int npages = mr->npages;
1515 struct ib_umem *umem = mr->umem; 1514 struct ib_umem *umem = mr->umem;
1516 1515
@@ -1539,7 +1538,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1539 } 1538 }
1540#endif 1539#endif
1541 1540
1542 clean_mr(mr); 1541 clean_mr(dev, mr);
1543 1542
1544 if (umem) { 1543 if (umem) {
1545 ib_umem_release(umem); 1544 ib_umem_release(umem);
@@ -1549,6 +1548,14 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1549 return 0; 1548 return 0;
1550} 1549}
1551 1550
1551int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
1552{
1553 struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
1554 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1555
1556 return dereg_mr(dev, mr);
1557}
1558
1552struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, 1559struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
1553 enum ib_mr_type mr_type, 1560 enum ib_mr_type mr_type,
1554 u32 max_num_sg) 1561 u32 max_num_sg)
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index f0dc5f4aa177..442b9bdc0f03 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -3232,7 +3232,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3232 mr->ibmr.iova); 3232 mr->ibmr.iova);
3233 set_wqe_32bit_value(wqe->wqe_words, 3233 set_wqe_32bit_value(wqe->wqe_words,
3234 NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX, 3234 NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
3235 mr->ibmr.length); 3235 lower_32_bits(mr->ibmr.length));
3236 set_wqe_32bit_value(wqe->wqe_words, 3236 set_wqe_32bit_value(wqe->wqe_words,
3237 NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0); 3237 NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
3238 set_wqe_32bit_value(wqe->wqe_words, 3238 set_wqe_32bit_value(wqe->wqe_words,
@@ -3274,7 +3274,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
3274 mr->npages * 8); 3274 mr->npages * 8);
3275 3275
3276 nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, " 3276 nes_debug(NES_DBG_IW_TX, "SQ_REG_MR: iova_start: %llx, "
3277 "length: %d, rkey: %0x, pgl_paddr: %llx, " 3277 "length: %lld, rkey: %0x, pgl_paddr: %llx, "
3278 "page_list_len: %u, wqe_misc: %x\n", 3278 "page_list_len: %u, wqe_misc: %x\n",
3279 (unsigned long long) mr->ibmr.iova, 3279 (unsigned long long) mr->ibmr.iova,
3280 mr->ibmr.length, 3280 mr->ibmr.length,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index dcb5942f9fb5..65b166cc7437 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -252,7 +252,10 @@ static int ocrdma_get_mbx_errno(u32 status)
252 case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES: 252 case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
253 err_num = -EAGAIN; 253 err_num = -EAGAIN;
254 break; 254 break;
255 default:
256 err_num = -EFAULT;
255 } 257 }
258 break;
256 default: 259 default:
257 err_num = -EFAULT; 260 err_num = -EFAULT;
258 } 261 }
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index b2bb42e2805d..254083b524bd 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -387,7 +387,7 @@ struct qedr_qp {
387 u8 wqe_size; 387 u8 wqe_size;
388 388
389 u8 smac[ETH_ALEN]; 389 u8 smac[ETH_ALEN];
390 u16 vlan_id; 390 u16 vlan;
391 int rc; 391 int rc;
392 } *rqe_wr_id; 392 } *rqe_wr_id;
393 393
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 4689e802b332..ad8965397cf7 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -105,7 +105,7 @@ void qedr_ll2_complete_rx_packet(void *cxt,
105 105
106 qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ? 106 qp->rqe_wr_id[qp->rq.gsi_cons].rc = data->u.data_length_error ?
107 -EINVAL : 0; 107 -EINVAL : 0;
108 qp->rqe_wr_id[qp->rq.gsi_cons].vlan_id = data->vlan; 108 qp->rqe_wr_id[qp->rq.gsi_cons].vlan = data->vlan;
109 /* note: length stands for data length i.e. GRH is excluded */ 109 /* note: length stands for data length i.e. GRH is excluded */
110 qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length = 110 qp->rqe_wr_id[qp->rq.gsi_cons].sg_list[0].length =
111 data->length.data_length; 111 data->length.data_length;
@@ -694,6 +694,7 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
694 struct qedr_cq *cq = get_qedr_cq(ibcq); 694 struct qedr_cq *cq = get_qedr_cq(ibcq);
695 struct qedr_qp *qp = dev->gsi_qp; 695 struct qedr_qp *qp = dev->gsi_qp;
696 unsigned long flags; 696 unsigned long flags;
697 u16 vlan_id;
697 int i = 0; 698 int i = 0;
698 699
699 spin_lock_irqsave(&cq->cq_lock, flags); 700 spin_lock_irqsave(&cq->cq_lock, flags);
@@ -712,9 +713,14 @@ int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
712 wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK; 713 wc[i].wc_flags |= IB_WC_GRH | IB_WC_IP_CSUM_OK;
713 ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac); 714 ether_addr_copy(wc[i].smac, qp->rqe_wr_id[qp->rq.cons].smac);
714 wc[i].wc_flags |= IB_WC_WITH_SMAC; 715 wc[i].wc_flags |= IB_WC_WITH_SMAC;
715 if (qp->rqe_wr_id[qp->rq.cons].vlan_id) { 716
717 vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan &
718 VLAN_VID_MASK;
719 if (vlan_id) {
716 wc[i].wc_flags |= IB_WC_WITH_VLAN; 720 wc[i].wc_flags |= IB_WC_WITH_VLAN;
717 wc[i].vlan_id = qp->rqe_wr_id[qp->rq.cons].vlan_id; 721 wc[i].vlan_id = vlan_id;
722 wc[i].sl = (qp->rqe_wr_id[qp->rq.cons].vlan &
723 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
718 } 724 }
719 725
720 qedr_inc_sw_cons(&qp->rq); 726 qedr_inc_sw_cons(&qp->rq);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 663a0c301c43..984aa3484928 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -416,9 +416,34 @@ static inline enum ib_wc_status pvrdma_wc_status_to_ib(
416 return (enum ib_wc_status)status; 416 return (enum ib_wc_status)status;
417} 417}
418 418
419static inline int pvrdma_wc_opcode_to_ib(int opcode) 419static inline int pvrdma_wc_opcode_to_ib(unsigned int opcode)
420{ 420{
421 return opcode; 421 switch (opcode) {
422 case PVRDMA_WC_SEND:
423 return IB_WC_SEND;
424 case PVRDMA_WC_RDMA_WRITE:
425 return IB_WC_RDMA_WRITE;
426 case PVRDMA_WC_RDMA_READ:
427 return IB_WC_RDMA_READ;
428 case PVRDMA_WC_COMP_SWAP:
429 return IB_WC_COMP_SWAP;
430 case PVRDMA_WC_FETCH_ADD:
431 return IB_WC_FETCH_ADD;
432 case PVRDMA_WC_LOCAL_INV:
433 return IB_WC_LOCAL_INV;
434 case PVRDMA_WC_FAST_REG_MR:
435 return IB_WC_REG_MR;
436 case PVRDMA_WC_MASKED_COMP_SWAP:
437 return IB_WC_MASKED_COMP_SWAP;
438 case PVRDMA_WC_MASKED_FETCH_ADD:
439 return IB_WC_MASKED_FETCH_ADD;
440 case PVRDMA_WC_RECV:
441 return IB_WC_RECV;
442 case PVRDMA_WC_RECV_RDMA_WITH_IMM:
443 return IB_WC_RECV_RDMA_WITH_IMM;
444 default:
445 return IB_WC_SEND;
446 }
422} 447}
423 448
424static inline int pvrdma_wc_flags_to_ib(int flags) 449static inline int pvrdma_wc_flags_to_ib(int flags)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 14b62f7472b4..7774654c2ccb 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -823,12 +823,18 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
823 wc->status != IB_WC_WR_FLUSH_ERR) { 823 wc->status != IB_WC_WR_FLUSH_ERR) {
824 struct ipoib_neigh *neigh; 824 struct ipoib_neigh *neigh;
825 825
826 if (wc->status != IB_WC_RNR_RETRY_EXC_ERR) 826 /* IB_WC[_RNR]_RETRY_EXC_ERR error is part of the life cycle,
827 ipoib_warn(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", 827 * so don't make waves.
828 wc->status, wr_id, wc->vendor_err); 828 */
829 if (wc->status == IB_WC_RNR_RETRY_EXC_ERR ||
830 wc->status == IB_WC_RETRY_EXC_ERR)
831 ipoib_dbg(priv,
832 "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
833 __func__, wc->status, wr_id, wc->vendor_err);
829 else 834 else
830 ipoib_dbg(priv, "failed cm send event (status=%d, wrid=%d vend_err %x)\n", 835 ipoib_warn(priv,
831 wc->status, wr_id, wc->vendor_err); 836 "%s: failed cm send event (status=%d, wrid=%d vend_err 0x%x)\n",
837 __func__, wc->status, wr_id, wc->vendor_err);
832 838
833 spin_lock_irqsave(&priv->lock, flags); 839 spin_lock_irqsave(&priv->lock, flags);
834 neigh = tx->neigh; 840 neigh = tx->neigh;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 2e075377242e..6cd61638b441 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1000,19 +1000,6 @@ static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
1000 */ 1000 */
1001 priv->dev->broadcast[8] = priv->pkey >> 8; 1001 priv->dev->broadcast[8] = priv->pkey >> 8;
1002 priv->dev->broadcast[9] = priv->pkey & 0xff; 1002 priv->dev->broadcast[9] = priv->pkey & 0xff;
1003
1004 /*
1005 * Update the broadcast address in the priv->broadcast object,
1006 * in case it already exists, otherwise no one will do that.
1007 */
1008 if (priv->broadcast) {
1009 spin_lock_irq(&priv->lock);
1010 memcpy(priv->broadcast->mcmember.mgid.raw,
1011 priv->dev->broadcast + 4,
1012 sizeof(union ib_gid));
1013 spin_unlock_irq(&priv->lock);
1014 }
1015
1016 return 0; 1003 return 0;
1017 } 1004 }
1018 1005
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index bac95b509a9b..dcc77014018d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -2180,6 +2180,7 @@ static struct net_device *ipoib_add_port(const char *format,
2180{ 2180{
2181 struct ipoib_dev_priv *priv; 2181 struct ipoib_dev_priv *priv;
2182 struct ib_port_attr attr; 2182 struct ib_port_attr attr;
2183 struct rdma_netdev *rn;
2183 int result = -ENOMEM; 2184 int result = -ENOMEM;
2184 2185
2185 priv = ipoib_intf_alloc(hca, port, format); 2186 priv = ipoib_intf_alloc(hca, port, format);
@@ -2279,7 +2280,8 @@ register_failed:
2279 ipoib_dev_cleanup(priv->dev); 2280 ipoib_dev_cleanup(priv->dev);
2280 2281
2281device_init_failed: 2282device_init_failed:
2282 free_netdev(priv->dev); 2283 rn = netdev_priv(priv->dev);
2284 rn->free_rdma_netdev(priv->dev);
2283 kfree(priv); 2285 kfree(priv);
2284 2286
2285alloc_mem_failed: 2287alloc_mem_failed:
@@ -2328,7 +2330,7 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
2328 return; 2330 return;
2329 2331
2330 list_for_each_entry_safe(priv, tmp, dev_list, list) { 2332 list_for_each_entry_safe(priv, tmp, dev_list, list) {
2331 struct rdma_netdev *rn = netdev_priv(priv->dev); 2333 struct rdma_netdev *parent_rn = netdev_priv(priv->dev);
2332 2334
2333 ib_unregister_event_handler(&priv->event_handler); 2335 ib_unregister_event_handler(&priv->event_handler);
2334 flush_workqueue(ipoib_workqueue); 2336 flush_workqueue(ipoib_workqueue);
@@ -2350,10 +2352,15 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
2350 unregister_netdev(priv->dev); 2352 unregister_netdev(priv->dev);
2351 mutex_unlock(&priv->sysfs_mutex); 2353 mutex_unlock(&priv->sysfs_mutex);
2352 2354
2353 rn->free_rdma_netdev(priv->dev); 2355 parent_rn->free_rdma_netdev(priv->dev);
2356
2357 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
2358 struct rdma_netdev *child_rn;
2354 2359
2355 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) 2360 child_rn = netdev_priv(cpriv->dev);
2361 child_rn->free_rdma_netdev(cpriv->dev);
2356 kfree(cpriv); 2362 kfree(cpriv);
2363 }
2357 2364
2358 kfree(priv); 2365 kfree(priv);
2359 } 2366 }
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 9927cd6b7082..55a9b71ed05a 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -141,14 +141,17 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
141 return restart_syscall(); 141 return restart_syscall();
142 } 142 }
143 143
144 priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name); 144 if (!down_write_trylock(&ppriv->vlan_rwsem)) {
145 if (!priv) {
146 rtnl_unlock(); 145 rtnl_unlock();
147 mutex_unlock(&ppriv->sysfs_mutex); 146 mutex_unlock(&ppriv->sysfs_mutex);
148 return -ENOMEM; 147 return restart_syscall();
149 } 148 }
150 149
151 down_write(&ppriv->vlan_rwsem); 150 priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
151 if (!priv) {
152 result = -ENOMEM;
153 goto out;
154 }
152 155
153 /* 156 /*
154 * First ensure this isn't a duplicate. We check the parent device and 157 * First ensure this isn't a duplicate. We check the parent device and
@@ -175,8 +178,11 @@ out:
175 rtnl_unlock(); 178 rtnl_unlock();
176 mutex_unlock(&ppriv->sysfs_mutex); 179 mutex_unlock(&ppriv->sysfs_mutex);
177 180
178 if (result) { 181 if (result && priv) {
179 free_netdev(priv->dev); 182 struct rdma_netdev *rn;
183
184 rn = netdev_priv(priv->dev);
185 rn->free_rdma_netdev(priv->dev);
180 kfree(priv); 186 kfree(priv);
181 } 187 }
182 188
@@ -204,7 +210,12 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
204 return restart_syscall(); 210 return restart_syscall();
205 } 211 }
206 212
207 down_write(&ppriv->vlan_rwsem); 213 if (!down_write_trylock(&ppriv->vlan_rwsem)) {
214 rtnl_unlock();
215 mutex_unlock(&ppriv->sysfs_mutex);
216 return restart_syscall();
217 }
218
208 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) { 219 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
209 if (priv->pkey == pkey && 220 if (priv->pkey == pkey &&
210 priv->child_type == IPOIB_LEGACY_CHILD) { 221 priv->child_type == IPOIB_LEGACY_CHILD) {
@@ -224,7 +235,10 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
224 mutex_unlock(&ppriv->sysfs_mutex); 235 mutex_unlock(&ppriv->sysfs_mutex);
225 236
226 if (dev) { 237 if (dev) {
227 free_netdev(dev); 238 struct rdma_netdev *rn;
239
240 rn = netdev_priv(dev);
241 rn->free_rdma_netdev(priv->dev);
228 kfree(priv); 242 kfree(priv);
229 return 0; 243 return 0;
230 } 244 }
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 9c3e9ab53a41..322209d5ff58 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -154,7 +154,7 @@ static void iser_dump_page_vec(struct iser_page_vec *page_vec)
154{ 154{
155 int i; 155 int i;
156 156
157 iser_err("page vec npages %d data length %d\n", 157 iser_err("page vec npages %d data length %lld\n",
158 page_vec->npages, page_vec->fake_mr.length); 158 page_vec->npages, page_vec->fake_mr.length);
159 for (i = 0; i < page_vec->npages; i++) 159 for (i = 0; i < page_vec->npages; i++)
160 iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]); 160 iser_err("vec[%d]: %llx\n", i, page_vec->pages[i]);
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 8f2042432c85..66a46c84e28f 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -237,9 +237,15 @@ int input_ff_erase(struct input_dev *dev, int effect_id, struct file *file)
237EXPORT_SYMBOL_GPL(input_ff_erase); 237EXPORT_SYMBOL_GPL(input_ff_erase);
238 238
239/* 239/*
240 * flush_effects - erase all effects owned by a file handle 240 * input_ff_flush - erase all effects owned by a file handle
241 * @dev: input device to erase effect from
242 * @file: purported owner of the effects
243 *
244 * This function erases all force-feedback effects associated with
245 * the given owner from specified device. Note that @file may be %NULL,
246 * in which case all effects will be erased.
241 */ 247 */
242static int flush_effects(struct input_dev *dev, struct file *file) 248int input_ff_flush(struct input_dev *dev, struct file *file)
243{ 249{
244 struct ff_device *ff = dev->ff; 250 struct ff_device *ff = dev->ff;
245 int i; 251 int i;
@@ -255,6 +261,7 @@ static int flush_effects(struct input_dev *dev, struct file *file)
255 261
256 return 0; 262 return 0;
257} 263}
264EXPORT_SYMBOL_GPL(input_ff_flush);
258 265
259/** 266/**
260 * input_ff_event() - generic handler for force-feedback events 267 * input_ff_event() - generic handler for force-feedback events
@@ -343,7 +350,7 @@ int input_ff_create(struct input_dev *dev, unsigned int max_effects)
343 mutex_init(&ff->mutex); 350 mutex_init(&ff->mutex);
344 351
345 dev->ff = ff; 352 dev->ff = ff;
346 dev->flush = flush_effects; 353 dev->flush = input_ff_flush;
347 dev->event = input_ff_event; 354 dev->event = input_ff_event;
348 __set_bit(EV_FF, dev->evbit); 355 __set_bit(EV_FF, dev->evbit);
349 356
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 022be0e22eba..443151de90c6 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -98,14 +98,15 @@ static int uinput_request_reserve_slot(struct uinput_device *udev,
98 uinput_request_alloc_id(udev, request)); 98 uinput_request_alloc_id(udev, request));
99} 99}
100 100
101static void uinput_request_done(struct uinput_device *udev, 101static void uinput_request_release_slot(struct uinput_device *udev,
102 struct uinput_request *request) 102 unsigned int id)
103{ 103{
104 /* Mark slot as available */ 104 /* Mark slot as available */
105 udev->requests[request->id] = NULL; 105 spin_lock(&udev->requests_lock);
106 wake_up(&udev->requests_waitq); 106 udev->requests[id] = NULL;
107 spin_unlock(&udev->requests_lock);
107 108
108 complete(&request->done); 109 wake_up(&udev->requests_waitq);
109} 110}
110 111
111static int uinput_request_send(struct uinput_device *udev, 112static int uinput_request_send(struct uinput_device *udev,
@@ -138,20 +139,22 @@ static int uinput_request_send(struct uinput_device *udev,
138static int uinput_request_submit(struct uinput_device *udev, 139static int uinput_request_submit(struct uinput_device *udev,
139 struct uinput_request *request) 140 struct uinput_request *request)
140{ 141{
141 int error; 142 int retval;
142 143
143 error = uinput_request_reserve_slot(udev, request); 144 retval = uinput_request_reserve_slot(udev, request);
144 if (error) 145 if (retval)
145 return error; 146 return retval;
146 147
147 error = uinput_request_send(udev, request); 148 retval = uinput_request_send(udev, request);
148 if (error) { 149 if (retval)
149 uinput_request_done(udev, request); 150 goto out;
150 return error;
151 }
152 151
153 wait_for_completion(&request->done); 152 wait_for_completion(&request->done);
154 return request->retval; 153 retval = request->retval;
154
155 out:
156 uinput_request_release_slot(udev, request->id);
157 return retval;
155} 158}
156 159
157/* 160/*
@@ -169,7 +172,7 @@ static void uinput_flush_requests(struct uinput_device *udev)
169 request = udev->requests[i]; 172 request = udev->requests[i];
170 if (request) { 173 if (request) {
171 request->retval = -ENODEV; 174 request->retval = -ENODEV;
172 uinput_request_done(udev, request); 175 complete(&request->done);
173 } 176 }
174 } 177 }
175 178
@@ -230,6 +233,18 @@ static int uinput_dev_erase_effect(struct input_dev *dev, int effect_id)
230 return uinput_request_submit(udev, &request); 233 return uinput_request_submit(udev, &request);
231} 234}
232 235
236static int uinput_dev_flush(struct input_dev *dev, struct file *file)
237{
238 /*
239 * If we are called with file == NULL that means we are tearing
240 * down the device, and therefore we can not handle FF erase
241 * requests: either we are handling UI_DEV_DESTROY (and holding
242 * the udev->mutex), or the file descriptor is closed and there is
243 * nobody on the other side anymore.
244 */
245 return file ? input_ff_flush(dev, file) : 0;
246}
247
233static void uinput_destroy_device(struct uinput_device *udev) 248static void uinput_destroy_device(struct uinput_device *udev)
234{ 249{
235 const char *name, *phys; 250 const char *name, *phys;
@@ -297,6 +312,12 @@ static int uinput_create_device(struct uinput_device *udev)
297 dev->ff->playback = uinput_dev_playback; 312 dev->ff->playback = uinput_dev_playback;
298 dev->ff->set_gain = uinput_dev_set_gain; 313 dev->ff->set_gain = uinput_dev_set_gain;
299 dev->ff->set_autocenter = uinput_dev_set_autocenter; 314 dev->ff->set_autocenter = uinput_dev_set_autocenter;
315 /*
316 * The standard input_ff_flush() implementation does
317 * not quite work for uinput as we can't reasonably
318 * handle FF requests during device teardown.
319 */
320 dev->flush = uinput_dev_flush;
300 } 321 }
301 322
302 error = input_register_device(udev->dev); 323 error = input_register_device(udev->dev);
@@ -939,7 +960,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
939 } 960 }
940 961
941 req->retval = ff_up.retval; 962 req->retval = ff_up.retval;
942 uinput_request_done(udev, req); 963 complete(&req->done);
943 goto out; 964 goto out;
944 965
945 case UI_END_FF_ERASE: 966 case UI_END_FF_ERASE:
@@ -955,7 +976,7 @@ static long uinput_ioctl_handler(struct file *file, unsigned int cmd,
955 } 976 }
956 977
957 req->retval = ff_erase.retval; 978 req->retval = ff_erase.retval;
958 uinput_request_done(udev, req); 979 complete(&req->done);
959 goto out; 980 goto out;
960 } 981 }
961 982
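
The uinput rework gives each request slot a single owner: uinput_request_submit() reserves the slot, sends, waits on request->done, reads retval, and only then releases the slot, while the completion paths (the FF ioctl handlers and uinput_flush_requests()) are reduced to complete(&req->done). That removes the window in which a slot could be recycled while its original request was still being finished by the submitter. Condensed, with hypothetical helper names:

    retval = reserve_slot(udev, request);
    if (retval)
            return retval;

    retval = send_request(udev, request);
    if (!retval) {
            wait_for_completion(&request->done);
            retval = request->retval;
    }

    release_slot(udev, request->id);  /* always done by the submitter */
    return retval;
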
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 15b1330606c1..e19eb60b3d2f 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -598,7 +598,7 @@ static int elan_i2c_write_fw_block(struct i2c_client *client,
598 } 598 }
599 599
600 /* Wait for F/W to update one page ROM data. */ 600 /* Wait for F/W to update one page ROM data. */
601 msleep(20); 601 msleep(35);
602 602
603 error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val); 603 error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val);
604 if (error) { 604 if (error) {
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 49bd2ab8c507..f3a21343e636 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -278,7 +278,7 @@ config EXYNOS_IOMMU_DEBUG
278config IPMMU_VMSA 278config IPMMU_VMSA
279 bool "Renesas VMSA-compatible IPMMU" 279 bool "Renesas VMSA-compatible IPMMU"
280 depends on ARM || IOMMU_DMA 280 depends on ARM || IOMMU_DMA
281 depends on ARCH_RENESAS || COMPILE_TEST 281 depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64)
282 select IOMMU_API 282 select IOMMU_API
283 select IOMMU_IO_PGTABLE_LPAE 283 select IOMMU_IO_PGTABLE_LPAE
284 select ARM_DMA_USE_IOMMU 284 select ARM_DMA_USE_IOMMU
@@ -373,7 +373,8 @@ config MTK_IOMMU_V1
373config QCOM_IOMMU 373config QCOM_IOMMU
374 # Note: iommu drivers cannot (yet?) be built as modules 374 # Note: iommu drivers cannot (yet?) be built as modules
375 bool "Qualcomm IOMMU Support" 375 bool "Qualcomm IOMMU Support"
376 depends on ARCH_QCOM || COMPILE_TEST 376 depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
377 depends on HAS_DMA
377 select IOMMU_API 378 select IOMMU_API
378 select IOMMU_IO_PGTABLE_LPAE 379 select IOMMU_IO_PGTABLE_LPAE
379 select ARM_DMA_USE_IOMMU 380 select ARM_DMA_USE_IOMMU
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 382de42b8359..6fe2d0346073 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -874,7 +874,7 @@ static bool copy_device_table(void)
874 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4); 874 hi = readl(iommu->mmio_base + MMIO_DEV_TABLE_OFFSET + 4);
875 entry = (((u64) hi) << 32) + lo; 875 entry = (((u64) hi) << 32) + lo;
876 if (last_entry && last_entry != entry) { 876 if (last_entry && last_entry != entry) {
877 pr_err("IOMMU:%d should use the same dev table as others!/n", 877 pr_err("IOMMU:%d should use the same dev table as others!\n",
878 iommu->index); 878 iommu->index);
879 return false; 879 return false;
880 } 880 }
@@ -882,7 +882,7 @@ static bool copy_device_table(void)
882 882
883 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12; 883 old_devtb_size = ((entry & ~PAGE_MASK) + 1) << 12;
884 if (old_devtb_size != dev_table_size) { 884 if (old_devtb_size != dev_table_size) {
885 pr_err("The device table size of IOMMU:%d is not expected!/n", 885 pr_err("The device table size of IOMMU:%d is not expected!\n",
886 iommu->index); 886 iommu->index);
887 return false; 887 return false;
888 } 888 }
@@ -890,7 +890,7 @@ static bool copy_device_table(void)
890 890
891 old_devtb_phys = entry & PAGE_MASK; 891 old_devtb_phys = entry & PAGE_MASK;
892 if (old_devtb_phys >= 0x100000000ULL) { 892 if (old_devtb_phys >= 0x100000000ULL) {
893 pr_err("The address of old device table is above 4G, not trustworthy!/n"); 893 pr_err("The address of old device table is above 4G, not trustworthy!\n");
894 return false; 894 return false;
895 } 895 }
896 old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB); 896 old_devtb = memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
@@ -901,7 +901,7 @@ static bool copy_device_table(void)
901 old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag, 901 old_dev_tbl_cpy = (void *)__get_free_pages(gfp_flag,
902 get_order(dev_table_size)); 902 get_order(dev_table_size));
903 if (old_dev_tbl_cpy == NULL) { 903 if (old_dev_tbl_cpy == NULL) {
904 pr_err("Failed to allocate memory for copying old device table!/n"); 904 pr_err("Failed to allocate memory for copying old device table!\n");
905 return false; 905 return false;
906 } 906 }
907 907
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index ca5ebaeafd6a..57c920c1372d 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -497,7 +497,7 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
497#define dmar_parse_one_rhsa dmar_res_noop 497#define dmar_parse_one_rhsa dmar_res_noop
498#endif 498#endif
499 499
500static void __init 500static void
501dmar_table_print_dmar_entry(struct acpi_dmar_header *header) 501dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
502{ 502{
503 struct acpi_dmar_hardware_unit *drhd; 503 struct acpi_dmar_hardware_unit *drhd;
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index d665d0dc16e8..6961fc393f0b 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -245,7 +245,7 @@ static void __arm_v7s_free_table(void *table, int lvl,
245static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries, 245static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
246 struct io_pgtable_cfg *cfg) 246 struct io_pgtable_cfg *cfg)
247{ 247{
248 if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) 248 if (cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)
249 return; 249 return;
250 250
251 dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep), 251 dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index bd515be5b380..16d33ac19db0 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -371,7 +371,8 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
371 int ret; 371 int ret;
372 372
373 spin_lock_irqsave(&dom->pgtlock, flags); 373 spin_lock_irqsave(&dom->pgtlock, flags);
374 ret = dom->iop->map(dom->iop, iova, paddr, size, prot); 374 ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32),
375 size, prot);
375 spin_unlock_irqrestore(&dom->pgtlock, flags); 376 spin_unlock_irqrestore(&dom->pgtlock, flags);
376 377
377 return ret; 378 return ret;
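
The mtk_iommu map path masks paddr with DMA_BIT_MASK(32) before handing it to the io-pgtable code, matching the 32-bit physical address field of the short-descriptor (v7s) format this driver uses; presumably addresses above 4 GB are reached through the platform's own aliasing rather than through these entries. The mask itself:

    #include <linux/dma-mapping.h>

    phys_addr_t pa = 0x123456000ULL & DMA_BIT_MASK(32);  /* 0x23456000 */
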
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index e60e3dba85a0..50947ebb6d17 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -157,10 +157,7 @@ static int of_pci_iommu_init(struct pci_dev *pdev, u16 alias, void *data)
157 157
158 err = of_iommu_xlate(info->dev, &iommu_spec); 158 err = of_iommu_xlate(info->dev, &iommu_spec);
159 of_node_put(iommu_spec.np); 159 of_node_put(iommu_spec.np);
160 if (err) 160 return err;
161 return err;
162
163 return info->np == pdev->bus->dev.of_node;
164} 161}
165 162
166const struct iommu_ops *of_iommu_configure(struct device *dev, 163const struct iommu_ops *of_iommu_configure(struct device *dev,
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 519149ec9053..b5df99c6f680 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -1042,7 +1042,7 @@ static int get_cpu_number(struct device_node *dn)
1042{ 1042{
1043 const __be32 *cell; 1043 const __be32 *cell;
1044 u64 hwid; 1044 u64 hwid;
1045 int i; 1045 int cpu;
1046 1046
1047 cell = of_get_property(dn, "reg", NULL); 1047 cell = of_get_property(dn, "reg", NULL);
1048 if (!cell) 1048 if (!cell)
@@ -1056,9 +1056,9 @@ static int get_cpu_number(struct device_node *dn)
1056 if (hwid & ~MPIDR_HWID_BITMASK) 1056 if (hwid & ~MPIDR_HWID_BITMASK)
1057 return -1; 1057 return -1;
1058 1058
1059 for (i = 0; i < num_possible_cpus(); i++) 1059 for_each_possible_cpu(cpu)
1060 if (cpu_logical_map(i) == hwid) 1060 if (cpu_logical_map(cpu) == hwid)
1061 return i; 1061 return cpu;
1062 1062
1063 return -1; 1063 return -1;
1064} 1064}
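
num_possible_cpus() counts the set bits of the possible mask, not its highest index, so bounding an index loop by it skips valid CPUs whenever the mask is sparse. For example:

    /* possible mask {0, 1, 4, 5}: num_possible_cpus() == 4, so a loop
     * "for (i = 0; i < 4; i++)" never tests logical CPUs 4 and 5.
     */
    for_each_possible_cpu(cpu)
            if (cpu_logical_map(cpu) == hwid)
                    return cpu;
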
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 2370e6d9e603..cd0bcc3b7e33 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -173,7 +173,9 @@ int its_map_vlpi(int irq, struct its_vlpi_map *map)
173{ 173{
174 struct its_cmd_info info = { 174 struct its_cmd_info info = {
175 .cmd_type = MAP_VLPI, 175 .cmd_type = MAP_VLPI,
176 .map = map, 176 {
177 .map = map,
178 },
177 }; 179 };
178 180
179 /* 181 /*
@@ -189,7 +191,9 @@ int its_get_vlpi(int irq, struct its_vlpi_map *map)
 {
 	struct its_cmd_info info = {
 		.cmd_type = GET_VLPI,
-		.map = map,
+		{
+			.map = map,
+		},
 	};
 
 	return irq_set_vcpu_affinity(irq, &info);
@@ -205,7 +209,9 @@ int its_prop_update_vlpi(int irq, u8 config, bool inv)
 {
 	struct its_cmd_info info = {
 		.cmd_type = inv ? PROP_UPDATE_AND_INV_VLPI : PROP_UPDATE_VLPI,
-		.config = config,
+		{
+			.config = config,
+		},
 	};
 
 	return irq_set_vcpu_affinity(irq, &info);
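
Note: `struct its_cmd_info` evidently keeps `map` and `config` inside an unnamed union, and the added braces initialize that union positionally, a spelling some very old GCC releases require for members of an anonymous union. A compilable sketch of the two forms (the type layout here is assumed for illustration):

#include <stdio.h>

struct cmd_info {
	int cmd_type;
	union {			/* anonymous union, as assumed above */
		void *map;
		unsigned char config;
	};
};

int main(void)
{
	int dummy;

	/* Portable to old compilers: brace the union explicitly. */
	struct cmd_info a = {
		.cmd_type = 1,
		{
			.map = &dummy,
		},
	};

	/* Modern GCC/C11 also accepts naming the member directly. */
	struct cmd_info b = { .cmd_type = 2, .config = 0x7f };

	printf("%d %p %d %u\n", a.cmd_type, a.map, b.cmd_type,
	       (unsigned int)b.config);
	return 0;
}
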
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 6e52a88bbd9e..c90976d7e53c 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -169,20 +169,19 @@ static void gic_mask_irq(struct irq_data *d)
 {
 	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
 
-	write_gic_rmask(BIT(intr));
+	write_gic_rmask(intr);
 	gic_clear_pcpu_masks(intr);
 }
 
 static void gic_unmask_irq(struct irq_data *d)
 {
-	struct cpumask *affinity = irq_data_get_affinity_mask(d);
 	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
 	unsigned int cpu;
 
-	write_gic_smask(BIT(intr));
+	write_gic_smask(intr);
 
 	gic_clear_pcpu_masks(intr);
-	cpu = cpumask_first_and(affinity, cpu_online_mask);
+	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
 	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
 }
 
@@ -420,13 +419,17 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
 			 irq_hw_number_t hw, unsigned int cpu)
 {
 	int intr = GIC_HWIRQ_TO_SHARED(hw);
+	struct irq_data *data;
 	unsigned long flags;
 
+	data = irq_get_irq_data(virq);
+
 	spin_lock_irqsave(&gic_lock, flags);
 	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
 	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
 	gic_clear_pcpu_masks(intr);
 	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
+	irq_data_update_effective_affinity(data, cpumask_of(cpu));
 	spin_unlock_irqrestore(&gic_lock, flags);
 
 	return 0;
@@ -645,7 +648,7 @@ static int __init gic_of_init(struct device_node *node,
 
 	/* Find the first available CPU vector. */
 	i = 0;
-	reserved = (C_SW0 | C_SW1) >> __fls(C_SW0);
+	reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
 	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
 					   i++, &cpu_vec))
 		reserved |= BIT(cpu_vec);
@@ -684,11 +687,11 @@ static int __init gic_of_init(struct device_node *node,
 
 	gicconfig = read_gic_config();
 	gic_shared_intrs = gicconfig & GIC_CONFIG_NUMINTERRUPTS;
-	gic_shared_intrs >>= __fls(GIC_CONFIG_NUMINTERRUPTS);
+	gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
 	gic_shared_intrs = (gic_shared_intrs + 1) * 8;
 
 	gic_vpes = gicconfig & GIC_CONFIG_PVPS;
-	gic_vpes >>= __fls(GIC_CONFIG_PVPS);
+	gic_vpes >>= __ffs(GIC_CONFIG_PVPS);
 	gic_vpes = gic_vpes + 1;
 
 	if (cpu_has_veic) {
@@ -767,7 +770,7 @@ static int __init gic_of_init(struct device_node *node,
 	for (i = 0; i < gic_shared_intrs; i++) {
 		change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
 		change_gic_trig(i, GIC_TRIG_LEVEL);
-		write_gic_rmask(BIT(i));
+		write_gic_rmask(i);
 	}
 
 	for (i = 0; i < gic_vpes; i++) {
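
Note: three of the hunks above swap `__fls` (highest set bit) for `__ffs` (lowest set bit). To extract a right-justified field from a register image, the value must be shifted down by the lowest set bit of the mask; shifting by the highest bit over-shifts whenever the mask is wider than one bit. The hunks also pass the interrupt number itself to `write_gic_rmask()` rather than `BIT(intr)`. A standalone sketch of the mask-extraction rule (helper names are illustrative):

#include <stdio.h>
#include <strings.h>	/* ffs() */

/* Lowest set bit position, like the kernel's __ffs() for nonzero masks. */
static unsigned int my_ffs(unsigned long mask)
{
	return (unsigned int)(ffs((int)mask) - 1);
}

static unsigned long field_get(unsigned long reg, unsigned long mask)
{
	return (reg & mask) >> my_ffs(mask);	/* shift by LOW bit of mask */
}

int main(void)
{
	unsigned long config = 0x0001f380;		/* example register image */
	unsigned long NUMINTERRUPTS = 0x0000ff00;	/* illustrative mask */

	/* Prints 0xf3; shifting by the mask's high bit (15) would yield 0. */
	printf("field = 0x%lx\n", field_get(config, NUMINTERRUPTS));
	return 0;
}
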
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 6c44609fd83a..cd2b3c69771a 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -825,7 +825,6 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
 	isdn_net_local *lp;
 	struct ippp_struct *is;
 	int proto;
-	unsigned char protobuf[4];
 
 	is = file->private_data;
 
@@ -839,24 +838,28 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
 	if (!lp)
 		printk(KERN_DEBUG "isdn_ppp_write: lp == NULL\n");
 	else {
-		/*
-		 * Don't reset huptimer for
-		 * LCP packets. (Echo requests).
-		 */
-		if (copy_from_user(protobuf, buf, 4))
-			return -EFAULT;
-		proto = PPP_PROTOCOL(protobuf);
-		if (proto != PPP_LCP)
-			lp->huptimer = 0;
+		if (lp->isdn_device < 0 || lp->isdn_channel < 0) {
+			unsigned char protobuf[4];
+			/*
+			 * Don't reset huptimer for
+			 * LCP packets. (Echo requests).
+			 */
+			if (copy_from_user(protobuf, buf, 4))
+				return -EFAULT;
+
+			proto = PPP_PROTOCOL(protobuf);
+			if (proto != PPP_LCP)
+				lp->huptimer = 0;
 
-		if (lp->isdn_device < 0 || lp->isdn_channel < 0)
 			return 0;
+		}
 
 		if ((dev->drv[lp->isdn_device]->flags & DRV_FLAG_RUNNING) &&
 		    lp->dialstate == 0 &&
 		    (lp->flags & ISDN_NET_CONNECTED)) {
 			unsigned short hl;
 			struct sk_buff *skb;
+			unsigned char *cpy_buf;
@@ -869,11 +872,21 @@ isdn_ppp_write(int min, struct file *file, const char __user *buf, int count)
 				return count;
 			}
 			skb_reserve(skb, hl);
-			if (copy_from_user(skb_put(skb, count), buf, count))
+			cpy_buf = skb_put(skb, count);
+			if (copy_from_user(cpy_buf, buf, count))
 			{
 				kfree_skb(skb);
 				return -EFAULT;
 			}
+
+			/*
+			 * Don't reset huptimer for
+			 * LCP packets. (Echo requests).
+			 */
+			proto = PPP_PROTOCOL(cpy_buf);
+			if (proto != PPP_LCP)
+				lp->huptimer = 0;
+
 			if (is->debug & 0x40) {
 				printk(KERN_DEBUG "ppp xmit: len %d\n", (int) skb->len);
 				isdn_ppp_frame_log("xmit", skb->data, skb->len, 32, is->unit, lp->ppp_slot);
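
Note: besides handling the short-circuit case first, the rewrite above reads the PPP protocol from `cpy_buf`, the data already copied into the skb, instead of issuing a second `copy_from_user()`. Fetching user memory twice lets userspace change the bytes between reads (a double-fetch hazard), so every decision should be made on the single kernel-side copy. A sketch of that rule in plain C (`copy_in` stands in for copy_from_user):

#include <stdio.h>
#include <string.h>

#define PPP_LCP 0xc021

/* Stand-in for copy_from_user(): one fetch into a kernel-side buffer. */
static int copy_in(unsigned char *dst, const unsigned char *user, size_t n)
{
	memcpy(dst, user, n);
	return 0;
}

static int handle_write(const unsigned char *user_buf, size_t count,
			int *huptimer)
{
	unsigned char kbuf[64];

	if (count > sizeof(kbuf) || count < 2)
		return -1;
	if (copy_in(kbuf, user_buf, count))
		return -1;

	/* Decide on the copy, never on user_buf, which may change under us. */
	int proto = (kbuf[0] << 8) | kbuf[1];
	if (proto != PPP_LCP)
		*huptimer = 0;
	return 0;
}

int main(void)
{
	unsigned char pkt[4] = { 0xc0, 0x21, 0x01, 0x00 };	/* LCP echo */
	int huptimer = 42;

	handle_write(pkt, sizeof(pkt), &huptimer);
	printf("huptimer = %d (LCP left it alone)\n", huptimer);
	return 0;
}
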
diff --git a/drivers/leds/leds-as3645a.c b/drivers/leds/leds-as3645a.c
index bbbbe0898233..9a257f969300 100644
--- a/drivers/leds/leds-as3645a.c
+++ b/drivers/leds/leds-as3645a.c
@@ -112,6 +112,10 @@
 #define AS_PEAK_mA_TO_REG(a) \
 	((min_t(u32, AS_PEAK_mA_MAX, a) - 1250) / 250)
 
+/* LED numbers for Devicetree */
+#define AS_LED_FLASH		0
+#define AS_LED_INDICATOR	1
+
 enum as_mode {
 	AS_MODE_EXT_TORCH = 0 << AS_CONTROL_MODE_SETTING_SHIFT,
 	AS_MODE_INDICATOR = 1 << AS_CONTROL_MODE_SETTING_SHIFT,
@@ -491,10 +495,29 @@ static int as3645a_parse_node(struct as3645a *flash,
 			       struct device_node *node)
 {
 	struct as3645a_config *cfg = &flash->cfg;
+	struct device_node *child;
 	const char *name;
 	int rval;
 
-	flash->flash_node = of_get_child_by_name(node, "flash");
+	for_each_child_of_node(node, child) {
+		u32 id = 0;
+
+		of_property_read_u32(child, "reg", &id);
+
+		switch (id) {
+		case AS_LED_FLASH:
+			flash->flash_node = of_node_get(child);
+			break;
+		case AS_LED_INDICATOR:
+			flash->indicator_node = of_node_get(child);
+			break;
+		default:
+			dev_warn(&flash->client->dev,
+				 "unknown LED %u encountered, ignoring\n", id);
+			break;
+		}
+	}
+
 	if (!flash->flash_node) {
 		dev_err(&flash->client->dev, "can't find flash node\n");
 		return -ENODEV;
@@ -534,11 +557,10 @@ static int as3645a_parse_node(struct as3645a *flash,
 	of_property_read_u32(flash->flash_node, "voltage-reference",
 			     &cfg->voltage_reference);
 
-	of_property_read_u32(flash->flash_node, "peak-current-limit",
+	of_property_read_u32(flash->flash_node, "ams,input-max-microamp",
 			     &cfg->peak);
 	cfg->peak = AS_PEAK_mA_TO_REG(cfg->peak);
 
-	flash->indicator_node = of_get_child_by_name(node, "indicator");
 	if (!flash->indicator_node) {
 		dev_warn(&flash->client->dev,
 			 "can't find indicator node\n");
@@ -721,6 +743,7 @@ static int as3645a_remove(struct i2c_client *client)
 	as3645a_set_control(flash, AS_MODE_EXT_TORCH, false);
 
 	v4l2_flash_release(flash->vf);
+	v4l2_flash_release(flash->vfind);
 
 	led_classdev_flash_unregister(&flash->fled);
 	led_classdev_unregister(&flash->iled_cdev);
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index 7d5286b05036..1841d0359bac 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(closure_put);
 void __closure_wake_up(struct closure_waitlist *wait_list)
 {
 	struct llist_node *list;
-	struct closure *cl;
+	struct closure *cl, *t;
 	struct llist_node *reverse = NULL;
 
 	list = llist_del_all(&wait_list->list);
@@ -73,7 +73,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
 	reverse = llist_reverse_order(list);
 
 	/* Then do the wakeups */
-	llist_for_each_entry(cl, reverse, list) {
+	llist_for_each_entry_safe(cl, t, reverse, list) {
 		closure_set_waiting(cl, 0);
 		closure_sub(cl, CLOSURE_WAITING + 1);
 	}
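
Note: `closure_sub()` can drop the last reference and free `cl`, so advancing the iterator from `cl->list` after the wakeup is a use-after-free; the `_safe` variant caches the next node before the body runs. The same discipline in a minimal standalone list walk:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

/* Cache 'next' before the body, as llist_for_each_entry_safe() does. */
#define for_each_node_safe(pos, n, head) \
	for ((pos) = (head); (pos) && ((n) = (pos)->next, 1); (pos) = (n))

int main(void)
{
	struct node *head = NULL, *pos, *n;

	for (int i = 0; i < 3; i++) {
		struct node *nd = malloc(sizeof(*nd));
		nd->id = i;
		nd->next = head;
		head = nd;
	}

	for_each_node_safe(pos, n, head) {
		printf("waking %d\n", pos->id);
		free(pos);	/* safe: iterator already holds pos->next */
	}
	return 0;
}
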
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 24eddbdf2ab4..203144762f36 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -149,5 +149,6 @@ static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen
 
 extern atomic_t dm_global_event_nr;
 extern wait_queue_head_t dm_global_eventq;
+void dm_issue_global_event(void);
 
 #endif
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a55ffd4f5933..96ab46512e1f 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -2466,6 +2466,7 @@ static int crypt_ctr_cipher_old(struct dm_target *ti, char *cipher_in, char *key
 		kfree(cipher_api);
 		return ret;
 	}
+	kfree(cipher_api);
 
 	return 0;
 bad_mem:
@@ -2584,6 +2585,10 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar
 			ti->error = "Invalid feature value for sector_size";
 			return -EINVAL;
 		}
+		if (ti->len & ((cc->sector_size >> SECTOR_SHIFT) - 1)) {
+			ti->error = "Device size is not multiple of sector_size feature";
+			return -EINVAL;
+		}
 		cc->sector_shift = __ffs(cc->sector_size) - SECTOR_SHIFT;
 	} else if (!strcasecmp(opt_string, "iv_large_sectors"))
 		set_bit(CRYPT_IV_LARGE_SECTORS, &cc->cipher_flags);
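
Note: the added dm-crypt check relies on `sector_size` being a power of two (dm-crypt only accepts such values), so `len & (sectors_per_block - 1)` is a cheap "is len a multiple?" test with no division. Worked standalone example:

#include <stdio.h>

#define SECTOR_SHIFT 9	/* 512-byte sectors, as in the block layer */

static int len_is_multiple(unsigned long long len, unsigned int sector_size)
{
	unsigned long long sectors_per_block = sector_size >> SECTOR_SHIFT;

	/* Valid only because sector_size is a power of two. */
	return (len & (sectors_per_block - 1)) == 0;
}

int main(void)
{
	/* A device of 1000 512-byte sectors with 4096-byte crypto sectors:
	 * 1000 & 7 == 0, so it splits evenly into 125 blocks. */
	printf("%d\n", len_is_multiple(1000, 4096));	/* 1 */
	printf("%d\n", len_is_multiple(1001, 4096));	/* 0 */
	return 0;
}
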
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 8756a6850431..e52676fa9832 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -477,9 +477,13 @@ static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_si
  * Round up the ptr to an 8-byte boundary.
  */
 #define ALIGN_MASK 7
+static inline size_t align_val(size_t val)
+{
+	return (val + ALIGN_MASK) & ~ALIGN_MASK;
+}
 static inline void *align_ptr(void *ptr)
 {
-	return (void *) (((size_t) (ptr + ALIGN_MASK)) & ~ALIGN_MASK);
+	return (void *)align_val((size_t)ptr);
 }
 
 /*
@@ -505,7 +509,7 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
 	struct hash_cell *hc;
 	size_t len, needed = 0;
 	struct gendisk *disk;
-	struct dm_name_list *nl, *old_nl = NULL;
+	struct dm_name_list *orig_nl, *nl, *old_nl = NULL;
 	uint32_t *event_nr;
 
 	down_write(&_hash_lock);
@@ -516,17 +520,15 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
 	 */
 	for (i = 0; i < NUM_BUCKETS; i++) {
 		list_for_each_entry (hc, _name_buckets + i, name_list) {
-			needed += sizeof(struct dm_name_list);
-			needed += strlen(hc->name) + 1;
-			needed += ALIGN_MASK;
-			needed += (sizeof(uint32_t) + ALIGN_MASK) & ~ALIGN_MASK;
+			needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
+			needed += align_val(sizeof(uint32_t));
 		}
 	}
 
 	/*
 	 * Grab our output buffer.
 	 */
-	nl = get_result_buffer(param, param_size, &len);
+	nl = orig_nl = get_result_buffer(param, param_size, &len);
 	if (len < needed) {
 		param->flags |= DM_BUFFER_FULL_FLAG;
 		goto out;
@@ -549,11 +551,16 @@ static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_
 			strcpy(nl->name, hc->name);
 
 			old_nl = nl;
-			event_nr = align_ptr(((void *) (nl + 1)) + strlen(hc->name) + 1);
+			event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
 			*event_nr = dm_get_event_nr(hc->md);
 			nl = align_ptr(event_nr + 1);
 		}
 	}
+	/*
+	 * If mismatch happens, security may be compromised due to buffer
+	 * overflow, so it's better to crash.
+	 */
+	BUG_ON((char *)nl - (char *)orig_nl != needed);
 
  out:
 	up_write(&_hash_lock);
@@ -1621,7 +1628,8 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
  * which has a variable size, is not used by the function processing
  * the ioctl.
  */
-#define IOCTL_FLAGS_NO_PARAMS		1
+#define IOCTL_FLAGS_NO_PARAMS		1
+#define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT	2
 
 /*-----------------------------------------------------------------
  * Implementation of open/close/ioctl on the special char
@@ -1635,12 +1643,12 @@ static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
 		ioctl_fn fn;
 	} _ioctls[] = {
 		{DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
-		{DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS, remove_all},
+		{DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
 		{DM_LIST_DEVICES_CMD, 0, list_devices},
 
-		{DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_create},
-		{DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS, dev_remove},
-		{DM_DEV_RENAME_CMD, 0, dev_rename},
+		{DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create},
+		{DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove},
+		{DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename},
 		{DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
 		{DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
 		{DM_DEV_WAIT_CMD, 0, dev_wait},
@@ -1869,6 +1877,9 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us
 	    unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
 		DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
 
+	if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
+		dm_issue_global_event();
+
 	/*
 	 * Copy the results back to userland.
 	 */
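
Note: the list_devices() rework above sizes each record as `align_val(offsetof(struct dm_name_list, name) + strlen(name) + 1)` plus an aligned event counter, sizing exactly from the start of the flexible `name` field rather than over-reserving with `sizeof` plus slack, and the final `BUG_ON` asserts that the bytes written match the bytes reserved. A standalone model of the accounting (the struct shape is illustrative):

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdint.h>

#define ALIGN_MASK 7
static size_t align_val(size_t v)
{
	return (v + ALIGN_MASK) & ~(size_t)ALIGN_MASK;
}

struct name_rec {
	uint64_t dev;
	uint32_t next;
	char name[];	/* flexible array: sized by offsetof + strlen + 1 */
};

int main(void)
{
	const char *names[] = { "root", "home-crypt" };
	size_t needed = 0;

	for (int i = 0; i < 2; i++) {
		needed += align_val(offsetof(struct name_rec, name)
				    + strlen(names[i]) + 1);
		needed += align_val(sizeof(uint32_t));	/* event counter */
	}
	/* An exact, 8-byte-aligned total lets the writer assert (as the
	 * kernel's BUG_ON does) that it never overruns the buffer. */
	printf("needed = %zu bytes\n", needed);
	return 0;
}
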
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 5bfe285ea9d1..2245d06d2045 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3238,7 +3238,7 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
 	if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
 		return DM_MAPIO_REQUEUE;
 
-	mddev->pers->make_request(mddev, bio);
+	md_handle_request(mddev, bio);
 
 	return DM_MAPIO_SUBMITTED;
 }
@@ -3297,11 +3297,10 @@ static const char *__raid_dev_status(struct raid_set *rs, struct md_rdev *rdev,
 static sector_t rs_get_progress(struct raid_set *rs,
 				sector_t resync_max_sectors, bool *array_in_sync)
 {
-	sector_t r, recovery_cp, curr_resync_completed;
+	sector_t r, curr_resync_completed;
 	struct mddev *mddev = &rs->md;
 
 	curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
-	recovery_cp = mddev->recovery_cp;
 	*array_in_sync = false;
 
 	if (rs_is_raid0(rs)) {
@@ -3330,9 +3329,11 @@ static sector_t rs_get_progress(struct raid_set *rs,
 		} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 			r = curr_resync_completed;
 		else
-			r = recovery_cp;
+			r = mddev->recovery_cp;
 
-		if (r == MaxSector) {
+		if ((r == MaxSector) ||
+		    (test_bit(MD_RECOVERY_DONE, &mddev->recovery) &&
+		     (mddev->curr_resync_completed == resync_max_sectors))) {
 			/*
 			 * Sync complete.
 			 */
@@ -3892,7 +3893,7 @@ static void raid_resume(struct dm_target *ti)
 
 static struct target_type raid_target = {
 	.name = "raid",
-	.version = {1, 12, 1},
+	.version = {1, 13, 0},
 	.module = THIS_MODULE,
 	.ctr = raid_ctr,
 	.dtr = raid_dtr,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6e54145969c5..4be85324f44d 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -52,6 +52,12 @@ static struct workqueue_struct *deferred_remove_workqueue;
 atomic_t dm_global_event_nr = ATOMIC_INIT(0);
 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
 
+void dm_issue_global_event(void)
+{
+	atomic_inc(&dm_global_event_nr);
+	wake_up(&dm_global_eventq);
+}
+
 /*
  * One of these is allocated per bio.
  */
@@ -1865,9 +1871,8 @@ static void event_callback(void *context)
 	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
 
 	atomic_inc(&md->event_nr);
-	atomic_inc(&dm_global_event_nr);
 	wake_up(&md->eventq);
-	wake_up(&dm_global_eventq);
+	dm_issue_global_event();
 }
 
 /*
@@ -2283,6 +2288,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
 	}
 
 	map = __bind(md, table, &limits);
+	dm_issue_global_event();
 
 out:
 	mutex_unlock(&md->suspend_lock);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 08fcaebc61bd..0ff1bbf6c90e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  * call has finished, the bio has been linked into some internal structure
  * and so is visible to ->quiesce(), so we don't need the refcount any more.
  */
+void md_handle_request(struct mddev *mddev, struct bio *bio)
+{
+check_suspended:
+	rcu_read_lock();
+	if (mddev->suspended) {
+		DEFINE_WAIT(__wait);
+		for (;;) {
+			prepare_to_wait(&mddev->sb_wait, &__wait,
+					TASK_UNINTERRUPTIBLE);
+			if (!mddev->suspended)
+				break;
+			rcu_read_unlock();
+			schedule();
+			rcu_read_lock();
+		}
+		finish_wait(&mddev->sb_wait, &__wait);
+	}
+	atomic_inc(&mddev->active_io);
+	rcu_read_unlock();
+
+	if (!mddev->pers->make_request(mddev, bio)) {
+		atomic_dec(&mddev->active_io);
+		wake_up(&mddev->sb_wait);
+		goto check_suspended;
+	}
+
+	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
+		wake_up(&mddev->sb_wait);
+}
+EXPORT_SYMBOL(md_handle_request);
+
 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
@@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 		bio_endio(bio);
 		return BLK_QC_T_NONE;
 	}
-check_suspended:
-	rcu_read_lock();
-	if (mddev->suspended) {
-		DEFINE_WAIT(__wait);
-		for (;;) {
-			prepare_to_wait(&mddev->sb_wait, &__wait,
-					TASK_UNINTERRUPTIBLE);
-			if (!mddev->suspended)
-				break;
-			rcu_read_unlock();
-			schedule();
-			rcu_read_lock();
-		}
-		finish_wait(&mddev->sb_wait, &__wait);
-	}
-	atomic_inc(&mddev->active_io);
-	rcu_read_unlock();
 
 	/*
 	 * save the sectors now since our bio can
@@ -310,20 +324,14 @@ check_suspended:
 	sectors = bio_sectors(bio);
 	/* bio could be mergeable after passing to underlayer */
 	bio->bi_opf &= ~REQ_NOMERGE;
-	if (!mddev->pers->make_request(mddev, bio)) {
-		atomic_dec(&mddev->active_io);
-		wake_up(&mddev->sb_wait);
-		goto check_suspended;
-	}
+
+	md_handle_request(mddev, bio);
 
 	cpu = part_stat_lock();
 	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
 	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
 	part_stat_unlock();
 
-	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
-		wake_up(&mddev->sb_wait);
-
 	return BLK_QC_T_NONE;
 }
 
@@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws)
 	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
 	struct bio *bio = mddev->flush_bio;
 
+	/*
+	 * must reset flush_bio before calling into md_handle_request to avoid a
+	 * deadlock, because other bios passed md_handle_request suspend check
+	 * could wait for this and below md_handle_request could wait for those
+	 * bios because of suspend check
+	 */
+	mddev->flush_bio = NULL;
+	wake_up(&mddev->sb_wait);
+
 	if (bio->bi_iter.bi_size == 0)
 		/* an empty barrier - all done */
 		bio_endio(bio);
 	else {
 		bio->bi_opf &= ~REQ_PREFLUSH;
-		mddev->pers->make_request(mddev, bio);
+		md_handle_request(mddev, bio);
 	}
-
-	mddev->flush_bio = NULL;
-	wake_up(&mddev->sb_wait);
 }
 
 void md_flush_request(struct mddev *mddev, struct bio *bio)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 561d22b9a9a8..d8287d3cd1bf 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -692,6 +692,7 @@ extern void md_stop_writes(struct mddev *mddev);
 extern int md_rdev_init(struct md_rdev *rdev);
 extern void md_rdev_clear(struct md_rdev *rdev);
 
+extern void md_handle_request(struct mddev *mddev, struct bio *bio);
 extern void mddev_suspend(struct mddev *mddev);
 extern void mddev_resume(struct mddev *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4188a4881148..928e24a07133 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -811,6 +811,14 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
 		spin_unlock(&head->batch_head->batch_lock);
 		goto unlock_out;
 	}
+	/*
+	 * We must assign batch_head of this stripe within the
+	 * batch_lock, otherwise clear_batch_ready of batch head
+	 * stripe could clear BATCH_READY bit of this stripe and
+	 * this stripe->batch_head doesn't get assigned, which
+	 * could confuse clear_batch_ready for this stripe
+	 */
+	sh->batch_head = head->batch_head;
 
 	/*
 	 * at this point, head's BATCH_READY could be cleared, but we
@@ -818,8 +826,6 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
 	 */
 	list_add(&sh->batch_list, &head->batch_list);
 	spin_unlock(&head->batch_head->batch_lock);
-
-	sh->batch_head = head->batch_head;
 } else {
 	head->batch_head = head;
 	sh->batch_head = head->batch_head;
@@ -4599,7 +4605,8 @@ static void break_stripe_batch_list(struct stripe_head *head_sh,
 
 		set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
 					    (1 << STRIPE_PREREAD_ACTIVE) |
-					    (1 << STRIPE_DEGRADED)),
+					    (1 << STRIPE_DEGRADED) |
+					    (1 << STRIPE_ON_UNPLUG_LIST)),
 			      head_sh->state & (1 << STRIPE_INSYNC));
 
 		sh->check_state = head_sh->check_state;
@@ -6568,14 +6575,17 @@ static ssize_t
 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
 {
 	struct r5conf *conf;
-	unsigned long new;
+	unsigned int new;
 	int err;
 	struct r5worker_group *new_groups, *old_groups;
 	int group_cnt, worker_cnt_per_group;
 
 	if (len >= PAGE_SIZE)
 		return -EINVAL;
-	if (kstrtoul(page, 10, &new))
+	if (kstrtouint(page, 10, &new))
+		return -EINVAL;
+	/* 8192 should be big enough */
+	if (new > 8192)
 		return -EINVAL;
 
 	err = mddev_lock(mddev);
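
Note: the sysfs store above switches to `kstrtouint` for an `unsigned int` target and adds an explicit sanity cap so an absurd worker-group count cannot be configured. The same parse-then-bound pattern in standalone C (8192 is the cap the patch chose):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int parse_group_cnt(const char *page, unsigned int *out)
{
	char *end;
	unsigned long v;

	errno = 0;
	v = strtoul(page, &end, 10);
	if (errno || end == page || (*end && *end != '\n'))
		return -EINVAL;
	if (v > 8192)		/* explicit upper bound, as in the patch */
		return -EINVAL;
	*out = (unsigned int)v;
	return 0;
}

int main(void)
{
	unsigned int n;

	printf("%d\n", parse_group_cnt("16\n", &n));		/* 0, n = 16 */
	printf("%d\n", parse_group_cnt("999999999\n", &n));	/* -EINVAL */
	return 0;
}
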
diff --git a/drivers/media/rc/ir-sharp-decoder.c b/drivers/media/rc/ir-sharp-decoder.c
index ed43a4212479..129b558acc92 100644
--- a/drivers/media/rc/ir-sharp-decoder.c
+++ b/drivers/media/rc/ir-sharp-decoder.c
@@ -245,5 +245,5 @@ module_init(ir_sharp_decode_init);
 module_exit(ir_sharp_decode_exit);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("James Hogan <james.hogan@imgtec.com>");
+MODULE_AUTHOR("James Hogan <jhogan@kernel.org>");
 MODULE_DESCRIPTION("Sharp IR protocol decoder");
diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
index 5dba23ca2e5f..dc9bc1807fdf 100644
--- a/drivers/misc/cxl/cxllib.c
+++ b/drivers/misc/cxl/cxllib.c
@@ -219,8 +219,17 @@ int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
 
 	down_read(&mm->mmap_sem);
 
-	for (dar = addr; dar < addr + size; dar += page_size) {
-		if (!vma || dar < vma->vm_start || dar > vma->vm_end) {
+	vma = find_vma(mm, addr);
+	if (!vma) {
+		pr_err("Can't find vma for addr %016llx\n", addr);
+		rc = -EFAULT;
+		goto out;
+	}
+	/* get the size of the pages allocated */
+	page_size = vma_kernel_pagesize(vma);
+
+	for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) {
+		if (dar < vma->vm_start || dar >= vma->vm_end) {
 			vma = find_vma(mm, addr);
 			if (!vma) {
 				pr_err("Can't find vma for addr %016llx\n", addr);
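
Note: the cxllib fix queries the VMA's real page size (which can be a huge page) and rounds the start address down to a page boundary before walking, so every fault is issued for a page-aligned `dar`. `addr & ~(page_size - 1)` is the usual power-of-two round-down. Minimal standalone walk:

#include <stdio.h>

static unsigned long long align_down(unsigned long long addr,
				     unsigned long long page_size)
{
	return addr & ~(page_size - 1);	/* page_size must be a power of two */
}

int main(void)
{
	unsigned long long addr = 0x10000123, size = 0x2100;
	unsigned long long page = 0x1000;	/* 4 KiB; could be 64 KiB, 16 MiB... */

	for (unsigned long long dar = align_down(addr, page);
	     dar < addr + size; dar += page)
		printf("fault page at 0x%llx\n", dar);	/* 0x10000000..0x10002000 */
	return 0;
}
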
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 29fc1e662891..2ad7b5c69156 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1634,8 +1634,6 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq,
 	}
 
 	mqrq->areq.mrq = &brq->mrq;
-
-	mmc_queue_bounce_pre(mqrq);
 }
 
 static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
@@ -1829,7 +1827,6 @@ static void mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *new_req)
 		brq = &mq_rq->brq;
 		old_req = mmc_queue_req_to_req(mq_rq);
 		type = rq_data_dir(old_req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
-		mmc_queue_bounce_post(mq_rq);
 
 		switch (status) {
 		case MMC_BLK_SUCCESS:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index a7eb623f8daa..36217ad5e9b1 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1286,6 +1286,23 @@ out_err:
 	return err;
 }
 
+static void mmc_select_driver_type(struct mmc_card *card)
+{
+	int card_drv_type, drive_strength, drv_type;
+
+	card_drv_type = card->ext_csd.raw_driver_strength |
+			mmc_driver_type_mask(0);
+
+	drive_strength = mmc_select_drive_strength(card,
+						   card->ext_csd.hs200_max_dtr,
+						   card_drv_type, &drv_type);
+
+	card->drive_strength = drive_strength;
+
+	if (drv_type)
+		mmc_set_driver_type(card->host, drv_type);
+}
+
 static int mmc_select_hs400es(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
@@ -1341,6 +1358,8 @@ static int mmc_select_hs400es(struct mmc_card *card)
 		goto out_err;
 	}
 
+	mmc_select_driver_type(card);
+
 	/* Switch card to HS400 */
 	val = EXT_CSD_TIMING_HS400 |
 	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
@@ -1374,23 +1393,6 @@ out_err:
 	return err;
 }
 
-static void mmc_select_driver_type(struct mmc_card *card)
-{
-	int card_drv_type, drive_strength, drv_type;
-
-	card_drv_type = card->ext_csd.raw_driver_strength |
-			mmc_driver_type_mask(0);
-
-	drive_strength = mmc_select_drive_strength(card,
-						   card->ext_csd.hs200_max_dtr,
-						   card_drv_type, &drv_type);
-
-	card->drive_strength = drive_strength;
-
-	if (drv_type)
-		mmc_set_driver_type(card->host, drv_type);
-}
-
 /*
  * For device supporting HS200 mode, the following sequence
  * should be done before executing the tuning process.
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index affa7370ba82..0a4e77a5ba33 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -23,8 +23,6 @@
 #include "core.h"
 #include "card.h"
 
-#define MMC_QUEUE_BOUNCESZ	65536
-
 /*
  * Prepare a MMC request. This just filters out odd stuff.
  */
@@ -150,26 +148,6 @@ static void mmc_queue_setup_discard(struct request_queue *q,
 	queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q);
 }
 
-static unsigned int mmc_queue_calc_bouncesz(struct mmc_host *host)
-{
-	unsigned int bouncesz = MMC_QUEUE_BOUNCESZ;
-
-	if (host->max_segs != 1 || (host->caps & MMC_CAP_NO_BOUNCE_BUFF))
-		return 0;
-
-	if (bouncesz > host->max_req_size)
-		bouncesz = host->max_req_size;
-	if (bouncesz > host->max_seg_size)
-		bouncesz = host->max_seg_size;
-	if (bouncesz > host->max_blk_count * 512)
-		bouncesz = host->max_blk_count * 512;
-
-	if (bouncesz <= 512)
-		return 0;
-
-	return bouncesz;
-}
-
 /**
  * mmc_init_request() - initialize the MMC-specific per-request data
  * @q: the request queue
@@ -184,26 +162,9 @@ static int mmc_init_request(struct request_queue *q, struct request *req,
 	struct mmc_card *card = mq->card;
 	struct mmc_host *host = card->host;
 
-	if (card->bouncesz) {
-		mq_rq->bounce_buf = kmalloc(card->bouncesz, gfp);
-		if (!mq_rq->bounce_buf)
-			return -ENOMEM;
-		if (card->bouncesz > 512) {
-			mq_rq->sg = mmc_alloc_sg(1, gfp);
-			if (!mq_rq->sg)
-				return -ENOMEM;
-			mq_rq->bounce_sg = mmc_alloc_sg(card->bouncesz / 512,
-							gfp);
-			if (!mq_rq->bounce_sg)
-				return -ENOMEM;
-		}
-	} else {
-		mq_rq->bounce_buf = NULL;
-		mq_rq->bounce_sg = NULL;
-		mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
-		if (!mq_rq->sg)
-			return -ENOMEM;
-	}
+	mq_rq->sg = mmc_alloc_sg(host->max_segs, gfp);
+	if (!mq_rq->sg)
+		return -ENOMEM;
 
 	return 0;
 }
@@ -212,13 +173,6 @@ static void mmc_exit_request(struct request_queue *q, struct request *req)
 {
 	struct mmc_queue_req *mq_rq = req_to_mmc_queue_req(req);
 
-	/* It is OK to kfree(NULL) so this will be smooth */
-	kfree(mq_rq->bounce_sg);
-	mq_rq->bounce_sg = NULL;
-
-	kfree(mq_rq->bounce_buf);
-	mq_rq->bounce_buf = NULL;
-
 	kfree(mq_rq->sg);
 	mq_rq->sg = NULL;
 }
@@ -265,18 +219,11 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
-	card->bouncesz = mmc_queue_calc_bouncesz(host);
-	if (card->bouncesz) {
-		blk_queue_max_hw_sectors(mq->queue, card->bouncesz / 512);
-		blk_queue_max_segments(mq->queue, card->bouncesz / 512);
-		blk_queue_max_segment_size(mq->queue, card->bouncesz);
-	} else {
-		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_hw_sectors(mq->queue,
-			min(host->max_blk_count, host->max_req_size / 512));
-		blk_queue_max_segments(mq->queue, host->max_segs);
-		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
-	}
+	blk_queue_bounce_limit(mq->queue, limit);
+	blk_queue_max_hw_sectors(mq->queue,
+		min(host->max_blk_count, host->max_req_size / 512));
+	blk_queue_max_segments(mq->queue, host->max_segs);
+	blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
 	sema_init(&mq->thread_sem, 1);
 
@@ -365,56 +312,7 @@ void mmc_queue_resume(struct mmc_queue *mq)
  */
 unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
-	unsigned int sg_len;
-	size_t buflen;
-	struct scatterlist *sg;
 	struct request *req = mmc_queue_req_to_req(mqrq);
-	int i;
-
-	if (!mqrq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, req, mqrq->sg);
-
-	sg_len = blk_rq_map_sg(mq->queue, req, mqrq->bounce_sg);
-
-	mqrq->bounce_sg_len = sg_len;
-
-	buflen = 0;
-	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
-		buflen += sg->length;
-
-	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
-
-	return 1;
-}
-
-/*
- * If writing, bounce the data to the buffer before the request
- * is sent to the host driver
- */
-void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != WRITE)
-		return;
-
-	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-			  mqrq->bounce_buf, mqrq->sg[0].length);
-}
-
-/*
- * If reading, bounce the data from the buffer after the request
- * has been handled by the host driver
- */
-void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
-{
-	if (!mqrq->bounce_buf)
-		return;
-
-	if (rq_data_dir(mmc_queue_req_to_req(mqrq)) != READ)
-		return;
 
-	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
-			    mqrq->bounce_buf, mqrq->sg[0].length);
-}
+	return blk_rq_map_sg(mq->queue, req, mqrq->sg);
+}
diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h
index 04fc89360a7a..f18d3f656baa 100644
--- a/drivers/mmc/core/queue.h
+++ b/drivers/mmc/core/queue.h
@@ -49,9 +49,6 @@ enum mmc_drv_op {
 struct mmc_queue_req {
 	struct mmc_blk_request	brq;
 	struct scatterlist	*sg;
-	char			*bounce_buf;
-	struct scatterlist	*bounce_sg;
-	unsigned int		bounce_sg_len;
 	struct mmc_async_req	areq;
 	enum mmc_drv_op		drv_op;
 	int			drv_op_result;
@@ -81,11 +78,8 @@ extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
 extern void mmc_cleanup_queue(struct mmc_queue *);
 extern void mmc_queue_suspend(struct mmc_queue *);
 extern void mmc_queue_resume(struct mmc_queue *);
-
 extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
 				     struct mmc_queue_req *);
-extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
-extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 
 extern int mmc_access_rpmb(struct mmc_queue *);
 
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 02179ed2a40d..8c15637178ff 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -5,7 +5,7 @@
 comment "MMC/SD/SDIO Host Controller Drivers"
 
 config MMC_DEBUG
-	bool "MMC host drivers debugginG"
+	bool "MMC host drivers debugging"
 	depends on MMC != n
 	help
 	  This is an option for use by developers; most people should
diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c
index b9cc95998799..eee08d81b242 100644
--- a/drivers/mmc/host/cavium-thunderx.c
+++ b/drivers/mmc/host/cavium-thunderx.c
@@ -7,6 +7,7 @@
  *
  * Copyright (C) 2016 Cavium Inc.
  */
+#include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/mmc/mmc.h>
@@ -149,8 +150,11 @@ error:
 	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
 		if (host->slot[i])
 			cvm_mmc_of_slot_remove(host->slot[i]);
-		if (host->slot_pdev[i])
+		if (host->slot_pdev[i]) {
+			get_device(&host->slot_pdev[i]->dev);
 			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
+			put_device(&host->slot_pdev[i]->dev);
+		}
 	}
 	clk_disable_unprepare(host->clk);
 	return ret;
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index 27fb625cbcf3..fbd29f00fca0 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -1038,7 +1038,7 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
 	 */
 	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
 		     MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_POWER_OFF_CARD |
-		     MMC_CAP_3_3V_DDR | MMC_CAP_NO_BOUNCE_BUFF;
+		     MMC_CAP_3_3V_DDR;
 
 	if (host->use_sg)
 		mmc->max_segs = 16;
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index c885c2d4b904..85745ef179e2 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -531,8 +531,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
 	div->shift = __ffs(CLK_DIV_MASK);
 	div->width = __builtin_popcountl(CLK_DIV_MASK);
 	div->hw.init = &init;
-	div->flags = (CLK_DIVIDER_ONE_BASED |
-		      CLK_DIVIDER_ROUND_CLOSEST);
+	div->flags = CLK_DIVIDER_ONE_BASED;
 
 	clk = devm_clk_register(host->dev, &div->hw);
 	if (WARN_ON(IS_ERR(clk)))
@@ -717,6 +716,22 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
 static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
 	struct meson_host *host = mmc_priv(mmc);
+	int ret;
+
+	/*
+	 * If this is the initial tuning, try to get a sane Rx starting
+	 * phase before doing the actual tuning.
+	 */
+	if (!mmc->doing_retune) {
+		ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
+
+		if (ret)
+			return ret;
+	}
+
+	ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
+	if (ret)
+		return ret;
 
 	return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
 }
@@ -746,6 +761,11 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 	case MMC_POWER_UP:
 		if (!IS_ERR(mmc->supply.vmmc))
 			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
+		/* Reset phases */
+		clk_set_phase(host->rx_clk, 0);
+		clk_set_phase(host->tx_clk, 270);
+
 		break;
 
 	case MMC_POWER_ON:
@@ -759,8 +779,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 			host->vqmmc_enabled = true;
 		}
 
-		/* Reset rx phase */
-		clk_set_phase(host->rx_clk, 0);
 		break;
 	}
 
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 59ab194cb009..c763b404510f 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -702,11 +702,7 @@ static int pxamci_probe(struct platform_device *pdev)
 
 	pxamci_init_ocr(host);
 
-	/*
-	 * This architecture used to disable bounce buffers through its
-	 * defconfig, now it is done at runtime as a host property.
-	 */
-	mmc->caps = MMC_CAP_NO_BOUNCE_BUFF;
+	mmc->caps = 0;
 	host->cmdat = 0;
 	if (!cpu_is_pxa25x()) {
 		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index bbaddf18a1b3..d0ccc6729fd2 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -392,6 +392,7 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
 
 enum {
 	INTEL_DSM_FNS		=  0,
+	INTEL_DSM_V18_SWITCH	=  3,
 	INTEL_DSM_DRV_STRENGTH	=  9,
 	INTEL_DSM_D3_RETUNE	= 10,
 };
@@ -557,6 +558,19 @@ static void intel_hs400_enhanced_strobe(struct mmc_host *mmc,
 	sdhci_writel(host, val, INTEL_HS400_ES_REG);
 }
 
+static void sdhci_intel_voltage_switch(struct sdhci_host *host)
+{
+	struct sdhci_pci_slot *slot = sdhci_priv(host);
+	struct intel_host *intel_host = sdhci_pci_priv(slot);
+	struct device *dev = &slot->chip->pdev->dev;
+	u32 result = 0;
+	int err;
+
+	err = intel_dsm(intel_host, dev, INTEL_DSM_V18_SWITCH, &result);
+	pr_debug("%s: %s DSM error %d result %u\n",
+		 mmc_hostname(host->mmc), __func__, err, result);
+}
+
 static const struct sdhci_ops sdhci_intel_byt_ops = {
 	.set_clock		= sdhci_set_clock,
 	.set_power		= sdhci_intel_set_power,
@@ -565,6 +579,7 @@ static const struct sdhci_ops sdhci_intel_byt_ops = {
 	.reset			= sdhci_reset,
 	.set_uhs_signaling	= sdhci_set_uhs_signaling,
 	.hw_reset		= sdhci_pci_hw_reset,
+	.voltage_switch		= sdhci_intel_voltage_switch,
 };
 
 static void byt_read_dsm(struct sdhci_pci_slot *slot)
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 2eec2e652c53..0842bbc2d7ad 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -466,6 +466,7 @@ static int xenon_probe(struct platform_device *pdev)
 {
 	struct sdhci_pltfm_host *pltfm_host;
 	struct sdhci_host *host;
+	struct xenon_priv *priv;
 	int err;
 
 	host = sdhci_pltfm_init(pdev, &sdhci_xenon_pdata,
@@ -474,6 +475,7 @@ static int xenon_probe(struct platform_device *pdev)
 		return PTR_ERR(host);
 
 	pltfm_host = sdhci_priv(host);
+	priv = sdhci_pltfm_priv(pltfm_host);
 
 	/*
 	 * Link Xenon specific mmc_host_ops function,
@@ -491,9 +493,20 @@ static int xenon_probe(struct platform_device *pdev)
 	if (err)
 		goto free_pltfm;
 
+	priv->axi_clk = devm_clk_get(&pdev->dev, "axi");
+	if (IS_ERR(priv->axi_clk)) {
+		err = PTR_ERR(priv->axi_clk);
+		if (err == -EPROBE_DEFER)
+			goto err_clk;
+	} else {
+		err = clk_prepare_enable(priv->axi_clk);
+		if (err)
+			goto err_clk;
+	}
+
 	err = mmc_of_parse(host->mmc);
 	if (err)
-		goto err_clk;
+		goto err_clk_axi;
 
 	sdhci_get_of_property(pdev);
 
@@ -502,11 +515,11 @@ static int xenon_probe(struct platform_device *pdev)
 	/* Xenon specific dt parse */
 	err = xenon_probe_dt(pdev);
 	if (err)
-		goto err_clk;
+		goto err_clk_axi;
 
 	err = xenon_sdhc_prepare(host);
 	if (err)
-		goto err_clk;
+		goto err_clk_axi;
 
 	pm_runtime_get_noresume(&pdev->dev);
 	pm_runtime_set_active(&pdev->dev);
@@ -527,6 +540,8 @@ remove_sdhc:
 	pm_runtime_disable(&pdev->dev);
 	pm_runtime_put_noidle(&pdev->dev);
 	xenon_sdhc_unprepare(host);
+err_clk_axi:
+	clk_disable_unprepare(priv->axi_clk);
 err_clk:
 	clk_disable_unprepare(pltfm_host->clk);
 free_pltfm:
@@ -538,6 +553,7 @@ static int xenon_remove(struct platform_device *pdev)
 {
 	struct sdhci_host *host = platform_get_drvdata(pdev);
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct xenon_priv *priv = sdhci_pltfm_priv(pltfm_host);
 
 	pm_runtime_get_sync(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
@@ -546,7 +562,7 @@ static int xenon_remove(struct platform_device *pdev)
 	sdhci_remove_host(host, 0);
 
 	xenon_sdhc_unprepare(host);
-
+	clk_disable_unprepare(priv->axi_clk);
 	clk_disable_unprepare(pltfm_host->clk);
 
 	sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci-xenon.h b/drivers/mmc/host/sdhci-xenon.h
index 2bc0510c0769..9994995c7c56 100644
--- a/drivers/mmc/host/sdhci-xenon.h
+++ b/drivers/mmc/host/sdhci-xenon.h
@@ -83,6 +83,7 @@ struct xenon_priv {
 	unsigned char	bus_width;
 	unsigned char	timing;
 	unsigned int	clock;
+	struct clk	*axi_clk;
 
 	int phy_type;
 	/*
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 12cf8288d663..a7293e186e03 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -129,50 +129,6 @@ static int tmio_mmc_next_sg(struct tmio_mmc_host *host)
 
 #define CMDREQ_TIMEOUT	5000
 
-#ifdef CONFIG_MMC_DEBUG
-
-#define STATUS_TO_TEXT(a, status, i) \
-	do { \
-		if ((status) & TMIO_STAT_##a) { \
-			if ((i)++) \
-				printk(KERN_DEBUG " | "); \
-			printk(KERN_DEBUG #a); \
-		} \
-	} while (0)
-
-static void pr_debug_status(u32 status)
-{
-	int i = 0;
-
-	pr_debug("status: %08x = ", status);
-	STATUS_TO_TEXT(CARD_REMOVE, status, i);
-	STATUS_TO_TEXT(CARD_INSERT, status, i);
-	STATUS_TO_TEXT(SIGSTATE, status, i);
-	STATUS_TO_TEXT(WRPROTECT, status, i);
-	STATUS_TO_TEXT(CARD_REMOVE_A, status, i);
-	STATUS_TO_TEXT(CARD_INSERT_A, status, i);
-	STATUS_TO_TEXT(SIGSTATE_A, status, i);
-	STATUS_TO_TEXT(CMD_IDX_ERR, status, i);
-	STATUS_TO_TEXT(STOPBIT_ERR, status, i);
-	STATUS_TO_TEXT(ILL_FUNC, status, i);
-	STATUS_TO_TEXT(CMD_BUSY, status, i);
-	STATUS_TO_TEXT(CMDRESPEND, status, i);
-	STATUS_TO_TEXT(DATAEND, status, i);
-	STATUS_TO_TEXT(CRCFAIL, status, i);
-	STATUS_TO_TEXT(DATATIMEOUT, status, i);
-	STATUS_TO_TEXT(CMDTIMEOUT, status, i);
-	STATUS_TO_TEXT(RXOVERFLOW, status, i);
-	STATUS_TO_TEXT(TXUNDERRUN, status, i);
-	STATUS_TO_TEXT(RXRDY, status, i);
-	STATUS_TO_TEXT(TXRQ, status, i);
-	STATUS_TO_TEXT(ILL_ACCESS, status, i);
-	printk("\n");
-}
-
-#else
-#define pr_debug_status(s)  do { } while (0)
-#endif
-
 static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
 {
 	struct tmio_mmc_host *host = mmc_priv(mmc);
@@ -762,9 +718,6 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
762 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS); 718 status = sd_ctrl_read16_and_16_as_32(host, CTL_STATUS);
763 ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask; 719 ireg = status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask;
764 720
765 pr_debug_status(status);
766 pr_debug_status(ireg);
767
768 /* Clear the status except the interrupt status */ 721 /* Clear the status except the interrupt status */
769 sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ); 722 sd_ctrl_write32_as_16_and_16(host, CTL_STATUS, TMIO_MASK_IRQ);
770 723
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 5736b0c90b33..a308e707392d 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -581,6 +581,14 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
581 slave->mtd.erasesize = parent->erasesize; 581 slave->mtd.erasesize = parent->erasesize;
582 } 582 }
583 583
584 /*
585 * Slave erasesize might differ from the master one if the master
586 * exposes several regions with different erasesize. Adjust
587 * wr_alignment accordingly.
588 */
589 if (!(slave->mtd.flags & MTD_NO_ERASE))
590 wr_alignment = slave->mtd.erasesize;
591
584 tmp = slave->offset; 592 tmp = slave->offset;
585 remainder = do_div(tmp, wr_alignment); 593 remainder = do_div(tmp, wr_alignment);
586 if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { 594 if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) {
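[Editor's note: the hunk above bumps wr_alignment to the slave's erasesize before the offset check. The check itself relies on the kernel's do_div(), which divides a u64 in place and returns the remainder. A minimal plain-C sketch of the same test, with do_div() replaced by the % operator; part_offset and wr_alignment mirror the diff and are otherwise illustrative:]

#include <stdint.h>
#include <stdbool.h>

/* Sketch: a writable partition must start on an erase-block boundary,
 * otherwise its first/last erase blocks would straddle the neighbour.
 */
static bool part_offset_is_aligned(uint64_t part_offset, uint32_t wr_alignment)
{
	/* do_div(tmp, wr_alignment) yields this remainder in the kernel */
	return (part_offset % wr_alignment) == 0;
}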
diff --git a/drivers/mtd/nand/atmel/pmecc.c b/drivers/mtd/nand/atmel/pmecc.c
index 146af8218314..8268636675ef 100644
--- a/drivers/mtd/nand/atmel/pmecc.c
+++ b/drivers/mtd/nand/atmel/pmecc.c
@@ -363,7 +363,7 @@ atmel_pmecc_create_user(struct atmel_pmecc *pmecc,
363 size += (req->ecc.strength + 1) * sizeof(u16); 363 size += (req->ecc.strength + 1) * sizeof(u16);
364 /* Reserve space for mu, dmu and delta. */ 364 /* Reserve space for mu, dmu and delta. */
365 size = ALIGN(size, sizeof(s32)); 365 size = ALIGN(size, sizeof(s32));
366 size += (req->ecc.strength + 1) * sizeof(s32); 366 size += (req->ecc.strength + 1) * sizeof(s32) * 3;
367 367
368 user = kzalloc(size, GFP_KERNEL); 368 user = kzalloc(size, GFP_KERNEL);
369 if (!user) 369 if (!user)
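[Editor's note: the one-line fix above reserves room for all three s32 arrays (mu, dmu and delta) instead of just one, hence the "* 3". A quick sanity check of the arithmetic, assuming an illustrative correction strength of 8:]

#include <stddef.h>
#include <stdint.h>

/* Illustrative value only; the real one comes from req->ecc.strength */
enum { STRENGTH = 8 };

static size_t pmecc_s32_area(void)
{
	/* mu, dmu and delta each need (strength + 1) s32 entries:
	 * 9 * 4 * 3 = 108 bytes, versus the 36 reserved before the fix.
	 */
	return (STRENGTH + 1) * sizeof(int32_t) * 3;
}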
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index c3bb358ef01e..5796468db653 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -707,7 +707,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
707 } 707 }
708 res = clk_prepare_enable(host->clk); 708 res = clk_prepare_enable(host->clk);
709 if (res) 709 if (res)
710 goto err_exit1; 710 goto err_put_clk;
711 711
712 nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl; 712 nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
713 nand_chip->dev_ready = lpc32xx_nand_device_ready; 713 nand_chip->dev_ready = lpc32xx_nand_device_ready;
@@ -814,6 +814,7 @@ err_exit3:
814 dma_release_channel(host->dma_chan); 814 dma_release_channel(host->dma_chan);
815err_exit2: 815err_exit2:
816 clk_disable_unprepare(host->clk); 816 clk_disable_unprepare(host->clk);
817err_put_clk:
817 clk_put(host->clk); 818 clk_put(host->clk);
818err_exit1: 819err_exit1:
819 lpc32xx_wp_enable(host); 820 lpc32xx_wp_enable(host);
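[Editor's note: the new err_put_clk label keeps the unwind ladder consistent: a clk_prepare_enable() failure must drop the clk_get() reference, but must not call clk_disable_unprepare() on a clock that was never enabled. A generic sketch of the pattern, with hypothetical acquire/release helpers standing in for clk_get(), clk_prepare_enable() and friends:]

int acquire_a(void);   /* e.g. clk_get()            */
int acquire_b(void);   /* e.g. clk_prepare_enable() */
void release_a(void);  /* e.g. clk_put()            */
void release_b(void);  /* e.g. clk_disable_unprepare() */

static int probe_sketch(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto err_out;
	err = acquire_b();
	if (err)
		goto err_release_a;	/* undo only what succeeded */
	return 0;

err_release_a:
	release_a();
err_out:
	return err;
}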
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index bcc8cef1c615..12edaae17d81 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2668,7 +2668,7 @@ static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
2668static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, 2668static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2669 struct mtd_oob_ops *ops) 2669 struct mtd_oob_ops *ops)
2670{ 2670{
2671 int chipnr, realpage, page, blockmask, column; 2671 int chipnr, realpage, page, column;
2672 struct nand_chip *chip = mtd_to_nand(mtd); 2672 struct nand_chip *chip = mtd_to_nand(mtd);
2673 uint32_t writelen = ops->len; 2673 uint32_t writelen = ops->len;
2674 2674
@@ -2704,7 +2704,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2704 2704
2705 realpage = (int)(to >> chip->page_shift); 2705 realpage = (int)(to >> chip->page_shift);
2706 page = realpage & chip->pagemask; 2706 page = realpage & chip->pagemask;
2707 blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
2708 2707
2709 /* Invalidate the page cache, when we write to the cached page */ 2708 /* Invalidate the page cache, when we write to the cached page */
2710 if (to <= ((loff_t)chip->pagebuf << chip->page_shift) && 2709 if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index cf1d4a15e10a..19c000722cbc 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1784,7 +1784,7 @@ spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
1784 * @nor: pointer to a 'struct spi_nor' 1784 * @nor: pointer to a 'struct spi_nor'
1785 * @addr: offset in the SFDP area to start reading data from 1785 * @addr: offset in the SFDP area to start reading data from
1786 * @len: number of bytes to read 1786 * @len: number of bytes to read
1787 * @buf: buffer where the SFDP data are copied into 1787 * @buf: buffer where the SFDP data are copied into (dma-safe memory)
1788 * 1788 *
1789 * Whatever the actual numbers of bytes for address and dummy cycles are 1789 * Whatever the actual numbers of bytes for address and dummy cycles are
1790 * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always 1790 * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
@@ -1829,6 +1829,36 @@ read_err:
1829 return ret; 1829 return ret;
1830} 1830}
1831 1831
1832/**
1833 * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
1834 * @nor: pointer to a 'struct spi_nor'
1835 * @addr: offset in the SFDP area to start reading data from
1836 * @len: number of bytes to read
1837 * @buf: buffer where the SFDP data are copied into
1838 *
1839 * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not
1840 * guaranteed to be dma-safe.
1841 *
1842 * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
1843 * otherwise.
1844 */
1845static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
1846 size_t len, void *buf)
1847{
1848 void *dma_safe_buf;
1849 int ret;
1850
1851 dma_safe_buf = kmalloc(len, GFP_KERNEL);
1852 if (!dma_safe_buf)
1853 return -ENOMEM;
1854
1855 ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
1856 memcpy(buf, dma_safe_buf, len);
1857 kfree(dma_safe_buf);
1858
1859 return ret;
1860}
1861
1832struct sfdp_parameter_header { 1862struct sfdp_parameter_header {
1833 u8 id_lsb; 1863 u8 id_lsb;
1834 u8 minor; 1864 u8 minor;
@@ -2101,7 +2131,7 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
2101 bfpt_header->length * sizeof(u32)); 2131 bfpt_header->length * sizeof(u32));
2102 addr = SFDP_PARAM_HEADER_PTP(bfpt_header); 2132 addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
2103 memset(&bfpt, 0, sizeof(bfpt)); 2133 memset(&bfpt, 0, sizeof(bfpt));
2104 err = spi_nor_read_sfdp(nor, addr, len, &bfpt); 2134 err = spi_nor_read_sfdp_dma_unsafe(nor, addr, len, &bfpt);
2105 if (err < 0) 2135 if (err < 0)
2106 return err; 2136 return err;
2107 2137
@@ -2127,6 +2157,15 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor,
2127 params->size = bfpt.dwords[BFPT_DWORD(2)]; 2157 params->size = bfpt.dwords[BFPT_DWORD(2)];
2128 if (params->size & BIT(31)) { 2158 if (params->size & BIT(31)) {
2129 params->size &= ~BIT(31); 2159 params->size &= ~BIT(31);
2160
2161 /*
2162 * Prevent overflows on params->size. Anyway, a NOR of 2^64
2163 * bits is unlikely to exist so this error probably means
2164 * the BFPT we are reading is corrupted/wrong.
2165 */
2166 if (params->size > 63)
2167 return -EINVAL;
2168
2130 params->size = 1ULL << params->size; 2169 params->size = 1ULL << params->size;
2131 } else { 2170 } else {
2132 params->size++; 2171 params->size++;
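[Editor's note: the new check guards the 1ULL << params->size shift: shifting a 64-bit value by 64 or more positions is undefined behaviour in C, so any log2-encoded density above 63 must be rejected before the shift. A minimal stand-alone illustration of the same decoding, with `raw` standing in for the BFPT density DWORD:]

#include <stdint.h>
#include <errno.h>

static int decode_density(uint32_t raw, uint64_t *size)
{
	if (raw & (1U << 31)) {
		/* field holds log2 of the density */
		uint32_t log2_bits = raw & ~(1U << 31);

		if (log2_bits > 63)	/* 1ULL << 64 would be UB */
			return -EINVAL;
		*size = 1ULL << log2_bits;
	} else {
		/* field holds density - 1 directly */
		*size = (uint64_t)raw + 1;
	}
	return 0;
}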
@@ -2243,7 +2282,7 @@ static int spi_nor_parse_sfdp(struct spi_nor *nor,
2243 int i, err; 2282 int i, err;
2244 2283
2245 /* Get the SFDP header. */ 2284 /* Get the SFDP header. */
2246 err = spi_nor_read_sfdp(nor, 0, sizeof(header), &header); 2285 err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);
2247 if (err < 0) 2286 if (err < 0)
2248 return err; 2287 return err;
2249 2288
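[Editor's note: the bounce-buffer wrapper introduced above follows a common pattern: callers may pass stack or otherwise non-dma-safe memory, so the wrapper stages the transfer through a kmalloc'ed buffer, which is dma-safe by construction, then copies the result back. Its call sites look roughly like this sketch (nor is assumed to be a valid struct spi_nor as in the surrounding code):]

/* Usage sketch: a stack variable is not dma-safe, so SFDP parsing
 * goes through the _dma_unsafe wrapper rather than the raw reader.
 */
struct sfdp_header header;	/* lives on the stack */
int err = spi_nor_read_sfdp_dma_unsafe(nor, 0, sizeof(header), &header);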
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index c6678aa9b4ef..d74c7335c512 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -1100,6 +1100,10 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
1100 }; 1100 };
1101 int i, err; 1101 int i, err;
1102 1102
1103 /* DSA and CPU ports have to be members of multiple vlans */
1104 if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
1105 return 0;
1106
1103 if (!vid_begin) 1107 if (!vid_begin)
1104 return -EOPNOTSUPP; 1108 return -EOPNOTSUPP;
1105 1109
@@ -3947,7 +3951,9 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
3947 if (chip->irq > 0) { 3951 if (chip->irq > 0) {
3948 if (chip->info->g2_irqs > 0) 3952 if (chip->info->g2_irqs > 0)
3949 mv88e6xxx_g2_irq_free(chip); 3953 mv88e6xxx_g2_irq_free(chip);
3954 mutex_lock(&chip->reg_lock);
3950 mv88e6xxx_g1_irq_free(chip); 3955 mv88e6xxx_g1_irq_free(chip);
3956 mutex_unlock(&chip->reg_lock);
3951 } 3957 }
3952} 3958}
3953 3959
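[Editor's note: taking reg_lock around mv88e6xxx_g1_irq_free() brings the remove path in line with the driver's usual rule that shared switch registers are only touched under the lock, presumably because the irq-free path writes interrupt mask registers. The general shape, with a hypothetical helper:]

/* Sketch of the locking rule: helpers that touch shared switch
 * registers run under reg_lock, including teardown paths.
 */
mutex_lock(&chip->reg_lock);
irq_teardown_touching_registers(chip);	/* hypothetical */
mutex_unlock(&chip->reg_lock);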
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 214986436ece..0fdaaa643073 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -51,6 +51,10 @@
51 51
52#define AQ_CFG_SKB_FRAGS_MAX 32U 52#define AQ_CFG_SKB_FRAGS_MAX 32U
53 53
 54/* Minimum number of free descriptors in one ring required to resume its queue
55 */
56#define AQ_CFG_RESTART_DESC_THRES (AQ_CFG_SKB_FRAGS_MAX * 2)
57
54#define AQ_CFG_NAPI_WEIGHT 64U 58#define AQ_CFG_NAPI_WEIGHT 64U
55 59
56#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U 60#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 6ac9e2602d6d..0a5bb4114eb4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -119,6 +119,35 @@ int aq_nic_cfg_start(struct aq_nic_s *self)
119 return 0; 119 return 0;
120} 120}
121 121
122static int aq_nic_update_link_status(struct aq_nic_s *self)
123{
124 int err = self->aq_hw_ops.hw_get_link_status(self->aq_hw);
125
126 if (err)
127 return err;
128
129 if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps)
130 pr_info("%s: link change old %d new %d\n",
131 AQ_CFG_DRV_NAME, self->link_status.mbps,
132 self->aq_hw->aq_link_status.mbps);
133
134 self->link_status = self->aq_hw->aq_link_status;
135 if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) {
136 aq_utils_obj_set(&self->header.flags,
137 AQ_NIC_FLAG_STARTED);
138 aq_utils_obj_clear(&self->header.flags,
139 AQ_NIC_LINK_DOWN);
140 netif_carrier_on(self->ndev);
141 netif_tx_wake_all_queues(self->ndev);
142 }
143 if (netif_carrier_ok(self->ndev) && !self->link_status.mbps) {
144 netif_carrier_off(self->ndev);
145 netif_tx_disable(self->ndev);
146 aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
147 }
148 return 0;
149}
150
122static void aq_nic_service_timer_cb(unsigned long param) 151static void aq_nic_service_timer_cb(unsigned long param)
123{ 152{
124 struct aq_nic_s *self = (struct aq_nic_s *)param; 153 struct aq_nic_s *self = (struct aq_nic_s *)param;
@@ -131,26 +160,13 @@ static void aq_nic_service_timer_cb(unsigned long param)
131 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) 160 if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
132 goto err_exit; 161 goto err_exit;
133 162
134 err = self->aq_hw_ops.hw_get_link_status(self->aq_hw); 163 err = aq_nic_update_link_status(self);
135 if (err < 0) 164 if (err)
136 goto err_exit; 165 goto err_exit;
137 166
138 self->link_status = self->aq_hw->aq_link_status;
139
140 self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, 167 self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
141 self->aq_nic_cfg.is_interrupt_moderation); 168 self->aq_nic_cfg.is_interrupt_moderation);
142 169
143 if (self->link_status.mbps) {
144 aq_utils_obj_set(&self->header.flags,
145 AQ_NIC_FLAG_STARTED);
146 aq_utils_obj_clear(&self->header.flags,
147 AQ_NIC_LINK_DOWN);
148 netif_carrier_on(self->ndev);
149 } else {
150 netif_carrier_off(self->ndev);
151 aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
152 }
153
154 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); 170 memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
155 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); 171 memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
156 for (i = AQ_DIMOF(self->aq_vec); i--;) { 172 for (i = AQ_DIMOF(self->aq_vec); i--;) {
@@ -214,7 +230,6 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
214 SET_NETDEV_DEV(ndev, dev); 230 SET_NETDEV_DEV(ndev, dev);
215 231
216 ndev->if_port = port; 232 ndev->if_port = port;
217 ndev->min_mtu = ETH_MIN_MTU;
218 self->ndev = ndev; 233 self->ndev = ndev;
219 234
220 self->aq_pci_func = aq_pci_func; 235 self->aq_pci_func = aq_pci_func;
@@ -241,7 +256,6 @@ err_exit:
241int aq_nic_ndev_register(struct aq_nic_s *self) 256int aq_nic_ndev_register(struct aq_nic_s *self)
242{ 257{
243 int err = 0; 258 int err = 0;
244 unsigned int i = 0U;
245 259
246 if (!self->ndev) { 260 if (!self->ndev) {
247 err = -EINVAL; 261 err = -EINVAL;
@@ -263,8 +277,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
263 277
264 netif_carrier_off(self->ndev); 278 netif_carrier_off(self->ndev);
265 279
266 for (i = AQ_CFG_VECS_MAX; i--;) 280 netif_tx_disable(self->ndev);
267 aq_nic_ndev_queue_stop(self, i);
268 281
269 err = register_netdev(self->ndev); 282 err = register_netdev(self->ndev);
270 if (err < 0) 283 if (err < 0)
@@ -283,6 +296,7 @@ int aq_nic_ndev_init(struct aq_nic_s *self)
283 self->ndev->features = aq_hw_caps->hw_features; 296 self->ndev->features = aq_hw_caps->hw_features;
284 self->ndev->priv_flags = aq_hw_caps->hw_priv_flags; 297 self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
285 self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN; 298 self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
299 self->ndev->max_mtu = self->aq_hw_caps.mtu - ETH_FCS_LEN - ETH_HLEN;
286 300
287 return 0; 301 return 0;
288} 302}
@@ -318,12 +332,8 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
318 err = -EINVAL; 332 err = -EINVAL;
319 goto err_exit; 333 goto err_exit;
320 } 334 }
321 if (netif_running(ndev)) { 335 if (netif_running(ndev))
322 unsigned int i; 336 netif_tx_disable(ndev);
323
324 for (i = AQ_CFG_VECS_MAX; i--;)
325 netif_stop_subqueue(ndev, i);
326 }
327 337
328 for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; 338 for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
329 self->aq_vecs++) { 339 self->aq_vecs++) {
@@ -383,16 +393,6 @@ err_exit:
383 return err; 393 return err;
384} 394}
385 395
386void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
387{
388 netif_start_subqueue(self->ndev, idx);
389}
390
391void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
392{
393 netif_stop_subqueue(self->ndev, idx);
394}
395
396int aq_nic_start(struct aq_nic_s *self) 396int aq_nic_start(struct aq_nic_s *self)
397{ 397{
398 struct aq_vec_s *aq_vec = NULL; 398 struct aq_vec_s *aq_vec = NULL;
@@ -451,10 +451,6 @@ int aq_nic_start(struct aq_nic_s *self)
451 goto err_exit; 451 goto err_exit;
452 } 452 }
453 453
454 for (i = 0U, aq_vec = self->aq_vec[0];
455 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
456 aq_nic_ndev_queue_start(self, i);
457
458 err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs); 454 err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
459 if (err < 0) 455 if (err < 0)
460 goto err_exit; 456 goto err_exit;
@@ -463,6 +459,8 @@ int aq_nic_start(struct aq_nic_s *self)
463 if (err < 0) 459 if (err < 0)
464 goto err_exit; 460 goto err_exit;
465 461
462 netif_tx_start_all_queues(self->ndev);
463
466err_exit: 464err_exit:
467 return err; 465 return err;
468} 466}
@@ -475,6 +473,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
475 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 473 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
476 unsigned int frag_count = 0U; 474 unsigned int frag_count = 0U;
477 unsigned int dx = ring->sw_tail; 475 unsigned int dx = ring->sw_tail;
476 struct aq_ring_buff_s *first = NULL;
478 struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx]; 477 struct aq_ring_buff_s *dx_buff = &ring->buff_ring[dx];
479 478
480 if (unlikely(skb_is_gso(skb))) { 479 if (unlikely(skb_is_gso(skb))) {
@@ -485,6 +484,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
485 dx_buff->len_l4 = tcp_hdrlen(skb); 484 dx_buff->len_l4 = tcp_hdrlen(skb);
486 dx_buff->mss = skb_shinfo(skb)->gso_size; 485 dx_buff->mss = skb_shinfo(skb)->gso_size;
487 dx_buff->is_txc = 1U; 486 dx_buff->is_txc = 1U;
487 dx_buff->eop_index = 0xffffU;
488 488
489 dx_buff->is_ipv6 = 489 dx_buff->is_ipv6 =
490 (ip_hdr(skb)->version == 6) ? 1U : 0U; 490 (ip_hdr(skb)->version == 6) ? 1U : 0U;
@@ -504,6 +504,7 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
504 if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) 504 if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
505 goto exit; 505 goto exit;
506 506
507 first = dx_buff;
507 dx_buff->len_pkt = skb->len; 508 dx_buff->len_pkt = skb->len;
508 dx_buff->is_sop = 1U; 509 dx_buff->is_sop = 1U;
509 dx_buff->is_mapped = 1U; 510 dx_buff->is_mapped = 1U;
@@ -532,40 +533,46 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
532 533
533 for (; nr_frags--; ++frag_count) { 534 for (; nr_frags--; ++frag_count) {
534 unsigned int frag_len = 0U; 535 unsigned int frag_len = 0U;
536 unsigned int buff_offset = 0U;
537 unsigned int buff_size = 0U;
535 dma_addr_t frag_pa; 538 dma_addr_t frag_pa;
536 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count]; 539 skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
537 540
538 frag_len = skb_frag_size(frag); 541 frag_len = skb_frag_size(frag);
539 frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
540 frag_len, DMA_TO_DEVICE);
541 542
542 if (unlikely(dma_mapping_error(aq_nic_get_dev(self), frag_pa))) 543 while (frag_len) {
543 goto mapping_error; 544 if (frag_len > AQ_CFG_TX_FRAME_MAX)
545 buff_size = AQ_CFG_TX_FRAME_MAX;
546 else
547 buff_size = frag_len;
548
549 frag_pa = skb_frag_dma_map(aq_nic_get_dev(self),
550 frag,
551 buff_offset,
552 buff_size,
553 DMA_TO_DEVICE);
554
555 if (unlikely(dma_mapping_error(aq_nic_get_dev(self),
556 frag_pa)))
557 goto mapping_error;
544 558
545 while (frag_len > AQ_CFG_TX_FRAME_MAX) {
546 dx = aq_ring_next_dx(ring, dx); 559 dx = aq_ring_next_dx(ring, dx);
547 dx_buff = &ring->buff_ring[dx]; 560 dx_buff = &ring->buff_ring[dx];
548 561
549 dx_buff->flags = 0U; 562 dx_buff->flags = 0U;
550 dx_buff->len = AQ_CFG_TX_FRAME_MAX; 563 dx_buff->len = buff_size;
551 dx_buff->pa = frag_pa; 564 dx_buff->pa = frag_pa;
552 dx_buff->is_mapped = 1U; 565 dx_buff->is_mapped = 1U;
566 dx_buff->eop_index = 0xffffU;
567
568 frag_len -= buff_size;
569 buff_offset += buff_size;
553 570
554 frag_len -= AQ_CFG_TX_FRAME_MAX;
555 frag_pa += AQ_CFG_TX_FRAME_MAX;
556 ++ret; 571 ++ret;
557 } 572 }
558
559 dx = aq_ring_next_dx(ring, dx);
560 dx_buff = &ring->buff_ring[dx];
561
562 dx_buff->flags = 0U;
563 dx_buff->len = frag_len;
564 dx_buff->pa = frag_pa;
565 dx_buff->is_mapped = 1U;
566 ++ret;
567 } 573 }
568 574
575 first->eop_index = dx;
569 dx_buff->is_eop = 1U; 576 dx_buff->is_eop = 1U;
570 dx_buff->skb = skb; 577 dx_buff->skb = skb;
571 goto exit; 578 goto exit;
@@ -602,7 +609,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
602 unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs; 609 unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
603 unsigned int tc = 0U; 610 unsigned int tc = 0U;
604 int err = NETDEV_TX_OK; 611 int err = NETDEV_TX_OK;
605 bool is_nic_in_bad_state;
606 612
607 frags = skb_shinfo(skb)->nr_frags + 1; 613 frags = skb_shinfo(skb)->nr_frags + 1;
608 614
@@ -613,13 +619,10 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
613 goto err_exit; 619 goto err_exit;
614 } 620 }
615 621
616 is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags, 622 aq_ring_update_queue_state(ring);
617 AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
618 (aq_ring_avail_dx(ring) <
619 AQ_CFG_SKB_FRAGS_MAX);
620 623
621 if (is_nic_in_bad_state) { 624 /* Above status update may stop the queue. Check this. */
622 aq_nic_ndev_queue_stop(self, ring->idx); 625 if (__netif_subqueue_stopped(self->ndev, ring->idx)) {
623 err = NETDEV_TX_BUSY; 626 err = NETDEV_TX_BUSY;
624 goto err_exit; 627 goto err_exit;
625 } 628 }
@@ -631,9 +634,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
631 ring, 634 ring,
632 frags); 635 frags);
633 if (err >= 0) { 636 if (err >= 0) {
634 if (aq_ring_avail_dx(ring) < AQ_CFG_SKB_FRAGS_MAX + 1)
635 aq_nic_ndev_queue_stop(self, ring->idx);
636
637 ++ring->stats.tx.packets; 637 ++ring->stats.tx.packets;
638 ring->stats.tx.bytes += skb->len; 638 ring->stats.tx.bytes += skb->len;
639 } 639 }
@@ -693,16 +693,9 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
693 693
694int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) 694int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
695{ 695{
696 int err = 0;
697
698 if (new_mtu > self->aq_hw_caps.mtu) {
699 err = -EINVAL;
700 goto err_exit;
701 }
702 self->aq_nic_cfg.mtu = new_mtu; 696 self->aq_nic_cfg.mtu = new_mtu;
703 697
704err_exit: 698 return 0;
705 return err;
706} 699}
707 700
708int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev) 701int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
@@ -905,9 +898,7 @@ int aq_nic_stop(struct aq_nic_s *self)
905 struct aq_vec_s *aq_vec = NULL; 898 struct aq_vec_s *aq_vec = NULL;
906 unsigned int i = 0U; 899 unsigned int i = 0U;
907 900
908 for (i = 0U, aq_vec = self->aq_vec[0]; 901 netif_tx_disable(self->ndev);
909 self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
910 aq_nic_ndev_queue_stop(self, i);
911 902
912 del_timer_sync(&self->service_timer); 903 del_timer_sync(&self->service_timer);
913 904
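[Editor's note: the reworked fragment loop in aq_nic_map_skb() above maps oversized fragments in AQ_CFG_TX_FRAME_MAX-sized chunks, advancing an offset into the same skb fragment rather than assuming the DMA address can simply be incremented past a mapping's end. A condensed sketch of the chunking logic, with descriptor bookkeeping omitted; frag_dma_map() and queue_tx_descriptor() are hypothetical stand-ins for skb_frag_dma_map() and the ring writes:]

static void map_frag_chunked(const skb_frag_t *frag, unsigned int frag_len)
{
	unsigned int off = 0;

	while (frag_len) {
		unsigned int chunk = min_t(unsigned int, frag_len,
					   AQ_CFG_TX_FRAME_MAX);
		/* map each chunk at its own offset within the fragment */
		dma_addr_t pa = frag_dma_map(frag, off, chunk);	/* hypothetical */

		queue_tx_descriptor(pa, chunk);			/* hypothetical */
		frag_len -= chunk;
		off += chunk;
	}
}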
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 7fc2a5ecb2b7..0ddd556ff901 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -83,8 +83,6 @@ struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
83int aq_nic_init(struct aq_nic_s *self); 83int aq_nic_init(struct aq_nic_s *self);
84int aq_nic_cfg_start(struct aq_nic_s *self); 84int aq_nic_cfg_start(struct aq_nic_s *self);
85int aq_nic_ndev_register(struct aq_nic_s *self); 85int aq_nic_ndev_register(struct aq_nic_s *self);
86void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
87void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
88void aq_nic_ndev_free(struct aq_nic_s *self); 86void aq_nic_ndev_free(struct aq_nic_s *self);
89int aq_nic_start(struct aq_nic_s *self); 87int aq_nic_start(struct aq_nic_s *self);
90int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb); 88int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 4eee1996a825..0654e0c76bc2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -104,6 +104,38 @@ int aq_ring_init(struct aq_ring_s *self)
104 return 0; 104 return 0;
105} 105}
106 106
107static inline bool aq_ring_dx_in_range(unsigned int h, unsigned int i,
108 unsigned int t)
109{
110 return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
111}
112
113void aq_ring_update_queue_state(struct aq_ring_s *ring)
114{
115 if (aq_ring_avail_dx(ring) <= AQ_CFG_SKB_FRAGS_MAX)
116 aq_ring_queue_stop(ring);
117 else if (aq_ring_avail_dx(ring) > AQ_CFG_RESTART_DESC_THRES)
118 aq_ring_queue_wake(ring);
119}
120
121void aq_ring_queue_wake(struct aq_ring_s *ring)
122{
123 struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
124
125 if (__netif_subqueue_stopped(ndev, ring->idx)) {
126 netif_wake_subqueue(ndev, ring->idx);
127 ring->stats.tx.queue_restarts++;
128 }
129}
130
131void aq_ring_queue_stop(struct aq_ring_s *ring)
132{
133 struct net_device *ndev = aq_nic_get_ndev(ring->aq_nic);
134
135 if (!__netif_subqueue_stopped(ndev, ring->idx))
136 netif_stop_subqueue(ndev, ring->idx);
137}
138
107void aq_ring_tx_clean(struct aq_ring_s *self) 139void aq_ring_tx_clean(struct aq_ring_s *self)
108{ 140{
109 struct device *dev = aq_nic_get_dev(self->aq_nic); 141 struct device *dev = aq_nic_get_dev(self->aq_nic);
@@ -113,23 +145,28 @@ void aq_ring_tx_clean(struct aq_ring_s *self)
113 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head]; 145 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
114 146
115 if (likely(buff->is_mapped)) { 147 if (likely(buff->is_mapped)) {
116 if (unlikely(buff->is_sop)) 148 if (unlikely(buff->is_sop)) {
149 if (!buff->is_eop &&
150 buff->eop_index != 0xffffU &&
151 (!aq_ring_dx_in_range(self->sw_head,
152 buff->eop_index,
153 self->hw_head)))
154 break;
155
117 dma_unmap_single(dev, buff->pa, buff->len, 156 dma_unmap_single(dev, buff->pa, buff->len,
118 DMA_TO_DEVICE); 157 DMA_TO_DEVICE);
119 else 158 } else {
120 dma_unmap_page(dev, buff->pa, buff->len, 159 dma_unmap_page(dev, buff->pa, buff->len,
121 DMA_TO_DEVICE); 160 DMA_TO_DEVICE);
161 }
122 } 162 }
123 163
124 if (unlikely(buff->is_eop)) 164 if (unlikely(buff->is_eop))
125 dev_kfree_skb_any(buff->skb); 165 dev_kfree_skb_any(buff->skb);
126 }
127}
128 166
129static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i, 167 buff->pa = 0U;
130 unsigned int t) 168 buff->eop_index = 0xffffU;
131{ 169 }
132 return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
133} 170}
134 171
135#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) 172#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
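[Editor's note: the stop/wake thresholds in aq_ring_update_queue_state() above are deliberately asymmetric: the queue stops once no more than AQ_CFG_SKB_FRAGS_MAX descriptors remain (a worst-case skb might not fit) and is only woken once AQ_CFG_RESTART_DESC_THRES, i.e. twice that, are free again. The gap is the hysteresis that keeps the queue from flapping. In sketch form, with avail(), stop_queue() and wake_queue() as hypothetical stand-ins for the ring helpers:]

#define STOP_THRES	32U			/* AQ_CFG_SKB_FRAGS_MAX      */
#define WAKE_THRES	(2 * STOP_THRES)	/* AQ_CFG_RESTART_DESC_THRES */

static void update_queue_state(void)
{
	if (avail() <= STOP_THRES)
		stop_queue();
	else if (avail() > WAKE_THRES)
		wake_queue();
	/* in between: leave the queue alone (the hysteresis band) */
}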
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 782176c5f4f8..5844078764bd 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -65,7 +65,7 @@ struct __packed aq_ring_buff_s {
65 }; 65 };
66 union { 66 union {
67 struct { 67 struct {
68 u32 len:16; 68 u16 len;
69 u32 is_ip_cso:1; 69 u32 is_ip_cso:1;
70 u32 is_udp_cso:1; 70 u32 is_udp_cso:1;
71 u32 is_tcp_cso:1; 71 u32 is_tcp_cso:1;
@@ -77,8 +77,10 @@ struct __packed aq_ring_buff_s {
77 u32 is_cleaned:1; 77 u32 is_cleaned:1;
78 u32 is_error:1; 78 u32 is_error:1;
79 u32 rsvd3:6; 79 u32 rsvd3:6;
80 u16 eop_index;
81 u16 rsvd4;
80 }; 82 };
81 u32 flags; 83 u64 flags;
82 }; 84 };
83}; 85};
84 86
@@ -94,6 +96,7 @@ struct aq_ring_stats_tx_s {
94 u64 errors; 96 u64 errors;
95 u64 packets; 97 u64 packets;
96 u64 bytes; 98 u64 bytes;
99 u64 queue_restarts;
97}; 100};
98 101
99union aq_ring_stats_s { 102union aq_ring_stats_s {
@@ -147,6 +150,9 @@ struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
147int aq_ring_init(struct aq_ring_s *self); 150int aq_ring_init(struct aq_ring_s *self);
148void aq_ring_rx_deinit(struct aq_ring_s *self); 151void aq_ring_rx_deinit(struct aq_ring_s *self);
149void aq_ring_free(struct aq_ring_s *self); 152void aq_ring_free(struct aq_ring_s *self);
153void aq_ring_update_queue_state(struct aq_ring_s *ring);
154void aq_ring_queue_wake(struct aq_ring_s *ring);
155void aq_ring_queue_stop(struct aq_ring_s *ring);
150void aq_ring_tx_clean(struct aq_ring_s *self); 156void aq_ring_tx_clean(struct aq_ring_s *self);
151int aq_ring_rx_clean(struct aq_ring_s *self, 157int aq_ring_rx_clean(struct aq_ring_s *self,
152 struct napi_struct *napi, 158 struct napi_struct *napi,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index ebf588004c46..305ff8ffac2c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -59,12 +59,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget)
59 if (ring[AQ_VEC_TX_ID].sw_head != 59 if (ring[AQ_VEC_TX_ID].sw_head !=
60 ring[AQ_VEC_TX_ID].hw_head) { 60 ring[AQ_VEC_TX_ID].hw_head) {
61 aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]); 61 aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
62 62 aq_ring_update_queue_state(&ring[AQ_VEC_TX_ID]);
63 if (aq_ring_avail_dx(&ring[AQ_VEC_TX_ID]) >
64 AQ_CFG_SKB_FRAGS_MAX) {
65 aq_nic_ndev_queue_start(self->aq_nic,
66 ring[AQ_VEC_TX_ID].idx);
67 }
68 was_tx_cleaned = true; 63 was_tx_cleaned = true;
69 } 64 }
70 65
@@ -364,6 +359,7 @@ void aq_vec_add_stats(struct aq_vec_s *self,
364 stats_tx->packets += tx->packets; 359 stats_tx->packets += tx->packets;
365 stats_tx->bytes += tx->bytes; 360 stats_tx->bytes += tx->bytes;
366 stats_tx->errors += tx->errors; 361 stats_tx->errors += tx->errors;
362 stats_tx->queue_restarts += tx->queue_restarts;
367 } 363 }
368} 364}
369 365
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
index f3957e930340..fcf89e25a773 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -16,7 +16,7 @@
16 16
17#include "../aq_common.h" 17#include "../aq_common.h"
18 18
19#define HW_ATL_B0_MTU_JUMBO (16000U) 19#define HW_ATL_B0_MTU_JUMBO 16352U
20#define HW_ATL_B0_MTU 1514U 20#define HW_ATL_B0_MTU 1514U
21 21
22#define HW_ATL_B0_TX_RINGS 4U 22#define HW_ATL_B0_TX_RINGS 4U
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 4f5ec9a0fbfb..bf734b32e44b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -351,8 +351,7 @@ int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self)
351 break; 351 break;
352 352
353 default: 353 default:
354 link_status->mbps = 0U; 354 return -EBUSY;
355 break;
356 } 355 }
357 } 356 }
358 357
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index c3c53f6cd9e6..83eec9a8c275 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -432,6 +432,27 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
432 netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n"); 432 netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
433} 433}
434 434
435static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
436 u64 *tx_bytes, u64 *tx_packets)
437{
438 struct bcm_sysport_tx_ring *ring;
439 u64 bytes = 0, packets = 0;
440 unsigned int start;
441 unsigned int q;
442
443 for (q = 0; q < priv->netdev->num_tx_queues; q++) {
444 ring = &priv->tx_rings[q];
445 do {
446 start = u64_stats_fetch_begin_irq(&priv->syncp);
447 bytes = ring->bytes;
448 packets = ring->packets;
449 } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
450
451 *tx_bytes += bytes;
452 *tx_packets += packets;
453 }
454}
455
435static void bcm_sysport_get_stats(struct net_device *dev, 456static void bcm_sysport_get_stats(struct net_device *dev,
436 struct ethtool_stats *stats, u64 *data) 457 struct ethtool_stats *stats, u64 *data)
437{ 458{
@@ -439,11 +460,16 @@ static void bcm_sysport_get_stats(struct net_device *dev,
439 struct bcm_sysport_stats64 *stats64 = &priv->stats64; 460 struct bcm_sysport_stats64 *stats64 = &priv->stats64;
440 struct u64_stats_sync *syncp = &priv->syncp; 461 struct u64_stats_sync *syncp = &priv->syncp;
441 struct bcm_sysport_tx_ring *ring; 462 struct bcm_sysport_tx_ring *ring;
463 u64 tx_bytes = 0, tx_packets = 0;
442 unsigned int start; 464 unsigned int start;
443 int i, j; 465 int i, j;
444 466
445 if (netif_running(dev)) 467 if (netif_running(dev)) {
446 bcm_sysport_update_mib_counters(priv); 468 bcm_sysport_update_mib_counters(priv);
469 bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
470 stats64->tx_bytes = tx_bytes;
471 stats64->tx_packets = tx_packets;
472 }
447 473
448 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) { 474 for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
449 const struct bcm_sysport_stats *s; 475 const struct bcm_sysport_stats *s;
@@ -461,12 +487,13 @@ static void bcm_sysport_get_stats(struct net_device *dev,
461 continue; 487 continue;
462 p += s->stat_offset; 488 p += s->stat_offset;
463 489
464 if (s->stat_sizeof == sizeof(u64)) 490 if (s->stat_sizeof == sizeof(u64) &&
491 s->type == BCM_SYSPORT_STAT_NETDEV64) {
465 do { 492 do {
466 start = u64_stats_fetch_begin_irq(syncp); 493 start = u64_stats_fetch_begin_irq(syncp);
467 data[i] = *(u64 *)p; 494 data[i] = *(u64 *)p;
468 } while (u64_stats_fetch_retry_irq(syncp, start)); 495 } while (u64_stats_fetch_retry_irq(syncp, start));
469 else 496 } else
470 data[i] = *(u32 *)p; 497 data[i] = *(u32 *)p;
471 j++; 498 j++;
472 } 499 }
@@ -1716,27 +1743,12 @@ static void bcm_sysport_get_stats64(struct net_device *dev,
1716{ 1743{
1717 struct bcm_sysport_priv *priv = netdev_priv(dev); 1744 struct bcm_sysport_priv *priv = netdev_priv(dev);
1718 struct bcm_sysport_stats64 *stats64 = &priv->stats64; 1745 struct bcm_sysport_stats64 *stats64 = &priv->stats64;
1719 struct bcm_sysport_tx_ring *ring;
1720 u64 tx_packets = 0, tx_bytes = 0;
1721 unsigned int start; 1746 unsigned int start;
1722 unsigned int q;
1723 1747
1724 netdev_stats_to_stats64(stats, &dev->stats); 1748 netdev_stats_to_stats64(stats, &dev->stats);
1725 1749
1726 for (q = 0; q < dev->num_tx_queues; q++) { 1750 bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
1727 ring = &priv->tx_rings[q]; 1751 &stats->tx_packets);
1728 do {
1729 start = u64_stats_fetch_begin_irq(&priv->syncp);
1730 tx_bytes = ring->bytes;
1731 tx_packets = ring->packets;
1732 } while (u64_stats_fetch_retry_irq(&priv->syncp, start));
1733
1734 stats->tx_bytes += tx_bytes;
1735 stats->tx_packets += tx_packets;
1736 }
1737
1738 stats64->tx_bytes = stats->tx_bytes;
1739 stats64->tx_packets = stats->tx_packets;
1740 1752
1741 do { 1753 do {
1742 start = u64_stats_fetch_begin_irq(&priv->syncp); 1754 start = u64_stats_fetch_begin_irq(&priv->syncp);
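[Editor's note: both call sites now share the kernel's u64_stats seqcount pattern: a reader snapshots the counters and retries if a writer updated them mid-read, which keeps 64-bit counters consistent on 32-bit machines without taking a lock. The reader side, reduced to its core (priv->syncp and the ring counters are assumed to exist as in the driver):]

u64 bytes, packets;
unsigned int start;

do {
	start = u64_stats_fetch_begin_irq(&priv->syncp);
	bytes = ring->bytes;	/* snapshot under the seqcount */
	packets = ring->packets;
} while (u64_stats_fetch_retry_irq(&priv->syncp, start));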
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index ccd699fb2d70..7dd3d131043a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -750,6 +750,10 @@ int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
750{ 750{
751 int rc = 0; 751 int rc = 0;
752 752
753 if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
754 cls_flower->common.chain_index)
755 return -EOPNOTSUPP;
756
753 switch (cls_flower->command) { 757 switch (cls_flower->command) {
754 case TC_CLSFLOWER_REPLACE: 758 case TC_CLSFLOWER_REPLACE:
755 rc = bnxt_tc_add_flow(bp, src_fid, cls_flower); 759 rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index cec94bbb2ea5..8bc126a156e8 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1278,7 +1278,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1278 1278
1279 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0); 1279 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1280 if (ret) 1280 if (ret)
1281 return -ENOMEM; 1281 goto error;
1282 1282
1283 n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE; 1283 n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1284 for (i = 0, j = 0; i < cp->max_cid_space; i++) { 1284 for (i = 0, j = 0; i < cp->max_cid_space; i++) {
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 38c7b21e5d63..ede1876a9a19 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -374,8 +374,8 @@ struct bufdesc_ex {
374#define FEC_ENET_TS_AVAIL ((uint)0x00010000) 374#define FEC_ENET_TS_AVAIL ((uint)0x00010000)
375#define FEC_ENET_TS_TIMER ((uint)0x00008000) 375#define FEC_ENET_TS_TIMER ((uint)0x00008000)
376 376
377#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII | FEC_ENET_TS_TIMER) 377#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII)
378#define FEC_NAPI_IMASK (FEC_ENET_MII | FEC_ENET_TS_TIMER) 378#define FEC_NAPI_IMASK FEC_ENET_MII
379#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) 379#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
380 380
381/* ENET interrupt coalescing macro define */ 381/* ENET interrupt coalescing macro define */
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 56f56d6ada9c..3dc2d771a222 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1559,14 +1559,14 @@ fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
1559 if (int_events == 0) 1559 if (int_events == 0)
1560 return false; 1560 return false;
1561 1561
1562 if (int_events & FEC_ENET_RXF) 1562 if (int_events & FEC_ENET_RXF_0)
1563 fep->work_rx |= (1 << 2); 1563 fep->work_rx |= (1 << 2);
1564 if (int_events & FEC_ENET_RXF_1) 1564 if (int_events & FEC_ENET_RXF_1)
1565 fep->work_rx |= (1 << 0); 1565 fep->work_rx |= (1 << 0);
1566 if (int_events & FEC_ENET_RXF_2) 1566 if (int_events & FEC_ENET_RXF_2)
1567 fep->work_rx |= (1 << 1); 1567 fep->work_rx |= (1 << 1);
1568 1568
1569 if (int_events & FEC_ENET_TXF) 1569 if (int_events & FEC_ENET_TXF_0)
1570 fep->work_tx |= (1 << 2); 1570 fep->work_tx |= (1 << 2);
1571 if (int_events & FEC_ENET_TXF_1) 1571 if (int_events & FEC_ENET_TXF_1)
1572 fep->work_tx |= (1 << 0); 1572 fep->work_tx |= (1 << 0);
@@ -1604,8 +1604,8 @@ fec_enet_interrupt(int irq, void *dev_id)
1604 } 1604 }
1605 1605
1606 if (fep->ptp_clock) 1606 if (fep->ptp_clock)
1607 fec_ptp_check_pps_event(fep); 1607 if (fec_ptp_check_pps_event(fep))
1608 1608 ret = IRQ_HANDLED;
1609 return ret; 1609 return ret;
1610} 1610}
1611 1611
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index 59efbd605416..5bcb2238acb2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -37,20 +37,15 @@ static bool hnae3_client_match(enum hnae3_client_type client_type,
37} 37}
38 38
39static int hnae3_match_n_instantiate(struct hnae3_client *client, 39static int hnae3_match_n_instantiate(struct hnae3_client *client,
40 struct hnae3_ae_dev *ae_dev, 40 struct hnae3_ae_dev *ae_dev, bool is_reg)
41 bool is_reg, bool *matched)
42{ 41{
43 int ret; 42 int ret;
44 43
45 *matched = false;
46
47 /* check if this client matches the type of ae_dev */ 44 /* check if this client matches the type of ae_dev */
48 if (!(hnae3_client_match(client->type, ae_dev->dev_type) && 45 if (!(hnae3_client_match(client->type, ae_dev->dev_type) &&
49 hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) { 46 hnae_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
50 return 0; 47 return 0;
51 } 48 }
52 /* there is a match of client and dev */
53 *matched = true;
54 49
55 /* now, (un-)instantiate client by calling lower layer */ 50 /* now, (un-)instantiate client by calling lower layer */
56 if (is_reg) { 51 if (is_reg) {
@@ -69,7 +64,6 @@ int hnae3_register_client(struct hnae3_client *client)
69{ 64{
70 struct hnae3_client *client_tmp; 65 struct hnae3_client *client_tmp;
71 struct hnae3_ae_dev *ae_dev; 66 struct hnae3_ae_dev *ae_dev;
72 bool matched;
73 int ret = 0; 67 int ret = 0;
74 68
75 mutex_lock(&hnae3_common_lock); 69 mutex_lock(&hnae3_common_lock);
@@ -86,7 +80,7 @@ int hnae3_register_client(struct hnae3_client *client)
86 /* if the client could not be initialized on current port, for 80 /* if the client could not be initialized on current port, for
87 * any error reasons, move on to next available port 81 * any error reasons, move on to next available port
88 */ 82 */
89 ret = hnae3_match_n_instantiate(client, ae_dev, true, &matched); 83 ret = hnae3_match_n_instantiate(client, ae_dev, true);
90 if (ret) 84 if (ret)
91 dev_err(&ae_dev->pdev->dev, 85 dev_err(&ae_dev->pdev->dev,
92 "match and instantiation failed for port\n"); 86 "match and instantiation failed for port\n");
@@ -102,12 +96,11 @@ EXPORT_SYMBOL(hnae3_register_client);
102void hnae3_unregister_client(struct hnae3_client *client) 96void hnae3_unregister_client(struct hnae3_client *client)
103{ 97{
104 struct hnae3_ae_dev *ae_dev; 98 struct hnae3_ae_dev *ae_dev;
105 bool matched;
106 99
107 mutex_lock(&hnae3_common_lock); 100 mutex_lock(&hnae3_common_lock);
108 /* un-initialize the client on every matched port */ 101 /* un-initialize the client on every matched port */
109 list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) { 102 list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
110 hnae3_match_n_instantiate(client, ae_dev, false, &matched); 103 hnae3_match_n_instantiate(client, ae_dev, false);
111 } 104 }
112 105
113 list_del(&client->node); 106 list_del(&client->node);
@@ -124,7 +117,6 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
124 const struct pci_device_id *id; 117 const struct pci_device_id *id;
125 struct hnae3_ae_dev *ae_dev; 118 struct hnae3_ae_dev *ae_dev;
126 struct hnae3_client *client; 119 struct hnae3_client *client;
127 bool matched;
128 int ret = 0; 120 int ret = 0;
129 121
130 mutex_lock(&hnae3_common_lock); 122 mutex_lock(&hnae3_common_lock);
@@ -151,13 +143,10 @@ int hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
151 * initialize the figure out client instance 143 * initialize the figure out client instance
152 */ 144 */
153 list_for_each_entry(client, &hnae3_client_list, node) { 145 list_for_each_entry(client, &hnae3_client_list, node) {
154 ret = hnae3_match_n_instantiate(client, ae_dev, true, 146 ret = hnae3_match_n_instantiate(client, ae_dev, true);
155 &matched);
156 if (ret) 147 if (ret)
157 dev_err(&ae_dev->pdev->dev, 148 dev_err(&ae_dev->pdev->dev,
158 "match and instantiation failed\n"); 149 "match and instantiation failed\n");
159 if (matched)
160 break;
161 } 150 }
162 } 151 }
163 152
@@ -175,7 +164,6 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
175 const struct pci_device_id *id; 164 const struct pci_device_id *id;
176 struct hnae3_ae_dev *ae_dev; 165 struct hnae3_ae_dev *ae_dev;
177 struct hnae3_client *client; 166 struct hnae3_client *client;
178 bool matched;
179 167
180 mutex_lock(&hnae3_common_lock); 168 mutex_lock(&hnae3_common_lock);
181 /* Check if there are matched ae_dev */ 169 /* Check if there are matched ae_dev */
@@ -187,12 +175,8 @@ void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
187 /* check the client list for the match with this ae_dev type and 175 /* check the client list for the match with this ae_dev type and
188 * un-initialize the figure out client instance 176 * un-initialize the figure out client instance
189 */ 177 */
190 list_for_each_entry(client, &hnae3_client_list, node) { 178 list_for_each_entry(client, &hnae3_client_list, node)
191 hnae3_match_n_instantiate(client, ae_dev, false, 179 hnae3_match_n_instantiate(client, ae_dev, false);
192 &matched);
193 if (matched)
194 break;
195 }
196 180
197 ae_algo->ops->uninit_ae_dev(ae_dev); 181 ae_algo->ops->uninit_ae_dev(ae_dev);
198 hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); 182 hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
@@ -212,7 +196,6 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
212 const struct pci_device_id *id; 196 const struct pci_device_id *id;
213 struct hnae3_ae_algo *ae_algo; 197 struct hnae3_ae_algo *ae_algo;
214 struct hnae3_client *client; 198 struct hnae3_client *client;
215 bool matched;
216 int ret = 0; 199 int ret = 0;
217 200
218 mutex_lock(&hnae3_common_lock); 201 mutex_lock(&hnae3_common_lock);
@@ -246,13 +229,10 @@ int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
246 * initialize the figure out client instance 229 * initialize the figure out client instance
247 */ 230 */
248 list_for_each_entry(client, &hnae3_client_list, node) { 231 list_for_each_entry(client, &hnae3_client_list, node) {
249 ret = hnae3_match_n_instantiate(client, ae_dev, true, 232 ret = hnae3_match_n_instantiate(client, ae_dev, true);
250 &matched);
251 if (ret) 233 if (ret)
252 dev_err(&ae_dev->pdev->dev, 234 dev_err(&ae_dev->pdev->dev,
253 "match and instantiation failed\n"); 235 "match and instantiation failed\n");
254 if (matched)
255 break;
256 } 236 }
257 237
258out_err: 238out_err:
@@ -270,7 +250,6 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
270 const struct pci_device_id *id; 250 const struct pci_device_id *id;
271 struct hnae3_ae_algo *ae_algo; 251 struct hnae3_ae_algo *ae_algo;
272 struct hnae3_client *client; 252 struct hnae3_client *client;
273 bool matched;
274 253
275 mutex_lock(&hnae3_common_lock); 254 mutex_lock(&hnae3_common_lock);
276 /* Check if there are matched ae_algo */ 255 /* Check if there are matched ae_algo */
@@ -279,12 +258,8 @@ void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
279 if (!id) 258 if (!id)
280 continue; 259 continue;
281 260
282 list_for_each_entry(client, &hnae3_client_list, node) { 261 list_for_each_entry(client, &hnae3_client_list, node)
283 hnae3_match_n_instantiate(client, ae_dev, false, 262 hnae3_match_n_instantiate(client, ae_dev, false);
284 &matched);
285 if (matched)
286 break;
287 }
288 263
289 ae_algo->ops->uninit_ae_dev(ae_dev); 264 ae_algo->ops->uninit_ae_dev(ae_dev);
290 hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0); 265 hnae_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index b2f28ae81273..1a01cadfe5f3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -49,7 +49,17 @@
49#define HNAE3_CLASS_NAME_SIZE 16 49#define HNAE3_CLASS_NAME_SIZE 16
50 50
51#define HNAE3_DEV_INITED_B 0x0 51#define HNAE3_DEV_INITED_B 0x0
52#define HNAE_DEV_SUPPORT_ROCE_B 0x1 52#define HNAE3_DEV_SUPPORT_ROCE_B 0x1
53#define HNAE3_DEV_SUPPORT_DCB_B 0x2
54
55#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
56 BIT(HNAE3_DEV_SUPPORT_ROCE_B))
57
58#define hnae3_dev_roce_supported(hdev) \
59 hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
60
61#define hnae3_dev_dcb_supported(hdev) \
62 hnae_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
53 63
54#define ring_ptr_move_fw(ring, p) \ 64#define ring_ptr_move_fw(ring, p) \
55 ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) 65 ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
@@ -366,12 +376,12 @@ struct hnae3_ae_algo {
366struct hnae3_tc_info { 376struct hnae3_tc_info {
367 u16 tqp_offset; /* TQP offset from base TQP */ 377 u16 tqp_offset; /* TQP offset from base TQP */
368 u16 tqp_count; /* Total TQPs */ 378 u16 tqp_count; /* Total TQPs */
369 u8 up; /* user priority */
370 u8 tc; /* TC index */ 379 u8 tc; /* TC index */
 371 bool enable; /* If this TC is enabled or not */ 380 bool enable; /* If this TC is enabled or not */
372}; 381};
373 382
374#define HNAE3_MAX_TC 8 383#define HNAE3_MAX_TC 8
384#define HNAE3_MAX_USER_PRIO 8
375struct hnae3_knic_private_info { 385struct hnae3_knic_private_info {
376 struct net_device *netdev; /* Set by KNIC client when init instance */ 386 struct net_device *netdev; /* Set by KNIC client when init instance */
377 u16 rss_size; /* Allocated RSS queues */ 387 u16 rss_size; /* Allocated RSS queues */
@@ -379,6 +389,7 @@ struct hnae3_knic_private_info {
379 u16 num_desc; 389 u16 num_desc;
380 390
381 u8 num_tc; /* Total number of enabled TCs */ 391 u8 num_tc; /* Total number of enabled TCs */
392 u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
382 struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */ 393 struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */
383 394
384 u16 num_tqps; /* total number of TQPs in this handle */ 395 u16 num_tqps; /* total number of TQPs in this handle */
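[Editor's note: the new flag bits fold device capabilities into ae_dev->flag, so callers test support with a single bit check instead of consulting a second PCI ID table. Typical usage, as seen later in hclge_main.c; the helpers on the right are hypothetical:]

if (hnae3_dev_roce_supported(hdev))
	setup_roce_vectors(hdev);	/* hypothetical: carve out RoCE MSI-X */

if (!hnae3_dev_dcb_supported(hdev))
	return use_shared_rx_buffer(hdev);	/* hypothetical: skip per-TC bufs */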
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 91ae0135ee50..758cf3948131 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -238,7 +238,7 @@ struct hclge_tqp_map {
238 u8 rsv[18]; 238 u8 rsv[18];
239}; 239};
240 240
241#define HCLGE_VECTOR_ELEMENTS_PER_CMD 11 241#define HCLGE_VECTOR_ELEMENTS_PER_CMD 10
242 242
243enum hclge_int_type { 243enum hclge_int_type {
244 HCLGE_INT_TX, 244 HCLGE_INT_TX,
@@ -252,8 +252,12 @@ struct hclge_ctrl_vector_chain {
252#define HCLGE_INT_TYPE_S 0 252#define HCLGE_INT_TYPE_S 0
253#define HCLGE_INT_TYPE_M 0x3 253#define HCLGE_INT_TYPE_M 0x3
254#define HCLGE_TQP_ID_S 2 254#define HCLGE_TQP_ID_S 2
255#define HCLGE_TQP_ID_M (0x3fff << HCLGE_TQP_ID_S) 255#define HCLGE_TQP_ID_M (0x7ff << HCLGE_TQP_ID_S)
256#define HCLGE_INT_GL_IDX_S 13
257#define HCLGE_INT_GL_IDX_M (0x3 << HCLGE_INT_GL_IDX_S)
256 __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD]; 258 __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
259 u8 vfid;
260 u8 rsv;
257}; 261};
258 262
259#define HCLGE_TC_NUM 8 263#define HCLGE_TC_NUM 8
@@ -266,7 +270,8 @@ struct hclge_tx_buff_alloc {
266 270
267struct hclge_rx_priv_buff { 271struct hclge_rx_priv_buff {
268 __le16 buf_num[HCLGE_TC_NUM]; 272 __le16 buf_num[HCLGE_TC_NUM];
269 u8 rsv[8]; 273 __le16 shared_buf;
274 u8 rsv[6];
270}; 275};
271 276
272struct hclge_query_version { 277struct hclge_query_version {
@@ -684,6 +689,7 @@ struct hclge_reset_tqp_queue {
684#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */ 689#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
685#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ 690#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
686#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ 691#define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */
692#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */
687 693
688#define HCLGE_TYPE_CRQ 0 694#define HCLGE_TYPE_CRQ 0
689#define HCLGE_TYPE_CSQ 1 695#define HCLGE_TYPE_CSQ 1
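[Editor's note: with the narrower TQP ID field, each __le16 entry in tqp_type_and_id now packs three subfields: interrupt type in bits 1:0, queue ID in bits 12:2 and the GL index in bits 14:13. A plain shift-and-mask sketch of the encoding using the masks above (the driver itself goes through its hnae field helpers):]

static u16 encode_vector_chain_entry(u16 int_type, u16 tqp_id, u16 gl_idx)
{
	u16 v = 0;

	v |= (int_type << HCLGE_INT_TYPE_S) & HCLGE_INT_TYPE_M;
	v |= (tqp_id << HCLGE_TQP_ID_S) & HCLGE_TQP_ID_M;
	v |= (gl_idx << HCLGE_INT_GL_IDX_S) & HCLGE_INT_GL_IDX_M;

	return v;	/* stored as cpu_to_le16(v) in the command */
}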
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index bb45365fb817..c1cdbfd83bdb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -46,17 +46,7 @@ static const struct pci_device_id ae_algo_pci_tbl[] = {
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0}, 46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0}, 47 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0}, 48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
49 /* Required last entry */ 49 /* required last entry */
50 {0, }
51};
52
53static const struct pci_device_id roce_pci_tbl[] = {
54 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
55 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
56 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
57 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
58 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
59 /* Required last entry */
60 {0, } 50 {0, }
61}; 51};
62 52
@@ -894,7 +884,7 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
894 hdev->num_tqps = __le16_to_cpu(req->tqp_num); 884 hdev->num_tqps = __le16_to_cpu(req->tqp_num);
895 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; 885 hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
896 886
897 if (hnae_get_bit(hdev->ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B)) { 887 if (hnae3_dev_roce_supported(hdev)) {
898 hdev->num_roce_msix = 888 hdev->num_roce_msix =
899 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number), 889 hnae_get_field(__le16_to_cpu(req->pf_intr_vector_number),
900 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S); 890 HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
@@ -1063,9 +1053,9 @@ static int hclge_configure(struct hclge_dev *hdev)
1063 hdev->base_tqp_pid = 0; 1053 hdev->base_tqp_pid = 0;
1064 hdev->rss_size_max = 1; 1054 hdev->rss_size_max = 1;
1065 hdev->rx_buf_len = cfg.rx_buf_len; 1055 hdev->rx_buf_len = cfg.rx_buf_len;
1066 for (i = 0; i < ETH_ALEN; i++) 1056 ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
1067 hdev->hw.mac.mac_addr[i] = cfg.mac_addr[i];
1068 hdev->hw.mac.media_type = cfg.media_type; 1057 hdev->hw.mac.media_type = cfg.media_type;
1058 hdev->hw.mac.phy_addr = cfg.phy_addr;
1069 hdev->num_desc = cfg.tqp_desc_num; 1059 hdev->num_desc = cfg.tqp_desc_num;
1070 hdev->tm_info.num_pg = 1; 1060 hdev->tm_info.num_pg = 1;
1071 hdev->tm_info.num_tc = cfg.tc_num; 1061 hdev->tm_info.num_tc = cfg.tc_num;
@@ -1454,7 +1444,11 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, u32 rx_all)
1454 tc_num = hclge_get_tc_num(hdev); 1444 tc_num = hclge_get_tc_num(hdev);
1455 pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); 1445 pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
1456 1446
1457 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; 1447 if (hnae3_dev_dcb_supported(hdev))
1448 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
1449 else
1450 shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
1451
1458 shared_buf_tc = pfc_enable_num * hdev->mps + 1452 shared_buf_tc = pfc_enable_num * hdev->mps +
1459 (tc_num - pfc_enable_num) * hdev->mps / 2 + 1453 (tc_num - pfc_enable_num) * hdev->mps / 2 +
1460 hdev->mps; 1454 hdev->mps;
@@ -1495,6 +1489,16 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
1495 struct hclge_priv_buf *priv; 1489 struct hclge_priv_buf *priv;
1496 int i; 1490 int i;
1497 1491
1492 /* When DCB is not supported, rx private
1493 * buffer is not allocated.
1494 */
1495 if (!hnae3_dev_dcb_supported(hdev)) {
1496 if (!hclge_is_rx_buf_ok(hdev, rx_all))
1497 return -ENOMEM;
1498
1499 return 0;
1500 }
1501
1498 /* step 1, try to alloc private buffer for all enabled tc */ 1502 /* step 1, try to alloc private buffer for all enabled tc */
1499 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1503 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1500 priv = &hdev->priv_buf[i]; 1504 priv = &hdev->priv_buf[i];
@@ -1510,6 +1514,11 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
1510 priv->wl.high = 2 * hdev->mps; 1514 priv->wl.high = 2 * hdev->mps;
1511 priv->buf_size = priv->wl.high; 1515 priv->buf_size = priv->wl.high;
1512 } 1516 }
1517 } else {
1518 priv->enable = 0;
1519 priv->wl.low = 0;
1520 priv->wl.high = 0;
1521 priv->buf_size = 0;
1513 } 1522 }
1514 } 1523 }
1515 1524
@@ -1522,8 +1531,15 @@ int hclge_rx_buffer_calc(struct hclge_dev *hdev, u32 tx_size)
1522 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { 1531 for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
1523 priv = &hdev->priv_buf[i]; 1532 priv = &hdev->priv_buf[i];
1524 1533
1525 if (hdev->hw_tc_map & BIT(i)) 1534 priv->enable = 0;
1526 priv->enable = 1; 1535 priv->wl.low = 0;
1536 priv->wl.high = 0;
1537 priv->buf_size = 0;
1538
1539 if (!(hdev->hw_tc_map & BIT(i)))
1540 continue;
1541
1542 priv->enable = 1;
1527 1543
1528 if (hdev->tm_info.hw_pfc_map & BIT(i)) { 1544 if (hdev->tm_info.hw_pfc_map & BIT(i)) {
1529 priv->wl.low = 128; 1545 priv->wl.low = 128;
@@ -1616,6 +1632,10 @@ static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev)
1616 cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B); 1632 cpu_to_le16(true << HCLGE_TC0_PRI_BUF_EN_B);
1617 } 1633 }
1618 1634
1635 req->shared_buf =
1636 cpu_to_le16((hdev->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
1637 (1 << HCLGE_TC0_PRI_BUF_EN_B));
1638
1619 ret = hclge_cmd_send(&hdev->hw, &desc, 1); 1639 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1620 if (ret) { 1640 if (ret) {
1621 dev_err(&hdev->pdev->dev, 1641 dev_err(&hdev->pdev->dev,
@@ -1782,18 +1802,22 @@ int hclge_buffer_alloc(struct hclge_dev *hdev)
 		return ret;
 	}

-	ret = hclge_rx_priv_wl_config(hdev);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"could not configure rx private waterline %d\n", ret);
-		return ret;
-	}
+	if (hnae3_dev_dcb_supported(hdev)) {
+		ret = hclge_rx_priv_wl_config(hdev);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"could not configure rx private waterline %d\n",
+				ret);
+			return ret;
+		}

-	ret = hclge_common_thrd_config(hdev);
-	if (ret) {
-		dev_err(&hdev->pdev->dev,
-			"could not configure common threshold %d\n", ret);
-		return ret;
+		ret = hclge_common_thrd_config(hdev);
+		if (ret) {
+			dev_err(&hdev->pdev->dev,
+				"could not configure common threshold %d\n",
+				ret);
+			return ret;
+		}
 	}

 	ret = hclge_common_wl_config(hdev);
@@ -2582,6 +2606,7 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
 	u16 tc_valid[HCLGE_MAX_TC_NUM];
 	u16 tc_size[HCLGE_MAX_TC_NUM];
 	u32 *rss_indir = NULL;
+	u16 rss_size = 0, roundup_size;
 	const u8 *key;
 	int i, ret, j;

@@ -2596,7 +2621,13 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
 	for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
 		for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++) {
 			vport[j].rss_indirection_tbl[i] =
-				i % hdev->rss_size_max;
+				i % vport[j].alloc_rss_size;
+
+			/* vport 0 is for PF */
+			if (j != 0)
+				continue;
+
+			rss_size = vport[j].alloc_rss_size;
 			rss_indir[i] = vport[j].rss_indirection_tbl[i];
 		}
 	}
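
Each entry of the per-vport indirection table now wraps on the vport's own alloc_rss_size rather than the global rss_size_max, so hashes can only land on queues the vport actually owns. A minimal sketch of the fill (the table length of 512 is an assumption for illustration; HCLGE_RSS_IND_TBL_SIZE is not spelled out in this diff):

	#include <linux/types.h>

	#define IND_TBL_SIZE	512	/* assumed value, for illustration */

	static void fill_indir_tbl(u8 *tbl, u16 rss_size)
	{
		int i;

		/* entries cycle 0, 1, ..., rss_size - 1, 0, 1, ... */
		for (i = 0; i < IND_TBL_SIZE; i++)
			tbl[i] = i % rss_size;
	}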
@@ -2613,42 +2644,32 @@ static int hclge_rss_init_hw(struct hclge_dev *hdev)
 	if (ret)
 		goto err;

+	/* Each TC have the same queue size, and tc_size set to hardware is
+	 * the log2 of roundup power of two of rss_size, the acutal queue
+	 * size is limited by indirection table.
+	 */
+	if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
+		dev_err(&hdev->pdev->dev,
+			"Configure rss tc size failed, invalid TC_SIZE = %d\n",
+			rss_size);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	roundup_size = roundup_pow_of_two(rss_size);
+	roundup_size = ilog2(roundup_size);
+
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
-		if (hdev->hw_tc_map & BIT(i))
-			tc_valid[i] = 1;
-		else
-			tc_valid[i] = 0;
+		tc_valid[i] = 0;

-		switch (hdev->rss_size_max) {
-		case HCLGE_RSS_TC_SIZE_0:
-			tc_size[i] = 0;
-			break;
-		case HCLGE_RSS_TC_SIZE_1:
-			tc_size[i] = 1;
-			break;
-		case HCLGE_RSS_TC_SIZE_2:
-			tc_size[i] = 2;
-			break;
-		case HCLGE_RSS_TC_SIZE_3:
-			tc_size[i] = 3;
-			break;
-		case HCLGE_RSS_TC_SIZE_4:
-			tc_size[i] = 4;
-			break;
-		case HCLGE_RSS_TC_SIZE_5:
-			tc_size[i] = 5;
-			break;
-		case HCLGE_RSS_TC_SIZE_6:
-			tc_size[i] = 6;
-			break;
-		case HCLGE_RSS_TC_SIZE_7:
-			tc_size[i] = 7;
-			break;
-		default:
-			break;
-		}
-		tc_offset[i] = hdev->rss_size_max * i;
+		if (!(hdev->hw_tc_map & BIT(i)))
+			continue;
+
+		tc_valid[i] = 1;
+		tc_size[i] = roundup_size;
+		tc_offset[i] = rss_size * i;
 	}
+
 	ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);

 err:
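
The switch on rss_size_max is gone: tc_size is simply log2 of rss_size rounded up to a power of two, which is the encoding the TC_SIZE field expects, while the indirection table caps the effective queue count. For instance rss_size = 24 rounds up to 32, so tc_size = 5 even though only queues 0 to 23 are ever referenced. A plain C sketch of the same computation:

	/* same result as roundup_pow_of_two() followed by ilog2() */
	static unsigned int tc_size_of(unsigned int rss_size)
	{
		unsigned int pow = 1, log = 0;

		while (pow < rss_size) {
			pow <<= 1;
			log++;
		}
		return log;	/* e.g. 24 -> 32 -> 5 */
	}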
@@ -2679,7 +2700,11 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
 			      hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
 		hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
 			       HCLGE_TQP_ID_S, node->tqp_index);
+		hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+			       HCLGE_INT_GL_IDX_S,
+			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
 		req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+		req->vfid = vport->vport_id;

 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -2763,8 +2788,12 @@ static int hclge_unmap_ring_from_vector(
 			      hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
 		hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
 			       HCLGE_TQP_ID_S, node->tqp_index);
+		hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+			       HCLGE_INT_GL_IDX_S,
+			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));

 		req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+		req->vfid = vport->vport_id;

 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -2778,7 +2807,7 @@ static int hclge_unmap_ring_from_vector(
 			}
 			i = 0;
 			hclge_cmd_setup_basic_desc(&desc,
-						   HCLGE_OPC_ADD_RING_TO_VECTOR,
+						   HCLGE_OPC_DEL_RING_TO_VECTOR,
 						   false);
 			req->int_vector_id = vector_id;
 		}
@@ -3665,6 +3694,7 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)
 {
 #define HCLGE_VLAN_TYPE_VF_TABLE   0
 #define HCLGE_VLAN_TYPE_PORT_TABLE 1
+	struct hnae3_handle *handle;
 	int ret;

 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_VF_TABLE,
@@ -3674,8 +3704,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev)

 	ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_VLAN_TYPE_PORT_TABLE,
 					 true);
+	if (ret)
+		return ret;

-	return ret;
+	handle = &hdev->vport[0].nic;
+	return hclge_set_port_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
 }

 static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
@@ -3920,8 +3953,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,
 				goto err;

 			if (hdev->roce_client &&
-			    hnae_get_bit(hdev->ae_dev->flag,
-					 HNAE_DEV_SUPPORT_ROCE_B)) {
+			    hnae3_dev_roce_supported(hdev)) {
 				struct hnae3_client *rc = hdev->roce_client;

 				ret = hclge_init_roce_base_info(vport);
@@ -3944,8 +3976,7 @@ static int hclge_init_client_instance(struct hnae3_client *client,

 			break;
 		case HNAE3_CLIENT_ROCE:
-			if (hnae_get_bit(hdev->ae_dev->flag,
-					 HNAE_DEV_SUPPORT_ROCE_B)) {
+			if (hnae3_dev_roce_supported(hdev)) {
 				hdev->roce_client = client;
 				vport->roce.client = client;
 			}
@@ -4057,7 +4088,6 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
 static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 {
 	struct pci_dev *pdev = ae_dev->pdev;
-	const struct pci_device_id *id;
 	struct hclge_dev *hdev;
 	int ret;

@@ -4072,10 +4102,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 	hdev->ae_dev = ae_dev;
 	ae_dev->priv = hdev;

-	id = pci_match_id(roce_pci_tbl, ae_dev->pdev);
-	if (id)
-		hnae_set_bit(ae_dev->flag, HNAE_DEV_SUPPORT_ROCE_B, 1);
-
 	ret = hclge_pci_init(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "PCI init failed\n");
@@ -4138,12 +4164,6 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}

-	ret = hclge_rss_init_hw(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
-		return ret;
-	}
-
 	ret = hclge_init_vlan_config(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -4156,6 +4176,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}

+	ret = hclge_rss_init_hw(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
+		return ret;
+	}
+
 	setup_timer(&hdev->service_timer, hclge_service_timer,
 		    (unsigned long)hdev);
 	INIT_WORK(&hdev->service_task, hclge_service_task);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index edb10ad075eb..9fcfd9395424 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -176,7 +176,6 @@ struct hclge_pg_info {
 struct hclge_tc_info {
 	u8 tc_id;
 	u8 tc_sch_mode;		/* 0: sp; 1: dwrr */
-	u8 up;
 	u8 pgid;
 	u32 bw_limit;
 };
@@ -197,6 +196,7 @@ struct hclge_tm_info {
 	u8 num_tc;
 	u8 num_pg;	/* It must be 1 if vNET-Base schd */
 	u8 pg_dwrr[HCLGE_PG_NUM];
+	u8 prio_tc[HNAE3_MAX_USER_PRIO];
 	struct hclge_pg_info pg_info[HCLGE_PG_NUM];
 	struct hclge_tc_info tc_info[HNAE3_MAX_TC];
 	enum hclge_fc_mode fc_mode;
@@ -477,6 +477,7 @@ struct hclge_vport {
 	u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
 	/* User configured lookup table entries */
 	u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
+	u16 alloc_rss_size;

 	u16 qs_offset;
 	u16 bw_limit;		/* VSI BW Limit (0 = disabled) */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 1c577d268f00..73a75d7cc551 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -128,9 +128,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
 {
 	u8 tc;

-	for (tc = 0; tc < hdev->tm_info.num_tc; tc++)
-		if (hdev->tm_info.tc_info[tc].up == pri_id)
-			break;
+	tc = hdev->tm_info.prio_tc[pri_id];

 	if (tc >= hdev->tm_info.num_tc)
 		return -EINVAL;
@@ -158,7 +156,7 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)

 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

-	for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) {
+	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
 		ret = hclge_fill_pri_array(hdev, pri, pri_id);
 		if (ret)
 			return ret;
@@ -280,11 +278,11 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,

 	shap_cfg_cmd->pg_id = pg_id;

-	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
-	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
-	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
-	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
-	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
+	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
+	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
+	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
+	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
+	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);

 	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
@@ -307,11 +305,11 @@ static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,

 	shap_cfg_cmd->pri_id = pri_id;

-	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
-	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
-	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
-	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
-	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
+	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
+	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
+	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
+	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
+	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);

 	return hclge_cmd_send(&hdev->hw, &desc, 1);
 }
@@ -397,6 +395,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 				kinfo->num_tqps / kinfo->num_tc);
 	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
 	vport->dwrr = 100;  /* 100 percent as init */
+	vport->alloc_rss_size = kinfo->rss_size;

 	for (i = 0; i < kinfo->num_tc; i++) {
 		if (hdev->hw_tc_map & BIT(i)) {
@@ -404,16 +403,17 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
 			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
 			kinfo->tc_info[i].tc = i;
-			kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up;
 		} else {
 			/* Set to default queue if TC is disable */
 			kinfo->tc_info[i].enable = false;
 			kinfo->tc_info[i].tqp_offset = 0;
 			kinfo->tc_info[i].tqp_count = 1;
 			kinfo->tc_info[i].tc = 0;
-			kinfo->tc_info[i].up = 0;
 		}
 	}
+
+	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
+	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
 }

 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
@@ -435,12 +435,15 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
 		hdev->tm_info.tc_info[i].tc_id = i;
 		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
-		hdev->tm_info.tc_info[i].up = i;
 		hdev->tm_info.tc_info[i].pgid = 0;
 		hdev->tm_info.tc_info[i].bw_limit =
 			hdev->tm_info.pg_info[0].bw_limit;
 	}

+	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+		hdev->tm_info.prio_tc[i] =
+			(i >= hdev->tm_info.num_tc) ? 0 : i;
+
 	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
 }

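
prio_tc replaces the per-TC 'up' field with a direct user-priority-to-TC table. The default is identity below num_tc and TC 0 above it: with num_tc = 4, priorities 0 to 3 map to TCs 0 to 3 and priorities 4 to 7 fall back to TC 0, which is what makes the constant-time lookup in hclge_fill_pri_array() above safe. In isolation (8 user priorities assumed, matching how HNAE3_MAX_USER_PRIO is used here):

	#include <linux/types.h>

	#define MAX_USER_PRIO	8	/* assumed to equal HNAE3_MAX_USER_PRIO */

	static void init_prio_tc(u8 *prio_tc, u8 num_tc)
	{
		int i;

		for (i = 0; i < MAX_USER_PRIO; i++)
			prio_tc[i] = (i >= num_tc) ? 0 : i;
	}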
@@ -976,6 +979,10 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev)
 	if (ret)
 		return ret;

+	/* Only DCB-supported dev supports qset back pressure setting */
+	if (!hnae3_dev_dcb_supported(hdev))
+		return 0;
+
 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
 		ret = hclge_tm_qs_bp_cfg(hdev, i);
 		if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 7e67337dfaf2..85158b0d73fe 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -94,10 +94,10 @@ struct hclge_bp_to_qs_map_cmd {
 	u32 rsvd1;
 };

-#define hclge_tm_set_feild(dest, string, val) \
+#define hclge_tm_set_field(dest, string, val) \
 	hnae_set_field((dest), (HCLGE_TM_SHAP_##string##_MSK), \
 		       (HCLGE_TM_SHAP_##string##_LSH), val)
-#define hclge_tm_get_feild(src, string) \
+#define hclge_tm_get_field(src, string) \
 	hnae_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
 		       (HCLGE_TM_SHAP_##string##_LSH))

diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
index 1c3e29447891..35369e1c8036 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c
@@ -41,11 +41,16 @@ static struct hnae3_client client;
 static const struct pci_device_id hns3_pci_tbl[] = {
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
 	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
-	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
+	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
+	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
+	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
+	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
+	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
 	/* required last entry */
 	{0, }
 };
@@ -1348,6 +1353,7 @@ static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	}

 	ae_dev->pdev = pdev;
+	ae_dev->flag = ent->driver_data;
 	ae_dev->dev_type = HNAE3_DEV_KNIC;
 	pci_set_drvdata(pdev, ae_dev);

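
The RoCE/DCB capability bits now ride in the PCI table's driver_data and are latched into ae_dev->flag at probe time, which is what lets the pci_match_id() lookup be dropped from hclge_main.c earlier in this diff. This is the stock driver_data idiom; a hypothetical sketch (device IDs and capability bits invented for illustration):

	#include <linux/pci.h>
	#include <linux/bitops.h>

	static const struct pci_device_id demo_pci_tbl[] = {
		{ PCI_VDEVICE(HUAWEI, 0xA220), 0 },			/* no extra caps */
		{ PCI_VDEVICE(HUAWEI, 0xA221), BIT(0) | BIT(1) },	/* e.g. RoCE | DCB */
		{ 0, }
	};

	static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	{
		unsigned long caps = ent->driver_data;	/* bits from the matched entry */

		/* stash caps wherever the driver keeps per-device flags */
		return 0;
	}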
@@ -2705,10 +2711,11 @@ static void hns3_init_mac_addr(struct net_device *netdev)
 		eth_hw_addr_random(netdev);
 		dev_warn(priv->dev, "using random MAC address %pM\n",
 			 netdev->dev_addr);
-		/* Also copy this new MAC address into hdev */
-		if (h->ae_algo->ops->set_mac_addr)
-			h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
 	}
+
+	if (h->ae_algo->ops->set_mac_addr)
+		h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
+
 }

 static void hns3_nic_set_priv_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 2c74baa2398a..fff09dcf9e34 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -402,7 +402,7 @@ static int mal_poll(struct napi_struct *napi, int budget)
 	unsigned long flags;

 	MAL_DBG2(mal, "poll(%d)" NL, budget);
- again:
+
 	/* Process TX skbs */
 	list_for_each(l, &mal->poll_list) {
 		struct mal_commac *mc =
@@ -451,7 +451,6 @@ static int mal_poll(struct napi_struct *napi, int budget)
 			spin_lock_irqsave(&mal->lock, flags);
 			mal_disable_eob_irq(mal);
 			spin_unlock_irqrestore(&mal->lock, flags);
-			goto again;
 		}
 		mc->ops->poll_tx(mc->dev);
 	}
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index dd0ee2691c86..9c86cb7cb988 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -333,7 +333,7 @@
 #define MVPP2_GMAC_INBAND_AN_MASK	BIT(0)
 #define MVPP2_GMAC_FLOW_CTRL_MASK	GENMASK(2, 1)
 #define MVPP2_GMAC_PCS_ENABLE_MASK	BIT(3)
-#define MVPP2_GMAC_PORT_RGMII_MASK	BIT(4)
+#define MVPP2_GMAC_INTERNAL_CLK_MASK	BIT(4)
 #define MVPP2_GMAC_DISABLE_PADDING	BIT(5)
 #define MVPP2_GMAC_PORT_RESET_MASK	BIT(6)
 #define MVPP2_GMAC_AUTONEG_CONFIG	0xc
@@ -676,6 +676,7 @@ enum mvpp2_tag_type {
 #define MVPP2_PRS_RI_L3_MCAST		BIT(15)
 #define MVPP2_PRS_RI_L3_BCAST		(BIT(15) | BIT(16))
 #define MVPP2_PRS_RI_IP_FRAG_MASK	0x20000
+#define MVPP2_PRS_RI_IP_FRAG_TRUE	BIT(17)
 #define MVPP2_PRS_RI_UDF3_MASK		0x300000
 #define MVPP2_PRS_RI_UDF3_RX_SPECIAL	BIT(21)
 #define MVPP2_PRS_RI_L4_PROTO_MASK	0x1c00000
@@ -792,6 +793,7 @@ struct mvpp2 {
 	struct clk *pp_clk;
 	struct clk *gop_clk;
 	struct clk *mg_clk;
+	struct clk *axi_clk;

 	/* List of pointers to port structures */
 	struct mvpp2_port **port_list;
@@ -2315,7 +2317,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
 	    (proto != IPPROTO_IGMP))
 		return -EINVAL;

-	/* Fragmented packet */
+	/* Not fragmented packet */
 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 					MVPP2_PE_LAST_FREE_TID);
 	if (tid < 0)
@@ -2334,8 +2336,12 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
 				 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
 	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
 				 MVPP2_PRS_IPV4_DIP_AI_BIT);
-	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
-				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00,
+				     MVPP2_PRS_TCAM_PROTO_MASK_L);
+	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00,
+				     MVPP2_PRS_TCAM_PROTO_MASK);

 	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
 	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
@@ -2346,7 +2352,7 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
 	mvpp2_prs_hw_write(priv, &pe);

-	/* Not fragmented packet */
+	/* Fragmented packet */
 	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
 					MVPP2_PE_LAST_FREE_TID);
 	if (tid < 0)
@@ -2358,8 +2364,11 @@ static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
 	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
 	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);

-	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
-	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
+	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_TRUE,
+				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, 0x0);
+	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, 0x0);

 	/* Update shadow table and hw entry */
 	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
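
The parser split keys on the IPv4 flags/fragment-offset bytes: the non-fragmented entry requires those TCAM bytes to be zero (under a mask that should leave the DF bit out), while the catch-all entry marks everything else with MVPP2_PRS_RI_IP_FRAG_TRUE, so fragments become distinguishable in the result info. What the TCAM encodes is the usual software test, sketched here:

	#include <linux/ip.h>

	/* a packet is a fragment if More-Fragments is set or the fragment
	 * offset is non-zero; same test as the kernel's ip_is_fragment()
	 */
	static bool ipv4_is_fragment(const struct iphdr *iph)
	{
		return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
	}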
@@ -4591,7 +4600,6 @@ static void mvpp2_port_mii_gmac_configure(struct mvpp2_port *port)
 		val |= MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK;
 	} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
 		val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
-		val |= MVPP2_GMAC_PORT_RGMII_MASK;
 	}
 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);

@@ -7496,7 +7504,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
 /* Ports initialization */
 static int mvpp2_port_probe(struct platform_device *pdev,
 			    struct device_node *port_node,
-			    struct mvpp2 *priv)
+			    struct mvpp2 *priv, int index)
 {
 	struct device_node *phy_node;
 	struct phy *comphy;
@@ -7670,7 +7678,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 	}
 	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);

-	priv->port_list[id] = port;
+	priv->port_list[index] = port;
 	return 0;

 err_free_port_pcpu:
@@ -7963,6 +7971,18 @@ static int mvpp2_probe(struct platform_device *pdev)
 		err = clk_prepare_enable(priv->mg_clk);
 		if (err < 0)
 			goto err_gop_clk;
+
+		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
+		if (IS_ERR(priv->axi_clk)) {
+			err = PTR_ERR(priv->axi_clk);
+			if (err == -EPROBE_DEFER)
+				goto err_gop_clk;
+			priv->axi_clk = NULL;
+		} else {
+			err = clk_prepare_enable(priv->axi_clk);
+			if (err < 0)
+				goto err_gop_clk;
+		}
 	}

 	/* Get system's tclk rate */
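
PPv2.2 gains an optional AXI clock here: absence is tolerated (axi_clk stays NULL and probing continues), but -EPROBE_DEFER is still bubbled up so the probe retries once the clock provider appears. Because the clk API treats a NULL clock as a no-op, the error and remove paths later in this diff can call clk_disable_unprepare(priv->axi_clk) unconditionally. The pattern on its own, as a sketch:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int get_optional_clk(struct device *dev, const char *name,
				    struct clk **out)
	{
		*out = devm_clk_get(dev, name);
		if (IS_ERR(*out)) {
			if (PTR_ERR(*out) == -EPROBE_DEFER)
				return -EPROBE_DEFER;	/* provider not ready, retry */
			*out = NULL;			/* truly absent: it is optional */
			return 0;
		}
		return clk_prepare_enable(*out);	/* present: turn it on */
	}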
@@ -8005,16 +8025,19 @@ static int mvpp2_probe(struct platform_device *pdev)
 	}

 	/* Initialize ports */
+	i = 0;
 	for_each_available_child_of_node(dn, port_node) {
-		err = mvpp2_port_probe(pdev, port_node, priv);
+		err = mvpp2_port_probe(pdev, port_node, priv, i);
 		if (err < 0)
 			goto err_mg_clk;
+		i++;
 	}

 	platform_set_drvdata(pdev, priv);
 	return 0;

 err_mg_clk:
+	clk_disable_unprepare(priv->axi_clk);
 	if (priv->hw_version == MVPP22)
 		clk_disable_unprepare(priv->mg_clk);
 err_gop_clk:
@@ -8052,6 +8075,7 @@ static int mvpp2_remove(struct platform_device *pdev)
 				  aggr_txq->descs_dma);
 	}

+	clk_disable_unprepare(priv->axi_clk);
 	clk_disable_unprepare(priv->mg_clk);
 	clk_disable_unprepare(priv->pp_clk);
 	clk_disable_unprepare(priv->gop_clk);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
index 1e3a6c3e4132..80eef4163f52 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
@@ -139,7 +139,7 @@ TRACE_EVENT(mlx5_fs_del_fg,
 	{MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}

 TRACE_EVENT(mlx5_fs_set_fte,
-	    TP_PROTO(const struct fs_fte *fte, bool new_fte),
+	    TP_PROTO(const struct fs_fte *fte, int new_fte),
 	    TP_ARGS(fte, new_fte),
 	    TP_STRUCT__entry(
 		__field(const struct fs_fte *, fte)
@@ -149,7 +149,7 @@ TRACE_EVENT(mlx5_fs_set_fte,
 		__field(u32, action)
 		__field(u32, flow_tag)
 		__field(u8, mask_enable)
-		__field(bool, new_fte)
+		__field(int, new_fte)
 		__array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
 		__array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
 		__array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index f11fd07ac4dd..850cdc980ab5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -291,7 +291,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
 	priv->fs.vlan.filter_disabled = false;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
-	mlx5e_del_any_vid_rules(priv);
+	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
 }

 void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
@@ -302,7 +302,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
 	priv->fs.vlan.filter_disabled = true;
 	if (priv->netdev->flags & IFF_PROMISC)
 		return;
-	mlx5e_add_any_vid_rules(priv);
+	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
 }

 int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index dfc29720ab77..cc11bbbd0309 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -184,7 +184,6 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 	struct mlx5e_sw_stats temp, *s = &temp;
 	struct mlx5e_rq_stats *rq_stats;
 	struct mlx5e_sq_stats *sq_stats;
-	u64 tx_offload_none = 0;
 	int i, j;

 	memset(s, 0, sizeof(*s));
@@ -199,6 +198,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 		s->rx_lro_bytes += rq_stats->lro_bytes;
 		s->rx_csum_none += rq_stats->csum_none;
 		s->rx_csum_complete += rq_stats->csum_complete;
+		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
 		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
 		s->rx_xdp_drop += rq_stats->xdp_drop;
 		s->rx_xdp_tx += rq_stats->xdp_tx;
@@ -229,14 +229,11 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 			s->tx_queue_dropped += sq_stats->dropped;
 			s->tx_xmit_more += sq_stats->xmit_more;
 			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
-			tx_offload_none += sq_stats->csum_none;
+			s->tx_csum_none += sq_stats->csum_none;
+			s->tx_csum_partial += sq_stats->csum_partial;
 		}
 	}

-	/* Update calculated offload counters */
-	s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
-	s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
-
 	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
 				priv->stats.pport.phy_counters,
 				counter_set.phys_layer_cntrs.link_down_events);
@@ -3333,8 +3330,8 @@ static int mlx5e_handle_feature(struct net_device *netdev,

 	err = feature_handler(netdev, enable);
 	if (err) {
-		netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
-			   enable ? "Enable" : "Disable", feature, err);
+		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
+			   enable ? "Enable" : "Disable", &feature, err);
 		return err;
 	}

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index f1dd638384d3..15a1687483cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -627,6 +627,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,

 	if (lro) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
+		rq->stats.csum_unnecessary++;
 		return;
 	}

@@ -644,7 +645,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
 			skb->csum_level = 1;
 			skb->encapsulation = 1;
 			rq->stats.csum_unnecessary_inner++;
+			return;
 		}
+		rq->stats.csum_unnecessary++;
 		return;
 	}
 csum_none:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 6d199ffb1c0b..f8637213afc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -68,6 +68,7 @@ struct mlx5e_sw_stats {
 	u64 rx_xdp_drop;
 	u64 rx_xdp_tx;
 	u64 rx_xdp_tx_full;
+	u64 tx_csum_none;
 	u64 tx_csum_partial;
 	u64 tx_csum_partial_inner;
 	u64 tx_queue_stopped;
@@ -108,6 +109,7 @@ static const struct counter_desc sw_stats_desc[] = {
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
 	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
@@ -339,6 +341,7 @@ struct mlx5e_rq_stats {
 	u64 packets;
 	u64 bytes;
 	u64 csum_complete;
+	u64 csum_unnecessary;
 	u64 csum_unnecessary_inner;
 	u64 csum_none;
 	u64 lro_packets;
@@ -363,6 +366,7 @@ static const struct counter_desc rq_stats_desc[] = {
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
 	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
@@ -392,6 +396,7 @@ struct mlx5e_sq_stats {
 	u64 tso_bytes;
 	u64 tso_inner_packets;
 	u64 tso_inner_bytes;
+	u64 csum_partial;
 	u64 csum_partial_inner;
 	u64 nop;
 	/* less likely accessed in data path */
@@ -408,6 +413,7 @@ static const struct counter_desc sq_stats_desc[] = {
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
 	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index da503e6411da..1aa2028ed995 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1317,6 +1317,69 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
 	return true;
 }

+static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
+					  struct tcf_exts *exts)
+{
+	const struct tc_action *a;
+	bool modify_ip_header;
+	LIST_HEAD(actions);
+	u8 htype, ip_proto;
+	void *headers_v;
+	u16 ethertype;
+	int nkeys, i;
+
+	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
+
+	/* for non-IP we only re-write MACs, so we're okay */
+	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
+		goto out_ok;
+
+	modify_ip_header = false;
+	tcf_exts_to_list(exts, &actions);
+	list_for_each_entry(a, &actions, list) {
+		if (!is_tcf_pedit(a))
+			continue;
+
+		nkeys = tcf_pedit_nkeys(a);
+		for (i = 0; i < nkeys; i++) {
+			htype = tcf_pedit_htype(a, i);
+			if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
+			    htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
+				modify_ip_header = true;
+				break;
+			}
+		}
+	}
+
+	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
+	if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
+		return false;
+	}
+
+out_ok:
+	return true;
+}
+
+static bool actions_match_supported(struct mlx5e_priv *priv,
+				    struct tcf_exts *exts,
+				    struct mlx5e_tc_flow_parse_attr *parse_attr,
+				    struct mlx5e_tc_flow *flow)
+{
+	u32 actions;
+
+	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+		actions = flow->esw_attr->action;
+	else
+		actions = flow->nic_attr->action;
+
+	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+		return modify_header_match_supported(&parse_attr->spec, exts);
+
+	return true;
+}
+
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 				struct mlx5e_tc_flow_parse_attr *parse_attr,
 				struct mlx5e_tc_flow *flow)
@@ -1378,6 +1441,9 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 		return -EINVAL;
 	}

+	if (!actions_match_supported(priv, exts, parse_attr, flow))
+		return -EOPNOTSUPP;
+
 	return 0;
 }

@@ -1564,7 +1630,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 		break;
 	default:
 		err = -EOPNOTSUPP;
-		goto out;
+		goto free_encap;
 	}
 	fl4.flowi4_tos = tun_key->tos;
 	fl4.daddr = tun_key->u.ipv4.dst;
@@ -1573,7 +1639,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
 				      &fl4, &n, &ttl);
 	if (err)
-		goto out;
+		goto free_encap;

 	/* used by mlx5e_detach_encap to lookup a neigh hash table
 	 * entry in the neigh hash table when a user deletes a rule
@@ -1590,7 +1656,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
 	 */
 	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
 	if (err)
-		goto out;
+		goto free_encap;

 	read_lock_bh(&n->lock);
 	nud_state = n->nud_state;
@@ -1630,8 +1696,9 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,

 destroy_neigh_entry:
 	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-out:
+free_encap:
 	kfree(encap_header);
+out:
 	if (n)
 		neigh_release(n);
 	return err;
@@ -1668,7 +1735,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 		break;
 	default:
 		err = -EOPNOTSUPP;
-		goto out;
+		goto free_encap;
 	}

 	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
@@ -1678,7 +1745,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
 				      &fl6, &n, &ttl);
 	if (err)
-		goto out;
+		goto free_encap;

 	/* used by mlx5e_detach_encap to lookup a neigh hash table
 	 * entry in the neigh hash table when a user deletes a rule
@@ -1695,7 +1762,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
 	 */
 	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
 	if (err)
-		goto out;
+		goto free_encap;

 	read_lock_bh(&n->lock);
 	nud_state = n->nud_state;
@@ -1736,8 +1803,9 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,

 destroy_neigh_entry:
 	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
-out:
+free_encap:
 	kfree(encap_header);
+out:
 	if (n)
 		neigh_release(n);
 	return err;
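
Both encap builders get the same label split: kfree(encap_header) now sits under free_encap, which is reached only while the function still owns the buffer, and out only drops the neighbour reference, so paths that stashed the header for later (for example the not-yet-valid neighbour case) no longer free memory that is still needed. A hypothetical reduction of the shape (names and helpers invented for illustration):

	#include <linux/slab.h>
	#include <linux/errno.h>

	struct demo_entry { void *pending_buf; };

	static int demo_lookup(void) { return 0; }	/* stand-in step */

	static int demo_build(struct demo_entry *e)
	{
		void *buf;
		int err;

		buf = kzalloc(64, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		err = demo_lookup();
		if (err)
			goto free_buf;		/* buffer still ours: release it */

		e->pending_buf = buf;		/* ownership handed off */
		err = -EAGAIN;			/* e.g. wait for the neighbour */
		goto out;			/* must NOT free the buffer now */

	free_buf:
		kfree(buf);
	out:
		/* common teardown (reference drop) would go here */
		return err;
	}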
@@ -1791,6 +1859,7 @@ vxlan_encap_offload_err:
 		}
 	}

+	/* must verify if encap is valid or not */
 	if (found)
 		goto attach_flow;

@@ -1817,6 +1886,8 @@ attach_flow:
 	*encap_dev = e->out_dev;
 	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
 		attr->encap_id = e->encap_id;
+	else
+		err = -EAGAIN;

 	return err;

@@ -1934,6 +2005,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,

 		return -EINVAL;
 	}
+
+	if (!actions_match_supported(priv, exts, parse_attr, flow))
+		return -EOPNOTSUPP;
+
 	return err;
 }

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index fee43e40fa16..1d6925d4369a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -193,6 +193,7 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct
 			sq->stats.csum_partial_inner++;
 		} else {
 			eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+			sq->stats.csum_partial++;
 		}
 	} else
 		sq->stats.csum_none++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
index e37453d838db..c0fd2212e890 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.c
@@ -71,11 +71,11 @@ int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
 	return 0;
 }

-int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps)
+int mlx5_fpga_caps(struct mlx5_core_dev *dev)
 {
 	u32 in[MLX5_ST_SZ_DW(fpga_cap)] = {0};

-	return mlx5_core_access_reg(dev, in, sizeof(in), caps,
+	return mlx5_core_access_reg(dev, in, sizeof(in), dev->caps.fpga,
 				    MLX5_ST_SZ_BYTES(fpga_cap),
 				    MLX5_REG_FPGA_CAP, 0, 0);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
index 94bdfd47c3f0..d05233c9b4f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/cmd.h
@@ -65,7 +65,7 @@ struct mlx5_fpga_qp_counters {
 	u64 rx_total_drop;
 };

-int mlx5_fpga_caps(struct mlx5_core_dev *dev, u32 *caps);
+int mlx5_fpga_caps(struct mlx5_core_dev *dev);
 int mlx5_fpga_query(struct mlx5_core_dev *dev, struct mlx5_fpga_query *query);
 int mlx5_fpga_ctrl_op(struct mlx5_core_dev *dev, u8 op);
 int mlx5_fpga_access_reg(struct mlx5_core_dev *dev, u8 size, u64 addr,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 9034e9960a76..dc8970346521 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -139,8 +139,7 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev)
 	if (err)
 		goto out;

-	err = mlx5_fpga_caps(fdev->mdev,
-			     fdev->mdev->caps.hca_cur[MLX5_CAP_FPGA]);
+	err = mlx5_fpga_caps(fdev->mdev);
 	if (err)
 		goto out;

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index e0d0efd903bc..36ecc2b2e187 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -293,6 +293,9 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 	}

 	if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+		int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
+					log_max_flow_counter,
+					ft->type));
 		int list_size = 0;

 		list_for_each_entry(dst, &fte->node.children, node.list) {
@@ -305,12 +308,17 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
 			in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
 			list_size++;
 		}
+		if (list_size > max_list_size) {
+			err = -EINVAL;
+			goto err_out;
+		}

 		MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
 			 list_size);
 	}

 	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+err_out:
 	kvfree(in);
 	return err;
 }
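
The new bound converts the firmware capability, a log2 field, into a concrete limit before the command is built: log_max_flow_counter = 10 would allow at most 1 << 10 = 1024 counter destinations, and longer lists now fail fast with -EINVAL instead of being rejected by firmware. The guard in isolation:

	#include <linux/bitops.h>	/* BIT() */
	#include <linux/errno.h>

	static int check_counter_list(int list_size, unsigned int log_max_flow_counter)
	{
		int max_list_size = BIT(log_max_flow_counter);	/* e.g. 10 -> 1024 */

		return list_size > max_list_size ? -EINVAL : 0;
	}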
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 5509a752f98e..48dd78975062 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -52,6 +52,7 @@ enum fs_flow_table_type {
 	FS_FT_FDB             = 0X4,
 	FS_FT_SNIFFER_RX      = 0X5,
 	FS_FT_SNIFFER_TX      = 0X6,
+	FS_FT_MAX_TYPE = FS_FT_SNIFFER_TX,
 };

 enum fs_flow_table_op_mod {
@@ -260,4 +261,14 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
 #define fs_for_each_dst(pos, fte)			\
 	fs_list_for_each_entry(pos, &(fte)->node.children)

+#define MLX5_CAP_FLOWTABLE_TYPE(mdev, cap, type) (		\
+	(type == FS_FT_NIC_RX) ? MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) :		\
+	(type == FS_FT_ESW_EGRESS_ACL) ? MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) :		\
+	(type == FS_FT_ESW_INGRESS_ACL) ? MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) :		\
+	(type == FS_FT_FDB) ? MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) :		\
+	(type == FS_FT_SNIFFER_RX) ? MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) :		\
+	(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) :		\
+	(BUILD_BUG_ON_ZERO(FS_FT_SNIFFER_TX != FS_FT_MAX_TYPE))\
+	)
+
 #endif
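
The last arm of MLX5_CAP_FLOWTABLE_TYPE() is a compile-time tripwire: BUILD_BUG_ON_ZERO() expands to 0 while FS_FT_SNIFFER_TX is still FS_FT_MAX_TYPE, but the expression stops compiling the moment someone grows the enum without adding a matching branch, instead of silently reading the wrong capability. The idiom in miniature:

	#include <linux/build_bug.h>	/* BUILD_BUG_ON_ZERO() */

	enum demo_type { DEMO_A, DEMO_B, DEMO_MAX = DEMO_B };

	/* yields a per-type value, or a build error once DEMO_MAX moves
	 * past the last handled enumerator
	 */
	#define DEMO_CAP(type)					\
		((type) == DEMO_A ? 1 :				\
		 (type) == DEMO_B ? 2 :				\
		 (BUILD_BUG_ON_ZERO(DEMO_B != DEMO_MAX)))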
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 85298051a3e4..145e392ab849 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -572,12 +572,13 @@ void mlx5_rdma_netdev_free(struct net_device *netdev)
572{ 572{
573 struct mlx5e_priv *priv = mlx5i_epriv(netdev); 573 struct mlx5e_priv *priv = mlx5i_epriv(netdev);
574 const struct mlx5e_profile *profile = priv->profile; 574 const struct mlx5e_profile *profile = priv->profile;
575 struct mlx5_core_dev *mdev = priv->mdev;
575 576
576 mlx5e_detach_netdev(priv); 577 mlx5e_detach_netdev(priv);
577 profile->cleanup(priv); 578 profile->cleanup(priv);
578 destroy_workqueue(priv->wq); 579 destroy_workqueue(priv->wq);
579 free_netdev(netdev); 580 free_netdev(netdev);
580 581
581 mlx5e_destroy_mdev_resources(priv->mdev); 582 mlx5e_destroy_mdev_resources(mdev);
582} 583}
583EXPORT_SYMBOL(mlx5_rdma_netdev_free); 584EXPORT_SYMBOL(mlx5_rdma_netdev_free);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 6c48e9959b65..2a8b529ce6dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -109,7 +109,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
109 mlx5_core_warn(dev, 109 mlx5_core_warn(dev,
110 "failed to restore VF %d settings, err %d\n", 110 "failed to restore VF %d settings, err %d\n",
111 vf, err); 111 vf, err);
112 continue; 112 continue;
113 } 113 }
114 } 114 }
115 mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf); 115 mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 2cfb3f5d092d..032089efc1a0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -2723,6 +2723,7 @@ static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
 		mlxsw_sp_nexthop_rif_fini(nh);
 		break;
 	case MLXSW_SP_NEXTHOP_TYPE_IPIP:
+		mlxsw_sp_nexthop_rif_fini(nh);
 		mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
 		break;
 	}
@@ -2742,7 +2743,11 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
 	    router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
 						     MLXSW_SP_L3_PROTO_IPV4)) {
 		nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
-		return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+		err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+		if (err)
+			return err;
+		mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
+		return 0;
 	}
 
 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -4009,7 +4014,11 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
 	    router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
 						     MLXSW_SP_L3_PROTO_IPV6)) {
 		nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
-		return mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+		err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, ipipt, nh, dev);
+		if (err)
+			return err;
+		mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
+		return 0;
 	}
 
 	nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -5068,6 +5077,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
 	vr = mlxsw_sp_vr_get(mlxsw_sp, tb_id ? : RT_TABLE_MAIN);
 	if (IS_ERR(vr))
 		return ERR_CAST(vr);
+	vr->rif_count++;
 
 	err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
 	if (err)
@@ -5099,7 +5109,6 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
 
 	mlxsw_sp_rif_counters_alloc(rif);
 	mlxsw_sp->router->rifs[rif_index] = rif;
-	vr->rif_count++;
 
 	return rif;
 
@@ -5110,6 +5119,7 @@ err_fid_get:
 	kfree(rif);
 err_rif_alloc:
 err_rif_index_alloc:
+	vr->rif_count--;
 	mlxsw_sp_vr_put(vr);
 	return ERR_PTR(err);
 }
@@ -5124,7 +5134,6 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
 	mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
 	vr = &mlxsw_sp->router->vrs[rif->vr_id];
 
-	vr->rif_count--;
 	mlxsw_sp->router->rifs[rif->rif_index] = NULL;
 	mlxsw_sp_rif_counters_free(rif);
 	ops->deconfigure(rif);
@@ -5132,6 +5141,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
 	/* Loopback RIFs are not associated with a FID. */
 	mlxsw_sp_fid_put(fid);
 	kfree(rif);
+	vr->rif_count--;
 	mlxsw_sp_vr_put(vr);
 }
 
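Moving vr->rif_count++ next to mlxsw_sp_vr_get() and the decrement next to mlxsw_sp_vr_put() keeps the counter balanced on every path, including the error unwind added above. A reduced sketch of that acquire/release symmetry, with hypothetical vr_get()/vr_put() helpers standing in for the mlxsw ones:

#include <errno.h>

struct vr { int refcnt; int rif_count; };

static void vr_get(struct vr *vr) { vr->refcnt++; }
static void vr_put(struct vr *vr) { vr->refcnt--; }

static int rif_create(struct vr *vr, int setup_err)
{
	vr_get(vr);
	vr->rif_count++;	/* bumped together with the reference */

	if (setup_err)		/* stands in for the real setup steps */
		goto err_setup;
	return 0;

err_setup:
	vr->rif_count--;	/* dropped right before the reference */
	vr_put(vr);
	return setup_err;
}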
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
index bbe24639aa5a..c8c6231b87f3 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-ethtool.c
@@ -88,6 +88,8 @@ static void emac_set_msglevel(struct net_device *netdev, u32 data)
 static int emac_get_sset_count(struct net_device *netdev, int sset)
 {
 	switch (sset) {
+	case ETH_SS_PRIV_FLAGS:
+		return 1;
 	case ETH_SS_STATS:
 		return EMAC_STATS_LEN;
 	default:
@@ -100,6 +102,10 @@ static void emac_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 	unsigned int i;
 
 	switch (stringset) {
+	case ETH_SS_PRIV_FLAGS:
+		strcpy(data, "single-pause-mode");
+		break;
+
 	case ETH_SS_STATS:
 		for (i = 0; i < EMAC_STATS_LEN; i++) {
 			strlcpy(data, emac_ethtool_stat_strings[i],
@@ -230,6 +236,27 @@ static int emac_get_regs_len(struct net_device *netdev)
 	return EMAC_MAX_REG_SIZE * sizeof(u32);
 }
 
+#define EMAC_PRIV_ENABLE_SINGLE_PAUSE	BIT(0)
+
+static int emac_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	adpt->single_pause_mode = !!(flags & EMAC_PRIV_ENABLE_SINGLE_PAUSE);
+
+	if (netif_running(netdev))
+		return emac_reinit_locked(adpt);
+
+	return 0;
+}
+
+static u32 emac_get_priv_flags(struct net_device *netdev)
+{
+	struct emac_adapter *adpt = netdev_priv(netdev);
+
+	return adpt->single_pause_mode ? EMAC_PRIV_ENABLE_SINGLE_PAUSE : 0;
+}
+
 static const struct ethtool_ops emac_ethtool_ops = {
 	.get_link_ksettings = phy_ethtool_get_link_ksettings,
 	.set_link_ksettings = phy_ethtool_set_link_ksettings,
@@ -253,6 +280,9 @@ static const struct ethtool_ops emac_ethtool_ops = {
 
 	.get_regs_len = emac_get_regs_len,
 	.get_regs = emac_get_regs,
+
+	.set_priv_flags = emac_set_priv_flags,
+	.get_priv_flags = emac_get_priv_flags,
 };
 
 void emac_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index bcd4708b3745..3ed9033e56db 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -551,6 +551,28 @@ static void emac_mac_start(struct emac_adapter *adpt)
 	mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL |
 		 DEBUG_MODE | SINGLE_PAUSE_MODE);
 
+	/* Enable single-pause-frame mode if requested.
+	 *
+	 * If enabled, the EMAC will send a single pause frame when the RX
+	 * queue is full. This normally leads to packet loss because
+	 * the pause frame disables the remote MAC only for 33ms (the quanta),
+	 * and then the remote MAC continues sending packets even though
+	 * the RX queue is still full.
+	 *
+	 * If disabled, the EMAC sends a pause frame every 31ms until the RX
+	 * queue is no longer full. Normally, this is the preferred
+	 * method of operation. However, when the system is hung (e.g.
+	 * cores are halted), the EMAC interrupt handler is never called
+	 * and so the RX queue fills up quickly and stays full. The resulting
+	 * non-stop "flood" of pause frames sometimes has the effect of
+	 * disabling nearby switches. In some cases, other nearby switches
+	 * are also affected, shutting down the entire network.
+	 *
+	 * The user can enable or disable single-pause-frame mode
+	 * via ethtool.
+	 */
+	mac |= adpt->single_pause_mode ? SINGLE_PAUSE_MODE : 0;
+
 	writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1);
 
 	writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL);
@@ -876,7 +898,8 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
 
 	curr_rxbuf->dma_addr =
 		dma_map_single(adpt->netdev->dev.parent, skb->data,
-			       curr_rxbuf->length, DMA_FROM_DEVICE);
+			       adpt->rxbuf_size, DMA_FROM_DEVICE);
+
 	ret = dma_mapping_error(adpt->netdev->dev.parent,
 				curr_rxbuf->dma_addr);
 	if (ret) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 60850bfa3d32..759543512117 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -443,6 +443,9 @@ static void emac_init_adapter(struct emac_adapter *adpt)
 
 	/* default to automatic flow control */
 	adpt->automatic = true;
+
+	/* Disable single-pause-frame mode by default */
+	adpt->single_pause_mode = false;
 }
 
 /* Get the clock */
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h
index 8ee4ec6aef2e..d7c9f44209d4 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac.h
@@ -363,6 +363,9 @@ struct emac_adapter {
 	bool tx_flow_control;
 	bool rx_flow_control;
 
+	/* True == use single-pause-frame mode. */
+	bool single_pause_mode;
+
 	/* Ring parameter */
 	u8 tpd_burst;
 	u8 rfd_burst;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 98f22551eb45..1e33aea59f50 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -51,10 +51,7 @@ struct rmnet_walk_data {
 
 static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
 {
-	rx_handler_func_t *rx_handler;
-
-	rx_handler = rcu_dereference(real_dev->rx_handler);
-	return (rx_handler == rmnet_rx_handler);
+	return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
 }
 
 /* Needs rtnl lock */
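rcu_dereference() is only legal inside an RCU read-side critical section and will trigger a lockdep splat otherwise; rcu_access_pointer() is the primitive for the case above, where the pointer's value is compared but never dereferenced. A sketch of the distinction, assuming an __rcu-annotated handler field like net_device's rx_handler:

static bool dev_uses_handler(const struct net_device *dev)
{
	/* Value comparison only: no rcu_read_lock() required. */
	return rcu_access_pointer(dev->rx_handler) == rmnet_rx_handler;
}

static void dev_run_handler(struct net_device *dev, struct sk_buff **pskb)
{
	rx_handler_func_t *h;

	rcu_read_lock();		/* required before dereferencing */
	h = rcu_dereference(dev->rx_handler);
	if (h)
		h(pskb);
	rcu_read_unlock();
}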
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index ca22f2898664..d24b47b8e0b2 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2135,11 +2135,12 @@ static int rtl8139_poll(struct napi_struct *napi, int budget)
 	if (likely(RTL_R16(IntrStatus) & RxAckBits))
 		work_done += rtl8139_rx(dev, tp, budget);
 
-	if (work_done < budget && napi_complete_done(napi, work_done)) {
+	if (work_done < budget) {
 		unsigned long flags;
 
 		spin_lock_irqsave(&tp->lock, flags);
-		RTL_W16_F(IntrMask, rtl8139_intr_mask);
+		if (napi_complete_done(napi, work_done))
+			RTL_W16_F(IntrMask, rtl8139_intr_mask);
 		spin_unlock_irqrestore(&tp->lock, flags);
 	}
 	spin_unlock(&tp->rx_lock);
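Calling napi_complete_done() under tp->lock closes the window in which the poller could unmask interrupts after the ISR on another CPU had already rescheduled NAPI. A sketch of the pattern, with hypothetical do_rx() and enable_device_irqs() helpers standing in for the driver's RX routine and RTL_W16_F():

static int poll(struct napi_struct *napi, int budget)
{
	struct priv *tp = container_of(napi, struct priv, napi);
	int work_done = do_rx(tp, budget);

	if (work_done < budget) {
		unsigned long flags;

		/* Same lock the ISR takes, so "done" and "unmask"
		 * cannot interleave with a concurrent reschedule.
		 */
		spin_lock_irqsave(&tp->lock, flags);
		if (napi_complete_done(napi, work_done))
			enable_device_irqs(tp);
		spin_unlock_irqrestore(&tp->lock, flags);
	}
	return work_done;
}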
diff --git a/drivers/net/ethernet/rocker/rocker_tlv.h b/drivers/net/ethernet/rocker/rocker_tlv.h
index a63ef82e7c72..dfae3c9d57c6 100644
--- a/drivers/net/ethernet/rocker/rocker_tlv.h
+++ b/drivers/net/ethernet/rocker/rocker_tlv.h
@@ -139,40 +139,52 @@ rocker_tlv_start(struct rocker_desc_info *desc_info)
 int rocker_tlv_put(struct rocker_desc_info *desc_info,
 		   int attrtype, int attrlen, const void *data);
 
-static inline int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
-				    int attrtype, u8 value)
+static inline int
+rocker_tlv_put_u8(struct rocker_desc_info *desc_info, int attrtype, u8 value)
 {
-	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
+	u8 tmp = value; /* work around GCC PR81715 */
+
+	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &tmp);
 }
 
-static inline int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
-				     int attrtype, u16 value)
+static inline int
+rocker_tlv_put_u16(struct rocker_desc_info *desc_info, int attrtype, u16 value)
 {
-	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
+	u16 tmp = value;
+
+	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &tmp);
 }
 
-static inline int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
-				      int attrtype, __be16 value)
+static inline int
+rocker_tlv_put_be16(struct rocker_desc_info *desc_info, int attrtype, __be16 value)
 {
-	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
+	__be16 tmp = value;
+
+	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &tmp);
 }
 
-static inline int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
-				     int attrtype, u32 value)
+static inline int
+rocker_tlv_put_u32(struct rocker_desc_info *desc_info, int attrtype, u32 value)
 {
-	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
+	u32 tmp = value;
+
+	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &tmp);
 }
 
-static inline int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
-				      int attrtype, __be32 value)
+static inline int
+rocker_tlv_put_be32(struct rocker_desc_info *desc_info, int attrtype, __be32 value)
 {
-	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
+	__be32 tmp = value;
+
+	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &tmp);
 }
 
-static inline int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
-				     int attrtype, u64 value)
+static inline int
+rocker_tlv_put_u64(struct rocker_desc_info *desc_info, int attrtype, u64 value)
 {
-	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
+	u64 tmp = value;
+
+	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &tmp);
 }
 
 static inline struct rocker_tlv *
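The temporaries work around GCC PR81715: taking the address of a by-value parameter in these always-inlined helpers kept each inlined call site's argument slot alive separately (notably with KASAN enabled), inflating the caller's stack frame. A reduced sketch, with struct buf and put_raw() as stand-ins for the rocker types:

/* Before: &value pins the incoming argument slot at every inlined
 * call site, so large callers accumulate stack usage.
 */
static inline int put_u32_old(struct buf *b, int type, u32 value)
{
	return put_raw(b, type, sizeof(u32), &value);
}

/* After: a local temporary gives the compiler a slot it may merge
 * and reuse across call sites.
 */
static inline int put_u32_new(struct buf *b, int type, u32 value)
{
	u32 tmp = value;	/* work around GCC PR81715 */

	return put_raw(b, type, sizeof(u32), &tmp);
}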
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
index dd6a2f9791cc..5efef8001edf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -511,6 +511,7 @@ static struct platform_driver dwc_eth_dwmac_driver = {
 	.remove = dwc_eth_dwmac_remove,
 	.driver = {
 		.name = "dwc-eth-dwmac",
+		.pm = &stmmac_pltfr_pm_ops,
 		.of_match_table = dwc_eth_dwmac_match,
 	},
 };
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 99823f54696a..13133b30b575 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -83,6 +83,117 @@ struct rk_priv_data {
 	(((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
 	 ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
 
+#define RK3128_GRF_MAC_CON0	0x0168
+#define RK3128_GRF_MAC_CON1	0x016c
+
+/* RK3128_GRF_MAC_CON0 */
+#define RK3128_GMAC_TXCLK_DLY_ENABLE	GRF_BIT(14)
+#define RK3128_GMAC_TXCLK_DLY_DISABLE	GRF_CLR_BIT(14)
+#define RK3128_GMAC_RXCLK_DLY_ENABLE	GRF_BIT(15)
+#define RK3128_GMAC_RXCLK_DLY_DISABLE	GRF_CLR_BIT(15)
+#define RK3128_GMAC_CLK_RX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3128_GMAC_CLK_TX_DL_CFG(val)	HIWORD_UPDATE(val, 0x7F, 0)
+
+/* RK3128_GRF_MAC_CON1 */
+#define RK3128_GMAC_PHY_INTF_SEL_RGMII	\
+		(GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
+#define RK3128_GMAC_PHY_INTF_SEL_RMII	\
+		(GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
+#define RK3128_GMAC_FLOW_CTRL		GRF_BIT(9)
+#define RK3128_GMAC_FLOW_CTRL_CLR	GRF_CLR_BIT(9)
+#define RK3128_GMAC_SPEED_10M		GRF_CLR_BIT(10)
+#define RK3128_GMAC_SPEED_100M		GRF_BIT(10)
+#define RK3128_GMAC_RMII_CLK_25M	GRF_BIT(11)
+#define RK3128_GMAC_RMII_CLK_2_5M	GRF_CLR_BIT(11)
+#define RK3128_GMAC_CLK_125M		(GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
+#define RK3128_GMAC_CLK_25M		(GRF_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_CLK_2_5M		(GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_RMII_MODE		GRF_BIT(14)
+#define RK3128_GMAC_RMII_MODE_CLR	GRF_CLR_BIT(14)
+
+static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
+				int tx_delay, int rx_delay)
+{
+	struct device *dev = &bsp_priv->pdev->dev;
+
+	if (IS_ERR(bsp_priv->grf)) {
+		dev_err(dev, "Missing rockchip,grf property\n");
+		return;
+	}
+
+	regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+		     RK3128_GMAC_PHY_INTF_SEL_RGMII |
+		     RK3128_GMAC_RMII_MODE_CLR);
+	regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
+		     DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
+		     RK3128_GMAC_CLK_RX_DL_CFG(rx_delay) |
+		     RK3128_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+	struct device *dev = &bsp_priv->pdev->dev;
+
+	if (IS_ERR(bsp_priv->grf)) {
+		dev_err(dev, "Missing rockchip,grf property\n");
+		return;
+	}
+
+	regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+		     RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
+}
+
+static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+	struct device *dev = &bsp_priv->pdev->dev;
+
+	if (IS_ERR(bsp_priv->grf)) {
+		dev_err(dev, "Missing rockchip,grf property\n");
+		return;
+	}
+
+	if (speed == 10)
+		regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+			     RK3128_GMAC_CLK_2_5M);
+	else if (speed == 100)
+		regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+			     RK3128_GMAC_CLK_25M);
+	else if (speed == 1000)
+		regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+			     RK3128_GMAC_CLK_125M);
+	else
+		dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+	struct device *dev = &bsp_priv->pdev->dev;
+
+	if (IS_ERR(bsp_priv->grf)) {
+		dev_err(dev, "Missing rockchip,grf property\n");
+		return;
+	}
+
+	if (speed == 10) {
+		regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+			     RK3128_GMAC_RMII_CLK_2_5M |
+			     RK3128_GMAC_SPEED_10M);
+	} else if (speed == 100) {
+		regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+			     RK3128_GMAC_RMII_CLK_25M |
+			     RK3128_GMAC_SPEED_100M);
+	} else {
+		dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+	}
+}
+
+static const struct rk_gmac_ops rk3128_ops = {
+	.set_to_rgmii = rk3128_set_to_rgmii,
+	.set_to_rmii = rk3128_set_to_rmii,
+	.set_rgmii_speed = rk3128_set_rgmii_speed,
+	.set_rmii_speed = rk3128_set_rmii_speed,
+};
+
 #define RK3228_GRF_MAC_CON0	0x0900
 #define RK3228_GRF_MAC_CON1	0x0904
 
@@ -1313,6 +1424,7 @@ static int rk_gmac_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
 
 static const struct of_device_id rk_gmac_dwmac_match[] = {
+	{ .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
 	{ .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
 	{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
 	{ .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops },
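The GRF_BIT()/GRF_CLR_BIT()/HIWORD_UPDATE() helpers used above rely on the Rockchip GRF convention that the upper 16 bits of a register write are a write-enable mask for the lower 16: a single regmap_write() then changes only the selected bits, with no read-modify-write. A sketch of the encoding, as this driver defines the helpers:

/* bits [31:16] select which of bits [15:0] the write may change */
#define HIWORD_UPDATE(val, mask, shift) \
		((val) << (shift) | (mask) << ((shift) + 16))

#define GRF_BIT(nr)	(BIT(nr) | BIT((nr) + 16))	/* set bit nr */
#define GRF_CLR_BIT(nr)	(BIT((nr) + 16))		/* clear bit nr */

/* e.g. RK3128_GMAC_RMII_MODE == GRF_BIT(14): writing it sets bit 14
 * and leaves every other bit of GRF_MAC_CON1 untouched.
 */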
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index c4407e8e39a3..2f7d7ec59962 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -296,6 +296,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
 {
 	void __iomem *ioaddr = hw->pcsr;
 	unsigned int pmt = 0;
+	u32 config;
 
 	if (mode & WAKE_MAGIC) {
 		pr_debug("GMAC: WOL Magic frame\n");
@@ -306,6 +307,12 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
 		pmt |= power_down | global_unicast | wake_up_frame_en;
 	}
 
+	if (pmt) {
+		/* The receiver must be enabled for WOL before powering down */
+		config = readl(ioaddr + GMAC_CONFIG);
+		config |= GMAC_CONFIG_RE;
+		writel(config, ioaddr + GMAC_CONFIG);
+	}
 	writel(pmt, ioaddr + GMAC_PMT);
 }
 
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index a366b3747eeb..8a280b48e3a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -315,6 +315,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
 		{ .compatible = "allwinner,sun8i-h3-emac" },
 		{ .compatible = "allwinner,sun8i-v3s-emac" },
 		{ .compatible = "allwinner,sun50i-a64-emac" },
+		{},
 	};
 
 	/* If phy-handle property is passed from DT, use it as the PHY */
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index d98cdfb1536b..5176be76ca7d 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -150,6 +150,8 @@ struct netvsc_device_info {
 	u32 num_chn;
 	u32 send_sections;
 	u32 recv_sections;
+	u32 send_section_size;
+	u32 recv_section_size;
 };
 
 enum rndis_device_state {
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index a5511b7326af..8d5077fb0492 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -76,9 +76,6 @@ static struct netvsc_device *alloc_net_device(void)
 	net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
 	net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
 
-	net_device->recv_section_size = NETVSC_RECV_SECTION_SIZE;
-	net_device->send_section_size = NETVSC_SEND_SECTION_SIZE;
-
 	init_completion(&net_device->channel_init_wait);
 	init_waitqueue_head(&net_device->subchan_open);
 	INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
@@ -262,7 +259,7 @@ static int netvsc_init_buf(struct hv_device *device,
 	int ret = 0;
 
 	/* Get receive buffer area. */
-	buf_size = device_info->recv_sections * net_device->recv_section_size;
+	buf_size = device_info->recv_sections * device_info->recv_section_size;
 	buf_size = roundup(buf_size, PAGE_SIZE);
 
 	net_device->recv_buf = vzalloc(buf_size);
@@ -344,7 +341,7 @@ static int netvsc_init_buf(struct hv_device *device,
 		goto cleanup;
 
 	/* Now setup the send buffer. */
-	buf_size = device_info->send_sections * net_device->send_section_size;
+	buf_size = device_info->send_sections * device_info->send_section_size;
 	buf_size = round_up(buf_size, PAGE_SIZE);
 
 	net_device->send_buf = vzalloc(buf_size);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d4902ee5f260..a32ae02e1b6c 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -848,7 +848,9 @@ static int netvsc_set_channels(struct net_device *net,
 	device_info.num_chn = count;
 	device_info.ring_size = ring_size;
 	device_info.send_sections = nvdev->send_section_cnt;
+	device_info.send_section_size = nvdev->send_section_size;
 	device_info.recv_sections = nvdev->recv_section_cnt;
+	device_info.recv_section_size = nvdev->recv_section_size;
 
 	rndis_filter_device_remove(dev, nvdev);
 
@@ -963,7 +965,9 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
 	device_info.ring_size = ring_size;
 	device_info.num_chn = nvdev->num_chn;
 	device_info.send_sections = nvdev->send_section_cnt;
+	device_info.send_section_size = nvdev->send_section_size;
 	device_info.recv_sections = nvdev->recv_section_cnt;
+	device_info.recv_section_size = nvdev->recv_section_size;
 
 	rndis_filter_device_remove(hdev, nvdev);
 
@@ -1485,7 +1489,9 @@ static int netvsc_set_ringparam(struct net_device *ndev,
 	device_info.num_chn = nvdev->num_chn;
 	device_info.ring_size = ring_size;
 	device_info.send_sections = new_tx;
+	device_info.send_section_size = nvdev->send_section_size;
 	device_info.recv_sections = new_rx;
+	device_info.recv_section_size = nvdev->recv_section_size;
 
 	netif_device_detach(ndev);
 	was_opened = rndis_filter_opened(nvdev);
@@ -1934,7 +1940,9 @@ static int netvsc_probe(struct hv_device *dev,
 	device_info.ring_size = ring_size;
 	device_info.num_chn = VRSS_CHANNEL_DEFAULT;
 	device_info.send_sections = NETVSC_DEFAULT_TX;
+	device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
 	device_info.recv_sections = NETVSC_DEFAULT_RX;
+	device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
 
 	nvdev = rndis_filter_device_add(dev, &device_info);
 	if (IS_ERR(nvdev)) {
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a9d16a3af514..cd931cf9dcc2 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -160,15 +160,6 @@ config MDIO_XGENE
 
 endif
 
-menuconfig PHYLIB
-	tristate "PHY Device support and infrastructure"
-	depends on NETDEVICES
-	select MDIO_DEVICE
-	help
-	  Ethernet controllers are usually attached to PHY
-	  devices. This option provides infrastructure for
-	  managing PHY devices.
-
 config PHYLINK
 	tristate
 	depends on NETDEVICES
@@ -179,6 +170,15 @@ config PHYLINK
 	  configuration links, PHYs, and Serdes links with MAC level
 	  autonegotiation modes.
 
+menuconfig PHYLIB
+	tristate "PHY Device support and infrastructure"
+	depends on NETDEVICES
+	select MDIO_DEVICE
+	help
+	  Ethernet controllers are usually attached to PHY
+	  devices. This option provides infrastructure for
+	  managing PHY devices.
+
 if PHYLIB
 
 config SWPHY
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index e842d2cd1ee7..2b1e67bc1e73 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -373,7 +373,8 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
 		cmd->base.port = PORT_BNC;
 	else
 		cmd->base.port = PORT_MII;
-
+	cmd->base.transceiver = phy_is_internal(phydev) ?
+				XCVR_INTERNAL : XCVR_EXTERNAL;
 	cmd->base.phy_address = phydev->mdio.addr;
 	cmd->base.autoneg = phydev->autoneg;
 	cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8cf0c5901f95..67f25ac29025 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -879,7 +879,7 @@ void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
 {
 	const char *drv_name = phydev->drv ? phydev->drv->name : "unbound";
 	char *irq_str;
-	char irq_num[4];
+	char irq_num[8];
 
 	switch(phydev->irq) {
 	case PHY_POLL:
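char irq_num[4] holds at most three digits plus the terminator, while phydev->irq is a plain int, so a larger IRQ number would be silently truncated by snprintf(); eight bytes covers the values the MDIO layer can hand out. A self-contained illustration of the truncation:

#include <stdio.h>

int main(void)
{
	char small[4], big[8];
	int irq = 16384;	/* plausible on a large system */

	/* snprintf() truncates but always NUL-terminates */
	snprintf(small, sizeof(small), "%d", irq);
	snprintf(big, sizeof(big), "%d", irq);
	printf("small=\"%s\" big=\"%s\"\n", small, big);	/* "163" vs "16384" */
	return 0;
}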
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index d15dd3938ba8..2e5150b0b8d5 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -44,7 +44,7 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
 	priv->phy_drv->read_status(phydev);
 
 	val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
-	val &= XILINX_GMII2RGMII_SPEED_MASK;
+	val &= ~XILINX_GMII2RGMII_SPEED_MASK;
 
 	if (phydev->speed == SPEED_1000)
 		val |= BMCR_SPEED1000;
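The one-character fix matters: val &= MASK keeps only the speed bits and zeroes the rest of the register, whereas val &= ~MASK clears the speed field so the following |= can set it while preserving every other bit. The generic read-modify-write shape:

#include <stdint.h>

/* BMCR_SPEED100 | BMCR_SPEED1000 from the MII spec, for example */
#define SPEED_MASK 0x2040

static uint16_t set_speed_bits(uint16_t val, uint16_t speed_bits)
{
	val &= ~SPEED_MASK;	/* clear only the field being updated */
	val |= speed_bits;	/* then set the new value */
	return val;
}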
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index a404552555d4..c3f77e3b7819 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -120,7 +120,7 @@ struct ppp {
 	int n_channels;		/* how many channels are attached 54 */
 	spinlock_t rlock;	/* lock for receive side 58 */
 	spinlock_t wlock;	/* lock for transmit side 5c */
-	int *xmit_recursion __percpu; /* xmit recursion detect */
+	int __percpu *xmit_recursion; /* xmit recursion detect */
 	int mru;		/* max receive unit 60 */
 	unsigned int flags;	/* control bits 64 */
 	unsigned int xstate;	/* transmit state bits 68 */
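__percpu is a sparse address-space annotation and qualifies what the pointer points at, so it belongs between the base type and the '*'; placed after the declarator it annotates the pointer variable itself. A sketch (the macro expands to nothing in a normal build, so only sparse sees the difference):

#ifndef __percpu
#define __percpu	/* sparse-only attribute */
#endif

struct ppp_like {
	/* pointer to a per-cpu int, e.g. as returned by alloc_percpu(int) */
	int __percpu *xmit_recursion;
};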
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 3c9985f29950..5ce580f413b9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1496,11 +1496,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	switch (tun->flags & TUN_TYPE_MASK) {
 	case IFF_TUN:
 		if (tun->flags & IFF_NO_PI) {
-			switch (skb->data[0] & 0xf0) {
-			case 0x40:
+			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
+
+			switch (ip_version) {
+			case 4:
 				pi.proto = htons(ETH_P_IP);
 				break;
-			case 0x60:
+			case 6:
 				pi.proto = htons(ETH_P_IPV6);
 				break;
 			default:
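With IFF_NO_PI there is no packet-information header, so the protocol must be inferred from the first payload byte; checking skb->len first avoids reading a byte of a zero-length frame, and shifting the version nibble down lets the cases read 4 and 6 instead of 0x40/0x60. A self-contained sketch of the same classification:

#include <stdint.h>
#include <stddef.h>

#define ETH_P_IP	0x0800
#define ETH_P_IPV6	0x86DD

/* Returns the guessed EtherType, or 0 for frames to drop. */
static uint16_t guess_proto(const uint8_t *data, size_t len)
{
	uint8_t ip_version = len ? data[0] >> 4 : 0;

	switch (ip_version) {
	case 4:
		return ETH_P_IP;
	case 6:
		return ETH_P_IPV6;
	default:
		return 0;
	}
}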
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 8ab281b478f2..29c7e2ec0dcb 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -54,11 +54,19 @@ static int is_wireless_rndis(struct usb_interface_descriptor *desc)
 		desc->bInterfaceProtocol == 3);
 }
 
+static int is_novatel_rndis(struct usb_interface_descriptor *desc)
+{
+	return (desc->bInterfaceClass == USB_CLASS_MISC &&
+		desc->bInterfaceSubClass == 4 &&
+		desc->bInterfaceProtocol == 1);
+}
+
 #else
 
 #define is_rndis(desc)		0
 #define is_activesync(desc)	0
 #define is_wireless_rndis(desc)	0
+#define is_novatel_rndis(desc)	0
 
 #endif
 
@@ -150,7 +158,8 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
 	 */
 	rndis = (is_rndis(&intf->cur_altsetting->desc) ||
 		 is_activesync(&intf->cur_altsetting->desc) ||
-		 is_wireless_rndis(&intf->cur_altsetting->desc));
+		 is_wireless_rndis(&intf->cur_altsetting->desc) ||
+		 is_novatel_rndis(&intf->cur_altsetting->desc));
 
 	memset(info, 0, sizeof(*info));
 	info->control = intf;
@@ -547,6 +556,7 @@ static const struct driver_info wwan_info = {
 #define REALTEK_VENDOR_ID	0x0bda
 #define SAMSUNG_VENDOR_ID	0x04e8
 #define LENOVO_VENDOR_ID	0x17ef
+#define LINKSYS_VENDOR_ID	0x13b1
 #define NVIDIA_VENDOR_ID	0x0955
 #define HP_VENDOR_ID		0x03f0
 #define MICROSOFT_VENDOR_ID	0x045e
@@ -737,6 +747,15 @@ static const struct usb_device_id products[] = {
 	.driver_info = 0,
 },
 
+#if IS_ENABLED(CONFIG_USB_RTL8152)
+/* Linksys USB3GIGV1 Ethernet Adapter */
+{
+	USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = 0,
+},
+#endif
+
 /* ThinkPad USB-C Dock (based on Realtek RTL8153) */
 {
 	USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x3062, USB_CLASS_COMM,
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index b99a7fb09f8e..0161f77641fa 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1265,30 +1265,45 @@ static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
 				      struct ethtool_eeprom *ee, u8 *data)
 {
 	struct lan78xx_net *dev = netdev_priv(netdev);
+	int ret;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret)
+		return ret;
 
 	ee->magic = LAN78XX_EEPROM_MAGIC;
 
-	return lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
+
+	usb_autopm_put_interface(dev->intf);
+
+	return ret;
 }
 
 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
 				      struct ethtool_eeprom *ee, u8 *data)
 {
 	struct lan78xx_net *dev = netdev_priv(netdev);
+	int ret;
+
+	ret = usb_autopm_get_interface(dev->intf);
+	if (ret)
+		return ret;
 
-	/* Allow entire eeprom update only */
-	if ((ee->magic == LAN78XX_EEPROM_MAGIC) &&
-	    (ee->offset == 0) &&
-	    (ee->len == 512) &&
-	    (data[0] == EEPROM_INDICATOR))
-		return lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
+	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
+	 * to load data from EEPROM
+	 */
+	if (ee->magic == LAN78XX_EEPROM_MAGIC)
+		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
 		 (ee->offset == 0) &&
 		 (ee->len == 512) &&
 		 (data[0] == OTP_INDICATOR_1))
-		return lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
+		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
+
+	usb_autopm_put_interface(dev->intf);
 
-	return -EINVAL;
+	return ret;
 }
 
 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
@@ -2434,7 +2449,6 @@ static int lan78xx_reset(struct lan78xx_net *dev)
 	/* LAN7801 only has RGMII mode */
 	if (dev->chipid == ID_REV_CHIP_ID_7801_)
 		buf &= ~MAC_CR_GMII_EN_;
-	buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
 
 	ret = lan78xx_read_reg(dev, MAC_TX, &buf);
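ethtool EEPROM requests can arrive while the USB device is autosuspended, so the handlers above now hold a runtime-PM reference across the transfer and release it on every exit path. The general shape, with lan78xx_read_raw_eeprom() standing in for any I/O that needs the device awake:

static int eeprom_read(struct lan78xx_net *dev, u32 off, u32 len, u8 *buf)
{
	int ret;

	ret = usb_autopm_get_interface(dev->intf);	/* resumes if needed */
	if (ret)
		return ret;

	ret = lan78xx_read_raw_eeprom(dev, off, len, buf);

	usb_autopm_put_interface(dev->intf);	/* re-allow autosuspend */
	return ret;
}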
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ceb78e2ea4f0..941ece08ba78 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -613,6 +613,7 @@ enum rtl8152_flags {
 #define VENDOR_ID_MICROSOFT	0x045e
 #define VENDOR_ID_SAMSUNG	0x04e8
 #define VENDOR_ID_LENOVO	0x17ef
+#define VENDOR_ID_LINKSYS	0x13b1
 #define VENDOR_ID_NVIDIA	0x0955
 
 #define MCU_TYPE_PLA		0x0100
@@ -5316,6 +5317,7 @@ static const struct usb_device_id rtl8152_table[] = {
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
+	{REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
 	{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
 	{}
 };
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index a151f267aebb..b807c91abe1d 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -632,6 +632,10 @@ static const struct usb_device_id products [] = {
 	/* RNDIS for tethering */
 	USB_INTERFACE_INFO(USB_CLASS_WIRELESS_CONTROLLER, 1, 3),
 	.driver_info = (unsigned long) &rndis_info,
+}, {
+	/* Novatel Verizon USB730L */
+	USB_INTERFACE_INFO(USB_CLASS_MISC, 4, 1),
+	.driver_info = (unsigned long) &rndis_info,
 },
 	{ },		// END
 };
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index bc1633945a56..195dafb98131 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3396,9 +3396,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
3396 3396
 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
 
-#ifdef CONFIG_PM
-
-static int ath10k_pci_pm_suspend(struct device *dev)
+static __maybe_unused int ath10k_pci_pm_suspend(struct device *dev)
 {
 	struct ath10k *ar = dev_get_drvdata(dev);
 	int ret;
@@ -3414,7 +3412,7 @@ static int ath10k_pci_pm_suspend(struct device *dev)
 	return ret;
 }
 
-static int ath10k_pci_pm_resume(struct device *dev)
+static __maybe_unused int ath10k_pci_pm_resume(struct device *dev)
 {
 	struct ath10k *ar = dev_get_drvdata(dev);
 	int ret;
@@ -3433,7 +3431,6 @@ static int ath10k_pci_pm_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(ath10k_pci_pm_ops,
 			 ath10k_pci_pm_suspend,
 			 ath10k_pci_pm_resume);
-#endif
 
 static struct pci_driver ath10k_pci_driver = {
 	.name = "ath10k_pci",
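SIMPLE_DEV_PM_OPS already discards the callback pointers when CONFIG_PM is off, so the #ifdef around the callbacks only served to avoid -Wunused-function; __maybe_unused achieves the same with less clutter. The resulting shape, sketched for a hypothetical driver:

static int __maybe_unused mydrv_suspend(struct device *dev)
{
	return 0;	/* quiesce the hardware here */
}

static int __maybe_unused mydrv_resume(struct device *dev)
{
	return 0;	/* re-program the hardware here */
}

/* Expands to an empty ops table when CONFIG_PM=n; the callbacks are
 * then unreferenced, and __maybe_unused keeps the compiler quiet.
 */
static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);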
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index aaed4ab503ad..4157c90ad973 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -980,7 +980,7 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
 
 	eth_broadcast_addr(params_le->bssid);
 	params_le->bss_type = DOT11_BSSTYPE_ANY;
-	params_le->scan_type = 0;
+	params_le->scan_type = BRCMF_SCANTYPE_ACTIVE;
 	params_le->channel_num = 0;
 	params_le->nprobes = cpu_to_le32(-1);
 	params_le->active_time = cpu_to_le32(-1);
@@ -988,12 +988,9 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
 	params_le->home_time = cpu_to_le32(-1);
 	memset(&params_le->ssid_le, 0, sizeof(params_le->ssid_le));
 
-	/* if request is null exit so it will be all channel broadcast scan */
-	if (!request)
-		return;
-
 	n_ssids = request->n_ssids;
 	n_channels = request->n_channels;
+
 	/* Copy channel array if applicable */
 	brcmf_dbg(SCAN, "### List of channelspecs to scan ### %d\n",
 		  n_channels);
@@ -1030,16 +1027,8 @@ static void brcmf_escan_prep(struct brcmf_cfg80211_info *cfg,
 			ptr += sizeof(ssid_le);
 		}
 	} else {
-		brcmf_dbg(SCAN, "Broadcast scan %p\n", request->ssids);
-		if ((request->ssids) && request->ssids->ssid_len) {
-			brcmf_dbg(SCAN, "SSID %s len=%d\n",
-				  params_le->ssid_le.SSID,
-				  request->ssids->ssid_len);
-			params_le->ssid_le.SSID_len =
-				cpu_to_le32(request->ssids->ssid_len);
-			memcpy(&params_le->ssid_le.SSID, request->ssids->ssid,
-				request->ssids->ssid_len);
-		}
+		brcmf_dbg(SCAN, "Performing passive scan\n");
+		params_le->scan_type = BRCMF_SCANTYPE_PASSIVE;
 	}
 	/* Adding mask to channel numbers */
 	params_le->channel_num =
@@ -3162,6 +3151,7 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
 	struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
 	s32 status;
 	struct brcmf_escan_result_le *escan_result_le;
+	u32 escan_buflen;
 	struct brcmf_bss_info_le *bss_info_le;
 	struct brcmf_bss_info_le *bss = NULL;
 	u32 bi_length;
@@ -3181,11 +3171,23 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
 
 	if (status == BRCMF_E_STATUS_PARTIAL) {
 		brcmf_dbg(SCAN, "ESCAN Partial result\n");
+		if (e->datalen < sizeof(*escan_result_le)) {
+			brcmf_err("invalid event data length\n");
+			goto exit;
+		}
 		escan_result_le = (struct brcmf_escan_result_le *) data;
 		if (!escan_result_le) {
 			brcmf_err("Invalid escan result (NULL pointer)\n");
 			goto exit;
 		}
+		escan_buflen = le32_to_cpu(escan_result_le->buflen);
+		if (escan_buflen > BRCMF_ESCAN_BUF_SIZE ||
+		    escan_buflen > e->datalen ||
+		    escan_buflen < sizeof(*escan_result_le)) {
+			brcmf_err("Invalid escan buffer length: %d\n",
+				  escan_buflen);
+			goto exit;
+		}
 		if (le16_to_cpu(escan_result_le->bss_count) != 1) {
 			brcmf_err("Invalid bss_count %d: ignoring\n",
 				  escan_result_le->bss_count);
@@ -3202,9 +3204,8 @@ brcmf_cfg80211_escan_handler(struct brcmf_if *ifp,
 		}
 
 		bi_length = le32_to_cpu(bss_info_le->length);
-		if (bi_length != (le32_to_cpu(escan_result_le->buflen) -
-					WL_ESCAN_RESULTS_FIXED_SIZE)) {
-			brcmf_err("Invalid bss_info length %d: ignoring\n",
+		if (bi_length != escan_buflen - WL_ESCAN_RESULTS_FIXED_SIZE) {
+			brcmf_err("Ignoring invalid bss_info length: %d\n",
 				  bi_length);
 			goto exit;
 		}
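The event payload comes from the firmware, so each length is validated before anything derived from it is used: first that the event can hold the fixed escan header at all, then that the advertised buflen fits both the event and the preallocated escan buffer, and only then is bi_length computed from it. The check order as a sketch with simplified names:

static int check_escan(u32 datalen, const struct brcmf_escan_result_le *res)
{
	u32 buflen;

	if (datalen < sizeof(*res))		/* header present? */
		return -EINVAL;

	buflen = le32_to_cpu(res->buflen);
	if (buflen > BRCMF_ESCAN_BUF_SIZE ||	/* fits our buffer? */
	    buflen > datalen ||			/* fits the event? */
	    buflen < sizeof(*res))		/* at least the header? */
		return -EINVAL;

	return 0;				/* buflen is now trusted */
}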
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index 8391989b1882..e0d22fedb2b4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -45,6 +45,11 @@
 #define BRCMF_SCAN_PARAMS_COUNT_MASK	0x0000ffff
 #define BRCMF_SCAN_PARAMS_NSSID_SHIFT	16
 
+/* scan type definitions */
+#define BRCMF_SCANTYPE_DEFAULT		0xFF
+#define BRCMF_SCANTYPE_ACTIVE		0
+#define BRCMF_SCANTYPE_PASSIVE		1
+
 #define BRCMF_WSEC_MAX_PSK_LEN		32
 #define BRCMF_WSEC_PASSPHRASE		BIT(0)
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 5de19ea10575..b205a7bfb828 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -2167,7 +2167,7 @@ out:
 	 * 1. We are not using a unified image
 	 * 2. We are using a unified image but had an error while exiting D3
 	 */
-	set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+	set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
 	set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
 	/*
 	 * When switching images we return 1, which causes mac80211
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 15f2d826bb4b..3bcaa82f59b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1546,6 +1546,11 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
 	struct iwl_mvm_mc_iter_data *data = _data;
 	struct iwl_mvm *mvm = data->mvm;
 	struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
+	struct iwl_host_cmd hcmd = {
+		.id = MCAST_FILTER_CMD,
+		.flags = CMD_ASYNC,
+		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+	};
 	int ret, len;
 
 	/* if we don't have free ports, mcast frames will be dropped */
@@ -1560,7 +1565,10 @@ static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
 	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
 	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);
 
-	ret = iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_ASYNC, len, cmd);
+	hcmd.len[0] = len;
+	hcmd.data[0] = cmd;
+
+	ret = iwl_mvm_send_cmd(mvm, &hcmd);
 	if (ret)
 		IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret);
 }
@@ -1635,6 +1643,12 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
 	if (!cmd)
 		goto out;
 
+	if (changed_flags & FIF_ALLMULTI)
+		cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);
+
+	if (cmd->pass_all)
+		cmd->count = 0;
+
 	iwl_mvm_recalc_multicast(mvm);
 out:
 	mutex_unlock(&mvm->mutex);
@@ -2563,7 +2577,7 @@ static void iwl_mvm_purge_deferred_tx_frames(struct iwl_mvm *mvm,
 			 * queues, so we should never get a second deferred
 			 * frame for the RA/TID.
 			 */
-			iwl_mvm_start_mac_queues(mvm, info->hw_queue);
+			iwl_mvm_start_mac_queues(mvm, BIT(info->hw_queue));
 			ieee80211_free_txskb(mvm->hw, skb);
 		}
 	}
@@ -3975,6 +3989,43 @@ out_unlock:
 	return ret;
 }
 
+static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
+{
+	if (drop) {
+		if (iwl_mvm_has_new_tx_api(mvm))
+			/* TODO new tx api */
+			WARN_ONCE(1,
+				  "Need to implement flush TX queue\n");
+		else
+			iwl_mvm_flush_tx_path(mvm,
+					      iwl_mvm_flushable_queues(mvm) & queues,
+					      0);
+	} else {
+		if (iwl_mvm_has_new_tx_api(mvm)) {
+			struct ieee80211_sta *sta;
+			int i;
+
+			mutex_lock(&mvm->mutex);
+
+			for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) {
+				sta = rcu_dereference_protected(
+						mvm->fw_id_to_mac_id[i],
+						lockdep_is_held(&mvm->mutex));
+				if (IS_ERR_OR_NULL(sta))
+					continue;
+
+				iwl_mvm_wait_sta_queues_empty(mvm,
+						iwl_mvm_sta_from_mac80211(sta));
+			}
+
+			mutex_unlock(&mvm->mutex);
+		} else {
+			iwl_trans_wait_tx_queues_empty(mvm->trans,
+						       queues);
+		}
+	}
+}
+
 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
 			      struct ieee80211_vif *vif, u32 queues, bool drop)
 {
@@ -3985,7 +4036,12 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
 	int i;
 	u32 msk = 0;
 
-	if (!vif || vif->type != NL80211_IFTYPE_STATION)
+	if (!vif) {
+		iwl_mvm_flush_no_vif(mvm, queues, drop);
+		return;
+	}
+
+	if (vif->type != NL80211_IFTYPE_STATION)
 		return;
 
 	/* Make sure we're done with the deferred traffic before flushing */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index ba7bd049d3d4..0fe723ca844e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -661,7 +661,8 @@ static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
 	    (lq_sta->tx_agg_tid_en & BIT(tid)) &&
 	    (tid_data->tx_count_last >= IWL_MVM_RS_AGG_START_THRESHOLD)) {
 		IWL_DEBUG_RATE(mvm, "try to aggregate tid %d\n", tid);
-		rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta);
+		if (rs_tl_turn_on_agg_for_tid(mvm, lq_sta, tid, sta) == 0)
+			tid_data->state = IWL_AGG_QUEUED;
 	}
 }
 
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 67ffd9774712..77f77bc5d083 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -672,11 +672,12 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
 	 * If there was a significant jump in the nssn - adjust.
 	 * If the SN is smaller than the NSSN it might need to first go into
 	 * the reorder buffer, in which case we just release up to it and the
-	 * rest of the function will take of storing it and releasing up to the
-	 * nssn
+	 * rest of the function will take care of storing it and releasing up to
+	 * the nssn
 	 */
 	if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
-				buffer->buf_size)) {
+				buffer->buf_size) ||
+	    !ieee80211_sn_less(sn, buffer->head_sn + buffer->buf_size)) {
 		u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;
 
 		iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 50983615dce6..774122fed454 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -555,7 +555,7 @@ static int iwl_mvm_lmac_scan_abort(struct iwl_mvm *mvm)
 	struct iwl_host_cmd cmd = {
 		.id = SCAN_OFFLOAD_ABORT_CMD,
 	};
-	u32 status;
+	u32 status = CAN_ABORT_STATUS;
 
 	ret = iwl_mvm_send_cmd_status(mvm, &cmd, &status);
 	if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 411a2055dc45..c4a343534c5e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1285,7 +1285,7 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
1285{ 1285{
1286 struct iwl_mvm_add_sta_cmd cmd; 1286 struct iwl_mvm_add_sta_cmd cmd;
1287 int ret; 1287 int ret;
1288 u32 status; 1288 u32 status = ADD_STA_SUCCESS;
1289 1289
1290 lockdep_assert_held(&mvm->mutex); 1290 lockdep_assert_held(&mvm->mutex);
1291 1291
@@ -2385,8 +2385,10 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2385 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) 2385 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2386 return -EINVAL; 2386 return -EINVAL;
2387 2387
2388 if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) { 2388 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2389 IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n", 2389 mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2390 IWL_ERR(mvm,
2391 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2390 mvmsta->tid_data[tid].state); 2392 mvmsta->tid_data[tid].state);
2391 return -ENXIO; 2393 return -ENXIO;
2392 } 2394 }
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index d13893806513..aedabe101cf0 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -281,6 +281,7 @@ struct iwl_mvm_vif;
281 * These states relate to a specific RA / TID. 281 * These states relate to a specific RA / TID.
282 * 282 *
283 * @IWL_AGG_OFF: aggregation is not used 283 * @IWL_AGG_OFF: aggregation is not used
284 * @IWL_AGG_QUEUED: aggregation start work has been queued
284 * @IWL_AGG_STARTING: aggregation are starting (between start and oper) 285 * @IWL_AGG_STARTING: aggregation are starting (between start and oper)
285 * @IWL_AGG_ON: aggregation session is up 286 * @IWL_AGG_ON: aggregation session is up
286 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the 287 * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
@@ -290,6 +291,7 @@ struct iwl_mvm_vif;
290 */ 291 */
291enum iwl_mvm_agg_state { 292enum iwl_mvm_agg_state {
292 IWL_AGG_OFF = 0, 293 IWL_AGG_OFF = 0,
294 IWL_AGG_QUEUED,
293 IWL_AGG_STARTING, 295 IWL_AGG_STARTING,
294 IWL_AGG_ON, 296 IWL_AGG_ON,
295 IWL_EMPTYING_HW_QUEUE_ADDBA, 297 IWL_EMPTYING_HW_QUEUE_ADDBA,
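Reviewer note: the three hunks above (rs.c, sta.c, sta.h) cooperate. Rate scaling marks the TID IWL_AGG_QUEUED once the aggregation-start work is queued, and the start path then accepts that state in addition to IWL_AGG_OFF, closing the window where a second start attempt could race the queued work. A toy model of the transition rule sta.c now enforces:

    enum agg_state { AGG_OFF, AGG_QUEUED, AGG_STARTING, AGG_ON };

    /* Starting a session is legal from OFF (nothing pending) or from
     * QUEUED (the deferred start work is the caller). Anything else
     * means a session is already in flight. */
    static int agg_start_allowed(enum agg_state state)
    {
            return state == AGG_OFF || state == AGG_QUEUED;
    }
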
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 8876c2abc440..4d907f60bce9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -529,6 +529,7 @@ int iwl_mvm_ctdp_command(struct iwl_mvm *mvm, u32 op, u32 state)
529 529
530 lockdep_assert_held(&mvm->mutex); 530 lockdep_assert_held(&mvm->mutex);
531 531
532 status = 0;
532 ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP, 533 ret = iwl_mvm_send_cmd_pdu_status(mvm, WIDE_ID(PHY_OPS_GROUP,
533 CTDP_CONFIG_CMD), 534 CTDP_CONFIG_CMD),
534 sizeof(cmd), &cmd, &status); 535 sizeof(cmd), &cmd, &status);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 172b5e63d3fb..6f2e2af23219 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -564,8 +564,8 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
564 case NL80211_IFTYPE_AP: 564 case NL80211_IFTYPE_AP:
565 case NL80211_IFTYPE_ADHOC: 565 case NL80211_IFTYPE_ADHOC:
566 /* 566 /*
567 * Handle legacy hostapd as well, where station will be added 567 * Non-bufferable frames use the broadcast station, thus they
568 * only just before sending the association response. 568 * use the probe queue.
569 * Also take care of the case where we send a deauth to a 569 * Also take care of the case where we send a deauth to a
570 * station that we don't have, or similarly an association 570 * station that we don't have, or similarly an association
571 * response (with non-success status) for a station we can't 571 * response (with non-success status) for a station we can't
@@ -573,9 +573,9 @@ static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
573 * Also, disassociate frames might happen, particular with 573 * Also, disassociate frames might happen, particular with

574 * reason 7 ("Class 3 frame received from nonassociated STA"). 574 * reason 7 ("Class 3 frame received from nonassociated STA").
575 */ 575 */
576 if (ieee80211_is_probe_resp(fc) || ieee80211_is_auth(fc) || 576 if (ieee80211_is_mgmt(fc) &&
577 ieee80211_is_deauth(fc) || ieee80211_is_assoc_resp(fc) || 577 (!ieee80211_is_bufferable_mmpdu(fc) ||
578 ieee80211_is_disassoc(fc)) 578 ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
579 return mvm->probe_queue; 579 return mvm->probe_queue;
580 if (info->hw_queue == info->control.vif->cab_queue) 580 if (info->hw_queue == info->control.vif->cab_queue)
581 return mvmvif->cab_queue; 581 return mvmvif->cab_queue;
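Reviewer note: the old list of frame types approximated the real rule, which the new comment states: frames that are not bufferable MMPDUs go out via the broadcast station and therefore belong on the probe queue. Deauth and disassoc are bufferable per the spec but may target a station the driver never added, so they are routed there explicitly. Expressed as a predicate (a sketch using the helpers from linux/ieee80211.h):

    static bool use_probe_queue(__le16 fc)
    {
            return ieee80211_is_mgmt(fc) &&
                   (!ieee80211_is_bufferable_mmpdu(fc) ||
                    ieee80211_is_deauth(fc) ||
                    ieee80211_is_disassoc(fc));
    }
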
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 856fa6e8327e..a450bc6bc774 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -115,6 +115,8 @@ int qtnf_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
115 115
116 vif = qtnf_netdev_get_priv(wdev->netdev); 116 vif = qtnf_netdev_get_priv(wdev->netdev);
117 117
118 qtnf_scan_done(vif->mac, true);
119
118 if (qtnf_cmd_send_del_intf(vif)) 120 if (qtnf_cmd_send_del_intf(vif))
119 pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid, 121 pr_err("VIF%u.%u: failed to delete VIF\n", vif->mac->macid,
120 vif->vifid); 122 vif->vifid);
@@ -335,6 +337,8 @@ static int qtnf_stop_ap(struct wiphy *wiphy, struct net_device *dev)
335 struct qtnf_vif *vif = qtnf_netdev_get_priv(dev); 337 struct qtnf_vif *vif = qtnf_netdev_get_priv(dev);
336 int ret; 338 int ret;
337 339
340 qtnf_scan_done(vif->mac, true);
341
338 ret = qtnf_cmd_send_stop_ap(vif); 342 ret = qtnf_cmd_send_stop_ap(vif);
339 if (ret) { 343 if (ret) {
340 pr_err("VIF%u.%u: failed to stop AP operation in FW\n", 344 pr_err("VIF%u.%u: failed to stop AP operation in FW\n",
@@ -570,8 +574,6 @@ qtnf_del_station(struct wiphy *wiphy, struct net_device *dev,
570 !qtnf_sta_list_lookup(&vif->sta_list, params->mac)) 574 !qtnf_sta_list_lookup(&vif->sta_list, params->mac))
571 return 0; 575 return 0;
572 576
573 qtnf_scan_done(vif->mac, true);
574
575 ret = qtnf_cmd_send_del_sta(vif, params); 577 ret = qtnf_cmd_send_del_sta(vif, params);
576 if (ret) 578 if (ret)
577 pr_err("VIF%u.%u: failed to delete STA %pM\n", 579 pr_err("VIF%u.%u: failed to delete STA %pM\n",
@@ -1134,8 +1136,9 @@ void qtnf_virtual_intf_cleanup(struct net_device *ndev)
1134 } 1136 }
1135 1137
1136 vif->sta_state = QTNF_STA_DISCONNECTED; 1138 vif->sta_state = QTNF_STA_DISCONNECTED;
1137 qtnf_scan_done(mac, true);
1138 } 1139 }
1140
1141 qtnf_scan_done(mac, true);
1139} 1142}
1140 1143
1141void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif) 1144void qtnf_cfg80211_vif_reset(struct qtnf_vif *vif)
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
index 6a4af52522b8..66db26613b1f 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.h
@@ -34,6 +34,9 @@ static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
34 .aborted = aborted, 34 .aborted = aborted,
35 }; 35 };
36 36
37 if (timer_pending(&mac->scan_timeout))
38 del_timer_sync(&mac->scan_timeout);
39
37 mutex_lock(&mac->mac_lock); 40 mutex_lock(&mac->mac_lock);
38 41
39 if (mac->scan_req) { 42 if (mac->scan_req) {
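Reviewer note: the qtnfmac hunks move scan completion into the paths that tear state down (VIF deletion, AP stop, interface cleanup) and out of the per-station delete path, and they centralize the timeout cancellation inside qtnf_scan_done() so no completion path can leave a stale scan_timeout timer armed. The resulting helper, reconstructed from the hunks above (sketch):

    static inline void qtnf_scan_done(struct qtnf_wmac *mac, bool aborted)
    {
            struct cfg80211_scan_info info = {
                    .aborted = aborted,
            };

            /* disarm the watchdog first: after this point a late timer
             * cannot fire into state that is about to be released */
            if (timer_pending(&mac->scan_timeout))
                    del_timer_sync(&mac->scan_timeout);

            mutex_lock(&mac->mac_lock);
            if (mac->scan_req) {
                    cfg80211_scan_done(mac->scan_req, &info);
                    mac->scan_req = NULL;
            }
            mutex_unlock(&mac->mac_lock);
    }
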
diff --git a/drivers/net/wireless/quantenna/qtnfmac/event.c b/drivers/net/wireless/quantenna/qtnfmac/event.c
index 0fc2814eafad..43d2e7fd6e02 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/event.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/event.c
@@ -345,8 +345,6 @@ qtnf_event_handle_scan_complete(struct qtnf_wmac *mac,
345 return -EINVAL; 345 return -EINVAL;
346 } 346 }
347 347
348 if (timer_pending(&mac->scan_timeout))
349 del_timer_sync(&mac->scan_timeout);
350 qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED); 348 qtnf_scan_done(mac, le32_to_cpu(status->flags) & QLINK_SCAN_ABORTED);
351 349
352 return 0; 350 return 0;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
index 502e72b7cdcc..69131965a298 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie.c
@@ -661,14 +661,18 @@ static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
661 struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); 661 struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
662 dma_addr_t txbd_paddr, skb_paddr; 662 dma_addr_t txbd_paddr, skb_paddr;
663 struct qtnf_tx_bd *txbd; 663 struct qtnf_tx_bd *txbd;
664 unsigned long flags;
664 int len, i; 665 int len, i;
665 u32 info; 666 u32 info;
666 int ret = 0; 667 int ret = 0;
667 668
669 spin_lock_irqsave(&priv->tx0_lock, flags);
670
668 if (!qtnf_tx_queue_ready(priv)) { 671 if (!qtnf_tx_queue_ready(priv)) {
669 if (skb->dev) 672 if (skb->dev)
670 netif_stop_queue(skb->dev); 673 netif_stop_queue(skb->dev);
671 674
675 spin_unlock_irqrestore(&priv->tx0_lock, flags);
672 return NETDEV_TX_BUSY; 676 return NETDEV_TX_BUSY;
673 } 677 }
674 678
@@ -717,8 +721,10 @@ tx_done:
717 dev_kfree_skb_any(skb); 721 dev_kfree_skb_any(skb);
718 } 722 }
719 723
720 qtnf_pcie_data_tx_reclaim(priv);
721 priv->tx_done_count++; 724 priv->tx_done_count++;
725 spin_unlock_irqrestore(&priv->tx0_lock, flags);
726
727 qtnf_pcie_data_tx_reclaim(priv);
722 728
723 return NETDEV_TX_OK; 729 return NETDEV_TX_OK;
724} 730}
@@ -1247,6 +1253,7 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1247 strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME); 1253 strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME);
1248 init_completion(&bus->request_firmware_complete); 1254 init_completion(&bus->request_firmware_complete);
1249 mutex_init(&bus->bus_lock); 1255 mutex_init(&bus->bus_lock);
1256 spin_lock_init(&pcie_priv->tx0_lock);
1250 spin_lock_init(&pcie_priv->irq_lock); 1257 spin_lock_init(&pcie_priv->irq_lock);
1251 spin_lock_init(&pcie_priv->tx_reclaim_lock); 1258 spin_lock_init(&pcie_priv->tx_reclaim_lock);
1252 1259
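Reviewer note: qtnf_pcie_data_tx() previously had no serialization on the descriptor write index, so concurrent senders could race on the ready-check and the descriptor fill; the new tx0_lock makes them atomic. Reclaim is deliberately called after the unlock: it takes tx_reclaim_lock internally, and keeping it outside both shortens the IRQ-disabled section and avoids nesting the two locks. The resulting shape (sketch):

    spin_lock_irqsave(&priv->tx0_lock, flags);

    if (!qtnf_tx_queue_ready(priv)) {
            if (skb->dev)
                    netif_stop_queue(skb->dev);
            spin_unlock_irqrestore(&priv->tx0_lock, flags);
            return NETDEV_TX_BUSY;
    }

    /* ... map the skb and fill the next tx descriptor ... */

    priv->tx_done_count++;
    spin_unlock_irqrestore(&priv->tx0_lock, flags);

    qtnf_pcie_data_tx_reclaim(priv);    /* takes tx_reclaim_lock itself */
    return NETDEV_TX_OK;
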
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
index e76a23716ee0..86ac1ccedb52 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/pearl/pcie_bus_priv.h
@@ -34,6 +34,8 @@ struct qtnf_pcie_bus_priv {
34 34
35 /* lock for tx reclaim operations */ 35 /* lock for tx reclaim operations */
36 spinlock_t tx_reclaim_lock; 36 spinlock_t tx_reclaim_lock;
37 /* lock for tx0 operations */
38 spinlock_t tx0_lock;
37 u8 msi_enabled; 39 u8 msi_enabled;
38 int mps; 40 int mps;
39 41
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 1427a386a033..3e4d1e7998da 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1417,6 +1417,15 @@ static int btt_claim_class(struct device *dev)
1417 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); 1417 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1418 struct nd_namespace_index *nsindex; 1418 struct nd_namespace_index *nsindex;
1419 1419
1420 /*
1421 * If any of the DIMMs do not support labels the only
1422 * possible BTT format is v1.
1423 */
1424 if (!ndd) {
1425 loop_bitmask = 0;
1426 break;
1427 }
1428
1420 nsindex = to_namespace_index(ndd, ndd->ns_current); 1429 nsindex = to_namespace_index(ndd, ndd->ns_current);
1421 if (nsindex == NULL) 1430 if (nsindex == NULL)
1422 loop_bitmask |= 1; 1431 loop_bitmask |= 1;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index acc816b67582..5a14cc7f28ee 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -134,8 +134,6 @@ static inline bool nvme_req_needs_retry(struct request *req)
134 return false; 134 return false;
135 if (nvme_req(req)->status & NVME_SC_DNR) 135 if (nvme_req(req)->status & NVME_SC_DNR)
136 return false; 136 return false;
137 if (jiffies - req->start_time >= req->timeout)
138 return false;
139 if (nvme_req(req)->retries >= nvme_max_retries) 137 if (nvme_req(req)->retries >= nvme_max_retries)
140 return false; 138 return false;
141 return true; 139 return true;
@@ -2138,7 +2136,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
2138 struct nvme_ns *ns = nvme_get_ns_from_dev(dev); 2136 struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
2139 2137
2140 if (a == &dev_attr_uuid.attr) { 2138 if (a == &dev_attr_uuid.attr) {
2141 if (uuid_is_null(&ns->uuid) || 2139 if (uuid_is_null(&ns->uuid) &&
2142 !memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) 2140 !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
2143 return 0; 2141 return 0;
2144 } 2142 }
@@ -2590,7 +2588,7 @@ static void nvme_async_event_work(struct work_struct *work)
2590 container_of(work, struct nvme_ctrl, async_event_work); 2588 container_of(work, struct nvme_ctrl, async_event_work);
2591 2589
2592 spin_lock_irq(&ctrl->lock); 2590 spin_lock_irq(&ctrl->lock);
2593 while (ctrl->event_limit > 0) { 2591 while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
2594 int aer_idx = --ctrl->event_limit; 2592 int aer_idx = --ctrl->event_limit;
2595 2593
2596 spin_unlock_irq(&ctrl->lock); 2594 spin_unlock_irq(&ctrl->lock);
@@ -2677,7 +2675,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
2677 /*FALLTHRU*/ 2675 /*FALLTHRU*/
2678 case NVME_SC_ABORT_REQ: 2676 case NVME_SC_ABORT_REQ:
2679 ++ctrl->event_limit; 2677 ++ctrl->event_limit;
2680 queue_work(nvme_wq, &ctrl->async_event_work); 2678 if (ctrl->state == NVME_CTRL_LIVE)
2679 queue_work(nvme_wq, &ctrl->async_event_work);
2681 break; 2680 break;
2682 default: 2681 default:
2683 break; 2682 break;
@@ -2692,7 +2691,7 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
2692 nvme_queue_scan(ctrl); 2691 nvme_queue_scan(ctrl);
2693 break; 2692 break;
2694 case NVME_AER_NOTICE_FW_ACT_STARTING: 2693 case NVME_AER_NOTICE_FW_ACT_STARTING:
2695 schedule_work(&ctrl->fw_act_work); 2694 queue_work(nvme_wq, &ctrl->fw_act_work);
2696 break; 2695 break;
2697 default: 2696 default:
2698 dev_warn(ctrl->device, "async event result %08x\n", result); 2697 dev_warn(ctrl->device, "async event result %08x\n", result);
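Reviewer note: several independent fixes in this file; the visibility change is the subtle one. With ||, the uuid attribute vanished whenever either identifier was missing, hiding it for namespaces that report only an NGUID even though the show routine falls back to the NGUID when the UUID is null. The fixed condition hides the attribute only when both are absent (sketch):

    /* memchr_inv() returns NULL when every byte matches, so
     * !memchr_inv(...) means "nguid is all zero". Truth table:
     *   uuid set,  nguid set  -> visible
     *   uuid set,  nguid zero -> visible
     *   uuid null, nguid set  -> visible (show falls back to nguid)
     *   uuid null, nguid zero -> hidden
     */
    bool hidden = uuid_is_null(&ns->uuid) &&
                  !memchr_inv(ns->nguid, 0, sizeof(ns->nguid));
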
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 47307752dc65..555c976cc2ee 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -565,6 +565,7 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
565 opts->queue_size = NVMF_DEF_QUEUE_SIZE; 565 opts->queue_size = NVMF_DEF_QUEUE_SIZE;
566 opts->nr_io_queues = num_online_cpus(); 566 opts->nr_io_queues = num_online_cpus();
567 opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY; 567 opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
568 opts->kato = NVME_DEFAULT_KATO;
568 569
569 options = o = kstrdup(buf, GFP_KERNEL); 570 options = o = kstrdup(buf, GFP_KERNEL);
570 if (!options) 571 if (!options)
@@ -655,21 +656,22 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
655 goto out; 656 goto out;
656 } 657 }
657 658
658 if (opts->discovery_nqn) {
659 pr_err("Discovery controllers cannot accept keep_alive_tmo != 0\n");
660 ret = -EINVAL;
661 goto out;
662 }
663
664 if (token < 0) { 659 if (token < 0) {
665 pr_err("Invalid keep_alive_tmo %d\n", token); 660 pr_err("Invalid keep_alive_tmo %d\n", token);
666 ret = -EINVAL; 661 ret = -EINVAL;
667 goto out; 662 goto out;
668 } else if (token == 0) { 663 } else if (token == 0 && !opts->discovery_nqn) {
669 /* Allowed for debug */ 664 /* Allowed for debug */
670 pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n"); 665 pr_warn("keep_alive_tmo 0 won't execute keep alives!!!\n");
671 } 666 }
672 opts->kato = token; 667 opts->kato = token;
668
669 if (opts->discovery_nqn && opts->kato) {
670 pr_err("Discovery controllers cannot accept KATO != 0\n");
671 ret = -EINVAL;
672 goto out;
673 }
674
673 break; 675 break;
674 case NVMF_OPT_CTRL_LOSS_TMO: 676 case NVMF_OPT_CTRL_LOSS_TMO:
675 if (match_int(args, &token)) { 677 if (match_int(args, &token)) {
@@ -762,8 +764,6 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
762 uuid_copy(&opts->host->id, &hostid); 764 uuid_copy(&opts->host->id, &hostid);
763 765
764out: 766out:
765 if (!opts->discovery_nqn && !opts->kato)
766 opts->kato = NVME_DEFAULT_KATO;
767 kfree(options); 767 kfree(options);
768 return ret; 768 return ret;
769} 769}
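Reviewer note: seeding opts->kato with NVME_DEFAULT_KATO before parsing, instead of patching it up at the out: label, means an explicit keep_alive_tmo=0 survives rather than being overwritten with the default. The discovery check also moves so that it validates the effective value instead of rejecting the option's mere presence; keep_alive_tmo=0 is now accepted for discovery controllers. The post-parse invariant (sketch):

    opts->kato = NVME_DEFAULT_KATO;   /* default unless overridden */

    /* ... option parsing may assign opts->kato ... */

    if (opts->discovery_nqn && opts->kato)
            return -EINVAL;   /* discovery controllers must keep KATO == 0 */
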
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index d2e882c0f496..af075e998944 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1376,7 +1376,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1376 if (atomic_read(&op->state) == FCPOP_STATE_ABORTED) 1376 if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
1377 status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); 1377 status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
1378 else if (freq->status) 1378 else if (freq->status)
1379 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); 1379 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1380 1380
1381 /* 1381 /*
1382 * For the linux implementation, if we have an unsuccesful 1382 * For the linux implementation, if we have an unsuccesful
1382 * For the linux implementation, if we have an unsuccessful 1382
@@ -1404,7 +1404,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1404 */ 1404 */
1405 if (freq->transferred_length != 1405 if (freq->transferred_length !=
1406 be32_to_cpu(op->cmd_iu.data_len)) { 1406 be32_to_cpu(op->cmd_iu.data_len)) {
1407 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); 1407 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1408 goto done; 1408 goto done;
1409 } 1409 }
1410 result.u64 = 0; 1410 result.u64 = 0;
@@ -1421,7 +1421,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1421 freq->transferred_length || 1421 freq->transferred_length ||
1422 op->rsp_iu.status_code || 1422 op->rsp_iu.status_code ||
1423 sqe->common.command_id != cqe->command_id)) { 1423 sqe->common.command_id != cqe->command_id)) {
1424 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); 1424 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1425 goto done; 1425 goto done;
1426 } 1426 }
1427 result = cqe->result; 1427 result = cqe->result;
@@ -1429,7 +1429,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
1429 break; 1429 break;
1430 1430
1431 default: 1431 default:
1432 status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1); 1432 status = cpu_to_le16(NVME_SC_INTERNAL << 1);
1433 goto done; 1433 goto done;
1434 } 1434 }
1435 1435
@@ -1989,16 +1989,17 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
1989 * as well as those by FC-NVME spec. 1989 * as well as those by FC-NVME spec.
1990 */ 1990 */
1991 WARN_ON_ONCE(sqe->common.metadata); 1991 WARN_ON_ONCE(sqe->common.metadata);
1992 WARN_ON_ONCE(sqe->common.dptr.prp1);
1993 WARN_ON_ONCE(sqe->common.dptr.prp2);
1994 sqe->common.flags |= NVME_CMD_SGL_METABUF; 1992 sqe->common.flags |= NVME_CMD_SGL_METABUF;
1995 1993
1996 /* 1994 /*
1997 * format SQE DPTR field per FC-NVME rules 1995 * format SQE DPTR field per FC-NVME rules:
1998 * type=data block descr; subtype=offset; 1996 * type=0x5 Transport SGL Data Block Descriptor
1999 * offset is currently 0. 1997 * subtype=0xA Transport-specific value
1998 * address=0
1999 * length=length of the data series
2000 */ 2000 */
2001 sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET; 2001 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
2002 NVME_SGL_FMT_TRANSPORT_A;
2002 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); 2003 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
2003 sqe->rw.dptr.sgl.addr = 0; 2004 sqe->rw.dptr.sgl.addr = 0;
2004 2005
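Reviewer note: the identifier byte of an NVMe SGL descriptor packs the descriptor Type into bits 7:4 and the Sub Type into bits 3:0. FC-NVMe requires a Transport SGL Data Block descriptor (type 0x5) with transport-specific sub type 0xA, address 0, and the total data length, which is exactly what the hunk builds; the dropped prp1/prp2 WARNs alias the same dptr union the transport now overwrites. Worked out (sketch):

    #include <stdint.h>

    #define NVME_TRANSPORT_SGL_DATA_DESC 0x5  /* descriptor type */
    #define NVME_SGL_FMT_TRANSPORT_A     0xA  /* sub type */

    /* type in bits 7:4, sub type in bits 3:0 -> identifier byte 0x5A */
    static const uint8_t fc_sgl_type =
            (NVME_TRANSPORT_SGL_DATA_DESC << 4) | NVME_SGL_FMT_TRANSPORT_A;
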
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 4a2121335f48..3f5a04c586ce 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -24,6 +24,7 @@
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27#include <linux/once.h>
27#include <linux/pci.h> 28#include <linux/pci.h>
28#include <linux/poison.h> 29#include <linux/poison.h>
29#include <linux/t10-pi.h> 30#include <linux/t10-pi.h>
@@ -93,7 +94,7 @@ struct nvme_dev {
93 struct mutex shutdown_lock; 94 struct mutex shutdown_lock;
94 bool subsystem; 95 bool subsystem;
95 void __iomem *cmb; 96 void __iomem *cmb;
96 dma_addr_t cmb_dma_addr; 97 pci_bus_addr_t cmb_bus_addr;
97 u64 cmb_size; 98 u64 cmb_size;
98 u32 cmbsz; 99 u32 cmbsz;
99 u32 cmbloc; 100 u32 cmbloc;
@@ -540,6 +541,20 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
540} 541}
541#endif 542#endif
542 543
544static void nvme_print_sgl(struct scatterlist *sgl, int nents)
545{
546 int i;
547 struct scatterlist *sg;
548
549 for_each_sg(sgl, sg, nents, i) {
550 dma_addr_t phys = sg_phys(sg);
551 pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
552 "dma_address:%pad dma_length:%d\n",
553 i, &phys, sg->offset, sg->length, &sg_dma_address(sg),
554 sg_dma_len(sg));
555 }
556}
557
543static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req) 558static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
544{ 559{
545 struct nvme_iod *iod = blk_mq_rq_to_pdu(req); 560 struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -622,19 +637,10 @@ static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req)
622 return BLK_STS_OK; 637 return BLK_STS_OK;
623 638
624 bad_sgl: 639 bad_sgl:
625 if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n", 640 WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
626 blk_rq_payload_bytes(req), iod->nents)) { 641 "Invalid SGL for payload:%d nents:%d\n",
627 for_each_sg(iod->sg, sg, iod->nents, i) { 642 blk_rq_payload_bytes(req), iod->nents);
628 dma_addr_t phys = sg_phys(sg);
629 pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d "
630 "dma_address:%pad dma_length:%d\n", i, &phys,
631 sg->offset, sg->length,
632 &sg_dma_address(sg),
633 sg_dma_len(sg));
634 }
635 }
636 return BLK_STS_IOERR; 643 return BLK_STS_IOERR;
637
638} 644}
639 645
640static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, 646static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
@@ -1220,7 +1226,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
1220 if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) { 1226 if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
1221 unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth), 1227 unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
1222 dev->ctrl.page_size); 1228 dev->ctrl.page_size);
1223 nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset; 1229 nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
1224 nvmeq->sq_cmds_io = dev->cmb + offset; 1230 nvmeq->sq_cmds_io = dev->cmb + offset;
1225 } else { 1231 } else {
1226 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth), 1232 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
@@ -1313,11 +1319,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
1313 if (result < 0) 1319 if (result < 0)
1314 goto release_cq; 1320 goto release_cq;
1315 1321
1322 nvme_init_queue(nvmeq, qid);
1316 result = queue_request_irq(nvmeq); 1323 result = queue_request_irq(nvmeq);
1317 if (result < 0) 1324 if (result < 0)
1318 goto release_sq; 1325 goto release_sq;
1319 1326
1320 nvme_init_queue(nvmeq, qid);
1321 return result; 1327 return result;
1322 1328
1323 release_sq: 1329 release_sq:
@@ -1464,6 +1470,7 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
1464 return result; 1470 return result;
1465 1471
1466 nvmeq->cq_vector = 0; 1472 nvmeq->cq_vector = 0;
1473 nvme_init_queue(nvmeq, 0);
1467 result = queue_request_irq(nvmeq); 1474 result = queue_request_irq(nvmeq);
1468 if (result) { 1475 if (result) {
1469 nvmeq->cq_vector = -1; 1476 nvmeq->cq_vector = -1;
@@ -1520,7 +1527,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
1520 resource_size_t bar_size; 1527 resource_size_t bar_size;
1521 struct pci_dev *pdev = to_pci_dev(dev->dev); 1528 struct pci_dev *pdev = to_pci_dev(dev->dev);
1522 void __iomem *cmb; 1529 void __iomem *cmb;
1523 dma_addr_t dma_addr; 1530 int bar;
1524 1531
1525 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); 1532 dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
1526 if (!(NVME_CMB_SZ(dev->cmbsz))) 1533 if (!(NVME_CMB_SZ(dev->cmbsz)))
@@ -1533,7 +1540,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
1533 szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz)); 1540 szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
1534 size = szu * NVME_CMB_SZ(dev->cmbsz); 1541 size = szu * NVME_CMB_SZ(dev->cmbsz);
1535 offset = szu * NVME_CMB_OFST(dev->cmbloc); 1542 offset = szu * NVME_CMB_OFST(dev->cmbloc);
1536 bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc)); 1543 bar = NVME_CMB_BIR(dev->cmbloc);
1544 bar_size = pci_resource_len(pdev, bar);
1537 1545
1538 if (offset > bar_size) 1546 if (offset > bar_size)
1539 return NULL; 1547 return NULL;
@@ -1546,12 +1554,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
1546 if (size > bar_size - offset) 1554 if (size > bar_size - offset)
1547 size = bar_size - offset; 1555 size = bar_size - offset;
1548 1556
1549 dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset; 1557 cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
1550 cmb = ioremap_wc(dma_addr, size);
1551 if (!cmb) 1558 if (!cmb)
1552 return NULL; 1559 return NULL;
1553 1560
1554 dev->cmb_dma_addr = dma_addr; 1561 dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
1555 dev->cmb_size = size; 1562 dev->cmb_size = size;
1556 return cmb; 1563 return cmb;
1557} 1564}
@@ -2156,7 +2163,6 @@ static void nvme_reset_work(struct work_struct *work)
2156 if (result) 2163 if (result)
2157 goto out; 2164 goto out;
2158 2165
2159 nvme_init_queue(dev->queues[0], 0);
2160 result = nvme_alloc_admin_tags(dev); 2166 result = nvme_alloc_admin_tags(dev);
2161 if (result) 2167 if (result)
2162 goto out; 2168 goto out;
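Reviewer note: cmb_dma_addr was misnamed and, on systems where the host bridge translates addresses, wrong: ioremap needs the CPU physical address of the BAR, while the SQ base programmed into the controller must be a PCI bus address, so the two are now derived independently. The same file also moves nvme_init_queue() ahead of queue_request_irq() so an interrupt firing immediately sees an initialized queue, and uses DO_ONCE() so a bad SGL is dumped only once. The fixed address pairing (sketch; sq_offset stands in for the per-queue offset the driver computes):

    /* CPU view: map the BAR region the driver writes SQEs into */
    cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);

    /* device view: what the controller dereferences for queue bases */
    dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;

    /* later, per queue: */
    nvmeq->sq_dma_addr = dev->cmb_bus_addr + sq_offset;  /* bus address */
    nvmeq->sq_cmds_io  = dev->cmb + sq_offset;           /* CPU mapping */
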
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 58983000964b..92a03ff5fb4d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -942,7 +942,12 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
942 } 942 }
943 943
944 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); 944 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
945 WARN_ON_ONCE(!changed); 945 if (!changed) {
946 /* state change failure is ok if we're in DELETING state */
947 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING);
948 return;
949 }
950
946 ctrl->ctrl.nr_reconnects = 0; 951 ctrl->ctrl.nr_reconnects = 0;
947 952
948 nvme_start_ctrl(&ctrl->ctrl); 953 nvme_start_ctrl(&ctrl->ctrl);
@@ -962,7 +967,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
962 struct nvme_rdma_ctrl *ctrl = container_of(work, 967 struct nvme_rdma_ctrl *ctrl = container_of(work,
963 struct nvme_rdma_ctrl, err_work); 968 struct nvme_rdma_ctrl, err_work);
964 969
965 nvme_stop_ctrl(&ctrl->ctrl); 970 nvme_stop_keep_alive(&ctrl->ctrl);
966 971
967 if (ctrl->ctrl.queue_count > 1) { 972 if (ctrl->ctrl.queue_count > 1) {
968 nvme_stop_queues(&ctrl->ctrl); 973 nvme_stop_queues(&ctrl->ctrl);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 7c23eaf8e563..1b208beeef50 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -390,10 +390,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
390 if (status) 390 if (status)
391 nvmet_set_status(req, status); 391 nvmet_set_status(req, status);
392 392
393 /* XXX: need to fill in something useful for sq_head */ 393 if (req->sq->size)
394 req->rsp->sq_head = 0; 394 req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size;
395 if (likely(req->sq)) /* may happen during early failure */ 395 req->rsp->sq_head = cpu_to_le16(req->sq->sqhd);
396 req->rsp->sq_id = cpu_to_le16(req->sq->qid); 396 req->rsp->sq_id = cpu_to_le16(req->sq->qid);
397 req->rsp->command_id = req->cmd->common.command_id; 397 req->rsp->command_id = req->cmd->common.command_id;
398 398
399 if (req->ns) 399 if (req->ns)
@@ -420,6 +420,7 @@ void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
420void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, 420void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
421 u16 qid, u16 size) 421 u16 qid, u16 size)
422{ 422{
423 sq->sqhd = 0;
423 sq->qid = qid; 424 sq->qid = qid;
424 sq->size = size; 425 sq->size = size;
425 426
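Reviewer note: always reporting sq_head = 0 gave the host nothing to reclaim submission-queue slots against. The target now keeps a real head pointer per SQ, advancing one slot per completed command and wrapping at the queue size; the size == 0 guard covers completions issued before nvmet_sq_setup() has run. A standalone model:

    #include <stdint.h>

    struct sq { uint16_t sqhd; uint16_t size; };

    /* one completion consumes one SQ slot; the new head is reported in
     * the CQE so the host can reuse the slot */
    static uint16_t sq_complete_one(struct sq *sq)
    {
            if (sq->size)   /* still 0 during early failure paths */
                    sq->sqhd = (sq->sqhd + 1) % sq->size;
            return sq->sqhd;
    }
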
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 859a66725291..db3bf6b8bf9e 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -109,9 +109,14 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
109 pr_warn("queue already connected!\n"); 109 pr_warn("queue already connected!\n");
110 return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR; 110 return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
111 } 111 }
112 if (!sqsize) {
113 pr_warn("queue size zero!\n");
114 return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
115 }
112 116
113 nvmet_cq_setup(ctrl, req->cq, qid, sqsize); 117 /* note: convert queue size from 0's-based value to 1's-based value */
114 nvmet_sq_setup(ctrl, req->sq, qid, sqsize); 118 nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
119 nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
115 return 0; 120 return 0;
116} 121}
117 122
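Reviewer note: SQSIZE on the Fabrics connect command is a 0's-based field: wire value N means N + 1 entries, so a host asking for a 32-entry queue sends 31. Wire value 0 would describe a one-entry ring, which can never distinguish full from empty (the spec's minimum queue size is 2), hence the new rejection and the + 1 when sizing the in-memory queues:

    /* wire value is 0's-based: N encodes N + 1 entries */
    if (!sqsize)    /* a 1-entry ring can't tell full from empty */
            return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

    nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
    nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);  /* e.g. 31 -> 32 */
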
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 421e43bf1dd7..58e010bdda3e 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -148,7 +148,7 @@ struct nvmet_fc_tgt_assoc {
148 u32 a_id; 148 u32 a_id;
149 struct nvmet_fc_tgtport *tgtport; 149 struct nvmet_fc_tgtport *tgtport;
150 struct list_head a_list; 150 struct list_head a_list;
151 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES]; 151 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
152 struct kref ref; 152 struct kref ref;
153}; 153};
154 154
@@ -608,7 +608,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
608 unsigned long flags; 608 unsigned long flags;
609 int ret; 609 int ret;
610 610
611 if (qid >= NVMET_NR_QUEUES) 611 if (qid > NVMET_NR_QUEUES)
612 return NULL; 612 return NULL;
613 613
614 queue = kzalloc((sizeof(*queue) + 614 queue = kzalloc((sizeof(*queue) +
@@ -783,6 +783,9 @@ nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
783 u16 qid = nvmet_fc_getqueueid(connection_id); 783 u16 qid = nvmet_fc_getqueueid(connection_id);
784 unsigned long flags; 784 unsigned long flags;
785 785
786 if (qid > NVMET_NR_QUEUES)
787 return NULL;
788
786 spin_lock_irqsave(&tgtport->lock, flags); 789 spin_lock_irqsave(&tgtport->lock, flags);
787 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) { 790 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
788 if (association_id == assoc->association_id) { 791 if (association_id == assoc->association_id) {
@@ -888,7 +891,7 @@ nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
888 int i; 891 int i;
889 892
890 spin_lock_irqsave(&tgtport->lock, flags); 893 spin_lock_irqsave(&tgtport->lock, flags);
891 for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) { 894 for (i = NVMET_NR_QUEUES; i >= 0; i--) {
892 queue = assoc->queues[i]; 895 queue = assoc->queues[i];
893 if (queue) { 896 if (queue) {
894 if (!nvmet_fc_tgt_q_get(queue)) 897 if (!nvmet_fc_tgt_q_get(queue))
@@ -1910,8 +1913,7 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1910 spin_lock_irqsave(&fod->flock, flags); 1913 spin_lock_irqsave(&fod->flock, flags);
1911 fod->writedataactive = false; 1914 fod->writedataactive = false;
1912 spin_unlock_irqrestore(&fod->flock, flags); 1915 spin_unlock_irqrestore(&fod->flock, flags);
1913 nvmet_req_complete(&fod->req, 1916 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1914 NVME_SC_FC_TRANSPORT_ERROR);
1915 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ { 1917 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1916 fcpreq->fcp_error = ret; 1918 fcpreq->fcp_error = ret;
1917 fcpreq->transferred_length = 0; 1919 fcpreq->transferred_length = 0;
@@ -1929,8 +1931,7 @@ __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1929 /* if in the middle of an io and we need to tear down */ 1931 /* if in the middle of an io and we need to tear down */
1930 if (abort) { 1932 if (abort) {
1931 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { 1933 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1932 nvmet_req_complete(&fod->req, 1934 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1933 NVME_SC_FC_TRANSPORT_ERROR);
1934 return true; 1935 return true;
1935 } 1936 }
1936 1937
@@ -1968,8 +1969,7 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1968 fod->abort = true; 1969 fod->abort = true;
1969 spin_unlock(&fod->flock); 1970 spin_unlock(&fod->flock);
1970 1971
1971 nvmet_req_complete(&fod->req, 1972 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1972 NVME_SC_FC_TRANSPORT_ERROR);
1973 return; 1973 return;
1974 } 1974 }
1975 1975
@@ -2533,13 +2533,17 @@ nvmet_fc_remove_port(struct nvmet_port *port)
2533{ 2533{
2534 struct nvmet_fc_tgtport *tgtport = port->priv; 2534 struct nvmet_fc_tgtport *tgtport = port->priv;
2535 unsigned long flags; 2535 unsigned long flags;
2536 bool matched = false;
2536 2537
2537 spin_lock_irqsave(&nvmet_fc_tgtlock, flags); 2538 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2538 if (tgtport->port == port) { 2539 if (tgtport->port == port) {
2539 nvmet_fc_tgtport_put(tgtport); 2540 matched = true;
2540 tgtport->port = NULL; 2541 tgtport->port = NULL;
2541 } 2542 }
2542 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags); 2543 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2544
2545 if (matched)
2546 nvmet_fc_tgtport_put(tgtport);
2543} 2547}
2544 2548
2545static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = { 2549static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
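Reviewer note: NVMET_NR_QUEUES counts I/O queues only, and qid 0 is the admin queue, so valid qids run from 0 to NVMET_NR_QUEUES inclusive. The old array held only NVMET_NR_QUEUES slots and the old bound rejected qid == NVMET_NR_QUEUES, silently dropping the last I/O queue the core was prepared to offer; nvmet_fc_find_target_queue() gains the same bound so a hostile connection id cannot index past the array. The sizing rule (sketch; the constant's value is whatever nvmet.h defines):

    /* indexed directly by qid: slot 0 = admin, 1..N = I/O queues */
    struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];

    if (qid > NVMET_NR_QUEUES)   /* valid qids: 0..NVMET_NR_QUEUES */
            return NULL;
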
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 1cb9847ec261..7b75d9de55ab 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -224,8 +224,6 @@ struct fcloop_nport {
224 struct fcloop_lport *lport; 224 struct fcloop_lport *lport;
225 struct list_head nport_list; 225 struct list_head nport_list;
226 struct kref ref; 226 struct kref ref;
227 struct completion rport_unreg_done;
228 struct completion tport_unreg_done;
229 u64 node_name; 227 u64 node_name;
230 u64 port_name; 228 u64 port_name;
231 u32 port_role; 229 u32 port_role;
@@ -576,7 +574,7 @@ fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
576 tfcp_req->aborted = true; 574 tfcp_req->aborted = true;
577 spin_unlock(&tfcp_req->reqlock); 575 spin_unlock(&tfcp_req->reqlock);
578 576
579 tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED; 577 tfcp_req->status = NVME_SC_INTERNAL;
580 578
581 /* 579 /*
582 * nothing more to do. If io wasn't active, the transport should 580 * nothing more to do. If io wasn't active, the transport should
@@ -631,6 +629,32 @@ fcloop_fcp_abort(struct nvme_fc_local_port *localport,
631} 629}
632 630
633static void 631static void
632fcloop_nport_free(struct kref *ref)
633{
634 struct fcloop_nport *nport =
635 container_of(ref, struct fcloop_nport, ref);
636 unsigned long flags;
637
638 spin_lock_irqsave(&fcloop_lock, flags);
639 list_del(&nport->nport_list);
640 spin_unlock_irqrestore(&fcloop_lock, flags);
641
642 kfree(nport);
643}
644
645static void
646fcloop_nport_put(struct fcloop_nport *nport)
647{
648 kref_put(&nport->ref, fcloop_nport_free);
649}
650
651static int
652fcloop_nport_get(struct fcloop_nport *nport)
653{
654 return kref_get_unless_zero(&nport->ref);
655}
656
657static void
634fcloop_localport_delete(struct nvme_fc_local_port *localport) 658fcloop_localport_delete(struct nvme_fc_local_port *localport)
635{ 659{
636 struct fcloop_lport *lport = localport->private; 660 struct fcloop_lport *lport = localport->private;
@@ -644,8 +668,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
644{ 668{
645 struct fcloop_rport *rport = remoteport->private; 669 struct fcloop_rport *rport = remoteport->private;
646 670
647 /* release any threads waiting for the unreg to complete */ 671 fcloop_nport_put(rport->nport);
648 complete(&rport->nport->rport_unreg_done);
649} 672}
650 673
651static void 674static void
@@ -653,8 +676,7 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
653{ 676{
654 struct fcloop_tport *tport = targetport->private; 677 struct fcloop_tport *tport = targetport->private;
655 678
656 /* release any threads waiting for the unreg to complete */ 679 fcloop_nport_put(tport->nport);
657 complete(&tport->nport->tport_unreg_done);
658} 680}
659 681
660#define FCLOOP_HW_QUEUES 4 682#define FCLOOP_HW_QUEUES 4
@@ -722,6 +744,7 @@ fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
722 goto out_free_opts; 744 goto out_free_opts;
723 } 745 }
724 746
747 memset(&pinfo, 0, sizeof(pinfo));
725 pinfo.node_name = opts->wwnn; 748 pinfo.node_name = opts->wwnn;
726 pinfo.port_name = opts->wwpn; 749 pinfo.port_name = opts->wwpn;
727 pinfo.port_role = opts->roles; 750 pinfo.port_role = opts->roles;
@@ -804,32 +827,6 @@ fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
804 return ret ? ret : count; 827 return ret ? ret : count;
805} 828}
806 829
807static void
808fcloop_nport_free(struct kref *ref)
809{
810 struct fcloop_nport *nport =
811 container_of(ref, struct fcloop_nport, ref);
812 unsigned long flags;
813
814 spin_lock_irqsave(&fcloop_lock, flags);
815 list_del(&nport->nport_list);
816 spin_unlock_irqrestore(&fcloop_lock, flags);
817
818 kfree(nport);
819}
820
821static void
822fcloop_nport_put(struct fcloop_nport *nport)
823{
824 kref_put(&nport->ref, fcloop_nport_free);
825}
826
827static int
828fcloop_nport_get(struct fcloop_nport *nport)
829{
830 return kref_get_unless_zero(&nport->ref);
831}
832
833static struct fcloop_nport * 830static struct fcloop_nport *
834fcloop_alloc_nport(const char *buf, size_t count, bool remoteport) 831fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
835{ 832{
@@ -938,6 +935,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
938 if (!nport) 935 if (!nport)
939 return -EIO; 936 return -EIO;
940 937
938 memset(&pinfo, 0, sizeof(pinfo));
941 pinfo.node_name = nport->node_name; 939 pinfo.node_name = nport->node_name;
942 pinfo.port_name = nport->port_name; 940 pinfo.port_name = nport->port_name;
943 pinfo.port_role = nport->port_role; 941 pinfo.port_role = nport->port_role;
@@ -979,24 +977,12 @@ __unlink_remote_port(struct fcloop_nport *nport)
979} 977}
980 978
981static int 979static int
982__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport) 980__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
983{ 981{
984 int ret;
985
986 if (!rport) 982 if (!rport)
987 return -EALREADY; 983 return -EALREADY;
988 984
989 init_completion(&nport->rport_unreg_done); 985 return nvme_fc_unregister_remoteport(rport->remoteport);
990
991 ret = nvme_fc_unregister_remoteport(rport->remoteport);
992 if (ret)
993 return ret;
994
995 wait_for_completion(&nport->rport_unreg_done);
996
997 fcloop_nport_put(nport);
998
999 return ret;
1000} 986}
1001 987
1002static ssize_t 988static ssize_t
@@ -1029,7 +1015,7 @@ fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
1029 if (!nport) 1015 if (!nport)
1030 return -ENOENT; 1016 return -ENOENT;
1031 1017
1032 ret = __wait_remoteport_unreg(nport, rport); 1018 ret = __remoteport_unreg(nport, rport);
1033 1019
1034 return ret ? ret : count; 1020 return ret ? ret : count;
1035} 1021}
@@ -1086,24 +1072,12 @@ __unlink_target_port(struct fcloop_nport *nport)
1086} 1072}
1087 1073
1088static int 1074static int
1089__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport) 1075__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
1090{ 1076{
1091 int ret;
1092
1093 if (!tport) 1077 if (!tport)
1094 return -EALREADY; 1078 return -EALREADY;
1095 1079
1096 init_completion(&nport->tport_unreg_done); 1080 return nvmet_fc_unregister_targetport(tport->targetport);
1097
1098 ret = nvmet_fc_unregister_targetport(tport->targetport);
1099 if (ret)
1100 return ret;
1101
1102 wait_for_completion(&nport->tport_unreg_done);
1103
1104 fcloop_nport_put(nport);
1105
1106 return ret;
1107} 1081}
1108 1082
1109static ssize_t 1083static ssize_t
@@ -1136,7 +1110,7 @@ fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
1136 if (!nport) 1110 if (!nport)
1137 return -ENOENT; 1111 return -ENOENT;
1138 1112
1139 ret = __wait_targetport_unreg(nport, tport); 1113 ret = __targetport_unreg(nport, tport);
1140 1114
1141 return ret ? ret : count; 1115 return ret ? ret : count;
1142} 1116}
@@ -1223,11 +1197,11 @@ static void __exit fcloop_exit(void)
1223 1197
1224 spin_unlock_irqrestore(&fcloop_lock, flags); 1198 spin_unlock_irqrestore(&fcloop_lock, flags);
1225 1199
1226 ret = __wait_targetport_unreg(nport, tport); 1200 ret = __targetport_unreg(nport, tport);
1227 if (ret) 1201 if (ret)
1228 pr_warn("%s: Failed deleting target port\n", __func__); 1202 pr_warn("%s: Failed deleting target port\n", __func__);
1229 1203
1230 ret = __wait_remoteport_unreg(nport, rport); 1204 ret = __remoteport_unreg(nport, rport);
1231 if (ret) 1205 if (ret)
1232 pr_warn("%s: Failed deleting remote port\n", __func__); 1206 pr_warn("%s: Failed deleting remote port\n", __func__);
1233 1207
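Reviewer note: the delete paths used to block on a completion until the transport called back, and only then dropped the nport reference. Dropping the reference directly from the ->delete callbacks removes the sleeping handshake and the two completion fields, at the cost of moving fcloop_nport_free/put/get above their new first use. The resulting lifetime rule (sketch):

    /* unregister just starts teardown; the transport's delete callback
     * drops the reference once the port is really gone, and the final
     * kref_put frees the nport wherever it happens to land */
    static void fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
    {
            struct fcloop_rport *rport = remoteport->private;

            fcloop_nport_put(rport->nport);
    }
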
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7d261ab894f4..7b8e20adf760 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -74,6 +74,7 @@ struct nvmet_sq {
74 struct percpu_ref ref; 74 struct percpu_ref ref;
75 u16 qid; 75 u16 qid;
76 u16 size; 76 u16 size;
77 u16 sqhd;
77 struct completion free_done; 78 struct completion free_done;
78 struct completion confirm_done; 79 struct completion confirm_done;
79}; 80};
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index de54c7f5048a..d12e5de78e70 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -135,7 +135,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
135 135
136 /* Stop the user from writing */ 136 /* Stop the user from writing */
137 if (pos >= nvmem->size) 137 if (pos >= nvmem->size)
138 return 0; 138 return -EFBIG;
139 139
140 if (count < nvmem->word_size) 140 if (count < nvmem->word_size)
141 return -EINVAL; 141 return -EINVAL;
@@ -789,6 +789,7 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
789 return ERR_PTR(-EINVAL); 789 return ERR_PTR(-EINVAL);
790 790
791 nvmem = __nvmem_device_get(nvmem_np, NULL, NULL); 791 nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
792 of_node_put(nvmem_np);
792 if (IS_ERR(nvmem)) 793 if (IS_ERR(nvmem))
793 return ERR_CAST(nvmem); 794 return ERR_CAST(nvmem);
794 795
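Reviewer note: two unrelated fixes here. Writes at or past the end of the device now fail loudly with -EFBIG instead of returning 0, which user space tends to retry forever; and of_nvmem_cell_get() stops leaking the device_node reference taken by the phandle lookup. The general pairing rule (sketch; the lookup call is illustrative, the driver's exact lookup differs slightly):

    struct device_node *nvmem_np;

    nvmem_np = of_parse_phandle(np, "nvmem-cells", index);  /* takes a ref */
    if (!nvmem_np)
            return ERR_PTR(-EINVAL);

    nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
    of_node_put(nvmem_np);   /* drop the ref once it has been consumed */
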
diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
index 4ddc6e8f9fe7..f9308c2f22e6 100644
--- a/drivers/pci/endpoint/functions/pci-epf-test.c
+++ b/drivers/pci/endpoint/functions/pci-epf-test.c
@@ -251,9 +251,8 @@ err:
251 return ret; 251 return ret;
252} 252}
253 253
254static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test) 254static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq)
255{ 255{
256 u8 irq;
257 u8 msi_count; 256 u8 msi_count;
258 struct pci_epf *epf = epf_test->epf; 257 struct pci_epf *epf = epf_test->epf;
259 struct pci_epc *epc = epf->epc; 258 struct pci_epc *epc = epf->epc;
@@ -262,7 +261,6 @@ static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test)
262 261
263 reg->status |= STATUS_IRQ_RAISED; 262 reg->status |= STATUS_IRQ_RAISED;
264 msi_count = pci_epc_get_msi(epc); 263 msi_count = pci_epc_get_msi(epc);
265 irq = (reg->command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
266 if (irq > msi_count || msi_count <= 0) 264 if (irq > msi_count || msi_count <= 0)
267 pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); 265 pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
268 else 266 else
@@ -289,6 +287,8 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
289 reg->command = 0; 287 reg->command = 0;
290 reg->status = 0; 288 reg->status = 0;
291 289
290 irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
291
292 if (command & COMMAND_RAISE_LEGACY_IRQ) { 292 if (command & COMMAND_RAISE_LEGACY_IRQ) {
293 reg->status = STATUS_IRQ_RAISED; 293 reg->status = STATUS_IRQ_RAISED;
294 pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0); 294 pci_epc_raise_irq(epc, PCI_EPC_IRQ_LEGACY, 0);
@@ -301,7 +301,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
301 reg->status |= STATUS_WRITE_FAIL; 301 reg->status |= STATUS_WRITE_FAIL;
302 else 302 else
303 reg->status |= STATUS_WRITE_SUCCESS; 303 reg->status |= STATUS_WRITE_SUCCESS;
304 pci_epf_test_raise_irq(epf_test); 304 pci_epf_test_raise_irq(epf_test, irq);
305 goto reset_handler; 305 goto reset_handler;
306 } 306 }
307 307
@@ -311,7 +311,7 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
311 reg->status |= STATUS_READ_SUCCESS; 311 reg->status |= STATUS_READ_SUCCESS;
312 else 312 else
313 reg->status |= STATUS_READ_FAIL; 313 reg->status |= STATUS_READ_FAIL;
314 pci_epf_test_raise_irq(epf_test); 314 pci_epf_test_raise_irq(epf_test, irq);
315 goto reset_handler; 315 goto reset_handler;
316 } 316 }
317 317
@@ -321,13 +321,12 @@ static void pci_epf_test_cmd_handler(struct work_struct *work)
321 reg->status |= STATUS_COPY_SUCCESS; 321 reg->status |= STATUS_COPY_SUCCESS;
322 else 322 else
323 reg->status |= STATUS_COPY_FAIL; 323 reg->status |= STATUS_COPY_FAIL;
324 pci_epf_test_raise_irq(epf_test); 324 pci_epf_test_raise_irq(epf_test, irq);
325 goto reset_handler; 325 goto reset_handler;
326 } 326 }
327 327
328 if (command & COMMAND_RAISE_MSI_IRQ) { 328 if (command & COMMAND_RAISE_MSI_IRQ) {
329 msi_count = pci_epc_get_msi(epc); 329 msi_count = pci_epc_get_msi(epc);
330 irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;
331 if (irq > msi_count || msi_count <= 0) 330 if (irq > msi_count || msi_count <= 0)
332 goto reset_handler; 331 goto reset_handler;
333 reg->status = STATUS_IRQ_RAISED; 332 reg->status = STATUS_IRQ_RAISED;
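Reviewer note: reg points into BAR memory, and the handler zeroes reg->command early, so by the time pci_epf_test_raise_irq() re-read it the MSI number was already gone (and the host could have rewritten the register anyway). The fix derives the irq number from the snapshot taken before the clear and threads it through to the helper, so all three data-path commands report on the interrupt the host actually asked for (sketch; COMMAND_WRITE is one of the driver's command bits):

    u32 command = reg->command;   /* one snapshot of host-written state */
    u8 irq;

    reg->command = 0;
    reg->status = 0;

    /* derive everything from the snapshot, not from live BAR memory */
    irq = (command & MSI_NUMBER_MASK) >> MSI_NUMBER_SHIFT;

    if (command & COMMAND_WRITE)
            pci_epf_test_raise_irq(epf_test, irq);
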
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 1eecfa301f7f..8e075ea2743e 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -686,7 +686,7 @@ static ssize_t driver_override_store(struct device *dev,
686 const char *buf, size_t count) 686 const char *buf, size_t count)
687{ 687{
688 struct pci_dev *pdev = to_pci_dev(dev); 688 struct pci_dev *pdev = to_pci_dev(dev);
689 char *driver_override, *old = pdev->driver_override, *cp; 689 char *driver_override, *old, *cp;
690 690
691 /* We need to keep extra room for a newline */ 691 /* We need to keep extra room for a newline */
692 if (count >= (PAGE_SIZE - 1)) 692 if (count >= (PAGE_SIZE - 1))
@@ -700,12 +700,15 @@ static ssize_t driver_override_store(struct device *dev,
700 if (cp) 700 if (cp)
701 *cp = '\0'; 701 *cp = '\0';
702 702
703 device_lock(dev);
704 old = pdev->driver_override;
703 if (strlen(driver_override)) { 705 if (strlen(driver_override)) {
704 pdev->driver_override = driver_override; 706 pdev->driver_override = driver_override;
705 } else { 707 } else {
706 kfree(driver_override); 708 kfree(driver_override);
707 pdev->driver_override = NULL; 709 pdev->driver_override = NULL;
708 } 710 }
711 device_unlock(dev);
709 712
710 kfree(old); 713 kfree(old);
711 714
@@ -716,8 +719,12 @@ static ssize_t driver_override_show(struct device *dev,
716 struct device_attribute *attr, char *buf) 719 struct device_attribute *attr, char *buf)
717{ 720{
718 struct pci_dev *pdev = to_pci_dev(dev); 721 struct pci_dev *pdev = to_pci_dev(dev);
722 ssize_t len;
719 723
720 return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); 724 device_lock(dev);
725 len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
726 device_unlock(dev);
727 return len;
721} 728}
722static DEVICE_ATTR_RW(driver_override); 729static DEVICE_ATTR_RW(driver_override);
723 730
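Reviewer note: driver_override_store() fetched the old pointer before taking any lock, so two racing writers could both observe the same old value and kfree() it twice, while driver_override_show() could print a buffer another CPU was freeing. Taking device_lock() around the pointer swap, and around the snprintf in show, closes both races; the kfree itself can stay outside the lock because once the pointer is swapped out no other path can reach it (sketch):

    device_lock(dev);
    old = pdev->driver_override;        /* fetch and ...       */
    pdev->driver_override = driver_override;
    device_unlock(dev);                 /* ... swap atomically */

    kfree(old);   /* safe: no other reference to it can exist now */
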
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 0a9b78705ee8..3303dd8d8eb5 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -235,6 +235,7 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn)
235 ret = armpmu_register(pmu); 235 ret = armpmu_register(pmu);
236 if (ret) { 236 if (ret) {
237 pr_warn("Failed to register PMU for CPU%d\n", cpu); 237 pr_warn("Failed to register PMU for CPU%d\n", cpu);
238 kfree(pmu->name);
238 return ret; 239 return ret;
239 } 240 }
240 } 241 }
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 85de30f93a9c..56a8195096a2 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -254,10 +254,12 @@ static int bl_update_status(struct backlight_device *b)
254{ 254{
255 struct acpi_device *device = bl_get_data(b); 255 struct acpi_device *device = bl_get_data(b);
256 256
257 if (b->props.power == FB_BLANK_POWERDOWN) 257 if (fext) {
258 call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3); 258 if (b->props.power == FB_BLANK_POWERDOWN)
259 else 259 call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
260 call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0); 260 else
261 call_fext_func(fext, FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
262 }
261 263
262 return set_lcd_level(device, b->props.brightness); 264 return set_lcd_level(device, b->props.brightness);
263} 265}
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 315a4be8dc1e..9a68914100ad 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -51,6 +51,8 @@ module_param(mbox_sel, byte, S_IRUGO);
51MODULE_PARM_DESC(mbox_sel, 51MODULE_PARM_DESC(mbox_sel,
52 "RIO Messaging MBOX Selection Mask (default: 0x0f = all)"); 52 "RIO Messaging MBOX Selection Mask (default: 0x0f = all)");
53 53
54static DEFINE_SPINLOCK(tsi721_maint_lock);
55
54static void tsi721_omsg_handler(struct tsi721_device *priv, int ch); 56static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
55static void tsi721_imsg_handler(struct tsi721_device *priv, int ch); 57static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
56 58
@@ -124,12 +126,15 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
124 void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id); 126 void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
125 struct tsi721_dma_desc *bd_ptr; 127 struct tsi721_dma_desc *bd_ptr;
126 u32 rd_count, swr_ptr, ch_stat; 128 u32 rd_count, swr_ptr, ch_stat;
129 unsigned long flags;
127 int i, err = 0; 130 int i, err = 0;
128 u32 op = do_wr ? MAINT_WR : MAINT_RD; 131 u32 op = do_wr ? MAINT_WR : MAINT_RD;
129 132
130 if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) 133 if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
131 return -EINVAL; 134 return -EINVAL;
132 135
136 spin_lock_irqsave(&tsi721_maint_lock, flags);
137
133 bd_ptr = priv->mdma.bd_base; 138 bd_ptr = priv->mdma.bd_base;
134 139
135 rd_count = ioread32(regs + TSI721_DMAC_DRDCNT); 140 rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
@@ -197,7 +202,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
197 */ 202 */
198 swr_ptr = ioread32(regs + TSI721_DMAC_DSWP); 203 swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
199 iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP); 204 iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
205
200err_out: 206err_out:
207 spin_unlock_irqrestore(&tsi721_maint_lock, flags);
201 208
202 return err; 209 return err;
203} 210}
diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c
index a3824baca2e5..3ee9af83b638 100644
--- a/drivers/rapidio/rio-access.c
+++ b/drivers/rapidio/rio-access.c
@@ -14,16 +14,8 @@
14#include <linux/module.h> 14#include <linux/module.h>
15 15
16/* 16/*
17 * These interrupt-safe spinlocks protect all accesses to RIO
18 * configuration space and doorbell access.
19 */
20static DEFINE_SPINLOCK(rio_config_lock);
21static DEFINE_SPINLOCK(rio_doorbell_lock);
22
23/*
24 * Wrappers for all RIO configuration access functions. They just check 17 * Wrappers for all RIO configuration access functions. They just check
25 * alignment, do locking and call the low-level functions pointed to 18 * alignment and call the low-level functions pointed to by rio_mport->ops.
26 * by rio_mport->ops.
27 */ 19 */
28 20
29#define RIO_8_BAD 0 21#define RIO_8_BAD 0
@@ -44,13 +36,10 @@ int __rio_local_read_config_##size \
44 (struct rio_mport *mport, u32 offset, type *value) \ 36 (struct rio_mport *mport, u32 offset, type *value) \
45{ \ 37{ \
46 int res; \ 38 int res; \
47 unsigned long flags; \
48 u32 data = 0; \ 39 u32 data = 0; \
49 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ 40 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
50 spin_lock_irqsave(&rio_config_lock, flags); \
51 res = mport->ops->lcread(mport, mport->id, offset, len, &data); \ 41 res = mport->ops->lcread(mport, mport->id, offset, len, &data); \
52 *value = (type)data; \ 42 *value = (type)data; \
53 spin_unlock_irqrestore(&rio_config_lock, flags); \
54 return res; \ 43 return res; \
55} 44}
56 45
@@ -67,13 +56,8 @@ int __rio_local_read_config_##size \
67int __rio_local_write_config_##size \ 56int __rio_local_write_config_##size \
68 (struct rio_mport *mport, u32 offset, type value) \ 57 (struct rio_mport *mport, u32 offset, type value) \
69{ \ 58{ \
70 int res; \
71 unsigned long flags; \
72 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ 59 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
73 spin_lock_irqsave(&rio_config_lock, flags); \ 60 return mport->ops->lcwrite(mport, mport->id, offset, len, value);\
74 res = mport->ops->lcwrite(mport, mport->id, offset, len, value);\
75 spin_unlock_irqrestore(&rio_config_lock, flags); \
76 return res; \
77} 61}
78 62
79RIO_LOP_READ(8, u8, 1) 63RIO_LOP_READ(8, u8, 1)
@@ -104,13 +88,10 @@ int rio_mport_read_config_##size \
104 (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \ 88 (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \
105{ \ 89{ \
106 int res; \ 90 int res; \
107 unsigned long flags; \
108 u32 data = 0; \ 91 u32 data = 0; \
109 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ 92 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
110 spin_lock_irqsave(&rio_config_lock, flags); \
111 res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \ 93 res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \
112 *value = (type)data; \ 94 *value = (type)data; \
113 spin_unlock_irqrestore(&rio_config_lock, flags); \
114 return res; \ 95 return res; \
115} 96}
116 97
@@ -127,13 +108,9 @@ int rio_mport_read_config_##size \
127int rio_mport_write_config_##size \ 108int rio_mport_write_config_##size \
128 (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \ 109 (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \
129{ \ 110{ \
130 int res; \
131 unsigned long flags; \
132 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ 111 if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
133 spin_lock_irqsave(&rio_config_lock, flags); \ 112 return mport->ops->cwrite(mport, mport->id, destid, hopcount, \
134 res = mport->ops->cwrite(mport, mport->id, destid, hopcount, offset, len, value); \ 113 offset, len, value); \
135 spin_unlock_irqrestore(&rio_config_lock, flags); \
136 return res; \
137} 114}
138 115
139RIO_OP_READ(8, u8, 1) 116RIO_OP_READ(8, u8, 1)
@@ -162,14 +139,7 @@ EXPORT_SYMBOL_GPL(rio_mport_write_config_32);
162 */ 139 */
163int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data) 140int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data)
164{ 141{
165 int res; 142 return mport->ops->dsend(mport, mport->id, destid, data);
166 unsigned long flags;
167
168 spin_lock_irqsave(&rio_doorbell_lock, flags);
169 res = mport->ops->dsend(mport, mport->id, destid, data);
170 spin_unlock_irqrestore(&rio_doorbell_lock, flags);
171
172 return res;
173} 143}
174 144
175EXPORT_SYMBOL_GPL(rio_mport_send_doorbell); 145EXPORT_SYMBOL_GPL(rio_mport_send_doorbell);
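Reviewer note: the core's two global spinlocks serialized every configuration access and doorbell send across all mports in the system, throttling hosts with several controllers, and alignment checking is all the wrappers still do. Serialization becomes the mport driver's job where its hardware actually needs it, which is exactly what the tsi721 hunk above adds: one driver-local lock around the shared maintenance-DMA channel (sketch; tsi721_do_maint() is a hypothetical stand-in for the descriptor/poll sequence):

    static DEFINE_SPINLOCK(tsi721_maint_lock);

    static int tsi721_maint_dma_locked(struct tsi721_device *priv)
    {
            unsigned long flags;
            int err;

            /* only this device's maintenance channel is serialized;
             * other mports proceed in parallel */
            spin_lock_irqsave(&tsi721_maint_lock, flags);
            err = tsi721_do_maint(priv);   /* hypothetical stand-in */
            spin_unlock_irqrestore(&tsi721_maint_lock, flags);

            return err;
    }
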
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index e0c393214264..e2baecbb9dd3 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -34,11 +34,12 @@ config RESET_BERLIN
34 help 34 help
35 This enables the reset controller driver for Marvell Berlin SoCs. 35 This enables the reset controller driver for Marvell Berlin SoCs.
36 36
37config RESET_HSDK_V1 37config RESET_HSDK
38 bool "HSDK v1 Reset Driver" 38 bool "Synopsys HSDK Reset Driver"
39 default n 39 depends on HAS_IOMEM
40 depends on ARC_SOC_HSDK || COMPILE_TEST
40 help 41 help
41 This enables the reset controller driver for HSDK v1. 42 This enables the reset controller driver for HSDK board.
42 43
43config RESET_IMX7 44config RESET_IMX7
44 bool "i.MX7 Reset Driver" if COMPILE_TEST 45 bool "i.MX7 Reset Driver" if COMPILE_TEST
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index d368367110e5..af1c15c330b3 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -5,7 +5,7 @@ obj-$(CONFIG_ARCH_TEGRA) += tegra/
5obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o 5obj-$(CONFIG_RESET_A10SR) += reset-a10sr.o
6obj-$(CONFIG_RESET_ATH79) += reset-ath79.o 6obj-$(CONFIG_RESET_ATH79) += reset-ath79.o
7obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o 7obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
8obj-$(CONFIG_RESET_HSDK_V1) += reset-hsdk-v1.o 8obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
9obj-$(CONFIG_RESET_IMX7) += reset-imx7.o 9obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
10obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o 10obj-$(CONFIG_RESET_LANTIQ) += reset-lantiq.o
11obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o 11obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o
diff --git a/drivers/reset/reset-hsdk-v1.c b/drivers/reset/reset-hsdk.c
index bca13e4bf622..8bce391c6943 100644
--- a/drivers/reset/reset-hsdk-v1.c
+++ b/drivers/reset/reset-hsdk.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 2017 Synopsys. 2 * Copyright (C) 2017 Synopsys.
3 * 3 *
4 * Synopsys HSDKv1 SDP reset driver. 4 * Synopsys HSDK Development platform reset driver.
5 * 5 *
6 * This file is licensed under the terms of the GNU General Public 6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any 7 * License version 2. This program is licensed "as is" without any
@@ -18,9 +18,9 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/types.h> 19#include <linux/types.h>
20 20
21#define to_hsdkv1_rst(p) container_of((p), struct hsdkv1_rst, rcdev) 21#define to_hsdk_rst(p) container_of((p), struct hsdk_rst, rcdev)
22 22
23struct hsdkv1_rst { 23struct hsdk_rst {
24 void __iomem *regs_ctl; 24 void __iomem *regs_ctl;
25 void __iomem *regs_rst; 25 void __iomem *regs_rst;
26 spinlock_t lock; 26 spinlock_t lock;
@@ -49,12 +49,12 @@ static const u32 rst_map[] = {
49#define CGU_IP_SW_RESET_RESET BIT(0) 49#define CGU_IP_SW_RESET_RESET BIT(0)
50#define SW_RESET_TIMEOUT 10000 50#define SW_RESET_TIMEOUT 10000
51 51
52static void hsdkv1_reset_config(struct hsdkv1_rst *rst, unsigned long id) 52static void hsdk_reset_config(struct hsdk_rst *rst, unsigned long id)
53{ 53{
54 writel(rst_map[id], rst->regs_ctl + CGU_SYS_RST_CTRL); 54 writel(rst_map[id], rst->regs_ctl + CGU_SYS_RST_CTRL);
55} 55}
56 56
57static int hsdkv1_reset_do(struct hsdkv1_rst *rst) 57static int hsdk_reset_do(struct hsdk_rst *rst)
58{ 58{
59 u32 reg; 59 u32 reg;
60 60
@@ -69,28 +69,28 @@ static int hsdkv1_reset_do(struct hsdkv1_rst *rst)
69 !(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT); 69 !(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT);
70} 70}
71 71
72static int hsdkv1_reset_reset(struct reset_controller_dev *rcdev, 72static int hsdk_reset_reset(struct reset_controller_dev *rcdev,
73 unsigned long id) 73 unsigned long id)
74{ 74{
75 struct hsdkv1_rst *rst = to_hsdkv1_rst(rcdev); 75 struct hsdk_rst *rst = to_hsdk_rst(rcdev);
76 unsigned long flags; 76 unsigned long flags;
77 int ret; 77 int ret;
78 78
79 spin_lock_irqsave(&rst->lock, flags); 79 spin_lock_irqsave(&rst->lock, flags);
80 hsdkv1_reset_config(rst, id); 80 hsdk_reset_config(rst, id);
81 ret = hsdkv1_reset_do(rst); 81 ret = hsdk_reset_do(rst);
82 spin_unlock_irqrestore(&rst->lock, flags); 82 spin_unlock_irqrestore(&rst->lock, flags);
83 83
84 return ret; 84 return ret;
85} 85}
86 86
87static const struct reset_control_ops hsdkv1_reset_ops = { 87static const struct reset_control_ops hsdk_reset_ops = {
88 .reset = hsdkv1_reset_reset, 88 .reset = hsdk_reset_reset,
89}; 89};
90 90
91static int hsdkv1_reset_probe(struct platform_device *pdev) 91static int hsdk_reset_probe(struct platform_device *pdev)
92{ 92{
93 struct hsdkv1_rst *rst; 93 struct hsdk_rst *rst;
94 struct resource *mem; 94 struct resource *mem;
95 95
96 rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL); 96 rst = devm_kzalloc(&pdev->dev, sizeof(*rst), GFP_KERNEL);
@@ -110,7 +110,7 @@ static int hsdkv1_reset_probe(struct platform_device *pdev)
110 spin_lock_init(&rst->lock); 110 spin_lock_init(&rst->lock);
111 111
112 rst->rcdev.owner = THIS_MODULE; 112 rst->rcdev.owner = THIS_MODULE;
113 rst->rcdev.ops = &hsdkv1_reset_ops; 113 rst->rcdev.ops = &hsdk_reset_ops;
114 rst->rcdev.of_node = pdev->dev.of_node; 114 rst->rcdev.of_node = pdev->dev.of_node;
115 rst->rcdev.nr_resets = HSDK_MAX_RESETS; 115 rst->rcdev.nr_resets = HSDK_MAX_RESETS;
116 rst->rcdev.of_reset_n_cells = 1; 116 rst->rcdev.of_reset_n_cells = 1;
@@ -118,20 +118,20 @@ static int hsdkv1_reset_probe(struct platform_device *pdev)
118 return reset_controller_register(&rst->rcdev); 118 return reset_controller_register(&rst->rcdev);
119} 119}
120 120
121static const struct of_device_id hsdkv1_reset_dt_match[] = { 121static const struct of_device_id hsdk_reset_dt_match[] = {
122 { .compatible = "snps,hsdk-v1.0-reset" }, 122 { .compatible = "snps,hsdk-reset" },
123 { }, 123 { },
124}; 124};
125 125
126static struct platform_driver hsdkv1_reset_driver = { 126static struct platform_driver hsdk_reset_driver = {
127 .probe = hsdkv1_reset_probe, 127 .probe = hsdk_reset_probe,
128 .driver = { 128 .driver = {
129 .name = "hsdk-v1.0-reset", 129 .name = "hsdk-reset",
130 .of_match_table = hsdkv1_reset_dt_match, 130 .of_match_table = hsdk_reset_dt_match,
131 }, 131 },
132}; 132};
133builtin_platform_driver(hsdkv1_reset_driver); 133builtin_platform_driver(hsdk_reset_driver);
134 134
135MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>"); 135MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");
136MODULE_DESCRIPTION("Synopsys HSDKv1 SDP reset driver"); 136MODULE_DESCRIPTION("Synopsys HSDK SDP reset driver");
137MODULE_LICENSE("GPL v2"); 137MODULE_LICENSE("GPL v2");
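
Beyond the rename, the reset sequence itself is unchanged; the tail visible in the hunk above is the readl_poll_timeout() idiom. A minimal sketch of the flow, assuming a CGU_IP_SW_RESET register offset (only the _RESET bit name actually appears in the hunk, so the offset define is an assumption here):

static int hsdk_reset_do(struct hsdk_rst *rst)
{
	u32 reg;

	reg = readl(rst->regs_rst + CGU_IP_SW_RESET);	/* assumed offset */
	reg |= CGU_IP_SW_RESET_RESET;
	writel(reg, rst->regs_rst + CGU_IP_SW_RESET);

	/* Poll until hardware clears the bit, every 5 us, for at most
	 * SW_RESET_TIMEOUT us; returns 0 or -ETIMEDOUT. */
	return readl_poll_timeout(rst->regs_rst + CGU_IP_SW_RESET, reg,
			!(reg & CGU_IP_SW_RESET_RESET), 5, SW_RESET_TIMEOUT);
}
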
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index ea19b4ff87a2..29f35e29d480 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1644,7 +1644,9 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
1644 dasd_schedule_device_bh(device); 1644 dasd_schedule_device_bh(device);
1645 if (device->block) { 1645 if (device->block) {
1646 dasd_schedule_block_bh(device->block); 1646 dasd_schedule_block_bh(device->block);
1647 blk_mq_run_hw_queues(device->block->request_queue, true); 1647 if (device->block->request_queue)
1648 blk_mq_run_hw_queues(device->block->request_queue,
1649 true);
1648 } 1650 }
1649} 1651}
1650EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); 1652EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
@@ -3759,7 +3761,9 @@ int dasd_generic_path_operational(struct dasd_device *device)
3759 dasd_schedule_device_bh(device); 3761 dasd_schedule_device_bh(device);
3760 if (device->block) { 3762 if (device->block) {
3761 dasd_schedule_block_bh(device->block); 3763 dasd_schedule_block_bh(device->block);
3762 blk_mq_run_hw_queues(device->block->request_queue, true); 3764 if (device->block->request_queue)
3765 blk_mq_run_hw_queues(device->block->request_queue,
3766 true);
3763 } 3767 }
3764 3768
3765 if (!device->stopped) 3769 if (!device->stopped)
@@ -4025,7 +4029,9 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
4025 4029
4026 if (device->block) { 4030 if (device->block) {
4027 dasd_schedule_block_bh(device->block); 4031 dasd_schedule_block_bh(device->block);
4028 blk_mq_run_hw_queues(device->block->request_queue, true); 4032 if (device->block->request_queue)
4033 blk_mq_run_hw_queues(device->block->request_queue,
4034 true);
4029 } 4035 }
4030 4036
4031 clear_bit(DASD_FLAG_SUSPENDED, &device->flags); 4037 clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 2e7fd966c515..eb51893c74a4 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -249,7 +249,7 @@ static void scm_request_requeue(struct scm_request *scmrq)
249static void scm_request_finish(struct scm_request *scmrq) 249static void scm_request_finish(struct scm_request *scmrq)
250{ 250{
251 struct scm_blk_dev *bdev = scmrq->bdev; 251 struct scm_blk_dev *bdev = scmrq->bdev;
252 int *error; 252 blk_status_t *error;
253 int i; 253 int i;
254 254
255 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) { 255 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
@@ -415,7 +415,7 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
415 415
416static void scm_blk_request_done(struct request *req) 416static void scm_blk_request_done(struct request *req)
417{ 417{
418 int *error = blk_mq_rq_to_pdu(req); 418 blk_status_t *error = blk_mq_rq_to_pdu(req);
419 419
420 blk_mq_end_request(req, *error); 420 blk_mq_end_request(req, *error);
421} 421}
@@ -450,7 +450,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
450 atomic_set(&bdev->queued_reqs, 0); 450 atomic_set(&bdev->queued_reqs, 0);
451 451
452 bdev->tag_set.ops = &scm_mq_ops; 452 bdev->tag_set.ops = &scm_mq_ops;
453 bdev->tag_set.cmd_size = sizeof(int); 453 bdev->tag_set.cmd_size = sizeof(blk_status_t);
454 bdev->tag_set.nr_hw_queues = nr_requests; 454 bdev->tag_set.nr_hw_queues = nr_requests;
455 bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests; 455 bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
456 bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 456 bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
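
The type change matters because the error value lives in the per-request PDU that blk-mq reserves behind every struct request: tag_set.cmd_size declares its size, and blk_mq_rq_to_pdu() returns a pointer to it. Storing the value as int in one path and reading it as blk_status_t in another is silent type confusion; the patch makes the PDU blk_status_t end to end. A condensed sketch of the pattern:

/* scm_blk_irq() side: remember the status in the request's PDU */
blk_status_t *error = blk_mq_rq_to_pdu(req);
*error = status;

/* completion side: replay it into blk_mq_end_request() */
static void scm_blk_request_done(struct request *req)
{
	blk_status_t *error = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, *error);
}
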
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 489b583f263d..e5c32f4b5287 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1225,10 +1225,16 @@ static int device_is_disconnected(struct ccw_device *cdev)
1225static int recovery_check(struct device *dev, void *data) 1225static int recovery_check(struct device *dev, void *data)
1226{ 1226{
1227 struct ccw_device *cdev = to_ccwdev(dev); 1227 struct ccw_device *cdev = to_ccwdev(dev);
1228 struct subchannel *sch;
1228 int *redo = data; 1229 int *redo = data;
1229 1230
1230 spin_lock_irq(cdev->ccwlock); 1231 spin_lock_irq(cdev->ccwlock);
1231 switch (cdev->private->state) { 1232 switch (cdev->private->state) {
1233 case DEV_STATE_ONLINE:
1234 sch = to_subchannel(cdev->dev.parent);
1235 if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
1236 break;
1237 /* fall through */
1232 case DEV_STATE_DISCONNECTED: 1238 case DEV_STATE_DISCONNECTED:
1233 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", 1239 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
1234 cdev->private->dev_id.ssid, 1240 cdev->private->dev_id.ssid,
@@ -1260,7 +1266,7 @@ static void recovery_work_func(struct work_struct *unused)
1260 } 1266 }
1261 spin_unlock_irq(&recovery_lock); 1267 spin_unlock_irq(&recovery_lock);
1262 } else 1268 } else
1263 CIO_MSG_EVENT(4, "recovery: end\n"); 1269 CIO_MSG_EVENT(3, "recovery: end\n");
1264} 1270}
1265 1271
1266static DECLARE_WORK(recovery_work, recovery_work_func); 1272static DECLARE_WORK(recovery_work, recovery_work_func);
@@ -1274,11 +1280,11 @@ static void recovery_func(unsigned long data)
1274 schedule_work(&recovery_work); 1280 schedule_work(&recovery_work);
1275} 1281}
1276 1282
1277static void ccw_device_schedule_recovery(void) 1283void ccw_device_schedule_recovery(void)
1278{ 1284{
1279 unsigned long flags; 1285 unsigned long flags;
1280 1286
1281 CIO_MSG_EVENT(4, "recovery: schedule\n"); 1287 CIO_MSG_EVENT(3, "recovery: schedule\n");
1282 spin_lock_irqsave(&recovery_lock, flags); 1288 spin_lock_irqsave(&recovery_lock, flags);
1283 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { 1289 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
1284 recovery_phase = 0; 1290 recovery_phase = 0;
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index ec497af99dd8..69cb70f080a5 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -134,6 +134,7 @@ void ccw_device_set_disconnected(struct ccw_device *cdev);
134void ccw_device_set_notoper(struct ccw_device *cdev); 134void ccw_device_set_notoper(struct ccw_device *cdev);
135 135
136void ccw_device_set_timeout(struct ccw_device *, int); 136void ccw_device_set_timeout(struct ccw_device *, int);
137void ccw_device_schedule_recovery(void);
137 138
138/* Channel measurement facility related */ 139/* Channel measurement facility related */
139void retry_set_schib(struct ccw_device *cdev); 140void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 12016e32e519..f98ea674c3d8 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -476,6 +476,17 @@ static void create_fake_irb(struct irb *irb, int type)
476 } 476 }
477} 477}
478 478
479static void ccw_device_handle_broken_paths(struct ccw_device *cdev)
480{
481 struct subchannel *sch = to_subchannel(cdev->dev.parent);
482 u8 broken_paths = (sch->schib.pmcw.pam & sch->opm) ^ sch->vpm;
483
484 if (broken_paths && (cdev->private->path_broken_mask != broken_paths))
485 ccw_device_schedule_recovery();
486
487 cdev->private->path_broken_mask = broken_paths;
488}
489
479void ccw_device_verify_done(struct ccw_device *cdev, int err) 490void ccw_device_verify_done(struct ccw_device *cdev, int err)
480{ 491{
481 struct subchannel *sch; 492 struct subchannel *sch;
@@ -508,6 +519,7 @@ callback:
508 memset(&cdev->private->irb, 0, sizeof(struct irb)); 519 memset(&cdev->private->irb, 0, sizeof(struct irb));
509 } 520 }
510 ccw_device_report_path_events(cdev); 521 ccw_device_report_path_events(cdev);
522 ccw_device_handle_broken_paths(cdev);
511 break; 523 break;
512 case -ETIME: 524 case -ETIME:
513 case -EUSERS: 525 case -EUSERS:
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 220f49145b2f..9a1b56b2df3e 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -131,6 +131,8 @@ struct ccw_device_private {
131 not operable */ 131 not operable */
132 u8 path_gone_mask; /* mask of paths, that became unavailable */ 132 u8 path_gone_mask; /* mask of paths, that became unavailable */
133 u8 path_new_mask; /* mask of paths, that became available */ 133 u8 path_new_mask; /* mask of paths, that became available */
134 u8 path_broken_mask; /* mask of paths, which were found to be
135 unusable */
134 struct { 136 struct {
135 unsigned int fast:1; /* post with "channel end" */ 137 unsigned int fast:1; /* post with "channel end" */
136 unsigned int repall:1; /* report every interrupt status */ 138 unsigned int repall:1; /* report every interrupt status */
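
The new path_broken_mask logic is plain bit arithmetic over the subchannel masks: pam is the set of physically available paths, opm the paths the driver is allowed to use, and vpm the paths that actually passed verification. Their combination flags paths that should work but do not, and recovery is scheduled only when that set changes, so a permanently broken path does not retrigger forever. A worked example with illustrative values:

u8 pam = 0xe0;	/* paths 0,1,2 physically available */
u8 opm = 0xc0;	/* paths 0,1 operational/allowed    */
u8 vpm = 0x80;	/* only path 0 verified usable      */

u8 broken_paths = (pam & opm) ^ vpm;	/* 0xc0 ^ 0x80 = 0x40: path 1 */

if (broken_paths && (broken_paths != cdev->private->path_broken_mask))
	ccw_device_schedule_recovery();
cdev->private->path_broken_mask = broken_paths;
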
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a64285ab0728..af3e4d3f9735 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -699,13 +699,13 @@ static void _aac_probe_container1(void * context, struct fib * fibptr)
699 int status; 699 int status;
700 700
701 dresp = (struct aac_mount *) fib_data(fibptr); 701 dresp = (struct aac_mount *) fib_data(fibptr);
702 if (!(fibptr->dev->supplement_adapter_info.supported_options2 & 702 if (!aac_supports_2T(fibptr->dev)) {
703 AAC_OPTION_VARIABLE_BLOCK_SIZE))
704 dresp->mnt[0].capacityhigh = 0; 703 dresp->mnt[0].capacityhigh = 0;
705 if ((le32_to_cpu(dresp->status) != ST_OK) || 704 if ((le32_to_cpu(dresp->status) == ST_OK) &&
706 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) { 705 (le32_to_cpu(dresp->mnt[0].vol) != CT_NONE)) {
707 _aac_probe_container2(context, fibptr); 706 _aac_probe_container2(context, fibptr);
708 return; 707 return;
708 }
709 } 709 }
710 scsicmd = (struct scsi_cmnd *) context; 710 scsicmd = (struct scsi_cmnd *) context;
711 711
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 92fabf2b0c24..403a639574e5 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2701,6 +2701,11 @@ static inline int aac_is_src(struct aac_dev *dev)
2701 return 0; 2701 return 0;
2702} 2702}
2703 2703
2704static inline int aac_supports_2T(struct aac_dev *dev)
2705{
2706 return (dev->adapter_info.options & AAC_OPT_NEW_COMM_64);
2707}
2708
2704char * get_container_type(unsigned type); 2709char * get_container_type(unsigned type);
2705extern int numacb; 2710extern int numacb;
2706extern char aac_driver_version[]; 2711extern char aac_driver_version[];
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 87cc4a93e637..62beb2596466 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -906,12 +906,14 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
906 906
907 bus = aac_logical_to_phys(scmd_channel(cmd)); 907 bus = aac_logical_to_phys(scmd_channel(cmd));
908 cid = scmd_id(cmd); 908 cid = scmd_id(cmd);
909 info = &aac->hba_map[bus][cid]; 909
910 if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || 910 if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
911 info->devtype != AAC_DEVTYPE_NATIVE_RAW)
912 return FAILED; 911 return FAILED;
913 912
914 if (info->reset_state > 0) 913 info = &aac->hba_map[bus][cid];
914
915 if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
916 info->reset_state > 0)
915 return FAILED; 917 return FAILED;
916 918
917 pr_err("%s: Host adapter reset request. SCSI hang ?\n", 919 pr_err("%s: Host adapter reset request. SCSI hang ?\n",
@@ -962,12 +964,14 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
962 964
963 bus = aac_logical_to_phys(scmd_channel(cmd)); 965 bus = aac_logical_to_phys(scmd_channel(cmd));
964 cid = scmd_id(cmd); 966 cid = scmd_id(cmd);
965 info = &aac->hba_map[bus][cid]; 967
966 if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS || 968 if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
967 info->devtype != AAC_DEVTYPE_NATIVE_RAW)
968 return FAILED; 969 return FAILED;
969 970
970 if (info->reset_state > 0) 971 info = &aac->hba_map[bus][cid];
972
973 if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
974 info->reset_state > 0)
971 return FAILED; 975 return FAILED;
972 976
973 pr_err("%s: Host adapter reset request. SCSI hang ?\n", 977 pr_err("%s: Host adapter reset request. SCSI hang ?\n",
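
Both reset handlers had the same ordering bug: the hba_map[bus][cid] element was computed before bus and cid were range-checked, so a malformed command could index past the map. The fix is the usual check-before-index discipline, condensed:

if (bus >= AAC_MAX_BUSES || cid >= AAC_MAX_TARGETS)
	return FAILED;			/* reject before any indexing */

info = &aac->hba_map[bus][cid];		/* indices now known valid */

if (info->devtype != AAC_DEVTYPE_NATIVE_RAW && info->reset_state > 0)
	return FAILED;
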
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 48c2b2b34b72..0c9361c87ec8 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -740,6 +740,8 @@ static void aac_send_iop_reset(struct aac_dev *dev)
740 aac_set_intx_mode(dev); 740 aac_set_intx_mode(dev);
741 741
742 src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK); 742 src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
743
744 msleep(5000);
743} 745}
744 746
745static void aac_send_hardware_soft_reset(struct aac_dev *dev) 747static void aac_send_hardware_soft_reset(struct aac_dev *dev)
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 690816f3c6af..421fe869a11e 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2725,9 +2725,9 @@ int acornscsi_abort(struct scsi_cmnd *SCpnt)
2725 * Params : SCpnt - command causing reset 2725 * Params : SCpnt - command causing reset
2726 * Returns : one of SCSI_RESET_ macros 2726 * Returns : one of SCSI_RESET_ macros
2727 */ 2727 */
2728int acornscsi_host_reset(struct Scsi_Host *shpnt) 2728int acornscsi_host_reset(struct scsi_cmnd *SCpnt)
2729{ 2729{
2730 AS_Host *host = (AS_Host *)shpnt->hostdata; 2730 AS_Host *host = (AS_Host *)SCpnt->device->host->hostdata;
2731 struct scsi_cmnd *SCptr; 2731 struct scsi_cmnd *SCptr;
2732 2732
2733 host->stats.resets += 1; 2733 host->stats.resets += 1;
@@ -2741,7 +2741,7 @@ int acornscsi_host_reset(struct Scsi_Host *shpnt)
2741 2741
2742 printk(KERN_WARNING "acornscsi_reset: "); 2742 printk(KERN_WARNING "acornscsi_reset: ");
2743 print_sbic_status(asr, ssr, host->scsi.phase); 2743 print_sbic_status(asr, ssr, host->scsi.phase);
2744 for (devidx = 0; devidx < 9; devidx ++) { 2744 for (devidx = 0; devidx < 9; devidx++)
2745 acornscsi_dumplog(host, devidx); 2745 acornscsi_dumplog(host, devidx);
2746 } 2746 }
2747#endif 2747#endif
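
The prototype change brings acornscsi_host_reset() in line with what the SCSI midlayer actually calls: scsi_host_template's eh_host_reset_handler takes the failing struct scsi_cmnd, and the host is recovered from it via SCpnt->device->host. A sketch of the template wiring this assumes (field names per include/scsi/scsi_host.h):

static struct scsi_host_template acornscsi_template = {
	/* ... */
	.eh_abort_handler	= acornscsi_abort,
	/* must be int (*)(struct scsi_cmnd *), hence the new signature */
	.eh_host_reset_handler	= acornscsi_host_reset,
};
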
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 785fb42f6650..2799a6b08f73 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3767,7 +3767,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
3767 */ 3767 */
3768 if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) { 3768 if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
3769 pr_err("write_pending failed since: %d\n", vscsi->flags); 3769 pr_err("write_pending failed since: %d\n", vscsi->flags);
3770 return 0; 3770 return -EIO;
3771 } 3771 }
3772 3772
3773 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 3773 rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index bd4605a34f54..c62e8d111fd9 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2851,9 +2851,6 @@ EXPORT_SYMBOL_GPL(iscsi_session_setup);
2851/** 2851/**
2852 * iscsi_session_teardown - destroy session, host, and cls_session 2852 * iscsi_session_teardown - destroy session, host, and cls_session
2853 * @cls_session: iscsi session 2853 * @cls_session: iscsi session
2854 *
2855 * The driver must have called iscsi_remove_session before
2856 * calling this.
2857 */ 2854 */
2858void iscsi_session_teardown(struct iscsi_cls_session *cls_session) 2855void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2859{ 2856{
@@ -2863,6 +2860,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2863 2860
2864 iscsi_pool_free(&session->cmdpool); 2861 iscsi_pool_free(&session->cmdpool);
2865 2862
2863 iscsi_remove_session(cls_session);
2864
2866 kfree(session->password); 2865 kfree(session->password);
2867 kfree(session->password_in); 2866 kfree(session->password_in);
2868 kfree(session->username); 2867 kfree(session->username);
@@ -2877,7 +2876,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
2877 kfree(session->portal_type); 2876 kfree(session->portal_type);
2878 kfree(session->discovery_parent_type); 2877 kfree(session->discovery_parent_type);
2879 2878
2880 iscsi_destroy_session(cls_session); 2879 iscsi_free_session(cls_session);
2880
2881 iscsi_host_dec_session_cnt(shost); 2881 iscsi_host_dec_session_cnt(shost);
2882 module_put(owner); 2882 module_put(owner);
2883} 2883}
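
The teardown reordering has one goal: make the session unreachable before its backing strings are freed. iscsi_remove_session(), now called directly here instead of via the dropped iscsi_destroy_session() wrapper, tears down the sysfs/transport visibility first, so no attribute read can race with the kfree() calls; iscsi_free_session() then drops the final device reference. Roughly:

iscsi_remove_session(cls_session);	/* unregister: no new readers */
kfree(session->password);		/* safe to free string fields */
/* ... remaining kfree() calls ... */
iscsi_free_session(cls_session);	/* final put of the device    */
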
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7e7ae786121b..100bc4c8798d 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -6131,6 +6131,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
6131 "Extents and RPI headers enabled.\n"); 6131 "Extents and RPI headers enabled.\n");
6132 } 6132 }
6133 mempool_free(mboxq, phba->mbox_mem_pool); 6133 mempool_free(mboxq, phba->mbox_mem_pool);
6134 rc = -EIO;
6134 goto out_free_bsmbx; 6135 goto out_free_bsmbx;
6135 } 6136 }
6136 6137
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 79ba3ce063a4..23bdb1ca106e 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -884,7 +884,7 @@ out_err:
884 wcqe->total_data_placed); 884 wcqe->total_data_placed);
885 nCmd->transferred_length = 0; 885 nCmd->transferred_length = 0;
886 nCmd->rcv_rsplen = 0; 886 nCmd->rcv_rsplen = 0;
887 nCmd->status = NVME_SC_FC_TRANSPORT_ERROR; 887 nCmd->status = NVME_SC_INTERNAL;
888 } 888 }
889 } 889 }
890 890
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 1f59e7a74c7b..6b33a1f24f56 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -180,7 +180,7 @@ static void qla_nvme_sp_done(void *ptr, int res)
180 goto rel; 180 goto rel;
181 181
182 if (unlikely(res == QLA_FUNCTION_FAILED)) 182 if (unlikely(res == QLA_FUNCTION_FAILED))
183 fd->status = NVME_SC_FC_TRANSPORT_ERROR; 183 fd->status = NVME_SC_INTERNAL;
184 else 184 else
185 fd->status = 0; 185 fd->status = 0;
186 186
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 38942050b265..dab876c65473 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -580,7 +580,8 @@ int scsi_check_sense(struct scsi_cmnd *scmd)
580 if (sshdr.asc == 0x20 || /* Invalid command operation code */ 580 if (sshdr.asc == 0x20 || /* Invalid command operation code */
581 sshdr.asc == 0x21 || /* Logical block address out of range */ 581 sshdr.asc == 0x21 || /* Logical block address out of range */
582 sshdr.asc == 0x24 || /* Invalid field in cdb */ 582 sshdr.asc == 0x24 || /* Invalid field in cdb */
583 sshdr.asc == 0x26) { /* Parameter value invalid */ 583 sshdr.asc == 0x26 || /* Parameter value invalid */
584 sshdr.asc == 0x27) { /* Write protected */
584 set_host_byte(scmd, DID_TARGET_FAILURE); 585 set_host_byte(scmd, DID_TARGET_FAILURE);
585 } 586 }
586 return SUCCESS; 587 return SUCCESS;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index e7818afeda2b..15590a063ad9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -956,6 +956,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
956 if (*bflags & BLIST_NO_DIF) 956 if (*bflags & BLIST_NO_DIF)
957 sdev->no_dif = 1; 957 sdev->no_dif = 1;
958 958
959 if (*bflags & BLIST_UNMAP_LIMIT_WS)
960 sdev->unmap_limit_for_ws = 1;
961
959 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT; 962 sdev->eh_timeout = SCSI_DEFAULT_EH_TIMEOUT;
960 963
961 if (*bflags & BLIST_TRY_VPD_PAGES) 964 if (*bflags & BLIST_TRY_VPD_PAGES)
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 3c6bc0081fcb..cbd4495d0ff9 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2739,7 +2739,8 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2739 2739
2740 list_for_each_entry(rport, &fc_host->rports, peers) { 2740 list_for_each_entry(rport, &fc_host->rports, peers) {
2741 2741
2742 if ((rport->port_state == FC_PORTSTATE_BLOCKED) && 2742 if ((rport->port_state == FC_PORTSTATE_BLOCKED ||
2743 rport->port_state == FC_PORTSTATE_NOTPRESENT) &&
2743 (rport->channel == channel)) { 2744 (rport->channel == channel)) {
2744 2745
2745 switch (fc_host->tgtid_bind_type) { 2746 switch (fc_host->tgtid_bind_type) {
@@ -2876,7 +2877,6 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2876 memcpy(&rport->port_name, &ids->port_name, 2877 memcpy(&rport->port_name, &ids->port_name,
2877 sizeof(rport->port_name)); 2878 sizeof(rport->port_name));
2878 rport->port_id = ids->port_id; 2879 rport->port_id = ids->port_id;
2879 rport->roles = ids->roles;
2880 rport->port_state = FC_PORTSTATE_ONLINE; 2880 rport->port_state = FC_PORTSTATE_ONLINE;
2881 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT; 2881 rport->flags &= ~FC_RPORT_FAST_FAIL_TIMEDOUT;
2882 2882
@@ -2885,15 +2885,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2885 fci->f->dd_fcrport_size); 2885 fci->f->dd_fcrport_size);
2886 spin_unlock_irqrestore(shost->host_lock, flags); 2886 spin_unlock_irqrestore(shost->host_lock, flags);
2887 2887
2888 if (ids->roles & FC_PORT_ROLE_FCP_TARGET) { 2888 fc_remote_port_rolechg(rport, ids->roles);
2889 scsi_target_unblock(&rport->dev, SDEV_RUNNING);
2890
2891 /* initiate a scan of the target */
2892 spin_lock_irqsave(shost->host_lock, flags);
2893 rport->flags |= FC_RPORT_SCAN_PENDING;
2894 scsi_queue_work(shost, &rport->scan_work);
2895 spin_unlock_irqrestore(shost->host_lock, flags);
2896 }
2897 return rport; 2889 return rport;
2898 } 2890 }
2899 } 2891 }
@@ -3571,7 +3563,7 @@ fc_vport_sched_delete(struct work_struct *work)
3571static enum blk_eh_timer_return 3563static enum blk_eh_timer_return
3572fc_bsg_job_timeout(struct request *req) 3564fc_bsg_job_timeout(struct request *req)
3573{ 3565{
3574 struct bsg_job *job = (void *) req->special; 3566 struct bsg_job *job = blk_mq_rq_to_pdu(req);
3575 struct Scsi_Host *shost = fc_bsg_to_shost(job); 3567 struct Scsi_Host *shost = fc_bsg_to_shost(job);
3576 struct fc_rport *rport = fc_bsg_to_rport(job); 3568 struct fc_rport *rport = fc_bsg_to_rport(job);
3577 struct fc_internal *i = to_fc_internal(shost->transportt); 3569 struct fc_internal *i = to_fc_internal(shost->transportt);
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 8934f19bce8e..7404d26895f5 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2211,22 +2211,6 @@ void iscsi_free_session(struct iscsi_cls_session *session)
2211EXPORT_SYMBOL_GPL(iscsi_free_session); 2211EXPORT_SYMBOL_GPL(iscsi_free_session);
2212 2212
2213/** 2213/**
2214 * iscsi_destroy_session - destroy iscsi session
2215 * @session: iscsi_session
2216 *
2217 * Can be called by a LLD or iscsi_transport. There must not be
2218 * any running connections.
2219 */
2220int iscsi_destroy_session(struct iscsi_cls_session *session)
2221{
2222 iscsi_remove_session(session);
2223 ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n");
2224 iscsi_free_session(session);
2225 return 0;
2226}
2227EXPORT_SYMBOL_GPL(iscsi_destroy_session);
2228
2229/**
2230 * iscsi_create_conn - create iscsi class connection 2214 * iscsi_create_conn - create iscsi class connection
2231 * @session: iscsi cls session 2215 * @session: iscsi cls session
2232 * @dd_size: private driver data size 2216 * @dd_size: private driver data size
@@ -3689,7 +3673,7 @@ iscsi_if_rx(struct sk_buff *skb)
3689 uint32_t group; 3673 uint32_t group;
3690 3674
3691 nlh = nlmsg_hdr(skb); 3675 nlh = nlmsg_hdr(skb);
3692 if (nlh->nlmsg_len < sizeof(*nlh) || 3676 if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) ||
3693 skb->len < nlh->nlmsg_len) { 3677 skb->len < nlh->nlmsg_len) {
3694 break; 3678 break;
3695 } 3679 }
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 11c1738c2100..d175c5c5ccf8 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -715,13 +715,21 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
715 break; 715 break;
716 716
717 case SD_LBP_WS16: 717 case SD_LBP_WS16:
718 max_blocks = min_not_zero(sdkp->max_ws_blocks, 718 if (sdkp->device->unmap_limit_for_ws)
719 (u32)SD_MAX_WS16_BLOCKS); 719 max_blocks = sdkp->max_unmap_blocks;
720 else
721 max_blocks = sdkp->max_ws_blocks;
722
723 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
720 break; 724 break;
721 725
722 case SD_LBP_WS10: 726 case SD_LBP_WS10:
723 max_blocks = min_not_zero(sdkp->max_ws_blocks, 727 if (sdkp->device->unmap_limit_for_ws)
724 (u32)SD_MAX_WS10_BLOCKS); 728 max_blocks = sdkp->max_unmap_blocks;
729 else
730 max_blocks = sdkp->max_ws_blocks;
731
732 max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
725 break; 733 break;
726 734
727 case SD_LBP_ZERO: 735 case SD_LBP_ZERO:
@@ -2915,8 +2923,6 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
2915 sd_config_discard(sdkp, SD_LBP_WS16); 2923 sd_config_discard(sdkp, SD_LBP_WS16);
2916 else if (sdkp->lbpws10) 2924 else if (sdkp->lbpws10)
2917 sd_config_discard(sdkp, SD_LBP_WS10); 2925 sd_config_discard(sdkp, SD_LBP_WS10);
2918 else if (sdkp->lbpu && sdkp->max_unmap_blocks)
2919 sd_config_discard(sdkp, SD_LBP_UNMAP);
2920 else 2926 else
2921 sd_config_discard(sdkp, SD_LBP_DISABLE); 2927 sd_config_discard(sdkp, SD_LBP_DISABLE);
2922 } 2928 }
@@ -3101,8 +3107,6 @@ static int sd_revalidate_disk(struct gendisk *disk)
3101 sd_read_security(sdkp, buffer); 3107 sd_read_security(sdkp, buffer);
3102 } 3108 }
3103 3109
3104 sdkp->first_scan = 0;
3105
3106 /* 3110 /*
3107 * We now have all cache related info, determine how we deal 3111 * We now have all cache related info, determine how we deal
3108 * with flush requests. 3112 * with flush requests.
@@ -3117,7 +3121,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
3117 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); 3121 q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);
3118 3122
3119 /* 3123 /*
3120 * Use the device's preferred I/O size for reads and writes 3124 * Determine the device's preferred I/O size for reads and writes
3121 * unless the reported value is unreasonably small, large, or 3125 * unless the reported value is unreasonably small, large, or
3122 * garbage. 3126 * garbage.
3123 */ 3127 */
@@ -3131,8 +3135,19 @@ static int sd_revalidate_disk(struct gendisk *disk)
3131 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max), 3135 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
3132 (sector_t)BLK_DEF_MAX_SECTORS); 3136 (sector_t)BLK_DEF_MAX_SECTORS);
3133 3137
3134 /* Combine with controller limits */ 3138 /* Do not exceed controller limit */
3135 q->limits.max_sectors = min(rw_max, queue_max_hw_sectors(q)); 3139 rw_max = min(rw_max, queue_max_hw_sectors(q));
3140
3141 /*
3142 * Only update max_sectors if previously unset or if the current value
3143 * exceeds the capabilities of the hardware.
3144 */
3145 if (sdkp->first_scan ||
3146 q->limits.max_sectors > q->limits.max_dev_sectors ||
3147 q->limits.max_sectors > q->limits.max_hw_sectors)
3148 q->limits.max_sectors = rw_max;
3149
3150 sdkp->first_scan = 0;
3136 3151
3137 set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity)); 3152 set_capacity(disk, logical_to_sectors(sdp, sdkp->capacity));
3138 sd_config_write_same(sdkp); 3153 sd_config_write_same(sdkp);
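
Two independent behaviors change here. First, devices flagged BLIST_UNMAP_LIMIT_WS size their WRITE SAME discards from the MAXIMUM UNMAP LBA COUNT field (max_unmap_blocks) rather than the WRITE SAME limit. Second, revalidation stops clobbering a max_sectors the user lowered at runtime: the queue limit is rewritten only on first scan or when it no longer fits the hardware. Condensed:

rw_max = min(rw_max, queue_max_hw_sectors(q));	/* never above HBA */

if (sdkp->first_scan ||
    q->limits.max_sectors > q->limits.max_dev_sectors ||
    q->limits.max_sectors > q->limits.max_hw_sectors)
	q->limits.max_sectors = rw_max;	/* (re)initialize only then */

sdkp->first_scan = 0;	/* cleared after the check so it can gate it */
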
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index cf0e71db9e51..0419c2298eab 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -828,6 +828,39 @@ static int max_sectors_bytes(struct request_queue *q)
828 return max_sectors << 9; 828 return max_sectors << 9;
829} 829}
830 830
831static void
832sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
833{
834 Sg_request *srp;
835 int val;
836 unsigned int ms;
837
838 val = 0;
839 list_for_each_entry(srp, &sfp->rq_list, entry) {
840 if (val > SG_MAX_QUEUE)
841 break;
842 rinfo[val].req_state = srp->done + 1;
843 rinfo[val].problem =
844 srp->header.masked_status &
845 srp->header.host_status &
846 srp->header.driver_status;
847 if (srp->done)
848 rinfo[val].duration =
849 srp->header.duration;
850 else {
851 ms = jiffies_to_msecs(jiffies);
852 rinfo[val].duration =
853 (ms > srp->header.duration) ?
854 (ms - srp->header.duration) : 0;
855 }
856 rinfo[val].orphan = srp->orphan;
857 rinfo[val].sg_io_owned = srp->sg_io_owned;
858 rinfo[val].pack_id = srp->header.pack_id;
859 rinfo[val].usr_ptr = srp->header.usr_ptr;
860 val++;
861 }
862}
863
831static long 864static long
832sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) 865sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
833{ 866{
@@ -1012,38 +1045,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
1012 return -EFAULT; 1045 return -EFAULT;
1013 else { 1046 else {
1014 sg_req_info_t *rinfo; 1047 sg_req_info_t *rinfo;
1015 unsigned int ms;
1016 1048
1017 rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, 1049 rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
1018 GFP_KERNEL); 1050 GFP_KERNEL);
1019 if (!rinfo) 1051 if (!rinfo)
1020 return -ENOMEM; 1052 return -ENOMEM;
1021 read_lock_irqsave(&sfp->rq_list_lock, iflags); 1053 read_lock_irqsave(&sfp->rq_list_lock, iflags);
1022 val = 0; 1054 sg_fill_request_table(sfp, rinfo);
1023 list_for_each_entry(srp, &sfp->rq_list, entry) {
1024 if (val >= SG_MAX_QUEUE)
1025 break;
1026 memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
1027 rinfo[val].req_state = srp->done + 1;
1028 rinfo[val].problem =
1029 srp->header.masked_status &
1030 srp->header.host_status &
1031 srp->header.driver_status;
1032 if (srp->done)
1033 rinfo[val].duration =
1034 srp->header.duration;
1035 else {
1036 ms = jiffies_to_msecs(jiffies);
1037 rinfo[val].duration =
1038 (ms > srp->header.duration) ?
1039 (ms - srp->header.duration) : 0;
1040 }
1041 rinfo[val].orphan = srp->orphan;
1042 rinfo[val].sg_io_owned = srp->sg_io_owned;
1043 rinfo[val].pack_id = srp->header.pack_id;
1044 rinfo[val].usr_ptr = srp->header.usr_ptr;
1045 val++;
1046 }
1047 read_unlock_irqrestore(&sfp->rq_list_lock, iflags); 1055 read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
1048 result = __copy_to_user(p, rinfo, 1056 result = __copy_to_user(p, rinfo,
1049 SZ_SG_REQ_INFO * SG_MAX_QUEUE); 1057 SZ_SG_REQ_INFO * SG_MAX_QUEUE);
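
The kmalloc() to kzalloc() switch is the security-relevant part: the ioctl copies the full SZ_SG_REQ_INFO * SG_MAX_QUEUE buffer to userspace even when fewer requests are queued, so any tail the fill loop never wrote must be pre-zeroed or stale kernel heap contents leak out. With the loop factored into sg_fill_request_table(), the ioctl body reduces to:

rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, GFP_KERNEL);
if (!rinfo)
	return -ENOMEM;

read_lock_irqsave(&sfp->rq_list_lock, iflags);
sg_fill_request_table(sfp, rinfo);	/* writes only queued slots */
read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

result = __copy_to_user(p, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE);
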
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 1c0c9553bc05..7dd38047ba23 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -246,11 +246,11 @@ struct mxser_port {
246 unsigned char err_shadow; 246 unsigned char err_shadow;
247 247
248 struct async_icount icount; /* kernel counters for 4 input interrupts */ 248 struct async_icount icount; /* kernel counters for 4 input interrupts */
249 int timeout; 249 unsigned int timeout;
250 250
251 int read_status_mask; 251 int read_status_mask;
252 int ignore_status_mask; 252 int ignore_status_mask;
253 int xmit_fifo_size; 253 unsigned int xmit_fifo_size;
254 int xmit_head; 254 int xmit_head;
255 int xmit_tail; 255 int xmit_tail;
256 int xmit_cnt; 256 int xmit_cnt;
@@ -572,8 +572,9 @@ static void mxser_dtr_rts(struct tty_port *port, int on)
572static int mxser_set_baud(struct tty_struct *tty, long newspd) 572static int mxser_set_baud(struct tty_struct *tty, long newspd)
573{ 573{
574 struct mxser_port *info = tty->driver_data; 574 struct mxser_port *info = tty->driver_data;
575 int quot = 0, baud; 575 unsigned int quot = 0, baud;
576 unsigned char cval; 576 unsigned char cval;
577 u64 timeout;
577 578
578 if (!info->ioaddr) 579 if (!info->ioaddr)
579 return -1; 580 return -1;
@@ -594,8 +595,13 @@ static int mxser_set_baud(struct tty_struct *tty, long newspd)
594 quot = 0; 595 quot = 0;
595 } 596 }
596 597
597 info->timeout = ((info->xmit_fifo_size * HZ * 10 * quot) / info->baud_base); 598 /*
598 info->timeout += HZ / 50; /* Add .02 seconds of slop */ 599 * worst case (128 * 1000 * 10 * 18432) needs 35 bits, so divide in the
600 * u64 domain
601 */
602 timeout = (u64)info->xmit_fifo_size * HZ * 10 * quot;
603 do_div(timeout, info->baud_base);
604 info->timeout = timeout + HZ / 50; /* Add .02 seconds of slop */
599 605
600 if (quot) { 606 if (quot) {
601 info->MCR |= UART_MCR_DTR; 607 info->MCR |= UART_MCR_DTR;
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 583c9a0c7ecc..8c48c3784831 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -507,9 +507,14 @@ static void bcm_uart_set_termios(struct uart_port *port,
507{ 507{
508 unsigned int ctl, baud, quot, ier; 508 unsigned int ctl, baud, quot, ier;
509 unsigned long flags; 509 unsigned long flags;
510 int tries;
510 511
511 spin_lock_irqsave(&port->lock, flags); 512 spin_lock_irqsave(&port->lock, flags);
512 513
514 /* Drain the hot tub fully before we power it off for the winter. */
515 for (tries = 3; !bcm_uart_tx_empty(port) && tries; tries--)
516 mdelay(10);
517
513 /* disable uart while changing speed */ 518 /* disable uart while changing speed */
514 bcm_uart_disable(port); 519 bcm_uart_disable(port);
515 bcm_uart_flush(port); 520 bcm_uart_flush(port);
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 849c1f9991ce..f0252184291e 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1276,7 +1276,6 @@ static void rx_dma_timer_init(struct lpuart_port *sport)
1276static int lpuart_startup(struct uart_port *port) 1276static int lpuart_startup(struct uart_port *port)
1277{ 1277{
1278 struct lpuart_port *sport = container_of(port, struct lpuart_port, port); 1278 struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
1279 int ret;
1280 unsigned long flags; 1279 unsigned long flags;
1281 unsigned char temp; 1280 unsigned char temp;
1282 1281
@@ -1291,11 +1290,6 @@ static int lpuart_startup(struct uart_port *port)
1291 sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) & 1290 sport->rxfifo_size = 0x1 << (((temp >> UARTPFIFO_RXSIZE_OFF) &
1292 UARTPFIFO_FIFOSIZE_MASK) + 1); 1291 UARTPFIFO_FIFOSIZE_MASK) + 1);
1293 1292
1294 ret = devm_request_irq(port->dev, port->irq, lpuart_int, 0,
1295 DRIVER_NAME, sport);
1296 if (ret)
1297 return ret;
1298
1299 spin_lock_irqsave(&sport->port.lock, flags); 1293 spin_lock_irqsave(&sport->port.lock, flags);
1300 1294
1301 lpuart_setup_watermark(sport); 1295 lpuart_setup_watermark(sport);
@@ -1333,7 +1327,6 @@ static int lpuart_startup(struct uart_port *port)
1333static int lpuart32_startup(struct uart_port *port) 1327static int lpuart32_startup(struct uart_port *port)
1334{ 1328{
1335 struct lpuart_port *sport = container_of(port, struct lpuart_port, port); 1329 struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
1336 int ret;
1337 unsigned long flags; 1330 unsigned long flags;
1338 unsigned long temp; 1331 unsigned long temp;
1339 1332
@@ -1346,11 +1339,6 @@ static int lpuart32_startup(struct uart_port *port)
1346 sport->rxfifo_size = 0x1 << (((temp >> UARTFIFO_RXSIZE_OFF) & 1339 sport->rxfifo_size = 0x1 << (((temp >> UARTFIFO_RXSIZE_OFF) &
1347 UARTFIFO_FIFOSIZE_MASK) - 1); 1340 UARTFIFO_FIFOSIZE_MASK) - 1);
1348 1341
1349 ret = devm_request_irq(port->dev, port->irq, lpuart32_int, 0,
1350 DRIVER_NAME, sport);
1351 if (ret)
1352 return ret;
1353
1354 spin_lock_irqsave(&sport->port.lock, flags); 1342 spin_lock_irqsave(&sport->port.lock, flags);
1355 1343
1356 lpuart32_setup_watermark(sport); 1344 lpuart32_setup_watermark(sport);
@@ -1380,8 +1368,6 @@ static void lpuart_shutdown(struct uart_port *port)
1380 1368
1381 spin_unlock_irqrestore(&port->lock, flags); 1369 spin_unlock_irqrestore(&port->lock, flags);
1382 1370
1383 devm_free_irq(port->dev, port->irq, sport);
1384
1385 if (sport->lpuart_dma_rx_use) { 1371 if (sport->lpuart_dma_rx_use) {
1386 del_timer_sync(&sport->lpuart_timer); 1372 del_timer_sync(&sport->lpuart_timer);
1387 lpuart_dma_rx_free(&sport->port); 1373 lpuart_dma_rx_free(&sport->port);
@@ -1400,7 +1386,6 @@ static void lpuart_shutdown(struct uart_port *port)
1400 1386
1401static void lpuart32_shutdown(struct uart_port *port) 1387static void lpuart32_shutdown(struct uart_port *port)
1402{ 1388{
1403 struct lpuart_port *sport = container_of(port, struct lpuart_port, port);
1404 unsigned long temp; 1389 unsigned long temp;
1405 unsigned long flags; 1390 unsigned long flags;
1406 1391
@@ -1413,8 +1398,6 @@ static void lpuart32_shutdown(struct uart_port *port)
1413 lpuart32_write(port, temp, UARTCTRL); 1398 lpuart32_write(port, temp, UARTCTRL);
1414 1399
1415 spin_unlock_irqrestore(&port->lock, flags); 1400 spin_unlock_irqrestore(&port->lock, flags);
1416
1417 devm_free_irq(port->dev, port->irq, sport);
1418} 1401}
1419 1402
1420static void 1403static void
@@ -2212,16 +2195,22 @@ static int lpuart_probe(struct platform_device *pdev)
2212 2195
2213 platform_set_drvdata(pdev, &sport->port); 2196 platform_set_drvdata(pdev, &sport->port);
2214 2197
2215 if (lpuart_is_32(sport)) 2198 if (lpuart_is_32(sport)) {
2216 lpuart_reg.cons = LPUART32_CONSOLE; 2199 lpuart_reg.cons = LPUART32_CONSOLE;
2217 else 2200 ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart32_int, 0,
2201 DRIVER_NAME, sport);
2202 } else {
2218 lpuart_reg.cons = LPUART_CONSOLE; 2203 lpuart_reg.cons = LPUART_CONSOLE;
2204 ret = devm_request_irq(&pdev->dev, sport->port.irq, lpuart_int, 0,
2205 DRIVER_NAME, sport);
2206 }
2207
2208 if (ret)
2209 goto failed_irq_request;
2219 2210
2220 ret = uart_add_one_port(&lpuart_reg, &sport->port); 2211 ret = uart_add_one_port(&lpuart_reg, &sport->port);
2221 if (ret) { 2212 if (ret)
2222 clk_disable_unprepare(sport->clk); 2213 goto failed_attach_port;
2223 return ret;
2224 }
2225 2214
2226 sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx"); 2215 sport->dma_tx_chan = dma_request_slave_channel(sport->port.dev, "tx");
2227 if (!sport->dma_tx_chan) 2216 if (!sport->dma_tx_chan)
@@ -2240,6 +2229,11 @@ static int lpuart_probe(struct platform_device *pdev)
2240 } 2229 }
2241 2230
2242 return 0; 2231 return 0;
2232
2233failed_attach_port:
2234failed_irq_request:
2235 clk_disable_unprepare(sport->clk);
2236 return ret;
2243} 2237}
2244 2238
2245static int lpuart_remove(struct platform_device *pdev) 2239static int lpuart_remove(struct platform_device *pdev)
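
Moving devm_request_irq() from the startup()/shutdown() pair into probe matches how devm resources are scoped: they live for the device's bound lifetime, not a port's open count, so the old code had to undo them with explicit devm_free_irq() calls on every close. After the patch the handler is chosen once by register layout, and IRQ and port-registration failures share one clock cleanup path, roughly:

ret = devm_request_irq(&pdev->dev, sport->port.irq,
		lpuart_is_32(sport) ? lpuart32_int : lpuart_int,
		0, DRIVER_NAME, sport);
if (ret)
	goto failed_irq_request;

ret = uart_add_one_port(&lpuart_reg, &sport->port);
if (ret)
	goto failed_attach_port;
/* ... */
failed_attach_port:
failed_irq_request:
	clk_disable_unprepare(sport->clk);
	return ret;
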
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index cdd2f942317c..b9c7a904c1ea 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -889,7 +889,16 @@ static int sccnxp_probe(struct platform_device *pdev)
889 goto err_out; 889 goto err_out;
890 uartclk = 0; 890 uartclk = 0;
891 } else { 891 } else {
892 clk_prepare_enable(clk); 892 ret = clk_prepare_enable(clk);
893 if (ret)
894 goto err_out;
895
896 ret = devm_add_action_or_reset(&pdev->dev,
897 (void(*)(void *))clk_disable_unprepare,
898 clk);
899 if (ret)
900 goto err_out;
901
893 uartclk = clk_get_rate(clk); 902 uartclk = clk_get_rate(clk);
894 } 903 }
895 904
@@ -988,7 +997,7 @@ static int sccnxp_probe(struct platform_device *pdev)
988 uart_unregister_driver(&s->uart); 997 uart_unregister_driver(&s->uart);
989err_out: 998err_out:
990 if (!IS_ERR(s->regulator)) 999 if (!IS_ERR(s->regulator))
991 return regulator_disable(s->regulator); 1000 regulator_disable(s->regulator);
992 1001
993 return ret; 1002 return ret;
994} 1003}
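
clk_prepare_enable() can fail and previously went unchecked; the fix also registers the matching disable as a devm action, so every later error path and eventual driver removal balances the clock without explicit unwinding. devm_add_action_or_reset() runs the action immediately if it cannot register it, which keeps even that failure path balanced:

ret = clk_prepare_enable(clk);
if (ret)
	goto err_out;

ret = devm_add_action_or_reset(&pdev->dev,
		(void (*)(void *))clk_disable_unprepare, clk);
if (ret)
	goto err_out;	/* clock already disabled by the helper */

uartclk = clk_get_rate(clk);
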
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 5aacea1978a5..3e865dbf878c 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -190,8 +190,10 @@ static void wdm_in_callback(struct urb *urb)
190 /* 190 /*
191 * only set a new error if there is no previous error. 191 * only set a new error if there is no previous error.
192 * Errors are only cleared during read/open 192 * Errors are only cleared during read/open
193 * Avoid propagating -EPIPE (stall) to userspace since it is
194 * better handled as an empty read
193 */ 195 */
194 if (desc->rerr == 0) 196 if (desc->rerr == 0 && status != -EPIPE)
195 desc->rerr = status; 197 desc->rerr = status;
196 198
197 if (length + desc->length > desc->wMaxCommand) { 199 if (length + desc->length > desc->wMaxCommand) {
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 4be52c602e9b..68b54bd88d1e 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -643,15 +643,23 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
643 643
644 } else if (header->bDescriptorType == 644 } else if (header->bDescriptorType ==
645 USB_DT_INTERFACE_ASSOCIATION) { 645 USB_DT_INTERFACE_ASSOCIATION) {
646 struct usb_interface_assoc_descriptor *d;
647
648 d = (struct usb_interface_assoc_descriptor *)header;
649 if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
650 dev_warn(ddev,
651 "config %d has an invalid interface association descriptor of length %d, skipping\n",
652 cfgno, d->bLength);
653 continue;
654 }
655
646 if (iad_num == USB_MAXIADS) { 656 if (iad_num == USB_MAXIADS) {
647 dev_warn(ddev, "found more Interface " 657 dev_warn(ddev, "found more Interface "
648 "Association Descriptors " 658 "Association Descriptors "
649 "than allocated for in " 659 "than allocated for in "
650 "configuration %d\n", cfgno); 660 "configuration %d\n", cfgno);
651 } else { 661 } else {
652 config->intf_assoc[iad_num] = 662 config->intf_assoc[iad_num] = d;
653 (struct usb_interface_assoc_descriptor
654 *)header;
655 iad_num++; 663 iad_num++;
656 } 664 }
657 665
@@ -852,7 +860,7 @@ int usb_get_configuration(struct usb_device *dev)
852 } 860 }
853 861
854 if (dev->quirks & USB_QUIRK_DELAY_INIT) 862 if (dev->quirks & USB_QUIRK_DELAY_INIT)
855 msleep(100); 863 msleep(200);
856 864
857 result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, 865 result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
858 bigbuffer, length); 866 bigbuffer, length);
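
The new check follows the standard USB descriptor-parsing rule: never trust a type-specific field until bLength is known to cover that descriptor type's fixed part, and skip rather than abort so one malformed descriptor cannot poison the whole configuration. In isolation:

struct usb_interface_assoc_descriptor *d =
	(struct usb_interface_assoc_descriptor *)header;

if (d->bLength < USB_DT_INTERFACE_ASSOCIATION_SIZE) {
	dev_warn(ddev, "config %d has an invalid interface association descriptor of length %d, skipping\n",
		 cfgno, d->bLength);
	continue;			/* keep parsing the rest */
}
if (iad_num < USB_MAXIADS)
	config->intf_assoc[iad_num++] = d;	/* store only validated IADs */
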
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 318bb3b96687..4664e543cf2f 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -140,6 +140,9 @@ module_param(usbfs_memory_mb, uint, 0644);
140MODULE_PARM_DESC(usbfs_memory_mb, 140MODULE_PARM_DESC(usbfs_memory_mb,
141 "maximum MB allowed for usbfs buffers (0 = no limit)"); 141 "maximum MB allowed for usbfs buffers (0 = no limit)");
142 142
143/* Hard limit, necessary to avoid arithmetic overflow */
144#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
145
143static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */ 146static atomic64_t usbfs_memory_usage; /* Total memory currently allocated */
144 147
145/* Check whether it's okay to allocate more memory for a transfer */ 148/* Check whether it's okay to allocate more memory for a transfer */
@@ -1460,6 +1463,8 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
1460 USBDEVFS_URB_ZERO_PACKET | 1463 USBDEVFS_URB_ZERO_PACKET |
1461 USBDEVFS_URB_NO_INTERRUPT)) 1464 USBDEVFS_URB_NO_INTERRUPT))
1462 return -EINVAL; 1465 return -EINVAL;
1466 if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
1467 return -EINVAL;
1463 if (uurb->buffer_length > 0 && !uurb->buffer) 1468 if (uurb->buffer_length > 0 && !uurb->buffer)
1464 return -EINVAL; 1469 return -EINVAL;
1465 if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL && 1470 if (!(uurb->type == USBDEVFS_URB_TYPE_CONTROL &&
@@ -1571,7 +1576,11 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
1571 totlen += isopkt[u].length; 1576 totlen += isopkt[u].length;
1572 } 1577 }
1573 u *= sizeof(struct usb_iso_packet_descriptor); 1578 u *= sizeof(struct usb_iso_packet_descriptor);
1574 uurb->buffer_length = totlen; 1579 if (totlen <= uurb->buffer_length)
1580 uurb->buffer_length = totlen;
1581 else
1582 WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d",
1583 totlen, uurb->buffer_length);
1575 break; 1584 break;
1576 1585
1577 default: 1586 default:
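
The hard cap is chosen so later length arithmetic cannot wrap a 32-bit unsigned value: UINT_MAX / 2 keeps the top bit clear (lengths stay safe even if interpreted as signed), and the extra 1,000,000 bytes of margin absorbs bookkeeping added on top of the raw buffer. The check itself is a single early rejection:

#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)	/* = 2,146,483,647 */

if ((unsigned int)uurb->buffer_length >= USBFS_XFER_MAX)
	return -EINVAL;		/* reject before any size math */
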
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 41eaf0b52518..b5c733613823 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4838,7 +4838,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
4838 goto loop; 4838 goto loop;
4839 4839
4840 if (udev->quirks & USB_QUIRK_DELAY_INIT) 4840 if (udev->quirks & USB_QUIRK_DELAY_INIT)
4841 msleep(1000); 4841 msleep(2000);
4842 4842
4843 /* consecutive bus-powered hubs aren't reliable; they can 4843 /* consecutive bus-powered hubs aren't reliable; they can
4844 * violate the voltage drop budget. if the new child has 4844 * violate the voltage drop budget. if the new child has
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 4c38ea41ae96..371a07d874a3 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -2069,6 +2069,10 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
2069 elength = 1; 2069 elength = 1;
2070 goto next_desc; 2070 goto next_desc;
2071 } 2071 }
2072 if ((buflen < elength) || (elength < 3)) {
2073 dev_err(&intf->dev, "invalid descriptor buffer length\n");
2074 break;
2075 }
2072 if (buffer[1] != USB_DT_CS_INTERFACE) { 2076 if (buffer[1] != USB_DT_CS_INTERFACE) {
2073 dev_err(&intf->dev, "skipping garbage\n"); 2077 dev_err(&intf->dev, "skipping garbage\n");
2074 goto next_desc; 2078 goto next_desc;
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 4cef7d4f9cd0..a26d1fde0f5e 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -177,6 +177,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
177 { .compatible = "rockchip,rk3399-dwc3" }, 177 { .compatible = "rockchip,rk3399-dwc3" },
178 { .compatible = "xlnx,zynqmp-dwc3" }, 178 { .compatible = "xlnx,zynqmp-dwc3" },
179 { .compatible = "cavium,octeon-7130-usb-uctl" }, 179 { .compatible = "cavium,octeon-7130-usb-uctl" },
180 { .compatible = "sprd,sc9860-dwc3" },
180 { /* Sentinel */ } 181 { /* Sentinel */ }
181}; 182};
182MODULE_DEVICE_TABLE(of, of_dwc3_simple_match); 183MODULE_DEVICE_TABLE(of, of_dwc3_simple_match);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 827e376bfa97..75e6cb044eb2 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -990,6 +990,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
990 DWC3_TRBCTL_CONTROL_DATA, 990 DWC3_TRBCTL_CONTROL_DATA,
991 true); 991 true);
992 992
993 req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
994
993 /* Now prepare one extra TRB to align transfer size */ 995 /* Now prepare one extra TRB to align transfer size */
994 dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, 996 dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
995 maxpacket - rem, 997 maxpacket - rem,
@@ -1015,6 +1017,8 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
1015 DWC3_TRBCTL_CONTROL_DATA, 1017 DWC3_TRBCTL_CONTROL_DATA,
1016 true); 1018 true);
1017 1019
1020 req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1];
1021
1018 /* Now prepare one extra TRB to align transfer size */ 1022 /* Now prepare one extra TRB to align transfer size */
1019 dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, 1023 dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr,
1020 0, DWC3_TRBCTL_CONTROL_DATA, 1024 0, DWC3_TRBCTL_CONTROL_DATA,
@@ -1029,6 +1033,9 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
1029 dwc3_ep0_prepare_one_trb(dep, req->request.dma, 1033 dwc3_ep0_prepare_one_trb(dep, req->request.dma,
1030 req->request.length, DWC3_TRBCTL_CONTROL_DATA, 1034 req->request.length, DWC3_TRBCTL_CONTROL_DATA,
1031 false); 1035 false);
1036
1037 req->trb = &dwc->ep0_trb[dep->trb_enqueue];
1038
1032 ret = dwc3_ep0_start_trans(dep); 1039 ret = dwc3_ep0_start_trans(dep);
1033 } 1040 }
1034 1041
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 9990944a7245..8b342587f8ad 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -46,7 +46,8 @@
46static void ffs_data_get(struct ffs_data *ffs); 46static void ffs_data_get(struct ffs_data *ffs);
47static void ffs_data_put(struct ffs_data *ffs); 47static void ffs_data_put(struct ffs_data *ffs);
48/* Creates new ffs_data object. */ 48/* Creates new ffs_data object. */
49static struct ffs_data *__must_check ffs_data_new(void) __attribute__((malloc)); 49static struct ffs_data *__must_check ffs_data_new(const char *dev_name)
50 __attribute__((malloc));
50 51
51/* Opened counter handling. */ 52/* Opened counter handling. */
52static void ffs_data_opened(struct ffs_data *ffs); 53static void ffs_data_opened(struct ffs_data *ffs);
@@ -780,11 +781,12 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
780 struct usb_request *req) 781 struct usb_request *req)
781{ 782{
782 struct ffs_io_data *io_data = req->context; 783 struct ffs_io_data *io_data = req->context;
784 struct ffs_data *ffs = io_data->ffs;
783 785
784 ENTER(); 786 ENTER();
785 787
786 INIT_WORK(&io_data->work, ffs_user_copy_worker); 788 INIT_WORK(&io_data->work, ffs_user_copy_worker);
787 schedule_work(&io_data->work); 789 queue_work(ffs->io_completion_wq, &io_data->work);
788} 790}
789 791
790static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile) 792static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
@@ -1500,7 +1502,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
1500 if (unlikely(ret < 0)) 1502 if (unlikely(ret < 0))
1501 return ERR_PTR(ret); 1503 return ERR_PTR(ret);
1502 1504
1503 ffs = ffs_data_new(); 1505 ffs = ffs_data_new(dev_name);
1504 if (unlikely(!ffs)) 1506 if (unlikely(!ffs))
1505 return ERR_PTR(-ENOMEM); 1507 return ERR_PTR(-ENOMEM);
1506 ffs->file_perms = data.perms; 1508 ffs->file_perms = data.perms;
@@ -1610,6 +1612,7 @@ static void ffs_data_put(struct ffs_data *ffs)
1610 BUG_ON(waitqueue_active(&ffs->ev.waitq) || 1612 BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
1611 waitqueue_active(&ffs->ep0req_completion.wait) || 1613 waitqueue_active(&ffs->ep0req_completion.wait) ||
1612 waitqueue_active(&ffs->wait)); 1614 waitqueue_active(&ffs->wait));
1615 destroy_workqueue(ffs->io_completion_wq);
1613 kfree(ffs->dev_name); 1616 kfree(ffs->dev_name);
1614 kfree(ffs); 1617 kfree(ffs);
1615 } 1618 }
@@ -1642,7 +1645,7 @@ static void ffs_data_closed(struct ffs_data *ffs)
1642 ffs_data_put(ffs); 1645 ffs_data_put(ffs);
1643} 1646}
1644 1647
1645static struct ffs_data *ffs_data_new(void) 1648static struct ffs_data *ffs_data_new(const char *dev_name)
1646{ 1649{
1647 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL); 1650 struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
1648 if (unlikely(!ffs)) 1651 if (unlikely(!ffs))
@@ -1650,6 +1653,12 @@ static struct ffs_data *ffs_data_new(void)
1650 1653
1651 ENTER(); 1654 ENTER();
1652 1655
1656 ffs->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name);
1657 if (!ffs->io_completion_wq) {
1658 kfree(ffs);
1659 return NULL;
1660 }
1661
1653 refcount_set(&ffs->ref, 1); 1662 refcount_set(&ffs->ref, 1);
1654 atomic_set(&ffs->opened, 0); 1663 atomic_set(&ffs->opened, 0);
1655 ffs->state = FFS_READ_DESCRIPTORS; 1664 ffs->state = FFS_READ_DESCRIPTORS;
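
The f_fs change replaces the system workqueue with a per-instance ordered workqueue, created in ffs_data_new() from the mount's device name and torn down in ffs_data_put(). A stripped-down sketch of that lifecycle, with the ffs structures reduced to the fields involved (names shortened for illustration):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_data {
	struct workqueue_struct *io_completion_wq;
	struct work_struct work;
};

static struct my_data *my_data_new(const char *dev_name)
{
	struct my_data *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;

	/*
	 * One ordered queue per instance: completions for this device
	 * run one at a time and never land on another device's queue.
	 */
	d->io_completion_wq = alloc_ordered_workqueue("%s", 0, dev_name);
	if (!d->io_completion_wq) {
		kfree(d);
		return NULL;
	}
	return d;
}

static void my_complete(struct my_data *d, work_func_t fn)
{
	INIT_WORK(&d->work, fn);
	queue_work(d->io_completion_wq, &d->work);	/* not schedule_work() */
}

static void my_data_put(struct my_data *d)
{
	destroy_workqueue(d->io_completion_wq);	/* drains pending work */
	kfree(d);
}
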
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index d6bd0244b008..5153e29870c3 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -307,8 +307,6 @@ struct fsg_common {
307 struct completion thread_notifier; 307 struct completion thread_notifier;
308 struct task_struct *thread_task; 308 struct task_struct *thread_task;
309 309
310 /* Callback functions. */
311 const struct fsg_operations *ops;
312 /* Gadget's private data. */ 310 /* Gadget's private data. */
313 void *private_data; 311 void *private_data;
314 312
@@ -2438,6 +2436,7 @@ static void handle_exception(struct fsg_common *common)
2438static int fsg_main_thread(void *common_) 2436static int fsg_main_thread(void *common_)
2439{ 2437{
2440 struct fsg_common *common = common_; 2438 struct fsg_common *common = common_;
2439 int i;
2441 2440
2442 /* 2441 /*
2443 * Allow the thread to be killed by a signal, but set the signal mask 2442 * Allow the thread to be killed by a signal, but set the signal mask
@@ -2476,21 +2475,16 @@ static int fsg_main_thread(void *common_)
2476 common->thread_task = NULL; 2475 common->thread_task = NULL;
2477 spin_unlock_irq(&common->lock); 2476 spin_unlock_irq(&common->lock);
2478 2477
2479 if (!common->ops || !common->ops->thread_exits 2478 /* Eject media from all LUNs */
2480 || common->ops->thread_exits(common) < 0) {
2481 int i;
2482 2479
2483 down_write(&common->filesem); 2480 down_write(&common->filesem);
2484 for (i = 0; i < ARRAY_SIZE(common->luns); i++) { 2481 for (i = 0; i < ARRAY_SIZE(common->luns); i++) {
2485 struct fsg_lun *curlun = common->luns[i]; 2482 struct fsg_lun *curlun = common->luns[i];
2486 if (!curlun || !fsg_lun_is_open(curlun))
2487 continue;
2488 2483
2484 if (curlun && fsg_lun_is_open(curlun))
2489 fsg_lun_close(curlun); 2485 fsg_lun_close(curlun);
2490 curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
2491 }
2492 up_write(&common->filesem);
2493 } 2486 }
2487 up_write(&common->filesem);
2494 2488
2495 /* Let fsg_unbind() know the thread has exited */ 2489 /* Let fsg_unbind() know the thread has exited */
2496 complete_and_exit(&common->thread_notifier, 0); 2490 complete_and_exit(&common->thread_notifier, 0);
@@ -2681,13 +2675,6 @@ void fsg_common_remove_luns(struct fsg_common *common)
2681} 2675}
2682EXPORT_SYMBOL_GPL(fsg_common_remove_luns); 2676EXPORT_SYMBOL_GPL(fsg_common_remove_luns);
2683 2677
2684void fsg_common_set_ops(struct fsg_common *common,
2685 const struct fsg_operations *ops)
2686{
2687 common->ops = ops;
2688}
2689EXPORT_SYMBOL_GPL(fsg_common_set_ops);
2690
2691void fsg_common_free_buffers(struct fsg_common *common) 2678void fsg_common_free_buffers(struct fsg_common *common)
2692{ 2679{
2693 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers); 2680 _fsg_common_free_buffers(common->buffhds, common->fsg_num_buffers);
diff --git a/drivers/usb/gadget/function/f_mass_storage.h b/drivers/usb/gadget/function/f_mass_storage.h
index d3902313b8ac..dc05ca0c4359 100644
--- a/drivers/usb/gadget/function/f_mass_storage.h
+++ b/drivers/usb/gadget/function/f_mass_storage.h
@@ -60,17 +60,6 @@ struct fsg_module_parameters {
60struct fsg_common; 60struct fsg_common;
61 61
62/* FSF callback functions */ 62/* FSF callback functions */
63struct fsg_operations {
64 /*
65 * Callback function to call when thread exits. If no
66 * callback is set or it returns value lower then zero MSF
67 * will force eject all LUNs it operates on (including those
68 * marked as non-removable or with prevent_medium_removal flag
69 * set).
70 */
71 int (*thread_exits)(struct fsg_common *common);
72};
73
74struct fsg_lun_opts { 63struct fsg_lun_opts {
75 struct config_group group; 64 struct config_group group;
76 struct fsg_lun *lun; 65 struct fsg_lun *lun;
@@ -142,9 +131,6 @@ void fsg_common_remove_lun(struct fsg_lun *lun);
142 131
143void fsg_common_remove_luns(struct fsg_common *common); 132void fsg_common_remove_luns(struct fsg_common *common);
144 133
145void fsg_common_set_ops(struct fsg_common *common,
146 const struct fsg_operations *ops);
147
148int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg, 134int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg,
149 unsigned int id, const char *name, 135 unsigned int id, const char *name,
150 const char **name_pfx); 136 const char **name_pfx);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 8df244fc9d80..ea0da35a44e2 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -555,6 +555,7 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
555 size_t size; /* Amount of data in a TX request. */ 555 size_t size; /* Amount of data in a TX request. */
556 size_t bytes_copied = 0; 556 size_t bytes_copied = 0;
557 struct usb_request *req; 557 struct usb_request *req;
558 int value;
558 559
559 DBG(dev, "printer_write trying to send %d bytes\n", (int)len); 560 DBG(dev, "printer_write trying to send %d bytes\n", (int)len);
560 561
@@ -634,7 +635,11 @@ printer_write(struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
634 return -EAGAIN; 635 return -EAGAIN;
635 } 636 }
636 637
 637 if (usb_ep_queue(dev->in_ep, req, GFP_ATOMIC)) { 638 /* drop the lock only around usb_ep_queue() to avoid a deadlock */
639 spin_unlock(&dev->lock);
640 value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
641 spin_lock(&dev->lock);
642 if (value) {
638 list_add(&req->list, &dev->tx_reqs); 643 list_add(&req->list, &dev->tx_reqs);
639 spin_unlock_irqrestore(&dev->lock, flags); 644 spin_unlock_irqrestore(&dev->lock, flags);
640 mutex_unlock(&dev->lock_printer_io); 645 mutex_unlock(&dev->lock_printer_io);
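
The f_printer fix exists because usb_ep_queue() can invoke the request's completion handler synchronously, and that handler takes the same dev->lock, so calling it with the lock held can deadlock. A condensed sketch of the drop-the-lock-around-the-call pattern (the real function uses spin_lock_irqsave() and more state; this keeps only the locking shape):

#include <linux/spinlock.h>
#include <linux/usb/gadget.h>

struct printer_dev {		/* assumed container, only the fields used */
	spinlock_t lock;
	struct usb_ep *in_ep;
};

static int queue_tx(struct printer_dev *dev, struct usb_request *req)
{
	int value;

	spin_lock(&dev->lock);
	/* ...prepare req under the lock... */

	/*
	 * usb_ep_queue() may call req->complete before returning, and
	 * the completion handler takes dev->lock, so holding it across
	 * the call would deadlock. Unlock only around the call.
	 */
	spin_unlock(&dev->lock);
	value = usb_ep_queue(dev->in_ep, req, GFP_ATOMIC);
	spin_lock(&dev->lock);

	/* re-validate any state read earlier: it may have changed */
	spin_unlock(&dev->lock);
	return value;
}
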
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index 540f1c48c1a8..79f70ebf85dc 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -279,6 +279,7 @@ struct ffs_data {
279 } file_perms; 279 } file_perms;
280 280
281 struct eventfd_ctx *ffs_eventfd; 281 struct eventfd_ctx *ffs_eventfd;
282 struct workqueue_struct *io_completion_wq;
282 bool no_disconnect; 283 bool no_disconnect;
283 struct work_struct reset_work; 284 struct work_struct reset_work;
284 285
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 684900fcfe24..5c28bee327e1 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -28,7 +28,7 @@
28#include <linux/aio.h> 28#include <linux/aio.h>
29#include <linux/uio.h> 29#include <linux/uio.h>
30#include <linux/refcount.h> 30#include <linux/refcount.h>
31 31#include <linux/delay.h>
32#include <linux/device.h> 32#include <linux/device.h>
33#include <linux/moduleparam.h> 33#include <linux/moduleparam.h>
34 34
@@ -116,6 +116,7 @@ enum ep0_state {
116struct dev_data { 116struct dev_data {
117 spinlock_t lock; 117 spinlock_t lock;
118 refcount_t count; 118 refcount_t count;
119 int udc_usage;
119 enum ep0_state state; /* P: lock */ 120 enum ep0_state state; /* P: lock */
120 struct usb_gadgetfs_event event [N_EVENT]; 121 struct usb_gadgetfs_event event [N_EVENT];
121 unsigned ev_next; 122 unsigned ev_next;
@@ -513,9 +514,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
513 INIT_WORK(&priv->work, ep_user_copy_worker); 514 INIT_WORK(&priv->work, ep_user_copy_worker);
514 schedule_work(&priv->work); 515 schedule_work(&priv->work);
515 } 516 }
516 spin_unlock(&epdata->dev->lock);
517 517
518 usb_ep_free_request(ep, req); 518 usb_ep_free_request(ep, req);
519 spin_unlock(&epdata->dev->lock);
519 put_ep(epdata); 520 put_ep(epdata);
520} 521}
521 522
@@ -939,9 +940,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
939 struct usb_request *req = dev->req; 940 struct usb_request *req = dev->req;
940 941
941 if ((retval = setup_req (ep, req, 0)) == 0) { 942 if ((retval = setup_req (ep, req, 0)) == 0) {
943 ++dev->udc_usage;
942 spin_unlock_irq (&dev->lock); 944 spin_unlock_irq (&dev->lock);
943 retval = usb_ep_queue (ep, req, GFP_KERNEL); 945 retval = usb_ep_queue (ep, req, GFP_KERNEL);
944 spin_lock_irq (&dev->lock); 946 spin_lock_irq (&dev->lock);
947 --dev->udc_usage;
945 } 948 }
946 dev->state = STATE_DEV_CONNECTED; 949 dev->state = STATE_DEV_CONNECTED;
947 950
@@ -983,11 +986,14 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
983 retval = -EIO; 986 retval = -EIO;
984 else { 987 else {
985 len = min (len, (size_t)dev->req->actual); 988 len = min (len, (size_t)dev->req->actual);
986// FIXME don't call this with the spinlock held ... 989 ++dev->udc_usage;
990 spin_unlock_irq(&dev->lock);
987 if (copy_to_user (buf, dev->req->buf, len)) 991 if (copy_to_user (buf, dev->req->buf, len))
988 retval = -EFAULT; 992 retval = -EFAULT;
989 else 993 else
990 retval = len; 994 retval = len;
995 spin_lock_irq(&dev->lock);
996 --dev->udc_usage;
991 clean_req (dev->gadget->ep0, dev->req); 997 clean_req (dev->gadget->ep0, dev->req);
992 /* NOTE userspace can't yet choose to stall */ 998 /* NOTE userspace can't yet choose to stall */
993 } 999 }
@@ -1131,6 +1137,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1131 retval = setup_req (dev->gadget->ep0, dev->req, len); 1137 retval = setup_req (dev->gadget->ep0, dev->req, len);
1132 if (retval == 0) { 1138 if (retval == 0) {
1133 dev->state = STATE_DEV_CONNECTED; 1139 dev->state = STATE_DEV_CONNECTED;
1140 ++dev->udc_usage;
1134 spin_unlock_irq (&dev->lock); 1141 spin_unlock_irq (&dev->lock);
1135 if (copy_from_user (dev->req->buf, buf, len)) 1142 if (copy_from_user (dev->req->buf, buf, len))
1136 retval = -EFAULT; 1143 retval = -EFAULT;
@@ -1142,6 +1149,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1142 GFP_KERNEL); 1149 GFP_KERNEL);
1143 } 1150 }
1144 spin_lock_irq(&dev->lock); 1151 spin_lock_irq(&dev->lock);
1152 --dev->udc_usage;
1145 if (retval < 0) { 1153 if (retval < 0) {
1146 clean_req (dev->gadget->ep0, dev->req); 1154 clean_req (dev->gadget->ep0, dev->req);
1147 } else 1155 } else
@@ -1243,9 +1251,21 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
1243 struct usb_gadget *gadget = dev->gadget; 1251 struct usb_gadget *gadget = dev->gadget;
1244 long ret = -ENOTTY; 1252 long ret = -ENOTTY;
1245 1253
1246 if (gadget->ops->ioctl) 1254 spin_lock_irq(&dev->lock);
1255 if (dev->state == STATE_DEV_OPENED ||
1256 dev->state == STATE_DEV_UNBOUND) {
1257 /* Not bound to a UDC */
1258 } else if (gadget->ops->ioctl) {
1259 ++dev->udc_usage;
1260 spin_unlock_irq(&dev->lock);
1261
1247 ret = gadget->ops->ioctl (gadget, code, value); 1262 ret = gadget->ops->ioctl (gadget, code, value);
1248 1263
1264 spin_lock_irq(&dev->lock);
1265 --dev->udc_usage;
1266 }
1267 spin_unlock_irq(&dev->lock);
1268
1249 return ret; 1269 return ret;
1250} 1270}
1251 1271
@@ -1463,10 +1483,12 @@ delegate:
1463 if (value < 0) 1483 if (value < 0)
1464 break; 1484 break;
1465 1485
1486 ++dev->udc_usage;
1466 spin_unlock (&dev->lock); 1487 spin_unlock (&dev->lock);
1467 value = usb_ep_queue (gadget->ep0, dev->req, 1488 value = usb_ep_queue (gadget->ep0, dev->req,
1468 GFP_KERNEL); 1489 GFP_KERNEL);
1469 spin_lock (&dev->lock); 1490 spin_lock (&dev->lock);
1491 --dev->udc_usage;
1470 if (value < 0) { 1492 if (value < 0) {
1471 clean_req (gadget->ep0, dev->req); 1493 clean_req (gadget->ep0, dev->req);
1472 break; 1494 break;
@@ -1490,8 +1512,12 @@ delegate:
1490 req->length = value; 1512 req->length = value;
1491 req->zero = value < w_length; 1513 req->zero = value < w_length;
1492 1514
1515 ++dev->udc_usage;
1493 spin_unlock (&dev->lock); 1516 spin_unlock (&dev->lock);
1494 value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL); 1517 value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
1518 spin_lock(&dev->lock);
1519 --dev->udc_usage;
1520 spin_unlock(&dev->lock);
1495 if (value < 0) { 1521 if (value < 0) {
1496 DBG (dev, "ep_queue --> %d\n", value); 1522 DBG (dev, "ep_queue --> %d\n", value);
1497 req->status = 0; 1523 req->status = 0;
@@ -1518,21 +1544,24 @@ static void destroy_ep_files (struct dev_data *dev)
1518 /* break link to FS */ 1544 /* break link to FS */
1519 ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles); 1545 ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
1520 list_del_init (&ep->epfiles); 1546 list_del_init (&ep->epfiles);
1547 spin_unlock_irq (&dev->lock);
1548
1521 dentry = ep->dentry; 1549 dentry = ep->dentry;
1522 ep->dentry = NULL; 1550 ep->dentry = NULL;
1523 parent = d_inode(dentry->d_parent); 1551 parent = d_inode(dentry->d_parent);
1524 1552
1525 /* break link to controller */ 1553 /* break link to controller */
1554 mutex_lock(&ep->lock);
1526 if (ep->state == STATE_EP_ENABLED) 1555 if (ep->state == STATE_EP_ENABLED)
1527 (void) usb_ep_disable (ep->ep); 1556 (void) usb_ep_disable (ep->ep);
1528 ep->state = STATE_EP_UNBOUND; 1557 ep->state = STATE_EP_UNBOUND;
1529 usb_ep_free_request (ep->ep, ep->req); 1558 usb_ep_free_request (ep->ep, ep->req);
1530 ep->ep = NULL; 1559 ep->ep = NULL;
1560 mutex_unlock(&ep->lock);
1561
1531 wake_up (&ep->wait); 1562 wake_up (&ep->wait);
1532 put_ep (ep); 1563 put_ep (ep);
1533 1564
1534 spin_unlock_irq (&dev->lock);
1535
1536 /* break link to dcache */ 1565 /* break link to dcache */
1537 inode_lock(parent); 1566 inode_lock(parent);
1538 d_delete (dentry); 1567 d_delete (dentry);
@@ -1603,6 +1632,11 @@ gadgetfs_unbind (struct usb_gadget *gadget)
1603 1632
1604 spin_lock_irq (&dev->lock); 1633 spin_lock_irq (&dev->lock);
1605 dev->state = STATE_DEV_UNBOUND; 1634 dev->state = STATE_DEV_UNBOUND;
1635 while (dev->udc_usage > 0) {
1636 spin_unlock_irq(&dev->lock);
1637 usleep_range(1000, 2000);
1638 spin_lock_irq(&dev->lock);
1639 }
1606 spin_unlock_irq (&dev->lock); 1640 spin_unlock_irq (&dev->lock);
1607 1641
1608 destroy_ep_files (dev); 1642 destroy_ep_files (dev);
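
Every gadgetfs hunk above follows one scheme: increment dev->udc_usage under the lock before calling into the UDC, decrement it afterwards, and have gadgetfs_unbind() sleep-poll until the counter drains so no caller is still inside a UDC that is going away. A minimal sketch of that hand-rolled drain, with the structure trimmed to the two fields it needs:

#include <linux/spinlock.h>
#include <linux/delay.h>

struct dev_data {
	spinlock_t lock;
	int udc_usage;		/* callers currently inside the UDC */
};

static void call_into_udc(struct dev_data *dev, void (*udc_op)(void))
{
	spin_lock_irq(&dev->lock);
	++dev->udc_usage;
	spin_unlock_irq(&dev->lock);

	udc_op();		/* may sleep; must run unlocked */

	spin_lock_irq(&dev->lock);
	--dev->udc_usage;
	spin_unlock_irq(&dev->lock);
}

static void wait_for_udc_idle(struct dev_data *dev)
{
	spin_lock_irq(&dev->lock);
	while (dev->udc_usage > 0) {
		spin_unlock_irq(&dev->lock);
		usleep_range(1000, 2000);	/* let callers finish */
		spin_lock_irq(&dev->lock);
	}
	spin_unlock_irq(&dev->lock);
}
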
diff --git a/drivers/usb/gadget/legacy/mass_storage.c b/drivers/usb/gadget/legacy/mass_storage.c
index e99ab57ee3e5..fcba59782f26 100644
--- a/drivers/usb/gadget/legacy/mass_storage.c
+++ b/drivers/usb/gadget/legacy/mass_storage.c
@@ -107,15 +107,6 @@ static unsigned int fsg_num_buffers = CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS;
107 107
108FSG_MODULE_PARAMETERS(/* no prefix */, mod_data); 108FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
109 109
110static unsigned long msg_registered;
111static void msg_cleanup(void);
112
113static int msg_thread_exits(struct fsg_common *common)
114{
115 msg_cleanup();
116 return 0;
117}
118
119static int msg_do_config(struct usb_configuration *c) 110static int msg_do_config(struct usb_configuration *c)
120{ 111{
121 struct fsg_opts *opts; 112 struct fsg_opts *opts;
@@ -154,9 +145,6 @@ static struct usb_configuration msg_config_driver = {
154 145
155static int msg_bind(struct usb_composite_dev *cdev) 146static int msg_bind(struct usb_composite_dev *cdev)
156{ 147{
157 static const struct fsg_operations ops = {
158 .thread_exits = msg_thread_exits,
159 };
160 struct fsg_opts *opts; 148 struct fsg_opts *opts;
161 struct fsg_config config; 149 struct fsg_config config;
162 int status; 150 int status;
@@ -173,8 +161,6 @@ static int msg_bind(struct usb_composite_dev *cdev)
173 if (status) 161 if (status)
174 goto fail; 162 goto fail;
175 163
176 fsg_common_set_ops(opts->common, &ops);
177
178 status = fsg_common_set_cdev(opts->common, cdev, config.can_stall); 164 status = fsg_common_set_cdev(opts->common, cdev, config.can_stall);
179 if (status) 165 if (status)
180 goto fail_set_cdev; 166 goto fail_set_cdev;
@@ -256,18 +242,12 @@ MODULE_LICENSE("GPL");
256 242
257static int __init msg_init(void) 243static int __init msg_init(void)
258{ 244{
259 int ret; 245 return usb_composite_probe(&msg_driver);
260
261 ret = usb_composite_probe(&msg_driver);
262 set_bit(0, &msg_registered);
263
264 return ret;
265} 246}
266module_init(msg_init); 247module_init(msg_init);
267 248
268static void msg_cleanup(void) 249static void __exit msg_cleanup(void)
269{ 250{
270 if (test_and_clear_bit(0, &msg_registered)) 251 usb_composite_unregister(&msg_driver);
271 usb_composite_unregister(&msg_driver);
272} 252}
273module_exit(msg_cleanup); 253module_exit(msg_cleanup);
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 7cd5c969fcbe..1e9567091d86 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -273,6 +273,7 @@ config USB_SNP_CORE
273config USB_SNP_UDC_PLAT 273config USB_SNP_UDC_PLAT
274 tristate "Synopsys USB 2.0 Device controller" 274 tristate "Synopsys USB 2.0 Device controller"
275 depends on USB_GADGET && OF && HAS_DMA 275 depends on USB_GADGET && OF && HAS_DMA
276 depends on EXTCON || EXTCON=n
276 select USB_GADGET_DUALSPEED 277 select USB_GADGET_DUALSPEED
277 select USB_SNP_CORE 278 select USB_SNP_CORE
278 default ARCH_BCM_IPROC 279 default ARCH_BCM_IPROC
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 98d71400f8a1..a884c022df7a 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -29,6 +29,8 @@
29#include <linux/of_gpio.h> 29#include <linux/of_gpio.h>
30 30
31#include "atmel_usba_udc.h" 31#include "atmel_usba_udc.h"
32#define USBA_VBUS_IRQFLAGS (IRQF_ONESHOT \
33 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING)
32 34
33#ifdef CONFIG_USB_GADGET_DEBUG_FS 35#ifdef CONFIG_USB_GADGET_DEBUG_FS
34#include <linux/debugfs.h> 36#include <linux/debugfs.h>
@@ -2361,7 +2363,7 @@ static int usba_udc_probe(struct platform_device *pdev)
2361 IRQ_NOAUTOEN); 2363 IRQ_NOAUTOEN);
2362 ret = devm_request_threaded_irq(&pdev->dev, 2364 ret = devm_request_threaded_irq(&pdev->dev,
2363 gpio_to_irq(udc->vbus_pin), NULL, 2365 gpio_to_irq(udc->vbus_pin), NULL,
2364 usba_vbus_irq_thread, IRQF_ONESHOT, 2366 usba_vbus_irq_thread, USBA_VBUS_IRQFLAGS,
2365 "atmel_usba_udc", udc); 2367 "atmel_usba_udc", udc);
2366 if (ret) { 2368 if (ret) {
2367 udc->vbus_pin = -ENODEV; 2369 udc->vbus_pin = -ENODEV;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 75c51ca4ee0f..d41d07aae0ce 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1320,8 +1320,7 @@ static int udc_bind_to_driver(struct usb_udc *udc, struct usb_gadget_driver *dri
1320 udc->dev.driver = &driver->driver; 1320 udc->dev.driver = &driver->driver;
1321 udc->gadget->dev.driver = &driver->driver; 1321 udc->gadget->dev.driver = &driver->driver;
1322 1322
1323 if (driver->max_speed < udc->gadget->max_speed) 1323 usb_gadget_udc_set_speed(udc, driver->max_speed);
1324 usb_gadget_udc_set_speed(udc, driver->max_speed);
1325 1324
1326 ret = driver->bind(udc->gadget, driver); 1325 ret = driver->bind(udc->gadget, driver);
1327 if (ret) 1326 if (ret)
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index a030d7923d7d..b17618a55f1b 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -237,6 +237,8 @@ struct dummy_hcd {
237 237
238 struct usb_device *udev; 238 struct usb_device *udev;
239 struct list_head urbp_list; 239 struct list_head urbp_list;
240 struct urbp *next_frame_urbp;
241
240 u32 stream_en_ep; 242 u32 stream_en_ep;
241 u8 num_stream[30 / 2]; 243 u8 num_stream[30 / 2];
242 244
@@ -253,11 +255,13 @@ struct dummy {
253 */ 255 */
254 struct dummy_ep ep[DUMMY_ENDPOINTS]; 256 struct dummy_ep ep[DUMMY_ENDPOINTS];
255 int address; 257 int address;
258 int callback_usage;
256 struct usb_gadget gadget; 259 struct usb_gadget gadget;
257 struct usb_gadget_driver *driver; 260 struct usb_gadget_driver *driver;
258 struct dummy_request fifo_req; 261 struct dummy_request fifo_req;
259 u8 fifo_buf[FIFO_SIZE]; 262 u8 fifo_buf[FIFO_SIZE];
260 u16 devstatus; 263 u16 devstatus;
264 unsigned ints_enabled:1;
261 unsigned udc_suspended:1; 265 unsigned udc_suspended:1;
262 unsigned pullup:1; 266 unsigned pullup:1;
263 267
@@ -375,11 +379,10 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
375 USB_PORT_STAT_CONNECTION) == 0) 379 USB_PORT_STAT_CONNECTION) == 0)
376 dum_hcd->port_status |= 380 dum_hcd->port_status |=
377 (USB_PORT_STAT_C_CONNECTION << 16); 381 (USB_PORT_STAT_C_CONNECTION << 16);
378 if ((dum_hcd->port_status & 382 if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) &&
379 USB_PORT_STAT_ENABLE) == 1 && 383 (dum_hcd->port_status &
380 (dum_hcd->port_status & 384 USB_PORT_STAT_LINK_STATE) == USB_SS_PORT_LS_U0 &&
381 USB_SS_PORT_LS_U0) == 1 && 385 dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
382 dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
383 dum_hcd->active = 1; 386 dum_hcd->active = 1;
384 } 387 }
385 } else { 388 } else {
@@ -440,18 +443,27 @@ static void set_link_state(struct dummy_hcd *dum_hcd)
440 (~dum_hcd->old_status) & dum_hcd->port_status; 443 (~dum_hcd->old_status) & dum_hcd->port_status;
441 444
442 /* Report reset and disconnect events to the driver */ 445 /* Report reset and disconnect events to the driver */
443 if (dum->driver && (disconnect || reset)) { 446 if (dum->ints_enabled && (disconnect || reset)) {
444 stop_activity(dum); 447 stop_activity(dum);
448 ++dum->callback_usage;
449 spin_unlock(&dum->lock);
445 if (reset) 450 if (reset)
446 usb_gadget_udc_reset(&dum->gadget, dum->driver); 451 usb_gadget_udc_reset(&dum->gadget, dum->driver);
447 else 452 else
448 dum->driver->disconnect(&dum->gadget); 453 dum->driver->disconnect(&dum->gadget);
454 spin_lock(&dum->lock);
455 --dum->callback_usage;
449 } 456 }
450 } else if (dum_hcd->active != dum_hcd->old_active) { 457 } else if (dum_hcd->active != dum_hcd->old_active &&
458 dum->ints_enabled) {
459 ++dum->callback_usage;
460 spin_unlock(&dum->lock);
451 if (dum_hcd->old_active && dum->driver->suspend) 461 if (dum_hcd->old_active && dum->driver->suspend)
452 dum->driver->suspend(&dum->gadget); 462 dum->driver->suspend(&dum->gadget);
453 else if (!dum_hcd->old_active && dum->driver->resume) 463 else if (!dum_hcd->old_active && dum->driver->resume)
454 dum->driver->resume(&dum->gadget); 464 dum->driver->resume(&dum->gadget);
465 spin_lock(&dum->lock);
466 --dum->callback_usage;
455 } 467 }
456 468
457 dum_hcd->old_status = dum_hcd->port_status; 469 dum_hcd->old_status = dum_hcd->port_status;
@@ -972,8 +984,11 @@ static int dummy_udc_start(struct usb_gadget *g,
972 * can't enumerate without help from the driver we're binding. 984 * can't enumerate without help from the driver we're binding.
973 */ 985 */
974 986
987 spin_lock_irq(&dum->lock);
975 dum->devstatus = 0; 988 dum->devstatus = 0;
976 dum->driver = driver; 989 dum->driver = driver;
990 dum->ints_enabled = 1;
991 spin_unlock_irq(&dum->lock);
977 992
978 return 0; 993 return 0;
979} 994}
@@ -984,6 +999,16 @@ static int dummy_udc_stop(struct usb_gadget *g)
984 struct dummy *dum = dum_hcd->dum; 999 struct dummy *dum = dum_hcd->dum;
985 1000
986 spin_lock_irq(&dum->lock); 1001 spin_lock_irq(&dum->lock);
1002 dum->ints_enabled = 0;
1003 stop_activity(dum);
1004
1005 /* emulate synchronize_irq(): wait for callbacks to finish */
1006 while (dum->callback_usage > 0) {
1007 spin_unlock_irq(&dum->lock);
1008 usleep_range(1000, 2000);
1009 spin_lock_irq(&dum->lock);
1010 }
1011
987 dum->driver = NULL; 1012 dum->driver = NULL;
988 spin_unlock_irq(&dum->lock); 1013 spin_unlock_irq(&dum->lock);
989 1014
@@ -1037,7 +1062,12 @@ static int dummy_udc_probe(struct platform_device *pdev)
1037 memzero_explicit(&dum->gadget, sizeof(struct usb_gadget)); 1062 memzero_explicit(&dum->gadget, sizeof(struct usb_gadget));
1038 dum->gadget.name = gadget_name; 1063 dum->gadget.name = gadget_name;
1039 dum->gadget.ops = &dummy_ops; 1064 dum->gadget.ops = &dummy_ops;
1040 dum->gadget.max_speed = USB_SPEED_SUPER; 1065 if (mod_data.is_super_speed)
1066 dum->gadget.max_speed = USB_SPEED_SUPER;
1067 else if (mod_data.is_high_speed)
1068 dum->gadget.max_speed = USB_SPEED_HIGH;
1069 else
1070 dum->gadget.max_speed = USB_SPEED_FULL;
1041 1071
1042 dum->gadget.dev.parent = &pdev->dev; 1072 dum->gadget.dev.parent = &pdev->dev;
1043 init_dummy_udc_hw(dum); 1073 init_dummy_udc_hw(dum);
@@ -1246,6 +1276,8 @@ static int dummy_urb_enqueue(
1246 1276
1247 list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list); 1277 list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
1248 urb->hcpriv = urbp; 1278 urb->hcpriv = urbp;
1279 if (!dum_hcd->next_frame_urbp)
1280 dum_hcd->next_frame_urbp = urbp;
1249 if (usb_pipetype(urb->pipe) == PIPE_CONTROL) 1281 if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
1250 urb->error_count = 1; /* mark as a new urb */ 1282 urb->error_count = 1; /* mark as a new urb */
1251 1283
@@ -1521,6 +1553,8 @@ static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
1521 if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ? 1553 if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
1522 dum->ss_hcd : dum->hs_hcd))) 1554 dum->ss_hcd : dum->hs_hcd)))
1523 return NULL; 1555 return NULL;
1556 if (!dum->ints_enabled)
1557 return NULL;
1524 if ((address & ~USB_DIR_IN) == 0) 1558 if ((address & ~USB_DIR_IN) == 0)
1525 return &dum->ep[0]; 1559 return &dum->ep[0];
1526 for (i = 1; i < DUMMY_ENDPOINTS; i++) { 1560 for (i = 1; i < DUMMY_ENDPOINTS; i++) {
@@ -1762,6 +1796,7 @@ static void dummy_timer(unsigned long _dum_hcd)
1762 spin_unlock_irqrestore(&dum->lock, flags); 1796 spin_unlock_irqrestore(&dum->lock, flags);
1763 return; 1797 return;
1764 } 1798 }
1799 dum_hcd->next_frame_urbp = NULL;
1765 1800
1766 for (i = 0; i < DUMMY_ENDPOINTS; i++) { 1801 for (i = 0; i < DUMMY_ENDPOINTS; i++) {
1767 if (!ep_info[i].name) 1802 if (!ep_info[i].name)
@@ -1778,6 +1813,10 @@ restart:
1778 int type; 1813 int type;
1779 int status = -EINPROGRESS; 1814 int status = -EINPROGRESS;
1780 1815
1816 /* stop when we reach URBs queued after the timer interrupt */
1817 if (urbp == dum_hcd->next_frame_urbp)
1818 break;
1819
1781 urb = urbp->urb; 1820 urb = urbp->urb;
1782 if (urb->unlinked) 1821 if (urb->unlinked)
1783 goto return_urb; 1822 goto return_urb;
@@ -1857,10 +1896,12 @@ restart:
1857 * until setup() returns; no reentrancy issues etc. 1896 * until setup() returns; no reentrancy issues etc.
1858 */ 1897 */
1859 if (value > 0) { 1898 if (value > 0) {
1899 ++dum->callback_usage;
1860 spin_unlock(&dum->lock); 1900 spin_unlock(&dum->lock);
1861 value = dum->driver->setup(&dum->gadget, 1901 value = dum->driver->setup(&dum->gadget,
1862 &setup); 1902 &setup);
1863 spin_lock(&dum->lock); 1903 spin_lock(&dum->lock);
1904 --dum->callback_usage;
1864 1905
1865 if (value >= 0) { 1906 if (value >= 0) {
1866 /* no delays (max 64KB data stage) */ 1907 /* no delays (max 64KB data stage) */
@@ -2561,8 +2602,6 @@ static struct hc_driver dummy_hcd = {
2561 .product_desc = "Dummy host controller", 2602 .product_desc = "Dummy host controller",
2562 .hcd_priv_size = sizeof(struct dummy_hcd), 2603 .hcd_priv_size = sizeof(struct dummy_hcd),
2563 2604
2564 .flags = HCD_USB3 | HCD_SHARED,
2565
2566 .reset = dummy_setup, 2605 .reset = dummy_setup,
2567 .start = dummy_start, 2606 .start = dummy_start,
2568 .stop = dummy_stop, 2607 .stop = dummy_stop,
@@ -2591,8 +2630,12 @@ static int dummy_hcd_probe(struct platform_device *pdev)
2591 dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc); 2630 dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);
2592 dum = *((void **)dev_get_platdata(&pdev->dev)); 2631 dum = *((void **)dev_get_platdata(&pdev->dev));
2593 2632
2594 if (!mod_data.is_super_speed) 2633 if (mod_data.is_super_speed)
2634 dummy_hcd.flags = HCD_USB3 | HCD_SHARED;
2635 else if (mod_data.is_high_speed)
2595 dummy_hcd.flags = HCD_USB2; 2636 dummy_hcd.flags = HCD_USB2;
2637 else
2638 dummy_hcd.flags = HCD_USB11;
2596 hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev)); 2639 hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
2597 if (!hs_hcd) 2640 if (!hs_hcd)
2598 return -ENOMEM; 2641 return -ENOMEM;
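
With HCD_USB3 | HCD_SHARED no longer hard-coded, both the gadget's max_speed and the hcd's flags are now derived from the is_super_speed/is_high_speed module parameters. A small sketch of the mapping; the mod_data struct shape is assumed from how the hunks read it:

#include <linux/usb.h>
#include <linux/usb/gadget.h>
#include <linux/usb/hcd.h>

struct dummy_mod_data {		/* assumed shape of mod_data */
	bool is_super_speed;
	bool is_high_speed;
};

static enum usb_device_speed dummy_max_speed(const struct dummy_mod_data *md)
{
	if (md->is_super_speed)
		return USB_SPEED_SUPER;
	if (md->is_high_speed)
		return USB_SPEED_HIGH;
	return USB_SPEED_FULL;		/* neither parameter set */
}

static int dummy_hcd_flags(const struct dummy_mod_data *md)
{
	if (md->is_super_speed)
		return HCD_USB3 | HCD_SHARED;	/* shared with an HS hcd */
	if (md->is_high_speed)
		return HCD_USB2;
	return HCD_USB11;
}
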
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index df37c1e6e9d5..63a206122058 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -1038,7 +1038,7 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
1038 usb3_ep->ep.maxpacket); 1038 usb3_ep->ep.maxpacket);
1039 u8 *buf = usb3_req->req.buf + usb3_req->req.actual; 1039 u8 *buf = usb3_req->req.buf + usb3_req->req.actual;
1040 u32 tmp = 0; 1040 u32 tmp = 0;
 1041 bool is_last; 1041 bool is_last = !len;
1042 1042
1043 if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0) 1043 if (usb3_wait_pipe_status(usb3_ep, PX_STA_BUFSTS) < 0)
1044 return -EBUSY; 1044 return -EBUSY;
@@ -1059,7 +1059,8 @@ static int usb3_write_pipe(struct renesas_usb3_ep *usb3_ep,
1059 usb3_write(usb3, tmp, fifo_reg); 1059 usb3_write(usb3, tmp, fifo_reg);
1060 } 1060 }
1061 1061
1062 is_last = usb3_is_transfer_complete(usb3_ep, usb3_req); 1062 if (!is_last)
1063 is_last = usb3_is_transfer_complete(usb3_ep, usb3_req);
1063 /* Send the data */ 1064 /* Send the data */
1064 usb3_set_px_con_send(usb3_ep, len, is_last); 1065 usb3_set_px_con_send(usb3_ep, len, is_last);
1065 1066
@@ -1150,7 +1151,8 @@ static void usb3_start_pipe0(struct renesas_usb3_ep *usb3_ep,
1150 usb3_set_p0_con_for_ctrl_read_data(usb3); 1151 usb3_set_p0_con_for_ctrl_read_data(usb3);
1151 } else { 1152 } else {
1152 usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD); 1153 usb3_clear_bit(usb3, P0_MOD_DIR, USB3_P0_MOD);
1153 usb3_set_p0_con_for_ctrl_write_data(usb3); 1154 if (usb3_req->req.length)
1155 usb3_set_p0_con_for_ctrl_write_data(usb3);
1154 } 1156 }
1155 1157
1156 usb3_p0_xfer(usb3_ep, usb3_req); 1158 usb3_p0_xfer(usb3_ep, usb3_req);
@@ -2053,7 +2055,16 @@ static u32 usb3_calc_ramarea(int ram_size)
2053static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep, 2055static u32 usb3_calc_rammap_val(struct renesas_usb3_ep *usb3_ep,
2054 const struct usb_endpoint_descriptor *desc) 2056 const struct usb_endpoint_descriptor *desc)
2055{ 2057{
2056 return usb3_ep->rammap_val | PN_RAMMAP_MPKT(usb_endpoint_maxp(desc)); 2058 int i;
2059 const u32 max_packet_array[] = {8, 16, 32, 64, 512};
2060 u32 mpkt = PN_RAMMAP_MPKT(1024);
2061
2062 for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) {
2063 if (usb_endpoint_maxp(desc) <= max_packet_array[i])
2064 mpkt = PN_RAMMAP_MPKT(max_packet_array[i]);
2065 }
2066
2067 return usb3_ep->rammap_val | mpkt;
2057} 2068}
2058 2069
2059static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep, 2070static int usb3_enable_pipe_n(struct renesas_usb3_ep *usb3_ep,
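
usb3_calc_rammap_val() now quantizes the endpoint's max packet size onto the buckets the controller's RAM map supports. One thing worth noting: as committed, the loop has no break, so every value up to 512 lands in the 512 bucket. The user-space sketch below adds a break so the smallest sufficient bucket is chosen; treat that as an assumption about the intended rounding, not as what the patch does:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Pn_RAMMAP max-packet buckets supported by the controller, ascending. */
static const unsigned int max_packet_array[] = {8, 16, 32, 64, 512};

static unsigned int pick_mpkt(unsigned int maxp)
{
	unsigned int mpkt = 1024;	/* fallback bucket */
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(max_packet_array); i++) {
		if (maxp <= max_packet_array[i]) {
			mpkt = max_packet_array[i];
			break;	/* smallest bucket that fits */
		}
	}
	return mpkt;
}

int main(void)
{
	printf("64  -> %u\n", pick_mpkt(64));	/* 64 */
	printf("513 -> %u\n", pick_mpkt(513));	/* 1024 */
	return 0;
}
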
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 658d9d1f9ea3..6dda3623a276 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -447,7 +447,7 @@ static int usb_asmedia_wait_write(struct pci_dev *pdev)
447 if ((value & ASMT_CONTROL_WRITE_BIT) == 0) 447 if ((value & ASMT_CONTROL_WRITE_BIT) == 0)
448 return 0; 448 return 0;
449 449
450 usleep_range(40, 60); 450 udelay(50);
451 } 451 }
452 452
453 dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__); 453 dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__);
@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
1022 * 1022 *
1023 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS. 1023 * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
1024 * It signals to the BIOS that the OS wants control of the host controller, 1024 * It signals to the BIOS that the OS wants control of the host controller,
1025 * and then waits 5 seconds for the BIOS to hand over control. 1025 * and then waits 1 second for the BIOS to hand over control.
1026 * If we timeout, assume the BIOS is broken and take control anyway. 1026 * If we timeout, assume the BIOS is broken and take control anyway.
1027 */ 1027 */
1028static void quirk_usb_handoff_xhci(struct pci_dev *pdev) 1028static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
@@ -1069,9 +1069,9 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
1069 if (val & XHCI_HC_BIOS_OWNED) { 1069 if (val & XHCI_HC_BIOS_OWNED) {
1070 writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); 1070 writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
1071 1071
1072 /* Wait for 5 seconds with 10 microsecond polling interval */ 1072 /* Wait for 1 second with 10 microsecond polling interval */
1073 timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, 1073 timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
1074 0, 5000, 10); 1074 0, 1000000, 10);
1075 1075
1076 /* Assume a buggy BIOS and take HC ownership anyway */ 1076 /* Assume a buggy BIOS and take HC ownership anyway */
1077 if (timeout) { 1077 if (timeout) {
@@ -1100,7 +1100,7 @@ hc_init:
1100 * operational or runtime registers. Wait 5 seconds and no more. 1100 * operational or runtime registers. Wait 5 seconds and no more.
1101 */ 1101 */
1102 timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, 1102 timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0,
1103 5000, 10); 1103 5000000, 10);
1104 /* Assume a buggy HC and start HC initialization anyway */ 1104 /* Assume a buggy HC and start HC initialization anyway */
1105 if (timeout) { 1105 if (timeout) {
1106 val = readl(op_reg_base + XHCI_STS_OFFSET); 1106 val = readl(op_reg_base + XHCI_STS_OFFSET);
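
The pci-quirks fix is purely a units bug: handshake() takes its timeout in microseconds, so the old value of 5000 waited 5 ms, not the 5 seconds the comment promised. A sketch of such a poll loop with the units spelled out; the signature mirrors how the quirk code calls the helper and is an illustration, not the exact kernel function:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>

/*
 * Poll @ptr until (readl(@ptr) & @mask) == @done, giving up after
 * @usec_timeout microseconds and delaying @usec_delay between reads.
 * Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int handshake_sketch(void __iomem *ptr, u32 mask, u32 done,
			    int usec_timeout, int usec_delay)
{
	u32 result;

	do {
		result = readl(ptr);
		if ((result & mask) == done)
			return 0;
		udelay(usec_delay);
		usec_timeout -= usec_delay;
	} while (usec_timeout > 0);

	return -ETIMEDOUT;
}

/* 1 s BIOS handoff wait = 1000000 us, polled every 10 us:
 * handshake_sketch(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, 0, 1000000, 10);
 */
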
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index ad89a6d4111b..da9158f171cb 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -112,7 +112,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
112 112
113 /* If PSI table exists, add the custom speed attributes from it */ 113 /* If PSI table exists, add the custom speed attributes from it */
114 if (usb3_1 && xhci->usb3_rhub.psi_count) { 114 if (usb3_1 && xhci->usb3_rhub.psi_count) {
115 u32 ssp_cap_base, bm_attrib, psi; 115 u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
116 int offset; 116 int offset;
117 117
118 ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE; 118 ssp_cap_base = USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE;
@@ -139,6 +139,15 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
139 for (i = 0; i < xhci->usb3_rhub.psi_count; i++) { 139 for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
140 psi = xhci->usb3_rhub.psi[i]; 140 psi = xhci->usb3_rhub.psi[i];
141 psi &= ~USB_SSP_SUBLINK_SPEED_RSVD; 141 psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
142 psi_exp = XHCI_EXT_PORT_PSIE(psi);
143 psi_mant = XHCI_EXT_PORT_PSIM(psi);
144
 145 /* Shift to Gbps and set SSP Link BIT(14) if 10 Gbps */
146 for (; psi_exp < 3; psi_exp++)
147 psi_mant /= 1000;
148 if (psi_mant >= 10)
149 psi |= BIT(14);
150
142 if ((psi & PLT_MASK) == PLT_SYM) { 151 if ((psi & PLT_MASK) == PLT_SYM) {
143 /* Symmetric, create SSA RX and TX from one PSI entry */ 152 /* Symmetric, create SSA RX and TX from one PSI entry */
144 put_unaligned_le32(psi, &buf[offset]); 153 put_unaligned_le32(psi, &buf[offset]);
@@ -1506,9 +1515,6 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1506 t2 |= PORT_WKOC_E | PORT_WKCONN_E; 1515 t2 |= PORT_WKOC_E | PORT_WKCONN_E;
1507 t2 &= ~PORT_WKDISC_E; 1516 t2 &= ~PORT_WKDISC_E;
1508 } 1517 }
1509 if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
1510 (hcd->speed < HCD_USB3))
1511 t2 &= ~PORT_WAKE_BITS;
1512 } else 1518 } else
1513 t2 &= ~PORT_WAKE_BITS; 1519 t2 &= ~PORT_WAKE_BITS;
1514 1520
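
The BOS-descriptor hunk decodes each PSI entry's exponent/mantissa pair into Gb/s before deciding whether to flag the link as SuperSpeedPlus. A self-contained sketch of that decode; PSIE is a power-of-1000 exponent (0 = b/s up to 3 = Gb/s), and the bit-field macros below assume the layout implied by the hunk:

#include <stdint.h>

#define XHCI_EXT_PORT_PSIE(x)	(((x) >> 4) & 0x3)	/* assumed layout */
#define XHCI_EXT_PORT_PSIM(x)	(((x) >> 16) & 0xffff)	/* assumed layout */
#define PSI_SSP_LINK_BIT	(1u << 14)

static uint32_t psi_mark_ssp(uint32_t psi)
{
	uint32_t psi_exp = XHCI_EXT_PORT_PSIE(psi);
	uint32_t psi_mant = XHCI_EXT_PORT_PSIM(psi);

	/* Normalize the mantissa to Gb/s (exponent 3). */
	for (; psi_exp < 3; psi_exp++)
		psi_mant /= 1000;

	if (psi_mant >= 10)	/* 10 Gb/s or faster means SSP */
		psi |= PSI_SSP_LINK_BIT;
	return psi;
}
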
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 8071c8fdd15e..76f392954733 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -54,11 +54,6 @@
54#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 54#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
55#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0 55#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
56 56
57#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
58#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
59#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
60#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
61
62#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 57#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
63 58
64static const char hcd_name[] = "xhci_hcd"; 59static const char hcd_name[] = "xhci_hcd";
@@ -142,13 +137,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
142 if (pdev->vendor == PCI_VENDOR_ID_AMD) 137 if (pdev->vendor == PCI_VENDOR_ID_AMD)
143 xhci->quirks |= XHCI_TRUST_TX_LENGTH; 138 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
144 139
145 if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
146 ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
147 (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
148 (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
149 (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
150 xhci->quirks |= XHCI_U2_DISABLE_WAKE;
151
152 if (pdev->vendor == PCI_VENDOR_ID_INTEL) { 140 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
153 xhci->quirks |= XHCI_LPM_SUPPORT; 141 xhci->quirks |= XHCI_LPM_SUPPORT;
154 xhci->quirks |= XHCI_INTEL_HOST; 142 xhci->quirks |= XHCI_INTEL_HOST;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 163bafde709f..1cb6eaef4ae1 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -178,14 +178,18 @@ static int xhci_plat_probe(struct platform_device *pdev)
178 * 2. xhci_plat is child of a device from firmware (dwc3-plat) 178 * 2. xhci_plat is child of a device from firmware (dwc3-plat)
179 * 3. xhci_plat is grandchild of a pci device (dwc3-pci) 179 * 3. xhci_plat is grandchild of a pci device (dwc3-pci)
180 */ 180 */
181 sysdev = &pdev->dev; 181 for (sysdev = &pdev->dev; sysdev; sysdev = sysdev->parent) {
182 if (sysdev->parent && !sysdev->of_node && sysdev->parent->of_node) 182 if (is_of_node(sysdev->fwnode) ||
183 sysdev = sysdev->parent; 183 is_acpi_device_node(sysdev->fwnode))
184 break;
184#ifdef CONFIG_PCI 185#ifdef CONFIG_PCI
185 else if (sysdev->parent && sysdev->parent->parent && 186 else if (sysdev->bus == &pci_bus_type)
186 sysdev->parent->parent->bus == &pci_bus_type) 187 break;
187 sysdev = sysdev->parent->parent;
188#endif 188#endif
189 }
190
191 if (!sysdev)
192 sysdev = &pdev->dev;
189 193
190 /* Try to set 64-bit DMA first */ 194 /* Try to set 64-bit DMA first */
191 if (WARN_ON(!sysdev->dma_mask)) 195 if (WARN_ON(!sysdev->dma_mask))
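
Instead of hard-coding one or two parent hops, xhci_plat_probe() now walks up the device hierarchy until it finds the ancestor that actually carries firmware description (an OF or ACPI node) or sits on the PCI bus, and uses that device for DMA configuration. A reduced sketch of the walk:

#include <linux/device.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/pci.h>

static struct device *find_sysdev(struct device *dev)
{
	struct device *sysdev;

	for (sysdev = dev; sysdev; sysdev = sysdev->parent) {
		/* A device described by DT or ACPI owns the DMA setup. */
		if (is_of_node(sysdev->fwnode) ||
		    is_acpi_device_node(sysdev->fwnode))
			break;
#ifdef CONFIG_PCI
		/* ...as does the PCI ancestor of a dwc3-pci child. */
		if (sysdev->bus == &pci_bus_type)
			break;
#endif
	}
	return sysdev ? sysdev : dev;	/* fall back to the device itself */
}
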
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index b2ff1ff1a02f..ee198ea47f49 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1703,7 +1703,8 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
1703 if (xhci->quirks & XHCI_MTK_HOST) { 1703 if (xhci->quirks & XHCI_MTK_HOST) {
1704 ret = xhci_mtk_add_ep_quirk(hcd, udev, ep); 1704 ret = xhci_mtk_add_ep_quirk(hcd, udev, ep);
1705 if (ret < 0) { 1705 if (ret < 0) {
1706 xhci_free_endpoint_ring(xhci, virt_dev, ep_index); 1706 xhci_ring_free(xhci, virt_dev->eps[ep_index].new_ring);
1707 virt_dev->eps[ep_index].new_ring = NULL;
1707 return ret; 1708 return ret;
1708 } 1709 }
1709 } 1710 }
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 2abaa4d6d39d..2b48aa4f6b76 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -735,6 +735,8 @@ struct xhci_ep_ctx {
735#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK) 735#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
736/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */ 736/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
737#define EP_HAS_LSA (1 << 15) 737#define EP_HAS_LSA (1 << 15)
738/* hosts with LEC=1 use bits 31:24 as ESIT high bits. */
739#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p) (((p) >> 24) & 0xff)
738 740
739/* ep_info2 bitmasks */ 741/* ep_info2 bitmasks */
740/* 742/*
@@ -1681,7 +1683,7 @@ struct xhci_bus_state {
1681 1683
1682static inline unsigned int hcd_index(struct usb_hcd *hcd) 1684static inline unsigned int hcd_index(struct usb_hcd *hcd)
1683{ 1685{
1684 if (hcd->speed == HCD_USB3) 1686 if (hcd->speed >= HCD_USB3)
1685 return 0; 1687 return 0;
1686 else 1688 else
1687 return 1; 1689 return 1;
@@ -1826,7 +1828,7 @@ struct xhci_hcd {
1826/* For controller with a broken Port Disable implementation */ 1828/* For controller with a broken Port Disable implementation */
1827#define XHCI_BROKEN_PORT_PED (1 << 25) 1829#define XHCI_BROKEN_PORT_PED (1 << 25)
1828#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) 1830#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
1829#define XHCI_U2_DISABLE_WAKE (1 << 27) 1831/* Reserved. It was XHCI_U2_DISABLE_WAKE */
1830#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) 1832#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
1831 1833
1832 unsigned int num_active_eps; 1834 unsigned int num_active_eps;
@@ -2540,8 +2542,8 @@ static inline const char *xhci_decode_ep_context(u32 info, u32 info2, u64 deq,
2540 u8 lsa; 2542 u8 lsa;
2541 u8 hid; 2543 u8 hid;
2542 2544
2543 esit = EP_MAX_ESIT_PAYLOAD_HI(info) << 16 | 2545 esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
2544 EP_MAX_ESIT_PAYLOAD_LO(tx_info); 2546 CTX_TO_MAX_ESIT_PAYLOAD(tx_info);
2545 2547
2546 ep_state = info & EP_STATE_MASK; 2548 ep_state = info & EP_STATE_MASK;
2547 max_pstr = info & EP_MAXPSTREAMS_MASK; 2549 max_pstr = info & EP_MAXPSTREAMS_MASK;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index d1af831f43eb..68f26904c316 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -282,11 +282,26 @@ static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
282 struct usbhs_fifo *fifo) 282 struct usbhs_fifo *fifo)
283{ 283{
284 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); 284 struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
285 int ret = 0;
285 286
286 if (!usbhs_pipe_is_dcp(pipe)) 287 if (!usbhs_pipe_is_dcp(pipe)) {
287 usbhsf_fifo_barrier(priv, fifo); 288 /*
 289 * Check the pipe condition first: for an empty RX pipe,
 290 * usbhsf_fifo_barrier() would return -EBUSY only after about
 291 * a 10 msec delay in the interrupt handler.
292 */
293 if (usbhs_pipe_is_dir_in(pipe))
294 ret = usbhs_pipe_is_accessible(pipe);
295 if (!ret)
296 ret = usbhsf_fifo_barrier(priv, fifo);
297 }
288 298
289 usbhs_write(priv, fifo->ctr, BCLR); 299 /*
 300 * For a non-DCP pipe, BCLR may be set only when
 301 * usbhsf_fifo_barrier() above returned 0.
302 */
303 if (!ret)
304 usbhs_write(priv, fifo->ctr, BCLR);
290} 305}
291 306
292static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv, 307static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 1a59f335b063..a3ccb899df60 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -834,13 +834,25 @@ Retry_Sense:
834 if (result == USB_STOR_TRANSPORT_GOOD) { 834 if (result == USB_STOR_TRANSPORT_GOOD) {
835 srb->result = SAM_STAT_GOOD; 835 srb->result = SAM_STAT_GOOD;
836 srb->sense_buffer[0] = 0x0; 836 srb->sense_buffer[0] = 0x0;
837 }
838
839 /*
840 * ATA-passthru commands use sense data to report
841 * the command completion status, and often devices
842 * return Check Condition status when nothing is
843 * wrong.
844 */
845 else if (srb->cmnd[0] == ATA_16 ||
846 srb->cmnd[0] == ATA_12) {
847 /* leave the data alone */
848 }
837 849
838 /* 850 /*
839 * If there was a problem, report an unspecified 851 * If there was a problem, report an unspecified
840 * hardware error to prevent the higher layers from 852 * hardware error to prevent the higher layers from
841 * entering an infinite retry loop. 853 * entering an infinite retry loop.
842 */ 854 */
843 } else { 855 else {
844 srb->result = DID_ERROR << 16; 856 srb->result = DID_ERROR << 16;
845 if ((sshdr.response_code & 0x72) == 0x72) 857 if ((sshdr.response_code & 0x72) == 0x72)
846 srb->sense_buffer[1] = HARDWARE_ERROR; 858 srb->sense_buffer[1] = HARDWARE_ERROR;
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index f58caa9e6a27..a155cd02bce2 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -9,7 +9,8 @@ static int uas_is_interface(struct usb_host_interface *intf)
9 intf->desc.bInterfaceProtocol == USB_PR_UAS); 9 intf->desc.bInterfaceProtocol == USB_PR_UAS);
10} 10}
11 11
12static int uas_find_uas_alt_setting(struct usb_interface *intf) 12static struct usb_host_interface *uas_find_uas_alt_setting(
13 struct usb_interface *intf)
13{ 14{
14 int i; 15 int i;
15 16
@@ -17,10 +18,10 @@ static int uas_find_uas_alt_setting(struct usb_interface *intf)
17 struct usb_host_interface *alt = &intf->altsetting[i]; 18 struct usb_host_interface *alt = &intf->altsetting[i];
18 19
19 if (uas_is_interface(alt)) 20 if (uas_is_interface(alt))
20 return alt->desc.bAlternateSetting; 21 return alt;
21 } 22 }
22 23
23 return -ENODEV; 24 return NULL;
24} 25}
25 26
26static int uas_find_endpoints(struct usb_host_interface *alt, 27static int uas_find_endpoints(struct usb_host_interface *alt,
@@ -58,14 +59,14 @@ static int uas_use_uas_driver(struct usb_interface *intf,
58 struct usb_device *udev = interface_to_usbdev(intf); 59 struct usb_device *udev = interface_to_usbdev(intf);
59 struct usb_hcd *hcd = bus_to_hcd(udev->bus); 60 struct usb_hcd *hcd = bus_to_hcd(udev->bus);
60 unsigned long flags = id->driver_info; 61 unsigned long flags = id->driver_info;
61 int r, alt; 62 struct usb_host_interface *alt;
62 63 int r;
63 64
64 alt = uas_find_uas_alt_setting(intf); 65 alt = uas_find_uas_alt_setting(intf);
65 if (alt < 0) 66 if (!alt)
66 return 0; 67 return 0;
67 68
68 r = uas_find_endpoints(&intf->altsetting[alt], eps); 69 r = uas_find_endpoints(alt, eps);
69 if (r < 0) 70 if (r < 0)
70 return 0; 71 return 0;
71 72
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index cfb1e3bbd434..63cf981ed81c 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -873,14 +873,14 @@ MODULE_DEVICE_TABLE(usb, uas_usb_ids);
873static int uas_switch_interface(struct usb_device *udev, 873static int uas_switch_interface(struct usb_device *udev,
874 struct usb_interface *intf) 874 struct usb_interface *intf)
875{ 875{
876 int alt; 876 struct usb_host_interface *alt;
877 877
878 alt = uas_find_uas_alt_setting(intf); 878 alt = uas_find_uas_alt_setting(intf);
879 if (alt < 0) 879 if (!alt)
880 return alt; 880 return -ENODEV;
881 881
882 return usb_set_interface(udev, 882 return usb_set_interface(udev, alt->desc.bInterfaceNumber,
883 intf->altsetting[0].desc.bInterfaceNumber, alt); 883 alt->desc.bAlternateSetting);
884} 884}
885 885
886static int uas_configure_endpoints(struct uas_dev_info *devinfo) 886static int uas_configure_endpoints(struct uas_dev_info *devinfo)
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 5a70c33ef0e0..eb06d88b41d6 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1459,6 +1459,13 @@ UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
1459 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1459 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1460 US_FL_SANE_SENSE ), 1460 US_FL_SANE_SENSE ),
1461 1461
1462/* Reported by Kris Lindgren <kris.lindgren@gmail.com> */
1463UNUSUAL_DEV( 0x0bc2, 0x3332, 0x0000, 0x9999,
1464 "Seagate",
1465 "External",
1466 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1467 US_FL_NO_WP_DETECT ),
1468
1462UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999, 1469UNUSUAL_DEV( 0x0d49, 0x7310, 0x0000, 0x9999,
1463 "Maxtor", 1470 "Maxtor",
1464 "USB to SATA", 1471 "USB to SATA",
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 35a1e777b449..9a53912bdfe9 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -825,6 +825,8 @@ static int hwarc_probe(struct usb_interface *iface,
825 825
826 if (iface->cur_altsetting->desc.bNumEndpoints < 1) 826 if (iface->cur_altsetting->desc.bNumEndpoints < 1)
827 return -ENODEV; 827 return -ENODEV;
828 if (!usb_endpoint_xfer_int(&iface->cur_altsetting->endpoint[0].desc))
829 return -ENODEV;
828 830
829 result = -ENOMEM; 831 result = -ENOMEM;
830 uwb_rc = uwb_rc_alloc(); 832 uwb_rc = uwb_rc_alloc();
diff --git a/drivers/uwb/uwbd.c b/drivers/uwb/uwbd.c
index 01c20a260a8b..39dd4ef53c77 100644
--- a/drivers/uwb/uwbd.c
+++ b/drivers/uwb/uwbd.c
@@ -302,18 +302,22 @@ static int uwbd(void *param)
302/** Start the UWB daemon */ 302/** Start the UWB daemon */
303void uwbd_start(struct uwb_rc *rc) 303void uwbd_start(struct uwb_rc *rc)
304{ 304{
305 rc->uwbd.task = kthread_run(uwbd, rc, "uwbd"); 305 struct task_struct *task = kthread_run(uwbd, rc, "uwbd");
306 if (rc->uwbd.task == NULL) 306 if (IS_ERR(task)) {
307 rc->uwbd.task = NULL;
307 printk(KERN_ERR "UWB: Cannot start management daemon; " 308 printk(KERN_ERR "UWB: Cannot start management daemon; "
308 "UWB won't work\n"); 309 "UWB won't work\n");
309 else 310 } else {
311 rc->uwbd.task = task;
310 rc->uwbd.pid = rc->uwbd.task->pid; 312 rc->uwbd.pid = rc->uwbd.task->pid;
313 }
311} 314}
312 315
313/* Stop the UWB daemon and free any unprocessed events */ 316/* Stop the UWB daemon and free any unprocessed events */
314void uwbd_stop(struct uwb_rc *rc) 317void uwbd_stop(struct uwb_rc *rc)
315{ 318{
316 kthread_stop(rc->uwbd.task); 319 if (rc->uwbd.task)
320 kthread_stop(rc->uwbd.task);
317 uwbd_flush(rc); 321 uwbd_flush(rc);
318} 322}
319 323
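
The uwbd fix is a classic API mix-up: kthread_run() never returns NULL, it returns an error pointer on failure, so the old NULL check passed ERR_PTR values around and uwbd_stop() could hand one to kthread_stop(). The minimal correct shape (helper names are mine):

#include <linux/kthread.h>
#include <linux/err.h>

static struct task_struct *start_daemon(int (*fn)(void *), void *data)
{
	struct task_struct *task = kthread_run(fn, data, "uwbd");

	if (IS_ERR(task))	/* kthread_run() returns ERR_PTR, not NULL */
		return NULL;	/* normalize so the stop path can test it */
	return task;
}

static void stop_daemon(struct task_struct *task)
{
	if (task)		/* kthread_stop(NULL) would oops */
		kthread_stop(task);
}
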
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index 5fbfd9cfb6d6..5b3d57fc82d3 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -169,6 +169,9 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
169static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data) 169static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
170{ 170{
171 struct pci_bar_info *bar = data; 171 struct pci_bar_info *bar = data;
172 unsigned int pos = (offset - PCI_BASE_ADDRESS_0) / 4;
173 const struct resource *res = dev->resource;
174 u32 mask;
172 175
173 if (unlikely(!bar)) { 176 if (unlikely(!bar)) {
174 pr_warn(DRV_NAME ": driver data not found for %s\n", 177 pr_warn(DRV_NAME ": driver data not found for %s\n",
@@ -179,7 +182,13 @@ static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
179 /* A write to obtain the length must happen as a 32-bit write. 182 /* A write to obtain the length must happen as a 32-bit write.
180 * This does not (yet) support writing individual bytes 183 * This does not (yet) support writing individual bytes
181 */ 184 */
182 if (value == ~0) 185 if (res[pos].flags & IORESOURCE_IO)
186 mask = ~PCI_BASE_ADDRESS_IO_MASK;
187 else if (pos && (res[pos - 1].flags & IORESOURCE_MEM_64))
188 mask = 0;
189 else
190 mask = ~PCI_BASE_ADDRESS_MEM_MASK;
191 if ((value | mask) == ~0U)
183 bar->which = 1; 192 bar->which = 1;
184 else { 193 else {
185 u32 tmpval; 194 u32 tmpval;
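
bar_write() now recognizes a BAR size probe by type rather than insisting on a literal ~0 write: the low flag bits of an I/O or memory BAR never read back as ones, and the upper dword of a 64-bit memory BAR has no flag bits at all. A sketch of the mask selection, indexing dev->resource the same way the hunk does:

#include <linux/pci.h>
#include <linux/ioport.h>

static u32 bar_probe_mask(struct pci_dev *dev, unsigned int pos)
{
	const struct resource *res = dev->resource;

	if (res[pos].flags & IORESOURCE_IO)
		return ~PCI_BASE_ADDRESS_IO_MASK;	/* low 2 flag bits */
	if (pos && (res[pos - 1].flags & IORESOURCE_MEM_64))
		return 0;		/* upper half of a 64-bit BAR */
	return ~PCI_BASE_ADDRESS_MEM_MASK;		/* low 4 flag bits */
}

/* A guest write is a size probe when all writable bits are ones:
 * if ((value | bar_probe_mask(dev, pos)) == ~0U) ...
 */
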
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 82a8866758ee..a1c17000129b 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -519,64 +519,6 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
519 return err; 519 return err;
520} 520}
521 521
522static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
523 grant_ref_t *gnt_refs,
524 unsigned int nr_grefs,
525 void **vaddr)
526{
527 struct xenbus_map_node *node;
528 struct vm_struct *area;
529 pte_t *ptes[XENBUS_MAX_RING_GRANTS];
530 phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
531 int err = GNTST_okay;
532 int i;
533 bool leaked;
534
535 *vaddr = NULL;
536
537 if (nr_grefs > XENBUS_MAX_RING_GRANTS)
538 return -EINVAL;
539
540 node = kzalloc(sizeof(*node), GFP_KERNEL);
541 if (!node)
542 return -ENOMEM;
543
544 area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
545 if (!area) {
546 kfree(node);
547 return -ENOMEM;
548 }
549
550 for (i = 0; i < nr_grefs; i++)
551 phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
552
553 err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
554 phys_addrs,
555 GNTMAP_host_map | GNTMAP_contains_pte,
556 &leaked);
557 if (err)
558 goto failed;
559
560 node->nr_handles = nr_grefs;
561 node->pv.area = area;
562
563 spin_lock(&xenbus_valloc_lock);
564 list_add(&node->next, &xenbus_valloc_pages);
565 spin_unlock(&xenbus_valloc_lock);
566
567 *vaddr = area->addr;
568 return 0;
569
570failed:
571 if (!leaked)
572 free_vm_area(area);
573 else
574 pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
575
576 kfree(node);
577 return err;
578}
579
580struct map_ring_valloc_hvm 522struct map_ring_valloc_hvm
581{ 523{
582 unsigned int idx; 524 unsigned int idx;
@@ -725,6 +667,65 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
725} 667}
726EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); 668EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
727 669
670#ifdef CONFIG_XEN_PV
671static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
672 grant_ref_t *gnt_refs,
673 unsigned int nr_grefs,
674 void **vaddr)
675{
676 struct xenbus_map_node *node;
677 struct vm_struct *area;
678 pte_t *ptes[XENBUS_MAX_RING_GRANTS];
679 phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
680 int err = GNTST_okay;
681 int i;
682 bool leaked;
683
684 *vaddr = NULL;
685
686 if (nr_grefs > XENBUS_MAX_RING_GRANTS)
687 return -EINVAL;
688
689 node = kzalloc(sizeof(*node), GFP_KERNEL);
690 if (!node)
691 return -ENOMEM;
692
693 area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
694 if (!area) {
695 kfree(node);
696 return -ENOMEM;
697 }
698
699 for (i = 0; i < nr_grefs; i++)
700 phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
701
702 err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
703 phys_addrs,
704 GNTMAP_host_map | GNTMAP_contains_pte,
705 &leaked);
706 if (err)
707 goto failed;
708
709 node->nr_handles = nr_grefs;
710 node->pv.area = area;
711
712 spin_lock(&xenbus_valloc_lock);
713 list_add(&node->next, &xenbus_valloc_pages);
714 spin_unlock(&xenbus_valloc_lock);
715
716 *vaddr = area->addr;
717 return 0;
718
719failed:
720 if (!leaked)
721 free_vm_area(area);
722 else
723 pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
724
725 kfree(node);
726 return err;
727}
728
728static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr) 729static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
729{ 730{
730 struct xenbus_map_node *node; 731 struct xenbus_map_node *node;
@@ -788,6 +789,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
788 return err; 789 return err;
789} 790}
790 791
792static const struct xenbus_ring_ops ring_ops_pv = {
793 .map = xenbus_map_ring_valloc_pv,
794 .unmap = xenbus_unmap_ring_vfree_pv,
795};
796#endif
797
791struct unmap_ring_vfree_hvm 798struct unmap_ring_vfree_hvm
792{ 799{
793 unsigned int idx; 800 unsigned int idx;
@@ -916,11 +923,6 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
916} 923}
917EXPORT_SYMBOL_GPL(xenbus_read_driver_state); 924EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
918 925
919static const struct xenbus_ring_ops ring_ops_pv = {
920 .map = xenbus_map_ring_valloc_pv,
921 .unmap = xenbus_unmap_ring_vfree_pv,
922};
923
924static const struct xenbus_ring_ops ring_ops_hvm = { 926static const struct xenbus_ring_ops ring_ops_hvm = {
925 .map = xenbus_map_ring_valloc_hvm, 927 .map = xenbus_map_ring_valloc_hvm,
926 .unmap = xenbus_unmap_ring_vfree_hvm, 928 .unmap = xenbus_unmap_ring_vfree_hvm,
@@ -928,8 +930,10 @@ static const struct xenbus_ring_ops ring_ops_hvm = {
928 930
929void __init xenbus_ring_ops_init(void) 931void __init xenbus_ring_ops_init(void)
930{ 932{
933#ifdef CONFIG_XEN_PV
931 if (!xen_feature(XENFEAT_auto_translated_physmap)) 934 if (!xen_feature(XENFEAT_auto_translated_physmap))
932 ring_ops = &ring_ops_pv; 935 ring_ops = &ring_ops_pv;
933 else 936 else
937#endif
934 ring_ops = &ring_ops_hvm; 938 ring_ops = &ring_ops_hvm;
935} 939}