aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2016-03-13 19:42:34 -0400
committerDave Airlie <airlied@redhat.com>2016-03-13 19:46:02 -0400
commit9b61c0fcdf0cfd20a85d9856d46142e7f297de0a (patch)
treed4abe6aa3f4e1e088f9da1d0597e078b1fe58912 /drivers
parent550e3b23a53c88adfa46e64f9d442743e65d47da (diff)
parent125234dc8b1cc862f52d8bd5b37c36cc59b2cb86 (diff)
Merge drm-fixes into drm-next.
Nouveau wanted this to avoid some worse conflicts when I merge that.
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpica/psargs.c9
-rw-r--r--drivers/acpi/nfit.c105
-rw-r--r--drivers/acpi/pci_irq.c17
-rw-r--r--drivers/acpi/pci_link.c128
-rw-r--r--drivers/android/binder.c2
-rw-r--r--drivers/ata/ahci.c69
-rw-r--r--drivers/ata/ahci.h6
-rw-r--r--drivers/ata/ahci_brcmstb.c1
-rw-r--r--drivers/ata/ahci_xgene.c85
-rw-r--r--drivers/ata/libahci.c90
-rw-r--r--drivers/ata/libata-core.c1
-rw-r--r--drivers/ata/libata-scsi.c11
-rw-r--r--drivers/ata/libata-sff.c35
-rw-r--r--drivers/ata/pata_rb532_cf.c11
-rw-r--r--drivers/base/component.c49
-rw-r--r--drivers/base/property.c8
-rw-r--r--drivers/base/regmap/regmap-mmio.c16
-rw-r--r--drivers/block/floppy.c67
-rw-r--r--drivers/block/null_blk.c8
-rw-r--r--drivers/block/xen-blkfront.c74
-rw-r--r--drivers/char/hpet.c2
-rw-r--r--drivers/char/random.c22
-rw-r--r--drivers/clk/Makefile2
-rw-r--r--drivers/clk/clk-gpio.c2
-rw-r--r--drivers/clk/clk-scpi.c2
-rw-r--r--drivers/clk/mvebu/dove-divider.c2
-rw-r--r--drivers/clk/qcom/gcc-apq8084.c1
-rw-r--r--drivers/clk/qcom/gcc-ipq806x.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8660.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8916.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8960.c2
-rw-r--r--drivers/clk/qcom/gcc-msm8974.c1
-rw-r--r--drivers/clk/qcom/lcc-ipq806x.c1
-rw-r--r--drivers/clk/qcom/lcc-msm8960.c1
-rw-r--r--drivers/clk/qcom/mmcc-apq8084.c1
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c2
-rw-r--r--drivers/clk/qcom/mmcc-msm8974.c1
-rw-r--r--drivers/clk/rockchip/clk-rk3036.c26
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c26
-rw-r--r--drivers/clk/tegra/clk-emc.c6
-rw-r--r--drivers/clk/tegra/clk-id.h1
-rw-r--r--drivers/clk/tegra/clk-pll.c50
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c5
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c6
-rw-r--r--drivers/clk/tegra/clk-tegra210.c132
-rw-r--r--drivers/clk/ti/dpll3xxx.c3
-rw-r--r--drivers/clk/versatile/clk-icst.c3
-rw-r--r--drivers/cpufreq/Kconfig1
-rw-r--r--drivers/cpufreq/Kconfig.arm4
-rw-r--r--drivers/cpufreq/mt8173-cpufreq.c1
-rw-r--r--drivers/crypto/atmel-sha.c23
-rw-r--r--drivers/crypto/marvell/cesa.c2
-rw-r--r--drivers/devfreq/tegra-devfreq.c2
-rw-r--r--drivers/dma/at_xdmac.c42
-rw-r--r--drivers/dma/dw/core.c15
-rw-r--r--drivers/dma/dw/pci.c4
-rw-r--r--drivers/dma/edma.c41
-rw-r--r--drivers/dma/fsldma.c2
-rw-r--r--drivers/dma/ioat/dma.c34
-rw-r--r--drivers/dma/pxa_dma.c8
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/firmware/efi/efivars.c35
-rw-r--r--drivers/firmware/efi/vars.c144
-rw-r--r--drivers/gpio/gpio-altera.c5
-rw-r--r--drivers/gpio/gpio-davinci.c7
-rw-r--r--drivers/gpio/gpio-rcar.c42
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_dp.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c157
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c3
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h32
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c1
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c18
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c19
-rw-r--r--drivers/gpu/drm/ast/ast_main.c2
-rw-r--r--drivers/gpu/drm/drm_atomic.c44
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c49
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c37
-rw-r--r--drivers/gpu/drm/drm_irq.c73
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c10
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c3
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c1
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c1
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c6
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c6
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h6
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_prime.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c16
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h13
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c65
-rw-r--r--drivers/gpu/drm/vc4/vc4_irq.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_render_cl.c22
-rw-r--r--drivers/gpu/drm/vc4/vc4_v3d.c48
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c2
-rw-r--r--drivers/gpu/host1x/bus.c2
-rw-r--r--drivers/gpu/host1x/dev.c7
-rw-r--r--drivers/gpu/host1x/dev.h1
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c31
-rw-r--r--drivers/hwmon/ads1015.c2
-rw-r--r--drivers/hwmon/gpio-fan.c7
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c3
-rw-r--r--drivers/i2c/busses/i2c-i801.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c4
-rw-r--r--drivers/i2c/busses/i2c-uniphier-f.c2
-rw-r--r--drivers/i2c/busses/i2c-uniphier.c2
-rw-r--r--drivers/infiniband/core/device.c1
-rw-r--r--drivers/infiniband/core/sa_query.c2
-rw-r--r--drivers/infiniband/core/sysfs.c7
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c9
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c63
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c7
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c12
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c41
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c16
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c25
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c24
-rw-r--r--drivers/input/joystick/xpad.c1
-rw-r--r--drivers/input/keyboard/adp5589-keys.c7
-rw-r--r--drivers/input/keyboard/cap11xx.c8
-rw-r--r--drivers/input/misc/Kconfig2
-rw-r--r--drivers/input/misc/sirfsoc-onkey.c2
-rw-r--r--drivers/input/mouse/vmmouse.c13
-rw-r--r--drivers/input/serio/serio.c2
-rw-r--r--drivers/input/touchscreen/colibri-vf50-ts.c1
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c18
-rw-r--r--drivers/iommu/amd_iommu.c4
-rw-r--r--drivers/iommu/amd_iommu_init.c63
-rw-r--r--drivers/iommu/dmar.c7
-rw-r--r--drivers/iommu/intel-iommu.c4
-rw-r--r--drivers/iommu/intel-svm.c37
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c35
-rw-r--r--drivers/irqchip/irq-gic.c13
-rw-r--r--drivers/irqchip/irq-sun4i.c1
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c9
-rw-r--r--drivers/isdn/hardware/mISDN/netjet.c2
-rw-r--r--drivers/lightnvm/core.c25
-rw-r--r--drivers/lightnvm/rrpc.c4
-rw-r--r--drivers/lightnvm/rrpc.h5
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/media/i2c/adv7604.c3
-rw-r--r--drivers/media/media-device.c23
-rw-r--r--drivers/mfd/db8500-prcmu.c3
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/mei/main.c6
-rw-r--r--drivers/mmc/card/block.c7
-rw-r--r--drivers/mmc/host/mmc_spi.c15
-rw-r--r--drivers/mmc/host/omap_hsmmc.c2
-rw-r--r--drivers/mmc/host/pxamci.c37
-rw-r--r--drivers/mmc/host/sdhci-acpi.c30
-rw-r--r--drivers/mmc/host/sdhci-of-at91.c1
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c31
-rw-r--r--drivers/mmc/host/sdhci.c5
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/mtd/ubi/upd.c2
-rw-r--r--drivers/net/bonding/bond_main.c40
-rw-r--r--drivers/net/can/spi/mcp251x.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c14
-rw-r--r--drivers/net/can/usb/gs_usb.c24
-rw-r--r--drivers/net/dsa/mv88e6352.c1
-rw-r--r--drivers/net/dsa/mv88e6xxx.c27
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c1
-rw-r--r--drivers/net/ethernet/agere/et131x.c2
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c1
-rw-r--r--drivers/net/ethernet/amd/am79c961a.c64
-rw-r--r--drivers/net/ethernet/amd/lance.c4
-rw-r--r--drivers/net/ethernet/arc/emac_main.c74
-rw-r--r--drivers/net/ethernet/aurora/nb8800.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h36
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c299
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c72
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c46
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c3
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c25
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c2
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c4
-rw-r--r--drivers/net/ethernet/cavium/liquidio/octeon_droq.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h9
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c6
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_reg.h2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c18
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c8
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c34
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c19
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c34
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h9
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c20
-rw-r--r--drivers/net/ethernet/ethoc.c1
-rw-r--r--drivers/net/ethernet/freescale/fman/fman.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c4
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c4
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c37
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c15
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c5
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c62
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h4
-rw-r--r--drivers/net/ethernet/jme.c26
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c184
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_clock.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c70
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_clock.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h58
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c69
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c4
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c32
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c20
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c10
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c9
-rw-r--r--drivers/net/ethernet/synopsys/dwc_eth_qos.c45
-rw-r--r--drivers/net/ethernet/ti/cpsw-phy-sel.c12
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c105
-rw-r--r--drivers/net/geneve.c52
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/phy/bcm7xxx.c43
-rw-r--r--drivers/net/phy/marvell.c15
-rw-r--r--drivers/net/phy/micrel.c28
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c11
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/usb/Kconfig10
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/ax88172a.c1
-rw-r--r--drivers/net/usb/cdc_ncm.c26
-rw-r--r--drivers/net/usb/qmi_wwan.c8
-rw-r--r--drivers/net/usb/usbnet.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c73
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h4
-rw-r--r--drivers/net/vrf.c13
-rw-r--r--drivers/net/vxlan.c64
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-8000.c42
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c188
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rc.c5
-rw-r--r--drivers/net/wireless/ti/wlcore/io.c8
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h4
-rw-r--r--drivers/nvdimm/bus.c20
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/nvme/host/Kconfig5
-rw-r--r--drivers/nvme/host/core.c112
-rw-r--r--drivers/nvme/host/lightnvm.c12
-rw-r--r--drivers/nvme/host/nvme.h12
-rw-r--r--drivers/nvme/host/pci.c160
-rw-r--r--drivers/nvmem/core.c6
-rw-r--r--drivers/nvmem/qfprom.c1
-rw-r--r--drivers/of/irq.c9
-rw-r--r--drivers/of/of_mdio.c2
-rw-r--r--drivers/pci/host/Kconfig1
-rw-r--r--drivers/pci/host/pci-keystone-dw.c11
-rw-r--r--drivers/pci/host/pci-layerscape.c21
-rw-r--r--drivers/pci/host/pcie-iproc.c29
-rw-r--r--drivers/pci/pci.c4
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c4
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h1
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c2
-rw-r--r--drivers/pci/xen-pcifront.c10
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/phy-core.c16
-rw-r--r--drivers/phy/phy-twl4030-usb.c14
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c9
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c5
-rw-r--r--drivers/pinctrl/pxa/pinctrl-pxa2xx.c1
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c48
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c1
-rw-r--r--drivers/platform/x86/intel-hid.c3
-rw-r--r--drivers/platform/x86/intel_scu_ipcutil.c2
-rw-r--r--drivers/power/bq27xxx_battery_i2c.c37
-rw-r--r--drivers/s390/block/dasd.c1
-rw-r--r--drivers/s390/block/dasd_alias.c23
-rw-r--r--drivers/s390/block/dasd_diag.c9
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c4
-rw-r--r--drivers/scsi/hisi_sas/Kconfig2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c9
-rw-r--r--drivers/scsi/ipr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c68
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h59
-rw-r--r--drivers/scsi/qla2xxx/qla_tmpl.c16
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c76
-rw-r--r--drivers/scsi/scsi_devinfo.c2
-rw-r--r--drivers/scsi/scsi_lib.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/scsi/sd.c4
-rw-r--r--drivers/scsi/storvsc_drv.c16
-rw-r--r--drivers/sh/pm_runtime.c2
-rw-r--r--drivers/spi/spi-atmel.c1
-rw-r--r--drivers/spi/spi-bcm2835aux.c4
-rw-r--r--drivers/spi/spi-fsl-espi.c4
-rw-r--r--drivers/spi/spi-imx.c25
-rw-r--r--drivers/spi/spi-loopback-test.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c3
-rw-r--r--drivers/spi/spi-rockchip.c3
-rw-r--r--drivers/ssb/Kconfig1
-rw-r--r--drivers/target/target_core_configfs.c2
-rw-r--r--drivers/target/target_core_device.c44
-rw-r--r--drivers/target/target_core_file.c29
-rw-r--r--drivers/target/target_core_iblock.c58
-rw-r--r--drivers/target/target_core_internal.h1
-rw-r--r--drivers/target/target_core_tmr.c139
-rw-r--r--drivers/target/target_core_transport.c327
-rw-r--r--drivers/target/target_core_user.c2
-rw-r--r--drivers/thermal/Kconfig6
-rw-r--r--drivers/thermal/cpu_cooling.c14
-rw-r--r--drivers/thermal/of-thermal.c18
-rw-r--r--drivers/thermal/rcar_thermal.c45
-rw-r--r--drivers/thermal/spear_thermal.c6
-rw-r--r--drivers/tty/pty.c21
-rw-r--r--drivers/tty/serial/8250/8250_pci.c21
-rw-r--r--drivers/tty/serial/omap-serial.c10
-rw-r--r--drivers/tty/tty_io.c3
-rw-r--r--drivers/tty/tty_mutex.c7
-rw-r--r--drivers/usb/chipidea/ci_hdrc_pci.c4
-rw-r--r--drivers/usb/chipidea/debug.c3
-rw-r--r--drivers/usb/chipidea/otg.c2
-rw-r--r--drivers/usb/core/hub.c8
-rw-r--r--drivers/usb/dwc2/Kconfig1
-rw-r--r--drivers/usb/dwc2/core.c6
-rw-r--r--drivers/usb/dwc2/hcd_ddma.c23
-rw-r--r--drivers/usb/dwc2/hcd_intr.c8
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/ep0.c5
-rw-r--r--drivers/usb/dwc3/gadget.c70
-rw-r--r--drivers/usb/gadget/legacy/inode.c7
-rw-r--r--drivers/usb/gadget/udc/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/udc/net2280.h15
-rw-r--r--drivers/usb/gadget/udc/udc-core.c3
-rw-r--r--drivers/usb/musb/musb_host.c8
-rw-r--r--drivers/usb/phy/phy-msm-usb.c20
-rw-r--r--drivers/usb/serial/Kconfig16
-rw-r--r--drivers/usb/serial/Makefile1
-rw-r--r--drivers/usb/serial/cp210x.c3
-rw-r--r--drivers/usb/serial/mxu11x0.c1006
-rw-r--r--drivers/usb/serial/option.c14
-rw-r--r--drivers/usb/serial/qcserial.c7
-rw-r--r--drivers/vfio/pci/vfio_pci.c9
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c9
-rw-r--r--drivers/vfio/vfio_iommu_type1.c6
-rw-r--r--drivers/vhost/vhost.c15
-rw-r--r--drivers/video/console/fbcon.c2
-rw-r--r--drivers/video/fbdev/da8xx-fb.c6
-rw-r--r--drivers/video/fbdev/exynos/s6e8ax0.c13
-rw-r--r--drivers/video/fbdev/imxfb.c15
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.c3
-rw-r--r--drivers/video/fbdev/ocfb.c4
-rw-r--r--drivers/virtio/virtio_pci_modern.c2
-rw-r--r--drivers/watchdog/Kconfig11
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/sun4v_wdt.c191
-rw-r--r--drivers/xen/xen-pciback/pciback_ops.c9
-rw-r--r--drivers/xen/xen-scsiback.c80
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c2
451 files changed, 5657 insertions, 3968 deletions
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 305218539df2..d48cbed342c1 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -269,8 +269,7 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
269 */ 269 */
270 if (ACPI_SUCCESS(status) && 270 if (ACPI_SUCCESS(status) &&
271 possible_method_call && (node->type == ACPI_TYPE_METHOD)) { 271 possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
272 if (GET_CURRENT_ARG_TYPE(walk_state->arg_types) == 272 if (walk_state->opcode == AML_UNLOAD_OP) {
273 ARGP_SUPERNAME) {
274 /* 273 /*
275 * acpi_ps_get_next_namestring has increased the AML pointer, 274 * acpi_ps_get_next_namestring has increased the AML pointer,
276 * so we need to restore the saved AML pointer for method call. 275 * so we need to restore the saved AML pointer for method call.
@@ -697,7 +696,7 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
697 * 696 *
698 * PARAMETERS: walk_state - Current state 697 * PARAMETERS: walk_state - Current state
699 * parser_state - Current parser state object 698 * parser_state - Current parser state object
700 * arg_type - The parser argument type (ARGP_*) 699 * arg_type - The argument type (AML_*_ARG)
701 * return_arg - Where the next arg is returned 700 * return_arg - Where the next arg is returned
702 * 701 *
703 * RETURN: Status, and an op object containing the next argument. 702 * RETURN: Status, and an op object containing the next argument.
@@ -817,9 +816,9 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
817 return_ACPI_STATUS(AE_NO_MEMORY); 816 return_ACPI_STATUS(AE_NO_MEMORY);
818 } 817 }
819 818
820 /* super_name allows argument to be a method call */ 819 /* To support super_name arg of Unload */
821 820
822 if (arg_type == ARGP_SUPERNAME) { 821 if (walk_state->opcode == AML_UNLOAD_OP) {
823 status = 822 status =
824 acpi_ps_get_next_namepath(walk_state, 823 acpi_ps_get_next_namepath(walk_state,
825 parser_state, arg, 824 parser_state, arg,
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index ad6d8c6b777e..35947ac87644 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -469,37 +469,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
469 nfit_mem->bdw = NULL; 469 nfit_mem->bdw = NULL;
470} 470}
471 471
472static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc, 472static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
473 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa) 473 struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
474{ 474{
475 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index; 475 u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
476 struct nfit_memdev *nfit_memdev; 476 struct nfit_memdev *nfit_memdev;
477 struct nfit_flush *nfit_flush; 477 struct nfit_flush *nfit_flush;
478 struct nfit_dcr *nfit_dcr;
479 struct nfit_bdw *nfit_bdw; 478 struct nfit_bdw *nfit_bdw;
480 struct nfit_idt *nfit_idt; 479 struct nfit_idt *nfit_idt;
481 u16 idt_idx, range_index; 480 u16 idt_idx, range_index;
482 481
483 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
484 if (nfit_dcr->dcr->region_index != dcr)
485 continue;
486 nfit_mem->dcr = nfit_dcr->dcr;
487 break;
488 }
489
490 if (!nfit_mem->dcr) {
491 dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
492 spa->range_index, __to_nfit_memdev(nfit_mem)
493 ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
494 return -ENODEV;
495 }
496
497 /*
498 * We've found enough to create an nvdimm, optionally
499 * find an associated BDW
500 */
501 list_add(&nfit_mem->list, &acpi_desc->dimms);
502
503 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) { 482 list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
504 if (nfit_bdw->bdw->region_index != dcr) 483 if (nfit_bdw->bdw->region_index != dcr)
505 continue; 484 continue;
@@ -508,12 +487,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
508 } 487 }
509 488
510 if (!nfit_mem->bdw) 489 if (!nfit_mem->bdw)
511 return 0; 490 return;
512 491
513 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem); 492 nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
514 493
515 if (!nfit_mem->spa_bdw) 494 if (!nfit_mem->spa_bdw)
516 return 0; 495 return;
517 496
518 range_index = nfit_mem->spa_bdw->range_index; 497 range_index = nfit_mem->spa_bdw->range_index;
519 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 498 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -538,8 +517,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
538 } 517 }
539 break; 518 break;
540 } 519 }
541
542 return 0;
543} 520}
544 521
545static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc, 522static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
@@ -548,7 +525,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
548 struct nfit_mem *nfit_mem, *found; 525 struct nfit_mem *nfit_mem, *found;
549 struct nfit_memdev *nfit_memdev; 526 struct nfit_memdev *nfit_memdev;
550 int type = nfit_spa_type(spa); 527 int type = nfit_spa_type(spa);
551 u16 dcr;
552 528
553 switch (type) { 529 switch (type) {
554 case NFIT_SPA_DCR: 530 case NFIT_SPA_DCR:
@@ -559,14 +535,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
559 } 535 }
560 536
561 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { 537 list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
562 int rc; 538 struct nfit_dcr *nfit_dcr;
539 u32 device_handle;
540 u16 dcr;
563 541
564 if (nfit_memdev->memdev->range_index != spa->range_index) 542 if (nfit_memdev->memdev->range_index != spa->range_index)
565 continue; 543 continue;
566 found = NULL; 544 found = NULL;
567 dcr = nfit_memdev->memdev->region_index; 545 dcr = nfit_memdev->memdev->region_index;
546 device_handle = nfit_memdev->memdev->device_handle;
568 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) 547 list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
569 if (__to_nfit_memdev(nfit_mem)->region_index == dcr) { 548 if (__to_nfit_memdev(nfit_mem)->device_handle
549 == device_handle) {
570 found = nfit_mem; 550 found = nfit_mem;
571 break; 551 break;
572 } 552 }
@@ -579,6 +559,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
579 if (!nfit_mem) 559 if (!nfit_mem)
580 return -ENOMEM; 560 return -ENOMEM;
581 INIT_LIST_HEAD(&nfit_mem->list); 561 INIT_LIST_HEAD(&nfit_mem->list);
562 list_add(&nfit_mem->list, &acpi_desc->dimms);
563 }
564
565 list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
566 if (nfit_dcr->dcr->region_index != dcr)
567 continue;
568 /*
569 * Record the control region for the dimm. For
570 * the ACPI 6.1 case, where there are separate
571 * control regions for the pmem vs blk
572 * interfaces, be sure to record the extended
573 * blk details.
574 */
575 if (!nfit_mem->dcr)
576 nfit_mem->dcr = nfit_dcr->dcr;
577 else if (nfit_mem->dcr->windows == 0
578 && nfit_dcr->dcr->windows)
579 nfit_mem->dcr = nfit_dcr->dcr;
580 break;
581 }
582
583 if (dcr && !nfit_mem->dcr) {
584 dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
585 spa->range_index, dcr);
586 return -ENODEV;
582 } 587 }
583 588
584 if (type == NFIT_SPA_DCR) { 589 if (type == NFIT_SPA_DCR) {
@@ -595,6 +600,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
595 nfit_mem->idt_dcr = nfit_idt->idt; 600 nfit_mem->idt_dcr = nfit_idt->idt;
596 break; 601 break;
597 } 602 }
603 nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
598 } else { 604 } else {
599 /* 605 /*
600 * A single dimm may belong to multiple SPA-PM 606 * A single dimm may belong to multiple SPA-PM
@@ -603,13 +609,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
603 */ 609 */
604 nfit_mem->memdev_pmem = nfit_memdev->memdev; 610 nfit_mem->memdev_pmem = nfit_memdev->memdev;
605 } 611 }
606
607 if (found)
608 continue;
609
610 rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
611 if (rc)
612 return rc;
613 } 612 }
614 613
615 return 0; 614 return 0;
@@ -1504,9 +1503,7 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
1504 case 1: 1503 case 1:
1505 /* ARS unsupported, but we should never get here */ 1504 /* ARS unsupported, but we should never get here */
1506 return 0; 1505 return 0;
1507 case 2: 1506 case 6:
1508 return -EINVAL;
1509 case 3:
1510 /* ARS is in progress */ 1507 /* ARS is in progress */
1511 msleep(1000); 1508 msleep(1000);
1512 break; 1509 break;
@@ -1517,13 +1514,13 @@ static int ars_do_start(struct nvdimm_bus_descriptor *nd_desc,
1517} 1514}
1518 1515
1519static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc, 1516static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
1520 struct nd_cmd_ars_status *cmd) 1517 struct nd_cmd_ars_status *cmd, u32 size)
1521{ 1518{
1522 int rc; 1519 int rc;
1523 1520
1524 while (1) { 1521 while (1) {
1525 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd, 1522 rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
1526 sizeof(*cmd)); 1523 size);
1527 if (rc || cmd->status & 0xffff) 1524 if (rc || cmd->status & 0xffff)
1528 return -ENXIO; 1525 return -ENXIO;
1529 1526
@@ -1538,6 +1535,8 @@ static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
1538 case 2: 1535 case 2:
1539 /* No ARS performed for the current boot */ 1536 /* No ARS performed for the current boot */
1540 return 0; 1537 return 0;
1538 case 3:
1539 /* TODO: error list overflow support */
1541 default: 1540 default:
1542 return -ENXIO; 1541 return -ENXIO;
1543 } 1542 }
@@ -1581,6 +1580,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
1581 struct nd_cmd_ars_start *ars_start = NULL; 1580 struct nd_cmd_ars_start *ars_start = NULL;
1582 struct nd_cmd_ars_cap *ars_cap = NULL; 1581 struct nd_cmd_ars_cap *ars_cap = NULL;
1583 u64 start, len, cur, remaining; 1582 u64 start, len, cur, remaining;
1583 u32 ars_status_size;
1584 int rc; 1584 int rc;
1585 1585
1586 ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL); 1586 ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
@@ -1590,14 +1590,21 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
1590 start = ndr_desc->res->start; 1590 start = ndr_desc->res->start;
1591 len = ndr_desc->res->end - ndr_desc->res->start + 1; 1591 len = ndr_desc->res->end - ndr_desc->res->start + 1;
1592 1592
1593 /*
1594 * If ARS is unimplemented, unsupported, or if the 'Persistent Memory
1595 * Scrub' flag in extended status is not set, skip this but continue
1596 * initialization
1597 */
1593 rc = ars_get_cap(nd_desc, ars_cap, start, len); 1598 rc = ars_get_cap(nd_desc, ars_cap, start, len);
1599 if (rc == -ENOTTY) {
1600 dev_dbg(acpi_desc->dev,
1601 "Address Range Scrub is not implemented, won't create an error list\n");
1602 rc = 0;
1603 goto out;
1604 }
1594 if (rc) 1605 if (rc)
1595 goto out; 1606 goto out;
1596 1607
1597 /*
1598 * If ARS is unsupported, or if the 'Persistent Memory Scrub' flag in
1599 * extended status is not set, skip this but continue initialization
1600 */
1601 if ((ars_cap->status & 0xffff) || 1608 if ((ars_cap->status & 0xffff) ||
1602 !(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) { 1609 !(ars_cap->status >> 16 & ND_ARS_PERSISTENT)) {
1603 dev_warn(acpi_desc->dev, 1610 dev_warn(acpi_desc->dev,
@@ -1610,14 +1617,14 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
1610 * Check if a full-range ARS has been run. If so, use those results 1617 * Check if a full-range ARS has been run. If so, use those results
1611 * without having to start a new ARS. 1618 * without having to start a new ARS.
1612 */ 1619 */
1613 ars_status = kzalloc(ars_cap->max_ars_out + sizeof(*ars_status), 1620 ars_status_size = ars_cap->max_ars_out;
1614 GFP_KERNEL); 1621 ars_status = kzalloc(ars_status_size, GFP_KERNEL);
1615 if (!ars_status) { 1622 if (!ars_status) {
1616 rc = -ENOMEM; 1623 rc = -ENOMEM;
1617 goto out; 1624 goto out;
1618 } 1625 }
1619 1626
1620 rc = ars_get_status(nd_desc, ars_status); 1627 rc = ars_get_status(nd_desc, ars_status, ars_status_size);
1621 if (rc) 1628 if (rc)
1622 goto out; 1629 goto out;
1623 1630
@@ -1647,7 +1654,7 @@ static int acpi_nfit_find_poison(struct acpi_nfit_desc *acpi_desc,
1647 if (rc) 1654 if (rc)
1648 goto out; 1655 goto out;
1649 1656
1650 rc = ars_get_status(nd_desc, ars_status); 1657 rc = ars_get_status(nd_desc, ars_status, ars_status_size);
1651 if (rc) 1658 if (rc)
1652 goto out; 1659 goto out;
1653 1660
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index d30184c7f3bc..c8e169e46673 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -406,7 +406,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
406 return 0; 406 return 0;
407 } 407 }
408 408
409 if (pci_has_managed_irq(dev)) 409 if (dev->irq_managed && dev->irq > 0)
410 return 0; 410 return 0;
411 411
412 entry = acpi_pci_irq_lookup(dev, pin); 412 entry = acpi_pci_irq_lookup(dev, pin);
@@ -451,7 +451,8 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
451 kfree(entry); 451 kfree(entry);
452 return rc; 452 return rc;
453 } 453 }
454 pci_set_managed_irq(dev, rc); 454 dev->irq = rc;
455 dev->irq_managed = 1;
455 456
456 if (link) 457 if (link)
457 snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link); 458 snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -474,9 +475,17 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
474 u8 pin; 475 u8 pin;
475 476
476 pin = dev->pin; 477 pin = dev->pin;
477 if (!pin || !pci_has_managed_irq(dev)) 478 if (!pin || !dev->irq_managed || dev->irq <= 0)
478 return; 479 return;
479 480
481 /* Keep IOAPIC pin configuration when suspending */
482 if (dev->dev.power.is_prepared)
483 return;
484#ifdef CONFIG_PM
485 if (dev->dev.power.runtime_status == RPM_SUSPENDING)
486 return;
487#endif
488
480 entry = acpi_pci_irq_lookup(dev, pin); 489 entry = acpi_pci_irq_lookup(dev, pin);
481 if (!entry) 490 if (!entry)
482 return; 491 return;
@@ -496,6 +505,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
496 dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin)); 505 dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
497 if (gsi >= 0) { 506 if (gsi >= 0) {
498 acpi_unregister_gsi(gsi); 507 acpi_unregister_gsi(gsi);
499 pci_reset_managed_irq(dev); 508 dev->irq_managed = 0;
500 } 509 }
501} 510}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index fa2863567eed..ededa909df2f 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -4,7 +4,6 @@
4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 4 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 5 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6 * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de> 6 * Copyright (C) 2002 Dominik Brodowski <devel@brodo.de>
7 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
8 * 7 *
9 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
10 * 9 *
@@ -438,6 +437,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
438 * enabled system. 437 * enabled system.
439 */ 438 */
440 439
440#define ACPI_MAX_IRQS 256
441#define ACPI_MAX_ISA_IRQ 16 441#define ACPI_MAX_ISA_IRQ 16
442 442
443#define PIRQ_PENALTY_PCI_AVAILABLE (0) 443#define PIRQ_PENALTY_PCI_AVAILABLE (0)
@@ -447,7 +447,7 @@ static int acpi_pci_link_set(struct acpi_pci_link *link, int irq)
447#define PIRQ_PENALTY_ISA_USED (16*16*16*16*16) 447#define PIRQ_PENALTY_ISA_USED (16*16*16*16*16)
448#define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16) 448#define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16)
449 449
450static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = { 450static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
451 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */ 451 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */
452 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */ 452 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */
453 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */ 453 PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */
@@ -464,68 +464,9 @@ static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = {
464 PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */ 464 PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */
465 PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */ 465 PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */
466 PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */ 466 PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */
467 /* >IRQ15 */
467}; 468};
468 469
469struct irq_penalty_info {
470 int irq;
471 int penalty;
472 struct list_head node;
473};
474
475static LIST_HEAD(acpi_irq_penalty_list);
476
477static int acpi_irq_get_penalty(int irq)
478{
479 struct irq_penalty_info *irq_info;
480
481 if (irq < ACPI_MAX_ISA_IRQ)
482 return acpi_irq_isa_penalty[irq];
483
484 list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
485 if (irq_info->irq == irq)
486 return irq_info->penalty;
487 }
488
489 return 0;
490}
491
492static int acpi_irq_set_penalty(int irq, int new_penalty)
493{
494 struct irq_penalty_info *irq_info;
495
496 /* see if this is a ISA IRQ */
497 if (irq < ACPI_MAX_ISA_IRQ) {
498 acpi_irq_isa_penalty[irq] = new_penalty;
499 return 0;
500 }
501
502 /* next, try to locate from the dynamic list */
503 list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
504 if (irq_info->irq == irq) {
505 irq_info->penalty = new_penalty;
506 return 0;
507 }
508 }
509
510 /* nope, let's allocate a slot for this IRQ */
511 irq_info = kzalloc(sizeof(*irq_info), GFP_KERNEL);
512 if (!irq_info)
513 return -ENOMEM;
514
515 irq_info->irq = irq;
516 irq_info->penalty = new_penalty;
517 list_add_tail(&irq_info->node, &acpi_irq_penalty_list);
518
519 return 0;
520}
521
522static void acpi_irq_add_penalty(int irq, int penalty)
523{
524 int curpen = acpi_irq_get_penalty(irq);
525
526 acpi_irq_set_penalty(irq, curpen + penalty);
527}
528
529int __init acpi_irq_penalty_init(void) 470int __init acpi_irq_penalty_init(void)
530{ 471{
531 struct acpi_pci_link *link; 472 struct acpi_pci_link *link;
@@ -546,16 +487,15 @@ int __init acpi_irq_penalty_init(void)
546 link->irq.possible_count; 487 link->irq.possible_count;
547 488
548 for (i = 0; i < link->irq.possible_count; i++) { 489 for (i = 0; i < link->irq.possible_count; i++) {
549 if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) { 490 if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ)
550 int irqpos = link->irq.possible[i]; 491 acpi_irq_penalty[link->irq.
551 492 possible[i]] +=
552 acpi_irq_add_penalty(irqpos, penalty); 493 penalty;
553 }
554 } 494 }
555 495
556 } else if (link->irq.active) { 496 } else if (link->irq.active) {
557 acpi_irq_add_penalty(link->irq.active, 497 acpi_irq_penalty[link->irq.active] +=
558 PIRQ_PENALTY_PCI_POSSIBLE); 498 PIRQ_PENALTY_PCI_POSSIBLE;
559 } 499 }
560 } 500 }
561 501
@@ -607,12 +547,12 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
607 * the use of IRQs 9, 10, 11, and >15. 547 * the use of IRQs 9, 10, 11, and >15.
608 */ 548 */
609 for (i = (link->irq.possible_count - 1); i >= 0; i--) { 549 for (i = (link->irq.possible_count - 1); i >= 0; i--) {
610 if (acpi_irq_get_penalty(irq) > 550 if (acpi_irq_penalty[irq] >
611 acpi_irq_get_penalty(link->irq.possible[i])) 551 acpi_irq_penalty[link->irq.possible[i]])
612 irq = link->irq.possible[i]; 552 irq = link->irq.possible[i];
613 } 553 }
614 } 554 }
615 if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) { 555 if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
616 printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. " 556 printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
617 "Try pci=noacpi or acpi=off\n", 557 "Try pci=noacpi or acpi=off\n",
618 acpi_device_name(link->device), 558 acpi_device_name(link->device),
@@ -628,8 +568,7 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
628 acpi_device_bid(link->device)); 568 acpi_device_bid(link->device));
629 return -ENODEV; 569 return -ENODEV;
630 } else { 570 } else {
631 acpi_irq_add_penalty(link->irq.active, PIRQ_PENALTY_PCI_USING); 571 acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING;
632
633 printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n", 572 printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
634 acpi_device_name(link->device), 573 acpi_device_name(link->device),
635 acpi_device_bid(link->device), link->irq.active); 574 acpi_device_bid(link->device), link->irq.active);
@@ -839,7 +778,7 @@ static void acpi_pci_link_remove(struct acpi_device *device)
839} 778}
840 779
841/* 780/*
842 * modify penalty from cmdline 781 * modify acpi_irq_penalty[] from cmdline
843 */ 782 */
844static int __init acpi_irq_penalty_update(char *str, int used) 783static int __init acpi_irq_penalty_update(char *str, int used)
845{ 784{
@@ -857,10 +796,13 @@ static int __init acpi_irq_penalty_update(char *str, int used)
857 if (irq < 0) 796 if (irq < 0)
858 continue; 797 continue;
859 798
799 if (irq >= ARRAY_SIZE(acpi_irq_penalty))
800 continue;
801
860 if (used) 802 if (used)
861 acpi_irq_add_penalty(irq, PIRQ_PENALTY_ISA_USED); 803 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
862 else 804 else
863 acpi_irq_set_penalty(irq, PIRQ_PENALTY_PCI_AVAILABLE); 805 acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE;
864 806
865 if (retval != 2) /* no next number */ 807 if (retval != 2) /* no next number */
866 break; 808 break;
@@ -877,15 +819,18 @@ static int __init acpi_irq_penalty_update(char *str, int used)
877 */ 819 */
878void acpi_penalize_isa_irq(int irq, int active) 820void acpi_penalize_isa_irq(int irq, int active)
879{ 821{
880 if (irq >= 0) 822 if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
881 acpi_irq_add_penalty(irq, active ? 823 if (active)
882 PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING); 824 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
825 else
826 acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
827 }
883} 828}
884 829
885bool acpi_isa_irq_available(int irq) 830bool acpi_isa_irq_available(int irq)
886{ 831{
887 return irq >= 0 && 832 return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
888 (acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS); 833 acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
889} 834}
890 835
891/* 836/*
@@ -895,18 +840,13 @@ bool acpi_isa_irq_available(int irq)
895 */ 840 */
896void acpi_penalize_sci_irq(int irq, int trigger, int polarity) 841void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
897{ 842{
898 int penalty; 843 if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
899 844 if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
900 if (irq < 0) 845 polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
901 return; 846 acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
902 847 else
903 if (trigger != ACPI_MADT_TRIGGER_LEVEL || 848 acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
904 polarity != ACPI_MADT_POLARITY_ACTIVE_LOW) 849 }
905 penalty = PIRQ_PENALTY_ISA_ALWAYS;
906 else
907 penalty = PIRQ_PENALTY_PCI_USING;
908
909 acpi_irq_add_penalty(irq, penalty);
910} 850}
911 851
912/* 852/*
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a39e85f9efa9..7d00b7a015ea 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
2074 if (get_user(cookie, (binder_uintptr_t __user *)ptr)) 2074 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
2075 return -EFAULT; 2075 return -EFAULT;
2076 2076
2077 ptr += sizeof(void *); 2077 ptr += sizeof(cookie);
2078 list_for_each_entry(w, &proc->delivered_death, entry) { 2078 list_for_each_entry(w, &proc->delivered_death, entry) {
2079 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); 2079 struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
2080 2080
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 594fcabd22cd..146dc0b8ec61 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -264,6 +264,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
264 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */ 264 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
265 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */ 265 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
266 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */ 266 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
267 { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
268 { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
269 { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
270 { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
271 { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
272 { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
273 { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
274 { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
275 { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
276 { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
277 { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
278 { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
279 { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
280 { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
281 { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
282 { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
283 { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
284 { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
285 { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
286 { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
267 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */ 287 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
268 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */ 288 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
269 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */ 289 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
@@ -347,15 +367,21 @@ static const struct pci_device_id ahci_pci_tbl[] = {
347 { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ 367 { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
348 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ 368 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
349 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/ 369 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
370 { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
350 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/ 371 { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
372 { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
351 { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/ 373 { PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
352 { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/ 374 { PCI_VDEVICE(INTEL, 0xa184), board_ahci }, /* Lewisburg RAID*/
353 { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/ 375 { PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
354 { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/ 376 { PCI_VDEVICE(INTEL, 0xa18e), board_ahci }, /* Lewisburg RAID*/
377 { PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
378 { PCI_VDEVICE(INTEL, 0xa1d6), board_ahci }, /* Lewisburg RAID*/
355 { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/ 379 { PCI_VDEVICE(INTEL, 0xa202), board_ahci }, /* Lewisburg AHCI*/
356 { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/ 380 { PCI_VDEVICE(INTEL, 0xa204), board_ahci }, /* Lewisburg RAID*/
357 { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/ 381 { PCI_VDEVICE(INTEL, 0xa206), board_ahci }, /* Lewisburg RAID*/
358 { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/ 382 { PCI_VDEVICE(INTEL, 0xa20e), board_ahci }, /* Lewisburg RAID*/
383 { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
384 { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
359 385
360 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 386 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
361 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 387 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1305,6 +1331,44 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host)
1305{} 1331{}
1306#endif 1332#endif
1307 1333
1334#ifdef CONFIG_ARM64
1335/*
1336 * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently.
1337 * Workaround is to make sure all pending IRQs are served before leaving
1338 * handler.
1339 */
1340static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
1341{
1342 struct ata_host *host = dev_instance;
1343 struct ahci_host_priv *hpriv;
1344 unsigned int rc = 0;
1345 void __iomem *mmio;
1346 u32 irq_stat, irq_masked;
1347 unsigned int handled = 1;
1348
1349 VPRINTK("ENTER\n");
1350 hpriv = host->private_data;
1351 mmio = hpriv->mmio;
1352 irq_stat = readl(mmio + HOST_IRQ_STAT);
1353 if (!irq_stat)
1354 return IRQ_NONE;
1355
1356 do {
1357 irq_masked = irq_stat & hpriv->port_map;
1358 spin_lock(&host->lock);
1359 rc = ahci_handle_port_intr(host, irq_masked);
1360 if (!rc)
1361 handled = 0;
1362 writel(irq_stat, mmio + HOST_IRQ_STAT);
1363 irq_stat = readl(mmio + HOST_IRQ_STAT);
1364 spin_unlock(&host->lock);
1365 } while (irq_stat);
1366 VPRINTK("EXIT\n");
1367
1368 return IRQ_RETVAL(handled);
1369}
1370#endif
1371
1308/* 1372/*
1309 * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer 1373 * ahci_init_msix() - optionally enable per-port MSI-X otherwise defer
1310 * to single msi. 1374 * to single msi.
@@ -1540,6 +1604,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1540 if (ahci_broken_devslp(pdev)) 1604 if (ahci_broken_devslp(pdev))
1541 hpriv->flags |= AHCI_HFLAG_NO_DEVSLP; 1605 hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
1542 1606
1607#ifdef CONFIG_ARM64
1608 if (pdev->vendor == 0x177d && pdev->device == 0xa01c)
1609 hpriv->irq_handler = ahci_thunderx_irq_handler;
1610#endif
1611
1543 /* save initial config */ 1612 /* save initial config */
1544 ahci_pci_save_initial_config(pdev, hpriv); 1613 ahci_pci_save_initial_config(pdev, hpriv);
1545 1614
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index a4faa438889c..167ba7e3b92e 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -240,8 +240,7 @@ enum {
240 error-handling stage) */ 240 error-handling stage) */
241 AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */ 241 AHCI_HFLAG_NO_DEVSLP = (1 << 17), /* no device sleep */
242 AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */ 242 AHCI_HFLAG_NO_FBS = (1 << 18), /* no FBS */
243 AHCI_HFLAG_EDGE_IRQ = (1 << 19), /* HOST_IRQ_STAT behaves as 243
244 Edge Triggered */
245#ifdef CONFIG_PCI_MSI 244#ifdef CONFIG_PCI_MSI
246 AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */ 245 AHCI_HFLAG_MULTI_MSI = (1 << 20), /* multiple PCI MSIs */
247 AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */ 246 AHCI_HFLAG_MULTI_MSIX = (1 << 21), /* per-port MSI-X */
@@ -250,6 +249,7 @@ enum {
250 AHCI_HFLAG_MULTI_MSI = 0, 249 AHCI_HFLAG_MULTI_MSI = 0,
251 AHCI_HFLAG_MULTI_MSIX = 0, 250 AHCI_HFLAG_MULTI_MSIX = 0,
252#endif 251#endif
252 AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */
253 253
254 /* ap->flags bits */ 254 /* ap->flags bits */
255 255
@@ -360,6 +360,7 @@ struct ahci_host_priv {
360 * be overridden anytime before the host is activated. 360 * be overridden anytime before the host is activated.
361 */ 361 */
362 void (*start_engine)(struct ata_port *ap); 362 void (*start_engine)(struct ata_port *ap);
363 irqreturn_t (*irq_handler)(int irq, void *dev_instance);
363}; 364};
364 365
365#ifdef CONFIG_PCI_MSI 366#ifdef CONFIG_PCI_MSI
@@ -423,6 +424,7 @@ int ahci_reset_em(struct ata_host *host);
423void ahci_print_info(struct ata_host *host, const char *scc_s); 424void ahci_print_info(struct ata_host *host, const char *scc_s);
424int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht); 425int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht);
425void ahci_error_handler(struct ata_port *ap); 426void ahci_error_handler(struct ata_port *ap);
427u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked);
426 428
427static inline void __iomem *__ahci_port_base(struct ata_host *host, 429static inline void __iomem *__ahci_port_base(struct ata_host *host,
428 unsigned int port_no) 430 unsigned int port_no)
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c
index b36cae2fd04b..e87bcec0fd7c 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -317,6 +317,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
317 if (IS_ERR(hpriv)) 317 if (IS_ERR(hpriv))
318 return PTR_ERR(hpriv); 318 return PTR_ERR(hpriv);
319 hpriv->plat_data = priv; 319 hpriv->plat_data = priv;
320 hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
320 321
321 brcm_sata_alpm_init(hpriv); 322 brcm_sata_alpm_init(hpriv);
322 323
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index e2c6d9e0c5ac..8e3f7faf00d3 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -548,6 +548,88 @@ softreset_retry:
548 return rc; 548 return rc;
549} 549}
550 550
551/**
552 * xgene_ahci_handle_broken_edge_irq - Handle the broken irq.
553 * @ata_host: Host that recieved the irq
554 * @irq_masked: HOST_IRQ_STAT value
555 *
556 * For hardware with broken edge trigger latch
557 * the HOST_IRQ_STAT register misses the edge interrupt
558 * when clearing of HOST_IRQ_STAT register and hardware
559 * reporting the PORT_IRQ_STAT register at the
560 * same clock cycle.
561 * As such, the algorithm below outlines the workaround.
562 *
563 * 1. Read HOST_IRQ_STAT register and save the state.
564 * 2. Clear the HOST_IRQ_STAT register.
565 * 3. Read back the HOST_IRQ_STAT register.
566 * 4. If HOST_IRQ_STAT register equals to zero, then
567 * traverse the rest of port's PORT_IRQ_STAT register
568 * to check if an interrupt is triggered at that point else
569 * go to step 6.
570 * 5. If PORT_IRQ_STAT register of rest ports is not equal to zero
571 * then update the state of HOST_IRQ_STAT saved in step 1.
572 * 6. Handle port interrupts.
573 * 7. Exit
574 */
575static int xgene_ahci_handle_broken_edge_irq(struct ata_host *host,
576 u32 irq_masked)
577{
578 struct ahci_host_priv *hpriv = host->private_data;
579 void __iomem *port_mmio;
580 int i;
581
582 if (!readl(hpriv->mmio + HOST_IRQ_STAT)) {
583 for (i = 0; i < host->n_ports; i++) {
584 if (irq_masked & (1 << i))
585 continue;
586
587 port_mmio = ahci_port_base(host->ports[i]);
588 if (readl(port_mmio + PORT_IRQ_STAT))
589 irq_masked |= (1 << i);
590 }
591 }
592
593 return ahci_handle_port_intr(host, irq_masked);
594}
595
596static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
597{
598 struct ata_host *host = dev_instance;
599 struct ahci_host_priv *hpriv;
600 unsigned int rc = 0;
601 void __iomem *mmio;
602 u32 irq_stat, irq_masked;
603
604 VPRINTK("ENTER\n");
605
606 hpriv = host->private_data;
607 mmio = hpriv->mmio;
608
609 /* sigh. 0xffffffff is a valid return from h/w */
610 irq_stat = readl(mmio + HOST_IRQ_STAT);
611 if (!irq_stat)
612 return IRQ_NONE;
613
614 irq_masked = irq_stat & hpriv->port_map;
615
616 spin_lock(&host->lock);
617
618 /*
619 * HOST_IRQ_STAT behaves as edge triggered latch meaning that
620 * it should be cleared before all the port events are cleared.
621 */
622 writel(irq_stat, mmio + HOST_IRQ_STAT);
623
624 rc = xgene_ahci_handle_broken_edge_irq(host, irq_masked);
625
626 spin_unlock(&host->lock);
627
628 VPRINTK("EXIT\n");
629
630 return IRQ_RETVAL(rc);
631}
632
551static struct ata_port_operations xgene_ahci_v1_ops = { 633static struct ata_port_operations xgene_ahci_v1_ops = {
552 .inherits = &ahci_ops, 634 .inherits = &ahci_ops,
553 .host_stop = xgene_ahci_host_stop, 635 .host_stop = xgene_ahci_host_stop,
@@ -779,7 +861,8 @@ skip_clk_phy:
779 hpriv->flags = AHCI_HFLAG_NO_NCQ; 861 hpriv->flags = AHCI_HFLAG_NO_NCQ;
780 break; 862 break;
781 case XGENE_AHCI_V2: 863 case XGENE_AHCI_V2:
782 hpriv->flags |= AHCI_HFLAG_YES_FBS | AHCI_HFLAG_EDGE_IRQ; 864 hpriv->flags |= AHCI_HFLAG_YES_FBS;
865 hpriv->irq_handler = xgene_ahci_irq_intr;
783 break; 866 break;
784 default: 867 default:
785 break; 868 break;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index d61740e78d6d..85ea5142a095 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -113,6 +113,7 @@ static ssize_t ahci_store_em_buffer(struct device *dev,
113 const char *buf, size_t size); 113 const char *buf, size_t size);
114static ssize_t ahci_show_em_supported(struct device *dev, 114static ssize_t ahci_show_em_supported(struct device *dev,
115 struct device_attribute *attr, char *buf); 115 struct device_attribute *attr, char *buf);
116static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance);
116 117
117static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL); 118static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
118static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL); 119static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
@@ -496,8 +497,8 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
496 } 497 }
497 } 498 }
498 499
499 /* fabricate port_map from cap.nr_ports */ 500 /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
500 if (!port_map) { 501 if (!port_map && vers < 0x10300) {
501 port_map = (1 << ahci_nr_ports(cap)) - 1; 502 port_map = (1 << ahci_nr_ports(cap)) - 1;
502 dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map); 503 dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
503 504
@@ -512,6 +513,9 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
512 513
513 if (!hpriv->start_engine) 514 if (!hpriv->start_engine)
514 hpriv->start_engine = ahci_start_engine; 515 hpriv->start_engine = ahci_start_engine;
516
517 if (!hpriv->irq_handler)
518 hpriv->irq_handler = ahci_single_level_irq_intr;
515} 519}
516EXPORT_SYMBOL_GPL(ahci_save_initial_config); 520EXPORT_SYMBOL_GPL(ahci_save_initial_config);
517 521
@@ -593,8 +597,22 @@ EXPORT_SYMBOL_GPL(ahci_start_engine);
593int ahci_stop_engine(struct ata_port *ap) 597int ahci_stop_engine(struct ata_port *ap)
594{ 598{
595 void __iomem *port_mmio = ahci_port_base(ap); 599 void __iomem *port_mmio = ahci_port_base(ap);
600 struct ahci_host_priv *hpriv = ap->host->private_data;
596 u32 tmp; 601 u32 tmp;
597 602
603 /*
604 * On some controllers, stopping a port's DMA engine while the port
605 * is in ALPM state (partial or slumber) results in failures on
606 * subsequent DMA engine starts. For those controllers, put the
607 * port back in active state before stopping its DMA engine.
608 */
609 if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
610 (ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
611 ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
612 dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
613 return -EIO;
614 }
615
598 tmp = readl(port_mmio + PORT_CMD); 616 tmp = readl(port_mmio + PORT_CMD);
599 617
600 /* check if the HBA is idle */ 618 /* check if the HBA is idle */
@@ -689,6 +707,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
689 void __iomem *port_mmio = ahci_port_base(ap); 707 void __iomem *port_mmio = ahci_port_base(ap);
690 708
691 if (policy != ATA_LPM_MAX_POWER) { 709 if (policy != ATA_LPM_MAX_POWER) {
710 /* wakeup flag only applies to the max power policy */
711 hints &= ~ATA_LPM_WAKE_ONLY;
712
692 /* 713 /*
693 * Disable interrupts on Phy Ready. This keeps us from 714 * Disable interrupts on Phy Ready. This keeps us from
694 * getting woken up due to spurious phy ready 715 * getting woken up due to spurious phy ready
@@ -704,7 +725,8 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
704 u32 cmd = readl(port_mmio + PORT_CMD); 725 u32 cmd = readl(port_mmio + PORT_CMD);
705 726
706 if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) { 727 if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
707 cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE); 728 if (!(hints & ATA_LPM_WAKE_ONLY))
729 cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
708 cmd |= PORT_CMD_ICC_ACTIVE; 730 cmd |= PORT_CMD_ICC_ACTIVE;
709 731
710 writel(cmd, port_mmio + PORT_CMD); 732 writel(cmd, port_mmio + PORT_CMD);
@@ -712,6 +734,9 @@ static int ahci_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
712 734
713 /* wait 10ms to be sure we've come out of LPM state */ 735 /* wait 10ms to be sure we've come out of LPM state */
714 ata_msleep(ap, 10); 736 ata_msleep(ap, 10);
737
738 if (hints & ATA_LPM_WAKE_ONLY)
739 return 0;
715 } else { 740 } else {
716 cmd |= PORT_CMD_ALPE; 741 cmd |= PORT_CMD_ALPE;
717 if (policy == ATA_LPM_MIN_POWER) 742 if (policy == ATA_LPM_MIN_POWER)
@@ -1143,8 +1168,7 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
1143 1168
1144 /* mark esata ports */ 1169 /* mark esata ports */
1145 tmp = readl(port_mmio + PORT_CMD); 1170 tmp = readl(port_mmio + PORT_CMD);
1146 if ((tmp & PORT_CMD_HPCP) || 1171 if ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS))
1147 ((tmp & PORT_CMD_ESP) && (hpriv->cap & HOST_CAP_SXS)))
1148 ap->pflags |= ATA_PFLAG_EXTERNAL; 1172 ap->pflags |= ATA_PFLAG_EXTERNAL;
1149} 1173}
1150 1174
@@ -1825,7 +1849,7 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
1825 return IRQ_HANDLED; 1849 return IRQ_HANDLED;
1826} 1850}
1827 1851
1828static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked) 1852u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
1829{ 1853{
1830 unsigned int i, handled = 0; 1854 unsigned int i, handled = 0;
1831 1855
@@ -1851,43 +1875,7 @@ static u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
1851 1875
1852 return handled; 1876 return handled;
1853} 1877}
1854 1878EXPORT_SYMBOL_GPL(ahci_handle_port_intr);
1855static irqreturn_t ahci_single_edge_irq_intr(int irq, void *dev_instance)
1856{
1857 struct ata_host *host = dev_instance;
1858 struct ahci_host_priv *hpriv;
1859 unsigned int rc = 0;
1860 void __iomem *mmio;
1861 u32 irq_stat, irq_masked;
1862
1863 VPRINTK("ENTER\n");
1864
1865 hpriv = host->private_data;
1866 mmio = hpriv->mmio;
1867
1868 /* sigh. 0xffffffff is a valid return from h/w */
1869 irq_stat = readl(mmio + HOST_IRQ_STAT);
1870 if (!irq_stat)
1871 return IRQ_NONE;
1872
1873 irq_masked = irq_stat & hpriv->port_map;
1874
1875 spin_lock(&host->lock);
1876
1877 /*
1878 * HOST_IRQ_STAT behaves as edge triggered latch meaning that
1879 * it should be cleared before all the port events are cleared.
1880 */
1881 writel(irq_stat, mmio + HOST_IRQ_STAT);
1882
1883 rc = ahci_handle_port_intr(host, irq_masked);
1884
1885 spin_unlock(&host->lock);
1886
1887 VPRINTK("EXIT\n");
1888
1889 return IRQ_RETVAL(rc);
1890}
1891 1879
1892static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance) 1880static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
1893{ 1881{
@@ -2514,14 +2502,18 @@ int ahci_host_activate(struct ata_host *host, struct scsi_host_template *sht)
2514 int irq = hpriv->irq; 2502 int irq = hpriv->irq;
2515 int rc; 2503 int rc;
2516 2504
2517 if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) 2505 if (hpriv->flags & (AHCI_HFLAG_MULTI_MSI | AHCI_HFLAG_MULTI_MSIX)) {
2506 if (hpriv->irq_handler)
2507 dev_warn(host->dev, "both AHCI_HFLAG_MULTI_MSI flag set \
2508 and custom irq handler implemented\n");
2509
2518 rc = ahci_host_activate_multi_irqs(host, sht); 2510 rc = ahci_host_activate_multi_irqs(host, sht);
2519 else if (hpriv->flags & AHCI_HFLAG_EDGE_IRQ) 2511 } else {
2520 rc = ata_host_activate(host, irq, ahci_single_edge_irq_intr, 2512 rc = ata_host_activate(host, irq, hpriv->irq_handler,
2521 IRQF_SHARED, sht);
2522 else
2523 rc = ata_host_activate(host, irq, ahci_single_level_irq_intr,
2524 IRQF_SHARED, sht); 2513 IRQF_SHARED, sht);
2514 }
2515
2516
2525 return rc; 2517 return rc;
2526} 2518}
2527EXPORT_SYMBOL_GPL(ahci_host_activate); 2519EXPORT_SYMBOL_GPL(ahci_host_activate);
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index cbb74719d2c1..55e257c268dd 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4125,6 +4125,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4125 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4125 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4126 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4126 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4127 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4127 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
4128 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
4128 /* Odd clown on sil3726/4726 PMPs */ 4129 /* Odd clown on sil3726/4726 PMPs */
4129 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4130 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
4130 4131
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 7e959f90c020..e417e1a1d02c 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -675,19 +675,18 @@ static int ata_ioc32(struct ata_port *ap)
675int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev, 675int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *scsidev,
676 int cmd, void __user *arg) 676 int cmd, void __user *arg)
677{ 677{
678 int val = -EINVAL, rc = -EINVAL; 678 unsigned long val;
679 int rc = -EINVAL;
679 unsigned long flags; 680 unsigned long flags;
680 681
681 switch (cmd) { 682 switch (cmd) {
682 case ATA_IOC_GET_IO32: 683 case HDIO_GET_32BIT:
683 spin_lock_irqsave(ap->lock, flags); 684 spin_lock_irqsave(ap->lock, flags);
684 val = ata_ioc32(ap); 685 val = ata_ioc32(ap);
685 spin_unlock_irqrestore(ap->lock, flags); 686 spin_unlock_irqrestore(ap->lock, flags);
686 if (copy_to_user(arg, &val, 1)) 687 return put_user(val, (unsigned long __user *)arg);
687 return -EFAULT;
688 return 0;
689 688
690 case ATA_IOC_SET_IO32: 689 case HDIO_SET_32BIT:
691 val = (unsigned long) arg; 690 val = (unsigned long) arg;
692 rc = 0; 691 rc = 0;
693 spin_lock_irqsave(ap->lock, flags); 692 spin_lock_irqsave(ap->lock, flags);
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index cdf6215a9a22..051b6158d1b7 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
997static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) 997static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
998{ 998{
999 struct ata_port *ap = qc->ap; 999 struct ata_port *ap = qc->ap;
1000 unsigned long flags;
1001 1000
1002 if (ap->ops->error_handler) { 1001 if (ap->ops->error_handler) {
1003 if (in_wq) { 1002 if (in_wq) {
1004 spin_lock_irqsave(ap->lock, flags);
1005
1006 /* EH might have kicked in while host lock is 1003 /* EH might have kicked in while host lock is
1007 * released. 1004 * released.
1008 */ 1005 */
@@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1014 } else 1011 } else
1015 ata_port_freeze(ap); 1012 ata_port_freeze(ap);
1016 } 1013 }
1017
1018 spin_unlock_irqrestore(ap->lock, flags);
1019 } else { 1014 } else {
1020 if (likely(!(qc->err_mask & AC_ERR_HSM))) 1015 if (likely(!(qc->err_mask & AC_ERR_HSM)))
1021 ata_qc_complete(qc); 1016 ata_qc_complete(qc);
@@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1024 } 1019 }
1025 } else { 1020 } else {
1026 if (in_wq) { 1021 if (in_wq) {
1027 spin_lock_irqsave(ap->lock, flags);
1028 ata_sff_irq_on(ap); 1022 ata_sff_irq_on(ap);
1029 ata_qc_complete(qc); 1023 ata_qc_complete(qc);
1030 spin_unlock_irqrestore(ap->lock, flags);
1031 } else 1024 } else
1032 ata_qc_complete(qc); 1025 ata_qc_complete(qc);
1033 } 1026 }
@@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1048{ 1041{
1049 struct ata_link *link = qc->dev->link; 1042 struct ata_link *link = qc->dev->link;
1050 struct ata_eh_info *ehi = &link->eh_info; 1043 struct ata_eh_info *ehi = &link->eh_info;
1051 unsigned long flags = 0;
1052 int poll_next; 1044 int poll_next;
1053 1045
1046 lockdep_assert_held(ap->lock);
1047
1054 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0); 1048 WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1055 1049
1056 /* Make sure ata_sff_qc_issue() does not throw things 1050 /* Make sure ata_sff_qc_issue() does not throw things
@@ -1112,14 +1106,6 @@ fsm_start:
1112 } 1106 }
1113 } 1107 }
1114 1108
1115 /* Send the CDB (atapi) or the first data block (ata pio out).
1116 * During the state transition, interrupt handler shouldn't
1117 * be invoked before the data transfer is complete and
1118 * hsm_task_state is changed. Hence, the following locking.
1119 */
1120 if (in_wq)
1121 spin_lock_irqsave(ap->lock, flags);
1122
1123 if (qc->tf.protocol == ATA_PROT_PIO) { 1109 if (qc->tf.protocol == ATA_PROT_PIO) {
1124 /* PIO data out protocol. 1110 /* PIO data out protocol.
1125 * send first data block. 1111 * send first data block.
@@ -1135,9 +1121,6 @@ fsm_start:
1135 /* send CDB */ 1121 /* send CDB */
1136 atapi_send_cdb(ap, qc); 1122 atapi_send_cdb(ap, qc);
1137 1123
1138 if (in_wq)
1139 spin_unlock_irqrestore(ap->lock, flags);
1140
1141 /* if polling, ata_sff_pio_task() handles the rest. 1124 /* if polling, ata_sff_pio_task() handles the rest.
1142 * otherwise, interrupt handler takes over from here. 1125 * otherwise, interrupt handler takes over from here.
1143 */ 1126 */
@@ -1296,7 +1279,8 @@ fsm_start:
1296 break; 1279 break;
1297 default: 1280 default:
1298 poll_next = 0; 1281 poll_next = 0;
1299 BUG(); 1282 WARN(true, "ata%d: SFF host state machine in invalid state %d",
1283 ap->print_id, ap->hsm_task_state);
1300 } 1284 }
1301 1285
1302 return poll_next; 1286 return poll_next;
@@ -1361,12 +1345,14 @@ static void ata_sff_pio_task(struct work_struct *work)
1361 u8 status; 1345 u8 status;
1362 int poll_next; 1346 int poll_next;
1363 1347
1348 spin_lock_irq(ap->lock);
1349
1364 BUG_ON(ap->sff_pio_task_link == NULL); 1350 BUG_ON(ap->sff_pio_task_link == NULL);
1365 /* qc can be NULL if timeout occurred */ 1351 /* qc can be NULL if timeout occurred */
1366 qc = ata_qc_from_tag(ap, link->active_tag); 1352 qc = ata_qc_from_tag(ap, link->active_tag);
1367 if (!qc) { 1353 if (!qc) {
1368 ap->sff_pio_task_link = NULL; 1354 ap->sff_pio_task_link = NULL;
1369 return; 1355 goto out_unlock;
1370 } 1356 }
1371 1357
1372fsm_start: 1358fsm_start:
@@ -1381,11 +1367,14 @@ fsm_start:
1381 */ 1367 */
1382 status = ata_sff_busy_wait(ap, ATA_BUSY, 5); 1368 status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1383 if (status & ATA_BUSY) { 1369 if (status & ATA_BUSY) {
1370 spin_unlock_irq(ap->lock);
1384 ata_msleep(ap, 2); 1371 ata_msleep(ap, 2);
1372 spin_lock_irq(ap->lock);
1373
1385 status = ata_sff_busy_wait(ap, ATA_BUSY, 10); 1374 status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1386 if (status & ATA_BUSY) { 1375 if (status & ATA_BUSY) {
1387 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE); 1376 ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1388 return; 1377 goto out_unlock;
1389 } 1378 }
1390 } 1379 }
1391 1380
@@ -1402,6 +1391,8 @@ fsm_start:
1402 */ 1391 */
1403 if (poll_next) 1392 if (poll_next)
1404 goto fsm_start; 1393 goto fsm_start;
1394out_unlock:
1395 spin_unlock_irq(ap->lock);
1405} 1396}
1406 1397
1407/** 1398/**
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 12fe0f3bb7e9..c8b6a780a290 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -32,6 +32,8 @@
32#include <linux/libata.h> 32#include <linux/libata.h>
33#include <scsi/scsi_host.h> 33#include <scsi/scsi_host.h>
34 34
35#include <asm/mach-rc32434/rb.h>
36
35#define DRV_NAME "pata-rb532-cf" 37#define DRV_NAME "pata-rb532-cf"
36#define DRV_VERSION "0.1.0" 38#define DRV_VERSION "0.1.0"
37#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash" 39#define DRV_DESC "PATA driver for RouterBOARD 532 Compact Flash"
@@ -107,6 +109,7 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
107 int gpio; 109 int gpio;
108 struct resource *res; 110 struct resource *res;
109 struct ata_host *ah; 111 struct ata_host *ah;
112 struct cf_device *pdata;
110 struct rb532_cf_info *info; 113 struct rb532_cf_info *info;
111 int ret; 114 int ret;
112 115
@@ -122,7 +125,13 @@ static int rb532_pata_driver_probe(struct platform_device *pdev)
122 return -ENOENT; 125 return -ENOENT;
123 } 126 }
124 127
125 gpio = irq_to_gpio(irq); 128 pdata = dev_get_platdata(&pdev->dev);
129 if (!pdata) {
130 dev_err(&pdev->dev, "no platform data specified\n");
131 return -EINVAL;
132 }
133
134 gpio = pdata->gpio_pin;
126 if (gpio < 0) { 135 if (gpio < 0) {
127 dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq); 136 dev_err(&pdev->dev, "no GPIO found for irq%d\n", irq);
128 return -ENOENT; 137 return -ENOENT;
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 89f5cf68d80a..04a1582e80bb 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -206,6 +206,8 @@ static void component_match_release(struct device *master,
206 if (mc->release) 206 if (mc->release)
207 mc->release(master, mc->data); 207 mc->release(master, mc->data);
208 } 208 }
209
210 kfree(match->compare);
209} 211}
210 212
211static void devm_component_match_release(struct device *dev, void *res) 213static void devm_component_match_release(struct device *dev, void *res)
@@ -221,14 +223,14 @@ static int component_match_realloc(struct device *dev,
221 if (match->alloc == num) 223 if (match->alloc == num)
222 return 0; 224 return 0;
223 225
224 new = devm_kmalloc_array(dev, num, sizeof(*new), GFP_KERNEL); 226 new = kmalloc_array(num, sizeof(*new), GFP_KERNEL);
225 if (!new) 227 if (!new)
226 return -ENOMEM; 228 return -ENOMEM;
227 229
228 if (match->compare) { 230 if (match->compare) {
229 memcpy(new, match->compare, sizeof(*new) * 231 memcpy(new, match->compare, sizeof(*new) *
230 min(match->num, num)); 232 min(match->num, num));
231 devm_kfree(dev, match->compare); 233 kfree(match->compare);
232 } 234 }
233 match->compare = new; 235 match->compare = new;
234 match->alloc = num; 236 match->alloc = num;
@@ -283,6 +285,24 @@ void component_match_add_release(struct device *master,
283} 285}
284EXPORT_SYMBOL(component_match_add_release); 286EXPORT_SYMBOL(component_match_add_release);
285 287
288static void free_master(struct master *master)
289{
290 struct component_match *match = master->match;
291 int i;
292
293 list_del(&master->node);
294
295 if (match) {
296 for (i = 0; i < match->num; i++) {
297 struct component *c = match->compare[i].component;
298 if (c)
299 c->master = NULL;
300 }
301 }
302
303 kfree(master);
304}
305
286int component_master_add_with_match(struct device *dev, 306int component_master_add_with_match(struct device *dev,
287 const struct component_master_ops *ops, 307 const struct component_master_ops *ops,
288 struct component_match *match) 308 struct component_match *match)
@@ -309,11 +329,9 @@ int component_master_add_with_match(struct device *dev,
309 329
310 ret = try_to_bring_up_master(master, NULL); 330 ret = try_to_bring_up_master(master, NULL);
311 331
312 if (ret < 0) { 332 if (ret < 0)
313 /* Delete off the list if we weren't successful */ 333 free_master(master);
314 list_del(&master->node); 334
315 kfree(master);
316 }
317 mutex_unlock(&component_mutex); 335 mutex_unlock(&component_mutex);
318 336
319 return ret < 0 ? ret : 0; 337 return ret < 0 ? ret : 0;
@@ -324,25 +342,12 @@ void component_master_del(struct device *dev,
324 const struct component_master_ops *ops) 342 const struct component_master_ops *ops)
325{ 343{
326 struct master *master; 344 struct master *master;
327 int i;
328 345
329 mutex_lock(&component_mutex); 346 mutex_lock(&component_mutex);
330 master = __master_find(dev, ops); 347 master = __master_find(dev, ops);
331 if (master) { 348 if (master) {
332 struct component_match *match = master->match;
333
334 take_down_master(master); 349 take_down_master(master);
335 350 free_master(master);
336 list_del(&master->node);
337
338 if (match) {
339 for (i = 0; i < match->num; i++) {
340 struct component *c = match->compare[i].component;
341 if (c)
342 c->master = NULL;
343 }
344 }
345 kfree(master);
346 } 351 }
347 mutex_unlock(&component_mutex); 352 mutex_unlock(&component_mutex);
348} 353}
@@ -486,6 +491,8 @@ int component_add(struct device *dev, const struct component_ops *ops)
486 491
487 ret = try_to_bring_up_masters(component); 492 ret = try_to_bring_up_masters(component);
488 if (ret < 0) { 493 if (ret < 0) {
494 if (component->master)
495 remove_component(component->master, component);
489 list_del(&component->node); 496 list_del(&component->node);
490 497
491 kfree(component); 498 kfree(component);
diff --git a/drivers/base/property.c b/drivers/base/property.c
index c359351d50f1..a163f2c59aa3 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -218,7 +218,7 @@ bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname)
218 bool ret; 218 bool ret;
219 219
220 ret = __fwnode_property_present(fwnode, propname); 220 ret = __fwnode_property_present(fwnode, propname);
221 if (ret == false && fwnode && fwnode->secondary) 221 if (ret == false && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
222 ret = __fwnode_property_present(fwnode->secondary, propname); 222 ret = __fwnode_property_present(fwnode->secondary, propname);
223 return ret; 223 return ret;
224} 224}
@@ -423,7 +423,7 @@ EXPORT_SYMBOL_GPL(device_property_match_string);
423 int _ret_; \ 423 int _ret_; \
424 _ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, \ 424 _ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, \
425 _val_, _nval_); \ 425 _val_, _nval_); \
426 if (_ret_ == -EINVAL && _fwnode_ && _fwnode_->secondary) \ 426 if (_ret_ == -EINVAL && _fwnode_ && !IS_ERR_OR_NULL(_fwnode_->secondary)) \
427 _ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_, \ 427 _ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_, \
428 _proptype_, _val_, _nval_); \ 428 _proptype_, _val_, _nval_); \
429 _ret_; \ 429 _ret_; \
@@ -593,7 +593,7 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode,
593 int ret; 593 int ret;
594 594
595 ret = __fwnode_property_read_string_array(fwnode, propname, val, nval); 595 ret = __fwnode_property_read_string_array(fwnode, propname, val, nval);
596 if (ret == -EINVAL && fwnode && fwnode->secondary) 596 if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
597 ret = __fwnode_property_read_string_array(fwnode->secondary, 597 ret = __fwnode_property_read_string_array(fwnode->secondary,
598 propname, val, nval); 598 propname, val, nval);
599 return ret; 599 return ret;
@@ -621,7 +621,7 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
621 int ret; 621 int ret;
622 622
623 ret = __fwnode_property_read_string(fwnode, propname, val); 623 ret = __fwnode_property_read_string(fwnode, propname, val);
624 if (ret == -EINVAL && fwnode && fwnode->secondary) 624 if (ret == -EINVAL && fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
625 ret = __fwnode_property_read_string(fwnode->secondary, 625 ret = __fwnode_property_read_string(fwnode->secondary,
626 propname, val); 626 propname, val);
627 return ret; 627 return ret;
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 8812bfb9e3b8..eea51569f0eb 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -133,17 +133,17 @@ static int regmap_mmio_gather_write(void *context,
133 while (val_size) { 133 while (val_size) {
134 switch (ctx->val_bytes) { 134 switch (ctx->val_bytes) {
135 case 1: 135 case 1:
136 __raw_writeb(*(u8 *)val, ctx->regs + offset); 136 writeb(*(u8 *)val, ctx->regs + offset);
137 break; 137 break;
138 case 2: 138 case 2:
139 __raw_writew(*(u16 *)val, ctx->regs + offset); 139 writew(*(u16 *)val, ctx->regs + offset);
140 break; 140 break;
141 case 4: 141 case 4:
142 __raw_writel(*(u32 *)val, ctx->regs + offset); 142 writel(*(u32 *)val, ctx->regs + offset);
143 break; 143 break;
144#ifdef CONFIG_64BIT 144#ifdef CONFIG_64BIT
145 case 8: 145 case 8:
146 __raw_writeq(*(u64 *)val, ctx->regs + offset); 146 writeq(*(u64 *)val, ctx->regs + offset);
147 break; 147 break;
148#endif 148#endif
149 default: 149 default:
@@ -193,17 +193,17 @@ static int regmap_mmio_read(void *context,
193 while (val_size) { 193 while (val_size) {
194 switch (ctx->val_bytes) { 194 switch (ctx->val_bytes) {
195 case 1: 195 case 1:
196 *(u8 *)val = __raw_readb(ctx->regs + offset); 196 *(u8 *)val = readb(ctx->regs + offset);
197 break; 197 break;
198 case 2: 198 case 2:
199 *(u16 *)val = __raw_readw(ctx->regs + offset); 199 *(u16 *)val = readw(ctx->regs + offset);
200 break; 200 break;
201 case 4: 201 case 4:
202 *(u32 *)val = __raw_readl(ctx->regs + offset); 202 *(u32 *)val = readl(ctx->regs + offset);
203 break; 203 break;
204#ifdef CONFIG_64BIT 204#ifdef CONFIG_64BIT
205 case 8: 205 case 8:
206 *(u64 *)val = __raw_readq(ctx->regs + offset); 206 *(u64 *)val = readq(ctx->regs + offset);
207 break; 207 break;
208#endif 208#endif
209 default: 209 default:
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9e251201dd48..84708a5f8c52 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -866,7 +866,7 @@ static void set_fdc(int drive)
866} 866}
867 867
868/* locks the driver */ 868/* locks the driver */
869static int lock_fdc(int drive, bool interruptible) 869static int lock_fdc(int drive)
870{ 870{
871 if (WARN(atomic_read(&usage_count) == 0, 871 if (WARN(atomic_read(&usage_count) == 0,
872 "Trying to lock fdc while usage count=0\n")) 872 "Trying to lock fdc while usage count=0\n"))
@@ -2173,7 +2173,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
2173{ 2173{
2174 int ret; 2174 int ret;
2175 2175
2176 if (lock_fdc(drive, true)) 2176 if (lock_fdc(drive))
2177 return -EINTR; 2177 return -EINTR;
2178 2178
2179 set_floppy(drive); 2179 set_floppy(drive);
@@ -2960,7 +2960,7 @@ static int user_reset_fdc(int drive, int arg, bool interruptible)
2960{ 2960{
2961 int ret; 2961 int ret;
2962 2962
2963 if (lock_fdc(drive, interruptible)) 2963 if (lock_fdc(drive))
2964 return -EINTR; 2964 return -EINTR;
2965 2965
2966 if (arg == FD_RESET_ALWAYS) 2966 if (arg == FD_RESET_ALWAYS)
@@ -3243,7 +3243,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
3243 if (!capable(CAP_SYS_ADMIN)) 3243 if (!capable(CAP_SYS_ADMIN))
3244 return -EPERM; 3244 return -EPERM;
3245 mutex_lock(&open_lock); 3245 mutex_lock(&open_lock);
3246 if (lock_fdc(drive, true)) { 3246 if (lock_fdc(drive)) {
3247 mutex_unlock(&open_lock); 3247 mutex_unlock(&open_lock);
3248 return -EINTR; 3248 return -EINTR;
3249 } 3249 }
@@ -3263,7 +3263,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
3263 } else { 3263 } else {
3264 int oldStretch; 3264 int oldStretch;
3265 3265
3266 if (lock_fdc(drive, true)) 3266 if (lock_fdc(drive))
3267 return -EINTR; 3267 return -EINTR;
3268 if (cmd != FDDEFPRM) { 3268 if (cmd != FDDEFPRM) {
3269 /* notice a disk change immediately, else 3269 /* notice a disk change immediately, else
@@ -3349,7 +3349,7 @@ static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
3349 if (type) 3349 if (type)
3350 *g = &floppy_type[type]; 3350 *g = &floppy_type[type];
3351 else { 3351 else {
3352 if (lock_fdc(drive, false)) 3352 if (lock_fdc(drive))
3353 return -EINTR; 3353 return -EINTR;
3354 if (poll_drive(false, 0) == -EINTR) 3354 if (poll_drive(false, 0) == -EINTR)
3355 return -EINTR; 3355 return -EINTR;
@@ -3433,7 +3433,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3433 if (UDRS->fd_ref != 1) 3433 if (UDRS->fd_ref != 1)
3434 /* somebody else has this drive open */ 3434 /* somebody else has this drive open */
3435 return -EBUSY; 3435 return -EBUSY;
3436 if (lock_fdc(drive, true)) 3436 if (lock_fdc(drive))
3437 return -EINTR; 3437 return -EINTR;
3438 3438
3439 /* do the actual eject. Fails on 3439 /* do the actual eject. Fails on
@@ -3445,7 +3445,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3445 process_fd_request(); 3445 process_fd_request();
3446 return ret; 3446 return ret;
3447 case FDCLRPRM: 3447 case FDCLRPRM:
3448 if (lock_fdc(drive, true)) 3448 if (lock_fdc(drive))
3449 return -EINTR; 3449 return -EINTR;
3450 current_type[drive] = NULL; 3450 current_type[drive] = NULL;
3451 floppy_sizes[drive] = MAX_DISK_SIZE << 1; 3451 floppy_sizes[drive] = MAX_DISK_SIZE << 1;
@@ -3467,7 +3467,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3467 UDP->flags &= ~FTD_MSG; 3467 UDP->flags &= ~FTD_MSG;
3468 return 0; 3468 return 0;
3469 case FDFMTBEG: 3469 case FDFMTBEG:
3470 if (lock_fdc(drive, true)) 3470 if (lock_fdc(drive))
3471 return -EINTR; 3471 return -EINTR;
3472 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) 3472 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
3473 return -EINTR; 3473 return -EINTR;
@@ -3484,7 +3484,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3484 return do_format(drive, &inparam.f); 3484 return do_format(drive, &inparam.f);
3485 case FDFMTEND: 3485 case FDFMTEND:
3486 case FDFLUSH: 3486 case FDFLUSH:
3487 if (lock_fdc(drive, true)) 3487 if (lock_fdc(drive))
3488 return -EINTR; 3488 return -EINTR;
3489 return invalidate_drive(bdev); 3489 return invalidate_drive(bdev);
3490 case FDSETEMSGTRESH: 3490 case FDSETEMSGTRESH:
@@ -3507,7 +3507,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3507 outparam = UDP; 3507 outparam = UDP;
3508 break; 3508 break;
3509 case FDPOLLDRVSTAT: 3509 case FDPOLLDRVSTAT:
3510 if (lock_fdc(drive, true)) 3510 if (lock_fdc(drive))
3511 return -EINTR; 3511 return -EINTR;
3512 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR) 3512 if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
3513 return -EINTR; 3513 return -EINTR;
@@ -3530,7 +3530,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3530 case FDRAWCMD: 3530 case FDRAWCMD:
3531 if (type) 3531 if (type)
3532 return -EINVAL; 3532 return -EINVAL;
3533 if (lock_fdc(drive, true)) 3533 if (lock_fdc(drive))
3534 return -EINTR; 3534 return -EINTR;
3535 set_floppy(drive); 3535 set_floppy(drive);
3536 i = raw_cmd_ioctl(cmd, (void __user *)param); 3536 i = raw_cmd_ioctl(cmd, (void __user *)param);
@@ -3539,7 +3539,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int
3539 process_fd_request(); 3539 process_fd_request();
3540 return i; 3540 return i;
3541 case FDTWADDLE: 3541 case FDTWADDLE:
3542 if (lock_fdc(drive, true)) 3542 if (lock_fdc(drive))
3543 return -EINTR; 3543 return -EINTR;
3544 twaddle(); 3544 twaddle();
3545 process_fd_request(); 3545 process_fd_request();
@@ -3663,6 +3663,11 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
3663 3663
3664 opened_bdev[drive] = bdev; 3664 opened_bdev[drive] = bdev;
3665 3665
3666 if (!(mode & (FMODE_READ|FMODE_WRITE))) {
3667 res = -EINVAL;
3668 goto out;
3669 }
3670
3666 res = -ENXIO; 3671 res = -ENXIO;
3667 3672
3668 if (!floppy_track_buffer) { 3673 if (!floppy_track_buffer) {
@@ -3706,21 +3711,20 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
3706 if (UFDCS->rawcmd == 1) 3711 if (UFDCS->rawcmd == 1)
3707 UFDCS->rawcmd = 2; 3712 UFDCS->rawcmd = 2;
3708 3713
3709 if (!(mode & FMODE_NDELAY)) { 3714 UDRS->last_checked = 0;
3710 if (mode & (FMODE_READ|FMODE_WRITE)) { 3715 clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
3711 UDRS->last_checked = 0; 3716 check_disk_change(bdev);
3712 clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); 3717 if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
3713 check_disk_change(bdev); 3718 goto out;
3714 if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags)) 3719 if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
3715 goto out; 3720 goto out;
3716 if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags)) 3721
3717 goto out; 3722 res = -EROFS;
3718 } 3723
3719 res = -EROFS; 3724 if ((mode & FMODE_WRITE) &&
3720 if ((mode & FMODE_WRITE) && 3725 !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
3721 !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags)) 3726 goto out;
3722 goto out; 3727
3723 }
3724 mutex_unlock(&open_lock); 3728 mutex_unlock(&open_lock);
3725 mutex_unlock(&floppy_mutex); 3729 mutex_unlock(&floppy_mutex);
3726 return 0; 3730 return 0;
@@ -3748,7 +3752,8 @@ static unsigned int floppy_check_events(struct gendisk *disk,
3748 return DISK_EVENT_MEDIA_CHANGE; 3752 return DISK_EVENT_MEDIA_CHANGE;
3749 3753
3750 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { 3754 if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
3751 lock_fdc(drive, false); 3755 if (lock_fdc(drive))
3756 return -EINTR;
3752 poll_drive(false, 0); 3757 poll_drive(false, 0);
3753 process_fd_request(); 3758 process_fd_request();
3754 } 3759 }
@@ -3847,7 +3852,9 @@ static int floppy_revalidate(struct gendisk *disk)
3847 "VFS: revalidate called on non-open device.\n")) 3852 "VFS: revalidate called on non-open device.\n"))
3848 return -EFAULT; 3853 return -EFAULT;
3849 3854
3850 lock_fdc(drive, false); 3855 res = lock_fdc(drive);
3856 if (res)
3857 return res;
3851 cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || 3858 cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
3852 test_bit(FD_VERIFY_BIT, &UDRS->flags)); 3859 test_bit(FD_VERIFY_BIT, &UDRS->flags));
3853 if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) { 3860 if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8ba1e97d573c..64a7b5971b57 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -478,7 +478,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
478 id->ver_id = 0x1; 478 id->ver_id = 0x1;
479 id->vmnt = 0; 479 id->vmnt = 0;
480 id->cgrps = 1; 480 id->cgrps = 1;
481 id->cap = 0x3; 481 id->cap = 0x2;
482 id->dom = 0x1; 482 id->dom = 0x1;
483 483
484 id->ppaf.blk_offset = 0; 484 id->ppaf.blk_offset = 0;
@@ -707,9 +707,7 @@ static int null_add_dev(void)
707 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); 707 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
708 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); 708 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
709 709
710
711 mutex_lock(&lock); 710 mutex_lock(&lock);
712 list_add_tail(&nullb->list, &nullb_list);
713 nullb->index = nullb_indexes++; 711 nullb->index = nullb_indexes++;
714 mutex_unlock(&lock); 712 mutex_unlock(&lock);
715 713
@@ -743,6 +741,10 @@ static int null_add_dev(void)
743 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); 741 strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
744 742
745 add_disk(disk); 743 add_disk(disk);
744
745 mutex_lock(&lock);
746 list_add_tail(&nullb->list, &nullb_list);
747 mutex_unlock(&lock);
746done: 748done:
747 return 0; 749 return 0;
748 750
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8a8dc91c39f7..83eb9e6bf8b0 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1873,6 +1873,43 @@ again:
1873 return err; 1873 return err;
1874} 1874}
1875 1875
1876static int negotiate_mq(struct blkfront_info *info)
1877{
1878 unsigned int backend_max_queues = 0;
1879 int err;
1880 unsigned int i;
1881
1882 BUG_ON(info->nr_rings);
1883
1884 /* Check if backend supports multiple queues. */
1885 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1886 "multi-queue-max-queues", "%u", &backend_max_queues);
1887 if (err < 0)
1888 backend_max_queues = 1;
1889
1890 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1891 /* We need at least one ring. */
1892 if (!info->nr_rings)
1893 info->nr_rings = 1;
1894
1895 info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1896 if (!info->rinfo) {
1897 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1898 return -ENOMEM;
1899 }
1900
1901 for (i = 0; i < info->nr_rings; i++) {
1902 struct blkfront_ring_info *rinfo;
1903
1904 rinfo = &info->rinfo[i];
1905 INIT_LIST_HEAD(&rinfo->indirect_pages);
1906 INIT_LIST_HEAD(&rinfo->grants);
1907 rinfo->dev_info = info;
1908 INIT_WORK(&rinfo->work, blkif_restart_queue);
1909 spin_lock_init(&rinfo->ring_lock);
1910 }
1911 return 0;
1912}
1876/** 1913/**
1877 * Entry point to this code when a new device is created. Allocate the basic 1914 * Entry point to this code when a new device is created. Allocate the basic
1878 * structures and the ring buffer for communication with the backend, and 1915 * structures and the ring buffer for communication with the backend, and
@@ -1883,9 +1920,7 @@ static int blkfront_probe(struct xenbus_device *dev,
1883 const struct xenbus_device_id *id) 1920 const struct xenbus_device_id *id)
1884{ 1921{
1885 int err, vdevice; 1922 int err, vdevice;
1886 unsigned int r_index;
1887 struct blkfront_info *info; 1923 struct blkfront_info *info;
1888 unsigned int backend_max_queues = 0;
1889 1924
1890 /* FIXME: Use dynamic device id if this is not set. */ 1925 /* FIXME: Use dynamic device id if this is not set. */
1891 err = xenbus_scanf(XBT_NIL, dev->nodename, 1926 err = xenbus_scanf(XBT_NIL, dev->nodename,
@@ -1936,33 +1971,10 @@ static int blkfront_probe(struct xenbus_device *dev,
1936 } 1971 }
1937 1972
1938 info->xbdev = dev; 1973 info->xbdev = dev;
1939 /* Check if backend supports multiple queues. */ 1974 err = negotiate_mq(info);
1940 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 1975 if (err) {
1941 "multi-queue-max-queues", "%u", &backend_max_queues);
1942 if (err < 0)
1943 backend_max_queues = 1;
1944
1945 info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1946 /* We need at least one ring. */
1947 if (!info->nr_rings)
1948 info->nr_rings = 1;
1949
1950 info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
1951 if (!info->rinfo) {
1952 xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure");
1953 kfree(info); 1976 kfree(info);
1954 return -ENOMEM; 1977 return err;
1955 }
1956
1957 for (r_index = 0; r_index < info->nr_rings; r_index++) {
1958 struct blkfront_ring_info *rinfo;
1959
1960 rinfo = &info->rinfo[r_index];
1961 INIT_LIST_HEAD(&rinfo->indirect_pages);
1962 INIT_LIST_HEAD(&rinfo->grants);
1963 rinfo->dev_info = info;
1964 INIT_WORK(&rinfo->work, blkif_restart_queue);
1965 spin_lock_init(&rinfo->ring_lock);
1966 } 1978 }
1967 1979
1968 mutex_init(&info->mutex); 1980 mutex_init(&info->mutex);
@@ -2123,12 +2135,16 @@ static int blkif_recover(struct blkfront_info *info)
2123static int blkfront_resume(struct xenbus_device *dev) 2135static int blkfront_resume(struct xenbus_device *dev)
2124{ 2136{
2125 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 2137 struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2126 int err; 2138 int err = 0;
2127 2139
2128 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename); 2140 dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
2129 2141
2130 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); 2142 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2131 2143
2144 err = negotiate_mq(info);
2145 if (err)
2146 return err;
2147
2132 err = talk_to_blkback(dev, info); 2148 err = talk_to_blkback(dev, info);
2133 2149
2134 /* 2150 /*
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 240b6cf1d97c..be54e5331a45 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -42,7 +42,7 @@
42/* 42/*
43 * The High Precision Event Timer driver. 43 * The High Precision Event Timer driver.
44 * This driver is closely modelled after the rtc.c driver. 44 * This driver is closely modelled after the rtc.c driver.
45 * http://www.intel.com/hardwaredesign/hpetspec_1.pdf 45 * See HPET spec revision 1.
46 */ 46 */
47#define HPET_USER_FREQ (64) 47#define HPET_USER_FREQ (64)
48#define HPET_DRIFT (500) 48#define HPET_DRIFT (500)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d0da5d852d41..b583e5336630 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1819,6 +1819,28 @@ unsigned int get_random_int(void)
1819EXPORT_SYMBOL(get_random_int); 1819EXPORT_SYMBOL(get_random_int);
1820 1820
1821/* 1821/*
1822 * Same as get_random_int(), but returns unsigned long.
1823 */
1824unsigned long get_random_long(void)
1825{
1826 __u32 *hash;
1827 unsigned long ret;
1828
1829 if (arch_get_random_long(&ret))
1830 return ret;
1831
1832 hash = get_cpu_var(get_random_int_hash);
1833
1834 hash[0] += current->pid + jiffies + random_get_entropy();
1835 md5_transform(hash, random_int_secret);
1836 ret = *(unsigned long *)hash;
1837 put_cpu_var(get_random_int_hash);
1838
1839 return ret;
1840}
1841EXPORT_SYMBOL(get_random_long);
1842
1843/*
1822 * randomize_range() returns a start address such that 1844 * randomize_range() returns a start address such that
1823 * 1845 *
1824 * [...... <range> .....] 1846 * [...... <range> .....]
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index b038e3666058..bae4be6501df 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_COMMON_CLK_SI514) += clk-si514.o
43obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o 43obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
44obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o 44obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
45obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o 45obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o
46obj-$(CONFIG_ARCH_TANGOX) += clk-tango4.o 46obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
47obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o 47obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
48obj-$(CONFIG_ARCH_U300) += clk-u300.o 48obj-$(CONFIG_ARCH_U300) += clk-u300.o
49obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o 49obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 19fed65587e8..7b09a265d79f 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -289,7 +289,7 @@ static void __init of_gpio_clk_setup(struct device_node *node,
289 289
290 num_parents = of_clk_get_parent_count(node); 290 num_parents = of_clk_get_parent_count(node);
291 if (num_parents < 0) 291 if (num_parents < 0)
292 return; 292 num_parents = 0;
293 293
294 data = kzalloc(sizeof(*data), GFP_KERNEL); 294 data = kzalloc(sizeof(*data), GFP_KERNEL);
295 if (!data) 295 if (!data)
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index cd0f2726f5e0..89e9ca78bb94 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -299,7 +299,7 @@ static int scpi_clocks_probe(struct platform_device *pdev)
299 /* Add the virtual cpufreq device */ 299 /* Add the virtual cpufreq device */
300 cpufreq_dev = platform_device_register_simple("scpi-cpufreq", 300 cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
301 -1, NULL, 0); 301 -1, NULL, 0);
302 if (!cpufreq_dev) 302 if (IS_ERR(cpufreq_dev))
303 pr_warn("unable to register cpufreq device"); 303 pr_warn("unable to register cpufreq device");
304 304
305 return 0; 305 return 0;
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index d5c5bfa35a5a..3e0b52daa35f 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -247,7 +247,7 @@ static struct clk_onecell_data dove_divider_data = {
247 247
248void __init dove_divider_clk_init(struct device_node *np) 248void __init dove_divider_clk_init(struct device_node *np)
249{ 249{
250 void *base; 250 void __iomem *base;
251 251
252 base = of_iomap(np, 0); 252 base = of_iomap(np, 0);
253 if (WARN_ON(!base)) 253 if (WARN_ON(!base))
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index cf73e539e9f6..070037a29ea5 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -3587,7 +3587,6 @@ static const struct regmap_config gcc_apq8084_regmap_config = {
3587 .val_bits = 32, 3587 .val_bits = 32,
3588 .max_register = 0x1fc0, 3588 .max_register = 0x1fc0,
3589 .fast_io = true, 3589 .fast_io = true,
3590 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3591}; 3590};
3592 3591
3593static const struct qcom_cc_desc gcc_apq8084_desc = { 3592static const struct qcom_cc_desc gcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index b692ae881d6a..dd5402bac620 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -3005,7 +3005,6 @@ static const struct regmap_config gcc_ipq806x_regmap_config = {
3005 .val_bits = 32, 3005 .val_bits = 32,
3006 .max_register = 0x3e40, 3006 .max_register = 0x3e40,
3007 .fast_io = true, 3007 .fast_io = true,
3008 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3009}; 3008};
3010 3009
3011static const struct qcom_cc_desc gcc_ipq806x_desc = { 3010static const struct qcom_cc_desc gcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index f6a2b14dfec4..ad413036f7c7 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -2702,7 +2702,6 @@ static const struct regmap_config gcc_msm8660_regmap_config = {
2702 .val_bits = 32, 2702 .val_bits = 32,
2703 .max_register = 0x363c, 2703 .max_register = 0x363c,
2704 .fast_io = true, 2704 .fast_io = true,
2705 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2706}; 2705};
2707 2706
2708static const struct qcom_cc_desc gcc_msm8660_desc = { 2707static const struct qcom_cc_desc gcc_msm8660_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index e3bf09d7d0ef..8cc9b2868b41 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -3336,7 +3336,6 @@ static const struct regmap_config gcc_msm8916_regmap_config = {
3336 .val_bits = 32, 3336 .val_bits = 32,
3337 .max_register = 0x80000, 3337 .max_register = 0x80000,
3338 .fast_io = true, 3338 .fast_io = true,
3339 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3340}; 3339};
3341 3340
3342static const struct qcom_cc_desc gcc_msm8916_desc = { 3341static const struct qcom_cc_desc gcc_msm8916_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index f31111e32d44..983dd7dc89a7 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -3468,7 +3468,6 @@ static const struct regmap_config gcc_msm8960_regmap_config = {
3468 .val_bits = 32, 3468 .val_bits = 32,
3469 .max_register = 0x3660, 3469 .max_register = 0x3660,
3470 .fast_io = true, 3470 .fast_io = true,
3471 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3472}; 3471};
3473 3472
3474static const struct regmap_config gcc_apq8064_regmap_config = { 3473static const struct regmap_config gcc_apq8064_regmap_config = {
@@ -3477,7 +3476,6 @@ static const struct regmap_config gcc_apq8064_regmap_config = {
3477 .val_bits = 32, 3476 .val_bits = 32,
3478 .max_register = 0x3880, 3477 .max_register = 0x3880,
3479 .fast_io = true, 3478 .fast_io = true,
3480 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3481}; 3479};
3482 3480
3483static const struct qcom_cc_desc gcc_msm8960_desc = { 3481static const struct qcom_cc_desc gcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index df164d618e34..335952db309b 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -2680,7 +2680,6 @@ static const struct regmap_config gcc_msm8974_regmap_config = {
2680 .val_bits = 32, 2680 .val_bits = 32,
2681 .max_register = 0x1fc0, 2681 .max_register = 0x1fc0,
2682 .fast_io = true, 2682 .fast_io = true,
2683 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2684}; 2683};
2685 2684
2686static const struct qcom_cc_desc gcc_msm8974_desc = { 2685static const struct qcom_cc_desc gcc_msm8974_desc = {
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 62e79fadd5f7..db3998e5e2d8 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -419,7 +419,6 @@ static const struct regmap_config lcc_ipq806x_regmap_config = {
419 .val_bits = 32, 419 .val_bits = 32,
420 .max_register = 0xfc, 420 .max_register = 0xfc,
421 .fast_io = true, 421 .fast_io = true,
422 .val_format_endian = REGMAP_ENDIAN_LITTLE,
423}; 422};
424 423
425static const struct qcom_cc_desc lcc_ipq806x_desc = { 424static const struct qcom_cc_desc lcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index bf95bb0ea1b8..4fcf9d1d233c 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -524,7 +524,6 @@ static const struct regmap_config lcc_msm8960_regmap_config = {
524 .val_bits = 32, 524 .val_bits = 32,
525 .max_register = 0xfc, 525 .max_register = 0xfc,
526 .fast_io = true, 526 .fast_io = true,
527 .val_format_endian = REGMAP_ENDIAN_LITTLE,
528}; 527};
529 528
530static const struct qcom_cc_desc lcc_msm8960_desc = { 529static const struct qcom_cc_desc lcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 1e703fda8a0f..30777f9f1a43 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -3368,7 +3368,6 @@ static const struct regmap_config mmcc_apq8084_regmap_config = {
3368 .val_bits = 32, 3368 .val_bits = 32,
3369 .max_register = 0x5104, 3369 .max_register = 0x5104,
3370 .fast_io = true, 3370 .fast_io = true,
3371 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3372}; 3371};
3373 3372
3374static const struct qcom_cc_desc mmcc_apq8084_desc = { 3373static const struct qcom_cc_desc mmcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index d73a048d3b9d..00e36192a1de 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -3029,7 +3029,6 @@ static const struct regmap_config mmcc_msm8960_regmap_config = {
3029 .val_bits = 32, 3029 .val_bits = 32,
3030 .max_register = 0x334, 3030 .max_register = 0x334,
3031 .fast_io = true, 3031 .fast_io = true,
3032 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3033}; 3032};
3034 3033
3035static const struct regmap_config mmcc_apq8064_regmap_config = { 3034static const struct regmap_config mmcc_apq8064_regmap_config = {
@@ -3038,7 +3037,6 @@ static const struct regmap_config mmcc_apq8064_regmap_config = {
3038 .val_bits = 32, 3037 .val_bits = 32,
3039 .max_register = 0x350, 3038 .max_register = 0x350,
3040 .fast_io = true, 3039 .fast_io = true,
3041 .val_format_endian = REGMAP_ENDIAN_LITTLE,
3042}; 3040};
3043 3041
3044static const struct qcom_cc_desc mmcc_msm8960_desc = { 3042static const struct qcom_cc_desc mmcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index bbe28ed93669..9d790bcadf25 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -2594,7 +2594,6 @@ static const struct regmap_config mmcc_msm8974_regmap_config = {
2594 .val_bits = 32, 2594 .val_bits = 32,
2595 .max_register = 0x5104, 2595 .max_register = 0x5104,
2596 .fast_io = true, 2596 .fast_io = true,
2597 .val_format_endian = REGMAP_ENDIAN_LITTLE,
2598}; 2597};
2599 2598
2600static const struct qcom_cc_desc mmcc_msm8974_desc = { 2599static const struct qcom_cc_desc mmcc_msm8974_desc = {
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index ebce98033fbb..bc7fbac83ab7 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -133,7 +133,7 @@ PNAME(mux_spdif_p) = { "spdif_src", "spdif_frac", "xin12m" };
133PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" }; 133PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
134PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" }; 134PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
135PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" }; 135PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
136PNAME(mux_mac_p) = { "mac_pll_src", "ext_gmac" }; 136PNAME(mux_mac_p) = { "mac_pll_src", "rmii_clkin" };
137PNAME(mux_dclk_p) = { "dclk_lcdc", "dclk_cru" }; 137PNAME(mux_dclk_p) = { "dclk_lcdc", "dclk_cru" };
138 138
139static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = { 139static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = {
@@ -224,16 +224,16 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
224 RK2928_CLKGATE_CON(2), 2, GFLAGS), 224 RK2928_CLKGATE_CON(2), 2, GFLAGS),
225 225
226 COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED, 226 COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED,
227 RK2928_CLKSEL_CON(2), 4, 1, DFLAGS, 227 RK2928_CLKSEL_CON(2), 4, 1, MFLAGS,
228 RK2928_CLKGATE_CON(1), 0, GFLAGS), 228 RK2928_CLKGATE_CON(1), 0, GFLAGS),
229 COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED, 229 COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED,
230 RK2928_CLKSEL_CON(2), 5, 1, DFLAGS, 230 RK2928_CLKSEL_CON(2), 5, 1, MFLAGS,
231 RK2928_CLKGATE_CON(1), 1, GFLAGS), 231 RK2928_CLKGATE_CON(1), 1, GFLAGS),
232 COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED, 232 COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED,
233 RK2928_CLKSEL_CON(2), 6, 1, DFLAGS, 233 RK2928_CLKSEL_CON(2), 6, 1, MFLAGS,
234 RK2928_CLKGATE_CON(2), 4, GFLAGS), 234 RK2928_CLKGATE_CON(2), 4, GFLAGS),
235 COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED, 235 COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED,
236 RK2928_CLKSEL_CON(2), 7, 1, DFLAGS, 236 RK2928_CLKSEL_CON(2), 7, 1, MFLAGS,
237 RK2928_CLKGATE_CON(2), 5, GFLAGS), 237 RK2928_CLKGATE_CON(2), 5, GFLAGS),
238 238
239 MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0, 239 MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0,
@@ -242,11 +242,11 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
242 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 242 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
243 RK2928_CLKGATE_CON(1), 8, GFLAGS), 243 RK2928_CLKGATE_CON(1), 8, GFLAGS),
244 COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0, 244 COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0,
245 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 245 RK2928_CLKSEL_CON(14), 0, 7, DFLAGS,
246 RK2928_CLKGATE_CON(1), 8, GFLAGS), 246 RK2928_CLKGATE_CON(1), 10, GFLAGS),
247 COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0, 247 COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0,
248 RK2928_CLKSEL_CON(13), 0, 7, DFLAGS, 248 RK2928_CLKSEL_CON(15), 0, 7, DFLAGS,
249 RK2928_CLKGATE_CON(1), 8, GFLAGS), 249 RK2928_CLKGATE_CON(1), 12, GFLAGS),
250 COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, 250 COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
251 RK2928_CLKSEL_CON(17), 0, 251 RK2928_CLKSEL_CON(17), 0,
252 RK2928_CLKGATE_CON(1), 9, GFLAGS, 252 RK2928_CLKGATE_CON(1), 9, GFLAGS,
@@ -279,13 +279,13 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
279 RK2928_CLKGATE_CON(3), 2, GFLAGS), 279 RK2928_CLKGATE_CON(3), 2, GFLAGS),
280 280
281 COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0, 281 COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0,
282 RK2928_CLKSEL_CON(12), 8, 2, DFLAGS, 282 RK2928_CLKSEL_CON(12), 8, 2, MFLAGS,
283 RK2928_CLKGATE_CON(2), 11, GFLAGS), 283 RK2928_CLKGATE_CON(2), 11, GFLAGS),
284 DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0, 284 DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0,
285 RK2928_CLKSEL_CON(11), 0, 7, DFLAGS), 285 RK2928_CLKSEL_CON(11), 0, 7, DFLAGS),
286 286
287 COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0, 287 COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0,
288 RK2928_CLKSEL_CON(12), 10, 2, DFLAGS, 288 RK2928_CLKSEL_CON(12), 10, 2, MFLAGS,
289 RK2928_CLKGATE_CON(2), 13, GFLAGS), 289 RK2928_CLKGATE_CON(2), 13, GFLAGS),
290 DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0, 290 DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0,
291 RK2928_CLKSEL_CON(11), 8, 7, DFLAGS), 291 RK2928_CLKSEL_CON(11), 8, 7, DFLAGS),
@@ -344,12 +344,12 @@ static struct rockchip_clk_branch rk3036_clk_branches[] __initdata = {
344 RK2928_CLKGATE_CON(10), 5, GFLAGS), 344 RK2928_CLKGATE_CON(10), 5, GFLAGS),
345 345
346 COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0, 346 COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0,
347 RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 4, 5, DFLAGS), 347 RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 9, 5, DFLAGS),
348 MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT, 348 MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT,
349 RK2928_CLKSEL_CON(21), 3, 1, MFLAGS), 349 RK2928_CLKSEL_CON(21), 3, 1, MFLAGS),
350 350
351 COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0, 351 COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0,
352 RK2928_CLKSEL_CON(21), 9, 5, DFLAGS, 352 RK2928_CLKSEL_CON(21), 4, 5, DFLAGS,
353 RK2928_CLKGATE_CON(2), 6, GFLAGS), 353 RK2928_CLKGATE_CON(2), 6, GFLAGS),
354 354
355 MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0, 355 MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0,
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index be0ede522269..21f3ea909fab 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -780,13 +780,13 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
780 GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS), 780 GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS),
781 781
782 /* pclk_pd_alive gates */ 782 /* pclk_pd_alive gates */
783 GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 8, GFLAGS), 783 GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 13, GFLAGS),
784 GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 7, GFLAGS), 784 GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 12, GFLAGS),
785 GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 12, GFLAGS), 785 GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 9, GFLAGS),
786 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 11, GFLAGS), 786 GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 8, GFLAGS),
787 GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 3, GFLAGS), 787 GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 3, GFLAGS),
788 GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 2, GFLAGS), 788 GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 2, GFLAGS),
789 GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 1, GFLAGS), 789 GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 1, GFLAGS),
790 790
791 /* 791 /*
792 * pclk_vio gates 792 * pclk_vio gates
@@ -796,12 +796,12 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
796 GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS), 796 GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
797 797
798 /* pclk_pd_pmu gates */ 798 /* pclk_pd_pmu gates */
799 GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 0, GFLAGS), 799 GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 5, GFLAGS),
800 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS), 800 GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(23), 4, GFLAGS),
801 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 3, GFLAGS), 801 GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 3, GFLAGS),
802 GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), 802 GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 2, GFLAGS),
803 GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 1, GFLAGS), 803 GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 1, GFLAGS),
804 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS), 804 GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 0, GFLAGS),
805 805
806 /* timer gates */ 806 /* timer gates */
807 GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS), 807 GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS),
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index e1fe8f35d45c..74e7544f861b 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -450,8 +450,10 @@ static int load_timings_from_dt(struct tegra_clk_emc *tegra,
450 struct emc_timing *timing = tegra->timings + (i++); 450 struct emc_timing *timing = tegra->timings + (i++);
451 451
452 err = load_one_timing_from_dt(tegra, timing, child); 452 err = load_one_timing_from_dt(tegra, timing, child);
453 if (err) 453 if (err) {
454 of_node_put(child);
454 return err; 455 return err;
456 }
455 457
456 timing->ram_code = ram_code; 458 timing->ram_code = ram_code;
457 } 459 }
@@ -499,9 +501,9 @@ struct clk *tegra_clk_register_emc(void __iomem *base, struct device_node *np,
499 * fuses until the apbmisc driver is loaded. 501 * fuses until the apbmisc driver is loaded.
500 */ 502 */
501 err = load_timings_from_dt(tegra, node, node_ram_code); 503 err = load_timings_from_dt(tegra, node, node_ram_code);
504 of_node_put(node);
502 if (err) 505 if (err)
503 return ERR_PTR(err); 506 return ERR_PTR(err);
504 of_node_put(node);
505 break; 507 break;
506 } 508 }
507 509
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 19ce0738ee76..62ea38187b71 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -11,6 +11,7 @@ enum clk_id {
11 tegra_clk_afi, 11 tegra_clk_afi,
12 tegra_clk_amx, 12 tegra_clk_amx,
13 tegra_clk_amx1, 13 tegra_clk_amx1,
14 tegra_clk_apb2ape,
14 tegra_clk_apbdma, 15 tegra_clk_apbdma,
15 tegra_clk_apbif, 16 tegra_clk_apbif,
16 tegra_clk_ape, 17 tegra_clk_ape,
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index a534bfab30b3..6ac3f843e7ca 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -86,15 +86,21 @@
86#define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\ 86#define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\
87 PLLE_SS_CNTL_SSC_BYP) 87 PLLE_SS_CNTL_SSC_BYP)
88#define PLLE_SS_MAX_MASK 0x1ff 88#define PLLE_SS_MAX_MASK 0x1ff
89#define PLLE_SS_MAX_VAL 0x25 89#define PLLE_SS_MAX_VAL_TEGRA114 0x25
90#define PLLE_SS_MAX_VAL_TEGRA210 0x21
90#define PLLE_SS_INC_MASK (0xff << 16) 91#define PLLE_SS_INC_MASK (0xff << 16)
91#define PLLE_SS_INC_VAL (0x1 << 16) 92#define PLLE_SS_INC_VAL (0x1 << 16)
92#define PLLE_SS_INCINTRV_MASK (0x3f << 24) 93#define PLLE_SS_INCINTRV_MASK (0x3f << 24)
93#define PLLE_SS_INCINTRV_VAL (0x20 << 24) 94#define PLLE_SS_INCINTRV_VAL_TEGRA114 (0x20 << 24)
95#define PLLE_SS_INCINTRV_VAL_TEGRA210 (0x23 << 24)
94#define PLLE_SS_COEFFICIENTS_MASK \ 96#define PLLE_SS_COEFFICIENTS_MASK \
95 (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK) 97 (PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK)
96#define PLLE_SS_COEFFICIENTS_VAL \ 98#define PLLE_SS_COEFFICIENTS_VAL_TEGRA114 \
97 (PLLE_SS_MAX_VAL | PLLE_SS_INC_VAL | PLLE_SS_INCINTRV_VAL) 99 (PLLE_SS_MAX_VAL_TEGRA114 | PLLE_SS_INC_VAL |\
100 PLLE_SS_INCINTRV_VAL_TEGRA114)
101#define PLLE_SS_COEFFICIENTS_VAL_TEGRA210 \
102 (PLLE_SS_MAX_VAL_TEGRA210 | PLLE_SS_INC_VAL |\
103 PLLE_SS_INCINTRV_VAL_TEGRA210)
98 104
99#define PLLE_AUX_PLLP_SEL BIT(2) 105#define PLLE_AUX_PLLP_SEL BIT(2)
100#define PLLE_AUX_USE_LOCKDET BIT(3) 106#define PLLE_AUX_USE_LOCKDET BIT(3)
@@ -880,7 +886,7 @@ static int clk_plle_training(struct tegra_clk_pll *pll)
880static int clk_plle_enable(struct clk_hw *hw) 886static int clk_plle_enable(struct clk_hw *hw)
881{ 887{
882 struct tegra_clk_pll *pll = to_clk_pll(hw); 888 struct tegra_clk_pll *pll = to_clk_pll(hw);
883 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 889 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
884 struct tegra_clk_pll_freq_table sel; 890 struct tegra_clk_pll_freq_table sel;
885 u32 val; 891 u32 val;
886 int err; 892 int err;
@@ -1378,7 +1384,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1378 u32 val; 1384 u32 val;
1379 int ret; 1385 int ret;
1380 unsigned long flags = 0; 1386 unsigned long flags = 0;
1381 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 1387 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
1382 1388
1383 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) 1389 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
1384 return -EINVAL; 1390 return -EINVAL;
@@ -1401,7 +1407,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1401 val |= PLLE_MISC_IDDQ_SW_CTRL; 1407 val |= PLLE_MISC_IDDQ_SW_CTRL;
1402 val &= ~PLLE_MISC_IDDQ_SW_VALUE; 1408 val &= ~PLLE_MISC_IDDQ_SW_VALUE;
1403 val |= PLLE_MISC_PLLE_PTS; 1409 val |= PLLE_MISC_PLLE_PTS;
1404 val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK; 1410 val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
1405 pll_writel_misc(val, pll); 1411 pll_writel_misc(val, pll);
1406 udelay(5); 1412 udelay(5);
1407 1413
@@ -1428,7 +1434,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
1428 val = pll_readl(PLLE_SS_CTRL, pll); 1434 val = pll_readl(PLLE_SS_CTRL, pll);
1429 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT); 1435 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
1430 val &= ~PLLE_SS_COEFFICIENTS_MASK; 1436 val &= ~PLLE_SS_COEFFICIENTS_MASK;
1431 val |= PLLE_SS_COEFFICIENTS_VAL; 1437 val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA114;
1432 pll_writel(val, PLLE_SS_CTRL, pll); 1438 pll_writel(val, PLLE_SS_CTRL, pll);
1433 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS); 1439 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
1434 pll_writel(val, PLLE_SS_CTRL, pll); 1440 pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2012,9 +2018,9 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2012 struct tegra_clk_pll *pll = to_clk_pll(hw); 2018 struct tegra_clk_pll *pll = to_clk_pll(hw);
2013 struct tegra_clk_pll_freq_table sel; 2019 struct tegra_clk_pll_freq_table sel;
2014 u32 val; 2020 u32 val;
2015 int ret; 2021 int ret = 0;
2016 unsigned long flags = 0; 2022 unsigned long flags = 0;
2017 unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk)); 2023 unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
2018 2024
2019 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) 2025 if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
2020 return -EINVAL; 2026 return -EINVAL;
@@ -2022,22 +2028,20 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2022 if (pll->lock) 2028 if (pll->lock)
2023 spin_lock_irqsave(pll->lock, flags); 2029 spin_lock_irqsave(pll->lock, flags);
2024 2030
2031 val = pll_readl(pll->params->aux_reg, pll);
2032 if (val & PLLE_AUX_SEQ_ENABLE)
2033 goto out;
2034
2025 val = pll_readl_base(pll); 2035 val = pll_readl_base(pll);
2026 val &= ~BIT(30); /* Disable lock override */ 2036 val &= ~BIT(30); /* Disable lock override */
2027 pll_writel_base(val, pll); 2037 pll_writel_base(val, pll);
2028 2038
2029 val = pll_readl(pll->params->aux_reg, pll);
2030 val |= PLLE_AUX_ENABLE_SWCTL;
2031 val &= ~PLLE_AUX_SEQ_ENABLE;
2032 pll_writel(val, pll->params->aux_reg, pll);
2033 udelay(1);
2034
2035 val = pll_readl_misc(pll); 2039 val = pll_readl_misc(pll);
2036 val |= PLLE_MISC_LOCK_ENABLE; 2040 val |= PLLE_MISC_LOCK_ENABLE;
2037 val |= PLLE_MISC_IDDQ_SW_CTRL; 2041 val |= PLLE_MISC_IDDQ_SW_CTRL;
2038 val &= ~PLLE_MISC_IDDQ_SW_VALUE; 2042 val &= ~PLLE_MISC_IDDQ_SW_VALUE;
2039 val |= PLLE_MISC_PLLE_PTS; 2043 val |= PLLE_MISC_PLLE_PTS;
2040 val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK; 2044 val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
2041 pll_writel_misc(val, pll); 2045 pll_writel_misc(val, pll);
2042 udelay(5); 2046 udelay(5);
2043 2047
@@ -2067,7 +2071,7 @@ static int clk_plle_tegra210_enable(struct clk_hw *hw)
2067 val = pll_readl(PLLE_SS_CTRL, pll); 2071 val = pll_readl(PLLE_SS_CTRL, pll);
2068 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT); 2072 val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
2069 val &= ~PLLE_SS_COEFFICIENTS_MASK; 2073 val &= ~PLLE_SS_COEFFICIENTS_MASK;
2070 val |= PLLE_SS_COEFFICIENTS_VAL; 2074 val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA210;
2071 pll_writel(val, PLLE_SS_CTRL, pll); 2075 pll_writel(val, PLLE_SS_CTRL, pll);
2072 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS); 2076 val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
2073 pll_writel(val, PLLE_SS_CTRL, pll); 2077 pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2104,15 +2108,25 @@ static void clk_plle_tegra210_disable(struct clk_hw *hw)
2104 if (pll->lock) 2108 if (pll->lock)
2105 spin_lock_irqsave(pll->lock, flags); 2109 spin_lock_irqsave(pll->lock, flags);
2106 2110
2111 /* If PLLE HW sequencer is enabled, SW should not disable PLLE */
2112 val = pll_readl(pll->params->aux_reg, pll);
2113 if (val & PLLE_AUX_SEQ_ENABLE)
2114 goto out;
2115
2107 val = pll_readl_base(pll); 2116 val = pll_readl_base(pll);
2108 val &= ~PLLE_BASE_ENABLE; 2117 val &= ~PLLE_BASE_ENABLE;
2109 pll_writel_base(val, pll); 2118 pll_writel_base(val, pll);
2110 2119
2120 val = pll_readl(pll->params->aux_reg, pll);
2121 val |= PLLE_AUX_ENABLE_SWCTL | PLLE_AUX_SS_SWCTL;
2122 pll_writel(val, pll->params->aux_reg, pll);
2123
2111 val = pll_readl_misc(pll); 2124 val = pll_readl_misc(pll);
2112 val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE; 2125 val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE;
2113 pll_writel_misc(val, pll); 2126 pll_writel_misc(val, pll);
2114 udelay(1); 2127 udelay(1);
2115 2128
2129out:
2116 if (pll->lock) 2130 if (pll->lock)
2117 spin_unlock_irqrestore(pll->lock, flags); 2131 spin_unlock_irqrestore(pll->lock, flags);
2118} 2132}
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 6ad381a888a6..ea2b9cbf9e70 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -773,7 +773,7 @@ static struct tegra_periph_init_data periph_clks[] = {
773 XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src), 773 XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src),
774 XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8), 774 XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8),
775 MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb), 775 MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb),
776 MUX8("msenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc), 776 MUX8("nvenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc),
777 MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec), 777 MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec),
778 MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg), 778 MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg),
779 MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape), 779 MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape),
@@ -782,7 +782,7 @@ static struct tegra_periph_init_data periph_clks[] = {
782 NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock), 782 NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock),
783 MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy), 783 MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy),
784 MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi), 784 MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi),
785 MUX("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, TEGRA_PERIPH_ON_APB, tegra_clk_vi_i2c), 785 I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c),
786 MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif), 786 MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif),
787 MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape), 787 MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape),
788 MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb), 788 MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb),
@@ -829,6 +829,7 @@ static struct tegra_periph_init_data gate_clks[] = {
829 GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0), 829 GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0),
830 GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0), 830 GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0),
831 GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0), 831 GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0),
832 GATE("apb2ape", "clk_m", 107, 0, tegra_clk_apb2ape, 0),
832}; 833};
833 834
834static struct tegra_periph_init_data div_clks[] = { 835static struct tegra_periph_init_data div_clks[] = {
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 4559a20e3af6..474de0f0c26d 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -67,7 +67,7 @@ static const char *cclk_lp_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
67 "pll_p", "pll_p_out4", "unused", 67 "pll_p", "pll_p_out4", "unused",
68 "unused", "pll_x", "pll_x_out0" }; 68 "unused", "pll_x", "pll_x_out0" };
69 69
70const struct tegra_super_gen_info tegra_super_gen_info_gen4 = { 70static const struct tegra_super_gen_info tegra_super_gen_info_gen4 = {
71 .gen = gen4, 71 .gen = gen4,
72 .sclk_parents = sclk_parents, 72 .sclk_parents = sclk_parents,
73 .cclk_g_parents = cclk_g_parents, 73 .cclk_g_parents = cclk_g_parents,
@@ -93,7 +93,7 @@ static const char *cclk_lp_parents_gen5[] = { "clk_m", "unused", "clk_32k", "unu
93 "unused", "unused", "unused", "unused", 93 "unused", "unused", "unused", "unused",
94 "dfllCPU_out" }; 94 "dfllCPU_out" };
95 95
96const struct tegra_super_gen_info tegra_super_gen_info_gen5 = { 96static const struct tegra_super_gen_info tegra_super_gen_info_gen5 = {
97 .gen = gen5, 97 .gen = gen5,
98 .sclk_parents = sclk_parents_gen5, 98 .sclk_parents = sclk_parents_gen5,
99 .cclk_g_parents = cclk_g_parents_gen5, 99 .cclk_g_parents = cclk_g_parents_gen5,
@@ -171,7 +171,7 @@ static void __init tegra_sclk_init(void __iomem *clk_base,
171 *dt_clk = clk; 171 *dt_clk = clk;
172} 172}
173 173
174void __init tegra_super_clk_init(void __iomem *clk_base, 174static void __init tegra_super_clk_init(void __iomem *clk_base,
175 void __iomem *pmc_base, 175 void __iomem *pmc_base,
176 struct tegra_clk *tegra_clks, 176 struct tegra_clk *tegra_clks,
177 struct tegra_clk_pll_params *params, 177 struct tegra_clk_pll_params *params,
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 58514c44ea83..637041fd53ad 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -59,8 +59,8 @@
59#define PLLC3_MISC3 0x50c 59#define PLLC3_MISC3 0x50c
60 60
61#define PLLM_BASE 0x90 61#define PLLM_BASE 0x90
62#define PLLM_MISC0 0x9c
63#define PLLM_MISC1 0x98 62#define PLLM_MISC1 0x98
63#define PLLM_MISC2 0x9c
64#define PLLP_BASE 0xa0 64#define PLLP_BASE 0xa0
65#define PLLP_MISC0 0xac 65#define PLLP_MISC0 0xac
66#define PLLP_MISC1 0x680 66#define PLLP_MISC1 0x680
@@ -99,7 +99,7 @@
99#define PLLC4_MISC0 0x5a8 99#define PLLC4_MISC0 0x5a8
100#define PLLC4_OUT 0x5e4 100#define PLLC4_OUT 0x5e4
101#define PLLMB_BASE 0x5e8 101#define PLLMB_BASE 0x5e8
102#define PLLMB_MISC0 0x5ec 102#define PLLMB_MISC1 0x5ec
103#define PLLA1_BASE 0x6a4 103#define PLLA1_BASE 0x6a4
104#define PLLA1_MISC0 0x6a8 104#define PLLA1_MISC0 0x6a8
105#define PLLA1_MISC1 0x6ac 105#define PLLA1_MISC1 0x6ac
@@ -243,7 +243,8 @@ static unsigned long tegra210_input_freq[] = {
243}; 243};
244 244
245static const char *mux_pllmcp_clkm[] = { 245static const char *mux_pllmcp_clkm[] = {
246 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3", 246 "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb",
247 "pll_p",
247}; 248};
248#define mux_pllmcp_clkm_idx NULL 249#define mux_pllmcp_clkm_idx NULL
249 250
@@ -367,12 +368,12 @@ static const char *mux_pllmcp_clkm[] = {
367/* PLLMB */ 368/* PLLMB */
368#define PLLMB_BASE_LOCK (1 << 27) 369#define PLLMB_BASE_LOCK (1 << 27)
369 370
370#define PLLMB_MISC0_LOCK_OVERRIDE (1 << 18) 371#define PLLMB_MISC1_LOCK_OVERRIDE (1 << 18)
371#define PLLMB_MISC0_IDDQ (1 << 17) 372#define PLLMB_MISC1_IDDQ (1 << 17)
372#define PLLMB_MISC0_LOCK_ENABLE (1 << 16) 373#define PLLMB_MISC1_LOCK_ENABLE (1 << 16)
373 374
374#define PLLMB_MISC0_DEFAULT_VALUE 0x00030000 375#define PLLMB_MISC1_DEFAULT_VALUE 0x00030000
375#define PLLMB_MISC0_WRITE_MASK 0x0007ffff 376#define PLLMB_MISC1_WRITE_MASK 0x0007ffff
376 377
377/* PLLP */ 378/* PLLP */
378#define PLLP_BASE_OVERRIDE (1 << 28) 379#define PLLP_BASE_OVERRIDE (1 << 28)
@@ -457,7 +458,8 @@ static void pllcx_check_defaults(struct tegra_clk_pll_params *params)
457 PLLCX_MISC3_WRITE_MASK); 458 PLLCX_MISC3_WRITE_MASK);
458} 459}
459 460
460void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx) 461static void tegra210_pllcx_set_defaults(const char *name,
462 struct tegra_clk_pll *pllcx)
461{ 463{
462 pllcx->params->defaults_set = true; 464 pllcx->params->defaults_set = true;
463 465
@@ -482,22 +484,22 @@ void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx)
482 udelay(1); 484 udelay(1);
483} 485}
484 486
485void _pllc_set_defaults(struct tegra_clk_pll *pllcx) 487static void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
486{ 488{
487 tegra210_pllcx_set_defaults("PLL_C", pllcx); 489 tegra210_pllcx_set_defaults("PLL_C", pllcx);
488} 490}
489 491
490void _pllc2_set_defaults(struct tegra_clk_pll *pllcx) 492static void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
491{ 493{
492 tegra210_pllcx_set_defaults("PLL_C2", pllcx); 494 tegra210_pllcx_set_defaults("PLL_C2", pllcx);
493} 495}
494 496
495void _pllc3_set_defaults(struct tegra_clk_pll *pllcx) 497static void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
496{ 498{
497 tegra210_pllcx_set_defaults("PLL_C3", pllcx); 499 tegra210_pllcx_set_defaults("PLL_C3", pllcx);
498} 500}
499 501
500void _plla1_set_defaults(struct tegra_clk_pll *pllcx) 502static void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
501{ 503{
502 tegra210_pllcx_set_defaults("PLL_A1", pllcx); 504 tegra210_pllcx_set_defaults("PLL_A1", pllcx);
503} 505}
@@ -507,7 +509,7 @@ void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
507 * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used. 509 * PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used.
508 * Fractional SDM is allowed to provide exact audio rates. 510 * Fractional SDM is allowed to provide exact audio rates.
509 */ 511 */
510void tegra210_plla_set_defaults(struct tegra_clk_pll *plla) 512static void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
511{ 513{
512 u32 mask; 514 u32 mask;
513 u32 val = readl_relaxed(clk_base + plla->params->base_reg); 515 u32 val = readl_relaxed(clk_base + plla->params->base_reg);
@@ -559,7 +561,7 @@ void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
559 * PLLD 561 * PLLD
560 * PLL with fractional SDM. 562 * PLL with fractional SDM.
561 */ 563 */
562void tegra210_plld_set_defaults(struct tegra_clk_pll *plld) 564static void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
563{ 565{
564 u32 val; 566 u32 val;
565 u32 mask = 0xffff; 567 u32 mask = 0xffff;
@@ -698,7 +700,7 @@ static void plldss_defaults(const char *pll_name, struct tegra_clk_pll *plldss,
698 udelay(1); 700 udelay(1);
699} 701}
700 702
701void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2) 703static void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
702{ 704{
703 plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE, 705 plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE,
704 PLLD2_MISC1_CFG_DEFAULT_VALUE, 706 PLLD2_MISC1_CFG_DEFAULT_VALUE,
@@ -706,7 +708,7 @@ void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
706 PLLD2_MISC3_CTRL2_DEFAULT_VALUE); 708 PLLD2_MISC3_CTRL2_DEFAULT_VALUE);
707} 709}
708 710
709void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp) 711static void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
710{ 712{
711 plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE, 713 plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE,
712 PLLDP_MISC1_CFG_DEFAULT_VALUE, 714 PLLDP_MISC1_CFG_DEFAULT_VALUE,
@@ -719,7 +721,7 @@ void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
719 * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support. 721 * Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support.
720 * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers. 722 * VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers.
721 */ 723 */
722void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4) 724static void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
723{ 725{
724 plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0); 726 plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0);
725} 727}
@@ -728,7 +730,7 @@ void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
728 * PLLRE 730 * PLLRE
729 * VCO is exposed to the clock tree directly along with post-divider output 731 * VCO is exposed to the clock tree directly along with post-divider output
730 */ 732 */
731void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre) 733static void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
732{ 734{
733 u32 mask; 735 u32 mask;
734 u32 val = readl_relaxed(clk_base + pllre->params->base_reg); 736 u32 val = readl_relaxed(clk_base + pllre->params->base_reg);
@@ -780,13 +782,13 @@ static void pllx_get_dyn_steps(struct clk_hw *hw, u32 *step_a, u32 *step_b)
780{ 782{
781 unsigned long input_rate; 783 unsigned long input_rate;
782 784
783 if (!IS_ERR_OR_NULL(hw->clk)) { 785 /* cf rate */
786 if (!IS_ERR_OR_NULL(hw->clk))
784 input_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); 787 input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
785 /* cf rate */ 788 else
786 input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
787 } else {
788 input_rate = 38400000; 789 input_rate = 38400000;
789 } 790
791 input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
790 792
791 switch (input_rate) { 793 switch (input_rate) {
792 case 12000000: 794 case 12000000:
@@ -841,7 +843,7 @@ static void pllx_check_defaults(struct tegra_clk_pll *pll)
841 PLLX_MISC5_WRITE_MASK); 843 PLLX_MISC5_WRITE_MASK);
842} 844}
843 845
844void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx) 846static void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
845{ 847{
846 u32 val; 848 u32 val;
847 u32 step_a, step_b; 849 u32 step_a, step_b;
@@ -901,7 +903,7 @@ void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
901} 903}
902 904
903/* PLLMB */ 905/* PLLMB */
904void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb) 906static void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
905{ 907{
906 u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg); 908 u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg);
907 909
@@ -914,15 +916,15 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
914 * PLL is ON: check if defaults already set, then set those 916 * PLL is ON: check if defaults already set, then set those
915 * that can be updated in flight. 917 * that can be updated in flight.
916 */ 918 */
917 val = PLLMB_MISC0_DEFAULT_VALUE & (~PLLMB_MISC0_IDDQ); 919 val = PLLMB_MISC1_DEFAULT_VALUE & (~PLLMB_MISC1_IDDQ);
918 mask = PLLMB_MISC0_LOCK_ENABLE | PLLMB_MISC0_LOCK_OVERRIDE; 920 mask = PLLMB_MISC1_LOCK_ENABLE | PLLMB_MISC1_LOCK_OVERRIDE;
919 _pll_misc_chk_default(clk_base, pllmb->params, 0, val, 921 _pll_misc_chk_default(clk_base, pllmb->params, 0, val,
920 ~mask & PLLMB_MISC0_WRITE_MASK); 922 ~mask & PLLMB_MISC1_WRITE_MASK);
921 923
922 /* Enable lock detect */ 924 /* Enable lock detect */
923 val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]); 925 val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]);
924 val &= ~mask; 926 val &= ~mask;
925 val |= PLLMB_MISC0_DEFAULT_VALUE & mask; 927 val |= PLLMB_MISC1_DEFAULT_VALUE & mask;
926 writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]); 928 writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]);
927 udelay(1); 929 udelay(1);
928 930
@@ -930,7 +932,7 @@ void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
930 } 932 }
931 933
932 /* set IDDQ, enable lock detect */ 934 /* set IDDQ, enable lock detect */
933 writel_relaxed(PLLMB_MISC0_DEFAULT_VALUE, 935 writel_relaxed(PLLMB_MISC1_DEFAULT_VALUE,
934 clk_base + pllmb->params->ext_misc_reg[0]); 936 clk_base + pllmb->params->ext_misc_reg[0]);
935 udelay(1); 937 udelay(1);
936} 938}
@@ -960,7 +962,7 @@ static void pllp_check_defaults(struct tegra_clk_pll *pll, bool enabled)
960 ~mask & PLLP_MISC1_WRITE_MASK); 962 ~mask & PLLP_MISC1_WRITE_MASK);
961} 963}
962 964
963void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp) 965static void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
964{ 966{
965 u32 mask; 967 u32 mask;
966 u32 val = readl_relaxed(clk_base + pllp->params->base_reg); 968 u32 val = readl_relaxed(clk_base + pllp->params->base_reg);
@@ -1022,7 +1024,7 @@ static void pllu_check_defaults(struct tegra_clk_pll *pll, bool hw_control)
1022 ~mask & PLLU_MISC1_WRITE_MASK); 1024 ~mask & PLLU_MISC1_WRITE_MASK);
1023} 1025}
1024 1026
1025void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu) 1027static void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
1026{ 1028{
1027 u32 val = readl_relaxed(clk_base + pllu->params->base_reg); 1029 u32 val = readl_relaxed(clk_base + pllu->params->base_reg);
1028 1030
@@ -1212,8 +1214,9 @@ static void tegra210_clk_pll_set_gain(struct tegra_clk_pll_freq_table *cfg)
1212 cfg->m *= PLL_SDM_COEFF; 1214 cfg->m *= PLL_SDM_COEFF;
1213} 1215}
1214 1216
1215unsigned long tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params, 1217static unsigned long
1216 unsigned long parent_rate) 1218tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
1219 unsigned long parent_rate)
1217{ 1220{
1218 unsigned long vco_min = params->vco_min; 1221 unsigned long vco_min = params->vco_min;
1219 1222
@@ -1386,7 +1389,7 @@ static struct tegra_clk_pll_params pll_c_params = {
1386 .mdiv_default = 3, 1389 .mdiv_default = 3,
1387 .div_nmp = &pllc_nmp, 1390 .div_nmp = &pllc_nmp,
1388 .freq_table = pll_cx_freq_table, 1391 .freq_table = pll_cx_freq_table,
1389 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1392 .flags = TEGRA_PLL_USE_LOCK,
1390 .set_defaults = _pllc_set_defaults, 1393 .set_defaults = _pllc_set_defaults,
1391 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1394 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1392}; 1395};
@@ -1425,7 +1428,7 @@ static struct tegra_clk_pll_params pll_c2_params = {
1425 .ext_misc_reg[2] = PLLC2_MISC2, 1428 .ext_misc_reg[2] = PLLC2_MISC2,
1426 .ext_misc_reg[3] = PLLC2_MISC3, 1429 .ext_misc_reg[3] = PLLC2_MISC3,
1427 .freq_table = pll_cx_freq_table, 1430 .freq_table = pll_cx_freq_table,
1428 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1431 .flags = TEGRA_PLL_USE_LOCK,
1429 .set_defaults = _pllc2_set_defaults, 1432 .set_defaults = _pllc2_set_defaults,
1430 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1433 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1431}; 1434};
@@ -1455,7 +1458,7 @@ static struct tegra_clk_pll_params pll_c3_params = {
1455 .ext_misc_reg[2] = PLLC3_MISC2, 1458 .ext_misc_reg[2] = PLLC3_MISC2,
1456 .ext_misc_reg[3] = PLLC3_MISC3, 1459 .ext_misc_reg[3] = PLLC3_MISC3,
1457 .freq_table = pll_cx_freq_table, 1460 .freq_table = pll_cx_freq_table,
1458 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1461 .flags = TEGRA_PLL_USE_LOCK,
1459 .set_defaults = _pllc3_set_defaults, 1462 .set_defaults = _pllc3_set_defaults,
1460 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1463 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1461}; 1464};
@@ -1505,7 +1508,6 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
1505 .base_reg = PLLC4_BASE, 1508 .base_reg = PLLC4_BASE,
1506 .misc_reg = PLLC4_MISC0, 1509 .misc_reg = PLLC4_MISC0,
1507 .lock_mask = PLL_BASE_LOCK, 1510 .lock_mask = PLL_BASE_LOCK,
1508 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1509 .lock_delay = 300, 1511 .lock_delay = 300,
1510 .max_p = PLL_QLIN_PDIV_MAX, 1512 .max_p = PLL_QLIN_PDIV_MAX,
1511 .ext_misc_reg[0] = PLLC4_MISC0, 1513 .ext_misc_reg[0] = PLLC4_MISC0,
@@ -1517,8 +1519,7 @@ static struct tegra_clk_pll_params pll_c4_vco_params = {
1517 .div_nmp = &pllss_nmp, 1519 .div_nmp = &pllss_nmp,
1518 .freq_table = pll_c4_vco_freq_table, 1520 .freq_table = pll_c4_vco_freq_table,
1519 .set_defaults = tegra210_pllc4_set_defaults, 1521 .set_defaults = tegra210_pllc4_set_defaults,
1520 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE | 1522 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1521 TEGRA_PLL_VCO_OUT,
1522 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1523 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1523}; 1524};
1524 1525
@@ -1559,15 +1560,15 @@ static struct tegra_clk_pll_params pll_m_params = {
1559 .vco_min = 800000000, 1560 .vco_min = 800000000,
1560 .vco_max = 1866000000, 1561 .vco_max = 1866000000,
1561 .base_reg = PLLM_BASE, 1562 .base_reg = PLLM_BASE,
1562 .misc_reg = PLLM_MISC1, 1563 .misc_reg = PLLM_MISC2,
1563 .lock_mask = PLL_BASE_LOCK, 1564 .lock_mask = PLL_BASE_LOCK,
1564 .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE, 1565 .lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE,
1565 .lock_delay = 300, 1566 .lock_delay = 300,
1566 .iddq_reg = PLLM_MISC0, 1567 .iddq_reg = PLLM_MISC2,
1567 .iddq_bit_idx = PLLM_IDDQ_BIT, 1568 .iddq_bit_idx = PLLM_IDDQ_BIT,
1568 .max_p = PLL_QLIN_PDIV_MAX, 1569 .max_p = PLL_QLIN_PDIV_MAX,
1569 .ext_misc_reg[0] = PLLM_MISC0, 1570 .ext_misc_reg[0] = PLLM_MISC2,
1570 .ext_misc_reg[0] = PLLM_MISC1, 1571 .ext_misc_reg[1] = PLLM_MISC1,
1571 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1572 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1572 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1573 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1573 .div_nmp = &pllm_nmp, 1574 .div_nmp = &pllm_nmp,
@@ -1586,19 +1587,18 @@ static struct tegra_clk_pll_params pll_mb_params = {
1586 .vco_min = 800000000, 1587 .vco_min = 800000000,
1587 .vco_max = 1866000000, 1588 .vco_max = 1866000000,
1588 .base_reg = PLLMB_BASE, 1589 .base_reg = PLLMB_BASE,
1589 .misc_reg = PLLMB_MISC0, 1590 .misc_reg = PLLMB_MISC1,
1590 .lock_mask = PLL_BASE_LOCK, 1591 .lock_mask = PLL_BASE_LOCK,
1591 .lock_enable_bit_idx = PLLMB_MISC_LOCK_ENABLE,
1592 .lock_delay = 300, 1592 .lock_delay = 300,
1593 .iddq_reg = PLLMB_MISC0, 1593 .iddq_reg = PLLMB_MISC1,
1594 .iddq_bit_idx = PLLMB_IDDQ_BIT, 1594 .iddq_bit_idx = PLLMB_IDDQ_BIT,
1595 .max_p = PLL_QLIN_PDIV_MAX, 1595 .max_p = PLL_QLIN_PDIV_MAX,
1596 .ext_misc_reg[0] = PLLMB_MISC0, 1596 .ext_misc_reg[0] = PLLMB_MISC1,
1597 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1597 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1598 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1598 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1599 .div_nmp = &pllm_nmp, 1599 .div_nmp = &pllm_nmp,
1600 .freq_table = pll_m_freq_table, 1600 .freq_table = pll_m_freq_table,
1601 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1601 .flags = TEGRA_PLL_USE_LOCK,
1602 .set_defaults = tegra210_pllmb_set_defaults, 1602 .set_defaults = tegra210_pllmb_set_defaults,
1603 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1603 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1604}; 1604};
@@ -1671,7 +1671,6 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
1671 .base_reg = PLLRE_BASE, 1671 .base_reg = PLLRE_BASE,
1672 .misc_reg = PLLRE_MISC0, 1672 .misc_reg = PLLRE_MISC0,
1673 .lock_mask = PLLRE_MISC_LOCK, 1673 .lock_mask = PLLRE_MISC_LOCK,
1674 .lock_enable_bit_idx = PLLRE_MISC_LOCK_ENABLE,
1675 .lock_delay = 300, 1674 .lock_delay = 300,
1676 .max_p = PLL_QLIN_PDIV_MAX, 1675 .max_p = PLL_QLIN_PDIV_MAX,
1677 .ext_misc_reg[0] = PLLRE_MISC0, 1676 .ext_misc_reg[0] = PLLRE_MISC0,
@@ -1681,8 +1680,7 @@ static struct tegra_clk_pll_params pll_re_vco_params = {
1681 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1680 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1682 .div_nmp = &pllre_nmp, 1681 .div_nmp = &pllre_nmp,
1683 .freq_table = pll_re_vco_freq_table, 1682 .freq_table = pll_re_vco_freq_table,
1684 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | 1683 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | TEGRA_PLL_VCO_OUT,
1685 TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
1686 .set_defaults = tegra210_pllre_set_defaults, 1684 .set_defaults = tegra210_pllre_set_defaults,
1687 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1685 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1688}; 1686};
@@ -1712,7 +1710,6 @@ static struct tegra_clk_pll_params pll_p_params = {
1712 .base_reg = PLLP_BASE, 1710 .base_reg = PLLP_BASE,
1713 .misc_reg = PLLP_MISC0, 1711 .misc_reg = PLLP_MISC0,
1714 .lock_mask = PLL_BASE_LOCK, 1712 .lock_mask = PLL_BASE_LOCK,
1715 .lock_enable_bit_idx = PLLP_MISC_LOCK_ENABLE,
1716 .lock_delay = 300, 1713 .lock_delay = 300,
1717 .iddq_reg = PLLP_MISC0, 1714 .iddq_reg = PLLP_MISC0,
1718 .iddq_bit_idx = PLLXP_IDDQ_BIT, 1715 .iddq_bit_idx = PLLXP_IDDQ_BIT,
@@ -1721,8 +1718,7 @@ static struct tegra_clk_pll_params pll_p_params = {
1721 .div_nmp = &pllp_nmp, 1718 .div_nmp = &pllp_nmp,
1722 .freq_table = pll_p_freq_table, 1719 .freq_table = pll_p_freq_table,
1723 .fixed_rate = 408000000, 1720 .fixed_rate = 408000000,
1724 .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | 1721 .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1725 TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
1726 .set_defaults = tegra210_pllp_set_defaults, 1722 .set_defaults = tegra210_pllp_set_defaults,
1727 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1723 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1728}; 1724};
@@ -1750,7 +1746,7 @@ static struct tegra_clk_pll_params pll_a1_params = {
1750 .ext_misc_reg[2] = PLLA1_MISC2, 1746 .ext_misc_reg[2] = PLLA1_MISC2,
1751 .ext_misc_reg[3] = PLLA1_MISC3, 1747 .ext_misc_reg[3] = PLLA1_MISC3,
1752 .freq_table = pll_cx_freq_table, 1748 .freq_table = pll_cx_freq_table,
1753 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1749 .flags = TEGRA_PLL_USE_LOCK,
1754 .set_defaults = _plla1_set_defaults, 1750 .set_defaults = _plla1_set_defaults,
1755 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1751 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1756}; 1752};
@@ -1787,7 +1783,6 @@ static struct tegra_clk_pll_params pll_a_params = {
1787 .base_reg = PLLA_BASE, 1783 .base_reg = PLLA_BASE,
1788 .misc_reg = PLLA_MISC0, 1784 .misc_reg = PLLA_MISC0,
1789 .lock_mask = PLL_BASE_LOCK, 1785 .lock_mask = PLL_BASE_LOCK,
1790 .lock_enable_bit_idx = PLLA_MISC_LOCK_ENABLE,
1791 .lock_delay = 300, 1786 .lock_delay = 300,
1792 .round_p_to_pdiv = pll_qlin_p_to_pdiv, 1787 .round_p_to_pdiv = pll_qlin_p_to_pdiv,
1793 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1788 .pdiv_tohw = pll_qlin_pdiv_to_hw,
@@ -1802,8 +1797,7 @@ static struct tegra_clk_pll_params pll_a_params = {
1802 .ext_misc_reg[1] = PLLA_MISC1, 1797 .ext_misc_reg[1] = PLLA_MISC1,
1803 .ext_misc_reg[2] = PLLA_MISC2, 1798 .ext_misc_reg[2] = PLLA_MISC2,
1804 .freq_table = pll_a_freq_table, 1799 .freq_table = pll_a_freq_table,
1805 .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW | 1800 .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW,
1806 TEGRA_PLL_HAS_LOCK_ENABLE,
1807 .set_defaults = tegra210_plla_set_defaults, 1801 .set_defaults = tegra210_plla_set_defaults,
1808 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1802 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1809 .set_gain = tegra210_clk_pll_set_gain, 1803 .set_gain = tegra210_clk_pll_set_gain,
@@ -1836,7 +1830,6 @@ static struct tegra_clk_pll_params pll_d_params = {
1836 .base_reg = PLLD_BASE, 1830 .base_reg = PLLD_BASE,
1837 .misc_reg = PLLD_MISC0, 1831 .misc_reg = PLLD_MISC0,
1838 .lock_mask = PLL_BASE_LOCK, 1832 .lock_mask = PLL_BASE_LOCK,
1839 .lock_enable_bit_idx = PLLD_MISC_LOCK_ENABLE,
1840 .lock_delay = 1000, 1833 .lock_delay = 1000,
1841 .iddq_reg = PLLD_MISC0, 1834 .iddq_reg = PLLD_MISC0,
1842 .iddq_bit_idx = PLLD_IDDQ_BIT, 1835 .iddq_bit_idx = PLLD_IDDQ_BIT,
@@ -1850,7 +1843,7 @@ static struct tegra_clk_pll_params pll_d_params = {
1850 .ext_misc_reg[0] = PLLD_MISC0, 1843 .ext_misc_reg[0] = PLLD_MISC0,
1851 .ext_misc_reg[1] = PLLD_MISC1, 1844 .ext_misc_reg[1] = PLLD_MISC1,
1852 .freq_table = pll_d_freq_table, 1845 .freq_table = pll_d_freq_table,
1853 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1846 .flags = TEGRA_PLL_USE_LOCK,
1854 .mdiv_default = 1, 1847 .mdiv_default = 1,
1855 .set_defaults = tegra210_plld_set_defaults, 1848 .set_defaults = tegra210_plld_set_defaults,
1856 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1849 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
@@ -1876,7 +1869,6 @@ static struct tegra_clk_pll_params pll_d2_params = {
1876 .base_reg = PLLD2_BASE, 1869 .base_reg = PLLD2_BASE,
1877 .misc_reg = PLLD2_MISC0, 1870 .misc_reg = PLLD2_MISC0,
1878 .lock_mask = PLL_BASE_LOCK, 1871 .lock_mask = PLL_BASE_LOCK,
1879 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1880 .lock_delay = 300, 1872 .lock_delay = 300,
1881 .iddq_reg = PLLD2_BASE, 1873 .iddq_reg = PLLD2_BASE,
1882 .iddq_bit_idx = PLLSS_IDDQ_BIT, 1874 .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1897,7 +1889,7 @@ static struct tegra_clk_pll_params pll_d2_params = {
1897 .mdiv_default = 1, 1889 .mdiv_default = 1,
1898 .freq_table = tegra210_pll_d2_freq_table, 1890 .freq_table = tegra210_pll_d2_freq_table,
1899 .set_defaults = tegra210_plld2_set_defaults, 1891 .set_defaults = tegra210_plld2_set_defaults,
1900 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1892 .flags = TEGRA_PLL_USE_LOCK,
1901 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1893 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1902 .set_gain = tegra210_clk_pll_set_gain, 1894 .set_gain = tegra210_clk_pll_set_gain,
1903 .adjust_vco = tegra210_clk_adjust_vco_min, 1895 .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1920,7 +1912,6 @@ static struct tegra_clk_pll_params pll_dp_params = {
1920 .base_reg = PLLDP_BASE, 1912 .base_reg = PLLDP_BASE,
1921 .misc_reg = PLLDP_MISC, 1913 .misc_reg = PLLDP_MISC,
1922 .lock_mask = PLL_BASE_LOCK, 1914 .lock_mask = PLL_BASE_LOCK,
1923 .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
1924 .lock_delay = 300, 1915 .lock_delay = 300,
1925 .iddq_reg = PLLDP_BASE, 1916 .iddq_reg = PLLDP_BASE,
1926 .iddq_bit_idx = PLLSS_IDDQ_BIT, 1917 .iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1941,7 +1932,7 @@ static struct tegra_clk_pll_params pll_dp_params = {
1941 .mdiv_default = 1, 1932 .mdiv_default = 1,
1942 .freq_table = pll_dp_freq_table, 1933 .freq_table = pll_dp_freq_table,
1943 .set_defaults = tegra210_plldp_set_defaults, 1934 .set_defaults = tegra210_plldp_set_defaults,
1944 .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE, 1935 .flags = TEGRA_PLL_USE_LOCK,
1945 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1936 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1946 .set_gain = tegra210_clk_pll_set_gain, 1937 .set_gain = tegra210_clk_pll_set_gain,
1947 .adjust_vco = tegra210_clk_adjust_vco_min, 1938 .adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1973,7 +1964,6 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
1973 .base_reg = PLLU_BASE, 1964 .base_reg = PLLU_BASE,
1974 .misc_reg = PLLU_MISC0, 1965 .misc_reg = PLLU_MISC0,
1975 .lock_mask = PLL_BASE_LOCK, 1966 .lock_mask = PLL_BASE_LOCK,
1976 .lock_enable_bit_idx = PLLU_MISC_LOCK_ENABLE,
1977 .lock_delay = 1000, 1967 .lock_delay = 1000,
1978 .iddq_reg = PLLU_MISC0, 1968 .iddq_reg = PLLU_MISC0,
1979 .iddq_bit_idx = PLLU_IDDQ_BIT, 1969 .iddq_bit_idx = PLLU_IDDQ_BIT,
@@ -1983,8 +1973,7 @@ static struct tegra_clk_pll_params pll_u_vco_params = {
1983 .pdiv_tohw = pll_qlin_pdiv_to_hw, 1973 .pdiv_tohw = pll_qlin_pdiv_to_hw,
1984 .div_nmp = &pllu_nmp, 1974 .div_nmp = &pllu_nmp,
1985 .freq_table = pll_u_freq_table, 1975 .freq_table = pll_u_freq_table,
1986 .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE | 1976 .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
1987 TEGRA_PLL_VCO_OUT,
1988 .set_defaults = tegra210_pllu_set_defaults, 1977 .set_defaults = tegra210_pllu_set_defaults,
1989 .calc_rate = tegra210_pll_fixed_mdiv_cfg, 1978 .calc_rate = tegra210_pll_fixed_mdiv_cfg,
1990}; 1979};
@@ -2218,6 +2207,7 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = {
2218 [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true }, 2207 [tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true },
2219 [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true }, 2208 [tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true },
2220 [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true }, 2209 [tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true },
2210 [tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true },
2221}; 2211};
2222 2212
2223static struct tegra_devclk devclks[] __initdata = { 2213static struct tegra_devclk devclks[] __initdata = {
@@ -2519,7 +2509,7 @@ static void __init tegra210_pll_init(void __iomem *clk_base,
2519 2509
2520 /* PLLU_VCO */ 2510 /* PLLU_VCO */
2521 val = readl(clk_base + pll_u_vco_params.base_reg); 2511 val = readl(clk_base + pll_u_vco_params.base_reg);
2522 val &= ~BIT(24); /* disable PLLU_OVERRIDE */ 2512 val &= ~PLLU_BASE_OVERRIDE; /* disable PLLU_OVERRIDE */
2523 writel(val, clk_base + pll_u_vco_params.base_reg); 2513 writel(val, clk_base + pll_u_vco_params.base_reg);
2524 2514
2525 clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc, 2515 clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc,
@@ -2738,8 +2728,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
2738 { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 }, 2728 { TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
2739 { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 }, 2729 { TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
2740 { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 }, 2730 { TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
2741 { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
2742 { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
2743 { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 }, 2731 { TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
2744 { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 }, 2732 { TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
2745 { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 }, 2733 { TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 1c300388782b..cc739291a3ce 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -460,7 +460,8 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
460 460
461 parent = clk_hw_get_parent(hw); 461 parent = clk_hw_get_parent(hw);
462 462
463 if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) { 463 if (clk_hw_get_rate(hw) ==
464 clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) {
464 WARN_ON(parent != __clk_get_hw(dd->clk_bypass)); 465 WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
465 r = _omap3_noncore_dpll_bypass(clk); 466 r = _omap3_noncore_dpll_bypass(clk);
466 } else { 467 } else {
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index e62f8cb2c9b5..3bca438ecd19 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -78,6 +78,9 @@ static int vco_set(struct clk_icst *icst, struct icst_vco vco)
78 ret = regmap_read(icst->map, icst->vcoreg_off, &val); 78 ret = regmap_read(icst->map, icst->vcoreg_off, &val);
79 if (ret) 79 if (ret)
80 return ret; 80 return ret;
81
82 /* Mask the 18 bits used by the VCO */
83 val &= ~0x7ffff;
81 val |= vco.v | (vco.r << 9) | (vco.s << 16); 84 val |= vco.v | (vco.r << 9) | (vco.s << 16);
82 85
83 /* This magic unlocks the VCO so it can be controlled */ 86 /* This magic unlocks the VCO so it can be controlled */
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 659879a56dba..f93511031177 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -296,6 +296,7 @@ endif
296config QORIQ_CPUFREQ 296config QORIQ_CPUFREQ
297 tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" 297 tristate "CPU frequency scaling driver for Freescale QorIQ SoCs"
298 depends on OF && COMMON_CLK && (PPC_E500MC || ARM) 298 depends on OF && COMMON_CLK && (PPC_E500MC || ARM)
299 depends on !CPU_THERMAL || THERMAL
299 select CLK_QORIQ 300 select CLK_QORIQ
300 help 301 help
301 This adds the CPUFreq driver support for Freescale QorIQ SoCs 302 This adds the CPUFreq driver support for Freescale QorIQ SoCs
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0031069b64c9..14b1f9393b05 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,10 +84,10 @@ config ARM_KIRKWOOD_CPUFREQ
84 SoCs. 84 SoCs.
85 85
86config ARM_MT8173_CPUFREQ 86config ARM_MT8173_CPUFREQ
87 bool "Mediatek MT8173 CPUFreq support" 87 tristate "Mediatek MT8173 CPUFreq support"
88 depends on ARCH_MEDIATEK && REGULATOR 88 depends on ARCH_MEDIATEK && REGULATOR
89 depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST) 89 depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
90 depends on !CPU_THERMAL || THERMAL=y 90 depends on !CPU_THERMAL || THERMAL
91 select PM_OPP 91 select PM_OPP
92 help 92 help
93 This adds the CPUFreq driver support for Mediatek MT8173 SoC. 93 This adds the CPUFreq driver support for Mediatek MT8173 SoC.
diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c
index 1efba340456d..2058e6d292ce 100644
--- a/drivers/cpufreq/mt8173-cpufreq.c
+++ b/drivers/cpufreq/mt8173-cpufreq.c
@@ -17,6 +17,7 @@
17#include <linux/cpu_cooling.h> 17#include <linux/cpu_cooling.h>
18#include <linux/cpufreq.h> 18#include <linux/cpufreq.h>
19#include <linux/cpumask.h> 19#include <linux/cpumask.h>
20#include <linux/module.h>
20#include <linux/of.h> 21#include <linux/of.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
22#include <linux/pm_opp.h> 23#include <linux/pm_opp.h>
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 20de861aa0ea..8bf9914d4d15 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -782,7 +782,7 @@ static void atmel_sha_finish_req(struct ahash_request *req, int err)
782 dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU | 782 dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
783 SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY); 783 SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
784 784
785 clk_disable_unprepare(dd->iclk); 785 clk_disable(dd->iclk);
786 786
787 if (req->base.complete) 787 if (req->base.complete)
788 req->base.complete(&req->base, err); 788 req->base.complete(&req->base, err);
@@ -795,7 +795,7 @@ static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
795{ 795{
796 int err; 796 int err;
797 797
798 err = clk_prepare_enable(dd->iclk); 798 err = clk_enable(dd->iclk);
799 if (err) 799 if (err)
800 return err; 800 return err;
801 801
@@ -822,7 +822,7 @@ static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
822 dev_info(dd->dev, 822 dev_info(dd->dev,
823 "version: 0x%x\n", dd->hw_version); 823 "version: 0x%x\n", dd->hw_version);
824 824
825 clk_disable_unprepare(dd->iclk); 825 clk_disable(dd->iclk);
826} 826}
827 827
828static int atmel_sha_handle_queue(struct atmel_sha_dev *dd, 828static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -1410,6 +1410,10 @@ static int atmel_sha_probe(struct platform_device *pdev)
1410 goto res_err; 1410 goto res_err;
1411 } 1411 }
1412 1412
1413 err = clk_prepare(sha_dd->iclk);
1414 if (err)
1415 goto res_err;
1416
1413 atmel_sha_hw_version_init(sha_dd); 1417 atmel_sha_hw_version_init(sha_dd);
1414 1418
1415 atmel_sha_get_cap(sha_dd); 1419 atmel_sha_get_cap(sha_dd);
@@ -1421,12 +1425,12 @@ static int atmel_sha_probe(struct platform_device *pdev)
1421 if (IS_ERR(pdata)) { 1425 if (IS_ERR(pdata)) {
1422 dev_err(&pdev->dev, "platform data not available\n"); 1426 dev_err(&pdev->dev, "platform data not available\n");
1423 err = PTR_ERR(pdata); 1427 err = PTR_ERR(pdata);
1424 goto res_err; 1428 goto iclk_unprepare;
1425 } 1429 }
1426 } 1430 }
1427 if (!pdata->dma_slave) { 1431 if (!pdata->dma_slave) {
1428 err = -ENXIO; 1432 err = -ENXIO;
1429 goto res_err; 1433 goto iclk_unprepare;
1430 } 1434 }
1431 err = atmel_sha_dma_init(sha_dd, pdata); 1435 err = atmel_sha_dma_init(sha_dd, pdata);
1432 if (err) 1436 if (err)
@@ -1457,6 +1461,8 @@ err_algs:
1457 if (sha_dd->caps.has_dma) 1461 if (sha_dd->caps.has_dma)
1458 atmel_sha_dma_cleanup(sha_dd); 1462 atmel_sha_dma_cleanup(sha_dd);
1459err_sha_dma: 1463err_sha_dma:
1464iclk_unprepare:
1465 clk_unprepare(sha_dd->iclk);
1460res_err: 1466res_err:
1461 tasklet_kill(&sha_dd->done_task); 1467 tasklet_kill(&sha_dd->done_task);
1462sha_dd_err: 1468sha_dd_err:
@@ -1483,12 +1489,7 @@ static int atmel_sha_remove(struct platform_device *pdev)
1483 if (sha_dd->caps.has_dma) 1489 if (sha_dd->caps.has_dma)
1484 atmel_sha_dma_cleanup(sha_dd); 1490 atmel_sha_dma_cleanup(sha_dd);
1485 1491
1486 iounmap(sha_dd->io_base); 1492 clk_unprepare(sha_dd->iclk);
1487
1488 clk_put(sha_dd->iclk);
1489
1490 if (sha_dd->irq >= 0)
1491 free_irq(sha_dd->irq, sha_dd);
1492 1493
1493 return 0; 1494 return 0;
1494} 1495}
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 0643e3366e33..c0656e7f37b5 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -306,7 +306,7 @@ static int mv_cesa_dev_dma_init(struct mv_cesa_dev *cesa)
306 return -ENOMEM; 306 return -ENOMEM;
307 307
308 dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0); 308 dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
309 if (!dma->cache_pool) 309 if (!dma->padding_pool)
310 return -ENOMEM; 310 return -ENOMEM;
311 311
312 cesa->dma = dma; 312 cesa->dma = dma;
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 848b93ee930f..fe9dce0245bf 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -500,6 +500,8 @@ static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
500 clk_set_min_rate(tegra->emc_clock, rate); 500 clk_set_min_rate(tegra->emc_clock, rate);
501 clk_set_rate(tegra->emc_clock, 0); 501 clk_set_rate(tegra->emc_clock, 0);
502 502
503 *freq = rate;
504
503 return 0; 505 return 0;
504} 506}
505 507
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 64f5d1bdbb48..8e304b1befc5 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -176,6 +176,7 @@
176#define AT_XDMAC_MAX_CHAN 0x20 176#define AT_XDMAC_MAX_CHAN 0x20
177#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */ 177#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
178#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */ 178#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
179#define AT_XDMAC_RESIDUE_MAX_RETRIES 5
179 180
180#define AT_XDMAC_DMA_BUSWIDTHS\ 181#define AT_XDMAC_DMA_BUSWIDTHS\
181 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ 182 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@@ -1395,8 +1396,8 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1395 struct at_xdmac_desc *desc, *_desc; 1396 struct at_xdmac_desc *desc, *_desc;
1396 struct list_head *descs_list; 1397 struct list_head *descs_list;
1397 enum dma_status ret; 1398 enum dma_status ret;
1398 int residue; 1399 int residue, retry;
1399 u32 cur_nda, mask, value; 1400 u32 cur_nda, check_nda, cur_ubc, mask, value;
1400 u8 dwidth = 0; 1401 u8 dwidth = 0;
1401 unsigned long flags; 1402 unsigned long flags;
1402 1403
@@ -1433,7 +1434,42 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1433 cpu_relax(); 1434 cpu_relax();
1434 } 1435 }
1435 1436
1437 /*
1438 * When processing the residue, we need to read two registers but we
1439 * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
1440 * we stand in the descriptor list and AT_XDMAC_CUBC is used
1441 * to know how many data are remaining for the current descriptor.
1442 * Since the dma channel is not paused to not loose data, between the
1443 * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
1444 * descriptor.
1445 * For that reason, after reading AT_XDMAC_CUBC, we check if we are
1446 * still using the same descriptor by reading a second time
1447 * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
1448 * read again AT_XDMAC_CUBC.
1449 * Memory barriers are used to ensure the read order of the registers.
1450 * A max number of retries is set because unlikely it can never ends if
1451 * we are transferring a lot of data with small buffers.
1452 */
1436 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc; 1453 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1454 rmb();
1455 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1456 for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
1457 rmb();
1458 check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1459
1460 if (likely(cur_nda == check_nda))
1461 break;
1462
1463 cur_nda = check_nda;
1464 rmb();
1465 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1466 }
1467
1468 if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
1469 ret = DMA_ERROR;
1470 goto spin_unlock;
1471 }
1472
1437 /* 1473 /*
1438 * Remove size of all microblocks already transferred and the current 1474 * Remove size of all microblocks already transferred and the current
1439 * one. Then add the remaining size to transfer of the current 1475 * one. Then add the remaining size to transfer of the current
@@ -1446,7 +1482,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1446 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) 1482 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
1447 break; 1483 break;
1448 } 1484 }
1449 residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth; 1485 residue += cur_ubc << dwidth;
1450 1486
1451 dma_set_residue(txstate, residue); 1487 dma_set_residue(txstate, residue);
1452 1488
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e893318560db..5ad0ec1f0e29 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
156 156
157 /* Enable interrupts */ 157 /* Enable interrupts */
158 channel_set_bit(dw, MASK.XFER, dwc->mask); 158 channel_set_bit(dw, MASK.XFER, dwc->mask);
159 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
160 channel_set_bit(dw, MASK.ERROR, dwc->mask); 159 channel_set_bit(dw, MASK.ERROR, dwc->mask);
161 160
162 dwc->initialized = true; 161 dwc->initialized = true;
@@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
588 587
589 spin_unlock_irqrestore(&dwc->lock, flags); 588 spin_unlock_irqrestore(&dwc->lock, flags);
590 } 589 }
590
591 /* Re-enable interrupts */
592 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
591} 593}
592 594
593/* ------------------------------------------------------------------------- */ 595/* ------------------------------------------------------------------------- */
@@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data)
618 dwc_scan_descriptors(dw, dwc); 620 dwc_scan_descriptors(dw, dwc);
619 } 621 }
620 622
621 /* 623 /* Re-enable interrupts */
622 * Re-enable interrupts.
623 */
624 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); 624 channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
625 channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
626 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); 625 channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
627} 626}
628 627
@@ -1261,6 +1260,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
1261int dw_dma_cyclic_start(struct dma_chan *chan) 1260int dw_dma_cyclic_start(struct dma_chan *chan)
1262{ 1261{
1263 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 1262 struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
1263 struct dw_dma *dw = to_dw_dma(chan->device);
1264 unsigned long flags; 1264 unsigned long flags;
1265 1265
1266 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { 1266 if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1269,7 +1269,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
1269 } 1269 }
1270 1270
1271 spin_lock_irqsave(&dwc->lock, flags); 1271 spin_lock_irqsave(&dwc->lock, flags);
1272
1273 /* Enable interrupts to perform cyclic transfer */
1274 channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1275
1272 dwc_dostart(dwc, dwc->cdesc->desc[0]); 1276 dwc_dostart(dwc, dwc->cdesc->desc[0]);
1277
1273 spin_unlock_irqrestore(&dwc->lock, flags); 1278 spin_unlock_irqrestore(&dwc->lock, flags);
1274 1279
1275 return 0; 1280 return 0;
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 4c30fdd092b3..358f9689a3f5 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -108,6 +108,10 @@ static const struct pci_device_id dw_pci_id_table[] = {
108 108
109 /* Haswell */ 109 /* Haswell */
110 { PCI_VDEVICE(INTEL, 0x9c60) }, 110 { PCI_VDEVICE(INTEL, 0x9c60) },
111
112 /* Broadwell */
113 { PCI_VDEVICE(INTEL, 0x9ce0) },
114
111 { } 115 { }
112}; 116};
113MODULE_DEVICE_TABLE(pci, dw_pci_id_table); 117MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index d92d65549406..e3d7fcb69b4c 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -113,6 +113,9 @@
113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ 113#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
114#define CHMAP_EXIST BIT(24) 114#define CHMAP_EXIST BIT(24)
115 115
116/* CCSTAT register */
117#define EDMA_CCSTAT_ACTV BIT(4)
118
116/* 119/*
117 * Max of 20 segments per channel to conserve PaRAM slots 120 * Max of 20 segments per channel to conserve PaRAM slots
118 * Also note that MAX_NR_SG should be atleast the no.of periods 121 * Also note that MAX_NR_SG should be atleast the no.of periods
@@ -1680,9 +1683,20 @@ static void edma_issue_pending(struct dma_chan *chan)
1680 spin_unlock_irqrestore(&echan->vchan.lock, flags); 1683 spin_unlock_irqrestore(&echan->vchan.lock, flags);
1681} 1684}
1682 1685
1686/*
1687 * This limit exists to avoid a possible infinite loop when waiting for proof
1688 * that a particular transfer is completed. This limit can be hit if there
1689 * are large bursts to/from slow devices or the CPU is never able to catch
1690 * the DMA hardware idle. On an AM335x transfering 48 bytes from the UART
1691 * RX-FIFO, as many as 55 loops have been seen.
1692 */
1693#define EDMA_MAX_TR_WAIT_LOOPS 1000
1694
1683static u32 edma_residue(struct edma_desc *edesc) 1695static u32 edma_residue(struct edma_desc *edesc)
1684{ 1696{
1685 bool dst = edesc->direction == DMA_DEV_TO_MEM; 1697 bool dst = edesc->direction == DMA_DEV_TO_MEM;
1698 int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
1699 struct edma_chan *echan = edesc->echan;
1686 struct edma_pset *pset = edesc->pset; 1700 struct edma_pset *pset = edesc->pset;
1687 dma_addr_t done, pos; 1701 dma_addr_t done, pos;
1688 int i; 1702 int i;
@@ -1691,7 +1705,32 @@ static u32 edma_residue(struct edma_desc *edesc)
1691 * We always read the dst/src position from the first RamPar 1705 * We always read the dst/src position from the first RamPar
1692 * pset. That's the one which is active now. 1706 * pset. That's the one which is active now.
1693 */ 1707 */
1694 pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst); 1708 pos = edma_get_position(echan->ecc, echan->slot[0], dst);
1709
1710 /*
1711 * "pos" may represent a transfer request that is still being
1712 * processed by the EDMACC or EDMATC. We will busy wait until
1713 * any one of the situations occurs:
1714 * 1. the DMA hardware is idle
1715 * 2. a new transfer request is setup
1716 * 3. we hit the loop limit
1717 */
1718 while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
1719 /* check if a new transfer request is setup */
1720 if (edma_get_position(echan->ecc,
1721 echan->slot[0], dst) != pos) {
1722 break;
1723 }
1724
1725 if (!--loop_count) {
1726 dev_dbg_ratelimited(echan->vchan.chan.device->dev,
1727 "%s: timeout waiting for PaRAM update\n",
1728 __func__);
1729 break;
1730 }
1731
1732 cpu_relax();
1733 }
1695 1734
1696 /* 1735 /*
1697 * Cyclic is simple. Just subtract pset[0].addr from pos. 1736 * Cyclic is simple. Just subtract pset[0].addr from pos.
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 2209f75fdf05..aac85c30c2cf 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -522,6 +522,8 @@ static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan,
522 chan_dbg(chan, "LD %p callback\n", desc); 522 chan_dbg(chan, "LD %p callback\n", desc);
523 txd->callback(txd->callback_param); 523 txd->callback(txd->callback_param);
524 } 524 }
525
526 dma_descriptor_unmap(txd);
525 } 527 }
526 528
527 /* Run any dependencies */ 529 /* Run any dependencies */
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1d5df2ef148b..21539d5c54c3 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -861,32 +861,42 @@ void ioat_timer_event(unsigned long data)
861 return; 861 return;
862 } 862 }
863 863
864 spin_lock_bh(&ioat_chan->cleanup_lock);
865
866 /* handle the no-actives case */
867 if (!ioat_ring_active(ioat_chan)) {
868 spin_lock_bh(&ioat_chan->prep_lock);
869 check_active(ioat_chan);
870 spin_unlock_bh(&ioat_chan->prep_lock);
871 spin_unlock_bh(&ioat_chan->cleanup_lock);
872 return;
873 }
874
864 /* if we haven't made progress and we have already 875 /* if we haven't made progress and we have already
865 * acknowledged a pending completion once, then be more 876 * acknowledged a pending completion once, then be more
866 * forceful with a restart 877 * forceful with a restart
867 */ 878 */
868 spin_lock_bh(&ioat_chan->cleanup_lock);
869 if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) 879 if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
870 __cleanup(ioat_chan, phys_complete); 880 __cleanup(ioat_chan, phys_complete);
871 else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) { 881 else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
882 u32 chanerr;
883
884 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
885 dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
886 dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
887 status, chanerr);
888 dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
889 ioat_ring_active(ioat_chan));
890
872 spin_lock_bh(&ioat_chan->prep_lock); 891 spin_lock_bh(&ioat_chan->prep_lock);
873 ioat_restart_channel(ioat_chan); 892 ioat_restart_channel(ioat_chan);
874 spin_unlock_bh(&ioat_chan->prep_lock); 893 spin_unlock_bh(&ioat_chan->prep_lock);
875 spin_unlock_bh(&ioat_chan->cleanup_lock); 894 spin_unlock_bh(&ioat_chan->cleanup_lock);
876 return; 895 return;
877 } else { 896 } else
878 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state); 897 set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
879 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
880 }
881
882 898
883 if (ioat_ring_active(ioat_chan)) 899 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
884 mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
885 else {
886 spin_lock_bh(&ioat_chan->prep_lock);
887 check_active(ioat_chan);
888 spin_unlock_bh(&ioat_chan->prep_lock);
889 }
890 spin_unlock_bh(&ioat_chan->cleanup_lock); 900 spin_unlock_bh(&ioat_chan->cleanup_lock);
891} 901}
892 902
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index f2a0310ae771..debca824bed6 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -583,6 +583,8 @@ static void set_updater_desc(struct pxad_desc_sw *sw_desc,
583 (PXA_DCMD_LENGTH & sizeof(u32)); 583 (PXA_DCMD_LENGTH & sizeof(u32));
584 if (flags & DMA_PREP_INTERRUPT) 584 if (flags & DMA_PREP_INTERRUPT)
585 updater->dcmd |= PXA_DCMD_ENDIRQEN; 585 updater->dcmd |= PXA_DCMD_ENDIRQEN;
586 if (sw_desc->cyclic)
587 sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
586} 588}
587 589
588static bool is_desc_completed(struct virt_dma_desc *vd) 590static bool is_desc_completed(struct virt_dma_desc *vd)
@@ -673,6 +675,10 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
673 dev_dbg(&chan->vc.chan.dev->device, 675 dev_dbg(&chan->vc.chan.dev->device,
674 "%s(): checking txd %p[%x]: completed=%d\n", 676 "%s(): checking txd %p[%x]: completed=%d\n",
675 __func__, vd, vd->tx.cookie, is_desc_completed(vd)); 677 __func__, vd, vd->tx.cookie, is_desc_completed(vd));
678 if (to_pxad_sw_desc(vd)->cyclic) {
679 vchan_cyclic_callback(vd);
680 break;
681 }
676 if (is_desc_completed(vd)) { 682 if (is_desc_completed(vd)) {
677 list_del(&vd->node); 683 list_del(&vd->node);
678 vchan_cookie_complete(vd); 684 vchan_cookie_complete(vd);
@@ -1080,7 +1086,7 @@ pxad_prep_dma_cyclic(struct dma_chan *dchan,
1080 return NULL; 1086 return NULL;
1081 1087
1082 pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr); 1088 pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
1083 dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH | period_len); 1089 dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
1084 dev_dbg(&chan->vc.chan.dev->device, 1090 dev_dbg(&chan->vc.chan.dev->device,
1085 "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n", 1091 "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
1086 __func__, (unsigned long)buf_addr, len, period_len, dir, flags); 1092 __func__, (unsigned long)buf_addr, len, period_len, dir, flags);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index e438ee5b433f..f5c6b97c8958 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1574,7 +1574,7 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
1574 for (cha = 0; cha < KNL_MAX_CHAS; cha++) { 1574 for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
1575 if (knl_get_mc_route(target, 1575 if (knl_get_mc_route(target,
1576 mc_route_reg[cha]) == channel 1576 mc_route_reg[cha]) == channel
1577 && participants[channel]) { 1577 && !participants[channel]) {
1578 participant_count++; 1578 participant_count++;
1579 participants[channel] = 1; 1579 participants[channel] = 1;
1580 break; 1580 break;
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 756eca8c4cf8..10e6774ab2a2 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
221 } 221 }
222 222
223 if ((attributes & ~EFI_VARIABLE_MASK) != 0 || 223 if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
224 efivar_validate(name, data, size) == false) { 224 efivar_validate(vendor, name, data, size) == false) {
225 printk(KERN_ERR "efivars: Malformed variable content\n"); 225 printk(KERN_ERR "efivars: Malformed variable content\n");
226 return -EINVAL; 226 return -EINVAL;
227 } 227 }
@@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
447 } 447 }
448 448
449 if ((attributes & ~EFI_VARIABLE_MASK) != 0 || 449 if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
450 efivar_validate(name, data, size) == false) { 450 efivar_validate(new_var->VendorGuid, name, data,
451 size) == false) {
451 printk(KERN_ERR "efivars: Malformed variable content\n"); 452 printk(KERN_ERR "efivars: Malformed variable content\n");
452 return -EINVAL; 453 return -EINVAL;
453 } 454 }
@@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
540static int 541static int
541efivar_create_sysfs_entry(struct efivar_entry *new_var) 542efivar_create_sysfs_entry(struct efivar_entry *new_var)
542{ 543{
543 int i, short_name_size; 544 int short_name_size;
544 char *short_name; 545 char *short_name;
545 unsigned long variable_name_size; 546 unsigned long utf8_name_size;
546 efi_char16_t *variable_name; 547 efi_char16_t *variable_name = new_var->var.VariableName;
547 int ret; 548 int ret;
548 549
549 variable_name = new_var->var.VariableName;
550 variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
551
552 /* 550 /*
553 * Length of the variable bytes in ASCII, plus the '-' separator, 551 * Length of the variable bytes in UTF8, plus the '-' separator,
554 * plus the GUID, plus trailing NUL 552 * plus the GUID, plus trailing NUL
555 */ 553 */
556 short_name_size = variable_name_size / sizeof(efi_char16_t) 554 utf8_name_size = ucs2_utf8size(variable_name);
557 + 1 + EFI_VARIABLE_GUID_LEN + 1; 555 short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
558
559 short_name = kzalloc(short_name_size, GFP_KERNEL);
560 556
557 short_name = kmalloc(short_name_size, GFP_KERNEL);
561 if (!short_name) 558 if (!short_name)
562 return -ENOMEM; 559 return -ENOMEM;
563 560
564 /* Convert Unicode to normal chars (assume top bits are 0), 561 ucs2_as_utf8(short_name, variable_name, short_name_size);
565 ala UTF-8 */ 562
566 for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
567 short_name[i] = variable_name[i] & 0xFF;
568 }
569 /* This is ugly, but necessary to separate one vendor's 563 /* This is ugly, but necessary to separate one vendor's
570 private variables from another's. */ 564 private variables from another's. */
571 565 short_name[utf8_name_size] = '-';
572 *(short_name + strlen(short_name)) = '-';
573 efi_guid_to_str(&new_var->var.VendorGuid, 566 efi_guid_to_str(&new_var->var.VendorGuid,
574 short_name + strlen(short_name)); 567 short_name + utf8_name_size + 1);
575 568
576 new_var->kobj.kset = efivars_kset; 569 new_var->kobj.kset = efivars_kset;
577 570
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 70a0fb10517f..7f2ea21c730d 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
165} 165}
166 166
167struct variable_validate { 167struct variable_validate {
168 efi_guid_t vendor;
168 char *name; 169 char *name;
169 bool (*validate)(efi_char16_t *var_name, int match, u8 *data, 170 bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
170 unsigned long len); 171 unsigned long len);
171}; 172};
172 173
174/*
175 * This is the list of variables we need to validate, as well as the
176 * whitelist for what we think is safe not to default to immutable.
177 *
178 * If it has a validate() method that's not NULL, it'll go into the
179 * validation routine. If not, it is assumed valid, but still used for
180 * whitelisting.
181 *
182 * Note that it's sorted by {vendor,name}, but globbed names must come after
183 * any other name with the same prefix.
184 */
173static const struct variable_validate variable_validate[] = { 185static const struct variable_validate variable_validate[] = {
174 { "BootNext", validate_uint16 }, 186 { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
175 { "BootOrder", validate_boot_order }, 187 { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
176 { "DriverOrder", validate_boot_order }, 188 { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
177 { "Boot*", validate_load_option }, 189 { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
178 { "Driver*", validate_load_option }, 190 { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
179 { "ConIn", validate_device_path }, 191 { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
180 { "ConInDev", validate_device_path }, 192 { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
181 { "ConOut", validate_device_path }, 193 { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
182 { "ConOutDev", validate_device_path }, 194 { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
183 { "ErrOut", validate_device_path }, 195 { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
184 { "ErrOutDev", validate_device_path }, 196 { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
185 { "Timeout", validate_uint16 }, 197 { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
186 { "Lang", validate_ascii_string }, 198 { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
187 { "PlatformLang", validate_ascii_string }, 199 { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
188 { "", NULL }, 200 { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
201 { LINUX_EFI_CRASH_GUID, "*", NULL },
202 { NULL_GUID, "", NULL },
189}; 203};
190 204
205static bool
206variable_matches(const char *var_name, size_t len, const char *match_name,
207 int *match)
208{
209 for (*match = 0; ; (*match)++) {
210 char c = match_name[*match];
211 char u = var_name[*match];
212
213 /* Wildcard in the matching name means we've matched */
214 if (c == '*')
215 return true;
216
217 /* Case sensitive match */
218 if (!c && *match == len)
219 return true;
220
221 if (c != u)
222 return false;
223
224 if (!c)
225 return true;
226 }
227 return true;
228}
229
191bool 230bool
192efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len) 231efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
232 unsigned long data_size)
193{ 233{
194 int i; 234 int i;
195 u16 *unicode_name = var_name; 235 unsigned long utf8_size;
236 u8 *utf8_name;
196 237
197 for (i = 0; variable_validate[i].validate != NULL; i++) { 238 utf8_size = ucs2_utf8size(var_name);
198 const char *name = variable_validate[i].name; 239 utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
199 int match; 240 if (!utf8_name)
241 return false;
200 242
201 for (match = 0; ; match++) { 243 ucs2_as_utf8(utf8_name, var_name, utf8_size);
202 char c = name[match]; 244 utf8_name[utf8_size] = '\0';
203 u16 u = unicode_name[match];
204 245
205 /* All special variables are plain ascii */ 246 for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
206 if (u > 127) 247 const char *name = variable_validate[i].name;
207 return true; 248 int match = 0;
208 249
209 /* Wildcard in the matching name means we've matched */ 250 if (efi_guidcmp(vendor, variable_validate[i].vendor))
210 if (c == '*') 251 continue;
211 return variable_validate[i].validate(var_name,
212 match, data, len);
213 252
214 /* Case sensitive match */ 253 if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
215 if (c != u) 254 if (variable_validate[i].validate == NULL)
216 break; 255 break;
217 256 kfree(utf8_name);
218 /* Reached the end of the string while matching */ 257 return variable_validate[i].validate(var_name, match,
219 if (!c) 258 data, data_size);
220 return variable_validate[i].validate(var_name,
221 match, data, len);
222 } 259 }
223 } 260 }
224 261 kfree(utf8_name);
225 return true; 262 return true;
226} 263}
227EXPORT_SYMBOL_GPL(efivar_validate); 264EXPORT_SYMBOL_GPL(efivar_validate);
228 265
266bool
267efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
268 size_t len)
269{
270 int i;
271 bool found = false;
272 int match = 0;
273
274 /*
275 * Check if our variable is in the validated variables list
276 */
277 for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
278 if (efi_guidcmp(variable_validate[i].vendor, vendor))
279 continue;
280
281 if (variable_matches(var_name, len,
282 variable_validate[i].name, &match)) {
283 found = true;
284 break;
285 }
286 }
287
288 /*
289 * If it's in our list, it is removable.
290 */
291 return found;
292}
293EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
294
229static efi_status_t 295static efi_status_t
230check_var_size(u32 attributes, unsigned long size) 296check_var_size(u32 attributes, unsigned long size)
231{ 297{
@@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
852 918
853 *set = false; 919 *set = false;
854 920
855 if (efivar_validate(name, data, *size) == false) 921 if (efivar_validate(*vendor, name, data, *size) == false)
856 return -EINVAL; 922 return -EINVAL;
857 923
858 /* 924 /*
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 2aeaebd1c6e7..3f87a03abc22 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -312,8 +312,8 @@ static int altera_gpio_probe(struct platform_device *pdev)
312 handle_simple_irq, IRQ_TYPE_NONE); 312 handle_simple_irq, IRQ_TYPE_NONE);
313 313
314 if (ret) { 314 if (ret) {
315 dev_info(&pdev->dev, "could not add irqchip\n"); 315 dev_err(&pdev->dev, "could not add irqchip\n");
316 return ret; 316 goto teardown;
317 } 317 }
318 318
319 gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc, 319 gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc,
@@ -326,6 +326,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
326skip_irq: 326skip_irq:
327 return 0; 327 return 0;
328teardown: 328teardown:
329 of_mm_gpiochip_remove(&altera_gc->mmchip);
329 pr_err("%s: registration failed with status %d\n", 330 pr_err("%s: registration failed with status %d\n",
330 node->full_name, ret); 331 node->full_name, ret);
331 332
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index ec58f4288649..cd007a67b302 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -195,7 +195,7 @@ static int davinci_gpio_of_xlate(struct gpio_chip *gc,
195static int davinci_gpio_probe(struct platform_device *pdev) 195static int davinci_gpio_probe(struct platform_device *pdev)
196{ 196{
197 int i, base; 197 int i, base;
198 unsigned ngpio; 198 unsigned ngpio, nbank;
199 struct davinci_gpio_controller *chips; 199 struct davinci_gpio_controller *chips;
200 struct davinci_gpio_platform_data *pdata; 200 struct davinci_gpio_platform_data *pdata;
201 struct davinci_gpio_regs __iomem *regs; 201 struct davinci_gpio_regs __iomem *regs;
@@ -224,8 +224,9 @@ static int davinci_gpio_probe(struct platform_device *pdev)
224 if (WARN_ON(ARCH_NR_GPIOS < ngpio)) 224 if (WARN_ON(ARCH_NR_GPIOS < ngpio))
225 ngpio = ARCH_NR_GPIOS; 225 ngpio = ARCH_NR_GPIOS;
226 226
227 nbank = DIV_ROUND_UP(ngpio, 32);
227 chips = devm_kzalloc(dev, 228 chips = devm_kzalloc(dev,
228 ngpio * sizeof(struct davinci_gpio_controller), 229 nbank * sizeof(struct davinci_gpio_controller),
229 GFP_KERNEL); 230 GFP_KERNEL);
230 if (!chips) 231 if (!chips)
231 return -ENOMEM; 232 return -ENOMEM;
@@ -511,7 +512,7 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
511 return irq; 512 return irq;
512 } 513 }
513 514
514 irq_domain = irq_domain_add_legacy(NULL, ngpio, irq, 0, 515 irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
515 &davinci_gpio_irq_ops, 516 &davinci_gpio_irq_ops,
516 chips); 517 chips);
517 if (!irq_domain) { 518 if (!irq_domain) {
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index cf41440aff91..d9ab0cd1d205 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -196,6 +196,44 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
196 return 0; 196 return 0;
197} 197}
198 198
199static void gpio_rcar_irq_bus_lock(struct irq_data *d)
200{
201 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
202 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
203
204 pm_runtime_get_sync(&p->pdev->dev);
205}
206
207static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
208{
209 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
210 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
211
212 pm_runtime_put(&p->pdev->dev);
213}
214
215
216static int gpio_rcar_irq_request_resources(struct irq_data *d)
217{
218 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
219 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
220 int error;
221
222 error = pm_runtime_get_sync(&p->pdev->dev);
223 if (error < 0)
224 return error;
225
226 return 0;
227}
228
229static void gpio_rcar_irq_release_resources(struct irq_data *d)
230{
231 struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
232 struct gpio_rcar_priv *p = gpiochip_get_data(gc);
233
234 pm_runtime_put(&p->pdev->dev);
235}
236
199static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id) 237static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
200{ 238{
201 struct gpio_rcar_priv *p = dev_id; 239 struct gpio_rcar_priv *p = dev_id;
@@ -450,6 +488,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
450 irq_chip->irq_unmask = gpio_rcar_irq_enable; 488 irq_chip->irq_unmask = gpio_rcar_irq_enable;
451 irq_chip->irq_set_type = gpio_rcar_irq_set_type; 489 irq_chip->irq_set_type = gpio_rcar_irq_set_type;
452 irq_chip->irq_set_wake = gpio_rcar_irq_set_wake; 490 irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
491 irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
492 irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
493 irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
494 irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
453 irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND; 495 irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
454 496
455 ret = gpiochip_add_data(gpio_chip, p); 497 ret = gpiochip_add_data(gpio_chip, p);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 0c42a85ca5a5..d0489722fc7e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -85,6 +85,8 @@ extern int amdgpu_vm_debug;
85extern int amdgpu_sched_jobs; 85extern int amdgpu_sched_jobs;
86extern int amdgpu_sched_hw_submission; 86extern int amdgpu_sched_hw_submission;
87extern int amdgpu_powerplay; 87extern int amdgpu_powerplay;
88extern unsigned amdgpu_pcie_gen_cap;
89extern unsigned amdgpu_pcie_lane_cap;
88 90
89#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 91#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
90#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 92#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -127,47 +129,6 @@ extern int amdgpu_powerplay;
127#define AMDGPU_RESET_VCE (1 << 13) 129#define AMDGPU_RESET_VCE (1 << 13)
128#define AMDGPU_RESET_VCE1 (1 << 14) 130#define AMDGPU_RESET_VCE1 (1 << 14)
129 131
130/* CG block flags */
131#define AMDGPU_CG_BLOCK_GFX (1 << 0)
132#define AMDGPU_CG_BLOCK_MC (1 << 1)
133#define AMDGPU_CG_BLOCK_SDMA (1 << 2)
134#define AMDGPU_CG_BLOCK_UVD (1 << 3)
135#define AMDGPU_CG_BLOCK_VCE (1 << 4)
136#define AMDGPU_CG_BLOCK_HDP (1 << 5)
137#define AMDGPU_CG_BLOCK_BIF (1 << 6)
138
139/* CG flags */
140#define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0)
141#define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1)
142#define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2)
143#define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3)
144#define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4)
145#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
146#define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6)
147#define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7)
148#define AMDGPU_CG_SUPPORT_MC_LS (1 << 8)
149#define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9)
150#define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10)
151#define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11)
152#define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12)
153#define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13)
154#define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14)
155#define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15)
156#define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16)
157
158/* PG flags */
159#define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0)
160#define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1)
161#define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2)
162#define AMDGPU_PG_SUPPORT_UVD (1 << 3)
163#define AMDGPU_PG_SUPPORT_VCE (1 << 4)
164#define AMDGPU_PG_SUPPORT_CP (1 << 5)
165#define AMDGPU_PG_SUPPORT_GDS (1 << 6)
166#define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7)
167#define AMDGPU_PG_SUPPORT_SDMA (1 << 8)
168#define AMDGPU_PG_SUPPORT_ACP (1 << 9)
169#define AMDGPU_PG_SUPPORT_SAMU (1 << 10)
170
171/* GFX current status */ 132/* GFX current status */
172#define AMDGPU_GFX_NORMAL_MODE 0x00000000L 133#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
173#define AMDGPU_GFX_SAFE_MODE 0x00000001L 134#define AMDGPU_GFX_SAFE_MODE 0x00000001L
@@ -592,8 +553,6 @@ struct amdgpu_sa_manager {
592 uint32_t align; 553 uint32_t align;
593}; 554};
594 555
595struct amdgpu_sa_bo;
596
597/* sub-allocation buffer */ 556/* sub-allocation buffer */
598struct amdgpu_sa_bo { 557struct amdgpu_sa_bo {
599 struct list_head olist; 558 struct list_head olist;
@@ -2357,6 +2316,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
2357bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo); 2316bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
2358int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr, 2317int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
2359 uint32_t flags); 2318 uint32_t flags);
2319bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
2360struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm); 2320struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
2361bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, 2321bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
2362 unsigned long end); 2322 unsigned long end);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index a081dda9fa2f..7a4b101e10c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -795,6 +795,12 @@ static int amdgpu_cgs_query_system_info(void *cgs_device,
795 case CGS_SYSTEM_INFO_PCIE_MLW: 795 case CGS_SYSTEM_INFO_PCIE_MLW:
796 sys_info->value = adev->pm.pcie_mlw_mask; 796 sys_info->value = adev->pm.pcie_mlw_mask;
797 break; 797 break;
798 case CGS_SYSTEM_INFO_CG_FLAGS:
799 sys_info->value = adev->cg_flags;
800 break;
801 case CGS_SYSTEM_INFO_PG_FLAGS:
802 sys_info->value = adev->pg_flags;
803 break;
798 default: 804 default:
799 return -ENODEV; 805 return -ENODEV;
800 } 806 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 89c3dd62ba21..119cdc2c43e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
77 } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) { 77 } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
78 /* Don't try to start link training before we 78 /* Don't try to start link training before we
79 * have the dpcd */ 79 * have the dpcd */
80 if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector)) 80 if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
81 return; 81 return;
82 82
83 /* set it to OFF so that drm_helper_connector_dpms() 83 /* set it to OFF so that drm_helper_connector_dpms()
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index db20d2783def..2139da773da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1762,15 +1762,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1762 } 1762 }
1763 1763
1764 /* post card */ 1764 /* post card */
1765 amdgpu_atom_asic_init(adev->mode_info.atom_context); 1765 if (!amdgpu_card_posted(adev))
1766 amdgpu_atom_asic_init(adev->mode_info.atom_context);
1766 1767
1767 r = amdgpu_resume(adev); 1768 r = amdgpu_resume(adev);
1769 if (r)
1770 DRM_ERROR("amdgpu_resume failed (%d).\n", r);
1768 1771
1769 amdgpu_fence_driver_resume(adev); 1772 amdgpu_fence_driver_resume(adev);
1770 1773
1771 r = amdgpu_ib_ring_tests(adev); 1774 if (resume) {
1772 if (r) 1775 r = amdgpu_ib_ring_tests(adev);
1773 DRM_ERROR("ib ring test failed (%d).\n", r); 1776 if (r)
1777 DRM_ERROR("ib ring test failed (%d).\n", r);
1778 }
1774 1779
1775 r = amdgpu_late_init(adev); 1780 r = amdgpu_late_init(adev);
1776 if (r) 1781 if (r)
@@ -1903,80 +1908,97 @@ retry:
1903 return r; 1908 return r;
1904} 1909}
1905 1910
1911#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */
1912#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
1913
1906void amdgpu_get_pcie_info(struct amdgpu_device *adev) 1914void amdgpu_get_pcie_info(struct amdgpu_device *adev)
1907{ 1915{
1908 u32 mask; 1916 u32 mask;
1909 int ret; 1917 int ret;
1910 1918
1911 if (pci_is_root_bus(adev->pdev->bus)) 1919 if (amdgpu_pcie_gen_cap)
1912 return; 1920 adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
1913 1921
1914 if (amdgpu_pcie_gen2 == 0) 1922 if (amdgpu_pcie_lane_cap)
1915 return; 1923 adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
1916 1924
1917 if (adev->flags & AMD_IS_APU) 1925 /* covers APUs as well */
1926 if (pci_is_root_bus(adev->pdev->bus)) {
1927 if (adev->pm.pcie_gen_mask == 0)
1928 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
1929 if (adev->pm.pcie_mlw_mask == 0)
1930 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
1918 return; 1931 return;
1932 }
1919 1933
1920 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 1934 if (adev->pm.pcie_gen_mask == 0) {
1921 if (!ret) { 1935 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
1922 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 | 1936 if (!ret) {
1923 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 | 1937 adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
1924 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3); 1938 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
1925 1939 CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
1926 if (mask & DRM_PCIE_SPEED_25) 1940
1927 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1; 1941 if (mask & DRM_PCIE_SPEED_25)
1928 if (mask & DRM_PCIE_SPEED_50) 1942 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
1929 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2; 1943 if (mask & DRM_PCIE_SPEED_50)
1930 if (mask & DRM_PCIE_SPEED_80) 1944 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
1931 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3; 1945 if (mask & DRM_PCIE_SPEED_80)
1932 } 1946 adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
1933 ret = drm_pcie_get_max_link_width(adev->ddev, &mask); 1947 } else {
1934 if (!ret) { 1948 adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
1935 switch (mask) { 1949 }
1936 case 32: 1950 }
1937 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 | 1951 if (adev->pm.pcie_mlw_mask == 0) {
1938 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 1952 ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
1939 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 1953 if (!ret) {
1940 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 1954 switch (mask) {
1941 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 1955 case 32:
1942 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 1956 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
1943 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 1957 CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
1944 break; 1958 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
1945 case 16: 1959 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1946 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 | 1960 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1947 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 1961 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1948 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 1962 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1949 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 1963 break;
1950 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 1964 case 16:
1951 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 1965 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
1952 break; 1966 CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
1953 case 12: 1967 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1954 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 | 1968 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1955 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 1969 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1956 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 1970 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1957 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 1971 break;
1958 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 1972 case 12:
1959 break; 1973 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
1960 case 8: 1974 CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1961 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 | 1975 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1962 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 1976 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1963 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 1977 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1964 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 1978 break;
1965 break; 1979 case 8:
1966 case 4: 1980 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
1967 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 | 1981 CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1968 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 1982 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1969 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 1983 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1970 break; 1984 break;
1971 case 2: 1985 case 4:
1972 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 | 1986 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
1973 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1); 1987 CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1974 break; 1988 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1975 case 1: 1989 break;
1976 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1; 1990 case 2:
1977 break; 1991 adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
1978 default: 1992 CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
1979 break; 1993 break;
1994 case 1:
1995 adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
1996 break;
1997 default:
1998 break;
1999 }
2000 } else {
2001 adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
1980 } 2002 }
1981 } 2003 }
1982} 2004}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 2cb53c24dec0..f0ed974bd4e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -70,8 +70,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
70 70
71 struct drm_crtc *crtc = &amdgpuCrtc->base; 71 struct drm_crtc *crtc = &amdgpuCrtc->base;
72 unsigned long flags; 72 unsigned long flags;
73 unsigned i; 73 unsigned i, repcnt = 4;
74 int vpos, hpos, stat, min_udelay; 74 int vpos, hpos, stat, min_udelay = 0;
75 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; 75 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
76 76
77 if (amdgpu_flip_handle_fence(work, &work->excl)) 77 if (amdgpu_flip_handle_fence(work, &work->excl))
@@ -97,7 +97,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
97 * In practice this won't execute very often unless on very fast 97 * In practice this won't execute very often unless on very fast
98 * machines because the time window for this to happen is very small. 98 * machines because the time window for this to happen is very small.
99 */ 99 */
100 for (;;) { 100 while (amdgpuCrtc->enabled && --repcnt) {
101 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank 101 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
102 * start in hpos, and to the "fudged earlier" vblank start in 102 * start in hpos, and to the "fudged earlier" vblank start in
103 * vpos. 103 * vpos.
@@ -113,12 +113,24 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
113 break; 113 break;
114 114
115 /* Sleep at least until estimated real start of hw vblank */ 115 /* Sleep at least until estimated real start of hw vblank */
116 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
117 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); 116 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
117 if (min_udelay > vblank->framedur_ns / 2000) {
118 /* Don't wait ridiculously long - something is wrong */
119 repcnt = 0;
120 break;
121 }
122 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
118 usleep_range(min_udelay, 2 * min_udelay); 123 usleep_range(min_udelay, 2 * min_udelay);
119 spin_lock_irqsave(&crtc->dev->event_lock, flags); 124 spin_lock_irqsave(&crtc->dev->event_lock, flags);
120 }; 125 };
121 126
127 if (!repcnt)
128 DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
129 "framedur %d, linedur %d, stat %d, vpos %d, "
130 "hpos %d\n", work->crtc_id, min_udelay,
131 vblank->framedur_ns / 1000,
132 vblank->linedur_ns / 1000, stat, vpos, hpos);
133
122 /* set the flip status */ 134 /* set the flip status */
123 amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; 135 amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
124 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 136 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 01b4fd6115c2..74a2f8a6be1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -80,6 +80,8 @@ int amdgpu_exp_hw_support = 0;
80int amdgpu_sched_jobs = 32; 80int amdgpu_sched_jobs = 32;
81int amdgpu_sched_hw_submission = 2; 81int amdgpu_sched_hw_submission = 2;
82int amdgpu_powerplay = -1; 82int amdgpu_powerplay = -1;
83unsigned amdgpu_pcie_gen_cap = 0;
84unsigned amdgpu_pcie_lane_cap = 0;
83 85
84MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); 86MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
85module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); 87module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -158,6 +160,12 @@ MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 =
158module_param_named(powerplay, amdgpu_powerplay, int, 0444); 160module_param_named(powerplay, amdgpu_powerplay, int, 0444);
159#endif 161#endif
160 162
163MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
164module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
165
166MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
167module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
168
161static struct pci_device_id pciidlist[] = { 169static struct pci_device_id pciidlist[] = {
162#ifdef CONFIG_DRM_AMDGPU_CIK 170#ifdef CONFIG_DRM_AMDGPU_CIK
163 /* Kaveri */ 171 /* Kaveri */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 2e26a517f2d6..7a47c45b2131 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -606,7 +606,8 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
606 break; 606 break;
607 } 607 }
608 ttm_eu_backoff_reservation(&ticket, &list); 608 ttm_eu_backoff_reservation(&ticket, &list);
609 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) 609 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
610 !amdgpu_vm_debug)
610 amdgpu_gem_va_update_vm(adev, bo_va, args->operation); 611 amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
611 612
612 drm_gem_object_unreference_unlocked(gobj); 613 drm_gem_object_unreference_unlocked(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index d77b2bdbe800..ff9597ce268c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -113,6 +113,10 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
113 struct drm_device *ddev = dev_get_drvdata(dev); 113 struct drm_device *ddev = dev_get_drvdata(dev);
114 struct amdgpu_device *adev = ddev->dev_private; 114 struct amdgpu_device *adev = ddev->dev_private;
115 115
116 if ((adev->flags & AMD_IS_PX) &&
117 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
118 return snprintf(buf, PAGE_SIZE, "off\n");
119
116 if (adev->pp_enabled) { 120 if (adev->pp_enabled) {
117 enum amd_dpm_forced_level level; 121 enum amd_dpm_forced_level level;
118 122
@@ -142,6 +146,11 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
142 enum amdgpu_dpm_forced_level level; 146 enum amdgpu_dpm_forced_level level;
143 int ret = 0; 147 int ret = 0;
144 148
149 /* Can't force performance level when the card is off */
150 if ((adev->flags & AMD_IS_PX) &&
151 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
152 return -EINVAL;
153
145 if (strncmp("low", buf, strlen("low")) == 0) { 154 if (strncmp("low", buf, strlen("low")) == 0) {
146 level = AMDGPU_DPM_FORCED_LEVEL_LOW; 155 level = AMDGPU_DPM_FORCED_LEVEL_LOW;
147 } else if (strncmp("high", buf, strlen("high")) == 0) { 156 } else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -161,6 +170,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
161 mutex_lock(&adev->pm.mutex); 170 mutex_lock(&adev->pm.mutex);
162 if (adev->pm.dpm.thermal_active) { 171 if (adev->pm.dpm.thermal_active) {
163 count = -EINVAL; 172 count = -EINVAL;
173 mutex_unlock(&adev->pm.mutex);
164 goto fail; 174 goto fail;
165 } 175 }
166 ret = amdgpu_dpm_force_performance_level(adev, level); 176 ret = amdgpu_dpm_force_performance_level(adev, level);
@@ -171,8 +181,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
171 mutex_unlock(&adev->pm.mutex); 181 mutex_unlock(&adev->pm.mutex);
172 } 182 }
173fail: 183fail:
174 mutex_unlock(&adev->pm.mutex);
175
176 return count; 184 return count;
177} 185}
178 186
@@ -469,8 +477,14 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
469 char *buf) 477 char *buf)
470{ 478{
471 struct amdgpu_device *adev = dev_get_drvdata(dev); 479 struct amdgpu_device *adev = dev_get_drvdata(dev);
480 struct drm_device *ddev = adev->ddev;
472 int temp; 481 int temp;
473 482
483 /* Can't get temperature when the card is off */
484 if ((adev->flags & AMD_IS_PX) &&
485 (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
486 return -EINVAL;
487
474 if (!adev->pp_enabled && !adev->pm.funcs->get_temperature) 488 if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
475 temp = 0; 489 temp = 0;
476 else 490 else
@@ -919,11 +933,6 @@ force:
919 933
920 /* update display watermarks based on new power state */ 934 /* update display watermarks based on new power state */
921 amdgpu_display_bandwidth_update(adev); 935 amdgpu_display_bandwidth_update(adev);
922 /* update displays */
923 amdgpu_dpm_display_configuration_changed(adev);
924
925 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
926 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
927 936
928 /* wait for the rings to drain */ 937 /* wait for the rings to drain */
929 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 938 for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
@@ -940,6 +949,12 @@ force:
940 949
941 amdgpu_dpm_post_set_power_state(adev); 950 amdgpu_dpm_post_set_power_state(adev);
942 951
952 /* update displays */
953 amdgpu_dpm_display_configuration_changed(adev);
954
955 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
956 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
957
943 if (adev->pm.funcs->force_performance_level) { 958 if (adev->pm.funcs->force_performance_level) {
944 if (adev->pm.dpm.thermal_active) { 959 if (adev->pm.dpm.thermal_active) {
945 enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level; 960 enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
@@ -1174,12 +1189,16 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
1174 struct drm_info_node *node = (struct drm_info_node *) m->private; 1189 struct drm_info_node *node = (struct drm_info_node *) m->private;
1175 struct drm_device *dev = node->minor->dev; 1190 struct drm_device *dev = node->minor->dev;
1176 struct amdgpu_device *adev = dev->dev_private; 1191 struct amdgpu_device *adev = dev->dev_private;
1192 struct drm_device *ddev = adev->ddev;
1177 1193
1178 if (!adev->pm.dpm_enabled) { 1194 if (!adev->pm.dpm_enabled) {
1179 seq_printf(m, "dpm not enabled\n"); 1195 seq_printf(m, "dpm not enabled\n");
1180 return 0; 1196 return 0;
1181 } 1197 }
1182 if (adev->pp_enabled) { 1198 if ((adev->flags & AMD_IS_PX) &&
1199 (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
1200 seq_printf(m, "PX asic powered off\n");
1201 } else if (adev->pp_enabled) {
1183 amdgpu_dpm_debugfs_print_current_performance_level(adev, m); 1202 amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
1184 } else { 1203 } else {
1185 mutex_lock(&adev->pm.mutex); 1204 mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index b9d0d55f6b47..3cb6d6c413c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -143,8 +143,10 @@ static int amdgpu_pp_late_init(void *handle)
143 adev->powerplay.pp_handle); 143 adev->powerplay.pp_handle);
144 144
145#ifdef CONFIG_DRM_AMD_POWERPLAY 145#ifdef CONFIG_DRM_AMD_POWERPLAY
146 if (adev->pp_enabled) 146 if (adev->pp_enabled) {
147 amdgpu_pm_sysfs_init(adev); 147 amdgpu_pm_sysfs_init(adev);
148 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
149 }
148#endif 150#endif
149 return ret; 151 return ret;
150} 152}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 7d8f8f1e3f7f..2faf03bcda21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -357,12 +357,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
357 357
358 for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i) 358 for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
359 if (fences[i]) 359 if (fences[i])
360 fences[count++] = fences[i]; 360 fences[count++] = fence_get(fences[i]);
361 361
362 if (count) { 362 if (count) {
363 spin_unlock(&sa_manager->wq.lock); 363 spin_unlock(&sa_manager->wq.lock);
364 t = fence_wait_any_timeout(fences, count, false, 364 t = fence_wait_any_timeout(fences, count, false,
365 MAX_SCHEDULE_TIMEOUT); 365 MAX_SCHEDULE_TIMEOUT);
366 for (i = 0; i < count; ++i)
367 fence_put(fences[i]);
368
366 r = (t > 0) ? 0 : t; 369 r = (t > 0) ? 0 : t;
367 spin_lock(&sa_manager->wq.lock); 370 spin_lock(&sa_manager->wq.lock);
368 } else { 371 } else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e52fc641edfb..9ccdd189d717 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -725,7 +725,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
725 0, PAGE_SIZE, 725 0, PAGE_SIZE,
726 PCI_DMA_BIDIRECTIONAL); 726 PCI_DMA_BIDIRECTIONAL);
727 if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) { 727 if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
728 while (--i) { 728 while (i--) {
729 pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i], 729 pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
730 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 730 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
731 gtt->ttm.dma_address[i] = 0; 731 gtt->ttm.dma_address[i] = 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index 21aacc1f45c1..bf731e9f643e 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -265,15 +265,27 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
265 unsigned max_lane_num = drm_dp_max_lane_count(dpcd); 265 unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
266 unsigned lane_num, i, max_pix_clock; 266 unsigned lane_num, i, max_pix_clock;
267 267
268 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { 268 if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
269 for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { 269 ENCODER_OBJECT_ID_NUTMEG) {
270 max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; 270 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
271 max_pix_clock = (lane_num * 270000 * 8) / bpp;
271 if (max_pix_clock >= pix_clock) { 272 if (max_pix_clock >= pix_clock) {
272 *dp_lanes = lane_num; 273 *dp_lanes = lane_num;
273 *dp_rate = link_rates[i]; 274 *dp_rate = 270000;
274 return 0; 275 return 0;
275 } 276 }
276 } 277 }
278 } else {
279 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
280 for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
281 max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
282 if (max_pix_clock >= pix_clock) {
283 *dp_lanes = lane_num;
284 *dp_rate = link_rates[i];
285 return 0;
286 }
287 }
288 }
277 } 289 }
278 290
279 return -EINVAL; 291 return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 8b4731d4e10e..474ca02b0949 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -31,6 +31,7 @@
31#include "ci_dpm.h" 31#include "ci_dpm.h"
32#include "gfx_v7_0.h" 32#include "gfx_v7_0.h"
33#include "atom.h" 33#include "atom.h"
34#include "amd_pcie.h"
34#include <linux/seq_file.h> 35#include <linux/seq_file.h>
35 36
36#include "smu/smu_7_0_1_d.h" 37#include "smu/smu_7_0_1_d.h"
@@ -5835,18 +5836,16 @@ static int ci_dpm_init(struct amdgpu_device *adev)
5835 u8 frev, crev; 5836 u8 frev, crev;
5836 struct ci_power_info *pi; 5837 struct ci_power_info *pi;
5837 int ret; 5838 int ret;
5838 u32 mask;
5839 5839
5840 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL); 5840 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5841 if (pi == NULL) 5841 if (pi == NULL)
5842 return -ENOMEM; 5842 return -ENOMEM;
5843 adev->pm.dpm.priv = pi; 5843 adev->pm.dpm.priv = pi;
5844 5844
5845 ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); 5845 pi->sys_pcie_mask =
5846 if (ret) 5846 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
5847 pi->sys_pcie_mask = 0; 5847 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
5848 else 5848
5849 pi->sys_pcie_mask = mask;
5850 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; 5849 pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5851 5850
5852 pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1; 5851 pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 6b1f0539ce9d..192ab13e9f05 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1462,6 +1462,9 @@ static void cik_program_aspm(struct amdgpu_device *adev)
1462 if (amdgpu_aspm == 0) 1462 if (amdgpu_aspm == 0)
1463 return; 1463 return;
1464 1464
1465 if (pci_is_root_bus(adev->pdev->bus))
1466 return;
1467
1465 /* XXX double check APUs */ 1468 /* XXX double check APUs */
1466 if (adev->flags & AMD_IS_APU) 1469 if (adev->flags & AMD_IS_APU)
1467 return; 1470 return;
@@ -2032,72 +2035,72 @@ static int cik_common_early_init(void *handle)
2032 switch (adev->asic_type) { 2035 switch (adev->asic_type) {
2033 case CHIP_BONAIRE: 2036 case CHIP_BONAIRE:
2034 adev->cg_flags = 2037 adev->cg_flags =
2035 AMDGPU_CG_SUPPORT_GFX_MGCG | 2038 AMD_CG_SUPPORT_GFX_MGCG |
2036 AMDGPU_CG_SUPPORT_GFX_MGLS | 2039 AMD_CG_SUPPORT_GFX_MGLS |
2037 /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ 2040 /*AMD_CG_SUPPORT_GFX_CGCG |*/
2038 AMDGPU_CG_SUPPORT_GFX_CGLS | 2041 AMD_CG_SUPPORT_GFX_CGLS |
2039 AMDGPU_CG_SUPPORT_GFX_CGTS | 2042 AMD_CG_SUPPORT_GFX_CGTS |
2040 AMDGPU_CG_SUPPORT_GFX_CGTS_LS | 2043 AMD_CG_SUPPORT_GFX_CGTS_LS |
2041 AMDGPU_CG_SUPPORT_GFX_CP_LS | 2044 AMD_CG_SUPPORT_GFX_CP_LS |
2042 AMDGPU_CG_SUPPORT_MC_LS | 2045 AMD_CG_SUPPORT_MC_LS |
2043 AMDGPU_CG_SUPPORT_MC_MGCG | 2046 AMD_CG_SUPPORT_MC_MGCG |
2044 AMDGPU_CG_SUPPORT_SDMA_MGCG | 2047 AMD_CG_SUPPORT_SDMA_MGCG |
2045 AMDGPU_CG_SUPPORT_SDMA_LS | 2048 AMD_CG_SUPPORT_SDMA_LS |
2046 AMDGPU_CG_SUPPORT_BIF_LS | 2049 AMD_CG_SUPPORT_BIF_LS |
2047 AMDGPU_CG_SUPPORT_VCE_MGCG | 2050 AMD_CG_SUPPORT_VCE_MGCG |
2048 AMDGPU_CG_SUPPORT_UVD_MGCG | 2051 AMD_CG_SUPPORT_UVD_MGCG |
2049 AMDGPU_CG_SUPPORT_HDP_LS | 2052 AMD_CG_SUPPORT_HDP_LS |
2050 AMDGPU_CG_SUPPORT_HDP_MGCG; 2053 AMD_CG_SUPPORT_HDP_MGCG;
2051 adev->pg_flags = 0; 2054 adev->pg_flags = 0;
2052 adev->external_rev_id = adev->rev_id + 0x14; 2055 adev->external_rev_id = adev->rev_id + 0x14;
2053 break; 2056 break;
2054 case CHIP_HAWAII: 2057 case CHIP_HAWAII:
2055 adev->cg_flags = 2058 adev->cg_flags =
2056 AMDGPU_CG_SUPPORT_GFX_MGCG | 2059 AMD_CG_SUPPORT_GFX_MGCG |
2057 AMDGPU_CG_SUPPORT_GFX_MGLS | 2060 AMD_CG_SUPPORT_GFX_MGLS |
2058 /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ 2061 /*AMD_CG_SUPPORT_GFX_CGCG |*/
2059 AMDGPU_CG_SUPPORT_GFX_CGLS | 2062 AMD_CG_SUPPORT_GFX_CGLS |
2060 AMDGPU_CG_SUPPORT_GFX_CGTS | 2063 AMD_CG_SUPPORT_GFX_CGTS |
2061 AMDGPU_CG_SUPPORT_GFX_CP_LS | 2064 AMD_CG_SUPPORT_GFX_CP_LS |
2062 AMDGPU_CG_SUPPORT_MC_LS | 2065 AMD_CG_SUPPORT_MC_LS |
2063 AMDGPU_CG_SUPPORT_MC_MGCG | 2066 AMD_CG_SUPPORT_MC_MGCG |
2064 AMDGPU_CG_SUPPORT_SDMA_MGCG | 2067 AMD_CG_SUPPORT_SDMA_MGCG |
2065 AMDGPU_CG_SUPPORT_SDMA_LS | 2068 AMD_CG_SUPPORT_SDMA_LS |
2066 AMDGPU_CG_SUPPORT_BIF_LS | 2069 AMD_CG_SUPPORT_BIF_LS |
2067 AMDGPU_CG_SUPPORT_VCE_MGCG | 2070 AMD_CG_SUPPORT_VCE_MGCG |
2068 AMDGPU_CG_SUPPORT_UVD_MGCG | 2071 AMD_CG_SUPPORT_UVD_MGCG |
2069 AMDGPU_CG_SUPPORT_HDP_LS | 2072 AMD_CG_SUPPORT_HDP_LS |
2070 AMDGPU_CG_SUPPORT_HDP_MGCG; 2073 AMD_CG_SUPPORT_HDP_MGCG;
2071 adev->pg_flags = 0; 2074 adev->pg_flags = 0;
2072 adev->external_rev_id = 0x28; 2075 adev->external_rev_id = 0x28;
2073 break; 2076 break;
2074 case CHIP_KAVERI: 2077 case CHIP_KAVERI:
2075 adev->cg_flags = 2078 adev->cg_flags =
2076 AMDGPU_CG_SUPPORT_GFX_MGCG | 2079 AMD_CG_SUPPORT_GFX_MGCG |
2077 AMDGPU_CG_SUPPORT_GFX_MGLS | 2080 AMD_CG_SUPPORT_GFX_MGLS |
2078 /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ 2081 /*AMD_CG_SUPPORT_GFX_CGCG |*/
2079 AMDGPU_CG_SUPPORT_GFX_CGLS | 2082 AMD_CG_SUPPORT_GFX_CGLS |
2080 AMDGPU_CG_SUPPORT_GFX_CGTS | 2083 AMD_CG_SUPPORT_GFX_CGTS |
2081 AMDGPU_CG_SUPPORT_GFX_CGTS_LS | 2084 AMD_CG_SUPPORT_GFX_CGTS_LS |
2082 AMDGPU_CG_SUPPORT_GFX_CP_LS | 2085 AMD_CG_SUPPORT_GFX_CP_LS |
2083 AMDGPU_CG_SUPPORT_SDMA_MGCG | 2086 AMD_CG_SUPPORT_SDMA_MGCG |
2084 AMDGPU_CG_SUPPORT_SDMA_LS | 2087 AMD_CG_SUPPORT_SDMA_LS |
2085 AMDGPU_CG_SUPPORT_BIF_LS | 2088 AMD_CG_SUPPORT_BIF_LS |
2086 AMDGPU_CG_SUPPORT_VCE_MGCG | 2089 AMD_CG_SUPPORT_VCE_MGCG |
2087 AMDGPU_CG_SUPPORT_UVD_MGCG | 2090 AMD_CG_SUPPORT_UVD_MGCG |
2088 AMDGPU_CG_SUPPORT_HDP_LS | 2091 AMD_CG_SUPPORT_HDP_LS |
2089 AMDGPU_CG_SUPPORT_HDP_MGCG; 2092 AMD_CG_SUPPORT_HDP_MGCG;
2090 adev->pg_flags = 2093 adev->pg_flags =
2091 /*AMDGPU_PG_SUPPORT_GFX_PG | 2094 /*AMD_PG_SUPPORT_GFX_PG |
2092 AMDGPU_PG_SUPPORT_GFX_SMG | 2095 AMD_PG_SUPPORT_GFX_SMG |
2093 AMDGPU_PG_SUPPORT_GFX_DMG |*/ 2096 AMD_PG_SUPPORT_GFX_DMG |*/
2094 AMDGPU_PG_SUPPORT_UVD | 2097 AMD_PG_SUPPORT_UVD |
2095 /*AMDGPU_PG_SUPPORT_VCE | 2098 /*AMD_PG_SUPPORT_VCE |
2096 AMDGPU_PG_SUPPORT_CP | 2099 AMD_PG_SUPPORT_CP |
2097 AMDGPU_PG_SUPPORT_GDS | 2100 AMD_PG_SUPPORT_GDS |
2098 AMDGPU_PG_SUPPORT_RLC_SMU_HS | 2101 AMD_PG_SUPPORT_RLC_SMU_HS |
2099 AMDGPU_PG_SUPPORT_ACP | 2102 AMD_PG_SUPPORT_ACP |
2100 AMDGPU_PG_SUPPORT_SAMU |*/ 2103 AMD_PG_SUPPORT_SAMU |*/
2101 0; 2104 0;
2102 if (adev->pdev->device == 0x1312 || 2105 if (adev->pdev->device == 0x1312 ||
2103 adev->pdev->device == 0x1316 || 2106 adev->pdev->device == 0x1316 ||
@@ -2109,29 +2112,29 @@ static int cik_common_early_init(void *handle)
2109 case CHIP_KABINI: 2112 case CHIP_KABINI:
2110 case CHIP_MULLINS: 2113 case CHIP_MULLINS:
2111 adev->cg_flags = 2114 adev->cg_flags =
2112 AMDGPU_CG_SUPPORT_GFX_MGCG | 2115 AMD_CG_SUPPORT_GFX_MGCG |
2113 AMDGPU_CG_SUPPORT_GFX_MGLS | 2116 AMD_CG_SUPPORT_GFX_MGLS |
2114 /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/ 2117 /*AMD_CG_SUPPORT_GFX_CGCG |*/
2115 AMDGPU_CG_SUPPORT_GFX_CGLS | 2118 AMD_CG_SUPPORT_GFX_CGLS |
2116 AMDGPU_CG_SUPPORT_GFX_CGTS | 2119 AMD_CG_SUPPORT_GFX_CGTS |
2117 AMDGPU_CG_SUPPORT_GFX_CGTS_LS | 2120 AMD_CG_SUPPORT_GFX_CGTS_LS |
2118 AMDGPU_CG_SUPPORT_GFX_CP_LS | 2121 AMD_CG_SUPPORT_GFX_CP_LS |
2119 AMDGPU_CG_SUPPORT_SDMA_MGCG | 2122 AMD_CG_SUPPORT_SDMA_MGCG |
2120 AMDGPU_CG_SUPPORT_SDMA_LS | 2123 AMD_CG_SUPPORT_SDMA_LS |
2121 AMDGPU_CG_SUPPORT_BIF_LS | 2124 AMD_CG_SUPPORT_BIF_LS |
2122 AMDGPU_CG_SUPPORT_VCE_MGCG | 2125 AMD_CG_SUPPORT_VCE_MGCG |
2123 AMDGPU_CG_SUPPORT_UVD_MGCG | 2126 AMD_CG_SUPPORT_UVD_MGCG |
2124 AMDGPU_CG_SUPPORT_HDP_LS | 2127 AMD_CG_SUPPORT_HDP_LS |
2125 AMDGPU_CG_SUPPORT_HDP_MGCG; 2128 AMD_CG_SUPPORT_HDP_MGCG;
2126 adev->pg_flags = 2129 adev->pg_flags =
2127 /*AMDGPU_PG_SUPPORT_GFX_PG | 2130 /*AMD_PG_SUPPORT_GFX_PG |
2128 AMDGPU_PG_SUPPORT_GFX_SMG | */ 2131 AMD_PG_SUPPORT_GFX_SMG | */
2129 AMDGPU_PG_SUPPORT_UVD | 2132 AMD_PG_SUPPORT_UVD |
2130 /*AMDGPU_PG_SUPPORT_VCE | 2133 /*AMD_PG_SUPPORT_VCE |
2131 AMDGPU_PG_SUPPORT_CP | 2134 AMD_PG_SUPPORT_CP |
2132 AMDGPU_PG_SUPPORT_GDS | 2135 AMD_PG_SUPPORT_GDS |
2133 AMDGPU_PG_SUPPORT_RLC_SMU_HS | 2136 AMD_PG_SUPPORT_RLC_SMU_HS |
2134 AMDGPU_PG_SUPPORT_SAMU |*/ 2137 AMD_PG_SUPPORT_SAMU |*/
2135 0; 2138 0;
2136 if (adev->asic_type == CHIP_KABINI) { 2139 if (adev->asic_type == CHIP_KABINI) {
2137 if (adev->rev_id == 0) 2140 if (adev->rev_id == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index e4e4b2ac77b7..266db15daf2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -856,7 +856,7 @@ static void cik_enable_sdma_mgcg(struct amdgpu_device *adev,
856{ 856{
857 u32 orig, data; 857 u32 orig, data;
858 858
859 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) { 859 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
860 WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100); 860 WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
861 WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100); 861 WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
862 } else { 862 } else {
@@ -877,7 +877,7 @@ static void cik_enable_sdma_mgls(struct amdgpu_device *adev,
877{ 877{
878 u32 orig, data; 878 u32 orig, data;
879 879
880 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) { 880 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
881 orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET); 881 orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
882 data |= 0x100; 882 data |= 0x100;
883 if (orig != data) 883 if (orig != data)
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 4dd17f2dd905..e7ef2261ff4a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -445,13 +445,13 @@ static int cz_dpm_init(struct amdgpu_device *adev)
445 pi->gfx_pg_threshold = 500; 445 pi->gfx_pg_threshold = 500;
446 pi->caps_fps = true; 446 pi->caps_fps = true;
447 /* uvd */ 447 /* uvd */
448 pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; 448 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
449 pi->caps_uvd_dpm = true; 449 pi->caps_uvd_dpm = true;
450 /* vce */ 450 /* vce */
451 pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; 451 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
452 pi->caps_vce_dpm = true; 452 pi->caps_vce_dpm = true;
453 /* acp */ 453 /* acp */
454 pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; 454 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
455 pi->caps_acp_dpm = true; 455 pi->caps_acp_dpm = true;
456 456
457 pi->caps_stable_power_state = false; 457 pi->caps_stable_power_state = false;
@@ -2202,8 +2202,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
2202 AMD_PG_STATE_GATE); 2202 AMD_PG_STATE_GATE);
2203 2203
2204 cz_enable_vce_dpm(adev, false); 2204 cz_enable_vce_dpm(adev, false);
2205 /* TODO: to figure out why vce can't be poweroff. */ 2205 cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF);
2206 /* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */
2207 pi->vce_power_gated = true; 2206 pi->vce_power_gated = true;
2208 } else { 2207 } else {
2209 cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON); 2208 cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON);
@@ -2226,10 +2225,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
2226 } 2225 }
2227 } else { /*pi->caps_vce_pg*/ 2226 } else { /*pi->caps_vce_pg*/
2228 cz_update_vce_dpm(adev); 2227 cz_update_vce_dpm(adev);
2229 cz_enable_vce_dpm(adev, true); 2228 cz_enable_vce_dpm(adev, !gate);
2230 } 2229 }
2231
2232 return;
2233} 2230}
2234 2231
2235const struct amd_ip_funcs cz_dpm_ip_funcs = { 2232const struct amd_ip_funcs cz_dpm_ip_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8fb7ebf3be3e..4411b94775db 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3041,6 +3041,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
3041 unsigned vm_id, uint64_t pd_addr) 3041 unsigned vm_id, uint64_t pd_addr)
3042{ 3042{
3043 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX); 3043 int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
3044 uint32_t seq = ring->fence_drv.sync_seq;
3045 uint64_t addr = ring->fence_drv.gpu_addr;
3046
3047 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
3048 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
3049 WAIT_REG_MEM_FUNCTION(3) | /* equal */
3050 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
3051 amdgpu_ring_write(ring, addr & 0xfffffffc);
3052 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
3053 amdgpu_ring_write(ring, seq);
3054 amdgpu_ring_write(ring, 0xffffffff);
3055 amdgpu_ring_write(ring, 4); /* poll interval */
3056
3044 if (usepfp) { 3057 if (usepfp) {
3045 /* synce CE with ME to prevent CE fetch CEIB before context switch done */ 3058 /* synce CE with ME to prevent CE fetch CEIB before context switch done */
3046 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3059 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
@@ -3522,7 +3535,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
3522 3535
3523 orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL); 3536 orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
3524 3537
3525 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) { 3538 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
3526 gfx_v7_0_enable_gui_idle_interrupt(adev, true); 3539 gfx_v7_0_enable_gui_idle_interrupt(adev, true);
3527 3540
3528 tmp = gfx_v7_0_halt_rlc(adev); 3541 tmp = gfx_v7_0_halt_rlc(adev);
@@ -3560,9 +3573,9 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3560{ 3573{
3561 u32 data, orig, tmp = 0; 3574 u32 data, orig, tmp = 0;
3562 3575
3563 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) { 3576 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
3564 if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) { 3577 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
3565 if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) { 3578 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
3566 orig = data = RREG32(mmCP_MEM_SLP_CNTL); 3579 orig = data = RREG32(mmCP_MEM_SLP_CNTL);
3567 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK; 3580 data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
3568 if (orig != data) 3581 if (orig != data)
@@ -3589,14 +3602,14 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3589 3602
3590 gfx_v7_0_update_rlc(adev, tmp); 3603 gfx_v7_0_update_rlc(adev, tmp);
3591 3604
3592 if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) { 3605 if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
3593 orig = data = RREG32(mmCGTS_SM_CTRL_REG); 3606 orig = data = RREG32(mmCGTS_SM_CTRL_REG);
3594 data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK; 3607 data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
3595 data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT); 3608 data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
3596 data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK; 3609 data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
3597 data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK; 3610 data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
3598 if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) && 3611 if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
3599 (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS)) 3612 (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
3600 data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK; 3613 data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
3601 data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK; 3614 data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
3602 data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK; 3615 data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
@@ -3662,7 +3675,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
3662 u32 data, orig; 3675 u32 data, orig;
3663 3676
3664 orig = data = RREG32(mmRLC_PG_CNTL); 3677 orig = data = RREG32(mmRLC_PG_CNTL);
3665 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) 3678 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3666 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; 3679 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3667 else 3680 else
3668 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; 3681 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
@@ -3676,7 +3689,7 @@ static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
3676 u32 data, orig; 3689 u32 data, orig;
3677 3690
3678 orig = data = RREG32(mmRLC_PG_CNTL); 3691 orig = data = RREG32(mmRLC_PG_CNTL);
3679 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS)) 3692 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
3680 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; 3693 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3681 else 3694 else
3682 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; 3695 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
@@ -3689,7 +3702,7 @@ static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
3689 u32 data, orig; 3702 u32 data, orig;
3690 3703
3691 orig = data = RREG32(mmRLC_PG_CNTL); 3704 orig = data = RREG32(mmRLC_PG_CNTL);
3692 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP)) 3705 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
3693 data &= ~0x8000; 3706 data &= ~0x8000;
3694 else 3707 else
3695 data |= 0x8000; 3708 data |= 0x8000;
@@ -3702,7 +3715,7 @@ static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
3702 u32 data, orig; 3715 u32 data, orig;
3703 3716
3704 orig = data = RREG32(mmRLC_PG_CNTL); 3717 orig = data = RREG32(mmRLC_PG_CNTL);
3705 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS)) 3718 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
3706 data &= ~0x2000; 3719 data &= ~0x2000;
3707 else 3720 else
3708 data |= 0x2000; 3721 data |= 0x2000;
@@ -3783,7 +3796,7 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
3783{ 3796{
3784 u32 data, orig; 3797 u32 data, orig;
3785 3798
3786 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) { 3799 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
3787 orig = data = RREG32(mmRLC_PG_CNTL); 3800 orig = data = RREG32(mmRLC_PG_CNTL);
3788 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; 3801 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
3789 if (orig != data) 3802 if (orig != data)
@@ -3846,7 +3859,7 @@ static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
3846 u32 data, orig; 3859 u32 data, orig;
3847 3860
3848 orig = data = RREG32(mmRLC_PG_CNTL); 3861 orig = data = RREG32(mmRLC_PG_CNTL);
3849 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG)) 3862 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
3850 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; 3863 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
3851 else 3864 else
3852 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; 3865 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
@@ -3860,7 +3873,7 @@ static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
3860 u32 data, orig; 3873 u32 data, orig;
3861 3874
3862 orig = data = RREG32(mmRLC_PG_CNTL); 3875 orig = data = RREG32(mmRLC_PG_CNTL);
3863 if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG)) 3876 if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
3864 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; 3877 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
3865 else 3878 else
3866 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; 3879 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
@@ -4027,15 +4040,15 @@ static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
4027 4040
4028static void gfx_v7_0_init_pg(struct amdgpu_device *adev) 4041static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
4029{ 4042{
4030 if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | 4043 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4031 AMDGPU_PG_SUPPORT_GFX_SMG | 4044 AMD_PG_SUPPORT_GFX_SMG |
4032 AMDGPU_PG_SUPPORT_GFX_DMG | 4045 AMD_PG_SUPPORT_GFX_DMG |
4033 AMDGPU_PG_SUPPORT_CP | 4046 AMD_PG_SUPPORT_CP |
4034 AMDGPU_PG_SUPPORT_GDS | 4047 AMD_PG_SUPPORT_GDS |
4035 AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { 4048 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4036 gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true); 4049 gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
4037 gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true); 4050 gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
4038 if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { 4051 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4039 gfx_v7_0_init_gfx_cgpg(adev); 4052 gfx_v7_0_init_gfx_cgpg(adev);
4040 gfx_v7_0_enable_cp_pg(adev, true); 4053 gfx_v7_0_enable_cp_pg(adev, true);
4041 gfx_v7_0_enable_gds_pg(adev, true); 4054 gfx_v7_0_enable_gds_pg(adev, true);
@@ -4047,14 +4060,14 @@ static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
4047 4060
4048static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) 4061static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
4049{ 4062{
4050 if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | 4063 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
4051 AMDGPU_PG_SUPPORT_GFX_SMG | 4064 AMD_PG_SUPPORT_GFX_SMG |
4052 AMDGPU_PG_SUPPORT_GFX_DMG | 4065 AMD_PG_SUPPORT_GFX_DMG |
4053 AMDGPU_PG_SUPPORT_CP | 4066 AMD_PG_SUPPORT_CP |
4054 AMDGPU_PG_SUPPORT_GDS | 4067 AMD_PG_SUPPORT_GDS |
4055 AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { 4068 AMD_PG_SUPPORT_RLC_SMU_HS)) {
4056 gfx_v7_0_update_gfx_pg(adev, false); 4069 gfx_v7_0_update_gfx_pg(adev, false);
4057 if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { 4070 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
4058 gfx_v7_0_enable_cp_pg(adev, false); 4071 gfx_v7_0_enable_cp_pg(adev, false);
4059 gfx_v7_0_enable_gds_pg(adev, false); 4072 gfx_v7_0_enable_gds_pg(adev, false);
4060 } 4073 }
@@ -5089,14 +5102,14 @@ static int gfx_v7_0_set_powergating_state(void *handle,
5089 if (state == AMD_PG_STATE_GATE) 5102 if (state == AMD_PG_STATE_GATE)
5090 gate = true; 5103 gate = true;
5091 5104
5092 if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG | 5105 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
5093 AMDGPU_PG_SUPPORT_GFX_SMG | 5106 AMD_PG_SUPPORT_GFX_SMG |
5094 AMDGPU_PG_SUPPORT_GFX_DMG | 5107 AMD_PG_SUPPORT_GFX_DMG |
5095 AMDGPU_PG_SUPPORT_CP | 5108 AMD_PG_SUPPORT_CP |
5096 AMDGPU_PG_SUPPORT_GDS | 5109 AMD_PG_SUPPORT_GDS |
5097 AMDGPU_PG_SUPPORT_RLC_SMU_HS)) { 5110 AMD_PG_SUPPORT_RLC_SMU_HS)) {
5098 gfx_v7_0_update_gfx_pg(adev, gate); 5111 gfx_v7_0_update_gfx_pg(adev, gate);
5099 if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) { 5112 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
5100 gfx_v7_0_enable_cp_pg(adev, gate); 5113 gfx_v7_0_enable_cp_pg(adev, gate);
5101 gfx_v7_0_enable_gds_pg(adev, gate); 5114 gfx_v7_0_enable_gds_pg(adev, gate);
5102 } 5115 }
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index e37378fe1edc..1b85c001f860 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4691,7 +4691,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
4691 4691
4692 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); 4692 amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
4693 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */ 4693 amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
4694 WAIT_REG_MEM_FUNCTION(3))); /* equal */ 4694 WAIT_REG_MEM_FUNCTION(3) | /* equal */
4695 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
4695 amdgpu_ring_write(ring, addr & 0xfffffffc); 4696 amdgpu_ring_write(ring, addr & 0xfffffffc);
4696 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff); 4697 amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
4697 amdgpu_ring_write(ring, seq); 4698 amdgpu_ring_write(ring, seq);
@@ -4877,7 +4878,7 @@ static int gfx_v8_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
4877 case AMDGPU_IRQ_STATE_ENABLE: 4878 case AMDGPU_IRQ_STATE_ENABLE:
4878 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0); 4879 cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
4879 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0, 4880 cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
4880 PRIV_REG_INT_ENABLE, 0); 4881 PRIV_REG_INT_ENABLE, 1);
4881 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl); 4882 WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
4882 break; 4883 break;
4883 default: 4884 default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 68ee66b38e5c..711840a23bd3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -793,7 +793,7 @@ static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
793 793
794 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { 794 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
795 orig = data = RREG32(mc_cg_registers[i]); 795 orig = data = RREG32(mc_cg_registers[i]);
796 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS)) 796 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
797 data |= mc_cg_ls_en[i]; 797 data |= mc_cg_ls_en[i];
798 else 798 else
799 data &= ~mc_cg_ls_en[i]; 799 data &= ~mc_cg_ls_en[i];
@@ -810,7 +810,7 @@ static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
810 810
811 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { 811 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
812 orig = data = RREG32(mc_cg_registers[i]); 812 orig = data = RREG32(mc_cg_registers[i]);
813 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG)) 813 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
814 data |= mc_cg_en[i]; 814 data |= mc_cg_en[i];
815 else 815 else
816 data &= ~mc_cg_en[i]; 816 data &= ~mc_cg_en[i];
@@ -826,7 +826,7 @@ static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
826 826
827 orig = data = RREG32_PCIE(ixPCIE_CNTL2); 827 orig = data = RREG32_PCIE(ixPCIE_CNTL2);
828 828
829 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) { 829 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
830 data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); 830 data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
831 data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); 831 data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
832 data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); 832 data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
@@ -849,7 +849,7 @@ static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
849 849
850 orig = data = RREG32(mmHDP_HOST_PATH_CNTL); 850 orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
851 851
852 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG)) 852 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
853 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); 853 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
854 else 854 else
855 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); 855 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
@@ -865,7 +865,7 @@ static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
865 865
866 orig = data = RREG32(mmHDP_MEM_POWER_LS); 866 orig = data = RREG32(mmHDP_MEM_POWER_LS);
867 867
868 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS)) 868 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
869 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); 869 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
870 else 870 else
871 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); 871 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 7e9154c7f1db..654d76723bc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2859,11 +2859,11 @@ static int kv_dpm_init(struct amdgpu_device *adev)
2859 pi->voltage_drop_t = 0; 2859 pi->voltage_drop_t = 0;
2860 pi->caps_sclk_throttle_low_notification = false; 2860 pi->caps_sclk_throttle_low_notification = false;
2861 pi->caps_fps = false; /* true? */ 2861 pi->caps_fps = false; /* true? */
2862 pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false; 2862 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
2863 pi->caps_uvd_dpm = true; 2863 pi->caps_uvd_dpm = true;
2864 pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false; 2864 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
2865 pi->caps_samu_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_SAMU) ? true : false; 2865 pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
2866 pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false; 2866 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
2867 pi->caps_stable_p_state = false; 2867 pi->caps_stable_p_state = false;
2868 2868
2869 ret = kv_parse_sys_info_table(adev); 2869 ret = kv_parse_sys_info_table(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 70ed73fa5156..c606ccb38d8b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -588,7 +588,7 @@ static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
588{ 588{
589 u32 orig, data; 589 u32 orig, data;
590 590
591 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) { 591 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
592 data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL); 592 data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
593 data = 0xfff; 593 data = 0xfff;
594 WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data); 594 WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
@@ -814,6 +814,9 @@ static int uvd_v4_2_set_clockgating_state(void *handle,
814 bool gate = false; 814 bool gate = false;
815 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 815 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
816 816
817 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
818 return 0;
819
817 if (state == AMD_CG_STATE_GATE) 820 if (state == AMD_CG_STATE_GATE)
818 gate = true; 821 gate = true;
819 822
@@ -832,7 +835,10 @@ static int uvd_v4_2_set_powergating_state(void *handle,
832 * revisit this when there is a cleaner line between 835 * revisit this when there is a cleaner line between
833 * the smc and the hw blocks 836 * the smc and the hw blocks
834 */ 837 */
835 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 838 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
839
840 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
841 return 0;
836 842
837 if (state == AMD_PG_STATE_GATE) { 843 if (state == AMD_PG_STATE_GATE) {
838 uvd_v4_2_stop(adev); 844 uvd_v4_2_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 578ffb62fdb2..e3c852d9d79a 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -757,6 +757,11 @@ static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
757static int uvd_v5_0_set_clockgating_state(void *handle, 757static int uvd_v5_0_set_clockgating_state(void *handle,
758 enum amd_clockgating_state state) 758 enum amd_clockgating_state state)
759{ 759{
760 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
761
762 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
763 return 0;
764
760 return 0; 765 return 0;
761} 766}
762 767
@@ -772,6 +777,9 @@ static int uvd_v5_0_set_powergating_state(void *handle,
772 */ 777 */
773 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 778 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
774 779
780 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
781 return 0;
782
775 if (state == AMD_PG_STATE_GATE) { 783 if (state == AMD_PG_STATE_GATE) {
776 uvd_v5_0_stop(adev); 784 uvd_v5_0_stop(adev);
777 return 0; 785 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d4da1f04378c..3375e614ac67 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -536,7 +536,7 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
536 uvd_v6_0_mc_resume(adev); 536 uvd_v6_0_mc_resume(adev);
537 537
538 /* Set dynamic clock gating in S/W control mode */ 538 /* Set dynamic clock gating in S/W control mode */
539 if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) { 539 if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
540 if (adev->flags & AMD_IS_APU) 540 if (adev->flags & AMD_IS_APU)
541 cz_set_uvd_clock_gating_branches(adev, false); 541 cz_set_uvd_clock_gating_branches(adev, false);
542 else 542 else
@@ -983,7 +983,7 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
983 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 983 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
984 bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 984 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
985 985
986 if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) 986 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
987 return 0; 987 return 0;
988 988
989 if (enable) { 989 if (enable) {
@@ -1013,6 +1013,9 @@ static int uvd_v6_0_set_powergating_state(void *handle,
1013 */ 1013 */
1014 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1014 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1015 1015
1016 if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
1017 return 0;
1018
1016 if (state == AMD_PG_STATE_GATE) { 1019 if (state == AMD_PG_STATE_GATE) {
1017 uvd_v6_0_stop(adev); 1020 uvd_v6_0_stop(adev);
1018 return 0; 1021 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 9c804f436974..c7e885bcfd41 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -373,7 +373,7 @@ static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
373{ 373{
374 bool sw_cg = false; 374 bool sw_cg = false;
375 375
376 if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) { 376 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
377 if (sw_cg) 377 if (sw_cg)
378 vce_v2_0_set_sw_cg(adev, true); 378 vce_v2_0_set_sw_cg(adev, true);
379 else 379 else
@@ -608,6 +608,9 @@ static int vce_v2_0_set_powergating_state(void *handle,
608 */ 608 */
609 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 609 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
610 610
611 if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
612 return 0;
613
611 if (state == AMD_PG_STATE_GATE) 614 if (state == AMD_PG_STATE_GATE)
612 /* XXX do we need a vce_v2_0_stop()? */ 615 /* XXX do we need a vce_v2_0_stop()? */
613 return 0; 616 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 8f8d479061f8..ce468ee5da2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -277,7 +277,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
277 WREG32_P(mmVCE_STATUS, 0, ~1); 277 WREG32_P(mmVCE_STATUS, 0, ~1);
278 278
279 /* Set Clock-Gating off */ 279 /* Set Clock-Gating off */
280 if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG) 280 if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
281 vce_v3_0_set_vce_sw_clock_gating(adev, false); 281 vce_v3_0_set_vce_sw_clock_gating(adev, false);
282 282
283 if (r) { 283 if (r) {
@@ -676,7 +676,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
676 bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 676 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
677 int i; 677 int i;
678 678
679 if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) 679 if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
680 return 0; 680 return 0;
681 681
682 mutex_lock(&adev->grbm_idx_mutex); 682 mutex_lock(&adev->grbm_idx_mutex);
@@ -728,6 +728,9 @@ static int vce_v3_0_set_powergating_state(void *handle,
728 */ 728 */
729 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 729 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
730 730
731 if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
732 return 0;
733
731 if (state == AMD_PG_STATE_GATE) 734 if (state == AMD_PG_STATE_GATE)
732 /* XXX do we need a vce_v3_0_stop()? */ 735 /* XXX do we need a vce_v3_0_stop()? */
733 return 0; 736 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 125003517544..b72cf063df1a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1092,8 +1092,7 @@ static int vi_common_early_init(void *handle)
1092 case CHIP_STONEY: 1092 case CHIP_STONEY:
1093 adev->has_uvd = true; 1093 adev->has_uvd = true;
1094 adev->cg_flags = 0; 1094 adev->cg_flags = 0;
1095 /* Disable UVD pg */ 1095 adev->pg_flags = 0;
1096 adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
1097 adev->external_rev_id = adev->rev_id + 0x1; 1096 adev->external_rev_id = adev->rev_id + 0x1;
1098 break; 1097 break;
1099 default: 1098 default:
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 15ff8b2c26e7..04e4090666fb 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -86,6 +86,38 @@ enum amd_powergating_state {
86 AMD_PG_STATE_UNGATE, 86 AMD_PG_STATE_UNGATE,
87}; 87};
88 88
89/* CG flags */
90#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
91#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
92#define AMD_CG_SUPPORT_GFX_CGCG (1 << 2)
93#define AMD_CG_SUPPORT_GFX_CGLS (1 << 3)
94#define AMD_CG_SUPPORT_GFX_CGTS (1 << 4)
95#define AMD_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
96#define AMD_CG_SUPPORT_GFX_CP_LS (1 << 6)
97#define AMD_CG_SUPPORT_GFX_RLC_LS (1 << 7)
98#define AMD_CG_SUPPORT_MC_LS (1 << 8)
99#define AMD_CG_SUPPORT_MC_MGCG (1 << 9)
100#define AMD_CG_SUPPORT_SDMA_LS (1 << 10)
101#define AMD_CG_SUPPORT_SDMA_MGCG (1 << 11)
102#define AMD_CG_SUPPORT_BIF_LS (1 << 12)
103#define AMD_CG_SUPPORT_UVD_MGCG (1 << 13)
104#define AMD_CG_SUPPORT_VCE_MGCG (1 << 14)
105#define AMD_CG_SUPPORT_HDP_LS (1 << 15)
106#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16)
107
108/* PG flags */
109#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
110#define AMD_PG_SUPPORT_GFX_SMG (1 << 1)
111#define AMD_PG_SUPPORT_GFX_DMG (1 << 2)
112#define AMD_PG_SUPPORT_UVD (1 << 3)
113#define AMD_PG_SUPPORT_VCE (1 << 4)
114#define AMD_PG_SUPPORT_CP (1 << 5)
115#define AMD_PG_SUPPORT_GDS (1 << 6)
116#define AMD_PG_SUPPORT_RLC_SMU_HS (1 << 7)
117#define AMD_PG_SUPPORT_SDMA (1 << 8)
118#define AMD_PG_SUPPORT_ACP (1 << 9)
119#define AMD_PG_SUPPORT_SAMU (1 << 10)
120
89enum amd_pm_state_type { 121enum amd_pm_state_type {
90 /* not used for dpm */ 122 /* not used for dpm */
91 POWER_STATE_TYPE_DEFAULT, 123 POWER_STATE_TYPE_DEFAULT,
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 713aec954692..aec38fc3834f 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -109,6 +109,8 @@ enum cgs_system_info_id {
109 CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1, 109 CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
110 CGS_SYSTEM_INFO_PCIE_GEN_INFO, 110 CGS_SYSTEM_INFO_PCIE_GEN_INFO,
111 CGS_SYSTEM_INFO_PCIE_MLW, 111 CGS_SYSTEM_INFO_PCIE_MLW,
112 CGS_SYSTEM_INFO_CG_FLAGS,
113 CGS_SYSTEM_INFO_PG_FLAGS,
112 CGS_SYSTEM_INFO_ID_MAXIMUM, 114 CGS_SYSTEM_INFO_ID_MAXIMUM,
113}; 115};
114 116
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 0db64231effe..9d2290044708 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -403,8 +403,11 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
403 403
404 data.requested_ui_label = power_state_convert(ps); 404 data.requested_ui_label = power_state_convert(ps);
405 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); 405 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
406 break;
406 } 407 }
407 break; 408 case AMD_PP_EVENT_COMPLETE_INIT:
409 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
410 break;
408 default: 411 default:
409 break; 412 break;
410 } 413 }
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index 83be3cf210e0..6b52c78cb404 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -165,6 +165,7 @@ const struct action_chain resume_action_chain = {
165}; 165};
166 166
167static const pem_event_action *complete_init_event[] = { 167static const pem_event_action *complete_init_event[] = {
168 unblock_adjust_power_state_tasks,
168 adjust_power_state_tasks, 169 adjust_power_state_tasks,
169 enable_gfx_clock_gating_tasks, 170 enable_gfx_clock_gating_tasks,
170 enable_gfx_voltage_island_power_gating_tasks, 171 enable_gfx_voltage_island_power_gating_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
index 52a3efc97f05..46410e3c7349 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -31,7 +31,7 @@
31static int pem_init(struct pp_eventmgr *eventmgr) 31static int pem_init(struct pp_eventmgr *eventmgr)
32{ 32{
33 int result = 0; 33 int result = 0;
34 struct pem_event_data event_data; 34 struct pem_event_data event_data = { {0} };
35 35
36 /* Initialize PowerPlay feature info */ 36 /* Initialize PowerPlay feature info */
37 pem_init_feature_info(eventmgr); 37 pem_init_feature_info(eventmgr);
@@ -52,7 +52,7 @@ static int pem_init(struct pp_eventmgr *eventmgr)
52 52
53static void pem_fini(struct pp_eventmgr *eventmgr) 53static void pem_fini(struct pp_eventmgr *eventmgr)
54{ 54{
55 struct pem_event_data event_data; 55 struct pem_event_data event_data = { {0} };
56 56
57 pem_uninit_featureInfo(eventmgr); 57 pem_uninit_featureInfo(eventmgr);
58 pem_unregister_interrupts(eventmgr); 58 pem_unregister_interrupts(eventmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index ad7700822a1c..ff08ce41bde9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -226,7 +226,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
226 } 226 }
227 } else { 227 } else {
228 cz_dpm_update_vce_dpm(hwmgr); 228 cz_dpm_update_vce_dpm(hwmgr);
229 cz_enable_disable_vce_dpm(hwmgr, true); 229 cz_enable_disable_vce_dpm(hwmgr, !bgate);
230 return 0; 230 return 0;
231 } 231 }
232 232
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index ef1daf1251c7..b8d6a82c1be2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -174,6 +174,8 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
174{ 174{
175 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend); 175 struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
176 uint32_t i; 176 uint32_t i;
177 struct cgs_system_info sys_info = {0};
178 int result;
177 179
178 cz_hwmgr->gfx_ramp_step = 256*25/100; 180 cz_hwmgr->gfx_ramp_step = 256*25/100;
179 181
@@ -247,6 +249,22 @@ static int cz_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
247 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 249 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
248 PHM_PlatformCaps_DisableVoltageIsland); 250 PHM_PlatformCaps_DisableVoltageIsland);
249 251
252 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
253 PHM_PlatformCaps_UVDPowerGating);
254 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
255 PHM_PlatformCaps_VCEPowerGating);
256 sys_info.size = sizeof(struct cgs_system_info);
257 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
258 result = cgs_query_system_info(hwmgr->device, &sys_info);
259 if (!result) {
260 if (sys_info.value & AMD_PG_SUPPORT_UVD)
261 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
262 PHM_PlatformCaps_UVDPowerGating);
263 if (sys_info.value & AMD_PG_SUPPORT_VCE)
264 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
265 PHM_PlatformCaps_VCEPowerGating);
266 }
267
250 return 0; 268 return 0;
251} 269}
252 270
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index bc83fa35ec46..aec4f8346d9c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -4451,6 +4451,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4451 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; 4451 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
4452 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); 4452 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
4453 phw_tonga_ulv_parm *ulv; 4453 phw_tonga_ulv_parm *ulv;
4454 struct cgs_system_info sys_info = {0};
4454 4455
4455 PP_ASSERT_WITH_CODE((NULL != hwmgr), 4456 PP_ASSERT_WITH_CODE((NULL != hwmgr),
4456 "Invalid Parameter!", return -1;); 4457 "Invalid Parameter!", return -1;);
@@ -4615,9 +4616,23 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4615 4616
4616 data->vddc_phase_shed_control = 0; 4617 data->vddc_phase_shed_control = 0;
4617 4618
4618 if (0 == result) { 4619 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4619 struct cgs_system_info sys_info = {0}; 4620 PHM_PlatformCaps_UVDPowerGating);
4621 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4622 PHM_PlatformCaps_VCEPowerGating);
4623 sys_info.size = sizeof(struct cgs_system_info);
4624 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
4625 result = cgs_query_system_info(hwmgr->device, &sys_info);
4626 if (!result) {
4627 if (sys_info.value & AMD_PG_SUPPORT_UVD)
4628 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4629 PHM_PlatformCaps_UVDPowerGating);
4630 if (sys_info.value & AMD_PG_SUPPORT_VCE)
4631 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4632 PHM_PlatformCaps_VCEPowerGating);
4633 }
4620 4634
4635 if (0 == result) {
4621 data->is_tlu_enabled = 0; 4636 data->is_tlu_enabled = 0;
4622 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = 4637 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
4623 TONGA_MAX_HARDWARE_POWERLEVELS; 4638 TONGA_MAX_HARDWARE_POWERLEVELS;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 9759009d1da3..b1480acbb3c3 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
227 } while (ast_read32(ast, 0x10000) != 0x01); 227 } while (ast_read32(ast, 0x10000) != 0x01);
228 data = ast_read32(ast, 0x10004); 228 data = ast_read32(ast, 0x10004);
229 229
230 if (data & 0x400) 230 if (data & 0x40)
231 ast->dram_bus_width = 16; 231 ast->dram_bus_width = 16;
232 else 232 else
233 ast->dram_bus_width = 32; 233 ast->dram_bus_width = 32;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 8fb469c4e4b8..092620c6ff32 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -65,8 +65,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
65 */ 65 */
66 state->allow_modeset = true; 66 state->allow_modeset = true;
67 67
68 state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
69
70 state->crtcs = kcalloc(dev->mode_config.num_crtc, 68 state->crtcs = kcalloc(dev->mode_config.num_crtc,
71 sizeof(*state->crtcs), GFP_KERNEL); 69 sizeof(*state->crtcs), GFP_KERNEL);
72 if (!state->crtcs) 70 if (!state->crtcs)
@@ -83,16 +81,6 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
83 sizeof(*state->plane_states), GFP_KERNEL); 81 sizeof(*state->plane_states), GFP_KERNEL);
84 if (!state->plane_states) 82 if (!state->plane_states)
85 goto fail; 83 goto fail;
86 state->connectors = kcalloc(state->num_connector,
87 sizeof(*state->connectors),
88 GFP_KERNEL);
89 if (!state->connectors)
90 goto fail;
91 state->connector_states = kcalloc(state->num_connector,
92 sizeof(*state->connector_states),
93 GFP_KERNEL);
94 if (!state->connector_states)
95 goto fail;
96 84
97 state->dev = dev; 85 state->dev = dev;
98 86
@@ -823,19 +811,27 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
823 811
824 index = drm_connector_index(connector); 812 index = drm_connector_index(connector);
825 813
826 /*
827 * Construction of atomic state updates can race with a connector
828 * hot-add which might overflow. In this case flip the table and just
829 * restart the entire ioctl - no one is fast enough to livelock a cpu
830 * with physical hotplug events anyway.
831 *
832 * Note that we only grab the indexes once we have the right lock to
833 * prevent hotplug/unplugging of connectors. So removal is no problem,
834 * at most the array is a bit too large.
835 */
836 if (index >= state->num_connector) { 814 if (index >= state->num_connector) {
837 DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n"); 815 struct drm_connector **c;
838 return ERR_PTR(-EAGAIN); 816 struct drm_connector_state **cs;
817 int alloc = max(index + 1, config->num_connector);
818
819 c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
820 if (!c)
821 return ERR_PTR(-ENOMEM);
822
823 state->connectors = c;
824 memset(&state->connectors[state->num_connector], 0,
825 sizeof(*state->connectors) * (alloc - state->num_connector));
826
827 cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
828 if (!cs)
829 return ERR_PTR(-ENOMEM);
830
831 state->connector_states = cs;
832 memset(&state->connector_states[state->num_connector], 0,
833 sizeof(*state->connector_states) * (alloc - state->num_connector));
834 state->num_connector = alloc;
839 } 835 }
840 836
841 if (state->connector_states[index]) 837 if (state->connector_states[index])
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 2b430b05f35d..4da4f2a49078 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1535,7 +1535,7 @@ void drm_atomic_helper_swap_state(struct drm_device *dev,
1535{ 1535{
1536 int i; 1536 int i;
1537 1537
1538 for (i = 0; i < dev->mode_config.num_connector; i++) { 1538 for (i = 0; i < state->num_connector; i++) {
1539 struct drm_connector *connector = state->connectors[i]; 1539 struct drm_connector *connector = state->connectors[i];
1540 1540
1541 if (!connector) 1541 if (!connector)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 65258acddb90..84514001dcef 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -918,12 +918,19 @@ int drm_connector_init(struct drm_device *dev,
918 connector->base.properties = &connector->properties; 918 connector->base.properties = &connector->properties;
919 connector->dev = dev; 919 connector->dev = dev;
920 connector->funcs = funcs; 920 connector->funcs = funcs;
921
922 connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
923 if (connector->connector_id < 0) {
924 ret = connector->connector_id;
925 goto out_put;
926 }
927
921 connector->connector_type = connector_type; 928 connector->connector_type = connector_type;
922 connector->connector_type_id = 929 connector->connector_type_id =
923 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL); 930 ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
924 if (connector->connector_type_id < 0) { 931 if (connector->connector_type_id < 0) {
925 ret = connector->connector_type_id; 932 ret = connector->connector_type_id;
926 goto out_put; 933 goto out_put_id;
927 } 934 }
928 connector->name = 935 connector->name =
929 kasprintf(GFP_KERNEL, "%s-%d", 936 kasprintf(GFP_KERNEL, "%s-%d",
@@ -931,7 +938,7 @@ int drm_connector_init(struct drm_device *dev,
931 connector->connector_type_id); 938 connector->connector_type_id);
932 if (!connector->name) { 939 if (!connector->name) {
933 ret = -ENOMEM; 940 ret = -ENOMEM;
934 goto out_put; 941 goto out_put_type_id;
935 } 942 }
936 943
937 INIT_LIST_HEAD(&connector->probed_modes); 944 INIT_LIST_HEAD(&connector->probed_modes);
@@ -959,7 +966,12 @@ int drm_connector_init(struct drm_device *dev,
959 } 966 }
960 967
961 connector->debugfs_entry = NULL; 968 connector->debugfs_entry = NULL;
962 969out_put_type_id:
970 if (ret)
971 ida_remove(connector_ida, connector->connector_type_id);
972out_put_id:
973 if (ret)
974 ida_remove(&config->connector_ida, connector->connector_id);
963out_put: 975out_put:
964 if (ret) 976 if (ret)
965 drm_mode_object_put(dev, &connector->base); 977 drm_mode_object_put(dev, &connector->base);
@@ -996,6 +1008,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
996 ida_remove(&drm_connector_enum_list[connector->connector_type].ida, 1008 ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
997 connector->connector_type_id); 1009 connector->connector_type_id);
998 1010
1011 ida_remove(&dev->mode_config.connector_ida,
1012 connector->connector_id);
1013
999 kfree(connector->display_info.bus_formats); 1014 kfree(connector->display_info.bus_formats);
1000 drm_mode_object_put(dev, &connector->base); 1015 drm_mode_object_put(dev, &connector->base);
1001 kfree(connector->name); 1016 kfree(connector->name);
@@ -1013,32 +1028,6 @@ void drm_connector_cleanup(struct drm_connector *connector)
1013EXPORT_SYMBOL(drm_connector_cleanup); 1028EXPORT_SYMBOL(drm_connector_cleanup);
1014 1029
1015/** 1030/**
1016 * drm_connector_index - find the index of a registered connector
1017 * @connector: connector to find index for
1018 *
1019 * Given a registered connector, return the index of that connector within a DRM
1020 * device's list of connectors.
1021 */
1022unsigned int drm_connector_index(struct drm_connector *connector)
1023{
1024 unsigned int index = 0;
1025 struct drm_connector *tmp;
1026 struct drm_mode_config *config = &connector->dev->mode_config;
1027
1028 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
1029
1030 drm_for_each_connector(tmp, connector->dev) {
1031 if (tmp == connector)
1032 return index;
1033
1034 index++;
1035 }
1036
1037 BUG();
1038}
1039EXPORT_SYMBOL(drm_connector_index);
1040
1041/**
1042 * drm_connector_register - register a connector 1031 * drm_connector_register - register a connector
1043 * @connector: the connector to register 1032 * @connector: the connector to register
1044 * 1033 *
@@ -5838,6 +5827,7 @@ void drm_mode_config_init(struct drm_device *dev)
5838 INIT_LIST_HEAD(&dev->mode_config.plane_list); 5827 INIT_LIST_HEAD(&dev->mode_config.plane_list);
5839 idr_init(&dev->mode_config.crtc_idr); 5828 idr_init(&dev->mode_config.crtc_idr);
5840 idr_init(&dev->mode_config.tile_idr); 5829 idr_init(&dev->mode_config.tile_idr);
5830 ida_init(&dev->mode_config.connector_ida);
5841 5831
5842 drm_modeset_lock_all(dev); 5832 drm_modeset_lock_all(dev);
5843 drm_mode_create_standard_properties(dev); 5833 drm_mode_create_standard_properties(dev);
@@ -5918,6 +5908,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
5918 crtc->funcs->destroy(crtc); 5908 crtc->funcs->destroy(crtc);
5919 } 5909 }
5920 5910
5911 ida_destroy(&dev->mode_config.connector_ida);
5921 idr_destroy(&dev->mode_config.tile_idr); 5912 idr_destroy(&dev->mode_config.tile_idr);
5922 idr_destroy(&dev->mode_config.crtc_idr); 5913 idr_destroy(&dev->mode_config.crtc_idr);
5923 drm_modeset_lock_fini(&dev->mode_config.connection_mutex); 5914 drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 8ae13de272c4..27fbd79d0daf 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1159,11 +1159,13 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1159 drm_dp_put_port(port); 1159 drm_dp_put_port(port);
1160 goto out; 1160 goto out;
1161 } 1161 }
1162 1162 if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
1163 drm_mode_connector_set_tile_property(port->connector); 1163 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1164 1164 drm_mode_connector_set_tile_property(port->connector);
1165 }
1165 (*mstb->mgr->cbs->register_connector)(port->connector); 1166 (*mstb->mgr->cbs->register_connector)(port->connector);
1166 } 1167 }
1168
1167out: 1169out:
1168 /* put reference to this port */ 1170 /* put reference to this port */
1169 drm_dp_put_port(port); 1171 drm_dp_put_port(port);
@@ -1188,8 +1190,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
1188 port->ddps = conn_stat->displayport_device_plug_status; 1190 port->ddps = conn_stat->displayport_device_plug_status;
1189 1191
1190 if (old_ddps != port->ddps) { 1192 if (old_ddps != port->ddps) {
1191 dowork = true;
1192 if (port->ddps) { 1193 if (port->ddps) {
1194 dowork = true;
1193 } else { 1195 } else {
1194 port->available_pbn = 0; 1196 port->available_pbn = 0;
1195 } 1197 }
@@ -1294,13 +1296,8 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1294 if (port->input) 1296 if (port->input)
1295 continue; 1297 continue;
1296 1298
1297 if (!port->ddps) { 1299 if (!port->ddps)
1298 if (port->cached_edid) {
1299 kfree(port->cached_edid);
1300 port->cached_edid = NULL;
1301 }
1302 continue; 1300 continue;
1303 }
1304 1301
1305 if (!port->available_pbn) 1302 if (!port->available_pbn)
1306 drm_dp_send_enum_path_resources(mgr, mstb, port); 1303 drm_dp_send_enum_path_resources(mgr, mstb, port);
@@ -1311,12 +1308,6 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
1311 drm_dp_check_and_send_link_address(mgr, mstb_child); 1308 drm_dp_check_and_send_link_address(mgr, mstb_child);
1312 drm_dp_put_mst_branch_device(mstb_child); 1309 drm_dp_put_mst_branch_device(mstb_child);
1313 } 1310 }
1314 } else if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
1315 port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) {
1316 if (!port->cached_edid) {
1317 port->cached_edid =
1318 drm_get_edid(port->connector, &port->aux.ddc);
1319 }
1320 } 1311 }
1321 } 1312 }
1322} 1313}
@@ -1336,8 +1327,6 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
1336 drm_dp_check_and_send_link_address(mgr, mstb); 1327 drm_dp_check_and_send_link_address(mgr, mstb);
1337 drm_dp_put_mst_branch_device(mstb); 1328 drm_dp_put_mst_branch_device(mstb);
1338 } 1329 }
1339
1340 (*mgr->cbs->hotplug)(mgr);
1341} 1330}
1342 1331
1343static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, 1332static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -1597,6 +1586,7 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
1597 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { 1586 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
1598 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); 1587 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
1599 } 1588 }
1589 (*mgr->cbs->hotplug)(mgr);
1600 } 1590 }
1601 } else { 1591 } else {
1602 mstb->link_address_sent = false; 1592 mstb->link_address_sent = false;
@@ -2293,6 +2283,8 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
2293 drm_dp_update_port(mstb, &msg.u.conn_stat); 2283 drm_dp_update_port(mstb, &msg.u.conn_stat);
2294 2284
2295 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); 2285 DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
2286 (*mgr->cbs->hotplug)(mgr);
2287
2296 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { 2288 } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
2297 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); 2289 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
2298 if (!mstb) 2290 if (!mstb)
@@ -2379,6 +2371,10 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
2379 2371
2380 case DP_PEER_DEVICE_SST_SINK: 2372 case DP_PEER_DEVICE_SST_SINK:
2381 status = connector_status_connected; 2373 status = connector_status_connected;
2374 /* for logical ports - cache the EDID */
2375 if (port->port_num >= 8 && !port->cached_edid) {
2376 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
2377 }
2382 break; 2378 break;
2383 case DP_PEER_DEVICE_DP_LEGACY_CONV: 2379 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2384 if (port->ldps) 2380 if (port->ldps)
@@ -2433,7 +2429,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
2433 2429
2434 if (port->cached_edid) 2430 if (port->cached_edid)
2435 edid = drm_edid_duplicate(port->cached_edid); 2431 edid = drm_edid_duplicate(port->cached_edid);
2436 2432 else {
2433 edid = drm_get_edid(connector, &port->aux.ddc);
2434 drm_mode_connector_set_tile_property(connector);
2435 }
2437 port->has_audio = drm_detect_monitor_audio(edid); 2436 port->has_audio = drm_detect_monitor_audio(edid);
2438 drm_dp_put_port(port); 2437 drm_dp_put_port(port);
2439 return edid; 2438 return edid;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 96d03ac38ef7..881c5a6c180c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -224,6 +224,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
224 diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0; 224 diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
225 } 225 }
226 226
227 /*
228 * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
229 * interval? If so then vblank irqs keep running and it will likely
230 * happen that the hardware vblank counter is not trustworthy as it
231 * might reset at some point in that interval and vblank timestamps
232 * are not trustworthy either in that interval. Iow. this can result
233 * in a bogus diff >> 1 which must be avoided as it would cause
234 * random large forward jumps of the software vblank counter.
235 */
236 if (diff > 1 && (vblank->inmodeset & 0x2)) {
237 DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u"
238 " due to pre-modeset.\n", pipe, diff);
239 diff = 1;
240 }
241
242 /*
243 * FIMXE: Need to replace this hack with proper seqlocks.
244 *
245 * Restrict the bump of the software vblank counter to a safe maximum
246 * value of +1 whenever there is the possibility that concurrent readers
247 * of vblank timestamps could be active at the moment, as the current
248 * implementation of the timestamp caching and updating is not safe
249 * against concurrent readers for calls to store_vblank() with a bump
250 * of anything but +1. A bump != 1 would very likely return corrupted
251 * timestamps to userspace, because the same slot in the cache could
252 * be concurrently written by store_vblank() and read by one of those
253 * readers without the read-retry logic detecting the collision.
254 *
255 * Concurrent readers can exist when we are called from the
256 * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
257 * irq callers. However, all those calls to us are happening with the
258 * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
259 * can't increase while we are executing. Therefore a zero refcount at
260 * this point is safe for arbitrary counter bumps if we are called
261 * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
262 * we must also accept a refcount of 1, as whenever we are called from
263 * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
264 * we must let that one pass through in order to not lose vblank counts
265 * during vblank irq off - which would completely defeat the whole
266 * point of this routine.
267 *
268 * Whenever we are called from vblank irq, we have to assume concurrent
269 * readers exist or can show up any time during our execution, even if
270 * the refcount is currently zero, as vblank irqs are usually only
271 * enabled due to the presence of readers, and because when we are called
272 * from vblank irq we can't hold the vbl_lock to protect us from sudden
273 * bumps in vblank refcount. Therefore also restrict bumps to +1 when
274 * called from vblank irq.
275 */
276 if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
277 (flags & DRM_CALLED_FROM_VBLIRQ))) {
278 DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
279 "refcount %u, vblirq %u\n", pipe, diff,
280 atomic_read(&vblank->refcount),
281 (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
282 diff = 1;
283 }
284
227 DRM_DEBUG_VBL("updating vblank count on crtc %u:" 285 DRM_DEBUG_VBL("updating vblank count on crtc %u:"
228 " current=%u, diff=%u, hw=%u hw_last=%u\n", 286 " current=%u, diff=%u, hw=%u hw_last=%u\n",
229 pipe, vblank->count, diff, cur_vblank, vblank->last); 287 pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -1313,7 +1371,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
1313 spin_lock_irqsave(&dev->event_lock, irqflags); 1371 spin_lock_irqsave(&dev->event_lock, irqflags);
1314 1372
1315 spin_lock(&dev->vbl_lock); 1373 spin_lock(&dev->vbl_lock);
1316 vblank_disable_and_save(dev, pipe); 1374 DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
1375 pipe, vblank->enabled, vblank->inmodeset);
1376
1377 /* Avoid redundant vblank disables without previous drm_vblank_on(). */
1378 if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
1379 vblank_disable_and_save(dev, pipe);
1380
1317 wake_up(&vblank->queue); 1381 wake_up(&vblank->queue);
1318 1382
1319 /* 1383 /*
@@ -1415,6 +1479,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
1415 return; 1479 return;
1416 1480
1417 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1481 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1482 DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
1483 pipe, vblank->enabled, vblank->inmodeset);
1484
1418 /* Drop our private "prevent drm_vblank_get" refcount */ 1485 /* Drop our private "prevent drm_vblank_get" refcount */
1419 if (vblank->inmodeset) { 1486 if (vblank->inmodeset) {
1420 atomic_dec(&vblank->refcount); 1487 atomic_dec(&vblank->refcount);
@@ -1427,8 +1494,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
1427 * re-enable interrupts if there are users left, or the 1494 * re-enable interrupts if there are users left, or the
1428 * user wishes vblank interrupts to be enabled all the time. 1495 * user wishes vblank interrupts to be enabled all the time.
1429 */ 1496 */
1430 if (atomic_read(&vblank->refcount) != 0 || 1497 if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
1431 (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
1432 WARN_ON(drm_vblank_enable(dev, pipe)); 1498 WARN_ON(drm_vblank_enable(dev, pipe));
1433 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1499 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1434} 1500}
@@ -1523,6 +1589,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
1523 if (vblank->inmodeset) { 1589 if (vblank->inmodeset) {
1524 spin_lock_irqsave(&dev->vbl_lock, irqflags); 1590 spin_lock_irqsave(&dev->vbl_lock, irqflags);
1525 dev->vblank_disable_allowed = true; 1591 dev->vblank_disable_allowed = true;
1592 drm_reset_vblank_timestamp(dev, pipe);
1526 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1593 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
1527 1594
1528 if (vblank->inmodeset & 0x2) 1595 if (vblank->inmodeset & 0x2)
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index b61282d89aa3..f4315bc8d471 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1373,8 +1373,16 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
1373 drm_connector_cleanup(connector); 1373 drm_connector_cleanup(connector);
1374} 1374}
1375 1375
1376static int tda998x_connector_dpms(struct drm_connector *connector, int mode)
1377{
1378 if (drm_core_check_feature(connector->dev, DRIVER_ATOMIC))
1379 return drm_atomic_helper_connector_dpms(connector, mode);
1380 else
1381 return drm_helper_connector_dpms(connector, mode);
1382}
1383
1376static const struct drm_connector_funcs tda998x_connector_funcs = { 1384static const struct drm_connector_funcs tda998x_connector_funcs = {
1377 .dpms = drm_atomic_helper_connector_dpms, 1385 .dpms = tda998x_connector_dpms,
1378 .reset = drm_atomic_helper_connector_reset, 1386 .reset = drm_atomic_helper_connector_reset,
1379 .fill_modes = drm_helper_probe_single_connector_modes, 1387 .fill_modes = drm_helper_probe_single_connector_modes,
1380 .detect = tda998x_connector_detect, 1388 .detect = tda998x_connector_detect,
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 31f6d212fb1b..30f921421b0c 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -527,6 +527,8 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
527 527
528 mutex_lock(&dev_priv->av_mutex); 528 mutex_lock(&dev_priv->av_mutex);
529 intel_dig_port->audio_connector = connector; 529 intel_dig_port->audio_connector = connector;
530 /* referred in audio callbacks */
531 dev_priv->dig_port_map[port] = intel_encoder;
530 mutex_unlock(&dev_priv->av_mutex); 532 mutex_unlock(&dev_priv->av_mutex);
531 533
532 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) 534 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
@@ -554,6 +556,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
554 556
555 mutex_lock(&dev_priv->av_mutex); 557 mutex_lock(&dev_priv->av_mutex);
556 intel_dig_port->audio_connector = NULL; 558 intel_dig_port->audio_connector = NULL;
559 dev_priv->dig_port_map[port] = NULL;
557 mutex_unlock(&dev_priv->av_mutex); 560 mutex_unlock(&dev_priv->av_mutex);
558 561
559 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) 562 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 21a9b83f3bfc..62de9f4bce09 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3326,7 +3326,6 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
3326 intel_encoder->get_config = intel_ddi_get_config; 3326 intel_encoder->get_config = intel_ddi_get_config;
3327 3327
3328 intel_dig_port->port = port; 3328 intel_dig_port->port = port;
3329 dev_priv->dig_port_map[port] = intel_encoder;
3330 intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & 3329 intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
3331 (DDI_BUF_PORT_REVERSAL | 3330 (DDI_BUF_PORT_REVERSAL |
3332 DDI_A_4_LANES); 3331 DDI_A_4_LANES);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index cbc06596659a..f069a82deb57 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -6043,7 +6043,6 @@ intel_dp_init(struct drm_device *dev,
6043 } 6043 }
6044 6044
6045 intel_dig_port->port = port; 6045 intel_dig_port->port = port;
6046 dev_priv->dig_port_map[port] = intel_encoder;
6047 intel_dig_port->dp.output_reg = output_reg; 6046 intel_dig_port->dp.output_reg = output_reg;
6048 intel_dig_port->max_lanes = 4; 6047 intel_dig_port->max_lanes = 4;
6049 6048
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 80b44c054087..a0d8daed2470 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -2167,7 +2167,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
2167void intel_hdmi_init(struct drm_device *dev, 2167void intel_hdmi_init(struct drm_device *dev,
2168 i915_reg_t hdmi_reg, enum port port) 2168 i915_reg_t hdmi_reg, enum port port)
2169{ 2169{
2170 struct drm_i915_private *dev_priv = dev->dev_private;
2171 struct intel_digital_port *intel_dig_port; 2170 struct intel_digital_port *intel_dig_port;
2172 struct intel_encoder *intel_encoder; 2171 struct intel_encoder *intel_encoder;
2173 struct intel_connector *intel_connector; 2172 struct intel_connector *intel_connector;
@@ -2236,7 +2235,6 @@ void intel_hdmi_init(struct drm_device *dev,
2236 intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI; 2235 intel_encoder->cloneable |= 1 << INTEL_OUTPUT_HDMI;
2237 2236
2238 intel_dig_port->port = port; 2237 intel_dig_port->port = port;
2239 dev_priv->dig_port_map[port] = intel_encoder;
2240 intel_dig_port->hdmi.hdmi_reg = hdmi_reg; 2238 intel_dig_port->hdmi.hdmi_reg = hdmi_reg;
2241 intel_dig_port->dp.output_reg = INVALID_MMIO_REG; 2239 intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
2242 intel_dig_port->max_lanes = 4; 2240 intel_dig_port->max_lanes = 4;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index deb8282c26d8..52fbe530fc9e 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -664,6 +664,12 @@ int intel_setup_gmbus(struct drm_device *dev)
664 664
665 bus->adapter.algo = &gmbus_algorithm; 665 bus->adapter.algo = &gmbus_algorithm;
666 666
667 /*
668 * We wish to retry with bit banging
669 * after a timed out GMBUS attempt.
670 */
671 bus->adapter.retries = 1;
672
667 /* By default use a conservative clock rate */ 673 /* By default use a conservative clock rate */
668 bus->reg0 = pin | GMBUS_RATE_100KHZ; 674 bus->reg0 = pin | GMBUS_RATE_100KHZ;
669 675
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 4172e73212cd..6e54d978d9d4 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -2319,15 +2319,15 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2319 */ 2319 */
2320void intel_power_domains_suspend(struct drm_i915_private *dev_priv) 2320void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2321{ 2321{
2322 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
2323 skl_display_core_uninit(dev_priv);
2324
2325 /* 2322 /*
2326 * Even if power well support was disabled we still want to disable 2323 * Even if power well support was disabled we still want to disable
2327 * power wells while we are system suspended. 2324 * power wells while we are system suspended.
2328 */ 2325 */
2329 if (!i915.disable_power_well) 2326 if (!i915.disable_power_well)
2330 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 2327 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
2328
2329 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
2330 skl_display_core_uninit(dev_priv);
2331} 2331}
2332 2332
2333/** 2333/**
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 4bea0ddb3f3e..dee8e8b3523b 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -84,6 +84,7 @@ static void ipu_fb_enable(struct ipu_crtc *ipu_crtc)
84 /* Start DC channel and DI after IDMAC */ 84 /* Start DC channel and DI after IDMAC */
85 ipu_dc_enable_channel(ipu_crtc->dc); 85 ipu_dc_enable_channel(ipu_crtc->dc);
86 ipu_di_enable(ipu_crtc->di); 86 ipu_di_enable(ipu_crtc->di);
87 drm_crtc_vblank_on(&ipu_crtc->base);
87 88
88 ipu_crtc->enabled = 1; 89 ipu_crtc->enabled = 1;
89} 90}
@@ -100,6 +101,7 @@ static void ipu_fb_disable(struct ipu_crtc *ipu_crtc)
100 ipu_di_disable(ipu_crtc->di); 101 ipu_di_disable(ipu_crtc->di);
101 ipu_plane_disable(ipu_crtc->plane[0]); 102 ipu_plane_disable(ipu_crtc->plane[0]);
102 ipu_dc_disable(ipu); 103 ipu_dc_disable(ipu);
104 drm_crtc_vblank_off(&ipu_crtc->base);
103 105
104 ipu_crtc->enabled = 0; 106 ipu_crtc->enabled = 0;
105} 107}
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index d078373c4233..588827844f30 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -42,6 +42,7 @@ static const uint32_t ipu_plane_formats[] = {
42 DRM_FORMAT_YVYU, 42 DRM_FORMAT_YVYU,
43 DRM_FORMAT_YUV420, 43 DRM_FORMAT_YUV420,
44 DRM_FORMAT_YVU420, 44 DRM_FORMAT_YVU420,
45 DRM_FORMAT_RGB565,
45}; 46};
46 47
47int ipu_plane_irq(struct ipu_plane *ipu_plane) 48int ipu_plane_irq(struct ipu_plane *ipu_plane)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 78f520d05de9..e3acc35e3805 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1520,7 +1520,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1520 DMA_BIDIRECTIONAL); 1520 DMA_BIDIRECTIONAL);
1521 1521
1522 if (dma_mapping_error(pdev, addr)) { 1522 if (dma_mapping_error(pdev, addr)) {
1523 while (--i) { 1523 while (i--) {
1524 dma_unmap_page(pdev, ttm_dma->dma_address[i], 1524 dma_unmap_page(pdev, ttm_dma->dma_address[i],
1525 PAGE_SIZE, DMA_BIDIRECTIONAL); 1525 PAGE_SIZE, DMA_BIDIRECTIONAL);
1526 ttm_dma->dma_address[i] = 0; 1526 ttm_dma->dma_address[i] = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 24be27d3cd18..20935eb2a09e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -635,10 +635,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
635 nv_crtc->lut.depth = 0; 635 nv_crtc->lut.depth = 0;
636 } 636 }
637 637
638 /* Make sure that drm and hw vblank irqs get resumed if needed. */
639 for (head = 0; head < dev->mode_config.num_crtc; head++)
640 drm_vblank_on(dev, head);
641
642 /* This should ensure we don't hit a locking problem when someone 638 /* This should ensure we don't hit a locking problem when someone
643 * wakes us up via a connector. We should never go into suspend 639 * wakes us up via a connector. We should never go into suspend
644 * while the display is on anyways. 640 * while the display is on anyways.
@@ -648,6 +644,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
648 644
649 drm_helper_resume_force_mode(dev); 645 drm_helper_resume_force_mode(dev);
650 646
647 /* Make sure that drm and hw vblank irqs get resumed if needed. */
648 for (head = 0; head < dev->mode_config.num_crtc; head++)
649 drm_vblank_on(dev, head);
650
651 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 651 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
652 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 652 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
653 653
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 8a70cec59bcd..2dfe58af12e4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -24,7 +24,7 @@
24static int nouveau_platform_probe(struct platform_device *pdev) 24static int nouveau_platform_probe(struct platform_device *pdev)
25{ 25{
26 const struct nvkm_device_tegra_func *func; 26 const struct nvkm_device_tegra_func *func;
27 struct nvkm_device *device; 27 struct nvkm_device *device = NULL;
28 struct drm_device *drm; 28 struct drm_device *drm;
29 int ret; 29 int ret;
30 30
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 7f8a42721eb2..e7e581d6a8ff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
252 252
253 if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL))) 253 if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
254 return -ENOMEM; 254 return -ENOMEM;
255 *pdevice = &tdev->device; 255
256 tdev->func = func; 256 tdev->func = func;
257 tdev->pdev = pdev; 257 tdev->pdev = pdev;
258 tdev->irq = -1; 258 tdev->irq = -1;
259 259
260 tdev->vdd = devm_regulator_get(&pdev->dev, "vdd"); 260 tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
261 if (IS_ERR(tdev->vdd)) 261 if (IS_ERR(tdev->vdd)) {
262 return PTR_ERR(tdev->vdd); 262 ret = PTR_ERR(tdev->vdd);
263 goto free;
264 }
263 265
264 tdev->rst = devm_reset_control_get(&pdev->dev, "gpu"); 266 tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
265 if (IS_ERR(tdev->rst)) 267 if (IS_ERR(tdev->rst)) {
266 return PTR_ERR(tdev->rst); 268 ret = PTR_ERR(tdev->rst);
269 goto free;
270 }
267 271
268 tdev->clk = devm_clk_get(&pdev->dev, "gpu"); 272 tdev->clk = devm_clk_get(&pdev->dev, "gpu");
269 if (IS_ERR(tdev->clk)) 273 if (IS_ERR(tdev->clk)) {
270 return PTR_ERR(tdev->clk); 274 ret = PTR_ERR(tdev->clk);
275 goto free;
276 }
271 277
272 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr"); 278 tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
273 if (IS_ERR(tdev->clk_pwr)) 279 if (IS_ERR(tdev->clk_pwr)) {
274 return PTR_ERR(tdev->clk_pwr); 280 ret = PTR_ERR(tdev->clk_pwr);
281 goto free;
282 }
275 283
276 nvkm_device_tegra_probe_iommu(tdev); 284 nvkm_device_tegra_probe_iommu(tdev);
277 285
278 ret = nvkm_device_tegra_power_up(tdev); 286 ret = nvkm_device_tegra_power_up(tdev);
279 if (ret) 287 if (ret)
280 return ret; 288 goto remove;
281 289
282 tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value; 290 tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
283 ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev, 291 ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
@@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
285 cfg, dbg, detect, mmio, subdev_mask, 293 cfg, dbg, detect, mmio, subdev_mask,
286 &tdev->device); 294 &tdev->device);
287 if (ret) 295 if (ret)
288 return ret; 296 goto powerdown;
297
298 *pdevice = &tdev->device;
289 299
290 return 0; 300 return 0;
301
302powerdown:
303 nvkm_device_tegra_power_down(tdev);
304remove:
305 nvkm_device_tegra_remove_iommu(tdev);
306free:
307 kfree(tdev);
308 return ret;
291} 309}
292#else 310#else
293int 311int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 74e2f7c6c07e..9688970eca47 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
328 .outp = outp, 328 .outp = outp,
329 }, *dp = &_dp; 329 }, *dp = &_dp;
330 u32 datarate = 0; 330 u32 datarate = 0;
331 u8 pwr;
331 int ret; 332 int ret;
332 333
333 if (!outp->base.info.location && disp->func->sor.magic) 334 if (!outp->base.info.location && disp->func->sor.magic)
@@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
355 /* disable link interrupt handling during link training */ 356 /* disable link interrupt handling during link training */
356 nvkm_notify_put(&outp->irq); 357 nvkm_notify_put(&outp->irq);
357 358
359 /* ensure sink is not in a low-power state */
360 if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
361 if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
362 pwr &= ~DPCD_SC00_SET_POWER;
363 pwr |= DPCD_SC00_SET_POWER_D0;
364 nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
365 }
366 }
367
358 /* enable down-spreading and execute pre-train script from vbios */ 368 /* enable down-spreading and execute pre-train script from vbios */
359 dp_link_train_init(dp, outp->dpcd[3] & 0x01); 369 dp_link_train_init(dp, outp->dpcd[3] & 0x01);
360 370
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 9596290329c7..6e10c5e0ef11 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -71,5 +71,11 @@
71#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c 71#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
72#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03 72#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
73 73
74/* DPCD Sink Control */
75#define DPCD_SC00 0x00600
76#define DPCD_SC00_SET_POWER 0x03
77#define DPCD_SC00_SET_POWER_D0 0x01
78#define DPCD_SC00_SET_POWER_D3 0x03
79
74void nvkm_dp_train(struct work_struct *); 80void nvkm_dp_train(struct work_struct *);
75#endif 81#endif
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 2ae8577497ca..7c2e78201ead 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
168 cmd->command_size)) 168 cmd->command_size))
169 return -EFAULT; 169 return -EFAULT;
170 170
171 reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); 171 reloc_info = kmalloc_array(cmd->relocs_num,
172 sizeof(struct qxl_reloc_info), GFP_KERNEL);
172 if (!reloc_info) 173 if (!reloc_info)
173 return -ENOMEM; 174 return -ENOMEM;
174 175
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 3d031b50a8fd..9f029dda1f07 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -68,5 +68,5 @@ int qxl_gem_prime_mmap(struct drm_gem_object *obj,
68 struct vm_area_struct *area) 68 struct vm_area_struct *area)
69{ 69{
70 WARN_ONCE(1, "not implemented"); 70 WARN_ONCE(1, "not implemented");
71 return ENOSYS; 71 return -ENOSYS;
72} 72}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 44ee72e04df9..6af832545bc5 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -315,15 +315,27 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
315 unsigned max_lane_num = drm_dp_max_lane_count(dpcd); 315 unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
316 unsigned lane_num, i, max_pix_clock; 316 unsigned lane_num, i, max_pix_clock;
317 317
318 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { 318 if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
319 for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { 319 ENCODER_OBJECT_ID_NUTMEG) {
320 max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; 320 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
321 max_pix_clock = (lane_num * 270000 * 8) / bpp;
321 if (max_pix_clock >= pix_clock) { 322 if (max_pix_clock >= pix_clock) {
322 *dp_lanes = lane_num; 323 *dp_lanes = lane_num;
323 *dp_rate = link_rates[i]; 324 *dp_rate = 270000;
324 return 0; 325 return 0;
325 } 326 }
326 } 327 }
328 } else {
329 for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
330 for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
331 max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
332 if (max_pix_clock >= pix_clock) {
333 *dp_lanes = lane_num;
334 *dp_rate = link_rates[i];
335 return 0;
336 }
337 }
338 }
327 } 339 }
328 340
329 return -EINVAL; 341 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 902b59cebac5..4197ca1bb1e4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1744,7 +1744,6 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1744 } 1744 }
1745 1745
1746 drm_kms_helper_poll_enable(dev); 1746 drm_kms_helper_poll_enable(dev);
1747 drm_helper_hpd_irq_event(dev);
1748 1747
1749 /* set the power state here in case we are a PX system or headless */ 1748 /* set the power state here in case we are a PX system or headless */
1750 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) 1749 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a4674bfd979a..e29096b2fa6b 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
403 struct drm_crtc *crtc = &radeon_crtc->base; 403 struct drm_crtc *crtc = &radeon_crtc->base;
404 unsigned long flags; 404 unsigned long flags;
405 int r; 405 int r;
406 int vpos, hpos, stat, min_udelay; 406 int vpos, hpos, stat, min_udelay = 0;
407 unsigned repcnt = 4;
407 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id]; 408 struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
408 409
409 down_read(&rdev->exclusive_lock); 410 down_read(&rdev->exclusive_lock);
@@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
454 * In practice this won't execute very often unless on very fast 455 * In practice this won't execute very often unless on very fast
455 * machines because the time window for this to happen is very small. 456 * machines because the time window for this to happen is very small.
456 */ 457 */
457 for (;;) { 458 while (radeon_crtc->enabled && --repcnt) {
458 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank 459 /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
459 * start in hpos, and to the "fudged earlier" vblank start in 460 * start in hpos, and to the "fudged earlier" vblank start in
460 * vpos. 461 * vpos.
@@ -470,12 +471,24 @@ static void radeon_flip_work_func(struct work_struct *__work)
470 break; 471 break;
471 472
472 /* Sleep at least until estimated real start of hw vblank */ 473 /* Sleep at least until estimated real start of hw vblank */
473 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
474 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5); 474 min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
475 if (min_udelay > vblank->framedur_ns / 2000) {
476 /* Don't wait ridiculously long - something is wrong */
477 repcnt = 0;
478 break;
479 }
480 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
475 usleep_range(min_udelay, 2 * min_udelay); 481 usleep_range(min_udelay, 2 * min_udelay);
476 spin_lock_irqsave(&crtc->dev->event_lock, flags); 482 spin_lock_irqsave(&crtc->dev->event_lock, flags);
477 }; 483 };
478 484
485 if (!repcnt)
486 DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
487 "framedur %d, linedur %d, stat %d, vpos %d, "
488 "hpos %d\n", work->crtc_id, min_udelay,
489 vblank->framedur_ns / 1000,
490 vblank->linedur_ns / 1000, stat, vpos, hpos);
491
479 /* do the flip (mmio) */ 492 /* do the flip (mmio) */
480 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base); 493 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
481 494
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 460c8f2989da..7a98823bacd1 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -276,8 +276,12 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
276 if (rdev->irq.installed) { 276 if (rdev->irq.installed) {
277 for (i = 0; i < rdev->num_crtc; i++) { 277 for (i = 0; i < rdev->num_crtc; i++) {
278 if (rdev->pm.active_crtcs & (1 << i)) { 278 if (rdev->pm.active_crtcs & (1 << i)) {
279 rdev->pm.req_vblank |= (1 << i); 279 /* This can fail if a modeset is in progress */
280 drm_vblank_get(rdev->ddev, i); 280 if (drm_vblank_get(rdev->ddev, i) == 0)
281 rdev->pm.req_vblank |= (1 << i);
282 else
283 DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
284 i);
281 } 285 }
282 } 286 }
283 } 287 }
@@ -1078,10 +1082,6 @@ force:
1078 /* update displays */ 1082 /* update displays */
1079 radeon_dpm_display_configuration_changed(rdev); 1083 radeon_dpm_display_configuration_changed(rdev);
1080 1084
1081 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1082 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1083 rdev->pm.dpm.single_display = single_display;
1084
1085 /* wait for the rings to drain */ 1085 /* wait for the rings to drain */
1086 for (i = 0; i < RADEON_NUM_RINGS; i++) { 1086 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1087 struct radeon_ring *ring = &rdev->ring[i]; 1087 struct radeon_ring *ring = &rdev->ring[i];
@@ -1097,6 +1097,10 @@ force:
1097 1097
1098 radeon_dpm_post_set_power_state(rdev); 1098 radeon_dpm_post_set_power_state(rdev);
1099 1099
1100 rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
1101 rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
1102 rdev->pm.dpm.single_display = single_display;
1103
1100 if (rdev->asic->dpm.force_performance_level) { 1104 if (rdev->asic->dpm.force_performance_level) {
1101 if (rdev->pm.dpm.thermal_active) { 1105 if (rdev->pm.dpm.thermal_active) {
1102 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level; 1106 enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index c507896aca45..197b157b73d0 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
349 /* see if we can skip over some allocations */ 349 /* see if we can skip over some allocations */
350 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries)); 350 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
351 351
352 for (i = 0; i < RADEON_NUM_RINGS; ++i)
353 radeon_fence_ref(fences[i]);
354
352 spin_unlock(&sa_manager->wq.lock); 355 spin_unlock(&sa_manager->wq.lock);
353 r = radeon_fence_wait_any(rdev, fences, false); 356 r = radeon_fence_wait_any(rdev, fences, false);
357 for (i = 0; i < RADEON_NUM_RINGS; ++i)
358 radeon_fence_unref(&fences[i]);
354 spin_lock(&sa_manager->wq.lock); 359 spin_lock(&sa_manager->wq.lock);
355 /* if we have nothing to wait for block */ 360 /* if we have nothing to wait for block */
356 if (r == -ENOENT) { 361 if (r == -ENOENT) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e34307459e50..e06ac546a90f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
758 0, PAGE_SIZE, 758 0, PAGE_SIZE,
759 PCI_DMA_BIDIRECTIONAL); 759 PCI_DMA_BIDIRECTIONAL);
760 if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) { 760 if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
761 while (--i) { 761 while (i--) {
762 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i], 762 pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
763 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 763 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
764 gtt->ttm.dma_address[i] = 0; 764 gtt->ttm.dma_address[i] = 0;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 18dfe3ec9a62..22278bcfc60e 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -215,7 +215,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
215 struct drm_gem_cma_object *cma_obj; 215 struct drm_gem_cma_object *cma_obj;
216 216
217 if (size == 0) 217 if (size == 0)
218 return NULL; 218 return ERR_PTR(-EINVAL);
219 219
220 /* First, try to get a vc4_bo from the kernel BO cache. */ 220 /* First, try to get a vc4_bo from the kernel BO cache. */
221 if (from_cache) { 221 if (from_cache) {
@@ -237,7 +237,7 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size,
237 if (IS_ERR(cma_obj)) { 237 if (IS_ERR(cma_obj)) {
238 DRM_ERROR("Failed to allocate from CMA:\n"); 238 DRM_ERROR("Failed to allocate from CMA:\n");
239 vc4_bo_stats_dump(vc4); 239 vc4_bo_stats_dump(vc4);
240 return NULL; 240 return ERR_PTR(-ENOMEM);
241 } 241 }
242 } 242 }
243 243
@@ -259,8 +259,8 @@ int vc4_dumb_create(struct drm_file *file_priv,
259 args->size = args->pitch * args->height; 259 args->size = args->pitch * args->height;
260 260
261 bo = vc4_bo_create(dev, args->size, false); 261 bo = vc4_bo_create(dev, args->size, false);
262 if (!bo) 262 if (IS_ERR(bo))
263 return -ENOMEM; 263 return PTR_ERR(bo);
264 264
265 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 265 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
266 drm_gem_object_unreference_unlocked(&bo->base.base); 266 drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -443,8 +443,8 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
443 * get zeroed, and that might leak data between users. 443 * get zeroed, and that might leak data between users.
444 */ 444 */
445 bo = vc4_bo_create(dev, args->size, false); 445 bo = vc4_bo_create(dev, args->size, false);
446 if (!bo) 446 if (IS_ERR(bo))
447 return -ENOMEM; 447 return PTR_ERR(bo);
448 448
449 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle); 449 ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
450 drm_gem_object_unreference_unlocked(&bo->base.base); 450 drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -496,8 +496,8 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
496 } 496 }
497 497
498 bo = vc4_bo_create(dev, args->size, true); 498 bo = vc4_bo_create(dev, args->size, true);
499 if (!bo) 499 if (IS_ERR(bo))
500 return -ENOMEM; 500 return PTR_ERR(bo);
501 501
502 ret = copy_from_user(bo->base.vaddr, 502 ret = copy_from_user(bo->base.vaddr,
503 (void __user *)(uintptr_t)args->data, 503 (void __user *)(uintptr_t)args->data,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 3d1df6b1c4d3..f53fe6cd72be 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -91,8 +91,12 @@ struct vc4_dev {
91 struct vc4_bo *overflow_mem; 91 struct vc4_bo *overflow_mem;
92 struct work_struct overflow_mem_work; 92 struct work_struct overflow_mem_work;
93 93
94 int power_refcount;
95
96 /* Mutex controlling the power refcount. */
97 struct mutex power_lock;
98
94 struct { 99 struct {
95 uint32_t last_ct0ca, last_ct1ca;
96 struct timer_list timer; 100 struct timer_list timer;
97 struct work_struct reset_work; 101 struct work_struct reset_work;
98 } hangcheck; 102 } hangcheck;
@@ -142,6 +146,7 @@ struct vc4_seqno_cb {
142}; 146};
143 147
144struct vc4_v3d { 148struct vc4_v3d {
149 struct vc4_dev *vc4;
145 struct platform_device *pdev; 150 struct platform_device *pdev;
146 void __iomem *regs; 151 void __iomem *regs;
147}; 152};
@@ -202,6 +207,11 @@ struct vc4_exec_info {
202 /* Sequence number for this bin/render job. */ 207 /* Sequence number for this bin/render job. */
203 uint64_t seqno; 208 uint64_t seqno;
204 209
210 /* Last current addresses the hardware was processing when the
211 * hangcheck timer checked on us.
212 */
213 uint32_t last_ct0ca, last_ct1ca;
214
205 /* Kernel-space copy of the ioctl arguments */ 215 /* Kernel-space copy of the ioctl arguments */
206 struct drm_vc4_submit_cl *args; 216 struct drm_vc4_submit_cl *args;
207 217
@@ -443,7 +453,6 @@ void vc4_plane_async_set_fb(struct drm_plane *plane,
443extern struct platform_driver vc4_v3d_driver; 453extern struct platform_driver vc4_v3d_driver;
444int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused); 454int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
445int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused); 455int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
446int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
447 456
448/* vc4_validate.c */ 457/* vc4_validate.c */
449int 458int
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 48ce30a6f4b5..202aa1544acc 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/pm_runtime.h>
26#include <linux/device.h> 27#include <linux/device.h>
27#include <linux/io.h> 28#include <linux/io.h>
28 29
@@ -228,8 +229,16 @@ vc4_reset(struct drm_device *dev)
228 struct vc4_dev *vc4 = to_vc4_dev(dev); 229 struct vc4_dev *vc4 = to_vc4_dev(dev);
229 230
230 DRM_INFO("Resetting GPU.\n"); 231 DRM_INFO("Resetting GPU.\n");
231 vc4_v3d_set_power(vc4, false); 232
232 vc4_v3d_set_power(vc4, true); 233 mutex_lock(&vc4->power_lock);
234 if (vc4->power_refcount) {
235 /* Power the device off and back on the by dropping the
236 * reference on runtime PM.
237 */
238 pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
239 pm_runtime_get_sync(&vc4->v3d->pdev->dev);
240 }
241 mutex_unlock(&vc4->power_lock);
233 242
234 vc4_irq_reset(dev); 243 vc4_irq_reset(dev);
235 244
@@ -257,10 +266,17 @@ vc4_hangcheck_elapsed(unsigned long data)
257 struct drm_device *dev = (struct drm_device *)data; 266 struct drm_device *dev = (struct drm_device *)data;
258 struct vc4_dev *vc4 = to_vc4_dev(dev); 267 struct vc4_dev *vc4 = to_vc4_dev(dev);
259 uint32_t ct0ca, ct1ca; 268 uint32_t ct0ca, ct1ca;
269 unsigned long irqflags;
270 struct vc4_exec_info *exec;
271
272 spin_lock_irqsave(&vc4->job_lock, irqflags);
273 exec = vc4_first_job(vc4);
260 274
261 /* If idle, we can stop watching for hangs. */ 275 /* If idle, we can stop watching for hangs. */
262 if (list_empty(&vc4->job_list)) 276 if (!exec) {
277 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
263 return; 278 return;
279 }
264 280
265 ct0ca = V3D_READ(V3D_CTNCA(0)); 281 ct0ca = V3D_READ(V3D_CTNCA(0));
266 ct1ca = V3D_READ(V3D_CTNCA(1)); 282 ct1ca = V3D_READ(V3D_CTNCA(1));
@@ -268,14 +284,16 @@ vc4_hangcheck_elapsed(unsigned long data)
268 /* If we've made any progress in execution, rearm the timer 284 /* If we've made any progress in execution, rearm the timer
269 * and wait. 285 * and wait.
270 */ 286 */
271 if (ct0ca != vc4->hangcheck.last_ct0ca || 287 if (ct0ca != exec->last_ct0ca || ct1ca != exec->last_ct1ca) {
272 ct1ca != vc4->hangcheck.last_ct1ca) { 288 exec->last_ct0ca = ct0ca;
273 vc4->hangcheck.last_ct0ca = ct0ca; 289 exec->last_ct1ca = ct1ca;
274 vc4->hangcheck.last_ct1ca = ct1ca; 290 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
275 vc4_queue_hangcheck(dev); 291 vc4_queue_hangcheck(dev);
276 return; 292 return;
277 } 293 }
278 294
295 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
296
279 /* We've gone too long with no progress, reset. This has to 297 /* We've gone too long with no progress, reset. This has to
280 * be done from a work struct, since resetting can sleep and 298 * be done from a work struct, since resetting can sleep and
281 * this timer hook isn't allowed to. 299 * this timer hook isn't allowed to.
@@ -340,12 +358,7 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns,
340 finish_wait(&vc4->job_wait_queue, &wait); 358 finish_wait(&vc4->job_wait_queue, &wait);
341 trace_vc4_wait_for_seqno_end(dev, seqno); 359 trace_vc4_wait_for_seqno_end(dev, seqno);
342 360
343 if (ret && ret != -ERESTARTSYS) { 361 return ret;
344 DRM_ERROR("timeout waiting for render thread idle\n");
345 return ret;
346 }
347
348 return 0;
349} 362}
350 363
351static void 364static void
@@ -578,9 +591,9 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
578 } 591 }
579 592
580 bo = vc4_bo_create(dev, exec_size, true); 593 bo = vc4_bo_create(dev, exec_size, true);
581 if (!bo) { 594 if (IS_ERR(bo)) {
582 DRM_ERROR("Couldn't allocate BO for binning\n"); 595 DRM_ERROR("Couldn't allocate BO for binning\n");
583 ret = -ENOMEM; 596 ret = PTR_ERR(bo);
584 goto fail; 597 goto fail;
585 } 598 }
586 exec->exec_bo = &bo->base; 599 exec->exec_bo = &bo->base;
@@ -617,6 +630,7 @@ fail:
617static void 630static void
618vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) 631vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
619{ 632{
633 struct vc4_dev *vc4 = to_vc4_dev(dev);
620 unsigned i; 634 unsigned i;
621 635
622 /* Need the struct lock for drm_gem_object_unreference(). */ 636 /* Need the struct lock for drm_gem_object_unreference(). */
@@ -635,6 +649,11 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
635 } 649 }
636 mutex_unlock(&dev->struct_mutex); 650 mutex_unlock(&dev->struct_mutex);
637 651
652 mutex_lock(&vc4->power_lock);
653 if (--vc4->power_refcount == 0)
654 pm_runtime_put(&vc4->v3d->pdev->dev);
655 mutex_unlock(&vc4->power_lock);
656
638 kfree(exec); 657 kfree(exec);
639} 658}
640 659
@@ -746,6 +765,9 @@ vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
746 struct drm_gem_object *gem_obj; 765 struct drm_gem_object *gem_obj;
747 struct vc4_bo *bo; 766 struct vc4_bo *bo;
748 767
768 if (args->pad != 0)
769 return -EINVAL;
770
749 gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle); 771 gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
750 if (!gem_obj) { 772 if (!gem_obj) {
751 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle); 773 DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
@@ -772,7 +794,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
772 struct vc4_dev *vc4 = to_vc4_dev(dev); 794 struct vc4_dev *vc4 = to_vc4_dev(dev);
773 struct drm_vc4_submit_cl *args = data; 795 struct drm_vc4_submit_cl *args = data;
774 struct vc4_exec_info *exec; 796 struct vc4_exec_info *exec;
775 int ret; 797 int ret = 0;
776 798
777 if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) { 799 if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
778 DRM_ERROR("Unknown flags: 0x%02x\n", args->flags); 800 DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
@@ -785,6 +807,15 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
785 return -ENOMEM; 807 return -ENOMEM;
786 } 808 }
787 809
810 mutex_lock(&vc4->power_lock);
811 if (vc4->power_refcount++ == 0)
812 ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
813 mutex_unlock(&vc4->power_lock);
814 if (ret < 0) {
815 kfree(exec);
816 return ret;
817 }
818
788 exec->args = args; 819 exec->args = args;
789 INIT_LIST_HEAD(&exec->unref_list); 820 INIT_LIST_HEAD(&exec->unref_list);
790 821
@@ -839,6 +870,8 @@ vc4_gem_init(struct drm_device *dev)
839 (unsigned long)dev); 870 (unsigned long)dev);
840 871
841 INIT_WORK(&vc4->job_done_work, vc4_job_done_work); 872 INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
873
874 mutex_init(&vc4->power_lock);
842} 875}
843 876
844void 877void
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index b68060e758db..78a21357fb2d 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -57,7 +57,7 @@ vc4_overflow_mem_work(struct work_struct *work)
57 struct vc4_bo *bo; 57 struct vc4_bo *bo;
58 58
59 bo = vc4_bo_create(dev, 256 * 1024, true); 59 bo = vc4_bo_create(dev, 256 * 1024, true);
60 if (!bo) { 60 if (IS_ERR(bo)) {
61 DRM_ERROR("Couldn't allocate binner overflow mem\n"); 61 DRM_ERROR("Couldn't allocate binner overflow mem\n");
62 return; 62 return;
63 } 63 }
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 8a2a312e2c1b..0f12418725e5 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -316,20 +316,11 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
316 size += xtiles * ytiles * loop_body_size; 316 size += xtiles * ytiles * loop_body_size;
317 317
318 setup->rcl = &vc4_bo_create(dev, size, true)->base; 318 setup->rcl = &vc4_bo_create(dev, size, true)->base;
319 if (!setup->rcl) 319 if (IS_ERR(setup->rcl))
320 return -ENOMEM; 320 return PTR_ERR(setup->rcl);
321 list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head, 321 list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
322 &exec->unref_list); 322 &exec->unref_list);
323 323
324 rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
325 rcl_u32(setup,
326 (setup->color_write ? (setup->color_write->paddr +
327 args->color_write.offset) :
328 0));
329 rcl_u16(setup, args->width);
330 rcl_u16(setup, args->height);
331 rcl_u16(setup, args->color_write.bits);
332
333 /* The tile buffer gets cleared when the previous tile is stored. If 324 /* The tile buffer gets cleared when the previous tile is stored. If
334 * the clear values changed between frames, then the tile buffer has 325 * the clear values changed between frames, then the tile buffer has
335 * stale clear values in it, so we have to do a store in None mode (no 326 * stale clear values in it, so we have to do a store in None mode (no
@@ -349,6 +340,15 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
349 rcl_u32(setup, 0); /* no address, since we're in None mode */ 340 rcl_u32(setup, 0); /* no address, since we're in None mode */
350 } 341 }
351 342
343 rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
344 rcl_u32(setup,
345 (setup->color_write ? (setup->color_write->paddr +
346 args->color_write.offset) :
347 0));
348 rcl_u16(setup, args->width);
349 rcl_u16(setup, args->height);
350 rcl_u16(setup, args->color_write.bits);
351
352 for (y = min_y_tile; y <= max_y_tile; y++) { 352 for (y = min_y_tile; y <= max_y_tile; y++) {
353 for (x = min_x_tile; x <= max_x_tile; x++) { 353 for (x = min_x_tile; x <= max_x_tile; x++) {
354 bool first = (x == min_x_tile && y == min_y_tile); 354 bool first = (x == min_x_tile && y == min_y_tile);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 314ff71db978..31de5d17bc85 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include "linux/component.h" 19#include "linux/component.h"
20#include "linux/pm_runtime.h"
20#include "vc4_drv.h" 21#include "vc4_drv.h"
21#include "vc4_regs.h" 22#include "vc4_regs.h"
22 23
@@ -144,18 +145,6 @@ int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
144} 145}
145#endif /* CONFIG_DEBUG_FS */ 146#endif /* CONFIG_DEBUG_FS */
146 147
147int
148vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
149{
150 /* XXX: This interface is needed for GPU reset, and the way to
151 * do it is to turn our power domain off and back on. We
152 * can't just reset from within the driver, because the reset
153 * bits are in the power domain's register area, and get set
154 * during the poweron process.
155 */
156 return 0;
157}
158
159static void vc4_v3d_init_hw(struct drm_device *dev) 148static void vc4_v3d_init_hw(struct drm_device *dev)
160{ 149{
161 struct vc4_dev *vc4 = to_vc4_dev(dev); 150 struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -167,6 +156,29 @@ static void vc4_v3d_init_hw(struct drm_device *dev)
167 V3D_WRITE(V3D_VPMBASE, 0); 156 V3D_WRITE(V3D_VPMBASE, 0);
168} 157}
169 158
159#ifdef CONFIG_PM
160static int vc4_v3d_runtime_suspend(struct device *dev)
161{
162 struct vc4_v3d *v3d = dev_get_drvdata(dev);
163 struct vc4_dev *vc4 = v3d->vc4;
164
165 vc4_irq_uninstall(vc4->dev);
166
167 return 0;
168}
169
170static int vc4_v3d_runtime_resume(struct device *dev)
171{
172 struct vc4_v3d *v3d = dev_get_drvdata(dev);
173 struct vc4_dev *vc4 = v3d->vc4;
174
175 vc4_v3d_init_hw(vc4->dev);
176 vc4_irq_postinstall(vc4->dev);
177
178 return 0;
179}
180#endif
181
170static int vc4_v3d_bind(struct device *dev, struct device *master, void *data) 182static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
171{ 183{
172 struct platform_device *pdev = to_platform_device(dev); 184 struct platform_device *pdev = to_platform_device(dev);
@@ -179,6 +191,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
179 if (!v3d) 191 if (!v3d)
180 return -ENOMEM; 192 return -ENOMEM;
181 193
194 dev_set_drvdata(dev, v3d);
195
182 v3d->pdev = pdev; 196 v3d->pdev = pdev;
183 197
184 v3d->regs = vc4_ioremap_regs(pdev, 0); 198 v3d->regs = vc4_ioremap_regs(pdev, 0);
@@ -186,6 +200,7 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
186 return PTR_ERR(v3d->regs); 200 return PTR_ERR(v3d->regs);
187 201
188 vc4->v3d = v3d; 202 vc4->v3d = v3d;
203 v3d->vc4 = vc4;
189 204
190 if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) { 205 if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
191 DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n", 206 DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
@@ -207,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
207 return ret; 222 return ret;
208 } 223 }
209 224
225 pm_runtime_enable(dev);
226
210 return 0; 227 return 0;
211} 228}
212 229
@@ -216,6 +233,8 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
216 struct drm_device *drm = dev_get_drvdata(master); 233 struct drm_device *drm = dev_get_drvdata(master);
217 struct vc4_dev *vc4 = to_vc4_dev(drm); 234 struct vc4_dev *vc4 = to_vc4_dev(drm);
218 235
236 pm_runtime_disable(dev);
237
219 drm_irq_uninstall(drm); 238 drm_irq_uninstall(drm);
220 239
221 /* Disable the binner's overflow memory address, so the next 240 /* Disable the binner's overflow memory address, so the next
@@ -228,6 +247,10 @@ static void vc4_v3d_unbind(struct device *dev, struct device *master,
228 vc4->v3d = NULL; 247 vc4->v3d = NULL;
229} 248}
230 249
250static const struct dev_pm_ops vc4_v3d_pm_ops = {
251 SET_RUNTIME_PM_OPS(vc4_v3d_runtime_suspend, vc4_v3d_runtime_resume, NULL)
252};
253
231static const struct component_ops vc4_v3d_ops = { 254static const struct component_ops vc4_v3d_ops = {
232 .bind = vc4_v3d_bind, 255 .bind = vc4_v3d_bind,
233 .unbind = vc4_v3d_unbind, 256 .unbind = vc4_v3d_unbind,
@@ -255,5 +278,6 @@ struct platform_driver vc4_v3d_driver = {
255 .driver = { 278 .driver = {
256 .name = "vc4_v3d", 279 .name = "vc4_v3d",
257 .of_match_table = vc4_v3d_dt_match, 280 .of_match_table = vc4_v3d_dt_match,
281 .pm = &vc4_v3d_pm_ops,
258 }, 282 },
259}; 283};
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index e26d9f6face3..24c2c746e8f3 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -401,8 +401,8 @@ validate_tile_binning_config(VALIDATE_ARGS)
401 tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size, 401 tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size,
402 true); 402 true);
403 exec->tile_bo = &tile_bo->base; 403 exec->tile_bo = &tile_bo->base;
404 if (!exec->tile_bo) 404 if (IS_ERR(exec->tile_bo))
405 return -ENOMEM; 405 return PTR_ERR(exec->tile_bo);
406 list_add_tail(&tile_bo->unref_head, &exec->unref_list); 406 list_add_tail(&tile_bo->unref_head, &exec->unref_list);
407 407
408 /* tile alloc address. */ 408 /* tile alloc address. */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index db082bea8daf..c5a1a08b0449 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -563,6 +563,8 @@ static void vmw_sou_connector_destroy(struct drm_connector *connector)
563 563
564static const struct drm_connector_funcs vmw_sou_connector_funcs = { 564static const struct drm_connector_funcs vmw_sou_connector_funcs = {
565 .dpms = vmw_du_connector_dpms, 565 .dpms = vmw_du_connector_dpms,
566 .detect = vmw_du_connector_detect,
567 .fill_modes = vmw_du_connector_fill_modes,
566 .set_property = vmw_du_connector_set_property, 568 .set_property = vmw_du_connector_set_property,
567 .destroy = vmw_sou_connector_destroy, 569 .destroy = vmw_sou_connector_destroy,
568}; 570};
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index da462afcb225..dd2dbb9746ce 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -18,6 +18,7 @@
18#include <linux/host1x.h> 18#include <linux/host1x.h>
19#include <linux/of.h> 19#include <linux/of.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/of_device.h>
21 22
22#include "bus.h" 23#include "bus.h"
23#include "dev.h" 24#include "dev.h"
@@ -394,6 +395,7 @@ static int host1x_device_add(struct host1x *host1x,
394 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask; 395 device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
395 device->dev.dma_mask = &device->dev.coherent_dma_mask; 396 device->dev.dma_mask = &device->dev.coherent_dma_mask;
396 dev_set_name(&device->dev, "%s", driver->driver.name); 397 dev_set_name(&device->dev, "%s", driver->driver.name);
398 of_dma_configure(&device->dev, host1x->dev->of_node);
397 device->dev.release = host1x_device_release; 399 device->dev.release = host1x_device_release;
398 device->dev.bus = &host1x_bus_type; 400 device->dev.bus = &host1x_bus_type;
399 device->dev.parent = host1x->dev; 401 device->dev.parent = host1x->dev;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 314bf3718cc7..ff348690df94 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -23,6 +23,7 @@
23#include <linux/of_device.h> 23#include <linux/of_device.h>
24#include <linux/clk.h> 24#include <linux/clk.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/dma-mapping.h>
26 27
27#define CREATE_TRACE_POINTS 28#define CREATE_TRACE_POINTS
28#include <trace/events/host1x.h> 29#include <trace/events/host1x.h>
@@ -68,6 +69,7 @@ static const struct host1x_info host1x01_info = {
68 .nb_bases = 8, 69 .nb_bases = 8,
69 .init = host1x01_init, 70 .init = host1x01_init,
70 .sync_offset = 0x3000, 71 .sync_offset = 0x3000,
72 .dma_mask = DMA_BIT_MASK(32),
71}; 73};
72 74
73static const struct host1x_info host1x02_info = { 75static const struct host1x_info host1x02_info = {
@@ -77,6 +79,7 @@ static const struct host1x_info host1x02_info = {
77 .nb_bases = 12, 79 .nb_bases = 12,
78 .init = host1x02_init, 80 .init = host1x02_init,
79 .sync_offset = 0x3000, 81 .sync_offset = 0x3000,
82 .dma_mask = DMA_BIT_MASK(32),
80}; 83};
81 84
82static const struct host1x_info host1x04_info = { 85static const struct host1x_info host1x04_info = {
@@ -86,6 +89,7 @@ static const struct host1x_info host1x04_info = {
86 .nb_bases = 64, 89 .nb_bases = 64,
87 .init = host1x04_init, 90 .init = host1x04_init,
88 .sync_offset = 0x2100, 91 .sync_offset = 0x2100,
92 .dma_mask = DMA_BIT_MASK(34),
89}; 93};
90 94
91static const struct host1x_info host1x05_info = { 95static const struct host1x_info host1x05_info = {
@@ -95,6 +99,7 @@ static const struct host1x_info host1x05_info = {
95 .nb_bases = 64, 99 .nb_bases = 64,
96 .init = host1x05_init, 100 .init = host1x05_init,
97 .sync_offset = 0x2100, 101 .sync_offset = 0x2100,
102 .dma_mask = DMA_BIT_MASK(34),
98}; 103};
99 104
100static struct of_device_id host1x_of_match[] = { 105static struct of_device_id host1x_of_match[] = {
@@ -148,6 +153,8 @@ static int host1x_probe(struct platform_device *pdev)
148 if (IS_ERR(host->regs)) 153 if (IS_ERR(host->regs))
149 return PTR_ERR(host->regs); 154 return PTR_ERR(host->regs);
150 155
156 dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
157
151 if (host->info->init) { 158 if (host->info->init) {
152 err = host->info->init(host); 159 err = host->info->init(host);
153 if (err) 160 if (err)
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 0b6e8e9629c5..dace124994bb 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -96,6 +96,7 @@ struct host1x_info {
96 int nb_mlocks; /* host1x: number of mlocks */ 96 int nb_mlocks; /* host1x: number of mlocks */
97 int (*init)(struct host1x *); /* initialize per SoC ops */ 97 int (*init)(struct host1x *); /* initialize per SoC ops */
98 int sync_offset; 98 int sync_offset;
99 u64 dma_mask; /* mask of addressable memory */
99}; 100};
100 101
101struct host1x { 102struct host1x {
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index f2e13eb8339f..e00db3f510dd 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1050,6 +1050,17 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
1050 for (i = 0; i < ARRAY_SIZE(client_reg); i++) { 1050 for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
1051 const struct ipu_platform_reg *reg = &client_reg[i]; 1051 const struct ipu_platform_reg *reg = &client_reg[i];
1052 struct platform_device *pdev; 1052 struct platform_device *pdev;
1053 struct device_node *of_node;
1054
1055 /* Associate subdevice with the corresponding port node */
1056 of_node = of_graph_get_port_by_id(dev->of_node, i);
1057 if (!of_node) {
1058 dev_info(dev,
1059 "no port@%d node in %s, not using %s%d\n",
1060 i, dev->of_node->full_name,
1061 (i / 2) ? "DI" : "CSI", i % 2);
1062 continue;
1063 }
1053 1064
1054 pdev = platform_device_alloc(reg->name, id++); 1065 pdev = platform_device_alloc(reg->name, id++);
1055 if (!pdev) { 1066 if (!pdev) {
@@ -1057,17 +1068,9 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
1057 goto err_register; 1068 goto err_register;
1058 } 1069 }
1059 1070
1071 pdev->dev.of_node = of_node;
1060 pdev->dev.parent = dev; 1072 pdev->dev.parent = dev;
1061 1073
1062 /* Associate subdevice with the corresponding port node */
1063 pdev->dev.of_node = of_graph_get_port_by_id(dev->of_node, i);
1064 if (!pdev->dev.of_node) {
1065 dev_err(dev, "missing port@%d node in %s\n", i,
1066 dev->of_node->full_name);
1067 ret = -ENODEV;
1068 goto err_register;
1069 }
1070
1071 ret = platform_device_add_data(pdev, &reg->pdata, 1074 ret = platform_device_add_data(pdev, &reg->pdata,
1072 sizeof(reg->pdata)); 1075 sizeof(reg->pdata));
1073 if (!ret) 1076 if (!ret)
@@ -1289,10 +1292,6 @@ static int ipu_probe(struct platform_device *pdev)
1289 ipu->irq_sync = irq_sync; 1292 ipu->irq_sync = irq_sync;
1290 ipu->irq_err = irq_err; 1293 ipu->irq_err = irq_err;
1291 1294
1292 ret = ipu_irq_init(ipu);
1293 if (ret)
1294 goto out_failed_irq;
1295
1296 ret = device_reset(&pdev->dev); 1295 ret = device_reset(&pdev->dev);
1297 if (ret) { 1296 if (ret) {
1298 dev_err(&pdev->dev, "failed to reset: %d\n", ret); 1297 dev_err(&pdev->dev, "failed to reset: %d\n", ret);
@@ -1302,6 +1301,10 @@ static int ipu_probe(struct platform_device *pdev)
1302 if (ret) 1301 if (ret)
1303 goto out_failed_reset; 1302 goto out_failed_reset;
1304 1303
1304 ret = ipu_irq_init(ipu);
1305 if (ret)
1306 goto out_failed_irq;
1307
1305 /* Set MCU_T to divide MCU access window into 2 */ 1308 /* Set MCU_T to divide MCU access window into 2 */
1306 ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18), 1309 ipu_cm_write(ipu, 0x00400000L | (IPU_MCU_T_DEFAULT << 18),
1307 IPU_DISP_GEN); 1310 IPU_DISP_GEN);
@@ -1324,9 +1327,9 @@ static int ipu_probe(struct platform_device *pdev)
1324failed_add_clients: 1327failed_add_clients:
1325 ipu_submodules_exit(ipu); 1328 ipu_submodules_exit(ipu);
1326failed_submodules_init: 1329failed_submodules_init:
1327out_failed_reset:
1328 ipu_irq_exit(ipu); 1330 ipu_irq_exit(ipu);
1329out_failed_irq: 1331out_failed_irq:
1332out_failed_reset:
1330 clk_disable_unprepare(ipu->clk); 1333 clk_disable_unprepare(ipu->clk);
1331 return ret; 1334 return ret;
1332} 1335}
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index f155b8380481..2b3105c8aed3 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
126 struct ads1015_data *data = i2c_get_clientdata(client); 126 struct ads1015_data *data = i2c_get_clientdata(client);
127 unsigned int pga = data->channel_data[channel].pga; 127 unsigned int pga = data->channel_data[channel].pga;
128 int fullscale = fullscale_table[pga]; 128 int fullscale = fullscale_table[pga];
129 const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0; 129 const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
130 130
131 return DIV_ROUND_CLOSEST(reg * fullscale, mask); 131 return DIV_ROUND_CLOSEST(reg * fullscale, mask);
132} 132}
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 82de3deeb18a..685568b1236d 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev,
406 unsigned long *state) 406 unsigned long *state)
407{ 407{
408 struct gpio_fan_data *fan_data = cdev->devdata; 408 struct gpio_fan_data *fan_data = cdev->devdata;
409 int r;
410 409
411 if (!fan_data) 410 if (!fan_data)
412 return -EINVAL; 411 return -EINVAL;
413 412
414 r = get_fan_speed_index(fan_data); 413 *state = fan_data->speed_index;
415 if (r < 0)
416 return r;
417
418 *state = r;
419 return 0; 414 return 0;
420} 415}
421 416
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 3711df1d4526..4a45408dd820 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -586,8 +586,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
586 if (!dev) 586 if (!dev)
587 return -ENOMEM; 587 return -ENOMEM;
588 588
589 dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(struct bsc_regs *), 589 dev->bsc_regmap = devm_kzalloc(&pdev->dev, sizeof(*dev->bsc_regmap), GFP_KERNEL);
590 GFP_KERNEL);
591 if (!dev->bsc_regmap) 590 if (!dev->bsc_regmap)
592 return -ENOMEM; 591 return -ENOMEM;
593 592
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f62d69799a9c..27fa0cb09538 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1271,6 +1271,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1271 switch (dev->device) { 1271 switch (dev->device) {
1272 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS: 1272 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
1273 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS: 1273 case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
1274 case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
1275 case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
1274 case PCI_DEVICE_ID_INTEL_DNV_SMBUS: 1276 case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
1275 priv->features |= FEATURE_I2C_BLOCK_READ; 1277 priv->features |= FEATURE_I2C_BLOCK_READ;
1276 priv->features |= FEATURE_IRQ; 1278 priv->features |= FEATURE_IRQ;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 08d26ba61ed3..13c45296ce5b 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1450,7 +1450,8 @@ omap_i2c_probe(struct platform_device *pdev)
1450 1450
1451err_unuse_clocks: 1451err_unuse_clocks:
1452 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); 1452 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
1453 pm_runtime_put(omap->dev); 1453 pm_runtime_dont_use_autosuspend(omap->dev);
1454 pm_runtime_put_sync(omap->dev);
1454 pm_runtime_disable(&pdev->dev); 1455 pm_runtime_disable(&pdev->dev);
1455err_free_mem: 1456err_free_mem:
1456 1457
@@ -1468,6 +1469,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
1468 return ret; 1469 return ret;
1469 1470
1470 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); 1471 omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
1472 pm_runtime_dont_use_autosuspend(&pdev->dev);
1471 pm_runtime_put_sync(&pdev->dev); 1473 pm_runtime_put_sync(&pdev->dev);
1472 pm_runtime_disable(&pdev->dev); 1474 pm_runtime_disable(&pdev->dev);
1473 return 0; 1475 return 0;
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index f3e5ff8522f0..213ba55e17c3 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -467,7 +467,7 @@ static int uniphier_fi2c_clk_init(struct device *dev,
467 bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED; 467 bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED;
468 468
469 if (!bus_speed) { 469 if (!bus_speed) {
470 dev_err(dev, "clock-freqyency should not be zero\n"); 470 dev_err(dev, "clock-frequency should not be zero\n");
471 return -EINVAL; 471 return -EINVAL;
472 } 472 }
473 473
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 1f4f3f53819c..89eaa8a7e1e0 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -328,7 +328,7 @@ static int uniphier_i2c_clk_init(struct device *dev,
328 bus_speed = UNIPHIER_I2C_DEFAULT_SPEED; 328 bus_speed = UNIPHIER_I2C_DEFAULT_SPEED;
329 329
330 if (!bus_speed) { 330 if (!bus_speed) {
331 dev_err(dev, "clock-freqyency should not be zero\n"); 331 dev_err(dev, "clock-frequency should not be zero\n");
332 return -EINVAL; 332 return -EINVAL;
333 } 333 }
334 334
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 00da80e02154..94b80a51ab68 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -358,6 +358,7 @@ int ib_register_device(struct ib_device *device,
358 ret = device->query_device(device, &device->attrs, &uhw); 358 ret = device->query_device(device, &device->attrs, &uhw);
359 if (ret) { 359 if (ret) {
360 printk(KERN_WARNING "Couldn't query the device attributes\n"); 360 printk(KERN_WARNING "Couldn't query the device attributes\n");
361 ib_cache_cleanup_one(device);
361 goto out; 362 goto out;
362 } 363 }
363 364
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index f334090bb612..1e37f3515d98 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1071,7 +1071,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
1071 } 1071 }
1072 } 1072 }
1073 1073
1074 if (rec->hop_limit > 1 || use_roce) { 1074 if (rec->hop_limit > 0 || use_roce) {
1075 ah_attr->ah_flags = IB_AH_GRH; 1075 ah_attr->ah_flags = IB_AH_GRH;
1076 ah_attr->grh.dgid = rec->dgid; 1076 ah_attr->grh.dgid = rec->dgid;
1077 1077
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 3de93517efe4..14606afbfaa8 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -336,7 +336,6 @@ static ssize_t _show_port_gid_attr(struct ib_port *p,
336 union ib_gid gid; 336 union ib_gid gid;
337 struct ib_gid_attr gid_attr = {}; 337 struct ib_gid_attr gid_attr = {};
338 ssize_t ret; 338 ssize_t ret;
339 va_list args;
340 339
341 ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid, 340 ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid,
342 &gid_attr); 341 &gid_attr);
@@ -348,7 +347,6 @@ static ssize_t _show_port_gid_attr(struct ib_port *p,
348err: 347err:
349 if (gid_attr.ndev) 348 if (gid_attr.ndev)
350 dev_put(gid_attr.ndev); 349 dev_put(gid_attr.ndev);
351 va_end(args);
352 return ret; 350 return ret;
353} 351}
354 352
@@ -722,12 +720,11 @@ static struct attribute_group *get_counter_table(struct ib_device *dev,
722 720
723 if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO, 721 if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
724 &cpi, 40, sizeof(cpi)) >= 0) { 722 &cpi, 40, sizeof(cpi)) >= 0) {
725 723 if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
726 if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH)
727 /* We have extended counters */ 724 /* We have extended counters */
728 return &pma_group_ext; 725 return &pma_group_ext;
729 726
730 if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF) 727 if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
731 /* But not the IETF ones */ 728 /* But not the IETF ones */
732 return &pma_group_noietf; 729 return &pma_group_noietf;
733 } 730 }
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6ffc9c4e93af..6c6fbff19752 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1970,7 +1970,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1970 resp_size); 1970 resp_size);
1971 INIT_UDATA(&uhw, buf + sizeof(cmd), 1971 INIT_UDATA(&uhw, buf + sizeof(cmd),
1972 (unsigned long)cmd.response + resp_size, 1972 (unsigned long)cmd.response + resp_size,
1973 in_len - sizeof(cmd), out_len - resp_size); 1973 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
1974 out_len - resp_size);
1974 1975
1975 memset(&cmd_ex, 0, sizeof(cmd_ex)); 1976 memset(&cmd_ex, 0, sizeof(cmd_ex));
1976 cmd_ex.user_handle = cmd.user_handle; 1977 cmd_ex.user_handle = cmd.user_handle;
@@ -3413,7 +3414,8 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3413 3414
3414 INIT_UDATA(&udata, buf + sizeof cmd, 3415 INIT_UDATA(&udata, buf + sizeof cmd,
3415 (unsigned long) cmd.response + sizeof resp, 3416 (unsigned long) cmd.response + sizeof resp,
3416 in_len - sizeof cmd, out_len - sizeof resp); 3417 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
3418 out_len - sizeof resp);
3417 3419
3418 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata); 3420 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
3419 if (ret) 3421 if (ret)
@@ -3439,7 +3441,8 @@ ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3439 3441
3440 INIT_UDATA(&udata, buf + sizeof cmd, 3442 INIT_UDATA(&udata, buf + sizeof cmd,
3441 (unsigned long) cmd.response + sizeof resp, 3443 (unsigned long) cmd.response + sizeof resp,
3442 in_len - sizeof cmd, out_len - sizeof resp); 3444 in_len - sizeof cmd - sizeof(struct ib_uverbs_cmd_hdr),
3445 out_len - sizeof resp);
3443 3446
3444 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata); 3447 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
3445 if (ret) 3448 if (ret)
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 26833bfa639b..d68f506c1922 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -817,17 +817,48 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
817 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; 817 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
818} 818}
819 819
820static void edit_counter(struct mlx4_counter *cnt, 820static void edit_counter(struct mlx4_counter *cnt, void *counters,
821 struct ib_pma_portcounters *pma_cnt) 821 __be16 attr_id)
822{ 822{
823 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data, 823 switch (attr_id) {
824 (be64_to_cpu(cnt->tx_bytes) >> 2)); 824 case IB_PMA_PORT_COUNTERS:
825 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data, 825 {
826 (be64_to_cpu(cnt->rx_bytes) >> 2)); 826 struct ib_pma_portcounters *pma_cnt =
827 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets, 827 (struct ib_pma_portcounters *)counters;
828 be64_to_cpu(cnt->tx_frames)); 828
829 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets, 829 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
830 be64_to_cpu(cnt->rx_frames)); 830 (be64_to_cpu(cnt->tx_bytes) >> 2));
831 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
832 (be64_to_cpu(cnt->rx_bytes) >> 2));
833 ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
834 be64_to_cpu(cnt->tx_frames));
835 ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
836 be64_to_cpu(cnt->rx_frames));
837 break;
838 }
839 case IB_PMA_PORT_COUNTERS_EXT:
840 {
841 struct ib_pma_portcounters_ext *pma_cnt_ext =
842 (struct ib_pma_portcounters_ext *)counters;
843
844 pma_cnt_ext->port_xmit_data =
845 cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
846 pma_cnt_ext->port_rcv_data =
847 cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
848 pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
849 pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
850 break;
851 }
852 }
853}
854
855static int iboe_process_mad_port_info(void *out_mad)
856{
857 struct ib_class_port_info cpi = {};
858
859 cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
860 memcpy(out_mad, &cpi, sizeof(cpi));
861 return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
831} 862}
832 863
833static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, 864static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -842,6 +873,9 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
842 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT) 873 if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
843 return -EINVAL; 874 return -EINVAL;
844 875
876 if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
877 return iboe_process_mad_port_info((void *)(out_mad->data + 40));
878
845 memset(&counter_stats, 0, sizeof(counter_stats)); 879 memset(&counter_stats, 0, sizeof(counter_stats));
846 mutex_lock(&dev->counters_table[port_num - 1].mutex); 880 mutex_lock(&dev->counters_table[port_num - 1].mutex);
847 list_for_each_entry(tmp_counter, 881 list_for_each_entry(tmp_counter,
@@ -863,7 +897,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
863 switch (counter_stats.counter_mode & 0xf) { 897 switch (counter_stats.counter_mode & 0xf) {
864 case 0: 898 case 0:
865 edit_counter(&counter_stats, 899 edit_counter(&counter_stats,
866 (void *)(out_mad->data + 40)); 900 (void *)(out_mad->data + 40),
901 in_mad->mad_hdr.attr_id);
867 err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; 902 err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
868 break; 903 break;
869 default: 904 default:
@@ -894,8 +929,10 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
894 */ 929 */
895 if (link == IB_LINK_LAYER_INFINIBAND) { 930 if (link == IB_LINK_LAYER_INFINIBAND) {
896 if (mlx4_is_slave(dev->dev) && 931 if (mlx4_is_slave(dev->dev) &&
897 in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT && 932 (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
898 in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS) 933 (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
934 in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
935 in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
899 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc, 936 return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
900 in_grh, in_mad, out_mad); 937 in_grh, in_mad, out_mad);
901 938
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bc5536f00b6c..fd97534762b8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1681,9 +1681,12 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1681 } 1681 }
1682 1682
1683 if (qp->ibqp.uobject) 1683 if (qp->ibqp.uobject)
1684 context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index); 1684 context->usr_page = cpu_to_be32(
1685 mlx4_to_hw_uar_index(dev->dev,
1686 to_mucontext(ibqp->uobject->context)->uar.index));
1685 else 1687 else
1686 context->usr_page = cpu_to_be32(dev->priv_uar.index); 1688 context->usr_page = cpu_to_be32(
1689 mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
1687 1690
1688 if (attr_mask & IB_QP_DEST_QPN) 1691 if (attr_mask & IB_QP_DEST_QPN)
1689 context->remote_qpn = cpu_to_be32(attr->dest_qp_num); 1692 context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 9116bc3988a6..34cb8e87c7b8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -270,8 +270,10 @@ static int sq_overhead(enum ib_qp_type qp_type)
270 /* fall through */ 270 /* fall through */
271 case IB_QPT_RC: 271 case IB_QPT_RC:
272 size += sizeof(struct mlx5_wqe_ctrl_seg) + 272 size += sizeof(struct mlx5_wqe_ctrl_seg) +
273 sizeof(struct mlx5_wqe_atomic_seg) + 273 max(sizeof(struct mlx5_wqe_atomic_seg) +
274 sizeof(struct mlx5_wqe_raddr_seg); 274 sizeof(struct mlx5_wqe_raddr_seg),
275 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
276 sizeof(struct mlx5_mkey_seg));
275 break; 277 break;
276 278
277 case IB_QPT_XRC_TGT: 279 case IB_QPT_XRC_TGT:
@@ -279,9 +281,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
279 281
280 case IB_QPT_UC: 282 case IB_QPT_UC:
281 size += sizeof(struct mlx5_wqe_ctrl_seg) + 283 size += sizeof(struct mlx5_wqe_ctrl_seg) +
282 sizeof(struct mlx5_wqe_raddr_seg) + 284 max(sizeof(struct mlx5_wqe_raddr_seg),
283 sizeof(struct mlx5_wqe_umr_ctrl_seg) + 285 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
284 sizeof(struct mlx5_mkey_seg); 286 sizeof(struct mlx5_mkey_seg));
285 break; 287 break;
286 288
287 case IB_QPT_UD: 289 case IB_QPT_UD:
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 4659256cd95e..3b2ddd64a371 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -75,7 +75,8 @@ static void mlx5_ib_srq_event(struct mlx5_core_srq *srq, enum mlx5_event type)
75 75
76static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq, 76static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
77 struct mlx5_create_srq_mbox_in **in, 77 struct mlx5_create_srq_mbox_in **in,
78 struct ib_udata *udata, int buf_size, int *inlen) 78 struct ib_udata *udata, int buf_size, int *inlen,
79 int is_xrc)
79{ 80{
80 struct mlx5_ib_dev *dev = to_mdev(pd->device); 81 struct mlx5_ib_dev *dev = to_mdev(pd->device);
81 struct mlx5_ib_create_srq ucmd = {}; 82 struct mlx5_ib_create_srq ucmd = {};
@@ -87,13 +88,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
87 int ncont; 88 int ncont;
88 u32 offset; 89 u32 offset;
89 u32 uidx = MLX5_IB_DEFAULT_UIDX; 90 u32 uidx = MLX5_IB_DEFAULT_UIDX;
90 int drv_data = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
91 91
92 if (drv_data < 0) 92 ucmdlen = min(udata->inlen, sizeof(ucmd));
93 return -EINVAL;
94
95 ucmdlen = (drv_data < sizeof(ucmd)) ?
96 drv_data : sizeof(ucmd);
97 93
98 if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) { 94 if (ib_copy_from_udata(&ucmd, udata, ucmdlen)) {
99 mlx5_ib_dbg(dev, "failed copy udata\n"); 95 mlx5_ib_dbg(dev, "failed copy udata\n");
@@ -103,15 +99,17 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
103 if (ucmd.reserved0 || ucmd.reserved1) 99 if (ucmd.reserved0 || ucmd.reserved1)
104 return -EINVAL; 100 return -EINVAL;
105 101
106 if (drv_data > sizeof(ucmd) && 102 if (udata->inlen > sizeof(ucmd) &&
107 !ib_is_udata_cleared(udata, sizeof(ucmd), 103 !ib_is_udata_cleared(udata, sizeof(ucmd),
108 drv_data - sizeof(ucmd))) 104 udata->inlen - sizeof(ucmd)))
109 return -EINVAL; 105 return -EINVAL;
110 106
111 err = get_srq_user_index(to_mucontext(pd->uobject->context), 107 if (is_xrc) {
112 &ucmd, udata->inlen, &uidx); 108 err = get_srq_user_index(to_mucontext(pd->uobject->context),
113 if (err) 109 &ucmd, udata->inlen, &uidx);
114 return err; 110 if (err)
111 return err;
112 }
115 113
116 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE); 114 srq->wq_sig = !!(ucmd.flags & MLX5_SRQ_FLAG_SIGNATURE);
117 115
@@ -151,7 +149,8 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
151 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 149 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
152 (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26); 150 (*in)->ctx.pgoff_cqn = cpu_to_be32(offset << 26);
153 151
154 if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) { 152 if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
153 is_xrc){
155 xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in, 154 xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
156 xrc_srq_context_entry); 155 xrc_srq_context_entry);
157 MLX5_SET(xrc_srqc, xsrqc, user_index, uidx); 156 MLX5_SET(xrc_srqc, xsrqc, user_index, uidx);
@@ -170,7 +169,7 @@ err_umem:
170 169
171static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq, 170static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
172 struct mlx5_create_srq_mbox_in **in, int buf_size, 171 struct mlx5_create_srq_mbox_in **in, int buf_size,
173 int *inlen) 172 int *inlen, int is_xrc)
174{ 173{
175 int err; 174 int err;
176 int i; 175 int i;
@@ -224,7 +223,8 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
224 223
225 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT; 224 (*in)->ctx.log_pg_sz = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
226 225
227 if (MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) { 226 if ((MLX5_CAP_GEN(dev->mdev, cqe_version) == MLX5_CQE_VERSION_V1) &&
227 is_xrc){
228 xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in, 228 xsrqc = MLX5_ADDR_OF(create_xrc_srq_in, *in,
229 xrc_srq_context_entry); 229 xrc_srq_context_entry);
230 /* 0xffffff means we ask to work with cqe version 0 */ 230 /* 0xffffff means we ask to work with cqe version 0 */
@@ -302,10 +302,14 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
302 desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs, 302 desc_size, init_attr->attr.max_wr, srq->msrq.max, srq->msrq.max_gs,
303 srq->msrq.max_avail_gather); 303 srq->msrq.max_avail_gather);
304 304
305 is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
306
305 if (pd->uobject) 307 if (pd->uobject)
306 err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen); 308 err = create_srq_user(pd, srq, &in, udata, buf_size, &inlen,
309 is_xrc);
307 else 310 else
308 err = create_srq_kernel(dev, srq, &in, buf_size, &inlen); 311 err = create_srq_kernel(dev, srq, &in, buf_size, &inlen,
312 is_xrc);
309 313
310 if (err) { 314 if (err) {
311 mlx5_ib_warn(dev, "create srq %s failed, err %d\n", 315 mlx5_ib_warn(dev, "create srq %s failed, err %d\n",
@@ -313,7 +317,6 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
313 goto err_srq; 317 goto err_srq;
314 } 318 }
315 319
316 is_xrc = (init_attr->srq_type == IB_SRQT_XRC);
317 in->ctx.state_log_sz = ilog2(srq->msrq.max); 320 in->ctx.state_log_sz = ilog2(srq->msrq.max);
318 flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24; 321 flgs = ((srq->msrq.wqe_shift - 4) | (is_xrc << 5) | (srq->wq_sig << 7)) << 24;
319 xrcdn = 0; 322 xrcdn = 0;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 040bb8b5cb15..12503f15fbd6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -323,9 +323,6 @@ struct ocrdma_cq {
323 */ 323 */
324 u32 max_hw_cqe; 324 u32 max_hw_cqe;
325 bool phase_change; 325 bool phase_change;
326 bool deferred_arm, deferred_sol;
327 bool first_arm;
328
329 spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization 326 spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
330 * to cq polling 327 * to cq polling
331 */ 328 */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 573849354cb9..f38743018cb4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -228,6 +228,11 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
228 228
229 ocrdma_alloc_pd_pool(dev); 229 ocrdma_alloc_pd_pool(dev);
230 230
231 if (!ocrdma_alloc_stats_resources(dev)) {
232 pr_err("%s: stats resource allocation failed\n", __func__);
233 goto alloc_err;
234 }
235
231 spin_lock_init(&dev->av_tbl.lock); 236 spin_lock_init(&dev->av_tbl.lock);
232 spin_lock_init(&dev->flush_q_lock); 237 spin_lock_init(&dev->flush_q_lock);
233 return 0; 238 return 0;
@@ -238,6 +243,7 @@ alloc_err:
238 243
239static void ocrdma_free_resources(struct ocrdma_dev *dev) 244static void ocrdma_free_resources(struct ocrdma_dev *dev)
240{ 245{
246 ocrdma_release_stats_resources(dev);
241 kfree(dev->stag_arr); 247 kfree(dev->stag_arr);
242 kfree(dev->qp_tbl); 248 kfree(dev->qp_tbl);
243 kfree(dev->cq_tbl); 249 kfree(dev->cq_tbl);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 86c303a620c1..255f774080a4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -64,10 +64,11 @@ static int ocrdma_add_stat(char *start, char *pcur,
64 return cpy_len; 64 return cpy_len;
65} 65}
66 66
67static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev) 67bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
68{ 68{
69 struct stats_mem *mem = &dev->stats_mem; 69 struct stats_mem *mem = &dev->stats_mem;
70 70
71 mutex_init(&dev->stats_lock);
71 /* Alloc mbox command mem*/ 72 /* Alloc mbox command mem*/
72 mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req), 73 mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
73 sizeof(struct ocrdma_rdma_stats_resp)); 74 sizeof(struct ocrdma_rdma_stats_resp));
@@ -91,13 +92,14 @@ static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
91 return true; 92 return true;
92} 93}
93 94
94static void ocrdma_release_stats_mem(struct ocrdma_dev *dev) 95void ocrdma_release_stats_resources(struct ocrdma_dev *dev)
95{ 96{
96 struct stats_mem *mem = &dev->stats_mem; 97 struct stats_mem *mem = &dev->stats_mem;
97 98
98 if (mem->va) 99 if (mem->va)
99 dma_free_coherent(&dev->nic_info.pdev->dev, mem->size, 100 dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
100 mem->va, mem->pa); 101 mem->va, mem->pa);
102 mem->va = NULL;
101 kfree(mem->debugfs_mem); 103 kfree(mem->debugfs_mem);
102} 104}
103 105
@@ -838,15 +840,9 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
838 &dev->reset_stats, &ocrdma_dbg_ops)) 840 &dev->reset_stats, &ocrdma_dbg_ops))
839 goto err; 841 goto err;
840 842
841 /* Now create dma_mem for stats mbx command */
842 if (!ocrdma_alloc_stats_mem(dev))
843 goto err;
844
845 mutex_init(&dev->stats_lock);
846 843
847 return; 844 return;
848err: 845err:
849 ocrdma_release_stats_mem(dev);
850 debugfs_remove_recursive(dev->dir); 846 debugfs_remove_recursive(dev->dir);
851 dev->dir = NULL; 847 dev->dir = NULL;
852} 848}
@@ -855,9 +851,7 @@ void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
855{ 851{
856 if (!dev->dir) 852 if (!dev->dir)
857 return; 853 return;
858 debugfs_remove(dev->dir); 854 debugfs_remove_recursive(dev->dir);
859 mutex_destroy(&dev->stats_lock);
860 ocrdma_release_stats_mem(dev);
861} 855}
862 856
863void ocrdma_init_debugfs(void) 857void ocrdma_init_debugfs(void)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index c9e58d04c7b8..bba1fec4f11f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -65,6 +65,8 @@ enum OCRDMA_STATS_TYPE {
65 65
66void ocrdma_rem_debugfs(void); 66void ocrdma_rem_debugfs(void);
67void ocrdma_init_debugfs(void); 67void ocrdma_init_debugfs(void);
68bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev);
69void ocrdma_release_stats_resources(struct ocrdma_dev *dev);
68void ocrdma_rem_port_stats(struct ocrdma_dev *dev); 70void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
69void ocrdma_add_port_stats(struct ocrdma_dev *dev); 71void ocrdma_add_port_stats(struct ocrdma_dev *dev);
70int ocrdma_pma_counters(struct ocrdma_dev *dev, 72int ocrdma_pma_counters(struct ocrdma_dev *dev,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index d4c687b548d8..12420e4ecf3d 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -125,8 +125,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
125 IB_DEVICE_SYS_IMAGE_GUID | 125 IB_DEVICE_SYS_IMAGE_GUID |
126 IB_DEVICE_LOCAL_DMA_LKEY | 126 IB_DEVICE_LOCAL_DMA_LKEY |
127 IB_DEVICE_MEM_MGT_EXTENSIONS; 127 IB_DEVICE_MEM_MGT_EXTENSIONS;
128 attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge); 128 attr->max_sge = dev->attr.max_send_sge;
129 attr->max_sge_rd = 0; 129 attr->max_sge_rd = attr->max_sge;
130 attr->max_cq = dev->attr.max_cq; 130 attr->max_cq = dev->attr.max_cq;
131 attr->max_cqe = dev->attr.max_cqe; 131 attr->max_cqe = dev->attr.max_cqe;
132 attr->max_mr = dev->attr.max_mr; 132 attr->max_mr = dev->attr.max_mr;
@@ -1094,7 +1094,6 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
1094 spin_lock_init(&cq->comp_handler_lock); 1094 spin_lock_init(&cq->comp_handler_lock);
1095 INIT_LIST_HEAD(&cq->sq_head); 1095 INIT_LIST_HEAD(&cq->sq_head);
1096 INIT_LIST_HEAD(&cq->rq_head); 1096 INIT_LIST_HEAD(&cq->rq_head);
1097 cq->first_arm = true;
1098 1097
1099 if (ib_ctx) { 1098 if (ib_ctx) {
1100 uctx = get_ocrdma_ucontext(ib_ctx); 1099 uctx = get_ocrdma_ucontext(ib_ctx);
@@ -2726,8 +2725,7 @@ static int ocrdma_update_ud_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe)
2726 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT; 2725 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2727 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) & 2726 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2728 OCRDMA_CQE_SRCQP_MASK; 2727 OCRDMA_CQE_SRCQP_MASK;
2729 ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) & 2728 ibwc->pkey_index = 0;
2730 OCRDMA_CQE_PKEY_MASK;
2731 ibwc->wc_flags = IB_WC_GRH; 2729 ibwc->wc_flags = IB_WC_GRH;
2732 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >> 2730 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2733 OCRDMA_CQE_UD_XFER_LEN_SHIFT); 2731 OCRDMA_CQE_UD_XFER_LEN_SHIFT);
@@ -2911,12 +2909,9 @@ expand_cqe:
2911 } 2909 }
2912stop_cqe: 2910stop_cqe:
2913 cq->getp = cur_getp; 2911 cq->getp = cur_getp;
2914 if (cq->deferred_arm || polled_hw_cqes) { 2912
2915 ocrdma_ring_cq_db(dev, cq->id, cq->deferred_arm, 2913 if (polled_hw_cqes)
2916 cq->deferred_sol, polled_hw_cqes); 2914 ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);
2917 cq->deferred_arm = false;
2918 cq->deferred_sol = false;
2919 }
2920 2915
2921 return i; 2916 return i;
2922} 2917}
@@ -3000,13 +2995,7 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
3000 if (cq_flags & IB_CQ_SOLICITED) 2995 if (cq_flags & IB_CQ_SOLICITED)
3001 sol_needed = true; 2996 sol_needed = true;
3002 2997
3003 if (cq->first_arm) { 2998 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
3004 ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
3005 cq->first_arm = false;
3006 }
3007
3008 cq->deferred_arm = true;
3009 cq->deferred_sol = sol_needed;
3010 spin_unlock_irqrestore(&cq->cq_lock, flags); 2999 spin_unlock_irqrestore(&cq->cq_lock, flags);
3011 3000
3012 return 0; 3001 return 0;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5ea0c14070d1..fa9c42ff1fb0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -245,8 +245,6 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
245 skb_reset_mac_header(skb); 245 skb_reset_mac_header(skb);
246 skb_pull(skb, IPOIB_ENCAP_LEN); 246 skb_pull(skb, IPOIB_ENCAP_LEN);
247 247
248 skb->truesize = SKB_TRUESIZE(skb->len);
249
250 ++dev->stats.rx_packets; 248 ++dev->stats.rx_packets;
251 dev->stats.rx_bytes += skb->len; 249 dev->stats.rx_bytes += skb->len;
252 250
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 050dfa175d16..25889311b1e9 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -456,7 +456,10 @@ out_locked:
456 return status; 456 return status;
457} 457}
458 458
459static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast) 459/*
460 * Caller must hold 'priv->lock'
461 */
462static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
460{ 463{
461 struct ipoib_dev_priv *priv = netdev_priv(dev); 464 struct ipoib_dev_priv *priv = netdev_priv(dev);
462 struct ib_sa_multicast *multicast; 465 struct ib_sa_multicast *multicast;
@@ -466,6 +469,10 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
466 ib_sa_comp_mask comp_mask; 469 ib_sa_comp_mask comp_mask;
467 int ret = 0; 470 int ret = 0;
468 471
472 if (!priv->broadcast ||
473 !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
474 return -EINVAL;
475
469 ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw); 476 ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
470 477
471 rec.mgid = mcast->mcmember.mgid; 478 rec.mgid = mcast->mcmember.mgid;
@@ -525,20 +532,23 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
525 rec.join_state = 4; 532 rec.join_state = 4;
526#endif 533#endif
527 } 534 }
535 spin_unlock_irq(&priv->lock);
528 536
529 multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port, 537 multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
530 &rec, comp_mask, GFP_KERNEL, 538 &rec, comp_mask, GFP_KERNEL,
531 ipoib_mcast_join_complete, mcast); 539 ipoib_mcast_join_complete, mcast);
540 spin_lock_irq(&priv->lock);
532 if (IS_ERR(multicast)) { 541 if (IS_ERR(multicast)) {
533 ret = PTR_ERR(multicast); 542 ret = PTR_ERR(multicast);
534 ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret); 543 ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
535 spin_lock_irq(&priv->lock);
536 /* Requeue this join task with a backoff delay */ 544 /* Requeue this join task with a backoff delay */
537 __ipoib_mcast_schedule_join_thread(priv, mcast, 1); 545 __ipoib_mcast_schedule_join_thread(priv, mcast, 1);
538 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 546 clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
539 spin_unlock_irq(&priv->lock); 547 spin_unlock_irq(&priv->lock);
540 complete(&mcast->done); 548 complete(&mcast->done);
549 spin_lock_irq(&priv->lock);
541 } 550 }
551 return 0;
542} 552}
543 553
544void ipoib_mcast_join_task(struct work_struct *work) 554void ipoib_mcast_join_task(struct work_struct *work)
@@ -620,9 +630,10 @@ void ipoib_mcast_join_task(struct work_struct *work)
620 /* Found the next unjoined group */ 630 /* Found the next unjoined group */
621 init_completion(&mcast->done); 631 init_completion(&mcast->done);
622 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 632 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
623 spin_unlock_irq(&priv->lock); 633 if (ipoib_mcast_join(dev, mcast)) {
624 ipoib_mcast_join(dev, mcast); 634 spin_unlock_irq(&priv->lock);
625 spin_lock_irq(&priv->lock); 635 return;
636 }
626 } else if (!delay_until || 637 } else if (!delay_until ||
627 time_before(mcast->delay_until, delay_until)) 638 time_before(mcast->delay_until, delay_until))
628 delay_until = mcast->delay_until; 639 delay_until = mcast->delay_until;
@@ -641,10 +652,9 @@ out:
641 if (mcast) { 652 if (mcast) {
642 init_completion(&mcast->done); 653 init_completion(&mcast->done);
643 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags); 654 set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
655 ipoib_mcast_join(dev, mcast);
644 } 656 }
645 spin_unlock_irq(&priv->lock); 657 spin_unlock_irq(&priv->lock);
646 if (mcast)
647 ipoib_mcast_join(dev, mcast);
648} 658}
649 659
650int ipoib_mcast_start_thread(struct net_device *dev) 660int ipoib_mcast_start_thread(struct net_device *dev)
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 6727954ab74b..e8a84d12b7ff 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1207,7 +1207,6 @@ static void xpad_led_disconnect(struct usb_xpad *xpad)
1207#else 1207#else
1208static int xpad_led_probe(struct usb_xpad *xpad) { return 0; } 1208static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
1209static void xpad_led_disconnect(struct usb_xpad *xpad) { } 1209static void xpad_led_disconnect(struct usb_xpad *xpad) { }
1210static void xpad_identify_controller(struct usb_xpad *xpad) { }
1211#endif 1210#endif
1212 1211
1213static int xpad_start_input(struct usb_xpad *xpad) 1212static int xpad_start_input(struct usb_xpad *xpad)
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 4d446d5085aa..c01a1d648f9f 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -235,7 +235,7 @@ struct adp5589_kpad {
235 unsigned short gpimapsize; 235 unsigned short gpimapsize;
236 unsigned extend_cfg; 236 unsigned extend_cfg;
237 bool is_adp5585; 237 bool is_adp5585;
238 bool adp5585_support_row5; 238 bool support_row5;
239#ifdef CONFIG_GPIOLIB 239#ifdef CONFIG_GPIOLIB
240 unsigned char gpiomap[ADP5589_MAXGPIO]; 240 unsigned char gpiomap[ADP5589_MAXGPIO];
241 bool export_gpio; 241 bool export_gpio;
@@ -485,7 +485,7 @@ static int adp5589_build_gpiomap(struct adp5589_kpad *kpad,
485 if (kpad->extend_cfg & C4_EXTEND_CFG) 485 if (kpad->extend_cfg & C4_EXTEND_CFG)
486 pin_used[kpad->var->c4_extend_cfg] = true; 486 pin_used[kpad->var->c4_extend_cfg] = true;
487 487
488 if (!kpad->adp5585_support_row5) 488 if (!kpad->support_row5)
489 pin_used[5] = true; 489 pin_used[5] = true;
490 490
491 for (i = 0; i < kpad->var->maxgpio; i++) 491 for (i = 0; i < kpad->var->maxgpio; i++)
@@ -884,12 +884,13 @@ static int adp5589_probe(struct i2c_client *client,
884 884
885 switch (id->driver_data) { 885 switch (id->driver_data) {
886 case ADP5585_02: 886 case ADP5585_02:
887 kpad->adp5585_support_row5 = true; 887 kpad->support_row5 = true;
888 case ADP5585_01: 888 case ADP5585_01:
889 kpad->is_adp5585 = true; 889 kpad->is_adp5585 = true;
890 kpad->var = &const_adp5585; 890 kpad->var = &const_adp5585;
891 break; 891 break;
892 case ADP5589: 892 case ADP5589:
893 kpad->support_row5 = true;
893 kpad->var = &const_adp5589; 894 kpad->var = &const_adp5589;
894 break; 895 break;
895 } 896 }
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 378db10001df..4401be225d64 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -304,8 +304,10 @@ static int cap11xx_init_leds(struct device *dev,
304 led->cdev.brightness = LED_OFF; 304 led->cdev.brightness = LED_OFF;
305 305
306 error = of_property_read_u32(child, "reg", &reg); 306 error = of_property_read_u32(child, "reg", &reg);
307 if (error != 0 || reg >= num_leds) 307 if (error != 0 || reg >= num_leds) {
308 of_node_put(child);
308 return -EINVAL; 309 return -EINVAL;
310 }
309 311
310 led->reg = reg; 312 led->reg = reg;
311 led->priv = priv; 313 led->priv = priv;
@@ -313,8 +315,10 @@ static int cap11xx_init_leds(struct device *dev,
313 INIT_WORK(&led->work, cap11xx_led_work); 315 INIT_WORK(&led->work, cap11xx_led_work);
314 316
315 error = devm_led_classdev_register(dev, &led->cdev); 317 error = devm_led_classdev_register(dev, &led->cdev);
316 if (error) 318 if (error) {
319 of_node_put(child);
317 return error; 320 return error;
321 }
318 322
319 priv->num_leds++; 323 priv->num_leds++;
320 led++; 324 led++;
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index d6d16fa78281..1f2337abcf2f 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -733,7 +733,7 @@ config INPUT_XEN_KBDDEV_FRONTEND
733 module will be called xen-kbdfront. 733 module will be called xen-kbdfront.
734 734
735config INPUT_SIRFSOC_ONKEY 735config INPUT_SIRFSOC_ONKEY
736 bool "CSR SiRFSoC power on/off/suspend key support" 736 tristate "CSR SiRFSoC power on/off/suspend key support"
737 depends on ARCH_SIRF && OF 737 depends on ARCH_SIRF && OF
738 default y 738 default y
739 help 739 help
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index 9d5b89befe6f..ed7237f19539 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -101,7 +101,7 @@ static void sirfsoc_pwrc_close(struct input_dev *input)
101static const struct of_device_id sirfsoc_pwrc_of_match[] = { 101static const struct of_device_id sirfsoc_pwrc_of_match[] = {
102 { .compatible = "sirf,prima2-pwrc" }, 102 { .compatible = "sirf,prima2-pwrc" },
103 {}, 103 {},
104} 104};
105MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match); 105MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match);
106 106
107static int sirfsoc_pwrc_probe(struct platform_device *pdev) 107static int sirfsoc_pwrc_probe(struct platform_device *pdev)
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index e272f06258ce..a3f0f5a47490 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -458,8 +458,6 @@ int vmmouse_init(struct psmouse *psmouse)
458 priv->abs_dev = abs_dev; 458 priv->abs_dev = abs_dev;
459 psmouse->private = priv; 459 psmouse->private = priv;
460 460
461 input_set_capability(rel_dev, EV_REL, REL_WHEEL);
462
463 /* Set up and register absolute device */ 461 /* Set up and register absolute device */
464 snprintf(priv->phys, sizeof(priv->phys), "%s/input1", 462 snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
465 psmouse->ps2dev.serio->phys); 463 psmouse->ps2dev.serio->phys);
@@ -475,10 +473,6 @@ int vmmouse_init(struct psmouse *psmouse)
475 abs_dev->id.version = psmouse->model; 473 abs_dev->id.version = psmouse->model;
476 abs_dev->dev.parent = &psmouse->ps2dev.serio->dev; 474 abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
477 475
478 error = input_register_device(priv->abs_dev);
479 if (error)
480 goto init_fail;
481
482 /* Set absolute device capabilities */ 476 /* Set absolute device capabilities */
483 input_set_capability(abs_dev, EV_KEY, BTN_LEFT); 477 input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
484 input_set_capability(abs_dev, EV_KEY, BTN_RIGHT); 478 input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
@@ -488,6 +482,13 @@ int vmmouse_init(struct psmouse *psmouse)
488 input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0); 482 input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
489 input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0); 483 input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
490 484
485 error = input_register_device(priv->abs_dev);
486 if (error)
487 goto init_fail;
488
489 /* Add wheel capability to the relative device */
490 input_set_capability(rel_dev, EV_REL, REL_WHEEL);
491
491 psmouse->protocol_handler = vmmouse_process_byte; 492 psmouse->protocol_handler = vmmouse_process_byte;
492 psmouse->disconnect = vmmouse_disconnect; 493 psmouse->disconnect = vmmouse_disconnect;
493 psmouse->reconnect = vmmouse_reconnect; 494 psmouse->reconnect = vmmouse_reconnect;
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 8f828975ab10..1ca7f551e2da 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -134,7 +134,7 @@ static void serio_find_driver(struct serio *serio)
134 int error; 134 int error;
135 135
136 error = device_attach(&serio->dev); 136 error = device_attach(&serio->dev);
137 if (error < 0) 137 if (error < 0 && error != -EPROBE_DEFER)
138 dev_warn(&serio->dev, 138 dev_warn(&serio->dev,
139 "device_attach() failed for %s (%s), error: %d\n", 139 "device_attach() failed for %s (%s), error: %d\n",
140 serio->phys, serio->name, error); 140 serio->phys, serio->name, error);
diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c
index 5d4903a402cc..69828d015d45 100644
--- a/drivers/input/touchscreen/colibri-vf50-ts.c
+++ b/drivers/input/touchscreen/colibri-vf50-ts.c
@@ -21,6 +21,7 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/of.h>
24#include <linux/pinctrl/consumer.h> 25#include <linux/pinctrl/consumer.h>
25#include <linux/platform_device.h> 26#include <linux/platform_device.h>
26#include <linux/slab.h> 27#include <linux/slab.h>
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 0b0f8c17f3f7..23fbe382da8b 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -822,16 +822,22 @@ static void edt_ft5x06_ts_get_defaults(struct device *dev,
822 int error; 822 int error;
823 823
824 error = device_property_read_u32(dev, "threshold", &val); 824 error = device_property_read_u32(dev, "threshold", &val);
825 if (!error) 825 if (!error) {
826 reg_addr->reg_threshold = val; 826 edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold, val);
827 tsdata->threshold = val;
828 }
827 829
828 error = device_property_read_u32(dev, "gain", &val); 830 error = device_property_read_u32(dev, "gain", &val);
829 if (!error) 831 if (!error) {
830 reg_addr->reg_gain = val; 832 edt_ft5x06_register_write(tsdata, reg_addr->reg_gain, val);
833 tsdata->gain = val;
834 }
831 835
832 error = device_property_read_u32(dev, "offset", &val); 836 error = device_property_read_u32(dev, "offset", &val);
833 if (!error) 837 if (!error) {
834 reg_addr->reg_offset = val; 838 edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val);
839 tsdata->offset = val;
840 }
835} 841}
836 842
837static void 843static void
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index e5e223938eec..374c129219ef 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -114,6 +114,7 @@ struct kmem_cache *amd_iommu_irq_cache;
114 114
115static void update_domain(struct protection_domain *domain); 115static void update_domain(struct protection_domain *domain);
116static int protection_domain_init(struct protection_domain *domain); 116static int protection_domain_init(struct protection_domain *domain);
117static void detach_device(struct device *dev);
117 118
118/* 119/*
119 * For dynamic growth the aperture size is split into ranges of 128MB of 120 * For dynamic growth the aperture size is split into ranges of 128MB of
@@ -384,6 +385,9 @@ static void iommu_uninit_device(struct device *dev)
384 if (!dev_data) 385 if (!dev_data)
385 return; 386 return;
386 387
388 if (dev_data->domain)
389 detach_device(dev);
390
387 iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev, 391 iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
388 dev); 392 dev);
389 393
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 013bdfff2d4d..bf4959f4225b 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -228,6 +228,10 @@ static int amd_iommu_enable_interrupts(void);
228static int __init iommu_go_to_state(enum iommu_init_state state); 228static int __init iommu_go_to_state(enum iommu_init_state state);
229static void init_device_table_dma(void); 229static void init_device_table_dma(void);
230 230
231static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
232 u8 bank, u8 cntr, u8 fxn,
233 u64 *value, bool is_write);
234
231static inline void update_last_devid(u16 devid) 235static inline void update_last_devid(u16 devid)
232{ 236{
233 if (devid > amd_iommu_last_bdf) 237 if (devid > amd_iommu_last_bdf)
@@ -1016,6 +1020,34 @@ static void amd_iommu_erratum_746_workaround(struct amd_iommu *iommu)
1016} 1020}
1017 1021
1018/* 1022/*
1023 * Family15h Model 30h-3fh (IOMMU Mishandles ATS Write Permission)
1024 * Workaround:
1025 * BIOS should enable ATS write permission check by setting
1026 * L2_DEBUG_3[AtsIgnoreIWDis](D0F2xF4_x47[0]) = 1b
1027 */
1028static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
1029{
1030 u32 value;
1031
1032 if ((boot_cpu_data.x86 != 0x15) ||
1033 (boot_cpu_data.x86_model < 0x30) ||
1034 (boot_cpu_data.x86_model > 0x3f))
1035 return;
1036
1037 /* Test L2_DEBUG_3[AtsIgnoreIWDis] == 1 */
1038 value = iommu_read_l2(iommu, 0x47);
1039
1040 if (value & BIT(0))
1041 return;
1042
1043 /* Set L2_DEBUG_3[AtsIgnoreIWDis] = 1 */
1044 iommu_write_l2(iommu, 0x47, value | BIT(0));
1045
1046 pr_info("AMD-Vi: Applying ATS write check workaround for IOMMU at %s\n",
1047 dev_name(&iommu->dev->dev));
1048}
1049
1050/*
1019 * This function clues the initialization function for one IOMMU 1051 * This function clues the initialization function for one IOMMU
1020 * together and also allocates the command buffer and programs the 1052 * together and also allocates the command buffer and programs the
1021 * hardware. It does NOT enable the IOMMU. This is done afterwards. 1053 * hardware. It does NOT enable the IOMMU. This is done afterwards.
@@ -1142,8 +1174,8 @@ static void init_iommu_perf_ctr(struct amd_iommu *iommu)
1142 amd_iommu_pc_present = true; 1174 amd_iommu_pc_present = true;
1143 1175
1144 /* Check if the performance counters can be written to */ 1176 /* Check if the performance counters can be written to */
1145 if ((0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val, true)) || 1177 if ((0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val, true)) ||
1146 (0 != amd_iommu_pc_get_set_reg_val(0, 0, 0, 0, &val2, false)) || 1178 (0 != iommu_pc_get_set_reg_val(iommu, 0, 0, 0, &val2, false)) ||
1147 (val != val2)) { 1179 (val != val2)) {
1148 pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n"); 1180 pr_err("AMD-Vi: Unable to write to IOMMU perf counter.\n");
1149 amd_iommu_pc_present = false; 1181 amd_iommu_pc_present = false;
@@ -1284,6 +1316,7 @@ static int iommu_init_pci(struct amd_iommu *iommu)
1284 } 1316 }
1285 1317
1286 amd_iommu_erratum_746_workaround(iommu); 1318 amd_iommu_erratum_746_workaround(iommu);
1319 amd_iommu_ats_write_check_workaround(iommu);
1287 1320
1288 iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu, 1321 iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
1289 amd_iommu_groups, "ivhd%d", 1322 amd_iommu_groups, "ivhd%d",
@@ -2283,22 +2316,15 @@ u8 amd_iommu_pc_get_max_counters(u16 devid)
2283} 2316}
2284EXPORT_SYMBOL(amd_iommu_pc_get_max_counters); 2317EXPORT_SYMBOL(amd_iommu_pc_get_max_counters);
2285 2318
2286int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn, 2319static int iommu_pc_get_set_reg_val(struct amd_iommu *iommu,
2320 u8 bank, u8 cntr, u8 fxn,
2287 u64 *value, bool is_write) 2321 u64 *value, bool is_write)
2288{ 2322{
2289 struct amd_iommu *iommu;
2290 u32 offset; 2323 u32 offset;
2291 u32 max_offset_lim; 2324 u32 max_offset_lim;
2292 2325
2293 /* Make sure the IOMMU PC resource is available */
2294 if (!amd_iommu_pc_present)
2295 return -ENODEV;
2296
2297 /* Locate the iommu associated with the device ID */
2298 iommu = amd_iommu_rlookup_table[devid];
2299
2300 /* Check for valid iommu and pc register indexing */ 2326 /* Check for valid iommu and pc register indexing */
2301 if (WARN_ON((iommu == NULL) || (fxn > 0x28) || (fxn & 7))) 2327 if (WARN_ON((fxn > 0x28) || (fxn & 7)))
2302 return -ENODEV; 2328 return -ENODEV;
2303 2329
2304 offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn); 2330 offset = (u32)(((0x40|bank) << 12) | (cntr << 8) | fxn);
@@ -2322,3 +2348,16 @@ int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
2322 return 0; 2348 return 0;
2323} 2349}
2324EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val); 2350EXPORT_SYMBOL(amd_iommu_pc_get_set_reg_val);
2351
2352int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
2353 u64 *value, bool is_write)
2354{
2355 struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
2356
2357 /* Make sure the IOMMU PC resource is available */
2358 if (!amd_iommu_pc_present || iommu == NULL)
2359 return -ENODEV;
2360
2361 return iommu_pc_get_set_reg_val(iommu, bank, cntr, fxn,
2362 value, is_write);
2363}
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 62a400c5ba06..8ffd7568fc91 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -329,7 +329,8 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
329 /* Only care about add/remove events for physical functions */ 329 /* Only care about add/remove events for physical functions */
330 if (pdev->is_virtfn) 330 if (pdev->is_virtfn)
331 return NOTIFY_DONE; 331 return NOTIFY_DONE;
332 if (action != BUS_NOTIFY_ADD_DEVICE && action != BUS_NOTIFY_DEL_DEVICE) 332 if (action != BUS_NOTIFY_ADD_DEVICE &&
333 action != BUS_NOTIFY_REMOVED_DEVICE)
333 return NOTIFY_DONE; 334 return NOTIFY_DONE;
334 335
335 info = dmar_alloc_pci_notify_info(pdev, action); 336 info = dmar_alloc_pci_notify_info(pdev, action);
@@ -339,7 +340,7 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
339 down_write(&dmar_global_lock); 340 down_write(&dmar_global_lock);
340 if (action == BUS_NOTIFY_ADD_DEVICE) 341 if (action == BUS_NOTIFY_ADD_DEVICE)
341 dmar_pci_bus_add_dev(info); 342 dmar_pci_bus_add_dev(info);
342 else if (action == BUS_NOTIFY_DEL_DEVICE) 343 else if (action == BUS_NOTIFY_REMOVED_DEVICE)
343 dmar_pci_bus_del_dev(info); 344 dmar_pci_bus_del_dev(info);
344 up_write(&dmar_global_lock); 345 up_write(&dmar_global_lock);
345 346
@@ -1353,7 +1354,7 @@ void dmar_disable_qi(struct intel_iommu *iommu)
1353 1354
1354 raw_spin_lock_irqsave(&iommu->register_lock, flags); 1355 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1355 1356
1356 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 1357 sts = readl(iommu->reg + DMAR_GSTS_REG);
1357 if (!(sts & DMA_GSTS_QIES)) 1358 if (!(sts & DMA_GSTS_QIES))
1358 goto end; 1359 goto end;
1359 1360
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 986a53e3eb96..a2e1b7f14df2 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4367,7 +4367,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4367 rmrru->devices_cnt); 4367 rmrru->devices_cnt);
4368 if(ret < 0) 4368 if(ret < 0)
4369 return ret; 4369 return ret;
4370 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { 4370 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4371 dmar_remove_dev_scope(info, rmrr->segment, 4371 dmar_remove_dev_scope(info, rmrr->segment,
4372 rmrru->devices, rmrru->devices_cnt); 4372 rmrru->devices, rmrru->devices_cnt);
4373 } 4373 }
@@ -4387,7 +4387,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
4387 break; 4387 break;
4388 else if(ret < 0) 4388 else if(ret < 0)
4389 return ret; 4389 return ret;
4390 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { 4390 } else if (info->event == BUS_NOTIFY_REMOVED_DEVICE) {
4391 if (dmar_remove_dev_scope(info, atsr->segment, 4391 if (dmar_remove_dev_scope(info, atsr->segment,
4392 atsru->devices, atsru->devices_cnt)) 4392 atsru->devices, atsru->devices_cnt))
4393 break; 4393 break;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 50464833d0b8..d9939fa9b588 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -249,12 +249,30 @@ static void intel_flush_pasid_dev(struct intel_svm *svm, struct intel_svm_dev *s
249static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) 249static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
250{ 250{
251 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); 251 struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
252 struct intel_svm_dev *sdev;
252 253
254 /* This might end up being called from exit_mmap(), *before* the page
255 * tables are cleared. And __mmu_notifier_release() will delete us from
256 * the list of notifiers so that our invalidate_range() callback doesn't
257 * get called when the page tables are cleared. So we need to protect
258 * against hardware accessing those page tables.
259 *
260 * We do it by clearing the entry in the PASID table and then flushing
261 * the IOTLB and the PASID table caches. This might upset hardware;
262 * perhaps we'll want to point the PASID to a dummy PGD (like the zero
263 * page) so that we end up taking a fault that the hardware really
264 * *has* to handle gracefully without affecting other processes.
265 */
253 svm->iommu->pasid_table[svm->pasid].val = 0; 266 svm->iommu->pasid_table[svm->pasid].val = 0;
267 wmb();
268
269 rcu_read_lock();
270 list_for_each_entry_rcu(sdev, &svm->devs, list) {
271 intel_flush_pasid_dev(svm, sdev, svm->pasid);
272 intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
273 }
274 rcu_read_unlock();
254 275
255 /* There's no need to do any flush because we can't get here if there
256 * are any devices left anyway. */
257 WARN_ON(!list_empty(&svm->devs));
258} 276}
259 277
260static const struct mmu_notifier_ops intel_mmuops = { 278static const struct mmu_notifier_ops intel_mmuops = {
@@ -379,7 +397,6 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
379 goto out; 397 goto out;
380 } 398 }
381 iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1; 399 iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
382 mm = NULL;
383 } else 400 } else
384 iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11); 401 iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
385 wmb(); 402 wmb();
@@ -442,11 +459,11 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
442 kfree_rcu(sdev, rcu); 459 kfree_rcu(sdev, rcu);
443 460
444 if (list_empty(&svm->devs)) { 461 if (list_empty(&svm->devs)) {
445 mmu_notifier_unregister(&svm->notifier, svm->mm);
446 462
447 idr_remove(&svm->iommu->pasid_idr, svm->pasid); 463 idr_remove(&svm->iommu->pasid_idr, svm->pasid);
448 if (svm->mm) 464 if (svm->mm)
449 mmput(svm->mm); 465 mmu_notifier_unregister(&svm->notifier, svm->mm);
466
450 /* We mandate that no page faults may be outstanding 467 /* We mandate that no page faults may be outstanding
451 * for the PASID when intel_svm_unbind_mm() is called. 468 * for the PASID when intel_svm_unbind_mm() is called.
452 * If that is not obeyed, subtle errors will happen. 469 * If that is not obeyed, subtle errors will happen.
@@ -507,6 +524,10 @@ static irqreturn_t prq_event_thread(int irq, void *d)
507 struct intel_svm *svm = NULL; 524 struct intel_svm *svm = NULL;
508 int head, tail, handled = 0; 525 int head, tail, handled = 0;
509 526
527 /* Clear PPR bit before reading head/tail registers, to
528 * ensure that we get a new interrupt if needed. */
529 writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
530
510 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; 531 tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
511 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; 532 head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
512 while (head != tail) { 533 while (head != tail) {
@@ -551,6 +572,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
551 * any faults on kernel addresses. */ 572 * any faults on kernel addresses. */
552 if (!svm->mm) 573 if (!svm->mm)
553 goto bad_req; 574 goto bad_req;
575 /* If the mm is already defunct, don't handle faults. */
576 if (!atomic_inc_not_zero(&svm->mm->mm_users))
577 goto bad_req;
554 down_read(&svm->mm->mmap_sem); 578 down_read(&svm->mm->mmap_sem);
555 vma = find_extend_vma(svm->mm, address); 579 vma = find_extend_vma(svm->mm, address);
556 if (!vma || address < vma->vm_start) 580 if (!vma || address < vma->vm_start)
@@ -567,6 +591,7 @@ static irqreturn_t prq_event_thread(int irq, void *d)
567 result = QI_RESP_SUCCESS; 591 result = QI_RESP_SUCCESS;
568 invalid: 592 invalid:
569 up_read(&svm->mm->mmap_sem); 593 up_read(&svm->mm->mmap_sem);
594 mmput(svm->mm);
570 bad_req: 595 bad_req:
571 /* Accounting for major/minor faults? */ 596 /* Accounting for major/minor faults? */
572 rcu_read_lock(); 597 rcu_read_lock();
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index c12ba4516df2..ac596928f6b4 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -629,7 +629,7 @@ static void iommu_disable_irq_remapping(struct intel_iommu *iommu)
629 629
630 raw_spin_lock_irqsave(&iommu->register_lock, flags); 630 raw_spin_lock_irqsave(&iommu->register_lock, flags);
631 631
632 sts = dmar_readq(iommu->reg + DMAR_GSTS_REG); 632 sts = readl(iommu->reg + DMAR_GSTS_REG);
633 if (!(sts & DMA_GSTS_IRES)) 633 if (!(sts & DMA_GSTS_IRES))
634 goto end; 634 goto end;
635 635
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 3447549fcc93..43dfd15c1dd2 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -66,7 +66,10 @@ struct its_node {
66 unsigned long phys_base; 66 unsigned long phys_base;
67 struct its_cmd_block *cmd_base; 67 struct its_cmd_block *cmd_base;
68 struct its_cmd_block *cmd_write; 68 struct its_cmd_block *cmd_write;
69 void *tables[GITS_BASER_NR_REGS]; 69 struct {
70 void *base;
71 u32 order;
72 } tables[GITS_BASER_NR_REGS];
70 struct its_collection *collections; 73 struct its_collection *collections;
71 struct list_head its_device_list; 74 struct list_head its_device_list;
72 u64 flags; 75 u64 flags;
@@ -75,6 +78,9 @@ struct its_node {
75 78
76#define ITS_ITT_ALIGN SZ_256 79#define ITS_ITT_ALIGN SZ_256
77 80
81/* Convert page order to size in bytes */
82#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
83
78struct event_lpi_map { 84struct event_lpi_map {
79 unsigned long *lpi_map; 85 unsigned long *lpi_map;
80 u16 *col_map; 86 u16 *col_map;
@@ -597,11 +603,6 @@ static void its_unmask_irq(struct irq_data *d)
597 lpi_set_config(d, true); 603 lpi_set_config(d, true);
598} 604}
599 605
600static void its_eoi_irq(struct irq_data *d)
601{
602 gic_write_eoir(d->hwirq);
603}
604
605static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 606static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
606 bool force) 607 bool force)
607{ 608{
@@ -638,7 +639,7 @@ static struct irq_chip its_irq_chip = {
638 .name = "ITS", 639 .name = "ITS",
639 .irq_mask = its_mask_irq, 640 .irq_mask = its_mask_irq,
640 .irq_unmask = its_unmask_irq, 641 .irq_unmask = its_unmask_irq,
641 .irq_eoi = its_eoi_irq, 642 .irq_eoi = irq_chip_eoi_parent,
642 .irq_set_affinity = its_set_affinity, 643 .irq_set_affinity = its_set_affinity,
643 .irq_compose_msi_msg = its_irq_compose_msi_msg, 644 .irq_compose_msi_msg = its_irq_compose_msi_msg,
644}; 645};
@@ -807,9 +808,10 @@ static void its_free_tables(struct its_node *its)
807 int i; 808 int i;
808 809
809 for (i = 0; i < GITS_BASER_NR_REGS; i++) { 810 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
810 if (its->tables[i]) { 811 if (its->tables[i].base) {
811 free_page((unsigned long)its->tables[i]); 812 free_pages((unsigned long)its->tables[i].base,
812 its->tables[i] = NULL; 813 its->tables[i].order);
814 its->tables[i].base = NULL;
813 } 815 }
814 } 816 }
815} 817}
@@ -842,7 +844,6 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
842 u64 type = GITS_BASER_TYPE(val); 844 u64 type = GITS_BASER_TYPE(val);
843 u64 entry_size = GITS_BASER_ENTRY_SIZE(val); 845 u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
844 int order = get_order(psz); 846 int order = get_order(psz);
845 int alloc_size;
846 int alloc_pages; 847 int alloc_pages;
847 u64 tmp; 848 u64 tmp;
848 void *base; 849 void *base;
@@ -874,9 +875,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
874 } 875 }
875 } 876 }
876 877
877 alloc_size = (1 << order) * PAGE_SIZE;
878retry_alloc_baser: 878retry_alloc_baser:
879 alloc_pages = (alloc_size / psz); 879 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
880 if (alloc_pages > GITS_BASER_PAGES_MAX) { 880 if (alloc_pages > GITS_BASER_PAGES_MAX) {
881 alloc_pages = GITS_BASER_PAGES_MAX; 881 alloc_pages = GITS_BASER_PAGES_MAX;
882 order = get_order(GITS_BASER_PAGES_MAX * psz); 882 order = get_order(GITS_BASER_PAGES_MAX * psz);
@@ -890,7 +890,8 @@ retry_alloc_baser:
890 goto out_free; 890 goto out_free;
891 } 891 }
892 892
893 its->tables[i] = base; 893 its->tables[i].base = base;
894 its->tables[i].order = order;
894 895
895retry_baser: 896retry_baser:
896 val = (virt_to_phys(base) | 897 val = (virt_to_phys(base) |
@@ -928,7 +929,7 @@ retry_baser:
928 shr = tmp & GITS_BASER_SHAREABILITY_MASK; 929 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
929 if (!shr) { 930 if (!shr) {
930 cache = GITS_BASER_nC; 931 cache = GITS_BASER_nC;
931 __flush_dcache_area(base, alloc_size); 932 __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
932 } 933 }
933 goto retry_baser; 934 goto retry_baser;
934 } 935 }
@@ -940,7 +941,7 @@ retry_baser:
940 * something is horribly wrong... 941 * something is horribly wrong...
941 */ 942 */
942 free_pages((unsigned long)base, order); 943 free_pages((unsigned long)base, order);
943 its->tables[i] = NULL; 944 its->tables[i].base = NULL;
944 945
945 switch (psz) { 946 switch (psz) {
946 case SZ_16K: 947 case SZ_16K:
@@ -961,7 +962,7 @@ retry_baser:
961 } 962 }
962 963
963 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", 964 pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
964 (int)(alloc_size / entry_size), 965 (int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
965 its_base_type_string[type], 966 its_base_type_string[type],
966 (unsigned long)virt_to_phys(base), 967 (unsigned long)virt_to_phys(base),
967 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); 968 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 911758c056c1..8f9ebf714e2b 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -384,9 +384,6 @@ static struct irq_chip gic_chip = {
384 .irq_unmask = gic_unmask_irq, 384 .irq_unmask = gic_unmask_irq,
385 .irq_eoi = gic_eoi_irq, 385 .irq_eoi = gic_eoi_irq,
386 .irq_set_type = gic_set_type, 386 .irq_set_type = gic_set_type,
387#ifdef CONFIG_SMP
388 .irq_set_affinity = gic_set_affinity,
389#endif
390 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 387 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
391 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 388 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
392 .flags = IRQCHIP_SET_TYPE_MASKED | 389 .flags = IRQCHIP_SET_TYPE_MASKED |
@@ -400,9 +397,6 @@ static struct irq_chip gic_eoimode1_chip = {
400 .irq_unmask = gic_unmask_irq, 397 .irq_unmask = gic_unmask_irq,
401 .irq_eoi = gic_eoimode1_eoi_irq, 398 .irq_eoi = gic_eoimode1_eoi_irq,
402 .irq_set_type = gic_set_type, 399 .irq_set_type = gic_set_type,
403#ifdef CONFIG_SMP
404 .irq_set_affinity = gic_set_affinity,
405#endif
406 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 400 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
407 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 401 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
408 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, 402 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
@@ -443,7 +437,7 @@ static void gic_cpu_if_up(struct gic_chip_data *gic)
443 u32 bypass = 0; 437 u32 bypass = 0;
444 u32 mode = 0; 438 u32 mode = 0;
445 439
446 if (static_key_true(&supports_deactivate)) 440 if (gic == &gic_data[0] && static_key_true(&supports_deactivate))
447 mode = GIC_CPU_CTRL_EOImodeNS; 441 mode = GIC_CPU_CTRL_EOImodeNS;
448 442
449 /* 443 /*
@@ -1039,6 +1033,11 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
1039 gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr); 1033 gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr);
1040 } 1034 }
1041 1035
1036#ifdef CONFIG_SMP
1037 if (gic_nr == 0)
1038 gic->chip.irq_set_affinity = gic_set_affinity;
1039#endif
1040
1042#ifdef CONFIG_GIC_NON_BANKED 1041#ifdef CONFIG_GIC_NON_BANKED
1043 if (percpu_offset) { /* Frankein-GIC without banked registers... */ 1042 if (percpu_offset) { /* Frankein-GIC without banked registers... */
1044 unsigned int cpu; 1043 unsigned int cpu;
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 0704362f4c82..376b28074e0d 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -22,7 +22,6 @@
22#include <linux/of_irq.h> 22#include <linux/of_irq.h>
23 23
24#include <asm/exception.h> 24#include <asm/exception.h>
25#include <asm/mach/irq.h>
26 25
27#define SUN4I_IRQ_VECTOR_REG 0x00 26#define SUN4I_IRQ_VECTOR_REG 0x00
28#define SUN4I_IRQ_PROTECTION_REG 0x08 27#define SUN4I_IRQ_PROTECTION_REG 0x08
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 2a506fe0c8a4..d1f8ab915b15 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -373,13 +373,7 @@ static void gigaset_freecshw(struct cardstate *cs)
373 373
374static void gigaset_device_release(struct device *dev) 374static void gigaset_device_release(struct device *dev)
375{ 375{
376 struct cardstate *cs = dev_get_drvdata(dev); 376 kfree(container_of(dev, struct ser_cardstate, dev.dev));
377
378 if (!cs)
379 return;
380 dev_set_drvdata(dev, NULL);
381 kfree(cs->hw.ser);
382 cs->hw.ser = NULL;
383} 377}
384 378
385/* 379/*
@@ -408,7 +402,6 @@ static int gigaset_initcshw(struct cardstate *cs)
408 cs->hw.ser = NULL; 402 cs->hw.ser = NULL;
409 return rc; 403 return rc;
410 } 404 }
411 dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
412 405
413 tasklet_init(&cs->write_tasklet, 406 tasklet_init(&cs->write_tasklet,
414 gigaset_modem_fill, (unsigned long) cs); 407 gigaset_modem_fill, (unsigned long) cs);
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 8e2944784e00..afde4edef9ae 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -392,7 +392,7 @@ read_dma(struct tiger_ch *bc, u32 idx, int cnt)
392 } 392 }
393 stat = bchannel_get_rxbuf(&bc->bch, cnt); 393 stat = bchannel_get_rxbuf(&bc->bch, cnt);
394 /* only transparent use the count here, HDLC overun is detected later */ 394 /* only transparent use the count here, HDLC overun is detected later */
395 if (stat == ENOMEM) { 395 if (stat == -ENOMEM) {
396 pr_warning("%s.B%d: No memory for %d bytes\n", 396 pr_warning("%s.B%d: No memory for %d bytes\n",
397 card->name, bc->bch.nr, cnt); 397 card->name, bc->bch.nr, cnt);
398 return; 398 return;
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 33224cb91c5b..9f6acd5d1d2e 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -572,11 +572,13 @@ int nvm_register(struct request_queue *q, char *disk_name,
572 } 572 }
573 } 573 }
574 574
575 ret = nvm_get_sysblock(dev, &dev->sb); 575 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
576 if (!ret) 576 ret = nvm_get_sysblock(dev, &dev->sb);
577 pr_err("nvm: device not initialized.\n"); 577 if (!ret)
578 else if (ret < 0) 578 pr_err("nvm: device not initialized.\n");
579 pr_err("nvm: err (%d) on device initialization\n", ret); 579 else if (ret < 0)
580 pr_err("nvm: err (%d) on device initialization\n", ret);
581 }
580 582
581 /* register device with a supported media manager */ 583 /* register device with a supported media manager */
582 down_write(&nvm_lock); 584 down_write(&nvm_lock);
@@ -1055,9 +1057,11 @@ static long __nvm_ioctl_dev_init(struct nvm_ioctl_dev_init *init)
1055 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN); 1057 strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
1056 info.fs_ppa.ppa = -1; 1058 info.fs_ppa.ppa = -1;
1057 1059
1058 ret = nvm_init_sysblock(dev, &info); 1060 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
1059 if (ret) 1061 ret = nvm_init_sysblock(dev, &info);
1060 return ret; 1062 if (ret)
1063 return ret;
1064 }
1061 1065
1062 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info)); 1066 memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
1063 1067
@@ -1117,7 +1121,10 @@ static long nvm_ioctl_dev_factory(struct file *file, void __user *arg)
1117 dev->mt = NULL; 1121 dev->mt = NULL;
1118 } 1122 }
1119 1123
1120 return nvm_dev_factory(dev, fact.flags); 1124 if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
1125 return nvm_dev_factory(dev, fact.flags);
1126
1127 return 0;
1121} 1128}
1122 1129
1123static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg) 1130static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index d8c75958ced3..307db1ea22de 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -300,8 +300,10 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
300 } 300 }
301 301
302 page = mempool_alloc(rrpc->page_pool, GFP_NOIO); 302 page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
303 if (!page) 303 if (!page) {
304 bio_put(bio);
304 return -ENOMEM; 305 return -ENOMEM;
306 }
305 307
306 while ((slot = find_first_zero_bit(rblk->invalid_pages, 308 while ((slot = find_first_zero_bit(rblk->invalid_pages,
307 nr_pgs_per_blk)) < nr_pgs_per_blk) { 309 nr_pgs_per_blk)) < nr_pgs_per_blk) {
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index ef13ac7700c8..f7b37336353f 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -174,8 +174,7 @@ static inline sector_t rrpc_get_sector(sector_t laddr)
174static inline int request_intersects(struct rrpc_inflight_rq *r, 174static inline int request_intersects(struct rrpc_inflight_rq *r,
175 sector_t laddr_start, sector_t laddr_end) 175 sector_t laddr_start, sector_t laddr_end)
176{ 176{
177 return (laddr_end >= r->l_start && laddr_end <= r->l_end) && 177 return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
178 (laddr_start >= r->l_start && laddr_start <= r->l_end);
179} 178}
180 179
181static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr, 180static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
@@ -184,6 +183,8 @@ static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
184 sector_t laddr_end = laddr + pages - 1; 183 sector_t laddr_end = laddr + pages - 1;
185 struct rrpc_inflight_rq *rtmp; 184 struct rrpc_inflight_rq *rtmp;
186 185
186 WARN_ON(irqs_disabled());
187
187 spin_lock_irq(&rrpc->inflights.lock); 188 spin_lock_irq(&rrpc->inflights.lock);
188 list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) { 189 list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
189 if (unlikely(request_intersects(rtmp, laddr, laddr_end))) { 190 if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5df40480228b..dd834927bc66 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
1191 1191
1192 if (clone) 1192 if (clone)
1193 free_rq_clone(clone); 1193 free_rq_clone(clone);
1194 else if (!tio->md->queue->mq_ops)
1195 free_rq_tio(tio);
1194} 1196}
1195 1197
1196/* 1198/*
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index d35e0ba8b269..befb07df036d 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -1946,10 +1946,9 @@ static int adv76xx_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
1946 } 1946 }
1947 1947
1948 /* tx 5v detect */ 1948 /* tx 5v detect */
1949 tx_5v = io_read(sd, 0x70) & info->cable_det_mask; 1949 tx_5v = irq_reg_0x70 & info->cable_det_mask;
1950 if (tx_5v) { 1950 if (tx_5v) {
1951 v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v); 1951 v4l2_dbg(1, debug, sd, "%s: tx_5v: 0x%x\n", __func__, tx_5v);
1952 io_write(sd, 0x71, tx_5v);
1953 adv76xx_s_detect_tx_5v_ctrl(sd); 1952 adv76xx_s_detect_tx_5v_ctrl(sd);
1954 if (handled) 1953 if (handled)
1955 *handled = true; 1954 *handled = true;
diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
index 5ebb3cd31345..711c3674a5d9 100644
--- a/drivers/media/media-device.c
+++ b/drivers/media/media-device.c
@@ -20,6 +20,9 @@
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 21 */
22 22
23/* We need to access legacy defines from linux/media.h */
24#define __NEED_MEDIA_LEGACY_API
25
23#include <linux/compat.h> 26#include <linux/compat.h>
24#include <linux/export.h> 27#include <linux/export.h>
25#include <linux/idr.h> 28#include <linux/idr.h>
@@ -119,6 +122,26 @@ static long media_device_enum_entities(struct media_device *mdev,
119 u_ent.group_id = 0; /* Unused */ 122 u_ent.group_id = 0; /* Unused */
120 u_ent.pads = ent->num_pads; 123 u_ent.pads = ent->num_pads;
121 u_ent.links = ent->num_links - ent->num_backlinks; 124 u_ent.links = ent->num_links - ent->num_backlinks;
125
126 /*
127 * Workaround for a bug at media-ctl <= v1.10 that makes it to
128 * do the wrong thing if the entity function doesn't belong to
129 * either MEDIA_ENT_F_OLD_BASE or MEDIA_ENT_F_OLD_SUBDEV_BASE
130 * Ranges.
131 *
132 * Non-subdevices are expected to be at the MEDIA_ENT_F_OLD_BASE,
133 * or, otherwise, will be silently ignored by media-ctl when
134 * printing the graphviz diagram. So, map them into the devnode
135 * old range.
136 */
137 if (ent->function < MEDIA_ENT_F_OLD_BASE ||
138 ent->function > MEDIA_ENT_T_DEVNODE_UNKNOWN) {
139 if (is_media_entity_v4l2_subdev(ent))
140 u_ent.type = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
141 else if (ent->function != MEDIA_ENT_F_IO_V4L)
142 u_ent.type = MEDIA_ENT_T_DEVNODE_UNKNOWN;
143 }
144
122 memcpy(&u_ent.raw, &ent->info, sizeof(ent->info)); 145 memcpy(&u_ent.raw, &ent->info, sizeof(ent->info));
123 if (copy_to_user(uent, &u_ent, sizeof(u_ent))) 146 if (copy_to_user(uent, &u_ent, sizeof(u_ent)))
124 return -EFAULT; 147 return -EFAULT;
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index e6e4bacb09ee..12099b09a9a7 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2048,6 +2048,7 @@ int db8500_prcmu_config_hotmon(u8 low, u8 high)
2048 2048
2049 return 0; 2049 return 0;
2050} 2050}
2051EXPORT_SYMBOL_GPL(db8500_prcmu_config_hotmon);
2051 2052
2052static int config_hot_period(u16 val) 2053static int config_hot_period(u16 val)
2053{ 2054{
@@ -2074,11 +2075,13 @@ int db8500_prcmu_start_temp_sense(u16 cycles32k)
2074 2075
2075 return config_hot_period(cycles32k); 2076 return config_hot_period(cycles32k);
2076} 2077}
2078EXPORT_SYMBOL_GPL(db8500_prcmu_start_temp_sense);
2077 2079
2078int db8500_prcmu_stop_temp_sense(void) 2080int db8500_prcmu_stop_temp_sense(void)
2079{ 2081{
2080 return config_hot_period(0xFFFF); 2082 return config_hot_period(0xFFFF);
2081} 2083}
2084EXPORT_SYMBOL_GPL(db8500_prcmu_stop_temp_sense);
2082 2085
2083static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3) 2086static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
2084{ 2087{
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 4c1903f781fc..0c6c17a1c59e 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -415,7 +415,7 @@ static int cxl_setup_psl_timebase(struct cxl *adapter, struct pci_dev *dev)
415 delta = mftb() - psl_tb; 415 delta = mftb() - psl_tb;
416 if (delta < 0) 416 if (delta < 0)
417 delta = -delta; 417 delta = -delta;
418 } while (cputime_to_usecs(delta) > 16); 418 } while (tb_to_ns(delta) > 16000);
419 419
420 return 0; 420 return 0;
421} 421}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 677d0362f334..80f9afcb1382 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -458,7 +458,11 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
458{ 458{
459 struct mei_cl *cl = file->private_data; 459 struct mei_cl *cl = file->private_data;
460 460
461 return mei_cl_notify_request(cl, file, request); 461 if (request != MEI_HBM_NOTIFICATION_START &&
462 request != MEI_HBM_NOTIFICATION_STOP)
463 return -EINVAL;
464
465 return mei_cl_notify_request(cl, file, (u8)request);
462} 466}
463 467
464/** 468/**
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 5914263090fc..fe207e542032 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -47,13 +47,10 @@
47#include "queue.h" 47#include "queue.h"
48 48
49MODULE_ALIAS("mmc:block"); 49MODULE_ALIAS("mmc:block");
50
51#ifdef KERNEL
52#ifdef MODULE_PARAM_PREFIX 50#ifdef MODULE_PARAM_PREFIX
53#undef MODULE_PARAM_PREFIX 51#undef MODULE_PARAM_PREFIX
54#endif 52#endif
55#define MODULE_PARAM_PREFIX "mmcblk." 53#define MODULE_PARAM_PREFIX "mmcblk."
56#endif
57 54
58#define INAND_CMD38_ARG_EXT_CSD 113 55#define INAND_CMD38_ARG_EXT_CSD 113
59#define INAND_CMD38_ARG_ERASE 0x00 56#define INAND_CMD38_ARG_ERASE 0x00
@@ -655,8 +652,10 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev,
655 } 652 }
656 653
657 md = mmc_blk_get(bdev->bd_disk); 654 md = mmc_blk_get(bdev->bd_disk);
658 if (!md) 655 if (!md) {
656 err = -EINVAL;
659 goto cmd_err; 657 goto cmd_err;
658 }
660 659
661 card = md->queue.card; 660 card = md->queue.card;
662 if (IS_ERR(card)) { 661 if (IS_ERR(card)) {
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 1c1b45ef3faf..3446097a43c0 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -925,6 +925,10 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
925 925
926 dma_addr = dma_map_page(dma_dev, sg_page(sg), 0, 926 dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
927 PAGE_SIZE, dir); 927 PAGE_SIZE, dir);
928 if (dma_mapping_error(dma_dev, dma_addr)) {
929 data->error = -EFAULT;
930 break;
931 }
928 if (direction == DMA_TO_DEVICE) 932 if (direction == DMA_TO_DEVICE)
929 t->tx_dma = dma_addr + sg->offset; 933 t->tx_dma = dma_addr + sg->offset;
930 else 934 else
@@ -1393,10 +1397,12 @@ static int mmc_spi_probe(struct spi_device *spi)
1393 host->dma_dev = dev; 1397 host->dma_dev = dev;
1394 host->ones_dma = dma_map_single(dev, ones, 1398 host->ones_dma = dma_map_single(dev, ones,
1395 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE); 1399 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1400 if (dma_mapping_error(dev, host->ones_dma))
1401 goto fail_ones_dma;
1396 host->data_dma = dma_map_single(dev, host->data, 1402 host->data_dma = dma_map_single(dev, host->data,
1397 sizeof(*host->data), DMA_BIDIRECTIONAL); 1403 sizeof(*host->data), DMA_BIDIRECTIONAL);
1398 1404 if (dma_mapping_error(dev, host->data_dma))
1399 /* REVISIT in theory those map operations can fail... */ 1405 goto fail_data_dma;
1400 1406
1401 dma_sync_single_for_cpu(host->dma_dev, 1407 dma_sync_single_for_cpu(host->dma_dev,
1402 host->data_dma, sizeof(*host->data), 1408 host->data_dma, sizeof(*host->data),
@@ -1462,6 +1468,11 @@ fail_glue_init:
1462 if (host->dma_dev) 1468 if (host->dma_dev)
1463 dma_unmap_single(host->dma_dev, host->data_dma, 1469 dma_unmap_single(host->dma_dev, host->data_dma,
1464 sizeof(*host->data), DMA_BIDIRECTIONAL); 1470 sizeof(*host->data), DMA_BIDIRECTIONAL);
1471fail_data_dma:
1472 if (host->dma_dev)
1473 dma_unmap_single(host->dma_dev, host->ones_dma,
1474 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
1475fail_ones_dma:
1465 kfree(host->data); 1476 kfree(host->data);
1466 1477
1467fail_nobuf1: 1478fail_nobuf1:
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b6639ea0bf18..f6e4d9718035 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2232,6 +2232,7 @@ err_irq:
2232 dma_release_channel(host->tx_chan); 2232 dma_release_channel(host->tx_chan);
2233 if (host->rx_chan) 2233 if (host->rx_chan)
2234 dma_release_channel(host->rx_chan); 2234 dma_release_channel(host->rx_chan);
2235 pm_runtime_dont_use_autosuspend(host->dev);
2235 pm_runtime_put_sync(host->dev); 2236 pm_runtime_put_sync(host->dev);
2236 pm_runtime_disable(host->dev); 2237 pm_runtime_disable(host->dev);
2237 if (host->dbclk) 2238 if (host->dbclk)
@@ -2253,6 +2254,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
2253 dma_release_channel(host->tx_chan); 2254 dma_release_channel(host->tx_chan);
2254 dma_release_channel(host->rx_chan); 2255 dma_release_channel(host->rx_chan);
2255 2256
2257 pm_runtime_dont_use_autosuspend(host->dev);
2256 pm_runtime_put_sync(host->dev); 2258 pm_runtime_put_sync(host->dev);
2257 pm_runtime_disable(host->dev); 2259 pm_runtime_disable(host->dev);
2258 device_init_wakeup(&pdev->dev, false); 2260 device_init_wakeup(&pdev->dev, false);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index ce08896b9d69..da824772bbb4 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -86,7 +86,7 @@ struct pxamci_host {
86static inline void pxamci_init_ocr(struct pxamci_host *host) 86static inline void pxamci_init_ocr(struct pxamci_host *host)
87{ 87{
88#ifdef CONFIG_REGULATOR 88#ifdef CONFIG_REGULATOR
89 host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc"); 89 host->vcc = devm_regulator_get_optional(mmc_dev(host->mmc), "vmmc");
90 90
91 if (IS_ERR(host->vcc)) 91 if (IS_ERR(host->vcc))
92 host->vcc = NULL; 92 host->vcc = NULL;
@@ -654,12 +654,8 @@ static int pxamci_probe(struct platform_device *pdev)
654 654
655 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 655 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
656 irq = platform_get_irq(pdev, 0); 656 irq = platform_get_irq(pdev, 0);
657 if (!r || irq < 0) 657 if (irq < 0)
658 return -ENXIO; 658 return irq;
659
660 r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
661 if (!r)
662 return -EBUSY;
663 659
664 mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev); 660 mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
665 if (!mmc) { 661 if (!mmc) {
@@ -695,7 +691,7 @@ static int pxamci_probe(struct platform_device *pdev)
695 host->pdata = pdev->dev.platform_data; 691 host->pdata = pdev->dev.platform_data;
696 host->clkrt = CLKRT_OFF; 692 host->clkrt = CLKRT_OFF;
697 693
698 host->clk = clk_get(&pdev->dev, NULL); 694 host->clk = devm_clk_get(&pdev->dev, NULL);
699 if (IS_ERR(host->clk)) { 695 if (IS_ERR(host->clk)) {
700 ret = PTR_ERR(host->clk); 696 ret = PTR_ERR(host->clk);
701 host->clk = NULL; 697 host->clk = NULL;
@@ -727,9 +723,9 @@ static int pxamci_probe(struct platform_device *pdev)
727 host->irq = irq; 723 host->irq = irq;
728 host->imask = MMC_I_MASK_ALL; 724 host->imask = MMC_I_MASK_ALL;
729 725
730 host->base = ioremap(r->start, SZ_4K); 726 host->base = devm_ioremap_resource(&pdev->dev, r);
731 if (!host->base) { 727 if (IS_ERR(host->base)) {
732 ret = -ENOMEM; 728 ret = PTR_ERR(host->base);
733 goto out; 729 goto out;
734 } 730 }
735 731
@@ -742,7 +738,8 @@ static int pxamci_probe(struct platform_device *pdev)
742 writel(64, host->base + MMC_RESTO); 738 writel(64, host->base + MMC_RESTO);
743 writel(host->imask, host->base + MMC_I_MASK); 739 writel(host->imask, host->base + MMC_I_MASK);
744 740
745 ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host); 741 ret = devm_request_irq(&pdev->dev, host->irq, pxamci_irq, 0,
742 DRIVER_NAME, host);
746 if (ret) 743 if (ret)
747 goto out; 744 goto out;
748 745
@@ -804,7 +801,7 @@ static int pxamci_probe(struct platform_device *pdev)
804 dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro); 801 dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
805 goto out; 802 goto out;
806 } else { 803 } else {
807 mmc->caps |= host->pdata->gpio_card_ro_invert ? 804 mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
808 0 : MMC_CAP2_RO_ACTIVE_HIGH; 805 0 : MMC_CAP2_RO_ACTIVE_HIGH;
809 } 806 }
810 807
@@ -833,14 +830,9 @@ out:
833 dma_release_channel(host->dma_chan_rx); 830 dma_release_channel(host->dma_chan_rx);
834 if (host->dma_chan_tx) 831 if (host->dma_chan_tx)
835 dma_release_channel(host->dma_chan_tx); 832 dma_release_channel(host->dma_chan_tx);
836 if (host->base)
837 iounmap(host->base);
838 if (host->clk)
839 clk_put(host->clk);
840 } 833 }
841 if (mmc) 834 if (mmc)
842 mmc_free_host(mmc); 835 mmc_free_host(mmc);
843 release_resource(r);
844 return ret; 836 return ret;
845} 837}
846 838
@@ -859,9 +851,6 @@ static int pxamci_remove(struct platform_device *pdev)
859 gpio_ro = host->pdata->gpio_card_ro; 851 gpio_ro = host->pdata->gpio_card_ro;
860 gpio_power = host->pdata->gpio_power; 852 gpio_power = host->pdata->gpio_power;
861 } 853 }
862 if (host->vcc)
863 regulator_put(host->vcc);
864
865 if (host->pdata && host->pdata->exit) 854 if (host->pdata && host->pdata->exit)
866 host->pdata->exit(&pdev->dev, mmc); 855 host->pdata->exit(&pdev->dev, mmc);
867 856
@@ -870,16 +859,10 @@ static int pxamci_remove(struct platform_device *pdev)
870 END_CMD_RES|PRG_DONE|DATA_TRAN_DONE, 859 END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
871 host->base + MMC_I_MASK); 860 host->base + MMC_I_MASK);
872 861
873 free_irq(host->irq, host);
874 dmaengine_terminate_all(host->dma_chan_rx); 862 dmaengine_terminate_all(host->dma_chan_rx);
875 dmaengine_terminate_all(host->dma_chan_tx); 863 dmaengine_terminate_all(host->dma_chan_tx);
876 dma_release_channel(host->dma_chan_rx); 864 dma_release_channel(host->dma_chan_rx);
877 dma_release_channel(host->dma_chan_tx); 865 dma_release_channel(host->dma_chan_tx);
878 iounmap(host->base);
879
880 clk_put(host->clk);
881
882 release_resource(host->res);
883 866
884 mmc_free_host(mmc); 867 mmc_free_host(mmc);
885 } 868 }
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index f6047fc94062..a5cda926d38e 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
146 .ops = &sdhci_acpi_ops_int, 146 .ops = &sdhci_acpi_ops_int,
147}; 147};
148 148
149static int bxt_get_cd(struct mmc_host *mmc)
150{
151 int gpio_cd = mmc_gpio_get_cd(mmc);
152 struct sdhci_host *host = mmc_priv(mmc);
153 unsigned long flags;
154 int ret = 0;
155
156 if (!gpio_cd)
157 return 0;
158
159 pm_runtime_get_sync(mmc->parent);
160
161 spin_lock_irqsave(&host->lock, flags);
162
163 if (host->flags & SDHCI_DEVICE_DEAD)
164 goto out;
165
166 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
167out:
168 spin_unlock_irqrestore(&host->lock, flags);
169
170 pm_runtime_mark_last_busy(mmc->parent);
171 pm_runtime_put_autosuspend(mmc->parent);
172
173 return ret;
174}
175
149static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev, 176static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
150 const char *hid, const char *uid) 177 const char *hid, const char *uid)
151{ 178{
@@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
196 223
197 /* Platform specific code during sd probe slot goes here */ 224 /* Platform specific code during sd probe slot goes here */
198 225
226 if (hid && !strcmp(hid, "80865ACA"))
227 host->mmc_host_ops.get_cd = bxt_get_cd;
228
199 return 0; 229 return 0;
200} 230}
201 231
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 7e7d8f0c9438..9cb86fb25976 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -217,6 +217,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
217pm_runtime_disable: 217pm_runtime_disable:
218 pm_runtime_disable(&pdev->dev); 218 pm_runtime_disable(&pdev->dev);
219 pm_runtime_set_suspended(&pdev->dev); 219 pm_runtime_set_suspended(&pdev->dev);
220 pm_runtime_put_noidle(&pdev->dev);
220clocks_disable_unprepare: 221clocks_disable_unprepare:
221 clk_disable_unprepare(priv->gck); 222 clk_disable_unprepare(priv->gck);
222 clk_disable_unprepare(priv->mainck); 223 clk_disable_unprepare(priv->mainck);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index cc851b065d0a..df3b8eced8c4 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host)
330 sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf); 330 sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
331} 331}
332 332
333static int bxt_get_cd(struct mmc_host *mmc)
334{
335 int gpio_cd = mmc_gpio_get_cd(mmc);
336 struct sdhci_host *host = mmc_priv(mmc);
337 unsigned long flags;
338 int ret = 0;
339
340 if (!gpio_cd)
341 return 0;
342
343 pm_runtime_get_sync(mmc->parent);
344
345 spin_lock_irqsave(&host->lock, flags);
346
347 if (host->flags & SDHCI_DEVICE_DEAD)
348 goto out;
349
350 ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
351out:
352 spin_unlock_irqrestore(&host->lock, flags);
353
354 pm_runtime_mark_last_busy(mmc->parent);
355 pm_runtime_put_autosuspend(mmc->parent);
356
357 return ret;
358}
359
333static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) 360static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
334{ 361{
335 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | 362 slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
@@ -362,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
362 slot->cd_con_id = NULL; 389 slot->cd_con_id = NULL;
363 slot->cd_idx = 0; 390 slot->cd_idx = 0;
364 slot->cd_override_level = true; 391 slot->cd_override_level = true;
392 if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
393 slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
394 slot->host->mmc_host_ops.get_cd = bxt_get_cd;
395
365 return 0; 396 return 0;
366} 397}
367 398
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index d622435d1bcc..add9fdfd1d8f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1360,7 +1360,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1360 sdhci_runtime_pm_get(host); 1360 sdhci_runtime_pm_get(host);
1361 1361
1362 /* Firstly check card presence */ 1362 /* Firstly check card presence */
1363 present = sdhci_do_get_cd(host); 1363 present = mmc->ops->get_cd(mmc);
1364 1364
1365 spin_lock_irqsave(&host->lock, flags); 1365 spin_lock_irqsave(&host->lock, flags);
1366 1366
@@ -2849,6 +2849,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
2849 2849
2850 host = mmc_priv(mmc); 2850 host = mmc_priv(mmc);
2851 host->mmc = mmc; 2851 host->mmc = mmc;
2852 host->mmc_host_ops = sdhci_ops;
2853 mmc->ops = &host->mmc_host_ops;
2852 2854
2853 return host; 2855 return host;
2854} 2856}
@@ -3037,7 +3039,6 @@ int sdhci_add_host(struct sdhci_host *host)
3037 /* 3039 /*
3038 * Set host parameters. 3040 * Set host parameters.
3039 */ 3041 */
3040 mmc->ops = &sdhci_ops;
3041 max_clk = host->max_clk; 3042 max_clk = host->max_clk;
3042 3043
3043 if (host->ops->get_min_clock) 3044 if (host->ops->get_min_clock)
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 7654ae5d2b4e..0115e9907bf8 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -430,6 +430,7 @@ struct sdhci_host {
430 430
431 /* Internal data */ 431 /* Internal data */
432 struct mmc_host *mmc; /* MMC structure */ 432 struct mmc_host *mmc; /* MMC structure */
433 struct mmc_host_ops mmc_host_ops; /* MMC host ops */
433 u64 dma_mask; /* custom DMA mask */ 434 u64 dma_mask; /* custom DMA mask */
434 435
435#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) 436#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 1ca8a1359cbc..6234eab38ff3 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -445,7 +445,7 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host)
445 pdata->slave_id_rx); 445 pdata->slave_id_rx);
446 } else { 446 } else {
447 host->chan_tx = dma_request_slave_channel(dev, "tx"); 447 host->chan_tx = dma_request_slave_channel(dev, "tx");
448 host->chan_tx = dma_request_slave_channel(dev, "rx"); 448 host->chan_rx = dma_request_slave_channel(dev, "rx");
449 } 449 }
450 dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, 450 dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
451 host->chan_rx); 451 host->chan_rx);
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 2a1b6e037e1a..0134ba32a057 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -193,7 +193,7 @@ int ubi_start_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
193 vol->changing_leb = 1; 193 vol->changing_leb = 1;
194 vol->ch_lnum = req->lnum; 194 vol->ch_lnum = req->lnum;
195 195
196 vol->upd_buf = vmalloc(req->bytes); 196 vol->upd_buf = vmalloc(ALIGN((int)req->bytes, ubi->min_io_size));
197 if (!vol->upd_buf) 197 if (!vol->upd_buf)
198 return -ENOMEM; 198 return -ENOMEM;
199 199
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 56b560558884..b7f1a9919033 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
214static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev, 214static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
215 struct rtnl_link_stats64 *stats); 215 struct rtnl_link_stats64 *stats);
216static void bond_slave_arr_handler(struct work_struct *work); 216static void bond_slave_arr_handler(struct work_struct *work);
217static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
218 int mod);
217 219
218/*---------------------------- General routines -----------------------------*/ 220/*---------------------------- General routines -----------------------------*/
219 221
@@ -2127,6 +2129,7 @@ static void bond_miimon_commit(struct bonding *bond)
2127 continue; 2129 continue;
2128 2130
2129 case BOND_LINK_UP: 2131 case BOND_LINK_UP:
2132 bond_update_speed_duplex(slave);
2130 bond_set_slave_link_state(slave, BOND_LINK_UP, 2133 bond_set_slave_link_state(slave, BOND_LINK_UP,
2131 BOND_SLAVE_NOTIFY_NOW); 2134 BOND_SLAVE_NOTIFY_NOW);
2132 slave->last_link_up = jiffies; 2135 slave->last_link_up = jiffies;
@@ -2459,7 +2462,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2459 struct slave *slave) 2462 struct slave *slave)
2460{ 2463{
2461 struct arphdr *arp = (struct arphdr *)skb->data; 2464 struct arphdr *arp = (struct arphdr *)skb->data;
2462 struct slave *curr_active_slave; 2465 struct slave *curr_active_slave, *curr_arp_slave;
2463 unsigned char *arp_ptr; 2466 unsigned char *arp_ptr;
2464 __be32 sip, tip; 2467 __be32 sip, tip;
2465 int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP); 2468 int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
@@ -2506,26 +2509,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
2506 &sip, &tip); 2509 &sip, &tip);
2507 2510
2508 curr_active_slave = rcu_dereference(bond->curr_active_slave); 2511 curr_active_slave = rcu_dereference(bond->curr_active_slave);
2512 curr_arp_slave = rcu_dereference(bond->current_arp_slave);
2509 2513
2510 /* Backup slaves won't see the ARP reply, but do come through 2514 /* We 'trust' the received ARP enough to validate it if:
2511 * here for each ARP probe (so we swap the sip/tip to validate 2515 *
2512 * the probe). In a "redundant switch, common router" type of 2516 * (a) the slave receiving the ARP is active (which includes the
2513 * configuration, the ARP probe will (hopefully) travel from 2517 * current ARP slave, if any), or
2514 * the active, through one switch, the router, then the other 2518 *
2515 * switch before reaching the backup. 2519 * (b) the receiving slave isn't active, but there is a currently
2520 * active slave and it received valid arp reply(s) after it became
2521 * the currently active slave, or
2522 *
2523 * (c) there is an ARP slave that sent an ARP during the prior ARP
2524 * interval, and we receive an ARP reply on any slave. We accept
2525 * these because switch FDB update delays may deliver the ARP
2526 * reply to a slave other than the sender of the ARP request.
2516 * 2527 *
2517 * We 'trust' the arp requests if there is an active slave and 2528 * Note: for (b), backup slaves are receiving the broadcast ARP
2518 * it received valid arp reply(s) after it became active. This 2529 * request, not a reply. This request passes from the sending
2519 * is done to avoid endless looping when we can't reach the 2530 * slave through the L2 switch(es) to the receiving slave. Since
2531 * this is checking the request, sip/tip are swapped for
2532 * validation.
2533 *
2534 * This is done to avoid endless looping when we can't reach the
2520 * arp_ip_target and fool ourselves with our own arp requests. 2535 * arp_ip_target and fool ourselves with our own arp requests.
2521 */ 2536 */
2522
2523 if (bond_is_active_slave(slave)) 2537 if (bond_is_active_slave(slave))
2524 bond_validate_arp(bond, slave, sip, tip); 2538 bond_validate_arp(bond, slave, sip, tip);
2525 else if (curr_active_slave && 2539 else if (curr_active_slave &&
2526 time_after(slave_last_rx(bond, curr_active_slave), 2540 time_after(slave_last_rx(bond, curr_active_slave),
2527 curr_active_slave->last_link_up)) 2541 curr_active_slave->last_link_up))
2528 bond_validate_arp(bond, slave, tip, sip); 2542 bond_validate_arp(bond, slave, tip, sip);
2543 else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
2544 bond_time_in_interval(bond,
2545 dev_trans_start(curr_arp_slave->dev), 1))
2546 bond_validate_arp(bond, slave, sip, tip);
2529 2547
2530out_unlock: 2548out_unlock:
2531 if (arp != (struct arphdr *)skb->data) 2549 if (arp != (struct arphdr *)skb->data)
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 575790e8a75a..74a7dfecee27 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -843,7 +843,7 @@ static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
843 if (clear_intf) 843 if (clear_intf)
844 mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00); 844 mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00);
845 845
846 if (eflag) 846 if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR))
847 mcp251x_write_bits(spi, EFLG, eflag, 0x00); 847 mcp251x_write_bits(spi, EFLG, eflag, 0x00);
848 848
849 /* Update can state */ 849 /* Update can state */
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index fc5b75675cd8..eb7192fab593 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
117 */ 117 */
118#define EMS_USB_ARM7_CLOCK 8000000 118#define EMS_USB_ARM7_CLOCK 8000000
119 119
120#define CPC_TX_QUEUE_TRIGGER_LOW 25
121#define CPC_TX_QUEUE_TRIGGER_HIGH 35
122
120/* 123/*
121 * CAN-Message representation in a CPC_MSG. Message object type is 124 * CAN-Message representation in a CPC_MSG. Message object type is
122 * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or 125 * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
@@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
278 switch (urb->status) { 281 switch (urb->status) {
279 case 0: 282 case 0:
280 dev->free_slots = dev->intr_in_buffer[1]; 283 dev->free_slots = dev->intr_in_buffer[1];
284 if(dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH){
285 if (netif_queue_stopped(netdev)){
286 netif_wake_queue(netdev);
287 }
288 }
281 break; 289 break;
282 290
283 case -ECONNRESET: /* unlink */ 291 case -ECONNRESET: /* unlink */
@@ -526,8 +534,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
526 /* Release context */ 534 /* Release context */
527 context->echo_index = MAX_TX_URBS; 535 context->echo_index = MAX_TX_URBS;
528 536
529 if (netif_queue_stopped(netdev))
530 netif_wake_queue(netdev);
531} 537}
532 538
533/* 539/*
@@ -587,7 +593,7 @@ static int ems_usb_start(struct ems_usb *dev)
587 int err, i; 593 int err, i;
588 594
589 dev->intr_in_buffer[0] = 0; 595 dev->intr_in_buffer[0] = 0;
590 dev->free_slots = 15; /* initial size */ 596 dev->free_slots = 50; /* initial size */
591 597
592 for (i = 0; i < MAX_RX_URBS; i++) { 598 for (i = 0; i < MAX_RX_URBS; i++) {
593 struct urb *urb = NULL; 599 struct urb *urb = NULL;
@@ -835,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
835 841
836 /* Slow down tx path */ 842 /* Slow down tx path */
837 if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS || 843 if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
838 dev->free_slots < 5) { 844 dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
839 netif_stop_queue(netdev); 845 netif_stop_queue(netdev);
840 } 846 }
841 } 847 }
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 5eee62badf45..cbc99d5649af 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -826,9 +826,8 @@ static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface
826static void gs_destroy_candev(struct gs_can *dev) 826static void gs_destroy_candev(struct gs_can *dev)
827{ 827{
828 unregister_candev(dev->netdev); 828 unregister_candev(dev->netdev);
829 free_candev(dev->netdev);
830 usb_kill_anchored_urbs(&dev->tx_submitted); 829 usb_kill_anchored_urbs(&dev->tx_submitted);
831 kfree(dev); 830 free_candev(dev->netdev);
832} 831}
833 832
834static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) 833static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
@@ -913,12 +912,15 @@ static int gs_usb_probe(struct usb_interface *intf, const struct usb_device_id *
913 for (i = 0; i < icount; i++) { 912 for (i = 0; i < icount; i++) {
914 dev->canch[i] = gs_make_candev(i, intf); 913 dev->canch[i] = gs_make_candev(i, intf);
915 if (IS_ERR_OR_NULL(dev->canch[i])) { 914 if (IS_ERR_OR_NULL(dev->canch[i])) {
915 /* save error code to return later */
916 rc = PTR_ERR(dev->canch[i]);
917
916 /* on failure destroy previously created candevs */ 918 /* on failure destroy previously created candevs */
917 icount = i; 919 icount = i;
918 for (i = 0; i < icount; i++) { 920 for (i = 0; i < icount; i++)
919 gs_destroy_candev(dev->canch[i]); 921 gs_destroy_candev(dev->canch[i]);
920 dev->canch[i] = NULL; 922
921 } 923 usb_kill_anchored_urbs(&dev->rx_submitted);
922 kfree(dev); 924 kfree(dev);
923 return rc; 925 return rc;
924 } 926 }
@@ -939,16 +941,12 @@ static void gs_usb_disconnect(struct usb_interface *intf)
939 return; 941 return;
940 } 942 }
941 943
942 for (i = 0; i < GS_MAX_INTF; i++) { 944 for (i = 0; i < GS_MAX_INTF; i++)
943 struct gs_can *can = dev->canch[i]; 945 if (dev->canch[i])
944 946 gs_destroy_candev(dev->canch[i]);
945 if (!can)
946 continue;
947
948 gs_destroy_candev(can);
949 }
950 947
951 usb_kill_anchored_urbs(&dev->rx_submitted); 948 usb_kill_anchored_urbs(&dev->rx_submitted);
949 kfree(dev);
952} 950}
953 951
954static const struct usb_device_id gs_usb_table[] = { 952static const struct usb_device_id gs_usb_table[] = {
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index cc6c54553418..a47f52f44b0d 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -25,6 +25,7 @@
25static const struct mv88e6xxx_switch_id mv88e6352_table[] = { 25static const struct mv88e6xxx_switch_id mv88e6352_table[] = {
26 { PORT_SWITCH_ID_6172, "Marvell 88E6172" }, 26 { PORT_SWITCH_ID_6172, "Marvell 88E6172" },
27 { PORT_SWITCH_ID_6176, "Marvell 88E6176" }, 27 { PORT_SWITCH_ID_6176, "Marvell 88E6176" },
28 { PORT_SWITCH_ID_6240, "Marvell 88E6240" },
28 { PORT_SWITCH_ID_6320, "Marvell 88E6320" }, 29 { PORT_SWITCH_ID_6320, "Marvell 88E6320" },
29 { PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" }, 30 { PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" },
30 { PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" }, 31 { PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" },
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index cf34681af4f6..512c8c0be1b4 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1555,7 +1555,7 @@ static int _mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
1555 1555
1556 if (vlan.vid != vid || !vlan.valid || 1556 if (vlan.vid != vid || !vlan.valid ||
1557 vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) 1557 vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
1558 return -ENOENT; 1558 return -EOPNOTSUPP;
1559 1559
1560 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER; 1560 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1561 1561
@@ -1582,6 +1582,7 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
1582 const struct switchdev_obj_port_vlan *vlan) 1582 const struct switchdev_obj_port_vlan *vlan)
1583{ 1583{
1584 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1584 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1585 const u16 defpvid = 4000 + ds->index * DSA_MAX_PORTS + port;
1585 u16 pvid, vid; 1586 u16 pvid, vid;
1586 int err = 0; 1587 int err = 0;
1587 1588
@@ -1597,7 +1598,8 @@ int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
1597 goto unlock; 1598 goto unlock;
1598 1599
1599 if (vid == pvid) { 1600 if (vid == pvid) {
1600 err = _mv88e6xxx_port_pvid_set(ds, port, 0); 1601 /* restore reserved VLAN ID */
1602 err = _mv88e6xxx_port_pvid_set(ds, port, defpvid);
1601 if (err) 1603 if (err)
1602 goto unlock; 1604 goto unlock;
1603 } 1605 }
@@ -1889,26 +1891,20 @@ unlock:
1889 1891
1890int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members) 1892int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members)
1891{ 1893{
1892 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1894 return 0;
1893 const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
1894 int err;
1895
1896 /* The port joined a bridge, so leave its reserved VLAN */
1897 mutex_lock(&ps->smi_mutex);
1898 err = _mv88e6xxx_port_vlan_del(ds, port, pvid);
1899 if (!err)
1900 err = _mv88e6xxx_port_pvid_set(ds, port, 0);
1901 mutex_unlock(&ps->smi_mutex);
1902 return err;
1903} 1895}
1904 1896
1905int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members) 1897int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members)
1906{ 1898{
1899 return 0;
1900}
1901
1902static int mv88e6xxx_setup_port_default_vlan(struct dsa_switch *ds, int port)
1903{
1907 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds); 1904 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1908 const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port; 1905 const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
1909 int err; 1906 int err;
1910 1907
1911 /* The port left the bridge, so join its reserved VLAN */
1912 mutex_lock(&ps->smi_mutex); 1908 mutex_lock(&ps->smi_mutex);
1913 err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true); 1909 err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true);
1914 if (!err) 1910 if (!err)
@@ -2192,8 +2188,7 @@ int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2192 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i)) 2188 if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
2193 continue; 2189 continue;
2194 2190
2195 /* setup the unbridged state */ 2191 ret = mv88e6xxx_setup_port_default_vlan(ds, i);
2196 ret = mv88e6xxx_port_bridge_leave(ds, i, 0);
2197 if (ret < 0) 2192 if (ret < 0)
2198 return ret; 2193 return ret;
2199 } 2194 }
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 79e1a0282163..17b2126075e0 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2461,7 +2461,7 @@ boomerang_interrupt(int irq, void *dev_id)
2461 int i; 2461 int i;
2462 pci_unmap_single(VORTEX_PCI(vp), 2462 pci_unmap_single(VORTEX_PCI(vp),
2463 le32_to_cpu(vp->tx_ring[entry].frag[0].addr), 2463 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
2464 le32_to_cpu(vp->tx_ring[entry].frag[0].length), 2464 le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
2465 PCI_DMA_TODEVICE); 2465 PCI_DMA_TODEVICE);
2466 2466
2467 for (i=1; i<=skb_shinfo(skb)->nr_frags; i++) 2467 for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 2777289a26c0..2f79d29f17f2 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1501,6 +1501,7 @@ static const struct pcmcia_device_id pcnet_ids[] = {
1501 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a), 1501 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a),
1502 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103), 1502 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103),
1503 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121), 1503 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121),
1504 PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0009),
1504 PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941), 1505 PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941),
1505 PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e), 1506 PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e),
1506 PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b), 1507 PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b),
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 3f3bcbea15bd..0907ab6ff309 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2380,7 +2380,7 @@ static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
2380 sizeof(u32), 2380 sizeof(u32),
2381 &tx_ring->tx_status_pa, 2381 &tx_ring->tx_status_pa,
2382 GFP_KERNEL); 2382 GFP_KERNEL);
2383 if (!tx_ring->tx_status_pa) { 2383 if (!tx_ring->tx_status) {
2384 dev_err(&adapter->pdev->dev, 2384 dev_err(&adapter->pdev->dev,
2385 "Cannot alloc memory for Tx status block\n"); 2385 "Cannot alloc memory for Tx status block\n");
2386 return -ENOMEM; 2386 return -ENOMEM;
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 17472851674f..f749e4d389eb 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -193,7 +193,6 @@ static void altera_tse_mdio_destroy(struct net_device *dev)
193 priv->mdio->id); 193 priv->mdio->id);
194 194
195 mdiobus_unregister(priv->mdio); 195 mdiobus_unregister(priv->mdio);
196 kfree(priv->mdio->irq);
197 mdiobus_free(priv->mdio); 196 mdiobus_free(priv->mdio);
198 priv->mdio = NULL; 197 priv->mdio = NULL;
199} 198}
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 87e727b921dc..fcdf5dda448f 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -50,8 +50,8 @@ static const char version[] =
50static void write_rreg(u_long base, u_int reg, u_int val) 50static void write_rreg(u_long base, u_int reg, u_int val)
51{ 51{
52 asm volatile( 52 asm volatile(
53 "str%?h %1, [%2] @ NET_RAP\n\t" 53 "strh %1, [%2] @ NET_RAP\n\t"
54 "str%?h %0, [%2, #-4] @ NET_RDP" 54 "strh %0, [%2, #-4] @ NET_RDP"
55 : 55 :
56 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); 56 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
57} 57}
@@ -60,8 +60,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
60{ 60{
61 unsigned short v; 61 unsigned short v;
62 asm volatile( 62 asm volatile(
63 "str%?h %1, [%2] @ NET_RAP\n\t" 63 "strh %1, [%2] @ NET_RAP\n\t"
64 "ldr%?h %0, [%2, #-4] @ NET_RDP" 64 "ldrh %0, [%2, #-4] @ NET_RDP"
65 : "=r" (v) 65 : "=r" (v)
66 : "r" (reg), "r" (ISAIO_BASE + 0x0464)); 66 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
67 return v; 67 return v;
@@ -70,8 +70,8 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
70static inline void write_ireg(u_long base, u_int reg, u_int val) 70static inline void write_ireg(u_long base, u_int reg, u_int val)
71{ 71{
72 asm volatile( 72 asm volatile(
73 "str%?h %1, [%2] @ NET_RAP\n\t" 73 "strh %1, [%2] @ NET_RAP\n\t"
74 "str%?h %0, [%2, #8] @ NET_IDP" 74 "strh %0, [%2, #8] @ NET_IDP"
75 : 75 :
76 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464)); 76 : "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
77} 77}
@@ -80,8 +80,8 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
80{ 80{
81 u_short v; 81 u_short v;
82 asm volatile( 82 asm volatile(
83 "str%?h %1, [%2] @ NAT_RAP\n\t" 83 "strh %1, [%2] @ NAT_RAP\n\t"
84 "ldr%?h %0, [%2, #8] @ NET_IDP\n\t" 84 "ldrh %0, [%2, #8] @ NET_IDP\n\t"
85 : "=r" (v) 85 : "=r" (v)
86 : "r" (reg), "r" (ISAIO_BASE + 0x0464)); 86 : "r" (reg), "r" (ISAIO_BASE + 0x0464));
87 return v; 87 return v;
@@ -96,7 +96,7 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne
96 offset = ISAMEM_BASE + (offset << 1); 96 offset = ISAMEM_BASE + (offset << 1);
97 length = (length + 1) & ~1; 97 length = (length + 1) & ~1;
98 if ((int)buf & 2) { 98 if ((int)buf & 2) {
99 asm volatile("str%?h %2, [%0], #4" 99 asm volatile("strh %2, [%0], #4"
100 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); 100 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
101 buf += 2; 101 buf += 2;
102 length -= 2; 102 length -= 2;
@@ -104,20 +104,20 @@ am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigne
104 while (length > 8) { 104 while (length > 8) {
105 register unsigned int tmp asm("r2"), tmp2 asm("r3"); 105 register unsigned int tmp asm("r2"), tmp2 asm("r3");
106 asm volatile( 106 asm volatile(
107 "ldm%?ia %0!, {%1, %2}" 107 "ldmia %0!, {%1, %2}"
108 : "+r" (buf), "=&r" (tmp), "=&r" (tmp2)); 108 : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
109 length -= 8; 109 length -= 8;
110 asm volatile( 110 asm volatile(
111 "str%?h %1, [%0], #4\n\t" 111 "strh %1, [%0], #4\n\t"
112 "mov%? %1, %1, lsr #16\n\t" 112 "mov %1, %1, lsr #16\n\t"
113 "str%?h %1, [%0], #4\n\t" 113 "strh %1, [%0], #4\n\t"
114 "str%?h %2, [%0], #4\n\t" 114 "strh %2, [%0], #4\n\t"
115 "mov%? %2, %2, lsr #16\n\t" 115 "mov %2, %2, lsr #16\n\t"
116 "str%?h %2, [%0], #4" 116 "strh %2, [%0], #4"
117 : "+r" (offset), "=&r" (tmp), "=&r" (tmp2)); 117 : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
118 } 118 }
119 while (length > 0) { 119 while (length > 0) {
120 asm volatile("str%?h %2, [%0], #4" 120 asm volatile("strh %2, [%0], #4"
121 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); 121 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
122 buf += 2; 122 buf += 2;
123 length -= 2; 123 length -= 2;
@@ -132,23 +132,23 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
132 if ((int)buf & 2) { 132 if ((int)buf & 2) {
133 unsigned int tmp; 133 unsigned int tmp;
134 asm volatile( 134 asm volatile(
135 "ldr%?h %2, [%0], #4\n\t" 135 "ldrh %2, [%0], #4\n\t"
136 "str%?b %2, [%1], #1\n\t" 136 "strb %2, [%1], #1\n\t"
137 "mov%? %2, %2, lsr #8\n\t" 137 "mov %2, %2, lsr #8\n\t"
138 "str%?b %2, [%1], #1" 138 "strb %2, [%1], #1"
139 : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf)); 139 : "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
140 length -= 2; 140 length -= 2;
141 } 141 }
142 while (length > 8) { 142 while (length > 8) {
143 register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3; 143 register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
144 asm volatile( 144 asm volatile(
145 "ldr%?h %2, [%0], #4\n\t" 145 "ldrh %2, [%0], #4\n\t"
146 "ldr%?h %4, [%0], #4\n\t" 146 "ldrh %4, [%0], #4\n\t"
147 "ldr%?h %3, [%0], #4\n\t" 147 "ldrh %3, [%0], #4\n\t"
148 "orr%? %2, %2, %4, lsl #16\n\t" 148 "orr %2, %2, %4, lsl #16\n\t"
149 "ldr%?h %4, [%0], #4\n\t" 149 "ldrh %4, [%0], #4\n\t"
150 "orr%? %3, %3, %4, lsl #16\n\t" 150 "orr %3, %3, %4, lsl #16\n\t"
151 "stm%?ia %1!, {%2, %3}" 151 "stmia %1!, {%2, %3}"
152 : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3) 152 : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
153 : "0" (offset), "1" (buf)); 153 : "0" (offset), "1" (buf));
154 length -= 8; 154 length -= 8;
@@ -156,10 +156,10 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
156 while (length > 0) { 156 while (length > 0) {
157 unsigned int tmp; 157 unsigned int tmp;
158 asm volatile( 158 asm volatile(
159 "ldr%?h %2, [%0], #4\n\t" 159 "ldrh %2, [%0], #4\n\t"
160 "str%?b %2, [%1], #1\n\t" 160 "strb %2, [%1], #1\n\t"
161 "mov%? %2, %2, lsr #8\n\t" 161 "mov %2, %2, lsr #8\n\t"
162 "str%?b %2, [%1], #1" 162 "strb %2, [%1], #1"
163 : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf)); 163 : "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
164 length -= 2; 164 length -= 2;
165 } 165 }
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 256f590f6bb1..3a7ebfdda57d 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -547,8 +547,8 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
547 /* Make certain the data structures used by the LANCE are aligned and DMAble. */ 547 /* Make certain the data structures used by the LANCE are aligned and DMAble. */
548 548
549 lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL); 549 lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
550 if(lp==NULL) 550 if (!lp)
551 return -ENODEV; 551 return -ENOMEM;
552 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp); 552 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
553 dev->ml_priv = lp; 553 dev->ml_priv = lp;
554 lp->name = chipname; 554 lp->name = chipname;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index abe1eabc0171..6446af1403f7 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -163,7 +163,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
163 struct sk_buff *skb = tx_buff->skb; 163 struct sk_buff *skb = tx_buff->skb;
164 unsigned int info = le32_to_cpu(txbd->info); 164 unsigned int info = le32_to_cpu(txbd->info);
165 165
166 if ((info & FOR_EMAC) || !txbd->data) 166 if ((info & FOR_EMAC) || !txbd->data || !skb)
167 break; 167 break;
168 168
169 if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) { 169 if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
@@ -191,6 +191,7 @@ static void arc_emac_tx_clean(struct net_device *ndev)
191 191
192 txbd->data = 0; 192 txbd->data = 0;
193 txbd->info = 0; 193 txbd->info = 0;
194 tx_buff->skb = NULL;
194 195
195 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM; 196 *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
196 } 197 }
@@ -446,6 +447,9 @@ static int arc_emac_open(struct net_device *ndev)
446 *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM; 447 *last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
447 } 448 }
448 449
450 priv->txbd_curr = 0;
451 priv->txbd_dirty = 0;
452
449 /* Clean Tx BD's */ 453 /* Clean Tx BD's */
450 memset(priv->txbd, 0, TX_RING_SZ); 454 memset(priv->txbd, 0, TX_RING_SZ);
451 455
@@ -514,6 +518,64 @@ static void arc_emac_set_rx_mode(struct net_device *ndev)
514} 518}
515 519
516/** 520/**
521 * arc_free_tx_queue - free skb from tx queue
522 * @ndev: Pointer to the network device.
523 *
524 * This function must be called while EMAC disable
525 */
526static void arc_free_tx_queue(struct net_device *ndev)
527{
528 struct arc_emac_priv *priv = netdev_priv(ndev);
529 unsigned int i;
530
531 for (i = 0; i < TX_BD_NUM; i++) {
532 struct arc_emac_bd *txbd = &priv->txbd[i];
533 struct buffer_state *tx_buff = &priv->tx_buff[i];
534
535 if (tx_buff->skb) {
536 dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
537 dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
538
539 /* return the sk_buff to system */
540 dev_kfree_skb_irq(tx_buff->skb);
541 }
542
543 txbd->info = 0;
544 txbd->data = 0;
545 tx_buff->skb = NULL;
546 }
547}
548
549/**
550 * arc_free_rx_queue - free skb from rx queue
551 * @ndev: Pointer to the network device.
552 *
553 * This function must be called while EMAC disable
554 */
555static void arc_free_rx_queue(struct net_device *ndev)
556{
557 struct arc_emac_priv *priv = netdev_priv(ndev);
558 unsigned int i;
559
560 for (i = 0; i < RX_BD_NUM; i++) {
561 struct arc_emac_bd *rxbd = &priv->rxbd[i];
562 struct buffer_state *rx_buff = &priv->rx_buff[i];
563
564 if (rx_buff->skb) {
565 dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
566 dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
567
568 /* return the sk_buff to system */
569 dev_kfree_skb_irq(rx_buff->skb);
570 }
571
572 rxbd->info = 0;
573 rxbd->data = 0;
574 rx_buff->skb = NULL;
575 }
576}
577
578/**
517 * arc_emac_stop - Close the network device. 579 * arc_emac_stop - Close the network device.
518 * @ndev: Pointer to the network device. 580 * @ndev: Pointer to the network device.
519 * 581 *
@@ -534,6 +596,10 @@ static int arc_emac_stop(struct net_device *ndev)
534 /* Disable EMAC */ 596 /* Disable EMAC */
535 arc_reg_clr(priv, R_CTRL, EN_MASK); 597 arc_reg_clr(priv, R_CTRL, EN_MASK);
536 598
599 /* Return the sk_buff to system */
600 arc_free_tx_queue(ndev);
601 arc_free_rx_queue(ndev);
602
537 return 0; 603 return 0;
538} 604}
539 605
@@ -610,7 +676,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
610 dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr); 676 dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
611 dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len); 677 dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
612 678
613 priv->tx_buff[*txbd_curr].skb = skb;
614 priv->txbd[*txbd_curr].data = cpu_to_le32(addr); 679 priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
615 680
616 /* Make sure pointer to data buffer is set */ 681 /* Make sure pointer to data buffer is set */
@@ -620,6 +685,11 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
620 685
621 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len); 686 *info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
622 687
688 /* Make sure info word is set */
689 wmb();
690
691 priv->tx_buff[*txbd_curr].skb = skb;
692
623 /* Increment index to point to the next BD */ 693 /* Increment index to point to the next BD */
624 *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM; 694 *txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
625 695
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index f71ab2647a3b..08a23e6b60e9 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -1460,7 +1460,19 @@ static int nb8800_probe(struct platform_device *pdev)
1460 goto err_disable_clk; 1460 goto err_disable_clk;
1461 } 1461 }
1462 1462
1463 priv->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); 1463 if (of_phy_is_fixed_link(pdev->dev.of_node)) {
1464 ret = of_phy_register_fixed_link(pdev->dev.of_node);
1465 if (ret < 0) {
1466 dev_err(&pdev->dev, "bad fixed-link spec\n");
1467 goto err_free_bus;
1468 }
1469 priv->phy_node = of_node_get(pdev->dev.of_node);
1470 }
1471
1472 if (!priv->phy_node)
1473 priv->phy_node = of_parse_phandle(pdev->dev.of_node,
1474 "phy-handle", 0);
1475
1464 if (!priv->phy_node) { 1476 if (!priv->phy_node) {
1465 dev_err(&pdev->dev, "no PHY specified\n"); 1477 dev_err(&pdev->dev, "no PHY specified\n");
1466 ret = -ENODEV; 1478 ret = -ENODEV;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 27aa0802d87d..91874d24fd56 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -4896,9 +4896,9 @@ struct c2s_pri_trans_table_entry {
4896 * cfc delete event data 4896 * cfc delete event data
4897 */ 4897 */
4898struct cfc_del_event_data { 4898struct cfc_del_event_data {
4899 u32 cid; 4899 __le32 cid;
4900 u32 reserved0; 4900 __le32 reserved0;
4901 u32 reserved1; 4901 __le32 reserved1;
4902}; 4902};
4903 4903
4904 4904
@@ -5114,15 +5114,9 @@ struct vf_pf_channel_zone_trigger {
5114 * zone that triggers the in-bound interrupt 5114 * zone that triggers the in-bound interrupt
5115 */ 5115 */
5116struct trigger_vf_zone { 5116struct trigger_vf_zone {
5117#if defined(__BIG_ENDIAN)
5118 u16 reserved1;
5119 u8 reserved0;
5120 struct vf_pf_channel_zone_trigger vf_pf_channel;
5121#elif defined(__LITTLE_ENDIAN)
5122 struct vf_pf_channel_zone_trigger vf_pf_channel; 5117 struct vf_pf_channel_zone_trigger vf_pf_channel;
5123 u8 reserved0; 5118 u8 reserved0;
5124 u16 reserved1; 5119 u16 reserved1;
5125#endif
5126 u32 reserved2; 5120 u32 reserved2;
5127}; 5121};
5128 5122
@@ -5207,9 +5201,9 @@ struct e2_integ_data {
5207 * set mac event data 5201 * set mac event data
5208 */ 5202 */
5209struct eth_event_data { 5203struct eth_event_data {
5210 u32 echo; 5204 __le32 echo;
5211 u32 reserved0; 5205 __le32 reserved0;
5212 u32 reserved1; 5206 __le32 reserved1;
5213}; 5207};
5214 5208
5215 5209
@@ -5219,9 +5213,9 @@ struct eth_event_data {
5219struct vf_pf_event_data { 5213struct vf_pf_event_data {
5220 u8 vf_id; 5214 u8 vf_id;
5221 u8 reserved0; 5215 u8 reserved0;
5222 u16 reserved1; 5216 __le16 reserved1;
5223 u32 msg_addr_lo; 5217 __le32 msg_addr_lo;
5224 u32 msg_addr_hi; 5218 __le32 msg_addr_hi;
5225}; 5219};
5226 5220
5227/* 5221/*
@@ -5230,9 +5224,9 @@ struct vf_pf_event_data {
5230struct vf_flr_event_data { 5224struct vf_flr_event_data {
5231 u8 vf_id; 5225 u8 vf_id;
5232 u8 reserved0; 5226 u8 reserved0;
5233 u16 reserved1; 5227 __le16 reserved1;
5234 u32 reserved2; 5228 __le32 reserved2;
5235 u32 reserved3; 5229 __le32 reserved3;
5236}; 5230};
5237 5231
5238/* 5232/*
@@ -5241,9 +5235,9 @@ struct vf_flr_event_data {
5241struct malicious_vf_event_data { 5235struct malicious_vf_event_data {
5242 u8 vf_id; 5236 u8 vf_id;
5243 u8 err_id; 5237 u8 err_id;
5244 u16 reserved1; 5238 __le16 reserved1;
5245 u32 reserved2; 5239 __le32 reserved2;
5246 u32 reserved3; 5240 __le32 reserved3;
5247}; 5241};
5248 5242
5249/* 5243/*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d946bba43726..1fb80100e5e7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6185,26 +6185,80 @@ static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
6185 shift -= 4; 6185 shift -= 4;
6186 digit = ((num & mask) >> shift); 6186 digit = ((num & mask) >> shift);
6187 if (digit == 0 && remove_leading_zeros) { 6187 if (digit == 0 && remove_leading_zeros) {
6188 mask = mask >> 4; 6188 *str_ptr = '0';
6189 continue; 6189 } else {
6190 } else if (digit < 0xa) 6190 if (digit < 0xa)
6191 *str_ptr = digit + '0'; 6191 *str_ptr = digit + '0';
6192 else 6192 else
6193 *str_ptr = digit - 0xa + 'a'; 6193 *str_ptr = digit - 0xa + 'a';
6194 remove_leading_zeros = 0; 6194
6195 str_ptr++; 6195 remove_leading_zeros = 0;
6196 (*len)--; 6196 str_ptr++;
6197 (*len)--;
6198 }
6197 mask = mask >> 4; 6199 mask = mask >> 4;
6198 if (shift == 4*4) { 6200 if (shift == 4*4) {
6201 if (remove_leading_zeros) {
6202 str_ptr++;
6203 (*len)--;
6204 }
6199 *str_ptr = '.'; 6205 *str_ptr = '.';
6200 str_ptr++; 6206 str_ptr++;
6201 (*len)--; 6207 (*len)--;
6202 remove_leading_zeros = 1; 6208 remove_leading_zeros = 1;
6203 } 6209 }
6204 } 6210 }
6211 if (remove_leading_zeros)
6212 (*len)--;
6205 return 0; 6213 return 0;
6206} 6214}
6207 6215
6216static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
6217{
6218 u8 *str_ptr = str;
6219 u32 mask = 0x00f00000;
6220 u8 shift = 8*3;
6221 u8 digit;
6222 u8 remove_leading_zeros = 1;
6223
6224 if (*len < 10) {
6225 /* Need more than 10chars for this format */
6226 *str_ptr = '\0';
6227 (*len)--;
6228 return -EINVAL;
6229 }
6230
6231 while (shift > 0) {
6232 shift -= 4;
6233 digit = ((num & mask) >> shift);
6234 if (digit == 0 && remove_leading_zeros) {
6235 *str_ptr = '0';
6236 } else {
6237 if (digit < 0xa)
6238 *str_ptr = digit + '0';
6239 else
6240 *str_ptr = digit - 0xa + 'a';
6241
6242 remove_leading_zeros = 0;
6243 str_ptr++;
6244 (*len)--;
6245 }
6246 mask = mask >> 4;
6247 if ((shift == 4*4) || (shift == 4*2)) {
6248 if (remove_leading_zeros) {
6249 str_ptr++;
6250 (*len)--;
6251 }
6252 *str_ptr = '.';
6253 str_ptr++;
6254 (*len)--;
6255 remove_leading_zeros = 1;
6256 }
6257 }
6258 if (remove_leading_zeros)
6259 (*len)--;
6260 return 0;
6261}
6208 6262
6209static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len) 6263static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
6210{ 6264{
@@ -9677,8 +9731,9 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9677 9731
9678 if (bnx2x_is_8483x_8485x(phy)) { 9732 if (bnx2x_is_8483x_8485x(phy)) {
9679 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1); 9733 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
9680 bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff, 9734 if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9681 phy->ver_addr); 9735 fw_ver1 &= 0xfff;
9736 bnx2x_save_spirom_version(bp, port, fw_ver1, phy->ver_addr);
9682 } else { 9737 } else {
9683 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */ 9738 /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
9684 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */ 9739 /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
@@ -9732,16 +9787,32 @@ static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
9732static void bnx2x_848xx_set_led(struct bnx2x *bp, 9787static void bnx2x_848xx_set_led(struct bnx2x *bp,
9733 struct bnx2x_phy *phy) 9788 struct bnx2x_phy *phy)
9734{ 9789{
9735 u16 val, offset, i; 9790 u16 val, led3_blink_rate, offset, i;
9736 static struct bnx2x_reg_set reg_set[] = { 9791 static struct bnx2x_reg_set reg_set[] = {
9737 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080}, 9792 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080},
9738 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018}, 9793 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018},
9739 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006}, 9794 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
9740 {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000},
9741 {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH, 9795 {MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
9742 MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ}, 9796 MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
9743 {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD} 9797 {MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
9744 }; 9798 };
9799
9800 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
9801 /* Set LED5 source */
9802 bnx2x_cl45_write(bp, phy,
9803 MDIO_PMA_DEVAD,
9804 MDIO_PMA_REG_8481_LED5_MASK,
9805 0x90);
9806 led3_blink_rate = 0x000f;
9807 } else {
9808 led3_blink_rate = 0x0000;
9809 }
9810 /* Set LED3 BLINK */
9811 bnx2x_cl45_write(bp, phy,
9812 MDIO_PMA_DEVAD,
9813 MDIO_PMA_REG_8481_LED3_BLINK,
9814 led3_blink_rate);
9815
9745 /* PHYC_CTL_LED_CTL */ 9816 /* PHYC_CTL_LED_CTL */
9746 bnx2x_cl45_read(bp, phy, 9817 bnx2x_cl45_read(bp, phy,
9747 MDIO_PMA_DEVAD, 9818 MDIO_PMA_DEVAD,
@@ -9749,6 +9820,9 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
9749 val &= 0xFE00; 9820 val &= 0xFE00;
9750 val |= 0x0092; 9821 val |= 0x0092;
9751 9822
9823 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9824 val |= 2 << 12; /* LED5 ON based on source */
9825
9752 bnx2x_cl45_write(bp, phy, 9826 bnx2x_cl45_write(bp, phy,
9753 MDIO_PMA_DEVAD, 9827 MDIO_PMA_DEVAD,
9754 MDIO_PMA_REG_8481_LINK_SIGNAL, val); 9828 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
@@ -9762,10 +9836,17 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
9762 else 9836 else
9763 offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1; 9837 offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
9764 9838
9765 /* stretch_en for LED3*/ 9839 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
9840 val = MDIO_PMA_REG_84858_ALLOW_GPHY_ACT |
9841 MDIO_PMA_REG_84823_LED3_STRETCH_EN;
9842 else
9843 val = MDIO_PMA_REG_84823_LED3_STRETCH_EN;
9844
9845 /* stretch_en for LEDs */
9766 bnx2x_cl45_read_or_write(bp, phy, 9846 bnx2x_cl45_read_or_write(bp, phy,
9767 MDIO_PMA_DEVAD, offset, 9847 MDIO_PMA_DEVAD,
9768 MDIO_PMA_REG_84823_LED3_STRETCH_EN); 9848 offset,
9849 val);
9769} 9850}
9770 9851
9771static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy, 9852static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
@@ -9775,7 +9856,7 @@ static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
9775 struct bnx2x *bp = params->bp; 9856 struct bnx2x *bp = params->bp;
9776 switch (action) { 9857 switch (action) {
9777 case PHY_INIT: 9858 case PHY_INIT:
9778 if (!bnx2x_is_8483x_8485x(phy)) { 9859 if (bnx2x_is_8483x_8485x(phy)) {
9779 /* Save spirom version */ 9860 /* Save spirom version */
9780 bnx2x_save_848xx_spirom_version(phy, bp, params->port); 9861 bnx2x_save_848xx_spirom_version(phy, bp, params->port);
9781 } 9862 }
@@ -10036,15 +10117,20 @@ static int bnx2x_84858_cmd_hdlr(struct bnx2x_phy *phy,
10036 10117
10037static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, 10118static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10038 struct link_params *params, u16 fw_cmd, 10119 struct link_params *params, u16 fw_cmd,
10039 u16 cmd_args[], int argc) 10120 u16 cmd_args[], int argc, int process)
10040{ 10121{
10041 int idx; 10122 int idx;
10042 u16 val; 10123 u16 val;
10043 struct bnx2x *bp = params->bp; 10124 struct bnx2x *bp = params->bp;
10044 /* Write CMD_OPEN_OVERRIDE to STATUS reg */ 10125 int rc = 0;
10045 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10126
10046 MDIO_848xx_CMD_HDLR_STATUS, 10127 if (process == PHY84833_MB_PROCESS2) {
10047 PHY84833_STATUS_CMD_OPEN_OVERRIDE); 10128 /* Write CMD_OPEN_OVERRIDE to STATUS reg */
10129 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10130 MDIO_848xx_CMD_HDLR_STATUS,
10131 PHY84833_STATUS_CMD_OPEN_OVERRIDE);
10132 }
10133
10048 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { 10134 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
10049 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 10135 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
10050 MDIO_848xx_CMD_HDLR_STATUS, &val); 10136 MDIO_848xx_CMD_HDLR_STATUS, &val);
@@ -10054,15 +10140,27 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10054 } 10140 }
10055 if (idx >= PHY848xx_CMDHDLR_WAIT) { 10141 if (idx >= PHY848xx_CMDHDLR_WAIT) {
10056 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); 10142 DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
10143 /* if the status is CMD_COMPLETE_PASS or CMD_COMPLETE_ERROR
10144 * clear the status to CMD_CLEAR_COMPLETE
10145 */
10146 if (val == PHY84833_STATUS_CMD_COMPLETE_PASS ||
10147 val == PHY84833_STATUS_CMD_COMPLETE_ERROR) {
10148 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10149 MDIO_848xx_CMD_HDLR_STATUS,
10150 PHY84833_STATUS_CMD_CLEAR_COMPLETE);
10151 }
10057 return -EINVAL; 10152 return -EINVAL;
10058 } 10153 }
10059 10154 if (process == PHY84833_MB_PROCESS1 ||
10060 /* Prepare argument(s) and issue command */ 10155 process == PHY84833_MB_PROCESS2) {
10061 for (idx = 0; idx < argc; idx++) { 10156 /* Prepare argument(s) */
10062 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10157 for (idx = 0; idx < argc; idx++) {
10063 MDIO_848xx_CMD_HDLR_DATA1 + idx, 10158 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10064 cmd_args[idx]); 10159 MDIO_848xx_CMD_HDLR_DATA1 + idx,
10160 cmd_args[idx]);
10161 }
10065 } 10162 }
10163
10066 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10164 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10067 MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd); 10165 MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
10068 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) { 10166 for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
@@ -10076,24 +10174,30 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
10076 if ((idx >= PHY848xx_CMDHDLR_WAIT) || 10174 if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
10077 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { 10175 (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
10078 DP(NETIF_MSG_LINK, "FW cmd failed.\n"); 10176 DP(NETIF_MSG_LINK, "FW cmd failed.\n");
10079 return -EINVAL; 10177 rc = -EINVAL;
10080 } 10178 }
10081 /* Gather returning data */ 10179 if (process == PHY84833_MB_PROCESS3 && rc == 0) {
10082 for (idx = 0; idx < argc; idx++) { 10180 /* Gather returning data */
10083 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 10181 for (idx = 0; idx < argc; idx++) {
10084 MDIO_848xx_CMD_HDLR_DATA1 + idx, 10182 bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
10085 &cmd_args[idx]); 10183 MDIO_848xx_CMD_HDLR_DATA1 + idx,
10184 &cmd_args[idx]);
10185 }
10086 } 10186 }
10087 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD, 10187 if (val == PHY84833_STATUS_CMD_COMPLETE_ERROR ||
10088 MDIO_848xx_CMD_HDLR_STATUS, 10188 val == PHY84833_STATUS_CMD_COMPLETE_PASS) {
10089 PHY84833_STATUS_CMD_CLEAR_COMPLETE); 10189 bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
10090 return 0; 10190 MDIO_848xx_CMD_HDLR_STATUS,
10191 PHY84833_STATUS_CMD_CLEAR_COMPLETE);
10192 }
10193 return rc;
10091} 10194}
10092 10195
10093static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy, 10196static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
10094 struct link_params *params, 10197 struct link_params *params,
10095 u16 fw_cmd, 10198 u16 fw_cmd,
10096 u16 cmd_args[], int argc) 10199 u16 cmd_args[], int argc,
10200 int process)
10097{ 10201{
10098 struct bnx2x *bp = params->bp; 10202 struct bnx2x *bp = params->bp;
10099 10203
@@ -10106,7 +10210,7 @@ static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
10106 argc); 10210 argc);
10107 } else { 10211 } else {
10108 return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args, 10212 return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
10109 argc); 10213 argc, process);
10110 } 10214 }
10111} 10215}
10112 10216
@@ -10133,7 +10237,7 @@ static int bnx2x_848xx_pair_swap_cfg(struct bnx2x_phy *phy,
10133 10237
10134 status = bnx2x_848xx_cmd_hdlr(phy, params, 10238 status = bnx2x_848xx_cmd_hdlr(phy, params,
10135 PHY848xx_CMD_SET_PAIR_SWAP, data, 10239 PHY848xx_CMD_SET_PAIR_SWAP, data,
10136 PHY848xx_CMDHDLR_MAX_ARGS); 10240 2, PHY84833_MB_PROCESS2);
10137 if (status == 0) 10241 if (status == 0)
10138 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]); 10242 DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
10139 10243
@@ -10222,8 +10326,8 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy,
10222 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n"); 10326 DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
10223 10327
10224 /* Prevent Phy from working in EEE and advertising it */ 10328 /* Prevent Phy from working in EEE and advertising it */
10225 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10329 rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
10226 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); 10330 &cmd_args, 1, PHY84833_MB_PROCESS1);
10227 if (rc) { 10331 if (rc) {
10228 DP(NETIF_MSG_LINK, "EEE disable failed.\n"); 10332 DP(NETIF_MSG_LINK, "EEE disable failed.\n");
10229 return rc; 10333 return rc;
@@ -10240,8 +10344,8 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy,
10240 struct bnx2x *bp = params->bp; 10344 struct bnx2x *bp = params->bp;
10241 u16 cmd_args = 1; 10345 u16 cmd_args = 1;
10242 10346
10243 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10347 rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
10244 PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1); 10348 &cmd_args, 1, PHY84833_MB_PROCESS1);
10245 if (rc) { 10349 if (rc) {
10246 DP(NETIF_MSG_LINK, "EEE enable failed.\n"); 10350 DP(NETIF_MSG_LINK, "EEE enable failed.\n");
10247 return rc; 10351 return rc;
@@ -10362,7 +10466,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10362 cmd_args[3] = PHY84833_CONSTANT_LATENCY; 10466 cmd_args[3] = PHY84833_CONSTANT_LATENCY;
10363 rc = bnx2x_848xx_cmd_hdlr(phy, params, 10467 rc = bnx2x_848xx_cmd_hdlr(phy, params,
10364 PHY848xx_CMD_SET_EEE_MODE, cmd_args, 10468 PHY848xx_CMD_SET_EEE_MODE, cmd_args,
10365 PHY848xx_CMDHDLR_MAX_ARGS); 10469 4, PHY84833_MB_PROCESS1);
10366 if (rc) 10470 if (rc)
10367 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); 10471 DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
10368 } 10472 }
@@ -10416,6 +10520,32 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
10416 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK; 10520 vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
10417 } 10521 }
10418 10522
10523 if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
10524 /* Additional settings for jumbo packets in 1000BASE-T mode */
10525 /* Allow rx extended length */
10526 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10527 MDIO_AN_REG_8481_AUX_CTRL, &val);
10528 val |= 0x4000;
10529 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10530 MDIO_AN_REG_8481_AUX_CTRL, val);
10531 /* TX FIFO Elasticity LSB */
10532 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10533 MDIO_AN_REG_8481_1G_100T_EXT_CTRL, &val);
10534 val |= 0x1;
10535 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10536 MDIO_AN_REG_8481_1G_100T_EXT_CTRL, val);
10537 /* TX FIFO Elasticity MSB */
10538 /* Enable expansion register 0x46 (Pattern Generator status) */
10539 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10540 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf46);
10541
10542 bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
10543 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, &val);
10544 val |= 0x4000;
10545 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
10546 MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, val);
10547 }
10548
10419 if (bnx2x_is_8483x_8485x(phy)) { 10549 if (bnx2x_is_8483x_8485x(phy)) {
10420 /* Bring PHY out of super isolate mode as the final step. */ 10550 /* Bring PHY out of super isolate mode as the final step. */
10421 bnx2x_cl45_read_and_write(bp, phy, 10551 bnx2x_cl45_read_and_write(bp, phy,
@@ -10555,6 +10685,17 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
10555 return link_up; 10685 return link_up;
10556} 10686}
10557 10687
10688static int bnx2x_8485x_format_ver(u32 raw_ver, u8 *str, u16 *len)
10689{
10690 int status = 0;
10691 u32 num;
10692
10693 num = ((raw_ver & 0xF80) >> 7) << 16 | ((raw_ver & 0x7F) << 8) |
10694 ((raw_ver & 0xF000) >> 12);
10695 status = bnx2x_3_seq_format_ver(num, str, len);
10696 return status;
10697}
10698
10558static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len) 10699static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
10559{ 10700{
10560 int status = 0; 10701 int status = 0;
@@ -10651,10 +10792,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10651 0x0); 10792 0x0);
10652 10793
10653 } else { 10794 } else {
10795 /* LED 1 OFF */
10654 bnx2x_cl45_write(bp, phy, 10796 bnx2x_cl45_write(bp, phy,
10655 MDIO_PMA_DEVAD, 10797 MDIO_PMA_DEVAD,
10656 MDIO_PMA_REG_8481_LED1_MASK, 10798 MDIO_PMA_REG_8481_LED1_MASK,
10657 0x0); 10799 0x0);
10800
10801 if (phy->type ==
10802 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10803 /* LED 2 OFF */
10804 bnx2x_cl45_write(bp, phy,
10805 MDIO_PMA_DEVAD,
10806 MDIO_PMA_REG_8481_LED2_MASK,
10807 0x0);
10808 /* LED 3 OFF */
10809 bnx2x_cl45_write(bp, phy,
10810 MDIO_PMA_DEVAD,
10811 MDIO_PMA_REG_8481_LED3_MASK,
10812 0x0);
10813 }
10658 } 10814 }
10659 break; 10815 break;
10660 case LED_MODE_FRONT_PANEL_OFF: 10816 case LED_MODE_FRONT_PANEL_OFF:
@@ -10713,6 +10869,19 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10713 MDIO_PMA_REG_8481_SIGNAL_MASK, 10869 MDIO_PMA_REG_8481_SIGNAL_MASK,
10714 0x0); 10870 0x0);
10715 } 10871 }
10872 if (phy->type ==
10873 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10874 /* LED 2 OFF */
10875 bnx2x_cl45_write(bp, phy,
10876 MDIO_PMA_DEVAD,
10877 MDIO_PMA_REG_8481_LED2_MASK,
10878 0x0);
10879 /* LED 3 OFF */
10880 bnx2x_cl45_write(bp, phy,
10881 MDIO_PMA_DEVAD,
10882 MDIO_PMA_REG_8481_LED3_MASK,
10883 0x0);
10884 }
10716 } 10885 }
10717 break; 10886 break;
10718 case LED_MODE_ON: 10887 case LED_MODE_ON:
@@ -10776,6 +10945,25 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10776 params->port*4, 10945 params->port*4,
10777 NIG_MASK_MI_INT); 10946 NIG_MASK_MI_INT);
10778 } 10947 }
10948 }
10949 if (phy->type ==
10950 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
10951 /* Tell LED3 to constant on */
10952 bnx2x_cl45_read(bp, phy,
10953 MDIO_PMA_DEVAD,
10954 MDIO_PMA_REG_8481_LINK_SIGNAL,
10955 &val);
10956 val &= ~(7<<6);
10957 val |= (2<<6); /* A83B[8:6]= 2 */
10958 bnx2x_cl45_write(bp, phy,
10959 MDIO_PMA_DEVAD,
10960 MDIO_PMA_REG_8481_LINK_SIGNAL,
10961 val);
10962 bnx2x_cl45_write(bp, phy,
10963 MDIO_PMA_DEVAD,
10964 MDIO_PMA_REG_8481_LED3_MASK,
10965 0x20);
10966 } else {
10779 bnx2x_cl45_write(bp, phy, 10967 bnx2x_cl45_write(bp, phy,
10780 MDIO_PMA_DEVAD, 10968 MDIO_PMA_DEVAD,
10781 MDIO_PMA_REG_8481_SIGNAL_MASK, 10969 MDIO_PMA_REG_8481_SIGNAL_MASK,
@@ -10854,6 +11042,17 @@ static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
10854 MDIO_PMA_REG_8481_LINK_SIGNAL, 11042 MDIO_PMA_REG_8481_LINK_SIGNAL,
10855 val); 11043 val);
10856 if (phy->type == 11044 if (phy->type ==
11045 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
11046 bnx2x_cl45_write(bp, phy,
11047 MDIO_PMA_DEVAD,
11048 MDIO_PMA_REG_8481_LED2_MASK,
11049 0x18);
11050 bnx2x_cl45_write(bp, phy,
11051 MDIO_PMA_DEVAD,
11052 MDIO_PMA_REG_8481_LED3_MASK,
11053 0x06);
11054 }
11055 if (phy->type ==
10857 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) { 11056 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
10858 /* Restore LED4 source to external link, 11057 /* Restore LED4 source to external link,
10859 * and re-enable interrupts. 11058 * and re-enable interrupts.
@@ -11982,7 +12181,7 @@ static const struct bnx2x_phy phy_84858 = {
11982 .read_status = (read_status_t)bnx2x_848xx_read_status, 12181 .read_status = (read_status_t)bnx2x_848xx_read_status,
11983 .link_reset = (link_reset_t)bnx2x_848x3_link_reset, 12182 .link_reset = (link_reset_t)bnx2x_848x3_link_reset,
11984 .config_loopback = (config_loopback_t)NULL, 12183 .config_loopback = (config_loopback_t)NULL,
11985 .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver, 12184 .format_fw_ver = (format_fw_ver_t)bnx2x_8485x_format_ver,
11986 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy, 12185 .hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
11987 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led, 12186 .set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
11988 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func 12187 .phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
@@ -13807,8 +14006,10 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
13807 if (CHIP_IS_E3(bp)) { 14006 if (CHIP_IS_E3(bp)) {
13808 struct bnx2x_phy *phy = &params->phy[INT_PHY]; 14007 struct bnx2x_phy *phy = &params->phy[INT_PHY];
13809 bnx2x_set_aer_mmd(params, phy); 14008 bnx2x_set_aer_mmd(params, phy);
13810 if ((phy->supported & SUPPORTED_20000baseKR2_Full) && 14009 if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
13811 (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) 14010 (phy->speed_cap_mask &
14011 PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
14012 (phy->req_line_speed == SPEED_20000))
13812 bnx2x_check_kr2_wa(params, vars, phy); 14013 bnx2x_check_kr2_wa(params, vars, phy);
13813 bnx2x_check_over_curr(params, vars); 14014 bnx2x_check_over_curr(params, vars);
13814 if (vars->rx_tx_asic_rst) 14015 if (vars->rx_tx_asic_rst)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6c4e3a69976f..2bf9c871144f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -5280,14 +5280,14 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5280{ 5280{
5281 unsigned long ramrod_flags = 0; 5281 unsigned long ramrod_flags = 0;
5282 int rc = 0; 5282 int rc = 0;
5283 u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK; 5283 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
5284 u32 cid = echo & BNX2X_SWCID_MASK;
5284 struct bnx2x_vlan_mac_obj *vlan_mac_obj; 5285 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
5285 5286
5286 /* Always push next commands out, don't wait here */ 5287 /* Always push next commands out, don't wait here */
5287 __set_bit(RAMROD_CONT, &ramrod_flags); 5288 __set_bit(RAMROD_CONT, &ramrod_flags);
5288 5289
5289 switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo) 5290 switch (echo >> BNX2X_SWCID_SHIFT) {
5290 >> BNX2X_SWCID_SHIFT) {
5291 case BNX2X_FILTER_MAC_PENDING: 5291 case BNX2X_FILTER_MAC_PENDING:
5292 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); 5292 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
5293 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp))) 5293 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
@@ -5308,8 +5308,7 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
5308 bnx2x_handle_mcast_eqe(bp); 5308 bnx2x_handle_mcast_eqe(bp);
5309 return; 5309 return;
5310 default: 5310 default:
5311 BNX2X_ERR("Unsupported classification command: %d\n", 5311 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
5312 elem->message.data.eth_event.echo);
5313 return; 5312 return;
5314 } 5313 }
5315 5314
@@ -5478,9 +5477,6 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5478 goto next_spqe; 5477 goto next_spqe;
5479 } 5478 }
5480 5479
5481 /* elem CID originates from FW; actually LE */
5482 cid = SW_CID((__force __le32)
5483 elem->message.data.cfc_del_event.cid);
5484 opcode = elem->message.opcode; 5480 opcode = elem->message.opcode;
5485 5481
5486 /* handle eq element */ 5482 /* handle eq element */
@@ -5503,6 +5499,10 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5503 * we may want to verify here that the bp state is 5499 * we may want to verify here that the bp state is
5504 * HALTING 5500 * HALTING
5505 */ 5501 */
5502
5503 /* elem CID originates from FW; actually LE */
5504 cid = SW_CID(elem->message.data.cfc_del_event.cid);
5505
5506 DP(BNX2X_MSG_SP, 5506 DP(BNX2X_MSG_SP,
5507 "got delete ramrod for MULTI[%d]\n", cid); 5507 "got delete ramrod for MULTI[%d]\n", cid);
5508 5508
@@ -5596,10 +5596,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
5596 BNX2X_STATE_OPENING_WAIT4_PORT): 5596 BNX2X_STATE_OPENING_WAIT4_PORT):
5597 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES | 5597 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5598 BNX2X_STATE_CLOSING_WAIT4_HALT): 5598 BNX2X_STATE_CLOSING_WAIT4_HALT):
5599 cid = elem->message.data.eth_event.echo &
5600 BNX2X_SWCID_MASK;
5601 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n", 5599 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
5602 cid); 5600 SW_CID(elem->message.data.eth_event.echo));
5603 rss_raw->clear_pending(rss_raw); 5601 rss_raw->clear_pending(rss_raw);
5604 break; 5602 break;
5605 5603
@@ -5684,7 +5682,7 @@ static void bnx2x_sp_task(struct work_struct *work)
5684 if (status & BNX2X_DEF_SB_IDX) { 5682 if (status & BNX2X_DEF_SB_IDX) {
5685 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp); 5683 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
5686 5684
5687 if (FCOE_INIT(bp) && 5685 if (FCOE_INIT(bp) &&
5688 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 5686 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5689 /* Prevent local bottom-halves from running as 5687 /* Prevent local bottom-halves from running as
5690 * we are going to change the local NAPI list. 5688 * we are going to change the local NAPI list.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 4dead49bd5cb..a43dea259b12 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7296,6 +7296,8 @@ Theotherbitsarereservedandshouldbezero*/
7296#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3 7296#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
7297#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec 7297#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
7298#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080 7298#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
7299/* BCM84858 only */
7300#define MDIO_PMA_REG_84858_ALLOW_GPHY_ACT 0x8000
7299 7301
7300/* BCM84833 only */ 7302/* BCM84833 only */
7301#define MDIO_84833_TOP_CFG_FW_REV 0x400f 7303#define MDIO_84833_TOP_CFG_FW_REV 0x400f
@@ -7337,6 +7339,10 @@ Theotherbitsarereservedandshouldbezero*/
7337#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040 7339#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
7338#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080 7340#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
7339#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5 7341#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
7342/* Mailbox Process */
7343#define PHY84833_MB_PROCESS1 1
7344#define PHY84833_MB_PROCESS2 2
7345#define PHY84833_MB_PROCESS3 3
7340 7346
7341/* Mailbox status set used by 84858 only */ 7347/* Mailbox status set used by 84858 only */
7342#define PHY84858_STATUS_CMD_RECEIVED 0x0001 7348#define PHY84858_STATUS_CMD_RECEIVED 0x0001
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 9d027348cd09..632daff117d3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1672,11 +1672,12 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1672{ 1672{
1673 unsigned long ramrod_flags = 0; 1673 unsigned long ramrod_flags = 0;
1674 int rc = 0; 1674 int rc = 0;
1675 u32 echo = le32_to_cpu(elem->message.data.eth_event.echo);
1675 1676
1676 /* Always push next commands out, don't wait here */ 1677 /* Always push next commands out, don't wait here */
1677 set_bit(RAMROD_CONT, &ramrod_flags); 1678 set_bit(RAMROD_CONT, &ramrod_flags);
1678 1679
1679 switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) { 1680 switch (echo >> BNX2X_SWCID_SHIFT) {
1680 case BNX2X_FILTER_MAC_PENDING: 1681 case BNX2X_FILTER_MAC_PENDING:
1681 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem, 1682 rc = vfq->mac_obj.complete(bp, &vfq->mac_obj, elem,
1682 &ramrod_flags); 1683 &ramrod_flags);
@@ -1686,8 +1687,7 @@ void bnx2x_vf_handle_classification_eqe(struct bnx2x *bp,
1686 &ramrod_flags); 1687 &ramrod_flags);
1687 break; 1688 break;
1688 default: 1689 default:
1689 BNX2X_ERR("Unsupported classification command: %d\n", 1690 BNX2X_ERR("Unsupported classification command: 0x%x\n", echo);
1690 elem->message.data.eth_event.echo);
1691 return; 1691 return;
1692 } 1692 }
1693 if (rc < 0) 1693 if (rc < 0)
@@ -1747,16 +1747,14 @@ int bnx2x_iov_eq_sp_event(struct bnx2x *bp, union event_ring_elem *elem)
1747 1747
1748 switch (opcode) { 1748 switch (opcode) {
1749 case EVENT_RING_OPCODE_CFC_DEL: 1749 case EVENT_RING_OPCODE_CFC_DEL:
1750 cid = SW_CID((__force __le32) 1750 cid = SW_CID(elem->message.data.cfc_del_event.cid);
1751 elem->message.data.cfc_del_event.cid);
1752 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid); 1751 DP(BNX2X_MSG_IOV, "checking cfc-del comp cid=%d\n", cid);
1753 break; 1752 break;
1754 case EVENT_RING_OPCODE_CLASSIFICATION_RULES: 1753 case EVENT_RING_OPCODE_CLASSIFICATION_RULES:
1755 case EVENT_RING_OPCODE_MULTICAST_RULES: 1754 case EVENT_RING_OPCODE_MULTICAST_RULES:
1756 case EVENT_RING_OPCODE_FILTERS_RULES: 1755 case EVENT_RING_OPCODE_FILTERS_RULES:
1757 case EVENT_RING_OPCODE_RSS_UPDATE_RULES: 1756 case EVENT_RING_OPCODE_RSS_UPDATE_RULES:
1758 cid = (elem->message.data.eth_event.echo & 1757 cid = SW_CID(elem->message.data.eth_event.echo);
1759 BNX2X_SWCID_MASK);
1760 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid); 1758 DP(BNX2X_MSG_IOV, "checking filtering comp cid=%d\n", cid);
1761 break; 1759 break;
1762 case EVENT_RING_OPCODE_VF_FLR: 1760 case EVENT_RING_OPCODE_VF_FLR:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 1374e5394a79..bfae300cf25f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -2187,8 +2187,10 @@ void bnx2x_vf_mbx_schedule(struct bnx2x *bp,
2187 2187
2188 /* Update VFDB with current message and schedule its handling */ 2188 /* Update VFDB with current message and schedule its handling */
2189 mutex_lock(&BP_VFDB(bp)->event_mutex); 2189 mutex_lock(&BP_VFDB(bp)->event_mutex);
2190 BP_VF_MBX(bp, vf_idx)->vf_addr_hi = vfpf_event->msg_addr_hi; 2190 BP_VF_MBX(bp, vf_idx)->vf_addr_hi =
2191 BP_VF_MBX(bp, vf_idx)->vf_addr_lo = vfpf_event->msg_addr_lo; 2191 le32_to_cpu(vfpf_event->msg_addr_hi);
2192 BP_VF_MBX(bp, vf_idx)->vf_addr_lo =
2193 le32_to_cpu(vfpf_event->msg_addr_lo);
2192 BP_VFDB(bp)->event_occur |= (1ULL << vf_idx); 2194 BP_VFDB(bp)->event_occur |= (1ULL << vf_idx);
2193 mutex_unlock(&BP_VFDB(bp)->event_mutex); 2195 mutex_unlock(&BP_VFDB(bp)->event_mutex);
2194 2196
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5dc89e527e7d..82f191382989 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -69,7 +69,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
69#define BNXT_RX_DMA_OFFSET NET_SKB_PAD 69#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
70#define BNXT_RX_COPY_THRESH 256 70#define BNXT_RX_COPY_THRESH 256
71 71
72#define BNXT_TX_PUSH_THRESH 92 72#define BNXT_TX_PUSH_THRESH 164
73 73
74enum board_idx { 74enum board_idx {
75 BCM57301, 75 BCM57301,
@@ -223,11 +223,12 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
223 } 223 }
224 224
225 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) { 225 if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
226 struct tx_push_bd *push = txr->tx_push; 226 struct tx_push_buffer *tx_push_buf = txr->tx_push;
227 struct tx_bd *tx_push = &push->txbd1; 227 struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
228 struct tx_bd_ext *tx_push1 = &push->txbd2; 228 struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
229 void *pdata = tx_push1 + 1; 229 void *pdata = tx_push_buf->data;
230 int j; 230 u64 *end;
231 int j, push_len;
231 232
232 /* Set COAL_NOW to be ready quickly for the next push */ 233 /* Set COAL_NOW to be ready quickly for the next push */
233 tx_push->tx_bd_len_flags_type = 234 tx_push->tx_bd_len_flags_type =
@@ -247,6 +248,10 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
247 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags); 248 tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
248 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action); 249 tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
249 250
251 end = pdata + length;
252 end = PTR_ALIGN(end, 8) - 1;
253 *end = 0;
254
250 skb_copy_from_linear_data(skb, pdata, len); 255 skb_copy_from_linear_data(skb, pdata, len);
251 pdata += len; 256 pdata += len;
252 for (j = 0; j < last_frag; j++) { 257 for (j = 0; j < last_frag; j++) {
@@ -261,22 +266,29 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
261 pdata += skb_frag_size(frag); 266 pdata += skb_frag_size(frag);
262 } 267 }
263 268
264 memcpy(txbd, tx_push, sizeof(*txbd)); 269 txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
270 txbd->tx_bd_haddr = txr->data_mapping;
265 prod = NEXT_TX(prod); 271 prod = NEXT_TX(prod);
266 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)]; 272 txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
267 memcpy(txbd, tx_push1, sizeof(*txbd)); 273 memcpy(txbd, tx_push1, sizeof(*txbd));
268 prod = NEXT_TX(prod); 274 prod = NEXT_TX(prod);
269 push->doorbell = 275 tx_push->doorbell =
270 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod); 276 cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
271 txr->tx_prod = prod; 277 txr->tx_prod = prod;
272 278
273 netdev_tx_sent_queue(txq, skb->len); 279 netdev_tx_sent_queue(txq, skb->len);
274 280
275 __iowrite64_copy(txr->tx_doorbell, push, 281 push_len = (length + sizeof(*tx_push) + 7) / 8;
276 (length + sizeof(*push) + 8) / 8); 282 if (push_len > 16) {
283 __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
284 __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
285 push_len - 16);
286 } else {
287 __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
288 push_len);
289 }
277 290
278 tx_buf->is_push = 1; 291 tx_buf->is_push = 1;
279
280 goto tx_done; 292 goto tx_done;
281 } 293 }
282 294
@@ -1753,7 +1765,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1753 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) + 1765 push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
1754 bp->tx_push_thresh); 1766 bp->tx_push_thresh);
1755 1767
1756 if (push_size > 128) { 1768 if (push_size > 256) {
1757 push_size = 0; 1769 push_size = 0;
1758 bp->tx_push_thresh = 0; 1770 bp->tx_push_thresh = 0;
1759 } 1771 }
@@ -1772,7 +1784,6 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1772 return rc; 1784 return rc;
1773 1785
1774 if (bp->tx_push_size) { 1786 if (bp->tx_push_size) {
1775 struct tx_bd *txbd;
1776 dma_addr_t mapping; 1787 dma_addr_t mapping;
1777 1788
1778 /* One pre-allocated DMA buffer to backup 1789 /* One pre-allocated DMA buffer to backup
@@ -1786,13 +1797,11 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
1786 if (!txr->tx_push) 1797 if (!txr->tx_push)
1787 return -ENOMEM; 1798 return -ENOMEM;
1788 1799
1789 txbd = &txr->tx_push->txbd1;
1790
1791 mapping = txr->tx_push_mapping + 1800 mapping = txr->tx_push_mapping +
1792 sizeof(struct tx_push_bd); 1801 sizeof(struct tx_push_bd);
1793 txbd->tx_bd_haddr = cpu_to_le64(mapping); 1802 txr->data_mapping = cpu_to_le64(mapping);
1794 1803
1795 memset(txbd + 1, 0, sizeof(struct tx_bd_ext)); 1804 memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
1796 } 1805 }
1797 ring->queue_id = bp->q_info[j].queue_id; 1806 ring->queue_id = bp->q_info[j].queue_id;
1798 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1)) 1807 if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -4546,20 +4555,18 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
4546 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) && 4555 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
4547 link_info->force_pause_setting != link_info->req_flow_ctrl) 4556 link_info->force_pause_setting != link_info->req_flow_ctrl)
4548 update_pause = true; 4557 update_pause = true;
4549 if (link_info->req_duplex != link_info->duplex_setting)
4550 update_link = true;
4551 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { 4558 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
4552 if (BNXT_AUTO_MODE(link_info->auto_mode)) 4559 if (BNXT_AUTO_MODE(link_info->auto_mode))
4553 update_link = true; 4560 update_link = true;
4554 if (link_info->req_link_speed != link_info->force_link_speed) 4561 if (link_info->req_link_speed != link_info->force_link_speed)
4555 update_link = true; 4562 update_link = true;
4563 if (link_info->req_duplex != link_info->duplex_setting)
4564 update_link = true;
4556 } else { 4565 } else {
4557 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE) 4566 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
4558 update_link = true; 4567 update_link = true;
4559 if (link_info->advertising != link_info->auto_link_speeds) 4568 if (link_info->advertising != link_info->auto_link_speeds)
4560 update_link = true; 4569 update_link = true;
4561 if (link_info->req_link_speed != link_info->auto_link_speed)
4562 update_link = true;
4563 } 4570 }
4564 4571
4565 if (update_link) 4572 if (update_link)
@@ -4636,7 +4643,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4636 if (link_re_init) { 4643 if (link_re_init) {
4637 rc = bnxt_update_phy_setting(bp); 4644 rc = bnxt_update_phy_setting(bp);
4638 if (rc) 4645 if (rc)
4639 goto open_err; 4646 netdev_warn(bp->dev, "failed to update phy settings\n");
4640 } 4647 }
4641 4648
4642 if (irq_re_init) { 4649 if (irq_re_init) {
@@ -4654,6 +4661,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
4654 /* Enable TX queues */ 4661 /* Enable TX queues */
4655 bnxt_tx_enable(bp); 4662 bnxt_tx_enable(bp);
4656 mod_timer(&bp->timer, jiffies + bp->current_interval); 4663 mod_timer(&bp->timer, jiffies + bp->current_interval);
4664 bnxt_update_link(bp, true);
4657 4665
4658 return 0; 4666 return 0;
4659 4667
@@ -5670,22 +5678,16 @@ static int bnxt_probe_phy(struct bnxt *bp)
5670 } 5678 }
5671 5679
5672 /*initialize the ethool setting copy with NVM settings */ 5680 /*initialize the ethool setting copy with NVM settings */
5673 if (BNXT_AUTO_MODE(link_info->auto_mode)) 5681 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
5674 link_info->autoneg |= BNXT_AUTONEG_SPEED; 5682 link_info->autoneg = BNXT_AUTONEG_SPEED |
5675 5683 BNXT_AUTONEG_FLOW_CTRL;
5676 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { 5684 link_info->advertising = link_info->auto_link_speeds;
5677 if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
5678 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
5679 link_info->req_flow_ctrl = link_info->auto_pause_setting; 5685 link_info->req_flow_ctrl = link_info->auto_pause_setting;
5680 } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) { 5686 } else {
5687 link_info->req_link_speed = link_info->force_link_speed;
5688 link_info->req_duplex = link_info->duplex_setting;
5681 link_info->req_flow_ctrl = link_info->force_pause_setting; 5689 link_info->req_flow_ctrl = link_info->force_pause_setting;
5682 } 5690 }
5683 link_info->req_duplex = link_info->duplex_setting;
5684 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
5685 link_info->req_link_speed = link_info->auto_link_speed;
5686 else
5687 link_info->req_link_speed = link_info->force_link_speed;
5688 link_info->advertising = link_info->auto_link_speeds;
5689 snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d", 5691 snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
5690 link_info->phy_ver[0], 5692 link_info->phy_ver[0],
5691 link_info->phy_ver[1], 5693 link_info->phy_ver[1],
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 8af3ca8efcef..2be51b332652 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -411,8 +411,8 @@ struct rx_tpa_end_cmp_ext {
411 411
412#define BNXT_NUM_TESTS(bp) 0 412#define BNXT_NUM_TESTS(bp) 0
413 413
414#define BNXT_DEFAULT_RX_RING_SIZE 1023 414#define BNXT_DEFAULT_RX_RING_SIZE 511
415#define BNXT_DEFAULT_TX_RING_SIZE 512 415#define BNXT_DEFAULT_TX_RING_SIZE 511
416 416
417#define MAX_TPA 64 417#define MAX_TPA 64
418 418
@@ -523,10 +523,16 @@ struct bnxt_ring_struct {
523 523
524struct tx_push_bd { 524struct tx_push_bd {
525 __le32 doorbell; 525 __le32 doorbell;
526 struct tx_bd txbd1; 526 __le32 tx_bd_len_flags_type;
527 u32 tx_bd_opaque;
527 struct tx_bd_ext txbd2; 528 struct tx_bd_ext txbd2;
528}; 529};
529 530
531struct tx_push_buffer {
532 struct tx_push_bd push_bd;
533 u32 data[25];
534};
535
530struct bnxt_tx_ring_info { 536struct bnxt_tx_ring_info {
531 struct bnxt_napi *bnapi; 537 struct bnxt_napi *bnapi;
532 u16 tx_prod; 538 u16 tx_prod;
@@ -538,8 +544,9 @@ struct bnxt_tx_ring_info {
538 544
539 dma_addr_t tx_desc_mapping[MAX_TX_PAGES]; 545 dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
540 546
541 struct tx_push_bd *tx_push; 547 struct tx_push_buffer *tx_push;
542 dma_addr_t tx_push_mapping; 548 dma_addr_t tx_push_mapping;
549 __le64 data_mapping;
543 550
544#define BNXT_DEV_STATE_CLOSING 0x1 551#define BNXT_DEV_STATE_CLOSING 0x1
545 u32 dev_state; 552 u32 dev_state;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 922b898e7a32..3238817dfd5f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -486,15 +486,8 @@ static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
486 speed_mask |= SUPPORTED_2500baseX_Full; 486 speed_mask |= SUPPORTED_2500baseX_Full;
487 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 487 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
488 speed_mask |= SUPPORTED_10000baseT_Full; 488 speed_mask |= SUPPORTED_10000baseT_Full;
489 /* TODO: support 25GB, 50GB with different cable type */
490 if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
491 speed_mask |= SUPPORTED_20000baseMLD2_Full |
492 SUPPORTED_20000baseKR2_Full;
493 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 489 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
494 speed_mask |= SUPPORTED_40000baseKR4_Full | 490 speed_mask |= SUPPORTED_40000baseCR4_Full;
495 SUPPORTED_40000baseCR4_Full |
496 SUPPORTED_40000baseSR4_Full |
497 SUPPORTED_40000baseLR4_Full;
498 491
499 return speed_mask; 492 return speed_mask;
500} 493}
@@ -514,15 +507,8 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
514 speed_mask |= ADVERTISED_2500baseX_Full; 507 speed_mask |= ADVERTISED_2500baseX_Full;
515 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB) 508 if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
516 speed_mask |= ADVERTISED_10000baseT_Full; 509 speed_mask |= ADVERTISED_10000baseT_Full;
517 /* TODO: how to advertise 20, 25, 40, 50GB with different cable type ?*/
518 if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
519 speed_mask |= ADVERTISED_20000baseMLD2_Full |
520 ADVERTISED_20000baseKR2_Full;
521 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB) 510 if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
522 speed_mask |= ADVERTISED_40000baseKR4_Full | 511 speed_mask |= ADVERTISED_40000baseCR4_Full;
523 ADVERTISED_40000baseCR4_Full |
524 ADVERTISED_40000baseSR4_Full |
525 ADVERTISED_40000baseLR4_Full;
526 return speed_mask; 512 return speed_mask;
527} 513}
528 514
@@ -557,11 +543,12 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
557 u16 ethtool_speed; 543 u16 ethtool_speed;
558 544
559 cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info); 545 cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
546 cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
560 547
561 if (link_info->auto_link_speeds) 548 if (link_info->auto_link_speeds)
562 cmd->supported |= SUPPORTED_Autoneg; 549 cmd->supported |= SUPPORTED_Autoneg;
563 550
564 if (BNXT_AUTO_MODE(link_info->auto_mode)) { 551 if (link_info->autoneg) {
565 cmd->advertising = 552 cmd->advertising =
566 bnxt_fw_to_ethtool_advertised_spds(link_info); 553 bnxt_fw_to_ethtool_advertised_spds(link_info);
567 cmd->advertising |= ADVERTISED_Autoneg; 554 cmd->advertising |= ADVERTISED_Autoneg;
@@ -570,28 +557,16 @@ static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
570 cmd->autoneg = AUTONEG_DISABLE; 557 cmd->autoneg = AUTONEG_DISABLE;
571 cmd->advertising = 0; 558 cmd->advertising = 0;
572 } 559 }
573 if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) { 560 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) {
574 if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) == 561 if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
575 BNXT_LINK_PAUSE_BOTH) { 562 BNXT_LINK_PAUSE_BOTH) {
576 cmd->advertising |= ADVERTISED_Pause; 563 cmd->advertising |= ADVERTISED_Pause;
577 cmd->supported |= SUPPORTED_Pause;
578 } else { 564 } else {
579 cmd->advertising |= ADVERTISED_Asym_Pause; 565 cmd->advertising |= ADVERTISED_Asym_Pause;
580 cmd->supported |= SUPPORTED_Asym_Pause;
581 if (link_info->auto_pause_setting & 566 if (link_info->auto_pause_setting &
582 BNXT_LINK_PAUSE_RX) 567 BNXT_LINK_PAUSE_RX)
583 cmd->advertising |= ADVERTISED_Pause; 568 cmd->advertising |= ADVERTISED_Pause;
584 } 569 }
585 } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
586 if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
587 BNXT_LINK_PAUSE_BOTH) {
588 cmd->supported |= SUPPORTED_Pause;
589 } else {
590 cmd->supported |= SUPPORTED_Asym_Pause;
591 if (link_info->force_pause_setting &
592 BNXT_LINK_PAUSE_RX)
593 cmd->supported |= SUPPORTED_Pause;
594 }
595 } 570 }
596 571
597 cmd->port = PORT_NONE; 572 cmd->port = PORT_NONE;
@@ -670,6 +645,9 @@ static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
670 if (advertising & ADVERTISED_10000baseT_Full) 645 if (advertising & ADVERTISED_10000baseT_Full)
671 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB; 646 fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
672 647
648 if (advertising & ADVERTISED_40000baseCR4_Full)
649 fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
650
673 return fw_speed_mask; 651 return fw_speed_mask;
674} 652}
675 653
@@ -729,7 +707,7 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
729 speed = ethtool_cmd_speed(cmd); 707 speed = ethtool_cmd_speed(cmd);
730 link_info->req_link_speed = bnxt_get_fw_speed(dev, speed); 708 link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
731 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL; 709 link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
732 link_info->autoneg &= ~BNXT_AUTONEG_SPEED; 710 link_info->autoneg = 0;
733 link_info->advertising = 0; 711 link_info->advertising = 0;
734 } 712 }
735 713
@@ -748,8 +726,7 @@ static void bnxt_get_pauseparam(struct net_device *dev,
748 726
749 if (BNXT_VF(bp)) 727 if (BNXT_VF(bp))
750 return; 728 return;
751 epause->autoneg = !!(link_info->auto_pause_setting & 729 epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
752 BNXT_LINK_PAUSE_BOTH);
753 epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0); 730 epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
754 epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0); 731 epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
755} 732}
@@ -765,6 +742,9 @@ static int bnxt_set_pauseparam(struct net_device *dev,
765 return rc; 742 return rc;
766 743
767 if (epause->autoneg) { 744 if (epause->autoneg) {
745 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
746 return -EINVAL;
747
768 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; 748 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
769 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH; 749 link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
770 } else { 750 } else {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b15a60d787c7..d7e01a74e927 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2445,8 +2445,7 @@ static void bcmgenet_irq_task(struct work_struct *work)
2445 } 2445 }
2446 2446
2447 /* Link UP/DOWN event */ 2447 /* Link UP/DOWN event */
2448 if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) && 2448 if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
2449 (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) {
2450 phy_mac_interrupt(priv->phydev, 2449 phy_mac_interrupt(priv->phydev,
2451 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP)); 2450 !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
2452 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT; 2451 priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 49eea8981332..3010080cfeee 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7831,6 +7831,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7831 return ret; 7831 return ret;
7832} 7832}
7833 7833
7834static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
7835{
7836 /* Check if we will never have enough descriptors,
7837 * as gso_segs can be more than current ring size
7838 */
7839 return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
7840}
7841
7834static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *); 7842static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7835 7843
7836/* Use GSO to workaround all TSO packets that meet HW bug conditions 7844/* Use GSO to workaround all TSO packets that meet HW bug conditions
@@ -7934,14 +7942,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7934 * vlan encapsulated. 7942 * vlan encapsulated.
7935 */ 7943 */
7936 if (skb->protocol == htons(ETH_P_8021Q) || 7944 if (skb->protocol == htons(ETH_P_8021Q) ||
7937 skb->protocol == htons(ETH_P_8021AD)) 7945 skb->protocol == htons(ETH_P_8021AD)) {
7938 return tg3_tso_bug(tp, tnapi, txq, skb); 7946 if (tg3_tso_bug_gso_check(tnapi, skb))
7947 return tg3_tso_bug(tp, tnapi, txq, skb);
7948 goto drop;
7949 }
7939 7950
7940 if (!skb_is_gso_v6(skb)) { 7951 if (!skb_is_gso_v6(skb)) {
7941 if (unlikely((ETH_HLEN + hdr_len) > 80) && 7952 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7942 tg3_flag(tp, TSO_BUG)) 7953 tg3_flag(tp, TSO_BUG)) {
7943 return tg3_tso_bug(tp, tnapi, txq, skb); 7954 if (tg3_tso_bug_gso_check(tnapi, skb))
7944 7955 return tg3_tso_bug(tp, tnapi, txq, skb);
7956 goto drop;
7957 }
7945 ip_csum = iph->check; 7958 ip_csum = iph->check;
7946 ip_tot_len = iph->tot_len; 7959 ip_tot_len = iph->tot_len;
7947 iph->check = 0; 7960 iph->check = 0;
@@ -8073,7 +8086,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
8073 if (would_hit_hwbug) { 8086 if (would_hit_hwbug) {
8074 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i); 8087 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
8075 8088
8076 if (mss) { 8089 if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
8077 /* If it's a TSO packet, do GSO instead of 8090 /* If it's a TSO packet, do GSO instead of
8078 * allocating and copying to a large linear SKB 8091 * allocating and copying to a large linear SKB
8079 */ 8092 */
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 04b0d16b210e..95bc470ae441 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -987,7 +987,7 @@ bna_rxf_ucast_cfg_apply(struct bna_rxf *rxf)
987 if (!list_empty(&rxf->ucast_pending_add_q)) { 987 if (!list_empty(&rxf->ucast_pending_add_q)) {
988 mac = list_first_entry(&rxf->ucast_pending_add_q, 988 mac = list_first_entry(&rxf->ucast_pending_add_q,
989 struct bna_mac, qe); 989 struct bna_mac, qe);
990 list_add_tail(&mac->qe, &rxf->ucast_active_q); 990 list_move_tail(&mac->qe, &rxf->ucast_active_q);
991 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ); 991 bna_bfi_ucast_req(rxf, mac, BFI_ENET_H2I_MAC_UCAST_ADD_REQ);
992 return 1; 992 return 1;
993 } 993 }
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 872765527081..34d269cd5579 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1683,7 +1683,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
1683 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no); 1683 dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
1684 /* droq creation and local register settings. */ 1684 /* droq creation and local register settings. */
1685 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx); 1685 ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
1686 if (ret_val == -1) 1686 if (ret_val < 0)
1687 return ret_val; 1687 return ret_val;
1688 1688
1689 if (ret_val == 1) { 1689 if (ret_val == 1) {
@@ -2524,7 +2524,7 @@ static void handle_timestamp(struct octeon_device *oct,
2524 2524
2525 octeon_swap_8B_data(&resp->timestamp, 1); 2525 octeon_swap_8B_data(&resp->timestamp, 1);
2526 2526
2527 if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) { 2527 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2528 struct skb_shared_hwtstamps ts; 2528 struct skb_shared_hwtstamps ts;
2529 u64 ns = resp->timestamp; 2529 u64 ns = resp->timestamp;
2530 2530
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 4dba86eaa045..174072b3740b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -983,5 +983,5 @@ int octeon_create_droq(struct octeon_device *oct,
983 983
984create_droq_fail: 984create_droq_fail:
985 octeon_delete_droq(oct, q_no); 985 octeon_delete_droq(oct, q_no);
986 return -1; 986 return -ENOMEM;
987} 987}
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 688828865c48..34e9acea8747 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -116,6 +116,15 @@
116#define NIC_PF_INTR_ID_MBOX0 8 116#define NIC_PF_INTR_ID_MBOX0 8
117#define NIC_PF_INTR_ID_MBOX1 9 117#define NIC_PF_INTR_ID_MBOX1 9
118 118
119/* Minimum FIFO level before all packets for the CQ are dropped
120 *
121 * This value ensures that once a packet has been "accepted"
122 * for reception it will not get dropped due to non-availability
123 * of CQ descriptor. An errata in HW mandates this value to be
124 * atleast 0x100.
125 */
126#define NICPF_CQM_MIN_DROP_LEVEL 0x100
127
119/* Global timer for CQ timer thresh interrupts 128/* Global timer for CQ timer thresh interrupts
120 * Calculated for SCLK of 700Mhz 129 * Calculated for SCLK of 700Mhz
121 * value written should be a 1/16th of what is expected 130 * value written should be a 1/16th of what is expected
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 4dded90076c8..95f17f8cadac 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -304,6 +304,7 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
304static void nic_init_hw(struct nicpf *nic) 304static void nic_init_hw(struct nicpf *nic)
305{ 305{
306 int i; 306 int i;
307 u64 cqm_cfg;
307 308
308 /* Enable NIC HW block */ 309 /* Enable NIC HW block */
309 nic_reg_write(nic, NIC_PF_CFG, 0x3); 310 nic_reg_write(nic, NIC_PF_CFG, 0x3);
@@ -340,6 +341,11 @@ static void nic_init_hw(struct nicpf *nic)
340 /* Enable VLAN ethertype matching and stripping */ 341 /* Enable VLAN ethertype matching and stripping */
341 nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7, 342 nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
342 (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q); 343 (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);
344
345 /* Check if HW expected value is higher (could be in future chips) */
346 cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
347 if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
348 nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
343} 349}
344 350
345/* Channel parse index configuration */ 351/* Channel parse index configuration */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index dd536be20193..afb10e326b4f 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -21,7 +21,7 @@
21#define NIC_PF_TCP_TIMER (0x0060) 21#define NIC_PF_TCP_TIMER (0x0060)
22#define NIC_PF_BP_CFG (0x0080) 22#define NIC_PF_BP_CFG (0x0080)
23#define NIC_PF_RRM_CFG (0x0088) 23#define NIC_PF_RRM_CFG (0x0088)
24#define NIC_PF_CQM_CF (0x00A0) 24#define NIC_PF_CQM_CFG (0x00A0)
25#define NIC_PF_CNM_CF (0x00A8) 25#define NIC_PF_CNM_CF (0x00A8)
26#define NIC_PF_CNM_STATUS (0x00B0) 26#define NIC_PF_CNM_STATUS (0x00B0)
27#define NIC_PF_CQ_AVG_CFG (0x00C0) 27#define NIC_PF_CQ_AVG_CFG (0x00C0)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index c24cb2a86a42..a009bc30dc4d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -574,8 +574,7 @@ static inline void nicvf_set_rxhash(struct net_device *netdev,
574 574
575static void nicvf_rcv_pkt_handler(struct net_device *netdev, 575static void nicvf_rcv_pkt_handler(struct net_device *netdev,
576 struct napi_struct *napi, 576 struct napi_struct *napi,
577 struct cmp_queue *cq, 577 struct cqe_rx_t *cqe_rx)
578 struct cqe_rx_t *cqe_rx, int cqe_type)
579{ 578{
580 struct sk_buff *skb; 579 struct sk_buff *skb;
581 struct nicvf *nic = netdev_priv(netdev); 580 struct nicvf *nic = netdev_priv(netdev);
@@ -591,7 +590,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
591 } 590 }
592 591
593 /* Check for errors */ 592 /* Check for errors */
594 err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); 593 err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
595 if (err && !cqe_rx->rb_cnt) 594 if (err && !cqe_rx->rb_cnt)
596 return; 595 return;
597 596
@@ -682,8 +681,7 @@ loop:
682 cq_idx, cq_desc->cqe_type); 681 cq_idx, cq_desc->cqe_type);
683 switch (cq_desc->cqe_type) { 682 switch (cq_desc->cqe_type) {
684 case CQE_TYPE_RX: 683 case CQE_TYPE_RX:
685 nicvf_rcv_pkt_handler(netdev, napi, cq, 684 nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
686 cq_desc, CQE_TYPE_RX);
687 work_done++; 685 work_done++;
688 break; 686 break;
689 case CQE_TYPE_SEND: 687 case CQE_TYPE_SEND:
@@ -1125,7 +1123,6 @@ int nicvf_stop(struct net_device *netdev)
1125 1123
1126 /* Clear multiqset info */ 1124 /* Clear multiqset info */
1127 nic->pnicvf = nic; 1125 nic->pnicvf = nic;
1128 nic->sqs_count = 0;
1129 1126
1130 return 0; 1127 return 0;
1131} 1128}
@@ -1354,6 +1351,9 @@ void nicvf_update_stats(struct nicvf *nic)
1354 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + 1351 drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
1355 stats->tx_bcast_frames_ok + 1352 stats->tx_bcast_frames_ok +
1356 stats->tx_mcast_frames_ok; 1353 stats->tx_mcast_frames_ok;
1354 drv_stats->rx_frames_ok = stats->rx_ucast_frames +
1355 stats->rx_bcast_frames +
1356 stats->rx_mcast_frames;
1357 drv_stats->rx_drops = stats->rx_drop_red + 1357 drv_stats->rx_drops = stats->rx_drop_red +
1358 stats->rx_drop_overrun; 1358 stats->rx_drop_overrun;
1359 drv_stats->tx_drops = stats->tx_drops; 1359 drv_stats->tx_drops = stats->tx_drops;
@@ -1538,6 +1538,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1538 1538
1539 nicvf_send_vf_struct(nic); 1539 nicvf_send_vf_struct(nic);
1540 1540
1541 if (!pass1_silicon(nic->pdev))
1542 nic->hw_tso = true;
1543
1541 /* Check if this VF is in QS only mode */ 1544 /* Check if this VF is in QS only mode */
1542 if (nic->sqs_mode) 1545 if (nic->sqs_mode)
1543 return 0; 1546 return 0;
@@ -1557,9 +1560,6 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1557 1560
1558 netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; 1561 netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
1559 1562
1560 if (!pass1_silicon(nic->pdev))
1561 nic->hw_tso = true;
1562
1563 netdev->netdev_ops = &nicvf_netdev_ops; 1563 netdev->netdev_ops = &nicvf_netdev_ops;
1564 netdev->watchdog_timeo = NICVF_TX_TIMEOUT; 1564 netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
1565 1565
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d0d1b5490061..767347b1f631 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1329,16 +1329,12 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
1329} 1329}
1330 1330
1331/* Check for errors in the receive cmp.queue entry */ 1331/* Check for errors in the receive cmp.queue entry */
1332int nicvf_check_cqe_rx_errs(struct nicvf *nic, 1332int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
1333 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
1334{ 1333{
1335 struct nicvf_hw_stats *stats = &nic->hw_stats; 1334 struct nicvf_hw_stats *stats = &nic->hw_stats;
1336 struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
1337 1335
1338 if (!cqe_rx->err_level && !cqe_rx->err_opcode) { 1336 if (!cqe_rx->err_level && !cqe_rx->err_opcode)
1339 drv_stats->rx_frames_ok++;
1340 return 0; 1337 return 0;
1341 }
1342 1338
1343 if (netif_msg_rx_err(nic)) 1339 if (netif_msg_rx_err(nic))
1344 netdev_err(nic->netdev, 1340 netdev_err(nic->netdev,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index c5030a7f213a..6673e1133523 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -338,8 +338,7 @@ u64 nicvf_queue_reg_read(struct nicvf *nic,
338/* Stats */ 338/* Stats */
339void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); 339void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
340void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); 340void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
341int nicvf_check_cqe_rx_errs(struct nicvf *nic, 341int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
342 struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
343int nicvf_check_cqe_tx_errs(struct nicvf *nic, 342int nicvf_check_cqe_tx_errs(struct nicvf *nic,
344 struct cmp_queue *cq, struct cqe_send_t *cqe_tx); 343 struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
345#endif /* NICVF_QUEUES_H */ 344#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index ee04caa6c4d8..a89721fad633 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -681,6 +681,24 @@ int t3_seeprom_wp(struct adapter *adapter, int enable)
681 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); 681 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
682} 682}
683 683
684static int vpdstrtouint(char *s, int len, unsigned int base, unsigned int *val)
685{
686 char tok[len + 1];
687
688 memcpy(tok, s, len);
689 tok[len] = 0;
690 return kstrtouint(strim(tok), base, val);
691}
692
693static int vpdstrtou16(char *s, int len, unsigned int base, u16 *val)
694{
695 char tok[len + 1];
696
697 memcpy(tok, s, len);
698 tok[len] = 0;
699 return kstrtou16(strim(tok), base, val);
700}
701
684/** 702/**
685 * get_vpd_params - read VPD parameters from VPD EEPROM 703 * get_vpd_params - read VPD parameters from VPD EEPROM
686 * @adapter: adapter to read 704 * @adapter: adapter to read
@@ -709,19 +727,19 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
709 return ret; 727 return ret;
710 } 728 }
711 729
712 ret = kstrtouint(vpd.cclk_data, 10, &p->cclk); 730 ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
713 if (ret) 731 if (ret)
714 return ret; 732 return ret;
715 ret = kstrtouint(vpd.mclk_data, 10, &p->mclk); 733 ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
716 if (ret) 734 if (ret)
717 return ret; 735 return ret;
718 ret = kstrtouint(vpd.uclk_data, 10, &p->uclk); 736 ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
719 if (ret) 737 if (ret)
720 return ret; 738 return ret;
721 ret = kstrtouint(vpd.mdc_data, 10, &p->mdc); 739 ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
722 if (ret) 740 if (ret)
723 return ret; 741 return ret;
724 ret = kstrtouint(vpd.mt_data, 10, &p->mem_timing); 742 ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
725 if (ret) 743 if (ret)
726 return ret; 744 return ret;
727 memcpy(p->sn, vpd.sn_data, SERNUM_LEN); 745 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
@@ -733,10 +751,12 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
733 } else { 751 } else {
734 p->port_type[0] = hex_to_bin(vpd.port0_data[0]); 752 p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
735 p->port_type[1] = hex_to_bin(vpd.port1_data[0]); 753 p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
736 ret = kstrtou16(vpd.xaui0cfg_data, 16, &p->xauicfg[0]); 754 ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
755 &p->xauicfg[0]);
737 if (ret) 756 if (ret)
738 return ret; 757 return ret;
739 ret = kstrtou16(vpd.xaui1cfg_data, 16, &p->xauicfg[1]); 758 ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
759 &p->xauicfg[1]);
740 if (ret) 760 if (ret)
741 return ret; 761 return ret;
742 } 762 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a8dda635456d..06bc2d2e7a73 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -165,6 +165,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
165 CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */ 165 CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */
166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */ 166 CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */ 167 CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
168 CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
168 169
169 /* T6 adapters: 170 /* T6 adapters:
170 */ 171 */
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 1671fa3332c2..7ba6d530b0c0 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
33 33
34#define DRV_NAME "enic" 34#define DRV_NAME "enic"
35#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 35#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
36#define DRV_VERSION "2.3.0.12" 36#define DRV_VERSION "2.3.0.20"
37#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc" 37#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
38 38
39#define ENIC_BARS_MAX 6 39#define ENIC_BARS_MAX 6
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 1ffd1050860b..1fdf5fe12a95 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
298 int wait) 298 int wait)
299{ 299{
300 struct devcmd2_controller *dc2c = vdev->devcmd2; 300 struct devcmd2_controller *dc2c = vdev->devcmd2;
301 struct devcmd2_result *result = dc2c->result + dc2c->next_result; 301 struct devcmd2_result *result;
302 u8 color;
302 unsigned int i; 303 unsigned int i;
303 int delay, err; 304 int delay, err;
304 u32 fetch_index, new_posted; 305 u32 fetch_index, new_posted;
@@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
336 if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT) 337 if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
337 return 0; 338 return 0;
338 339
340 result = dc2c->result + dc2c->next_result;
341 color = dc2c->color;
342
343 dc2c->next_result++;
344 if (dc2c->next_result == dc2c->result_size) {
345 dc2c->next_result = 0;
346 dc2c->color = dc2c->color ? 0 : 1;
347 }
348
339 for (delay = 0; delay < wait; delay++) { 349 for (delay = 0; delay < wait; delay++) {
340 if (result->color == dc2c->color) { 350 if (result->color == color) {
341 dc2c->next_result++;
342 if (dc2c->next_result == dc2c->result_size) {
343 dc2c->next_result = 0;
344 dc2c->color = dc2c->color ? 0 : 1;
345 }
346 if (result->error) { 351 if (result->error) {
347 err = result->error; 352 err = result->error;
348 if (err != ERR_ECMDUNKNOWN || 353 if (err != ERR_ECMDUNKNOWN ||
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index cf94b72dbacd..48d91941408d 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -128,7 +128,6 @@ struct board_info {
128 struct resource *data_res; 128 struct resource *data_res;
129 struct resource *addr_req; /* resources requested */ 129 struct resource *addr_req; /* resources requested */
130 struct resource *data_req; 130 struct resource *data_req;
131 struct resource *irq_res;
132 131
133 int irq_wake; 132 int irq_wake;
134 133
@@ -1300,22 +1299,16 @@ static int
1300dm9000_open(struct net_device *dev) 1299dm9000_open(struct net_device *dev)
1301{ 1300{
1302 struct board_info *db = netdev_priv(dev); 1301 struct board_info *db = netdev_priv(dev);
1303 unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
1304 1302
1305 if (netif_msg_ifup(db)) 1303 if (netif_msg_ifup(db))
1306 dev_dbg(db->dev, "enabling %s\n", dev->name); 1304 dev_dbg(db->dev, "enabling %s\n", dev->name);
1307 1305
1308 /* If there is no IRQ type specified, default to something that 1306 /* If there is no IRQ type specified, tell the user that this is a
1309 * may work, and tell the user that this is a problem */ 1307 * problem
1310 1308 */
1311 if (irqflags == IRQF_TRIGGER_NONE) 1309 if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
1312 irqflags = irq_get_trigger_type(dev->irq);
1313
1314 if (irqflags == IRQF_TRIGGER_NONE)
1315 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n"); 1310 dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
1316 1311
1317 irqflags |= IRQF_SHARED;
1318
1319 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ 1312 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
1320 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ 1313 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
1321 mdelay(1); /* delay needs by DM9000B */ 1314 mdelay(1); /* delay needs by DM9000B */
@@ -1323,7 +1316,8 @@ dm9000_open(struct net_device *dev)
1323 /* Initialize DM9000 board */ 1316 /* Initialize DM9000 board */
1324 dm9000_init_dm9000(dev); 1317 dm9000_init_dm9000(dev);
1325 1318
1326 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) 1319 if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
1320 dev->name, dev))
1327 return -EAGAIN; 1321 return -EAGAIN;
1328 /* Now that we have an interrupt handler hooked up we can unmask 1322 /* Now that we have an interrupt handler hooked up we can unmask
1329 * our interrupts 1323 * our interrupts
@@ -1500,15 +1494,22 @@ dm9000_probe(struct platform_device *pdev)
1500 1494
1501 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1495 db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1502 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1496 db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1503 db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1504 1497
1505 if (db->addr_res == NULL || db->data_res == NULL || 1498 if (!db->addr_res || !db->data_res) {
1506 db->irq_res == NULL) { 1499 dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
1507 dev_err(db->dev, "insufficient resources\n"); 1500 db->addr_res, db->data_res);
1508 ret = -ENOENT; 1501 ret = -ENOENT;
1509 goto out; 1502 goto out;
1510 } 1503 }
1511 1504
1505 ndev->irq = platform_get_irq(pdev, 0);
1506 if (ndev->irq < 0) {
1507 dev_err(db->dev, "interrupt resource unavailable: %d\n",
1508 ndev->irq);
1509 ret = ndev->irq;
1510 goto out;
1511 }
1512
1512 db->irq_wake = platform_get_irq(pdev, 1); 1513 db->irq_wake = platform_get_irq(pdev, 1);
1513 if (db->irq_wake >= 0) { 1514 if (db->irq_wake >= 0) {
1514 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake); 1515 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
@@ -1570,7 +1571,6 @@ dm9000_probe(struct platform_device *pdev)
1570 1571
1571 /* fill in parameters for net-dev structure */ 1572 /* fill in parameters for net-dev structure */
1572 ndev->base_addr = (unsigned long)db->io_addr; 1573 ndev->base_addr = (unsigned long)db->io_addr;
1573 ndev->irq = db->irq_res->start;
1574 1574
1575 /* ensure at least we have a default set of IO routines */ 1575 /* ensure at least we have a default set of IO routines */
1576 dm9000_set_io(db, iosize); 1576 dm9000_set_io(db, iosize);
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index cf837831304b..f9751294ece7 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -531,6 +531,7 @@ struct be_adapter {
531 531
532 struct delayed_work be_err_detection_work; 532 struct delayed_work be_err_detection_work;
533 u8 err_flags; 533 u8 err_flags;
534 bool pcicfg_mapped; /* pcicfg obtained via pci_iomap() */
534 u32 flags; 535 u32 flags;
535 u32 cmd_privileges; 536 u32 cmd_privileges;
536 /* Ethtool knobs and info */ 537 /* Ethtool knobs and info */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 241819b36ca7..6d9a8d78e8ad 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -622,10 +622,13 @@ enum be_if_flags {
622 BE_IF_FLAGS_VLAN_PROMISCUOUS |\ 622 BE_IF_FLAGS_VLAN_PROMISCUOUS |\
623 BE_IF_FLAGS_MCAST_PROMISCUOUS) 623 BE_IF_FLAGS_MCAST_PROMISCUOUS)
624 624
625#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\ 625#define BE_IF_FILT_FLAGS_BASIC (BE_IF_FLAGS_BROADCAST | \
626 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED) 626 BE_IF_FLAGS_PASS_L3L4_ERRORS | \
627 BE_IF_FLAGS_UNTAGGED)
627 628
628#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS) 629#define BE_IF_ALL_FILT_FLAGS (BE_IF_FILT_FLAGS_BASIC | \
630 BE_IF_FLAGS_MULTICAST | \
631 BE_IF_FLAGS_ALL_PROMISCUOUS)
629 632
630/* An RX interface is an object with one or more MAC addresses and 633/* An RX interface is an object with one or more MAC addresses and
631 * filtering capabilities. */ 634 * filtering capabilities. */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index f99de3657ce3..d1cf1274fc2f 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -125,6 +125,11 @@ static const char * const ue_status_hi_desc[] = {
125 "Unknown" 125 "Unknown"
126}; 126};
127 127
128#define BE_VF_IF_EN_FLAGS (BE_IF_FLAGS_UNTAGGED | \
129 BE_IF_FLAGS_BROADCAST | \
130 BE_IF_FLAGS_MULTICAST | \
131 BE_IF_FLAGS_PASS_L3L4_ERRORS)
132
128static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q) 133static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
129{ 134{
130 struct be_dma_mem *mem = &q->dma_mem; 135 struct be_dma_mem *mem = &q->dma_mem;
@@ -3537,7 +3542,7 @@ static int be_enable_if_filters(struct be_adapter *adapter)
3537{ 3542{
3538 int status; 3543 int status;
3539 3544
3540 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON); 3545 status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
3541 if (status) 3546 if (status)
3542 return status; 3547 return status;
3543 3548
@@ -3857,8 +3862,7 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3857 int status; 3862 int status;
3858 3863
3859 /* If a FW profile exists, then cap_flags are updated */ 3864 /* If a FW profile exists, then cap_flags are updated */
3860 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 3865 cap_flags = BE_VF_IF_EN_FLAGS;
3861 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3862 3866
3863 for_all_vfs(adapter, vf_cfg, vf) { 3867 for_all_vfs(adapter, vf_cfg, vf) {
3864 if (!BE3_chip(adapter)) { 3868 if (!BE3_chip(adapter)) {
@@ -3874,10 +3878,8 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3874 } 3878 }
3875 } 3879 }
3876 3880
3877 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED | 3881 /* PF should enable IF flags during proxy if_create call */
3878 BE_IF_FLAGS_BROADCAST | 3882 en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
3879 BE_IF_FLAGS_MULTICAST |
3880 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3881 status = be_cmd_if_create(adapter, cap_flags, en_flags, 3883 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3882 &vf_cfg->if_handle, vf + 1); 3884 &vf_cfg->if_handle, vf + 1);
3883 if (status) 3885 if (status)
@@ -4968,6 +4970,8 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
4968 pci_iounmap(adapter->pdev, adapter->csr); 4970 pci_iounmap(adapter->pdev, adapter->csr);
4969 if (adapter->db) 4971 if (adapter->db)
4970 pci_iounmap(adapter->pdev, adapter->db); 4972 pci_iounmap(adapter->pdev, adapter->db);
4973 if (adapter->pcicfg && adapter->pcicfg_mapped)
4974 pci_iounmap(adapter->pdev, adapter->pcicfg);
4971} 4975}
4972 4976
4973static int db_bar(struct be_adapter *adapter) 4977static int db_bar(struct be_adapter *adapter)
@@ -5019,8 +5023,10 @@ static int be_map_pci_bars(struct be_adapter *adapter)
5019 if (!addr) 5023 if (!addr)
5020 goto pci_map_err; 5024 goto pci_map_err;
5021 adapter->pcicfg = addr; 5025 adapter->pcicfg = addr;
5026 adapter->pcicfg_mapped = true;
5022 } else { 5027 } else {
5023 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET; 5028 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5029 adapter->pcicfg_mapped = false;
5024 } 5030 }
5025 } 5031 }
5026 5032
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 62fa136554ac..41b010645100 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1265,7 +1265,6 @@ static int ethoc_remove(struct platform_device *pdev)
1265 1265
1266 if (priv->mdio) { 1266 if (priv->mdio) {
1267 mdiobus_unregister(priv->mdio); 1267 mdiobus_unregister(priv->mdio);
1268 kfree(priv->mdio->irq);
1269 mdiobus_free(priv->mdio); 1268 mdiobus_free(priv->mdio);
1270 } 1269 }
1271 if (priv->clk) 1270 if (priv->clk)
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 623aa1c8ebc6..79a210aaf0bb 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -2791,6 +2791,8 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
2791 goto fman_free; 2791 goto fman_free;
2792 } 2792 }
2793 2793
2794 fman->dev = &of_dev->dev;
2795
2794 return fman; 2796 return fman;
2795 2797
2796fman_node_put: 2798fman_node_put:
@@ -2845,8 +2847,6 @@ static int fman_probe(struct platform_device *of_dev)
2845 2847
2846 dev_set_drvdata(dev, fman); 2848 dev_set_drvdata(dev, fman);
2847 2849
2848 fman->dev = dev;
2849
2850 dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id); 2850 dev_dbg(dev, "FMan%d probed\n", fman->dts_params.id);
2851 2851
2852 return 0; 2852 return 0;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 2aa7b401cc3b..b9ecf197ad11 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1111,8 +1111,10 @@ static void __gfar_detect_errata_85xx(struct gfar_private *priv)
1111 1111
1112 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20)) 1112 if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
1113 priv->errata |= GFAR_ERRATA_12; 1113 priv->errata |= GFAR_ERRATA_12;
1114 /* P2020/P1010 Rev 1; MPC8548 Rev 2 */
1114 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) || 1115 if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
1115 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20))) 1116 ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
1117 ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
1116 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */ 1118 priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
1117} 1119}
1118#endif 1120#endif
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index a7139f588ad2..678f5018d0be 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -469,8 +469,8 @@ static int fmvj18x_config(struct pcmcia_device *link)
469 goto failed; 469 goto failed;
470 } 470 }
471 /* Read MACID from CIS */ 471 /* Read MACID from CIS */
472 for (i = 5; i < 11; i++) 472 for (i = 0; i < 6; i++)
473 dev->dev_addr[i] = buf[i]; 473 dev->dev_addr[i] = buf[i + 5];
474 kfree(buf); 474 kfree(buf);
475 } else { 475 } else {
476 if (pcmcia_get_mac_from_cis(link, dev)) 476 if (pcmcia_get_mac_from_cis(link, dev))
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 74beb1867230..4ccc032633c4 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -25,6 +25,7 @@ config HIX5HD2_GMAC
25 25
26config HIP04_ETH 26config HIP04_ETH
27 tristate "HISILICON P04 Ethernet support" 27 tristate "HISILICON P04 Ethernet support"
28 depends on HAS_IOMEM # For MFD_SYSCON
28 select MARVELL_PHY 29 select MARVELL_PHY
29 select MFD_SYSCON 30 select MFD_SYSCON
30 select HNS_MDIO 31 select HNS_MDIO
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index a0070d0e740d..d4f92ed322d6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -675,8 +675,12 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
675{ 675{
676 int ret; 676 int ret;
677 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle); 677 struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
678 struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
678 679
679 switch (loop) { 680 switch (loop) {
681 case MAC_INTERNALLOOP_PHY:
682 ret = 0;
683 break;
680 case MAC_INTERNALLOOP_SERDES: 684 case MAC_INTERNALLOOP_SERDES:
681 ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en); 685 ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
682 break; 686 break;
@@ -686,6 +690,10 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
686 default: 690 default:
687 ret = -EINVAL; 691 ret = -EINVAL;
688 } 692 }
693
694 if (!ret)
695 hns_dsaf_set_inner_lb(mac_cb->dsaf_dev, mac_cb->mac_id, en);
696
689 return ret; 697 return ret;
690} 698}
691 699
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 9439f04962e1..38fc5be3870c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -230,6 +230,30 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
230 } 230 }
231} 231}
232 232
233static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev)
234{
235 u16 max_q_per_vf, max_vfn;
236 u32 q_id, q_num_per_port;
237 u32 mac_id;
238
239 if (AE_IS_VER1(dsaf_dev->dsaf_ver))
240 return;
241
242 hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
243 HNS_DSAF_COMM_SERVICE_NW_IDX,
244 &max_vfn, &max_q_per_vf);
245 q_num_per_port = max_vfn * max_q_per_vf;
246
247 for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) {
248 dsaf_set_dev_field(dsaf_dev,
249 DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
250 DSAFV2_SERDES_LBK_QID_M,
251 DSAFV2_SERDES_LBK_QID_S,
252 q_id);
253 q_id += q_num_per_port;
254 }
255}
256
233/** 257/**
234 * hns_dsaf_sw_port_type_cfg - cfg sw type 258 * hns_dsaf_sw_port_type_cfg - cfg sw type
235 * @dsaf_id: dsa fabric id 259 * @dsaf_id: dsa fabric id
@@ -691,6 +715,16 @@ void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
691 dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en); 715 dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
692} 716}
693 717
718void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
719{
720 if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
721 dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG)
722 return;
723
724 dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
725 DSAFV2_SERDES_LBK_EN_B, !!en);
726}
727
694/** 728/**
695 * hns_dsaf_tbl_stat_en - tbl 729 * hns_dsaf_tbl_stat_en - tbl
696 * @dsaf_id: dsa fabric id 730 * @dsaf_id: dsa fabric id
@@ -1022,6 +1056,9 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
1022 /* set promisc def queue id */ 1056 /* set promisc def queue id */
1023 hns_dsaf_mix_def_qid_cfg(dsaf_dev); 1057 hns_dsaf_mix_def_qid_cfg(dsaf_dev);
1024 1058
1059 /* set inner loopback queue id */
1060 hns_dsaf_inner_qid_cfg(dsaf_dev);
1061
1025 /* in non switch mode, set all port to access mode */ 1062 /* in non switch mode, set all port to access mode */
1026 hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN); 1063 hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
1027 1064
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 40205b910f80..5fea226efaf3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -417,5 +417,6 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port);
417void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data); 417void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
418int hns_dsaf_get_regs_count(void); 418int hns_dsaf_get_regs_count(void);
419void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en); 419void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
420void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
420 421
421#endif /* __HNS_DSAF_MAIN_H__ */ 422#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index f0c4f9b09d5b..60d695daa471 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -134,6 +134,7 @@
134#define DSAF_XGE_INT_STS_0_REG 0x1C0 134#define DSAF_XGE_INT_STS_0_REG 0x1C0
135#define DSAF_PPE_INT_STS_0_REG 0x1E0 135#define DSAF_PPE_INT_STS_0_REG 0x1E0
136#define DSAF_ROCEE_INT_STS_0_REG 0x200 136#define DSAF_ROCEE_INT_STS_0_REG 0x200
137#define DSAFV2_SERDES_LBK_0_REG 0x220
137#define DSAF_PPE_QID_CFG_0_REG 0x300 138#define DSAF_PPE_QID_CFG_0_REG 0x300
138#define DSAF_SW_PORT_TYPE_0_REG 0x320 139#define DSAF_SW_PORT_TYPE_0_REG 0x320
139#define DSAF_STP_PORT_TYPE_0_REG 0x340 140#define DSAF_STP_PORT_TYPE_0_REG 0x340
@@ -857,6 +858,10 @@
857#define PPEV2_CFG_RSS_TBL_4N3_S 24 858#define PPEV2_CFG_RSS_TBL_4N3_S 24
858#define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S) 859#define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S)
859 860
861#define DSAFV2_SERDES_LBK_EN_B 8
862#define DSAFV2_SERDES_LBK_QID_S 0
863#define DSAFV2_SERDES_LBK_QID_M (((1UL << 8) - 1) << DSAFV2_SERDES_LBK_QID_S)
864
860#define PPE_CNT_CLR_CE_B 0 865#define PPE_CNT_CLR_CE_B 0
861#define PPE_CNT_CLR_SNAP_EN_B 1 866#define PPE_CNT_CLR_SNAP_EN_B 1
862 867
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3df22840fcd1..3c4a3bc31a89 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -295,8 +295,10 @@ static int __lb_setup(struct net_device *ndev,
295 295
296 switch (loop) { 296 switch (loop) {
297 case MAC_INTERNALLOOP_PHY: 297 case MAC_INTERNALLOOP_PHY:
298 if ((phy_dev) && (!phy_dev->is_c45)) 298 if ((phy_dev) && (!phy_dev->is_c45)) {
299 ret = hns_nic_config_phy_loopback(phy_dev, 0x1); 299 ret = hns_nic_config_phy_loopback(phy_dev, 0x1);
300 ret |= h->dev->ops->set_loopback(h, loop, 0x1);
301 }
300 break; 302 break;
301 case MAC_INTERNALLOOP_MAC: 303 case MAC_INTERNALLOOP_MAC:
302 if ((h->dev->ops->set_loopback) && 304 if ((h->dev->ops->set_loopback) &&
@@ -376,6 +378,7 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
376 struct sk_buff *skb) 378 struct sk_buff *skb)
377{ 379{
378 struct net_device *ndev; 380 struct net_device *ndev;
381 struct hns_nic_priv *priv;
379 struct hnae_ring *ring; 382 struct hnae_ring *ring;
380 struct netdev_queue *dev_queue; 383 struct netdev_queue *dev_queue;
381 struct sk_buff *new_skb; 384 struct sk_buff *new_skb;
@@ -385,8 +388,17 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
385 char buff[33]; /* 32B data and the last character '\0' */ 388 char buff[33]; /* 32B data and the last character '\0' */
386 389
387 if (!ring_data) { /* Just for doing create frame*/ 390 if (!ring_data) { /* Just for doing create frame*/
391 ndev = skb->dev;
392 priv = netdev_priv(ndev);
393
388 frame_size = skb->len; 394 frame_size = skb->len;
389 memset(skb->data, 0xFF, frame_size); 395 memset(skb->data, 0xFF, frame_size);
396 if ((!AE_IS_VER1(priv->enet_ver)) &&
397 (priv->ae_handle->port_type == HNAE_PORT_SERVICE)) {
398 memcpy(skb->data, ndev->dev_addr, 6);
399 skb->data[5] += 0x1f;
400 }
401
390 frame_size &= ~1ul; 402 frame_size &= ~1ul;
391 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); 403 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
392 memset(&skb->data[frame_size / 2 + 10], 0xBE, 404 memset(&skb->data[frame_size / 2 + 10], 0xBE,
@@ -486,6 +498,7 @@ static int __lb_run_test(struct net_device *ndev,
486 498
487 /* place data into test skb */ 499 /* place data into test skb */
488 (void)skb_put(skb, size); 500 (void)skb_put(skb, size);
501 skb->dev = ndev;
489 __lb_other_process(NULL, skb); 502 __lb_other_process(NULL, skb);
490 skb->queue_mapping = NIC_LB_TEST_RING_ID; 503 skb->queue_mapping = NIC_LB_TEST_RING_ID;
491 504
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 335417b4756b..ebe60719e489 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1166,7 +1166,10 @@ map_failed:
1166 if (!firmware_has_feature(FW_FEATURE_CMO)) 1166 if (!firmware_has_feature(FW_FEATURE_CMO))
1167 netdev_err(netdev, "tx: unable to map xmit buffer\n"); 1167 netdev_err(netdev, "tx: unable to map xmit buffer\n");
1168 adapter->tx_map_failed++; 1168 adapter->tx_map_failed++;
1169 skb_linearize(skb); 1169 if (skb_linearize(skb)) {
1170 netdev->stats.tx_dropped++;
1171 goto out;
1172 }
1170 force_bounce = 1; 1173 force_bounce = 1;
1171 goto retry_bounce; 1174 goto retry_bounce;
1172} 1175}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 7d6570843723..6e9e16eee5d0 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1348,44 +1348,44 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
1348 crq.request_capability.cmd = REQUEST_CAPABILITY; 1348 crq.request_capability.cmd = REQUEST_CAPABILITY;
1349 1349
1350 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES); 1350 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
1351 crq.request_capability.number = cpu_to_be32(adapter->req_tx_queues); 1351 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
1352 ibmvnic_send_crq(adapter, &crq); 1352 ibmvnic_send_crq(adapter, &crq);
1353 1353
1354 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES); 1354 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
1355 crq.request_capability.number = cpu_to_be32(adapter->req_rx_queues); 1355 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
1356 ibmvnic_send_crq(adapter, &crq); 1356 ibmvnic_send_crq(adapter, &crq);
1357 1357
1358 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES); 1358 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
1359 crq.request_capability.number = cpu_to_be32(adapter->req_rx_add_queues); 1359 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
1360 ibmvnic_send_crq(adapter, &crq); 1360 ibmvnic_send_crq(adapter, &crq);
1361 1361
1362 crq.request_capability.capability = 1362 crq.request_capability.capability =
1363 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ); 1363 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
1364 crq.request_capability.number = 1364 crq.request_capability.number =
1365 cpu_to_be32(adapter->req_tx_entries_per_subcrq); 1365 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
1366 ibmvnic_send_crq(adapter, &crq); 1366 ibmvnic_send_crq(adapter, &crq);
1367 1367
1368 crq.request_capability.capability = 1368 crq.request_capability.capability =
1369 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ); 1369 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
1370 crq.request_capability.number = 1370 crq.request_capability.number =
1371 cpu_to_be32(adapter->req_rx_add_entries_per_subcrq); 1371 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
1372 ibmvnic_send_crq(adapter, &crq); 1372 ibmvnic_send_crq(adapter, &crq);
1373 1373
1374 crq.request_capability.capability = cpu_to_be16(REQ_MTU); 1374 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
1375 crq.request_capability.number = cpu_to_be32(adapter->req_mtu); 1375 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
1376 ibmvnic_send_crq(adapter, &crq); 1376 ibmvnic_send_crq(adapter, &crq);
1377 1377
1378 if (adapter->netdev->flags & IFF_PROMISC) { 1378 if (adapter->netdev->flags & IFF_PROMISC) {
1379 if (adapter->promisc_supported) { 1379 if (adapter->promisc_supported) {
1380 crq.request_capability.capability = 1380 crq.request_capability.capability =
1381 cpu_to_be16(PROMISC_REQUESTED); 1381 cpu_to_be16(PROMISC_REQUESTED);
1382 crq.request_capability.number = cpu_to_be32(1); 1382 crq.request_capability.number = cpu_to_be64(1);
1383 ibmvnic_send_crq(adapter, &crq); 1383 ibmvnic_send_crq(adapter, &crq);
1384 } 1384 }
1385 } else { 1385 } else {
1386 crq.request_capability.capability = 1386 crq.request_capability.capability =
1387 cpu_to_be16(PROMISC_REQUESTED); 1387 cpu_to_be16(PROMISC_REQUESTED);
1388 crq.request_capability.number = cpu_to_be32(0); 1388 crq.request_capability.number = cpu_to_be64(0);
1389 ibmvnic_send_crq(adapter, &crq); 1389 ibmvnic_send_crq(adapter, &crq);
1390 } 1390 }
1391 1391
@@ -2312,93 +2312,93 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2312 switch (be16_to_cpu(crq->query_capability.capability)) { 2312 switch (be16_to_cpu(crq->query_capability.capability)) {
2313 case MIN_TX_QUEUES: 2313 case MIN_TX_QUEUES:
2314 adapter->min_tx_queues = 2314 adapter->min_tx_queues =
2315 be32_to_cpu(crq->query_capability.number); 2315 be64_to_cpu(crq->query_capability.number);
2316 netdev_dbg(netdev, "min_tx_queues = %lld\n", 2316 netdev_dbg(netdev, "min_tx_queues = %lld\n",
2317 adapter->min_tx_queues); 2317 adapter->min_tx_queues);
2318 break; 2318 break;
2319 case MIN_RX_QUEUES: 2319 case MIN_RX_QUEUES:
2320 adapter->min_rx_queues = 2320 adapter->min_rx_queues =
2321 be32_to_cpu(crq->query_capability.number); 2321 be64_to_cpu(crq->query_capability.number);
2322 netdev_dbg(netdev, "min_rx_queues = %lld\n", 2322 netdev_dbg(netdev, "min_rx_queues = %lld\n",
2323 adapter->min_rx_queues); 2323 adapter->min_rx_queues);
2324 break; 2324 break;
2325 case MIN_RX_ADD_QUEUES: 2325 case MIN_RX_ADD_QUEUES:
2326 adapter->min_rx_add_queues = 2326 adapter->min_rx_add_queues =
2327 be32_to_cpu(crq->query_capability.number); 2327 be64_to_cpu(crq->query_capability.number);
2328 netdev_dbg(netdev, "min_rx_add_queues = %lld\n", 2328 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2329 adapter->min_rx_add_queues); 2329 adapter->min_rx_add_queues);
2330 break; 2330 break;
2331 case MAX_TX_QUEUES: 2331 case MAX_TX_QUEUES:
2332 adapter->max_tx_queues = 2332 adapter->max_tx_queues =
2333 be32_to_cpu(crq->query_capability.number); 2333 be64_to_cpu(crq->query_capability.number);
2334 netdev_dbg(netdev, "max_tx_queues = %lld\n", 2334 netdev_dbg(netdev, "max_tx_queues = %lld\n",
2335 adapter->max_tx_queues); 2335 adapter->max_tx_queues);
2336 break; 2336 break;
2337 case MAX_RX_QUEUES: 2337 case MAX_RX_QUEUES:
2338 adapter->max_rx_queues = 2338 adapter->max_rx_queues =
2339 be32_to_cpu(crq->query_capability.number); 2339 be64_to_cpu(crq->query_capability.number);
2340 netdev_dbg(netdev, "max_rx_queues = %lld\n", 2340 netdev_dbg(netdev, "max_rx_queues = %lld\n",
2341 adapter->max_rx_queues); 2341 adapter->max_rx_queues);
2342 break; 2342 break;
2343 case MAX_RX_ADD_QUEUES: 2343 case MAX_RX_ADD_QUEUES:
2344 adapter->max_rx_add_queues = 2344 adapter->max_rx_add_queues =
2345 be32_to_cpu(crq->query_capability.number); 2345 be64_to_cpu(crq->query_capability.number);
2346 netdev_dbg(netdev, "max_rx_add_queues = %lld\n", 2346 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2347 adapter->max_rx_add_queues); 2347 adapter->max_rx_add_queues);
2348 break; 2348 break;
2349 case MIN_TX_ENTRIES_PER_SUBCRQ: 2349 case MIN_TX_ENTRIES_PER_SUBCRQ:
2350 adapter->min_tx_entries_per_subcrq = 2350 adapter->min_tx_entries_per_subcrq =
2351 be32_to_cpu(crq->query_capability.number); 2351 be64_to_cpu(crq->query_capability.number);
2352 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n", 2352 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2353 adapter->min_tx_entries_per_subcrq); 2353 adapter->min_tx_entries_per_subcrq);
2354 break; 2354 break;
2355 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ: 2355 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2356 adapter->min_rx_add_entries_per_subcrq = 2356 adapter->min_rx_add_entries_per_subcrq =
2357 be32_to_cpu(crq->query_capability.number); 2357 be64_to_cpu(crq->query_capability.number);
2358 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n", 2358 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2359 adapter->min_rx_add_entries_per_subcrq); 2359 adapter->min_rx_add_entries_per_subcrq);
2360 break; 2360 break;
2361 case MAX_TX_ENTRIES_PER_SUBCRQ: 2361 case MAX_TX_ENTRIES_PER_SUBCRQ:
2362 adapter->max_tx_entries_per_subcrq = 2362 adapter->max_tx_entries_per_subcrq =
2363 be32_to_cpu(crq->query_capability.number); 2363 be64_to_cpu(crq->query_capability.number);
2364 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n", 2364 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2365 adapter->max_tx_entries_per_subcrq); 2365 adapter->max_tx_entries_per_subcrq);
2366 break; 2366 break;
2367 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ: 2367 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2368 adapter->max_rx_add_entries_per_subcrq = 2368 adapter->max_rx_add_entries_per_subcrq =
2369 be32_to_cpu(crq->query_capability.number); 2369 be64_to_cpu(crq->query_capability.number);
2370 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n", 2370 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2371 adapter->max_rx_add_entries_per_subcrq); 2371 adapter->max_rx_add_entries_per_subcrq);
2372 break; 2372 break;
2373 case TCP_IP_OFFLOAD: 2373 case TCP_IP_OFFLOAD:
2374 adapter->tcp_ip_offload = 2374 adapter->tcp_ip_offload =
2375 be32_to_cpu(crq->query_capability.number); 2375 be64_to_cpu(crq->query_capability.number);
2376 netdev_dbg(netdev, "tcp_ip_offload = %lld\n", 2376 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2377 adapter->tcp_ip_offload); 2377 adapter->tcp_ip_offload);
2378 break; 2378 break;
2379 case PROMISC_SUPPORTED: 2379 case PROMISC_SUPPORTED:
2380 adapter->promisc_supported = 2380 adapter->promisc_supported =
2381 be32_to_cpu(crq->query_capability.number); 2381 be64_to_cpu(crq->query_capability.number);
2382 netdev_dbg(netdev, "promisc_supported = %lld\n", 2382 netdev_dbg(netdev, "promisc_supported = %lld\n",
2383 adapter->promisc_supported); 2383 adapter->promisc_supported);
2384 break; 2384 break;
2385 case MIN_MTU: 2385 case MIN_MTU:
2386 adapter->min_mtu = be32_to_cpu(crq->query_capability.number); 2386 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
2387 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu); 2387 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2388 break; 2388 break;
2389 case MAX_MTU: 2389 case MAX_MTU:
2390 adapter->max_mtu = be32_to_cpu(crq->query_capability.number); 2390 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
2391 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu); 2391 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2392 break; 2392 break;
2393 case MAX_MULTICAST_FILTERS: 2393 case MAX_MULTICAST_FILTERS:
2394 adapter->max_multicast_filters = 2394 adapter->max_multicast_filters =
2395 be32_to_cpu(crq->query_capability.number); 2395 be64_to_cpu(crq->query_capability.number);
2396 netdev_dbg(netdev, "max_multicast_filters = %lld\n", 2396 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2397 adapter->max_multicast_filters); 2397 adapter->max_multicast_filters);
2398 break; 2398 break;
2399 case VLAN_HEADER_INSERTION: 2399 case VLAN_HEADER_INSERTION:
2400 adapter->vlan_header_insertion = 2400 adapter->vlan_header_insertion =
2401 be32_to_cpu(crq->query_capability.number); 2401 be64_to_cpu(crq->query_capability.number);
2402 if (adapter->vlan_header_insertion) 2402 if (adapter->vlan_header_insertion)
2403 netdev->features |= NETIF_F_HW_VLAN_STAG_TX; 2403 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2404 netdev_dbg(netdev, "vlan_header_insertion = %lld\n", 2404 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
@@ -2406,43 +2406,43 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2406 break; 2406 break;
2407 case MAX_TX_SG_ENTRIES: 2407 case MAX_TX_SG_ENTRIES:
2408 adapter->max_tx_sg_entries = 2408 adapter->max_tx_sg_entries =
2409 be32_to_cpu(crq->query_capability.number); 2409 be64_to_cpu(crq->query_capability.number);
2410 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n", 2410 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2411 adapter->max_tx_sg_entries); 2411 adapter->max_tx_sg_entries);
2412 break; 2412 break;
2413 case RX_SG_SUPPORTED: 2413 case RX_SG_SUPPORTED:
2414 adapter->rx_sg_supported = 2414 adapter->rx_sg_supported =
2415 be32_to_cpu(crq->query_capability.number); 2415 be64_to_cpu(crq->query_capability.number);
2416 netdev_dbg(netdev, "rx_sg_supported = %lld\n", 2416 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2417 adapter->rx_sg_supported); 2417 adapter->rx_sg_supported);
2418 break; 2418 break;
2419 case OPT_TX_COMP_SUB_QUEUES: 2419 case OPT_TX_COMP_SUB_QUEUES:
2420 adapter->opt_tx_comp_sub_queues = 2420 adapter->opt_tx_comp_sub_queues =
2421 be32_to_cpu(crq->query_capability.number); 2421 be64_to_cpu(crq->query_capability.number);
2422 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n", 2422 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2423 adapter->opt_tx_comp_sub_queues); 2423 adapter->opt_tx_comp_sub_queues);
2424 break; 2424 break;
2425 case OPT_RX_COMP_QUEUES: 2425 case OPT_RX_COMP_QUEUES:
2426 adapter->opt_rx_comp_queues = 2426 adapter->opt_rx_comp_queues =
2427 be32_to_cpu(crq->query_capability.number); 2427 be64_to_cpu(crq->query_capability.number);
2428 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n", 2428 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2429 adapter->opt_rx_comp_queues); 2429 adapter->opt_rx_comp_queues);
2430 break; 2430 break;
2431 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q: 2431 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2432 adapter->opt_rx_bufadd_q_per_rx_comp_q = 2432 adapter->opt_rx_bufadd_q_per_rx_comp_q =
2433 be32_to_cpu(crq->query_capability.number); 2433 be64_to_cpu(crq->query_capability.number);
2434 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n", 2434 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2435 adapter->opt_rx_bufadd_q_per_rx_comp_q); 2435 adapter->opt_rx_bufadd_q_per_rx_comp_q);
2436 break; 2436 break;
2437 case OPT_TX_ENTRIES_PER_SUBCRQ: 2437 case OPT_TX_ENTRIES_PER_SUBCRQ:
2438 adapter->opt_tx_entries_per_subcrq = 2438 adapter->opt_tx_entries_per_subcrq =
2439 be32_to_cpu(crq->query_capability.number); 2439 be64_to_cpu(crq->query_capability.number);
2440 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n", 2440 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2441 adapter->opt_tx_entries_per_subcrq); 2441 adapter->opt_tx_entries_per_subcrq);
2442 break; 2442 break;
2443 case OPT_RXBA_ENTRIES_PER_SUBCRQ: 2443 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2444 adapter->opt_rxba_entries_per_subcrq = 2444 adapter->opt_rxba_entries_per_subcrq =
2445 be32_to_cpu(crq->query_capability.number); 2445 be64_to_cpu(crq->query_capability.number);
2446 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n", 2446 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2447 adapter->opt_rxba_entries_per_subcrq); 2447 adapter->opt_rxba_entries_per_subcrq);
2448 break; 2448 break;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1242925ad34c..1a9993cc79b5 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -319,10 +319,8 @@ struct ibmvnic_capability {
319 u8 first; 319 u8 first;
320 u8 cmd; 320 u8 cmd;
321 __be16 capability; /* one of ibmvnic_capabilities */ 321 __be16 capability; /* one of ibmvnic_capabilities */
322 __be64 number;
322 struct ibmvnic_rc rc; 323 struct ibmvnic_rc rc;
323 __be32 number; /*FIX: should be __be64, but I'm getting the least
324 * significant word first
325 */
326} __packed __aligned(8); 324} __packed __aligned(8);
327 325
328struct ibmvnic_login { 326struct ibmvnic_login {
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index b1de7afd4116..3ddf657bc10b 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
270} 270}
271 271
272static inline void 272static inline void
273jme_clear_pm(struct jme_adapter *jme) 273jme_clear_pm_enable_wol(struct jme_adapter *jme)
274{ 274{
275 jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs); 275 jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
276} 276}
277 277
278static inline void
279jme_clear_pm_disable_wol(struct jme_adapter *jme)
280{
281 jwrite32(jme, JME_PMCS, PMCS_STMASK);
282}
283
278static int 284static int
279jme_reload_eeprom(struct jme_adapter *jme) 285jme_reload_eeprom(struct jme_adapter *jme)
280{ 286{
@@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev)
1853 struct jme_adapter *jme = netdev_priv(netdev); 1859 struct jme_adapter *jme = netdev_priv(netdev);
1854 int rc; 1860 int rc;
1855 1861
1856 jme_clear_pm(jme); 1862 jme_clear_pm_disable_wol(jme);
1857 JME_NAPI_ENABLE(jme); 1863 JME_NAPI_ENABLE(jme);
1858 1864
1859 tasklet_init(&jme->linkch_task, jme_link_change_tasklet, 1865 tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
@@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme)
1925static void 1931static void
1926jme_powersave_phy(struct jme_adapter *jme) 1932jme_powersave_phy(struct jme_adapter *jme)
1927{ 1933{
1928 if (jme->reg_pmcs) { 1934 if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
1929 jme_set_100m_half(jme); 1935 jme_set_100m_half(jme);
1930 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN)) 1936 if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
1931 jme_wait_link(jme); 1937 jme_wait_link(jme);
1932 jme_clear_pm(jme); 1938 jme_clear_pm_enable_wol(jme);
1933 } else { 1939 } else {
1934 jme_phy_off(jme); 1940 jme_phy_off(jme);
1935 } 1941 }
@@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev,
2646 if (wol->wolopts & WAKE_MAGIC) 2652 if (wol->wolopts & WAKE_MAGIC)
2647 jme->reg_pmcs |= PMCS_MFEN; 2653 jme->reg_pmcs |= PMCS_MFEN;
2648 2654
2649 jwrite32(jme, JME_PMCS, jme->reg_pmcs);
2650 device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
2651
2652 return 0; 2655 return 0;
2653} 2656}
2654 2657
@@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev,
3172 jme->mii_if.mdio_read = jme_mdio_read; 3175 jme->mii_if.mdio_read = jme_mdio_read;
3173 jme->mii_if.mdio_write = jme_mdio_write; 3176 jme->mii_if.mdio_write = jme_mdio_write;
3174 3177
3175 jme_clear_pm(jme); 3178 jme_clear_pm_disable_wol(jme);
3176 device_set_wakeup_enable(&pdev->dev, true); 3179 device_init_wakeup(&pdev->dev, true);
3177 3180
3178 jme_set_phyfifo_5level(jme); 3181 jme_set_phyfifo_5level(jme);
3179 jme->pcirev = pdev->revision; 3182 jme->pcirev = pdev->revision;
@@ -3304,7 +3307,7 @@ jme_resume(struct device *dev)
3304 if (!netif_running(netdev)) 3307 if (!netif_running(netdev))
3305 return 0; 3308 return 0;
3306 3309
3307 jme_clear_pm(jme); 3310 jme_clear_pm_disable_wol(jme);
3308 jme_phy_on(jme); 3311 jme_phy_on(jme);
3309 if (test_bit(JME_FLAG_SSET, &jme->flags)) 3312 if (test_bit(JME_FLAG_SSET, &jme->flags))
3310 jme_set_settings(netdev, &jme->old_ecmd); 3313 jme_set_settings(netdev, &jme->old_ecmd);
@@ -3312,13 +3315,14 @@ jme_resume(struct device *dev)
3312 jme_reset_phy_processor(jme); 3315 jme_reset_phy_processor(jme);
3313 jme_phy_calibration(jme); 3316 jme_phy_calibration(jme);
3314 jme_phy_setEA(jme); 3317 jme_phy_setEA(jme);
3315 jme_start_irq(jme);
3316 netif_device_attach(netdev); 3318 netif_device_attach(netdev);
3317 3319
3318 atomic_inc(&jme->link_changing); 3320 atomic_inc(&jme->link_changing);
3319 3321
3320 jme_reset_link(jme); 3322 jme_reset_link(jme);
3321 3323
3324 jme_start_irq(jme);
3325
3322 return 0; 3326 return 0;
3323} 3327}
3324 3328
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 662c2ee268c7..b0ae69f84493 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -370,6 +370,11 @@ struct mvneta_port {
370 struct net_device *dev; 370 struct net_device *dev;
371 struct notifier_block cpu_notifier; 371 struct notifier_block cpu_notifier;
372 int rxq_def; 372 int rxq_def;
373 /* Protect the access to the percpu interrupt registers,
374 * ensuring that the configuration remains coherent.
375 */
376 spinlock_t lock;
377 bool is_stopped;
373 378
374 /* Core clock */ 379 /* Core clock */
375 struct clk *clk; 380 struct clk *clk;
@@ -1038,6 +1043,43 @@ static void mvneta_set_autoneg(struct mvneta_port *pp, int enable)
1038 } 1043 }
1039} 1044}
1040 1045
1046static void mvneta_percpu_unmask_interrupt(void *arg)
1047{
1048 struct mvneta_port *pp = arg;
1049
1050 /* All the queue are unmasked, but actually only the ones
1051 * mapped to this CPU will be unmasked
1052 */
1053 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
1054 MVNETA_RX_INTR_MASK_ALL |
1055 MVNETA_TX_INTR_MASK_ALL |
1056 MVNETA_MISCINTR_INTR_MASK);
1057}
1058
1059static void mvneta_percpu_mask_interrupt(void *arg)
1060{
1061 struct mvneta_port *pp = arg;
1062
1063 /* All the queue are masked, but actually only the ones
1064 * mapped to this CPU will be masked
1065 */
1066 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
1067 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1068 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1069}
1070
1071static void mvneta_percpu_clear_intr_cause(void *arg)
1072{
1073 struct mvneta_port *pp = arg;
1074
1075 /* All the queue are cleared, but actually only the ones
1076 * mapped to this CPU will be cleared
1077 */
1078 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
1079 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1080 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1081}
1082
1041/* This method sets defaults to the NETA port: 1083/* This method sets defaults to the NETA port:
1042 * Clears interrupt Cause and Mask registers. 1084 * Clears interrupt Cause and Mask registers.
1043 * Clears all MAC tables. 1085 * Clears all MAC tables.
@@ -1055,14 +1097,10 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
1055 int max_cpu = num_present_cpus(); 1097 int max_cpu = num_present_cpus();
1056 1098
1057 /* Clear all Cause registers */ 1099 /* Clear all Cause registers */
1058 mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); 1100 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
1059 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
1060 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
1061 1101
1062 /* Mask all interrupts */ 1102 /* Mask all interrupts */
1063 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 1103 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
1064 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
1065 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
1066 mvreg_write(pp, MVNETA_INTR_ENABLE, 0); 1104 mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
1067 1105
1068 /* Enable MBUS Retry bit16 */ 1106 /* Enable MBUS Retry bit16 */
@@ -2528,34 +2566,9 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
2528 return 0; 2566 return 0;
2529} 2567}
2530 2568
2531static void mvneta_percpu_unmask_interrupt(void *arg)
2532{
2533 struct mvneta_port *pp = arg;
2534
2535 /* All the queue are unmasked, but actually only the ones
2536 * maped to this CPU will be unmasked
2537 */
2538 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2539 MVNETA_RX_INTR_MASK_ALL |
2540 MVNETA_TX_INTR_MASK_ALL |
2541 MVNETA_MISCINTR_INTR_MASK);
2542}
2543
2544static void mvneta_percpu_mask_interrupt(void *arg)
2545{
2546 struct mvneta_port *pp = arg;
2547
2548 /* All the queue are masked, but actually only the ones
2549 * maped to this CPU will be masked
2550 */
2551 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
2552 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2553 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2554}
2555
2556static void mvneta_start_dev(struct mvneta_port *pp) 2569static void mvneta_start_dev(struct mvneta_port *pp)
2557{ 2570{
2558 unsigned int cpu; 2571 int cpu;
2559 2572
2560 mvneta_max_rx_size_set(pp, pp->pkt_size); 2573 mvneta_max_rx_size_set(pp, pp->pkt_size);
2561 mvneta_txq_max_tx_size_set(pp, pp->pkt_size); 2574 mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -2564,16 +2577,15 @@ static void mvneta_start_dev(struct mvneta_port *pp)
2564 mvneta_port_enable(pp); 2577 mvneta_port_enable(pp);
2565 2578
2566 /* Enable polling on the port */ 2579 /* Enable polling on the port */
2567 for_each_present_cpu(cpu) { 2580 for_each_online_cpu(cpu) {
2568 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 2581 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2569 2582
2570 napi_enable(&port->napi); 2583 napi_enable(&port->napi);
2571 } 2584 }
2572 2585
2573 /* Unmask interrupts. It has to be done from each CPU */ 2586 /* Unmask interrupts. It has to be done from each CPU */
2574 for_each_online_cpu(cpu) 2587 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2575 smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, 2588
2576 pp, true);
2577 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2589 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2578 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2590 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2579 MVNETA_CAUSE_LINK_CHANGE | 2591 MVNETA_CAUSE_LINK_CHANGE |
@@ -2589,7 +2601,7 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
2589 2601
2590 phy_stop(pp->phy_dev); 2602 phy_stop(pp->phy_dev);
2591 2603
2592 for_each_present_cpu(cpu) { 2604 for_each_online_cpu(cpu) {
2593 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); 2605 struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
2594 2606
2595 napi_disable(&port->napi); 2607 napi_disable(&port->napi);
@@ -2604,13 +2616,10 @@ static void mvneta_stop_dev(struct mvneta_port *pp)
2604 mvneta_port_disable(pp); 2616 mvneta_port_disable(pp);
2605 2617
2606 /* Clear all ethernet port interrupts */ 2618 /* Clear all ethernet port interrupts */
2607 mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 2619 on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
2608 mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
2609 2620
2610 /* Mask all ethernet port interrupts */ 2621 /* Mask all ethernet port interrupts */
2611 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2622 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2612 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2613 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2614 2623
2615 mvneta_tx_reset(pp); 2624 mvneta_tx_reset(pp);
2616 mvneta_rx_reset(pp); 2625 mvneta_rx_reset(pp);
@@ -2847,11 +2856,20 @@ static void mvneta_percpu_disable(void *arg)
2847 disable_percpu_irq(pp->dev->irq); 2856 disable_percpu_irq(pp->dev->irq);
2848} 2857}
2849 2858
2859/* Electing a CPU must be done in an atomic way: it should be done
2860 * after or before the removal/insertion of a CPU and this function is
2861 * not reentrant.
2862 */
2850static void mvneta_percpu_elect(struct mvneta_port *pp) 2863static void mvneta_percpu_elect(struct mvneta_port *pp)
2851{ 2864{
2852 int online_cpu_idx, max_cpu, cpu, i = 0; 2865 int elected_cpu = 0, max_cpu, cpu, i = 0;
2866
2867 /* Use the cpu associated to the rxq when it is online, in all
2868 * the other cases, use the cpu 0 which can't be offline.
2869 */
2870 if (cpu_online(pp->rxq_def))
2871 elected_cpu = pp->rxq_def;
2853 2872
2854 online_cpu_idx = pp->rxq_def % num_online_cpus();
2855 max_cpu = num_present_cpus(); 2873 max_cpu = num_present_cpus();
2856 2874
2857 for_each_online_cpu(cpu) { 2875 for_each_online_cpu(cpu) {
@@ -2862,7 +2880,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
2862 if ((rxq % max_cpu) == cpu) 2880 if ((rxq % max_cpu) == cpu)
2863 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); 2881 rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
2864 2882
2865 if (i == online_cpu_idx) 2883 if (cpu == elected_cpu)
2866 /* Map the default receive queue queue to the 2884 /* Map the default receive queue queue to the
2867 * elected CPU 2885 * elected CPU
2868 */ 2886 */
@@ -2873,7 +2891,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
2873 * the CPU bound to the default RX queue 2891 * the CPU bound to the default RX queue
2874 */ 2892 */
2875 if (txq_number == 1) 2893 if (txq_number == 1)
2876 txq_map = (i == online_cpu_idx) ? 2894 txq_map = (cpu == elected_cpu) ?
2877 MVNETA_CPU_TXQ_ACCESS(1) : 0; 2895 MVNETA_CPU_TXQ_ACCESS(1) : 0;
2878 else 2896 else
2879 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & 2897 txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
@@ -2902,6 +2920,14 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2902 switch (action) { 2920 switch (action) {
2903 case CPU_ONLINE: 2921 case CPU_ONLINE:
2904 case CPU_ONLINE_FROZEN: 2922 case CPU_ONLINE_FROZEN:
2923 spin_lock(&pp->lock);
2924 /* Configuring the driver for a new CPU while the
2925 * driver is stopping is racy, so just avoid it.
2926 */
2927 if (pp->is_stopped) {
2928 spin_unlock(&pp->lock);
2929 break;
2930 }
2905 netif_tx_stop_all_queues(pp->dev); 2931 netif_tx_stop_all_queues(pp->dev);
2906 2932
2907 /* We have to synchronise on tha napi of each CPU 2933 /* We have to synchronise on tha napi of each CPU
@@ -2917,9 +2943,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2917 } 2943 }
2918 2944
2919 /* Mask all ethernet port interrupts */ 2945 /* Mask all ethernet port interrupts */
2920 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2946 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2921 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
2922 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2923 napi_enable(&port->napi); 2947 napi_enable(&port->napi);
2924 2948
2925 2949
@@ -2934,27 +2958,25 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2934 */ 2958 */
2935 mvneta_percpu_elect(pp); 2959 mvneta_percpu_elect(pp);
2936 2960
2937 /* Unmask all ethernet port interrupts, as this 2961 /* Unmask all ethernet port interrupts */
2938 * notifier is called for each CPU then the CPU to 2962 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2939 * Queue mapping is applied
2940 */
2941 mvreg_write(pp, MVNETA_INTR_NEW_MASK,
2942 MVNETA_RX_INTR_MASK(rxq_number) |
2943 MVNETA_TX_INTR_MASK(txq_number) |
2944 MVNETA_MISCINTR_INTR_MASK);
2945 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2963 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2946 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2964 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2947 MVNETA_CAUSE_LINK_CHANGE | 2965 MVNETA_CAUSE_LINK_CHANGE |
2948 MVNETA_CAUSE_PSC_SYNC_CHANGE); 2966 MVNETA_CAUSE_PSC_SYNC_CHANGE);
2949 netif_tx_start_all_queues(pp->dev); 2967 netif_tx_start_all_queues(pp->dev);
2968 spin_unlock(&pp->lock);
2950 break; 2969 break;
2951 case CPU_DOWN_PREPARE: 2970 case CPU_DOWN_PREPARE:
2952 case CPU_DOWN_PREPARE_FROZEN: 2971 case CPU_DOWN_PREPARE_FROZEN:
2953 netif_tx_stop_all_queues(pp->dev); 2972 netif_tx_stop_all_queues(pp->dev);
2973 /* Thanks to this lock we are sure that any pending
2974 * cpu election is done
2975 */
2976 spin_lock(&pp->lock);
2954 /* Mask all ethernet port interrupts */ 2977 /* Mask all ethernet port interrupts */
2955 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); 2978 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
2956 mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); 2979 spin_unlock(&pp->lock);
2957 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
2958 2980
2959 napi_synchronize(&port->napi); 2981 napi_synchronize(&port->napi);
2960 napi_disable(&port->napi); 2982 napi_disable(&port->napi);
@@ -2968,12 +2990,11 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2968 case CPU_DEAD: 2990 case CPU_DEAD:
2969 case CPU_DEAD_FROZEN: 2991 case CPU_DEAD_FROZEN:
2970 /* Check if a new CPU must be elected now this on is down */ 2992 /* Check if a new CPU must be elected now this on is down */
2993 spin_lock(&pp->lock);
2971 mvneta_percpu_elect(pp); 2994 mvneta_percpu_elect(pp);
2995 spin_unlock(&pp->lock);
2972 /* Unmask all ethernet port interrupts */ 2996 /* Unmask all ethernet port interrupts */
2973 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2997 on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
2974 MVNETA_RX_INTR_MASK(rxq_number) |
2975 MVNETA_TX_INTR_MASK(txq_number) |
2976 MVNETA_MISCINTR_INTR_MASK);
2977 mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2998 mvreg_write(pp, MVNETA_INTR_MISC_MASK,
2978 MVNETA_CAUSE_PHY_STATUS_CHANGE | 2999 MVNETA_CAUSE_PHY_STATUS_CHANGE |
2979 MVNETA_CAUSE_LINK_CHANGE | 3000 MVNETA_CAUSE_LINK_CHANGE |
@@ -2988,7 +3009,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
2988static int mvneta_open(struct net_device *dev) 3009static int mvneta_open(struct net_device *dev)
2989{ 3010{
2990 struct mvneta_port *pp = netdev_priv(dev); 3011 struct mvneta_port *pp = netdev_priv(dev);
2991 int ret, cpu; 3012 int ret;
2992 3013
2993 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); 3014 pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
2994 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + 3015 pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
@@ -3010,22 +3031,12 @@ static int mvneta_open(struct net_device *dev)
3010 goto err_cleanup_txqs; 3031 goto err_cleanup_txqs;
3011 } 3032 }
3012 3033
3013 /* Even though the documentation says that request_percpu_irq
3014 * doesn't enable the interrupts automatically, it actually
3015 * does so on the local CPU.
3016 *
3017 * Make sure it's disabled.
3018 */
3019 mvneta_percpu_disable(pp);
3020
3021 /* Enable per-CPU interrupt on all the CPU to handle our RX 3034 /* Enable per-CPU interrupt on all the CPU to handle our RX
3022 * queue interrupts 3035 * queue interrupts
3023 */ 3036 */
3024 for_each_online_cpu(cpu) 3037 on_each_cpu(mvneta_percpu_enable, pp, true);
3025 smp_call_function_single(cpu, mvneta_percpu_enable,
3026 pp, true);
3027
3028 3038
3039 pp->is_stopped = false;
3029 /* Register a CPU notifier to handle the case where our CPU 3040 /* Register a CPU notifier to handle the case where our CPU
3030 * might be taken offline. 3041 * might be taken offline.
3031 */ 3042 */
@@ -3057,13 +3068,20 @@ err_cleanup_rxqs:
3057static int mvneta_stop(struct net_device *dev) 3068static int mvneta_stop(struct net_device *dev)
3058{ 3069{
3059 struct mvneta_port *pp = netdev_priv(dev); 3070 struct mvneta_port *pp = netdev_priv(dev);
3060 int cpu;
3061 3071
3072 /* Inform that we are stopping so we don't want to setup the
3073 * driver for new CPUs in the notifiers
3074 */
3075 spin_lock(&pp->lock);
3076 pp->is_stopped = true;
3062 mvneta_stop_dev(pp); 3077 mvneta_stop_dev(pp);
3063 mvneta_mdio_remove(pp); 3078 mvneta_mdio_remove(pp);
3064 unregister_cpu_notifier(&pp->cpu_notifier); 3079 unregister_cpu_notifier(&pp->cpu_notifier);
3065 for_each_present_cpu(cpu) 3080 /* Now that the notifier are unregistered, we can release le
3066 smp_call_function_single(cpu, mvneta_percpu_disable, pp, true); 3081 * lock
3082 */
3083 spin_unlock(&pp->lock);
3084 on_each_cpu(mvneta_percpu_disable, pp, true);
3067 free_percpu_irq(dev->irq, pp->ports); 3085 free_percpu_irq(dev->irq, pp->ports);
3068 mvneta_cleanup_rxqs(pp); 3086 mvneta_cleanup_rxqs(pp);
3069 mvneta_cleanup_txqs(pp); 3087 mvneta_cleanup_txqs(pp);
@@ -3312,9 +3330,7 @@ static int mvneta_config_rss(struct mvneta_port *pp)
3312 3330
3313 netif_tx_stop_all_queues(pp->dev); 3331 netif_tx_stop_all_queues(pp->dev);
3314 3332
3315 for_each_online_cpu(cpu) 3333 on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
3316 smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
3317 pp, true);
3318 3334
3319 /* We have to synchronise on the napi of each CPU */ 3335 /* We have to synchronise on the napi of each CPU */
3320 for_each_online_cpu(cpu) { 3336 for_each_online_cpu(cpu) {
@@ -3335,7 +3351,9 @@ static int mvneta_config_rss(struct mvneta_port *pp)
3335 mvreg_write(pp, MVNETA_PORT_CONFIG, val); 3351 mvreg_write(pp, MVNETA_PORT_CONFIG, val);
3336 3352
3337 /* Update the elected CPU matching the new rxq_def */ 3353 /* Update the elected CPU matching the new rxq_def */
3354 spin_lock(&pp->lock);
3338 mvneta_percpu_elect(pp); 3355 mvneta_percpu_elect(pp);
3356 spin_unlock(&pp->lock);
3339 3357
3340 /* We have to synchronise on the napi of each CPU */ 3358 /* We have to synchronise on the napi of each CPU */
3341 for_each_online_cpu(cpu) { 3359 for_each_online_cpu(cpu) {
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index a4beccf1fd46..c797971aefab 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3061,7 +3061,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3061 3061
3062 pe = kzalloc(sizeof(*pe), GFP_KERNEL); 3062 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
3063 if (!pe) 3063 if (!pe)
3064 return -1; 3064 return -ENOMEM;
3065 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); 3065 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
3066 pe->index = tid; 3066 pe->index = tid;
3067 3067
@@ -3077,7 +3077,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
3077 if (pmap == 0) { 3077 if (pmap == 0) {
3078 if (add) { 3078 if (add) {
3079 kfree(pe); 3079 kfree(pe);
3080 return -1; 3080 return -EINVAL;
3081 } 3081 }
3082 mvpp2_prs_hw_inv(priv, pe->index); 3082 mvpp2_prs_hw_inv(priv, pe->index);
3083 priv->prs_shadow[pe->index].valid = false; 3083 priv->prs_shadow[pe->index].valid = false;
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 715de8affcc9..c7e939945259 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -182,10 +182,17 @@ void mlx4_enter_error_state(struct mlx4_dev_persistent *persist)
182 err = mlx4_reset_slave(dev); 182 err = mlx4_reset_slave(dev);
183 else 183 else
184 err = mlx4_reset_master(dev); 184 err = mlx4_reset_master(dev);
185 BUG_ON(err != 0);
186 185
186 if (!err) {
187 mlx4_err(dev, "device was reset successfully\n");
188 } else {
189 /* EEH could have disabled the PCI channel during reset. That's
190 * recoverable and the PCI error flow will handle it.
191 */
192 if (!pci_channel_offline(dev->persist->pdev))
193 BUG_ON(1);
194 }
187 dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR; 195 dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
188 mlx4_err(dev, "device was reset successfully\n");
189 mutex_unlock(&persist->device_state_mutex); 196 mutex_unlock(&persist->device_state_mutex);
190 197
191 /* At that step HW was already reset, now notify clients */ 198 /* At that step HW was already reset, now notify clients */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index d48d5793407d..e94ca1c3fc7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2429,7 +2429,7 @@ err_thread:
2429 flush_workqueue(priv->mfunc.master.comm_wq); 2429 flush_workqueue(priv->mfunc.master.comm_wq);
2430 destroy_workqueue(priv->mfunc.master.comm_wq); 2430 destroy_workqueue(priv->mfunc.master.comm_wq);
2431err_slaves: 2431err_slaves:
2432 while (--i) { 2432 while (i--) {
2433 for (port = 1; port <= MLX4_MAX_PORTS; port++) 2433 for (port = 1; port <= MLX4_MAX_PORTS; port++)
2434 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]); 2434 kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
2435 } 2435 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 3348e646db70..a849da92f857 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -318,7 +318,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
318 if (timestamp_en) 318 if (timestamp_en)
319 cq_context->flags |= cpu_to_be32(1 << 19); 319 cq_context->flags |= cpu_to_be32(1 << 19);
320 320
321 cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index); 321 cq_context->logsize_usrpage =
322 cpu_to_be32((ilog2(nent) << 24) |
323 mlx4_to_hw_uar_index(dev, uar->index));
322 cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn; 324 cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
323 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; 325 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
324 326
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 038f9ce391e6..1494997c4f7e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
236 .enable = mlx4_en_phc_enable, 236 .enable = mlx4_en_phc_enable,
237}; 237};
238 238
239#define MLX4_EN_WRAP_AROUND_SEC 10ULL
240
241/* This function calculates the max shift that enables the user range
242 * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
243 */
244static u32 freq_to_shift(u16 freq)
245{
246 u32 freq_khz = freq * 1000;
247 u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
248 u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
249 max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
250 /* calculate max possible multiplier in order to fit in 64bit */
251 u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
252
253 /* This comes from the reverse of clocksource_khz2mult */
254 return ilog2(div_u64(max_mul * freq_khz, 1000000));
255}
256
239void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev) 257void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
240{ 258{
241 struct mlx4_dev *dev = mdev->dev; 259 struct mlx4_dev *dev = mdev->dev;
@@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
254 memset(&mdev->cycles, 0, sizeof(mdev->cycles)); 272 memset(&mdev->cycles, 0, sizeof(mdev->cycles));
255 mdev->cycles.read = mlx4_en_read_clock; 273 mdev->cycles.read = mlx4_en_read_clock;
256 mdev->cycles.mask = CLOCKSOURCE_MASK(48); 274 mdev->cycles.mask = CLOCKSOURCE_MASK(48);
257 /* Using shift to make calculation more accurate. Since current HW 275 mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
258 * clock frequency is 427 MHz, and cycles are given using a 48 bits
259 * register, the biggest shift when calculating using u64, is 14
260 * (max_cycles * multiplier < 2^64)
261 */
262 mdev->cycles.shift = 14;
263 mdev->cycles.mult = 276 mdev->cycles.mult =
264 clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift); 277 clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
265 mdev->nominal_c_mult = mdev->cycles.mult; 278 mdev->nominal_c_mult = mdev->cycles.mult;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 0c7e3f69a73b..21e2c0960271 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2245,7 +2245,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
2245 struct mlx4_en_dev *mdev = en_priv->mdev; 2245 struct mlx4_en_dev *mdev = en_priv->mdev;
2246 u64 mac_u64 = mlx4_mac_to_u64(mac); 2246 u64 mac_u64 = mlx4_mac_to_u64(mac);
2247 2247
2248 if (!is_valid_ether_addr(mac)) 2248 if (is_multicast_ether_addr(mac))
2249 return -EINVAL; 2249 return -EINVAL;
2250 2250
2251 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64); 2251 return mlx4_set_vf_mac(mdev->dev, en_priv->port, queue, mac_u64);
@@ -2344,8 +2344,6 @@ out:
2344 /* set offloads */ 2344 /* set offloads */
2345 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2345 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2346 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL; 2346 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2347 priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2348 priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2349} 2347}
2350 2348
2351static void mlx4_en_del_vxlan_offloads(struct work_struct *work) 2349static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2356,8 +2354,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2356 /* unset offloads */ 2354 /* unset offloads */
2357 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 2355 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2358 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL); 2356 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
2359 priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
2360 priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
2361 2357
2362 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2358 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2363 VXLAN_STEER_BY_OUTER_MAC, 0); 2359 VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2980,6 +2976,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2980 priv->rss_hash_fn = ETH_RSS_HASH_TOP; 2976 priv->rss_hash_fn = ETH_RSS_HASH_TOP;
2981 } 2977 }
2982 2978
2979 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2980 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2981 dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2982 }
2983
2983 mdev->pndev[port] = dev; 2984 mdev->pndev[port] = dev;
2984 mdev->upper[port] = NULL; 2985 mdev->upper[port] = NULL;
2985 2986
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index ee99e67187f5..3904b5fc0b7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
238 stats->collisions = 0; 238 stats->collisions = 0;
239 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP); 239 stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
240 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength); 240 stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
241 stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 241 stats->rx_over_errors = 0;
242 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC); 242 stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
243 stats->rx_frame_errors = 0; 243 stats->rx_frame_errors = 0;
244 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 244 stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
245 stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw); 245 stats->rx_missed_errors = 0;
246 stats->tx_aborted_errors = 0; 246 stats->tx_aborted_errors = 0;
247 stats->tx_carrier_errors = 0; 247 stats->tx_carrier_errors = 0;
248 stats->tx_fifo_errors = 0; 248 stats->tx_fifo_errors = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 12aab5a659d3..02e925d6f734 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -58,7 +58,8 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
58 } else { 58 } else {
59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4; 59 context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
60 } 60 }
61 context->usr_page = cpu_to_be32(mdev->priv_uar.index); 61 context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
62 mdev->priv_uar.index));
62 context->local_qpn = cpu_to_be32(qpn); 63 context->local_qpn = cpu_to_be32(qpn);
63 context->pri_path.ackto = 1 & 0x07; 64 context->pri_path.ackto = 1 & 0x07;
64 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; 65 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4421bf5463f6..e0946ab22010 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -213,7 +213,9 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
213 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, 213 mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
214 ring->cqn, user_prio, &ring->context); 214 ring->cqn, user_prio, &ring->context);
215 if (ring->bf_alloced) 215 if (ring->bf_alloced)
216 ring->context.usr_page = cpu_to_be32(ring->bf.uar->index); 216 ring->context.usr_page =
217 cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
218 ring->bf.uar->index));
217 219
218 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context, 220 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
219 &ring->qp, &ring->qp_state); 221 &ring->qp, &ring->qp_state);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 4696053165f8..f613977455e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -940,9 +940,10 @@ static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
940 940
941 if (!priv->eq_table.uar_map[index]) { 941 if (!priv->eq_table.uar_map[index]) {
942 priv->eq_table.uar_map[index] = 942 priv->eq_table.uar_map[index] =
943 ioremap(pci_resource_start(dev->persist->pdev, 2) + 943 ioremap(
944 ((eq->eqn / 4) << PAGE_SHIFT), 944 pci_resource_start(dev->persist->pdev, 2) +
945 PAGE_SIZE); 945 ((eq->eqn / 4) << (dev->uar_page_shift)),
946 (1 << (dev->uar_page_shift)));
946 if (!priv->eq_table.uar_map[index]) { 947 if (!priv->eq_table.uar_map[index]) {
947 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n", 948 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
948 eq->eqn); 949 eq->eqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f1b6d219e445..f8674ae62752 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -168,6 +168,20 @@ struct mlx4_port_config {
168 168
169static atomic_t pf_loading = ATOMIC_INIT(0); 169static atomic_t pf_loading = ATOMIC_INIT(0);
170 170
171static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
172 struct mlx4_dev_cap *dev_cap)
173{
174 /* The reserved_uars is calculated by system page size unit.
175 * Therefore, adjustment is added when the uar page size is less
176 * than the system page size
177 */
178 dev->caps.reserved_uars =
179 max_t(int,
180 mlx4_get_num_reserved_uar(dev),
181 dev_cap->reserved_uars /
182 (1 << (PAGE_SHIFT - dev->uar_page_shift)));
183}
184
171int mlx4_check_port_params(struct mlx4_dev *dev, 185int mlx4_check_port_params(struct mlx4_dev *dev,
172 enum mlx4_port_type *port_type) 186 enum mlx4_port_type *port_type)
173{ 187{
@@ -386,8 +400,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
386 dev->caps.reserved_mtts = dev_cap->reserved_mtts; 400 dev->caps.reserved_mtts = dev_cap->reserved_mtts;
387 dev->caps.reserved_mrws = dev_cap->reserved_mrws; 401 dev->caps.reserved_mrws = dev_cap->reserved_mrws;
388 402
389 /* The first 128 UARs are used for EQ doorbells */
390 dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
391 dev->caps.reserved_pds = dev_cap->reserved_pds; 403 dev->caps.reserved_pds = dev_cap->reserved_pds;
392 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ? 404 dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
393 dev_cap->reserved_xrcds : 0; 405 dev_cap->reserved_xrcds : 0;
@@ -405,6 +417,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
405 dev->caps.max_gso_sz = dev_cap->max_gso_sz; 417 dev->caps.max_gso_sz = dev_cap->max_gso_sz;
406 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; 418 dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
407 419
420 /* Save uar page shift */
421 if (!mlx4_is_slave(dev)) {
422 /* Virtual PCI function needs to determine UAR page size from
423 * firmware. Only master PCI function can set the uar page size
424 */
425 dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
426 mlx4_set_num_reserved_uars(dev, dev_cap);
427 }
428
408 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) { 429 if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
409 struct mlx4_init_hca_param hca_param; 430 struct mlx4_init_hca_param hca_param;
410 431
@@ -815,16 +836,25 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
815 return -ENODEV; 836 return -ENODEV;
816 } 837 }
817 838
818 /* slave gets uar page size from QUERY_HCA fw command */ 839 /* Set uar_page_shift for VF */
819 dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12); 840 dev->uar_page_shift = hca_param.uar_page_sz + 12;
820 841
821 /* TODO: relax this assumption */ 842 /* Make sure the master uar page size is valid */
822 if (dev->caps.uar_page_size != PAGE_SIZE) { 843 if (dev->uar_page_shift > PAGE_SHIFT) {
823 mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n", 844 mlx4_err(dev,
824 dev->caps.uar_page_size, PAGE_SIZE); 845 "Invalid configuration: uar page size is larger than system page size\n");
825 return -ENODEV; 846 return -ENODEV;
826 } 847 }
827 848
849 /* Set reserved_uars based on the uar_page_shift */
850 mlx4_set_num_reserved_uars(dev, &dev_cap);
851
852 /* Although uar page size in FW differs from system page size,
853 * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
854 * still works with assumption that uar page size == system page size
855 */
856 dev->caps.uar_page_size = PAGE_SIZE;
857
828 memset(&func_cap, 0, sizeof(func_cap)); 858 memset(&func_cap, 0, sizeof(func_cap));
829 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap); 859 err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
830 if (err) { 860 if (err) {
@@ -1226,6 +1256,7 @@ err_set_port:
1226static int mlx4_mf_bond(struct mlx4_dev *dev) 1256static int mlx4_mf_bond(struct mlx4_dev *dev)
1227{ 1257{
1228 int err = 0; 1258 int err = 0;
1259 int nvfs;
1229 struct mlx4_slaves_pport slaves_port1; 1260 struct mlx4_slaves_pport slaves_port1;
1230 struct mlx4_slaves_pport slaves_port2; 1261 struct mlx4_slaves_pport slaves_port2;
1231 DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX); 1262 DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);
@@ -1242,11 +1273,18 @@ static int mlx4_mf_bond(struct mlx4_dev *dev)
1242 return -EINVAL; 1273 return -EINVAL;
1243 } 1274 }
1244 1275
1276 /* number of virtual functions is number of total functions minus one
1277 * physical function for each port.
1278 */
1279 nvfs = bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) +
1280 bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1) - 2;
1281
1245 /* limit on maximum allowed VFs */ 1282 /* limit on maximum allowed VFs */
1246 if ((bitmap_weight(slaves_port1.slaves, dev->persist->num_vfs + 1) + 1283 if (nvfs > MAX_MF_BOND_ALLOWED_SLAVES) {
1247 bitmap_weight(slaves_port2.slaves, dev->persist->num_vfs + 1)) > 1284 mlx4_warn(dev, "HA mode is not supported for %d VFs (max %d are allowed)\n",
1248 MAX_MF_BOND_ALLOWED_SLAVES) 1285 nvfs, MAX_MF_BOND_ALLOWED_SLAVES);
1249 return -EINVAL; 1286 return -EINVAL;
1287 }
1250 1288
1251 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) { 1289 if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) {
1252 mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n"); 1290 mlx4_warn(dev, "HA mode unsupported for NON DMFS steering\n");
@@ -2179,8 +2217,12 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
2179 2217
2180 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; 2218 dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
2181 2219
2182 init_hca.log_uar_sz = ilog2(dev->caps.num_uars); 2220 /* Always set UAR page size 4KB, set log_uar_sz accordingly */
2183 init_hca.uar_page_sz = PAGE_SHIFT - 12; 2221 init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
2222 PAGE_SHIFT -
2223 DEFAULT_UAR_PAGE_SHIFT;
2224 init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
2225
2184 init_hca.mw_enabled = 0; 2226 init_hca.mw_enabled = 0;
2185 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || 2227 if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
2186 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) 2228 dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 609c59dc854e..b3cc3ab63799 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -269,9 +269,15 @@ EXPORT_SYMBOL_GPL(mlx4_bf_free);
269 269
270int mlx4_init_uar_table(struct mlx4_dev *dev) 270int mlx4_init_uar_table(struct mlx4_dev *dev)
271{ 271{
272 if (dev->caps.num_uars <= 128) { 272 int num_reserved_uar = mlx4_get_num_reserved_uar(dev);
273 mlx4_err(dev, "Only %d UAR pages (need more than 128)\n", 273
274 dev->caps.num_uars); 274 mlx4_dbg(dev, "uar_page_shift = %d", dev->uar_page_shift);
275 mlx4_dbg(dev, "Effective reserved_uars=%d", dev->caps.reserved_uars);
276
277 if (dev->caps.num_uars <= num_reserved_uar) {
278 mlx4_err(
279 dev, "Only %d UAR pages (need more than %d)\n",
280 dev->caps.num_uars, num_reserved_uar);
275 mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n"); 281 mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
276 return -ENODEV; 282 return -ENODEV;
277 } 283 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 787b7bb54d52..211c65087997 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -193,10 +193,10 @@ int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
193 if (need_mf_bond) { 193 if (need_mf_bond) {
194 if (port == 1) { 194 if (port == 1) {
195 mutex_lock(&table->mutex); 195 mutex_lock(&table->mutex);
196 mutex_lock(&dup_table->mutex); 196 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
197 } else { 197 } else {
198 mutex_lock(&dup_table->mutex); 198 mutex_lock(&dup_table->mutex);
199 mutex_lock(&table->mutex); 199 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
200 } 200 }
201 } else { 201 } else {
202 mutex_lock(&table->mutex); 202 mutex_lock(&table->mutex);
@@ -389,10 +389,10 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
389 if (dup) { 389 if (dup) {
390 if (port == 1) { 390 if (port == 1) {
391 mutex_lock(&table->mutex); 391 mutex_lock(&table->mutex);
392 mutex_lock(&dup_table->mutex); 392 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
393 } else { 393 } else {
394 mutex_lock(&dup_table->mutex); 394 mutex_lock(&dup_table->mutex);
395 mutex_lock(&table->mutex); 395 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
396 } 396 }
397 } else { 397 } else {
398 mutex_lock(&table->mutex); 398 mutex_lock(&table->mutex);
@@ -479,10 +479,10 @@ int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
479 if (dup) { 479 if (dup) {
480 if (port == 1) { 480 if (port == 1) {
481 mutex_lock(&table->mutex); 481 mutex_lock(&table->mutex);
482 mutex_lock(&dup_table->mutex); 482 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
483 } else { 483 } else {
484 mutex_lock(&dup_table->mutex); 484 mutex_lock(&dup_table->mutex);
485 mutex_lock(&table->mutex); 485 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
486 } 486 }
487 } else { 487 } else {
488 mutex_lock(&table->mutex); 488 mutex_lock(&table->mutex);
@@ -588,10 +588,10 @@ int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
588 if (need_mf_bond) { 588 if (need_mf_bond) {
589 if (port == 1) { 589 if (port == 1) {
590 mutex_lock(&table->mutex); 590 mutex_lock(&table->mutex);
591 mutex_lock(&dup_table->mutex); 591 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
592 } else { 592 } else {
593 mutex_lock(&dup_table->mutex); 593 mutex_lock(&dup_table->mutex);
594 mutex_lock(&table->mutex); 594 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
595 } 595 }
596 } else { 596 } else {
597 mutex_lock(&table->mutex); 597 mutex_lock(&table->mutex);
@@ -764,10 +764,10 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
764 if (dup) { 764 if (dup) {
765 if (port == 1) { 765 if (port == 1) {
766 mutex_lock(&table->mutex); 766 mutex_lock(&table->mutex);
767 mutex_lock(&dup_table->mutex); 767 mutex_lock_nested(&dup_table->mutex, SINGLE_DEPTH_NESTING);
768 } else { 768 } else {
769 mutex_lock(&dup_table->mutex); 769 mutex_lock(&dup_table->mutex);
770 mutex_lock(&table->mutex); 770 mutex_lock_nested(&table->mutex, SINGLE_DEPTH_NESTING);
771 } 771 }
772 } else { 772 } else {
773 mutex_lock(&table->mutex); 773 mutex_lock(&table->mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b46dbe29ef6c..25ce1b030a00 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -915,11 +915,13 @@ static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
915 915
916 spin_lock_irq(mlx4_tlock(dev)); 916 spin_lock_irq(mlx4_tlock(dev));
917 r = find_res(dev, counter_index, RES_COUNTER); 917 r = find_res(dev, counter_index, RES_COUNTER);
918 if (!r || r->owner != slave) 918 if (!r || r->owner != slave) {
919 ret = -EINVAL; 919 ret = -EINVAL;
920 counter = container_of(r, struct res_counter, com); 920 } else {
921 if (!counter->port) 921 counter = container_of(r, struct res_counter, com);
922 counter->port = port; 922 if (!counter->port)
923 counter->port = port;
924 }
923 925
924 spin_unlock_irq(mlx4_tlock(dev)); 926 spin_unlock_irq(mlx4_tlock(dev));
925 return ret; 927 return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index aac071a7e830..5b1753233c5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -223,6 +223,7 @@ struct mlx5e_pport_stats {
223 223
224static const char rq_stats_strings[][ETH_GSTRING_LEN] = { 224static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
225 "packets", 225 "packets",
226 "bytes",
226 "csum_none", 227 "csum_none",
227 "csum_sw", 228 "csum_sw",
228 "lro_packets", 229 "lro_packets",
@@ -232,16 +233,18 @@ static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
232 233
233struct mlx5e_rq_stats { 234struct mlx5e_rq_stats {
234 u64 packets; 235 u64 packets;
236 u64 bytes;
235 u64 csum_none; 237 u64 csum_none;
236 u64 csum_sw; 238 u64 csum_sw;
237 u64 lro_packets; 239 u64 lro_packets;
238 u64 lro_bytes; 240 u64 lro_bytes;
239 u64 wqe_err; 241 u64 wqe_err;
240#define NUM_RQ_STATS 6 242#define NUM_RQ_STATS 7
241}; 243};
242 244
243static const char sq_stats_strings[][ETH_GSTRING_LEN] = { 245static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
244 "packets", 246 "packets",
247 "bytes",
245 "tso_packets", 248 "tso_packets",
246 "tso_bytes", 249 "tso_bytes",
247 "csum_offload_none", 250 "csum_offload_none",
@@ -253,6 +256,7 @@ static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
253 256
254struct mlx5e_sq_stats { 257struct mlx5e_sq_stats {
255 u64 packets; 258 u64 packets;
259 u64 bytes;
256 u64 tso_packets; 260 u64 tso_packets;
257 u64 tso_bytes; 261 u64 tso_bytes;
258 u64 csum_offload_none; 262 u64 csum_offload_none;
@@ -260,7 +264,7 @@ struct mlx5e_sq_stats {
260 u64 wake; 264 u64 wake;
261 u64 dropped; 265 u64 dropped;
262 u64 nop; 266 u64 nop;
263#define NUM_SQ_STATS 8 267#define NUM_SQ_STATS 9
264}; 268};
265 269
266struct mlx5e_stats { 270struct mlx5e_stats {
@@ -304,14 +308,9 @@ enum {
304 MLX5E_RQ_STATE_POST_WQES_ENABLE, 308 MLX5E_RQ_STATE_POST_WQES_ENABLE,
305}; 309};
306 310
307enum cq_flags {
308 MLX5E_CQ_HAS_CQES = 1,
309};
310
311struct mlx5e_cq { 311struct mlx5e_cq {
312 /* data path - accessed per cqe */ 312 /* data path - accessed per cqe */
313 struct mlx5_cqwq wq; 313 struct mlx5_cqwq wq;
314 unsigned long flags;
315 314
316 /* data path - accessed per napi poll */ 315 /* data path - accessed per napi poll */
317 struct napi_struct *napi; 316 struct napi_struct *napi;
@@ -452,6 +451,8 @@ enum mlx5e_traffic_types {
452 MLX5E_NUM_TT, 451 MLX5E_NUM_TT,
453}; 452};
454 453
454#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY)
455
455enum mlx5e_rqt_ix { 456enum mlx5e_rqt_ix {
456 MLX5E_INDIRECTION_RQT, 457 MLX5E_INDIRECTION_RQT,
457 MLX5E_SINGLE_RQ_RQT, 458 MLX5E_SINGLE_RQ_RQT,
@@ -618,9 +619,12 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
618void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv); 619void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
619 620
620int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix); 621int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
622void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
621 623
622int mlx5e_open_locked(struct net_device *netdev); 624int mlx5e_open_locked(struct net_device *netdev);
623int mlx5e_close_locked(struct net_device *netdev); 625int mlx5e_close_locked(struct net_device *netdev);
626void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
627 int num_channels);
624 628
625static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq, 629static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
626 struct mlx5e_tx_wqe *wqe, int bf_sz) 630 struct mlx5e_tx_wqe *wqe, int bf_sz)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index be6543570b2b..2018eebe1531 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -62,10 +62,11 @@ static void mlx5e_timestamp_overflow(struct work_struct *work)
62 struct delayed_work *dwork = to_delayed_work(work); 62 struct delayed_work *dwork = to_delayed_work(work);
63 struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp, 63 struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
64 overflow_work); 64 overflow_work);
65 unsigned long flags;
65 66
66 write_lock(&tstamp->lock); 67 write_lock_irqsave(&tstamp->lock, flags);
67 timecounter_read(&tstamp->clock); 68 timecounter_read(&tstamp->clock);
68 write_unlock(&tstamp->lock); 69 write_unlock_irqrestore(&tstamp->lock, flags);
69 schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period); 70 schedule_delayed_work(&tstamp->overflow_work, tstamp->overflow_period);
70} 71}
71 72
@@ -136,10 +137,11 @@ static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
136 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, 137 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
137 ptp_info); 138 ptp_info);
138 u64 ns = timespec64_to_ns(ts); 139 u64 ns = timespec64_to_ns(ts);
140 unsigned long flags;
139 141
140 write_lock(&tstamp->lock); 142 write_lock_irqsave(&tstamp->lock, flags);
141 timecounter_init(&tstamp->clock, &tstamp->cycles, ns); 143 timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
142 write_unlock(&tstamp->lock); 144 write_unlock_irqrestore(&tstamp->lock, flags);
143 145
144 return 0; 146 return 0;
145} 147}
@@ -150,10 +152,11 @@ static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp,
150 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, 152 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
151 ptp_info); 153 ptp_info);
152 u64 ns; 154 u64 ns;
155 unsigned long flags;
153 156
154 write_lock(&tstamp->lock); 157 write_lock_irqsave(&tstamp->lock, flags);
155 ns = timecounter_read(&tstamp->clock); 158 ns = timecounter_read(&tstamp->clock);
156 write_unlock(&tstamp->lock); 159 write_unlock_irqrestore(&tstamp->lock, flags);
157 160
158 *ts = ns_to_timespec64(ns); 161 *ts = ns_to_timespec64(ns);
159 162
@@ -164,10 +167,11 @@ static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
164{ 167{
165 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, 168 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
166 ptp_info); 169 ptp_info);
170 unsigned long flags;
167 171
168 write_lock(&tstamp->lock); 172 write_lock_irqsave(&tstamp->lock, flags);
169 timecounter_adjtime(&tstamp->clock, delta); 173 timecounter_adjtime(&tstamp->clock, delta);
170 write_unlock(&tstamp->lock); 174 write_unlock_irqrestore(&tstamp->lock, flags);
171 175
172 return 0; 176 return 0;
173} 177}
@@ -176,6 +180,7 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
176{ 180{
177 u64 adj; 181 u64 adj;
178 u32 diff; 182 u32 diff;
183 unsigned long flags;
179 int neg_adj = 0; 184 int neg_adj = 0;
180 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp, 185 struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
181 ptp_info); 186 ptp_info);
@@ -189,11 +194,11 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
189 adj *= delta; 194 adj *= delta;
190 diff = div_u64(adj, 1000000000ULL); 195 diff = div_u64(adj, 1000000000ULL);
191 196
192 write_lock(&tstamp->lock); 197 write_lock_irqsave(&tstamp->lock, flags);
193 timecounter_read(&tstamp->clock); 198 timecounter_read(&tstamp->clock);
194 tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff : 199 tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
195 tstamp->nominal_c_mult + diff; 200 tstamp->nominal_c_mult + diff;
196 write_unlock(&tstamp->lock); 201 write_unlock_irqrestore(&tstamp->lock, flags);
197 202
198 return 0; 203 return 0;
199} 204}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 65624ac65b4c..5abeb00fceb8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -385,6 +385,8 @@ static int mlx5e_set_channels(struct net_device *dev,
385 mlx5e_close_locked(dev); 385 mlx5e_close_locked(dev);
386 386
387 priv->params.num_channels = count; 387 priv->params.num_channels = count;
388 mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
389 MLX5E_INDIR_RQT_SIZE, count);
388 390
389 if (was_opened) 391 if (was_opened)
390 err = mlx5e_open_locked(dev); 392 err = mlx5e_open_locked(dev);
@@ -703,18 +705,36 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
703 return 0; 705 return 0;
704} 706}
705 707
708static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
709{
710 struct mlx5_core_dev *mdev = priv->mdev;
711 void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
712 int i;
713
714 MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
715 mlx5e_build_tir_ctx_hash(tirc, priv);
716
717 for (i = 0; i < MLX5E_NUM_TT; i++)
718 if (IS_HASHING_TT(i))
719 mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen);
720}
721
706static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, 722static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
707 const u8 *key, const u8 hfunc) 723 const u8 *key, const u8 hfunc)
708{ 724{
709 struct mlx5e_priv *priv = netdev_priv(dev); 725 struct mlx5e_priv *priv = netdev_priv(dev);
710 bool close_open; 726 int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
711 int err = 0; 727 void *in;
712 728
713 if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && 729 if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
714 (hfunc != ETH_RSS_HASH_XOR) && 730 (hfunc != ETH_RSS_HASH_XOR) &&
715 (hfunc != ETH_RSS_HASH_TOP)) 731 (hfunc != ETH_RSS_HASH_TOP))
716 return -EINVAL; 732 return -EINVAL;
717 733
734 in = mlx5_vzalloc(inlen);
735 if (!in)
736 return -ENOMEM;
737
718 mutex_lock(&priv->state_lock); 738 mutex_lock(&priv->state_lock);
719 739
720 if (indir) { 740 if (indir) {
@@ -723,11 +743,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
723 mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT); 743 mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
724 } 744 }
725 745
726 close_open = (key || (hfunc != ETH_RSS_HASH_NO_CHANGE)) &&
727 test_bit(MLX5E_STATE_OPENED, &priv->state);
728 if (close_open)
729 mlx5e_close_locked(dev);
730
731 if (key) 746 if (key)
732 memcpy(priv->params.toeplitz_hash_key, key, 747 memcpy(priv->params.toeplitz_hash_key, key,
733 sizeof(priv->params.toeplitz_hash_key)); 748 sizeof(priv->params.toeplitz_hash_key));
@@ -735,12 +750,13 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
735 if (hfunc != ETH_RSS_HASH_NO_CHANGE) 750 if (hfunc != ETH_RSS_HASH_NO_CHANGE)
736 priv->params.rss_hfunc = hfunc; 751 priv->params.rss_hfunc = hfunc;
737 752
738 if (close_open) 753 mlx5e_modify_tirs_hash(priv, in, inlen);
739 err = mlx5e_open_locked(priv->netdev);
740 754
741 mutex_unlock(&priv->state_lock); 755 mutex_unlock(&priv->state_lock);
742 756
743 return err; 757 kvfree(in);
758
759 return 0;
744} 760}
745 761
746static int mlx5e_get_rxnfc(struct net_device *netdev, 762static int mlx5e_get_rxnfc(struct net_device *netdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6a3e430f1062..402994bf7e16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -141,6 +141,10 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
141 return; 141 return;
142 142
143 /* Collect firts the SW counters and then HW for consistency */ 143 /* Collect firts the SW counters and then HW for consistency */
144 s->rx_packets = 0;
145 s->rx_bytes = 0;
146 s->tx_packets = 0;
147 s->tx_bytes = 0;
144 s->tso_packets = 0; 148 s->tso_packets = 0;
145 s->tso_bytes = 0; 149 s->tso_bytes = 0;
146 s->tx_queue_stopped = 0; 150 s->tx_queue_stopped = 0;
@@ -155,6 +159,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
155 for (i = 0; i < priv->params.num_channels; i++) { 159 for (i = 0; i < priv->params.num_channels; i++) {
156 rq_stats = &priv->channel[i]->rq.stats; 160 rq_stats = &priv->channel[i]->rq.stats;
157 161
162 s->rx_packets += rq_stats->packets;
163 s->rx_bytes += rq_stats->bytes;
158 s->lro_packets += rq_stats->lro_packets; 164 s->lro_packets += rq_stats->lro_packets;
159 s->lro_bytes += rq_stats->lro_bytes; 165 s->lro_bytes += rq_stats->lro_bytes;
160 s->rx_csum_none += rq_stats->csum_none; 166 s->rx_csum_none += rq_stats->csum_none;
@@ -164,6 +170,8 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
164 for (j = 0; j < priv->params.num_tc; j++) { 170 for (j = 0; j < priv->params.num_tc; j++) {
165 sq_stats = &priv->channel[i]->sq[j].stats; 171 sq_stats = &priv->channel[i]->sq[j].stats;
166 172
173 s->tx_packets += sq_stats->packets;
174 s->tx_bytes += sq_stats->bytes;
167 s->tso_packets += sq_stats->tso_packets; 175 s->tso_packets += sq_stats->tso_packets;
168 s->tso_bytes += sq_stats->tso_bytes; 176 s->tso_bytes += sq_stats->tso_bytes;
169 s->tx_queue_stopped += sq_stats->stopped; 177 s->tx_queue_stopped += sq_stats->stopped;
@@ -225,23 +233,6 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
225 s->tx_broadcast_bytes = 233 s->tx_broadcast_bytes =
226 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets); 234 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
227 235
228 s->rx_packets =
229 s->rx_unicast_packets +
230 s->rx_multicast_packets +
231 s->rx_broadcast_packets;
232 s->rx_bytes =
233 s->rx_unicast_bytes +
234 s->rx_multicast_bytes +
235 s->rx_broadcast_bytes;
236 s->tx_packets =
237 s->tx_unicast_packets +
238 s->tx_multicast_packets +
239 s->tx_broadcast_packets;
240 s->tx_bytes =
241 s->tx_unicast_bytes +
242 s->tx_multicast_bytes +
243 s->tx_broadcast_bytes;
244
245 /* Update calculated offload counters */ 236 /* Update calculated offload counters */
246 s->tx_csum_offload = s->tx_packets - tx_offload_none; 237 s->tx_csum_offload = s->tx_packets - tx_offload_none;
247 s->rx_csum_good = s->rx_packets - s->rx_csum_none - 238 s->rx_csum_good = s->rx_packets - s->rx_csum_none -
@@ -1199,7 +1190,6 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
1199 ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE); 1190 ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
1200 1191
1201 ix = priv->params.indirection_rqt[ix]; 1192 ix = priv->params.indirection_rqt[ix];
1202 ix = ix % priv->params.num_channels;
1203 MLX5_SET(rqtc, rqtc, rq_num[i], 1193 MLX5_SET(rqtc, rqtc, rq_num[i],
1204 test_bit(MLX5E_STATE_OPENED, &priv->state) ? 1194 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
1205 priv->channel[ix]->rq.rqn : 1195 priv->channel[ix]->rq.rqn :
@@ -1317,7 +1307,22 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
1317 lro_timer_supported_periods[2])); 1307 lro_timer_supported_periods[2]));
1318} 1308}
1319 1309
1320static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt) 1310void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
1311{
1312 MLX5_SET(tirc, tirc, rx_hash_fn,
1313 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
1314 if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
1315 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
1316 rx_hash_toeplitz_key);
1317 size_t len = MLX5_FLD_SZ_BYTES(tirc,
1318 rx_hash_toeplitz_key);
1319
1320 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
1321 memcpy(rss_key, priv->params.toeplitz_hash_key, len);
1322 }
1323}
1324
1325static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
1321{ 1326{
1322 struct mlx5_core_dev *mdev = priv->mdev; 1327 struct mlx5_core_dev *mdev = priv->mdev;
1323 1328
@@ -1325,6 +1330,7 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
1325 void *tirc; 1330 void *tirc;
1326 int inlen; 1331 int inlen;
1327 int err; 1332 int err;
1333 int tt;
1328 1334
1329 inlen = MLX5_ST_SZ_BYTES(modify_tir_in); 1335 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1330 in = mlx5_vzalloc(inlen); 1336 in = mlx5_vzalloc(inlen);
@@ -1336,7 +1342,11 @@ static int mlx5e_modify_tir_lro(struct mlx5e_priv *priv, int tt)
1336 1342
1337 mlx5e_build_tir_ctx_lro(tirc, priv); 1343 mlx5e_build_tir_ctx_lro(tirc, priv);
1338 1344
1339 err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen); 1345 for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
1346 err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
1347 if (err)
1348 break;
1349 }
1340 1350
1341 kvfree(in); 1351 kvfree(in);
1342 1352
@@ -1672,17 +1682,7 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
1672 default: 1682 default:
1673 MLX5_SET(tirc, tirc, indirect_table, 1683 MLX5_SET(tirc, tirc, indirect_table,
1674 priv->rqtn[MLX5E_INDIRECTION_RQT]); 1684 priv->rqtn[MLX5E_INDIRECTION_RQT]);
1675 MLX5_SET(tirc, tirc, rx_hash_fn, 1685 mlx5e_build_tir_ctx_hash(tirc, priv);
1676 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
1677 if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
1678 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
1679 rx_hash_toeplitz_key);
1680 size_t len = MLX5_FLD_SZ_BYTES(tirc,
1681 rx_hash_toeplitz_key);
1682
1683 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
1684 memcpy(rss_key, priv->params.toeplitz_hash_key, len);
1685 }
1686 break; 1686 break;
1687 } 1687 }
1688 1688
@@ -1885,8 +1885,10 @@ static int mlx5e_set_features(struct net_device *netdev,
1885 mlx5e_close_locked(priv->netdev); 1885 mlx5e_close_locked(priv->netdev);
1886 1886
1887 priv->params.lro_en = !!(features & NETIF_F_LRO); 1887 priv->params.lro_en = !!(features & NETIF_F_LRO);
1888 mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV4_TCP); 1888 err = mlx5e_modify_tirs_lro(priv);
1889 mlx5e_modify_tir_lro(priv, MLX5E_TT_IPV6_TCP); 1889 if (err)
1890 mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
1891 err);
1890 1892
1891 if (was_opened) 1893 if (was_opened)
1892 err = mlx5e_open_locked(priv->netdev); 1894 err = mlx5e_open_locked(priv->netdev);
@@ -2024,18 +2026,37 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
2024 vf_stats); 2026 vf_stats);
2025} 2027}
2026 2028
2027static struct net_device_ops mlx5e_netdev_ops = { 2029static const struct net_device_ops mlx5e_netdev_ops_basic = {
2030 .ndo_open = mlx5e_open,
2031 .ndo_stop = mlx5e_close,
2032 .ndo_start_xmit = mlx5e_xmit,
2033 .ndo_get_stats64 = mlx5e_get_stats,
2034 .ndo_set_rx_mode = mlx5e_set_rx_mode,
2035 .ndo_set_mac_address = mlx5e_set_mac,
2036 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
2037 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
2038 .ndo_set_features = mlx5e_set_features,
2039 .ndo_change_mtu = mlx5e_change_mtu,
2040 .ndo_do_ioctl = mlx5e_ioctl,
2041};
2042
2043static const struct net_device_ops mlx5e_netdev_ops_sriov = {
2028 .ndo_open = mlx5e_open, 2044 .ndo_open = mlx5e_open,
2029 .ndo_stop = mlx5e_close, 2045 .ndo_stop = mlx5e_close,
2030 .ndo_start_xmit = mlx5e_xmit, 2046 .ndo_start_xmit = mlx5e_xmit,
2031 .ndo_get_stats64 = mlx5e_get_stats, 2047 .ndo_get_stats64 = mlx5e_get_stats,
2032 .ndo_set_rx_mode = mlx5e_set_rx_mode, 2048 .ndo_set_rx_mode = mlx5e_set_rx_mode,
2033 .ndo_set_mac_address = mlx5e_set_mac, 2049 .ndo_set_mac_address = mlx5e_set_mac,
2034 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, 2050 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
2035 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid, 2051 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
2036 .ndo_set_features = mlx5e_set_features, 2052 .ndo_set_features = mlx5e_set_features,
2037 .ndo_change_mtu = mlx5e_change_mtu, 2053 .ndo_change_mtu = mlx5e_change_mtu,
2038 .ndo_do_ioctl = mlx5e_ioctl, 2054 .ndo_do_ioctl = mlx5e_ioctl,
2055 .ndo_set_vf_mac = mlx5e_set_vf_mac,
2056 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
2057 .ndo_get_vf_config = mlx5e_get_vf_config,
2058 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
2059 .ndo_get_vf_stats = mlx5e_get_vf_stats,
2039}; 2060};
2040 2061
2041static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev) 2062static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2070,12 +2091,20 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
2070 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/; 2091 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
2071} 2092}
2072 2093
2094void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
2095 int num_channels)
2096{
2097 int i;
2098
2099 for (i = 0; i < len; i++)
2100 indirection_rqt[i] = i % num_channels;
2101}
2102
2073static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev, 2103static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
2074 struct net_device *netdev, 2104 struct net_device *netdev,
2075 int num_channels) 2105 int num_channels)
2076{ 2106{
2077 struct mlx5e_priv *priv = netdev_priv(netdev); 2107 struct mlx5e_priv *priv = netdev_priv(netdev);
2078 int i;
2079 2108
2080 priv->params.log_sq_size = 2109 priv->params.log_sq_size =
2081 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; 2110 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
@@ -2099,8 +2128,8 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
2099 netdev_rss_key_fill(priv->params.toeplitz_hash_key, 2128 netdev_rss_key_fill(priv->params.toeplitz_hash_key,
2100 sizeof(priv->params.toeplitz_hash_key)); 2129 sizeof(priv->params.toeplitz_hash_key));
2101 2130
2102 for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) 2131 mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
2103 priv->params.indirection_rqt[i] = i % num_channels; 2132 MLX5E_INDIR_RQT_SIZE, num_channels);
2104 2133
2105 priv->params.lro_wqe_sz = 2134 priv->params.lro_wqe_sz =
2106 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ; 2135 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
@@ -2137,18 +2166,11 @@ static void mlx5e_build_netdev(struct net_device *netdev)
2137 2166
2138 SET_NETDEV_DEV(netdev, &mdev->pdev->dev); 2167 SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
2139 2168
2140 if (priv->params.num_tc > 1) 2169 if (MLX5_CAP_GEN(mdev, vport_group_manager))
2141 mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue; 2170 netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
2142 2171 else
2143 if (MLX5_CAP_GEN(mdev, vport_group_manager)) { 2172 netdev->netdev_ops = &mlx5e_netdev_ops_basic;
2144 mlx5e_netdev_ops.ndo_set_vf_mac = mlx5e_set_vf_mac;
2145 mlx5e_netdev_ops.ndo_set_vf_vlan = mlx5e_set_vf_vlan;
2146 mlx5e_netdev_ops.ndo_get_vf_config = mlx5e_get_vf_config;
2147 mlx5e_netdev_ops.ndo_set_vf_link_state = mlx5e_set_vf_link_state;
2148 mlx5e_netdev_ops.ndo_get_vf_stats = mlx5e_get_vf_stats;
2149 }
2150 2173
2151 netdev->netdev_ops = &mlx5e_netdev_ops;
2152 netdev->watchdog_timeo = 15 * HZ; 2174 netdev->watchdog_timeo = 15 * HZ;
2153 2175
2154 netdev->ethtool_ops = &mlx5e_ethtool_ops; 2176 netdev->ethtool_ops = &mlx5e_ethtool_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index dd959d929aad..59658b9d05d1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -230,10 +230,6 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
230 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); 230 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
231 int work_done; 231 int work_done;
232 232
233 /* avoid accessing cq (dma coherent memory) if not needed */
234 if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
235 return 0;
236
237 for (work_done = 0; work_done < budget; work_done++) { 233 for (work_done = 0; work_done < budget; work_done++) {
238 struct mlx5e_rx_wqe *wqe; 234 struct mlx5e_rx_wqe *wqe;
239 struct mlx5_cqe64 *cqe; 235 struct mlx5_cqe64 *cqe;
@@ -267,6 +263,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
267 263
268 mlx5e_build_rx_skb(cqe, rq, skb); 264 mlx5e_build_rx_skb(cqe, rq, skb);
269 rq->stats.packets++; 265 rq->stats.packets++;
266 rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
270 napi_gro_receive(cq->napi, skb); 267 napi_gro_receive(cq->napi, skb);
271 268
272wq_ll_pop: 269wq_ll_pop:
@@ -279,8 +276,5 @@ wq_ll_pop:
279 /* ensure cq space is freed before enabling more cqes */ 276 /* ensure cq space is freed before enabling more cqes */
280 wmb(); 277 wmb();
281 278
282 if (work_done == budget)
283 set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
284
285 return work_done; 279 return work_done;
286} 280}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 2c3fba0fff54..bb4eeeb007de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -179,6 +179,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
179 unsigned int skb_len = skb->len; 179 unsigned int skb_len = skb->len;
180 u8 opcode = MLX5_OPCODE_SEND; 180 u8 opcode = MLX5_OPCODE_SEND;
181 dma_addr_t dma_addr = 0; 181 dma_addr_t dma_addr = 0;
182 unsigned int num_bytes;
182 bool bf = false; 183 bool bf = false;
183 u16 headlen; 184 u16 headlen;
184 u16 ds_cnt; 185 u16 ds_cnt;
@@ -204,8 +205,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
204 opcode = MLX5_OPCODE_LSO; 205 opcode = MLX5_OPCODE_LSO;
205 ihs = skb_transport_offset(skb) + tcp_hdrlen(skb); 206 ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
206 payload_len = skb->len - ihs; 207 payload_len = skb->len - ihs;
207 wi->num_bytes = skb->len + 208 num_bytes = skb->len + (skb_shinfo(skb)->gso_segs - 1) * ihs;
208 (skb_shinfo(skb)->gso_segs - 1) * ihs;
209 sq->stats.tso_packets++; 209 sq->stats.tso_packets++;
210 sq->stats.tso_bytes += payload_len; 210 sq->stats.tso_bytes += payload_len;
211 } else { 211 } else {
@@ -213,9 +213,11 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
213 !skb->xmit_more && 213 !skb->xmit_more &&
214 !skb_shinfo(skb)->nr_frags; 214 !skb_shinfo(skb)->nr_frags;
215 ihs = mlx5e_get_inline_hdr_size(sq, skb, bf); 215 ihs = mlx5e_get_inline_hdr_size(sq, skb, bf);
216 wi->num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN); 216 num_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
217 } 217 }
218 218
219 wi->num_bytes = num_bytes;
220
219 if (skb_vlan_tag_present(skb)) { 221 if (skb_vlan_tag_present(skb)) {
220 mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data, 222 mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs, &skb_data,
221 &skb_len); 223 &skb_len);
@@ -307,6 +309,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
307 sq->bf_budget = bf ? sq->bf_budget - 1 : 0; 309 sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
308 310
309 sq->stats.packets++; 311 sq->stats.packets++;
312 sq->stats.bytes += num_bytes;
310 return NETDEV_TX_OK; 313 return NETDEV_TX_OK;
311 314
312dma_unmap_wqe_err: 315dma_unmap_wqe_err:
@@ -335,10 +338,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
335 u16 sqcc; 338 u16 sqcc;
336 int i; 339 int i;
337 340
338 /* avoid accessing cq (dma coherent memory) if not needed */
339 if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
340 return false;
341
342 sq = container_of(cq, struct mlx5e_sq, cq); 341 sq = container_of(cq, struct mlx5e_sq, cq);
343 342
344 npkts = 0; 343 npkts = 0;
@@ -422,10 +421,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
422 netif_tx_wake_queue(sq->txq); 421 netif_tx_wake_queue(sq->txq);
423 sq->stats.wake++; 422 sq->stats.wake++;
424 } 423 }
425 if (i == MLX5E_TX_CQ_POLL_BUDGET) {
426 set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
427 return true;
428 }
429 424
430 return false; 425 return (i == MLX5E_TX_CQ_POLL_BUDGET);
431} 426}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 4ac8d716dbdd..66d51a77609e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -88,7 +88,6 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
88{ 88{
89 struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq); 89 struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
90 90
91 set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
92 set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags); 91 set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
93 barrier(); 92 barrier();
94 napi_schedule(cq->napi); 93 napi_schedule(cq->napi);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index c071077aafbd..7992c553c1f5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -215,7 +215,7 @@ mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
215{ 215{
216 int index = q->producer_counter & (q->count - 1); 216 int index = q->producer_counter & (q->count - 1);
217 217
218 if ((q->producer_counter - q->consumer_counter) == q->count) 218 if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
219 return NULL; 219 return NULL;
220 return mlxsw_pci_queue_elem_info_get(q, index); 220 return mlxsw_pci_queue_elem_info_get(q, index);
221} 221}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index 726f5435b32f..ae65b9940aed 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -49,7 +49,7 @@
49#define MLXSW_PORT_MID 0xd000 49#define MLXSW_PORT_MID 0xd000
50 50
51#define MLXSW_PORT_MAX_PHY_PORTS 0x40 51#define MLXSW_PORT_MAX_PHY_PORTS 0x40
52#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS 52#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1)
53 53
54#define MLXSW_PORT_DEVID_BITS_OFFSET 10 54#define MLXSW_PORT_DEVID_BITS_OFFSET 10
55#define MLXSW_PORT_PHY_BITS_OFFSET 4 55#define MLXSW_PORT_PHY_BITS_OFFSET 4
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index bb77e2207804..ffe4c0305733 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -873,6 +873,62 @@ static inline void mlxsw_reg_spvm_pack(char *payload, u8 local_port,
873 } 873 }
874} 874}
875 875
876/* SPAFT - Switch Port Acceptable Frame Types
877 * ------------------------------------------
878 * The Switch Port Acceptable Frame Types register configures the frame
879 * admittance of the port.
880 */
881#define MLXSW_REG_SPAFT_ID 0x2010
882#define MLXSW_REG_SPAFT_LEN 0x08
883
884static const struct mlxsw_reg_info mlxsw_reg_spaft = {
885 .id = MLXSW_REG_SPAFT_ID,
886 .len = MLXSW_REG_SPAFT_LEN,
887};
888
889/* reg_spaft_local_port
890 * Local port number.
891 * Access: Index
892 *
893 * Note: CPU port is not supported (all tag types are allowed).
894 */
895MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8);
896
897/* reg_spaft_sub_port
898 * Virtual port within the physical port.
899 * Should be set to 0 when virtual ports are not enabled on the port.
900 * Access: RW
901 */
902MLXSW_ITEM32(reg, spaft, sub_port, 0x00, 8, 8);
903
904/* reg_spaft_allow_untagged
905 * When set, untagged frames on the ingress are allowed (default).
906 * Access: RW
907 */
908MLXSW_ITEM32(reg, spaft, allow_untagged, 0x04, 31, 1);
909
910/* reg_spaft_allow_prio_tagged
911 * When set, priority tagged frames on the ingress are allowed (default).
912 * Access: RW
913 */
914MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1);
915
916/* reg_spaft_allow_tagged
917 * When set, tagged frames on the ingress are allowed (default).
918 * Access: RW
919 */
920MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1);
921
922static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
923 bool allow_untagged)
924{
925 MLXSW_REG_ZERO(spaft, payload);
926 mlxsw_reg_spaft_local_port_set(payload, local_port);
927 mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
928 mlxsw_reg_spaft_allow_prio_tagged_set(payload, true);
929 mlxsw_reg_spaft_allow_tagged_set(payload, true);
930}
931
876/* SFGC - Switch Flooding Group Configuration 932/* SFGC - Switch Flooding Group Configuration
877 * ------------------------------------------ 933 * ------------------------------------------
878 * The following register controls the association of flooding tables and MIDs 934 * The following register controls the association of flooding tables and MIDs
@@ -3203,6 +3259,8 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
3203 return "SPVID"; 3259 return "SPVID";
3204 case MLXSW_REG_SPVM_ID: 3260 case MLXSW_REG_SPVM_ID:
3205 return "SPVM"; 3261 return "SPVM";
3262 case MLXSW_REG_SPAFT_ID:
3263 return "SPAFT";
3206 case MLXSW_REG_SFGC_ID: 3264 case MLXSW_REG_SFGC_ID:
3207 return "SFGC"; 3265 return "SFGC";
3208 case MLXSW_REG_SFTR_ID: 3266 case MLXSW_REG_SFTR_ID:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 217856bdd400..a94daa8c346c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2123,6 +2123,8 @@ static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2123 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port)) 2123 if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
2124 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n"); 2124 netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
2125 2125
2126 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
2127
2126 mlxsw_sp_port->learning = 0; 2128 mlxsw_sp_port->learning = 0;
2127 mlxsw_sp_port->learning_sync = 0; 2129 mlxsw_sp_port->learning_sync = 0;
2128 mlxsw_sp_port->uc_flood = 0; 2130 mlxsw_sp_port->uc_flood = 0;
@@ -2356,9 +2358,7 @@ static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
2356 if (mlxsw_sp_port->bridged) { 2358 if (mlxsw_sp_port->bridged) {
2357 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port); 2359 mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
2358 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false); 2360 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
2359 2361 mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
2360 if (lag->ref_count == 1)
2361 mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
2362 } 2362 }
2363 2363
2364 if (lag->ref_count == 1) { 2364 if (lag->ref_count == 1) {
@@ -2746,6 +2746,13 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2746 goto err_vport_flood_set; 2746 goto err_vport_flood_set;
2747 } 2747 }
2748 2748
2749 err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
2750 MLXSW_REG_SPMS_STATE_FORWARDING);
2751 if (err) {
2752 netdev_err(dev, "Failed to set STP state\n");
2753 goto err_port_stp_state_set;
2754 }
2755
2749 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport)) 2756 if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
2750 netdev_err(dev, "Failed to flush FDB\n"); 2757 netdev_err(dev, "Failed to flush FDB\n");
2751 2758
@@ -2763,6 +2770,7 @@ static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
2763 2770
2764 return 0; 2771 return 0;
2765 2772
2773err_port_stp_state_set:
2766err_vport_flood_set: 2774err_vport_flood_set:
2767err_port_vid_learning_set: 2775err_port_vid_learning_set:
2768err_port_vid_to_fid_validate: 2776err_port_vid_to_fid_validate:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 7f42eb1c320e..3b89ed2f3c76 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -254,5 +254,6 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
254int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid, 254int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
255 bool set, bool only_uc); 255 bool set, bool only_uc);
256void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port); 256void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
257int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
257 258
258#endif 259#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index e492ca2cdecd..7b56098acc58 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -370,7 +370,8 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
370 return err; 370 return err;
371} 371}
372 372
373static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) 373static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
374 u16 vid)
374{ 375{
375 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; 376 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
376 char spvid_pl[MLXSW_REG_SPVID_LEN]; 377 char spvid_pl[MLXSW_REG_SPVID_LEN];
@@ -379,6 +380,53 @@ static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
379 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl); 380 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
380} 381}
381 382
383static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
384 bool allow)
385{
386 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
387 char spaft_pl[MLXSW_REG_SPAFT_LEN];
388
389 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
390 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
391}
392
393int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
394{
395 struct net_device *dev = mlxsw_sp_port->dev;
396 int err;
397
398 if (!vid) {
399 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
400 if (err) {
401 netdev_err(dev, "Failed to disallow untagged traffic\n");
402 return err;
403 }
404 } else {
405 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
406 if (err) {
407 netdev_err(dev, "Failed to set PVID\n");
408 return err;
409 }
410
411 /* Only allow if not already allowed. */
412 if (!mlxsw_sp_port->pvid) {
413 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
414 true);
415 if (err) {
416 netdev_err(dev, "Failed to allow untagged traffic\n");
417 goto err_port_allow_untagged_set;
418 }
419 }
420 }
421
422 mlxsw_sp_port->pvid = vid;
423 return 0;
424
425err_port_allow_untagged_set:
426 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
427 return err;
428}
429
382static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid) 430static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
383{ 431{
384 char sfmr_pl[MLXSW_REG_SFMR_LEN]; 432 char sfmr_pl[MLXSW_REG_SFMR_LEN];
@@ -540,7 +588,12 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
540 netdev_err(dev, "Unable to add PVID %d\n", vid_begin); 588 netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
541 goto err_port_pvid_set; 589 goto err_port_pvid_set;
542 } 590 }
543 mlxsw_sp_port->pvid = vid_begin; 591 } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
592 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
593 if (err) {
594 netdev_err(dev, "Unable to del PVID\n");
595 goto err_port_pvid_set;
596 }
544 } 597 }
545 598
546 /* Changing activity bits only if HW operation succeded */ 599 /* Changing activity bits only if HW operation succeded */
@@ -892,20 +945,18 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
892 return err; 945 return err;
893 } 946 }
894 947
948 if (init)
949 goto out;
950
895 pvid = mlxsw_sp_port->pvid; 951 pvid = mlxsw_sp_port->pvid;
896 if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) { 952 if (pvid >= vid_begin && pvid <= vid_end) {
897 /* Default VLAN is always 1 */ 953 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
898 err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
899 if (err) { 954 if (err) {
900 netdev_err(dev, "Unable to del PVID %d\n", pvid); 955 netdev_err(dev, "Unable to del PVID %d\n", pvid);
901 return err; 956 return err;
902 } 957 }
903 mlxsw_sp_port->pvid = 1;
904 } 958 }
905 959
906 if (init)
907 goto out;
908
909 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, 960 err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
910 false, false); 961 false, false);
911 if (err) { 962 if (err) {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 00cfd95ca59d..3e67f451f2ab 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -474,9 +474,9 @@ static int moxart_mac_probe(struct platform_device *pdev)
474 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 474 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
475 ndev->base_addr = res->start; 475 ndev->base_addr = res->start;
476 priv->base = devm_ioremap_resource(p_dev, res); 476 priv->base = devm_ioremap_resource(p_dev, res);
477 ret = IS_ERR(priv->base); 477 if (IS_ERR(priv->base)) {
478 if (ret) {
479 dev_err(p_dev, "devm_ioremap_resource failed\n"); 478 dev_err(p_dev, "devm_ioremap_resource failed\n");
479 ret = PTR_ERR(priv->base);
480 goto init_fail; 480 goto init_fail;
481 } 481 }
482 482
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 689a4a5c8dcf..1ef03939d25f 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -811,7 +811,7 @@ qcaspi_netdev_setup(struct net_device *dev)
811 dev->netdev_ops = &qcaspi_netdev_ops; 811 dev->netdev_ops = &qcaspi_netdev_ops;
812 qcaspi_set_ethtool_ops(dev); 812 qcaspi_set_ethtool_ops(dev);
813 dev->watchdog_timeo = QCASPI_TX_TIMEOUT; 813 dev->watchdog_timeo = QCASPI_TX_TIMEOUT;
814 dev->flags = IFF_MULTICAST; 814 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
815 dev->tx_queue_len = 100; 815 dev->tx_queue_len = 100;
816 816
817 qca = netdev_priv(dev); 817 qca = netdev_priv(dev);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 17d5571d0432..dd2cf3738b73 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4933,8 +4933,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4933 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); 4933 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4934 break; 4934 break;
4935 case RTL_GIGA_MAC_VER_40: 4935 case RTL_GIGA_MAC_VER_40:
4936 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
4937 break;
4938 case RTL_GIGA_MAC_VER_41: 4936 case RTL_GIGA_MAC_VER_41:
4939 case RTL_GIGA_MAC_VER_42: 4937 case RTL_GIGA_MAC_VER_42:
4940 case RTL_GIGA_MAC_VER_43: 4938 case RTL_GIGA_MAC_VER_43:
@@ -4943,8 +4941,6 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4943 case RTL_GIGA_MAC_VER_46: 4941 case RTL_GIGA_MAC_VER_46:
4944 case RTL_GIGA_MAC_VER_47: 4942 case RTL_GIGA_MAC_VER_47:
4945 case RTL_GIGA_MAC_VER_48: 4943 case RTL_GIGA_MAC_VER_48:
4946 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
4947 break;
4948 case RTL_GIGA_MAC_VER_49: 4944 case RTL_GIGA_MAC_VER_49:
4949 case RTL_GIGA_MAC_VER_50: 4945 case RTL_GIGA_MAC_VER_50:
4950 case RTL_GIGA_MAC_VER_51: 4946 case RTL_GIGA_MAC_VER_51:
@@ -6137,28 +6133,28 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
6137 sw_cnt_1ms_ini = 16000000/rg_saw_cnt; 6133 sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
6138 sw_cnt_1ms_ini &= 0x0fff; 6134 sw_cnt_1ms_ini &= 0x0fff;
6139 data = r8168_mac_ocp_read(tp, 0xd412); 6135 data = r8168_mac_ocp_read(tp, 0xd412);
6140 data &= 0x0fff; 6136 data &= ~0x0fff;
6141 data |= sw_cnt_1ms_ini; 6137 data |= sw_cnt_1ms_ini;
6142 r8168_mac_ocp_write(tp, 0xd412, data); 6138 r8168_mac_ocp_write(tp, 0xd412, data);
6143 } 6139 }
6144 6140
6145 data = r8168_mac_ocp_read(tp, 0xe056); 6141 data = r8168_mac_ocp_read(tp, 0xe056);
6146 data &= 0xf0; 6142 data &= ~0xf0;
6147 data |= 0x07; 6143 data |= 0x70;
6148 r8168_mac_ocp_write(tp, 0xe056, data); 6144 r8168_mac_ocp_write(tp, 0xe056, data);
6149 6145
6150 data = r8168_mac_ocp_read(tp, 0xe052); 6146 data = r8168_mac_ocp_read(tp, 0xe052);
6151 data &= 0x8008; 6147 data &= ~0x6000;
6152 data |= 0x6000; 6148 data |= 0x8008;
6153 r8168_mac_ocp_write(tp, 0xe052, data); 6149 r8168_mac_ocp_write(tp, 0xe052, data);
6154 6150
6155 data = r8168_mac_ocp_read(tp, 0xe0d6); 6151 data = r8168_mac_ocp_read(tp, 0xe0d6);
6156 data &= 0x01ff; 6152 data &= ~0x01ff;
6157 data |= 0x017f; 6153 data |= 0x017f;
6158 r8168_mac_ocp_write(tp, 0xe0d6, data); 6154 r8168_mac_ocp_write(tp, 0xe0d6, data);
6159 6155
6160 data = r8168_mac_ocp_read(tp, 0xd420); 6156 data = r8168_mac_ocp_read(tp, 0xd420);
6161 data &= 0x0fff; 6157 data &= ~0x0fff;
6162 data |= 0x047f; 6158 data |= 0x047f;
6163 r8168_mac_ocp_write(tp, 0xd420, data); 6159 r8168_mac_ocp_write(tp, 0xd420, data);
6164 6160
@@ -7730,10 +7726,13 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7730{ 7726{
7731 struct rtl8169_private *tp = netdev_priv(dev); 7727 struct rtl8169_private *tp = netdev_priv(dev);
7732 void __iomem *ioaddr = tp->mmio_addr; 7728 void __iomem *ioaddr = tp->mmio_addr;
7729 struct pci_dev *pdev = tp->pci_dev;
7733 struct rtl8169_counters *counters = tp->counters; 7730 struct rtl8169_counters *counters = tp->counters;
7734 unsigned int start; 7731 unsigned int start;
7735 7732
7736 if (netif_running(dev)) 7733 pm_runtime_get_noresume(&pdev->dev);
7734
7735 if (netif_running(dev) && pm_runtime_active(&pdev->dev))
7737 rtl8169_rx_missed(dev, ioaddr); 7736 rtl8169_rx_missed(dev, ioaddr);
7738 7737
7739 do { 7738 do {
@@ -7761,7 +7760,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7761 * Fetch additonal counter values missing in stats collected by driver 7760 * Fetch additonal counter values missing in stats collected by driver
7762 * from tally counters. 7761 * from tally counters.
7763 */ 7762 */
7764 rtl8169_update_counters(dev); 7763 if (pm_runtime_active(&pdev->dev))
7764 rtl8169_update_counters(dev);
7765 7765
7766 /* 7766 /*
7767 * Subtract values fetched during initalization. 7767 * Subtract values fetched during initalization.
@@ -7774,6 +7774,8 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7774 stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - 7774 stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
7775 le16_to_cpu(tp->tc_offset.tx_aborted); 7775 le16_to_cpu(tp->tc_offset.tx_aborted);
7776 7776
7777 pm_runtime_put_noidle(&pdev->dev);
7778
7777 return stats; 7779 return stats;
7778} 7780}
7779 7781
@@ -7853,6 +7855,10 @@ static int rtl8169_runtime_suspend(struct device *device)
7853 7855
7854 rtl8169_net_suspend(dev); 7856 rtl8169_net_suspend(dev);
7855 7857
7858 /* Update counters before going runtime suspend */
7859 rtl8169_rx_missed(dev, tp->mmio_addr);
7860 rtl8169_update_counters(dev);
7861
7856 return 0; 7862 return 0;
7857} 7863}
7858 7864
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ac43ed914fcf..86449c357168 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1139,7 +1139,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
1139 if (netif_running(ndev)) { 1139 if (netif_running(ndev)) {
1140 netif_device_detach(ndev); 1140 netif_device_detach(ndev);
1141 /* Stop PTP Clock driver */ 1141 /* Stop PTP Clock driver */
1142 ravb_ptp_stop(ndev); 1142 if (priv->chip_id == RCAR_GEN2)
1143 ravb_ptp_stop(ndev);
1143 /* Wait for DMA stopping */ 1144 /* Wait for DMA stopping */
1144 error = ravb_stop_dma(ndev); 1145 error = ravb_stop_dma(ndev);
1145 if (error) { 1146 if (error) {
@@ -1170,7 +1171,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
1170 ravb_emac_init(ndev); 1171 ravb_emac_init(ndev);
1171 1172
1172 /* Initialise PTP Clock driver */ 1173 /* Initialise PTP Clock driver */
1173 ravb_ptp_init(ndev, priv->pdev); 1174 if (priv->chip_id == RCAR_GEN2)
1175 ravb_ptp_init(ndev, priv->pdev);
1174 1176
1175 netif_device_attach(ndev); 1177 netif_device_attach(ndev);
1176 } 1178 }
@@ -1298,7 +1300,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1298 netif_tx_stop_all_queues(ndev); 1300 netif_tx_stop_all_queues(ndev);
1299 1301
1300 /* Stop PTP Clock driver */ 1302 /* Stop PTP Clock driver */
1301 ravb_ptp_stop(ndev); 1303 if (priv->chip_id == RCAR_GEN2)
1304 ravb_ptp_stop(ndev);
1302 1305
1303 /* Wait for DMA stopping */ 1306 /* Wait for DMA stopping */
1304 ravb_stop_dma(ndev); 1307 ravb_stop_dma(ndev);
@@ -1311,7 +1314,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
1311 ravb_emac_init(ndev); 1314 ravb_emac_init(ndev);
1312 1315
1313 /* Initialise PTP Clock driver */ 1316 /* Initialise PTP Clock driver */
1314 ravb_ptp_init(ndev, priv->pdev); 1317 if (priv->chip_id == RCAR_GEN2)
1318 ravb_ptp_init(ndev, priv->pdev);
1315 1319
1316 netif_tx_start_all_queues(ndev); 1320 netif_tx_start_all_queues(ndev);
1317} 1321}
@@ -1718,7 +1722,6 @@ static int ravb_set_gti(struct net_device *ndev)
1718static int ravb_probe(struct platform_device *pdev) 1722static int ravb_probe(struct platform_device *pdev)
1719{ 1723{
1720 struct device_node *np = pdev->dev.of_node; 1724 struct device_node *np = pdev->dev.of_node;
1721 const struct of_device_id *match;
1722 struct ravb_private *priv; 1725 struct ravb_private *priv;
1723 enum ravb_chip_id chip_id; 1726 enum ravb_chip_id chip_id;
1724 struct net_device *ndev; 1727 struct net_device *ndev;
@@ -1750,8 +1753,7 @@ static int ravb_probe(struct platform_device *pdev)
1750 ndev->base_addr = res->start; 1753 ndev->base_addr = res->start;
1751 ndev->dma = -1; 1754 ndev->dma = -1;
1752 1755
1753 match = of_match_device(of_match_ptr(ravb_match_table), &pdev->dev); 1756 chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
1754 chip_id = (enum ravb_chip_id)match->data;
1755 1757
1756 if (chip_id == RCAR_GEN3) 1758 if (chip_id == RCAR_GEN3)
1757 irq = platform_get_irq_byname(pdev, "ch22"); 1759 irq = platform_get_irq_byname(pdev, "ch22");
@@ -1814,10 +1816,6 @@ static int ravb_probe(struct platform_device *pdev)
1814 CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC); 1816 CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC);
1815 } 1817 }
1816 1818
1817 /* Set CSEL value */
1818 ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
1819 CCC);
1820
1821 /* Set GTI value */ 1819 /* Set GTI value */
1822 error = ravb_set_gti(ndev); 1820 error = ravb_set_gti(ndev);
1823 if (error) 1821 if (error)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index dfa9e59c9442..738449992876 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -3061,15 +3061,11 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
3061 mdp->ether_link_active_low = pd->ether_link_active_low; 3061 mdp->ether_link_active_low = pd->ether_link_active_low;
3062 3062
3063 /* set cpu data */ 3063 /* set cpu data */
3064 if (id) { 3064 if (id)
3065 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; 3065 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
3066 } else { 3066 else
3067 const struct of_device_id *match; 3067 mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
3068 3068
3069 match = of_match_device(of_match_ptr(sh_eth_match_table),
3070 &pdev->dev);
3071 mdp->cd = (struct sh_eth_cpu_data *)match->data;
3072 }
3073 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); 3069 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
3074 if (!mdp->reg_offset) { 3070 if (!mdp->reg_offset) {
3075 dev_err(&pdev->dev, "Unknown register type (%d)\n", 3071 dev_err(&pdev->dev, "Unknown register type (%d)\n",
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 0e2fc1a844ab..db7db8ac4ca3 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2342,8 +2342,8 @@ static int smc_drv_probe(struct platform_device *pdev)
2342 } 2342 }
2343 2343
2344 ndev->irq = platform_get_irq(pdev, 0); 2344 ndev->irq = platform_get_irq(pdev, 0);
2345 if (ndev->irq <= 0) { 2345 if (ndev->irq < 0) {
2346 ret = -ENODEV; 2346 ret = ndev->irq;
2347 goto out_release_io; 2347 goto out_release_io;
2348 } 2348 }
2349 /* 2349 /*
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0faf16336035..efb54f356a67 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -199,21 +199,12 @@ int stmmac_mdio_register(struct net_device *ndev)
199 struct stmmac_priv *priv = netdev_priv(ndev); 199 struct stmmac_priv *priv = netdev_priv(ndev);
200 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data; 200 struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
201 int addr, found; 201 int addr, found;
202 struct device_node *mdio_node = NULL; 202 struct device_node *mdio_node = priv->plat->mdio_node;
203 struct device_node *child_node = NULL;
204 203
205 if (!mdio_bus_data) 204 if (!mdio_bus_data)
206 return 0; 205 return 0;
207 206
208 if (IS_ENABLED(CONFIG_OF)) { 207 if (IS_ENABLED(CONFIG_OF)) {
209 for_each_child_of_node(priv->device->of_node, child_node) {
210 if (of_device_is_compatible(child_node,
211 "snps,dwmac-mdio")) {
212 mdio_node = child_node;
213 break;
214 }
215 }
216
217 if (mdio_node) { 208 if (mdio_node) {
218 netdev_dbg(ndev, "FOUND MDIO subnode\n"); 209 netdev_dbg(ndev, "FOUND MDIO subnode\n");
219 } else { 210 } else {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 6a52fa18cbf2..4514ba73d961 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -110,6 +110,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
110 struct device_node *np = pdev->dev.of_node; 110 struct device_node *np = pdev->dev.of_node;
111 struct plat_stmmacenet_data *plat; 111 struct plat_stmmacenet_data *plat;
112 struct stmmac_dma_cfg *dma_cfg; 112 struct stmmac_dma_cfg *dma_cfg;
113 struct device_node *child_node = NULL;
113 114
114 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); 115 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
115 if (!plat) 116 if (!plat)
@@ -140,13 +141,19 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
140 plat->phy_node = of_node_get(np); 141 plat->phy_node = of_node_get(np);
141 } 142 }
142 143
144 for_each_child_of_node(np, child_node)
145 if (of_device_is_compatible(child_node, "snps,dwmac-mdio")) {
146 plat->mdio_node = child_node;
147 break;
148 }
149
143 /* "snps,phy-addr" is not a standard property. Mark it as deprecated 150 /* "snps,phy-addr" is not a standard property. Mark it as deprecated
144 * and warn of its use. Remove this when phy node support is added. 151 * and warn of its use. Remove this when phy node support is added.
145 */ 152 */
146 if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0) 153 if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
147 dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n"); 154 dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
148 155
149 if ((plat->phy_node && !of_phy_is_fixed_link(np)) || plat->phy_bus_name) 156 if ((plat->phy_node && !of_phy_is_fixed_link(np)) || !plat->mdio_node)
150 plat->mdio_bus_data = NULL; 157 plat->mdio_bus_data = NULL;
151 else 158 else
152 plat->mdio_bus_data = 159 plat->mdio_bus_data =
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 70814b7386b3..af11ed1e0bcc 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -426,7 +426,7 @@
426#define DWC_MMC_RXOCTETCOUNT_GB 0x0784 426#define DWC_MMC_RXOCTETCOUNT_GB 0x0784
427#define DWC_MMC_RXPACKETCOUNT_GB 0x0780 427#define DWC_MMC_RXPACKETCOUNT_GB 0x0780
428 428
429static int debug = 3; 429static int debug = -1;
430module_param(debug, int, 0); 430module_param(debug, int, 0);
431MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)"); 431MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
432 432
@@ -650,6 +650,11 @@ struct net_local {
650 u32 mmc_tx_counters_mask; 650 u32 mmc_tx_counters_mask;
651 651
652 struct dwceqos_flowcontrol flowcontrol; 652 struct dwceqos_flowcontrol flowcontrol;
653
654 /* Tracks the intermediate state of phy started but hardware
655 * init not finished yet.
656 */
657 bool phy_defer;
653}; 658};
654 659
655static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, 660static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
@@ -901,6 +906,9 @@ static void dwceqos_adjust_link(struct net_device *ndev)
901 struct phy_device *phydev = lp->phy_dev; 906 struct phy_device *phydev = lp->phy_dev;
902 int status_change = 0; 907 int status_change = 0;
903 908
909 if (lp->phy_defer)
910 return;
911
904 if (phydev->link) { 912 if (phydev->link) {
905 if ((lp->speed != phydev->speed) || 913 if ((lp->speed != phydev->speed) ||
906 (lp->duplex != phydev->duplex)) { 914 (lp->duplex != phydev->duplex)) {
@@ -1113,7 +1121,7 @@ static int dwceqos_descriptor_init(struct net_local *lp)
1113 /* Allocate DMA descriptors */ 1121 /* Allocate DMA descriptors */
1114 size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); 1122 size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
1115 lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, 1123 lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
1116 &lp->rx_descs_addr, 0); 1124 &lp->rx_descs_addr, GFP_KERNEL);
1117 if (!lp->rx_descs) 1125 if (!lp->rx_descs)
1118 goto err_out; 1126 goto err_out;
1119 lp->rx_descs_tail_addr = lp->rx_descs_addr + 1127 lp->rx_descs_tail_addr = lp->rx_descs_addr +
@@ -1121,7 +1129,7 @@ static int dwceqos_descriptor_init(struct net_local *lp)
1121 1129
1122 size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); 1130 size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
1123 lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, 1131 lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
1124 &lp->tx_descs_addr, 0); 1132 &lp->tx_descs_addr, GFP_KERNEL);
1125 if (!lp->tx_descs) 1133 if (!lp->tx_descs)
1126 goto err_out; 1134 goto err_out;
1127 lp->tx_descs_tail_addr = lp->tx_descs_addr + 1135 lp->tx_descs_tail_addr = lp->tx_descs_addr +
@@ -1635,6 +1643,12 @@ static void dwceqos_init_hw(struct net_local *lp)
1635 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); 1643 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
1636 dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, 1644 dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
1637 regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); 1645 regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
1646
1647 lp->phy_defer = false;
1648 mutex_lock(&lp->phy_dev->lock);
1649 phy_read_status(lp->phy_dev);
1650 dwceqos_adjust_link(lp->ndev);
1651 mutex_unlock(&lp->phy_dev->lock);
1638} 1652}
1639 1653
1640static void dwceqos_tx_reclaim(unsigned long data) 1654static void dwceqos_tx_reclaim(unsigned long data)
@@ -1880,9 +1894,13 @@ static int dwceqos_open(struct net_device *ndev)
1880 } 1894 }
1881 netdev_reset_queue(ndev); 1895 netdev_reset_queue(ndev);
1882 1896
1883 napi_enable(&lp->napi); 1897 /* The dwceqos reset state machine requires all phy clocks to complete,
1898 * hence the unusual init order with phy_start first.
1899 */
1900 lp->phy_defer = true;
1884 phy_start(lp->phy_dev); 1901 phy_start(lp->phy_dev);
1885 dwceqos_init_hw(lp); 1902 dwceqos_init_hw(lp);
1903 napi_enable(&lp->napi);
1886 1904
1887 netif_start_queue(ndev); 1905 netif_start_queue(ndev);
1888 tasklet_enable(&lp->tx_bdreclaim_tasklet); 1906 tasklet_enable(&lp->tx_bdreclaim_tasklet);
@@ -1915,18 +1933,19 @@ static int dwceqos_stop(struct net_device *ndev)
1915{ 1933{
1916 struct net_local *lp = netdev_priv(ndev); 1934 struct net_local *lp = netdev_priv(ndev);
1917 1935
1918 phy_stop(lp->phy_dev);
1919
1920 tasklet_disable(&lp->tx_bdreclaim_tasklet); 1936 tasklet_disable(&lp->tx_bdreclaim_tasklet);
1921 netif_stop_queue(ndev);
1922 napi_disable(&lp->napi); 1937 napi_disable(&lp->napi);
1923 1938
1924 dwceqos_drain_dma(lp); 1939 /* Stop all tx before we drain the tx dma. */
1940 netif_tx_lock_bh(lp->ndev);
1941 netif_stop_queue(ndev);
1942 netif_tx_unlock_bh(lp->ndev);
1925 1943
1926 netif_tx_lock(lp->ndev); 1944 dwceqos_drain_dma(lp);
1927 dwceqos_reset_hw(lp); 1945 dwceqos_reset_hw(lp);
1946 phy_stop(lp->phy_dev);
1947
1928 dwceqos_descriptor_free(lp); 1948 dwceqos_descriptor_free(lp);
1929 netif_tx_unlock(lp->ndev);
1930 1949
1931 return 0; 1950 return 0;
1932} 1951}
@@ -2178,12 +2197,10 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2178 ((trans.initial_descriptor + trans.nr_descriptors) % 2197 ((trans.initial_descriptor + trans.nr_descriptors) %
2179 DWCEQOS_TX_DCNT)); 2198 DWCEQOS_TX_DCNT));
2180 2199
2181 dwceqos_tx_finalize(skb, lp, &trans);
2182
2183 netdev_sent_queue(ndev, skb->len);
2184
2185 spin_lock_bh(&lp->tx_lock); 2200 spin_lock_bh(&lp->tx_lock);
2186 lp->tx_free -= trans.nr_descriptors; 2201 lp->tx_free -= trans.nr_descriptors;
2202 dwceqos_tx_finalize(skb, lp, &trans);
2203 netdev_sent_queue(ndev, skb->len);
2187 spin_unlock_bh(&lp->tx_lock); 2204 spin_unlock_bh(&lp->tx_lock);
2188 2205
2189 ndev->trans_start = jiffies; 2206 ndev->trans_start = jiffies;
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index e9cc61e1ec74..c3e85acfdc70 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -63,8 +63,12 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
63 mode = AM33XX_GMII_SEL_MODE_RGMII; 63 mode = AM33XX_GMII_SEL_MODE_RGMII;
64 break; 64 break;
65 65
66 case PHY_INTERFACE_MODE_MII:
67 default: 66 default:
67 dev_warn(priv->dev,
68 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
69 phy_modes(phy_mode));
70 /* fallthrough */
71 case PHY_INTERFACE_MODE_MII:
68 mode = AM33XX_GMII_SEL_MODE_MII; 72 mode = AM33XX_GMII_SEL_MODE_MII;
69 break; 73 break;
70 }; 74 };
@@ -106,8 +110,12 @@ static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
106 mode = AM33XX_GMII_SEL_MODE_RGMII; 110 mode = AM33XX_GMII_SEL_MODE_RGMII;
107 break; 111 break;
108 112
109 case PHY_INTERFACE_MODE_MII:
110 default: 113 default:
114 dev_warn(priv->dev,
115 "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
116 phy_modes(phy_mode));
117 /* fallthrough */
118 case PHY_INTERFACE_MODE_MII:
111 mode = AM33XX_GMII_SEL_MODE_MII; 119 mode = AM33XX_GMII_SEL_MODE_MII;
112 break; 120 break;
113 }; 121 };
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index c61d66d38634..029841f98c32 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -117,21 +117,17 @@ static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
117 *ndesc = le32_to_cpu(desc->next_desc); 117 *ndesc = le32_to_cpu(desc->next_desc);
118} 118}
119 119
120static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc) 120static u32 get_sw_data(int index, struct knav_dma_desc *desc)
121{ 121{
122 *pad0 = le32_to_cpu(desc->pad[0]); 122 /* No Endian conversion needed as this data is untouched by hw */
123 *pad1 = le32_to_cpu(desc->pad[1]); 123 return desc->sw_data[index];
124 *pad2 = le32_to_cpu(desc->pad[2]);
125} 124}
126 125
127static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc) 126/* use these macros to get sw data */
128{ 127#define GET_SW_DATA0(desc) get_sw_data(0, desc)
129 u64 pad64; 128#define GET_SW_DATA1(desc) get_sw_data(1, desc)
130 129#define GET_SW_DATA2(desc) get_sw_data(2, desc)
131 pad64 = le32_to_cpu(desc->pad[0]) + 130#define GET_SW_DATA3(desc) get_sw_data(3, desc)
132 ((u64)le32_to_cpu(desc->pad[1]) << 32);
133 *padptr = (void *)(uintptr_t)pad64;
134}
135 131
136static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len, 132static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
137 struct knav_dma_desc *desc) 133 struct knav_dma_desc *desc)
@@ -163,13 +159,18 @@ static void set_desc_info(u32 desc_info, u32 pkt_info,
163 desc->packet_info = cpu_to_le32(pkt_info); 159 desc->packet_info = cpu_to_le32(pkt_info);
164} 160}
165 161
166static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc) 162static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
167{ 163{
168 desc->pad[0] = cpu_to_le32(pad0); 164 /* No Endian conversion needed as this data is untouched by hw */
169 desc->pad[1] = cpu_to_le32(pad1); 165 desc->sw_data[index] = data;
170 desc->pad[2] = cpu_to_le32(pad1);
171} 166}
172 167
168/* use these macros to set sw data */
169#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
170#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
171#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
172#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)
173
173static void set_org_pkt_info(dma_addr_t buff, u32 buff_len, 174static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
174 struct knav_dma_desc *desc) 175 struct knav_dma_desc *desc)
175{ 176{
@@ -581,7 +582,6 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
581 dma_addr_t dma_desc, dma_buf; 582 dma_addr_t dma_desc, dma_buf;
582 unsigned int buf_len, dma_sz = sizeof(*ndesc); 583 unsigned int buf_len, dma_sz = sizeof(*ndesc);
583 void *buf_ptr; 584 void *buf_ptr;
584 u32 pad[2];
585 u32 tmp; 585 u32 tmp;
586 586
587 get_words(&dma_desc, 1, &desc->next_desc); 587 get_words(&dma_desc, 1, &desc->next_desc);
@@ -593,14 +593,20 @@ static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
593 break; 593 break;
594 } 594 }
595 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc); 595 get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
596 get_pad_ptr(&buf_ptr, ndesc); 596 /* warning!!!! We are retrieving the virtual ptr in the sw_data
597 * field as a 32bit value. Will not work on 64bit machines
598 */
599 buf_ptr = (void *)GET_SW_DATA0(ndesc);
600 buf_len = (int)GET_SW_DATA1(desc);
597 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE); 601 dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
598 __free_page(buf_ptr); 602 __free_page(buf_ptr);
599 knav_pool_desc_put(netcp->rx_pool, desc); 603 knav_pool_desc_put(netcp->rx_pool, desc);
600 } 604 }
601 605 /* warning!!!! We are retrieving the virtual ptr in the sw_data
602 get_pad_info(&pad[0], &pad[1], &buf_len, desc); 606 * field as a 32bit value. Will not work on 64bit machines
603 buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 607 */
608 buf_ptr = (void *)GET_SW_DATA0(desc);
609 buf_len = (int)GET_SW_DATA1(desc);
604 610
605 if (buf_ptr) 611 if (buf_ptr)
606 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr); 612 netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
@@ -639,7 +645,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
639 dma_addr_t dma_desc, dma_buff; 645 dma_addr_t dma_desc, dma_buff;
640 struct netcp_packet p_info; 646 struct netcp_packet p_info;
641 struct sk_buff *skb; 647 struct sk_buff *skb;
642 u32 pad[2];
643 void *org_buf_ptr; 648 void *org_buf_ptr;
644 649
645 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz); 650 dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
@@ -653,8 +658,11 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
653 } 658 }
654 659
655 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc); 660 get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
656 get_pad_info(&pad[0], &pad[1], &org_buf_len, desc); 661 /* warning!!!! We are retrieving the virtual ptr in the sw_data
657 org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32)); 662 * field as a 32bit value. Will not work on 64bit machines
663 */
664 org_buf_ptr = (void *)GET_SW_DATA0(desc);
665 org_buf_len = (int)GET_SW_DATA1(desc);
658 666
659 if (unlikely(!org_buf_ptr)) { 667 if (unlikely(!org_buf_ptr)) {
660 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n"); 668 dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
@@ -679,7 +687,6 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
679 /* Fill in the page fragment list */ 687 /* Fill in the page fragment list */
680 while (dma_desc) { 688 while (dma_desc) {
681 struct page *page; 689 struct page *page;
682 void *ptr;
683 690
684 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz); 691 ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
685 if (unlikely(!ndesc)) { 692 if (unlikely(!ndesc)) {
@@ -688,8 +695,10 @@ static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
688 } 695 }
689 696
690 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc); 697 get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
691 get_pad_ptr(&ptr, ndesc); 698 /* warning!!!! We are retrieving the virtual ptr in the sw_data
692 page = ptr; 699 * field as a 32bit value. Will not work on 64bit machines
700 */
701 page = (struct page *)GET_SW_DATA0(desc);
693 702
694 if (likely(dma_buff && buf_len && page)) { 703 if (likely(dma_buff && buf_len && page)) {
695 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE, 704 dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
@@ -777,7 +786,10 @@ static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
777 } 786 }
778 787
779 get_org_pkt_info(&dma, &buf_len, desc); 788 get_org_pkt_info(&dma, &buf_len, desc);
780 get_pad_ptr(&buf_ptr, desc); 789 /* warning!!!! We are retrieving the virtual ptr in the sw_data
790 * field as a 32bit value. Will not work on 64bit machines
791 */
792 buf_ptr = (void *)GET_SW_DATA0(desc);
781 793
782 if (unlikely(!dma)) { 794 if (unlikely(!dma)) {
783 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n"); 795 dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
@@ -829,7 +841,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
829 struct page *page; 841 struct page *page;
830 dma_addr_t dma; 842 dma_addr_t dma;
831 void *bufptr; 843 void *bufptr;
832 u32 pad[3]; 844 u32 sw_data[2];
833 845
834 /* Allocate descriptor */ 846 /* Allocate descriptor */
835 hwdesc = knav_pool_desc_get(netcp->rx_pool); 847 hwdesc = knav_pool_desc_get(netcp->rx_pool);
@@ -846,7 +858,7 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
846 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 858 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
847 859
848 bufptr = netdev_alloc_frag(primary_buf_len); 860 bufptr = netdev_alloc_frag(primary_buf_len);
849 pad[2] = primary_buf_len; 861 sw_data[1] = primary_buf_len;
850 862
851 if (unlikely(!bufptr)) { 863 if (unlikely(!bufptr)) {
852 dev_warn_ratelimited(netcp->ndev_dev, 864 dev_warn_ratelimited(netcp->ndev_dev,
@@ -858,9 +870,10 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
858 if (unlikely(dma_mapping_error(netcp->dev, dma))) 870 if (unlikely(dma_mapping_error(netcp->dev, dma)))
859 goto fail; 871 goto fail;
860 872
861 pad[0] = lower_32_bits((uintptr_t)bufptr); 873 /* warning!!!! We are saving the virtual ptr in the sw_data
862 pad[1] = upper_32_bits((uintptr_t)bufptr); 874 * field as a 32bit value. Will not work on 64bit machines
863 875 */
876 sw_data[0] = (u32)bufptr;
864 } else { 877 } else {
865 /* Allocate a secondary receive queue entry */ 878 /* Allocate a secondary receive queue entry */
866 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD); 879 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
@@ -870,9 +883,11 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
870 } 883 }
871 buf_len = PAGE_SIZE; 884 buf_len = PAGE_SIZE;
872 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE); 885 dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
873 pad[0] = lower_32_bits(dma); 886 /* warning!!!! We are saving the virtual ptr in the sw_data
874 pad[1] = upper_32_bits(dma); 887 * field as a 32bit value. Will not work on 64bit machines
875 pad[2] = 0; 888 */
889 sw_data[0] = (u32)page;
890 sw_data[1] = 0;
876 } 891 }
877 892
878 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC; 893 desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
@@ -882,7 +897,8 @@ static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
882 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) << 897 pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
883 KNAV_DMA_DESC_RETQ_SHIFT; 898 KNAV_DMA_DESC_RETQ_SHIFT;
884 set_org_pkt_info(dma, buf_len, hwdesc); 899 set_org_pkt_info(dma, buf_len, hwdesc);
885 set_pad_info(pad[0], pad[1], pad[2], hwdesc); 900 SET_SW_DATA0(sw_data[0], hwdesc);
901 SET_SW_DATA1(sw_data[1], hwdesc);
886 set_desc_info(desc_info, pkt_info, hwdesc); 902 set_desc_info(desc_info, pkt_info, hwdesc);
887 903
888 /* Push to FDQs */ 904 /* Push to FDQs */
@@ -971,7 +987,6 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
971 unsigned int budget) 987 unsigned int budget)
972{ 988{
973 struct knav_dma_desc *desc; 989 struct knav_dma_desc *desc;
974 void *ptr;
975 struct sk_buff *skb; 990 struct sk_buff *skb;
976 unsigned int dma_sz; 991 unsigned int dma_sz;
977 dma_addr_t dma; 992 dma_addr_t dma;
@@ -988,8 +1003,10 @@ static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
988 continue; 1003 continue;
989 } 1004 }
990 1005
991 get_pad_ptr(&ptr, desc); 1006 /* warning!!!! We are retrieving the virtual ptr in the sw_data
992 skb = ptr; 1007 * field as a 32bit value. Will not work on 64bit machines
1008 */
1009 skb = (struct sk_buff *)GET_SW_DATA0(desc);
993 netcp_free_tx_desc_chain(netcp, desc, dma_sz); 1010 netcp_free_tx_desc_chain(netcp, desc, dma_sz);
994 if (!skb) { 1011 if (!skb) {
995 dev_err(netcp->ndev_dev, "No skb in Tx desc\n"); 1012 dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
@@ -1194,10 +1211,10 @@ static int netcp_tx_submit_skb(struct netcp_intf *netcp,
1194 } 1211 }
1195 1212
1196 set_words(&tmp, 1, &desc->packet_info); 1213 set_words(&tmp, 1, &desc->packet_info);
1197 tmp = lower_32_bits((uintptr_t)&skb); 1214 /* warning!!!! We are saving the virtual ptr in the sw_data
1198 set_words(&tmp, 1, &desc->pad[0]); 1215 * field as a 32bit value. Will not work on 64bit machines
1199 tmp = upper_32_bits((uintptr_t)&skb); 1216 */
1200 set_words(&tmp, 1, &desc->pad[1]); 1217 SET_SW_DATA0((u32)skb, desc);
1201 1218
1202 if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) { 1219 if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
1203 tmp = tx_pipe->switch_to_port; 1220 tmp = tx_pipe->switch_to_port;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 0b14ac3b8d11..0bf7edd99573 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1039,6 +1039,34 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
1039 return geneve_xmit_skb(skb, dev, info); 1039 return geneve_xmit_skb(skb, dev, info);
1040} 1040}
1041 1041
1042static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
1043{
1044 /* The max_mtu calculation does not take account of GENEVE
1045 * options, to avoid excluding potentially valid
1046 * configurations.
1047 */
1048 int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
1049 - dev->hard_header_len;
1050
1051 if (new_mtu < 68)
1052 return -EINVAL;
1053
1054 if (new_mtu > max_mtu) {
1055 if (strict)
1056 return -EINVAL;
1057
1058 new_mtu = max_mtu;
1059 }
1060
1061 dev->mtu = new_mtu;
1062 return 0;
1063}
1064
1065static int geneve_change_mtu(struct net_device *dev, int new_mtu)
1066{
1067 return __geneve_change_mtu(dev, new_mtu, true);
1068}
1069
1042static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) 1070static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
1043{ 1071{
1044 struct ip_tunnel_info *info = skb_tunnel_info(skb); 1072 struct ip_tunnel_info *info = skb_tunnel_info(skb);
@@ -1083,7 +1111,7 @@ static const struct net_device_ops geneve_netdev_ops = {
1083 .ndo_stop = geneve_stop, 1111 .ndo_stop = geneve_stop,
1084 .ndo_start_xmit = geneve_xmit, 1112 .ndo_start_xmit = geneve_xmit,
1085 .ndo_get_stats64 = ip_tunnel_get_stats64, 1113 .ndo_get_stats64 = ip_tunnel_get_stats64,
1086 .ndo_change_mtu = eth_change_mtu, 1114 .ndo_change_mtu = geneve_change_mtu,
1087 .ndo_validate_addr = eth_validate_addr, 1115 .ndo_validate_addr = eth_validate_addr,
1088 .ndo_set_mac_address = eth_mac_addr, 1116 .ndo_set_mac_address = eth_mac_addr,
1089 .ndo_fill_metadata_dst = geneve_fill_metadata_dst, 1117 .ndo_fill_metadata_dst = geneve_fill_metadata_dst,
@@ -1150,6 +1178,7 @@ static void geneve_setup(struct net_device *dev)
1150 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 1178 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
1151 1179
1152 netif_keep_dst(dev); 1180 netif_keep_dst(dev);
1181 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1153 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 1182 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
1154 eth_hw_addr_random(dev); 1183 eth_hw_addr_random(dev);
1155} 1184}
@@ -1441,12 +1470,23 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
1441 return dev; 1470 return dev;
1442 1471
1443 err = geneve_configure(net, dev, &geneve_remote_unspec, 1472 err = geneve_configure(net, dev, &geneve_remote_unspec,
1444 0, 0, 0, htons(dst_port), true, 0); 1473 0, 0, 0, htons(dst_port), true,
1445 if (err) { 1474 GENEVE_F_UDP_ZERO_CSUM6_RX);
1446 free_netdev(dev); 1475 if (err)
1447 return ERR_PTR(err); 1476 goto err;
1448 } 1477
1478 /* openvswitch users expect packet sizes to be unrestricted,
1479 * so set the largest MTU we can.
1480 */
1481 err = __geneve_change_mtu(dev, IP_MAX_MTU, false);
1482 if (err)
1483 goto err;
1484
1449 return dev; 1485 return dev;
1486
1487 err:
1488 free_netdev(dev);
1489 return ERR_PTR(err);
1450} 1490}
1451EXPORT_SYMBOL_GPL(geneve_dev_create_fb); 1491EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
1452 1492
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 1d3a66563bac..98e34fee45c7 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1089,6 +1089,9 @@ static int netvsc_probe(struct hv_device *dev,
1089 net->ethtool_ops = &ethtool_ops; 1089 net->ethtool_ops = &ethtool_ops;
1090 SET_NETDEV_DEV(net, &dev->device); 1090 SET_NETDEV_DEV(net, &dev->device);
1091 1091
1092 /* We always need headroom for rndis header */
1093 net->needed_headroom = RNDIS_AND_PPI_SIZE;
1094
1092 /* Notify the netvsc driver of the new device */ 1095 /* Notify the netvsc driver of the new device */
1093 memset(&device_info, 0, sizeof(device_info)); 1096 memset(&device_info, 0, sizeof(device_info));
1094 device_info.ring_size = ring_size; 1097 device_info.ring_size = ring_size;
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index bf241a3ec5e5..db507e3bcab9 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -250,10 +250,6 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
250 phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO); 250 phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO);
251 phy_read(phydev, MII_BCM7XXX_AUX_MODE); 251 phy_read(phydev, MII_BCM7XXX_AUX_MODE);
252 252
253 /* Workaround only required for 100Mbits/sec capable PHYs */
254 if (phydev->supported & PHY_GBIT_FEATURES)
255 return 0;
256
257 /* set shadow mode 2 */ 253 /* set shadow mode 2 */
258 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 254 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
259 MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2); 255 MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2);
@@ -270,7 +266,7 @@ static int bcm7xxx_config_init(struct phy_device *phydev)
270 phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555); 266 phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555);
271 267
272 /* reset shadow mode 2 */ 268 /* reset shadow mode 2 */
273 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, MII_BCM7XXX_SHD_MODE_2, 0); 269 ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, MII_BCM7XXX_SHD_MODE_2);
274 if (ret < 0) 270 if (ret < 0)
275 return ret; 271 return ret;
276 272
@@ -307,11 +303,6 @@ static int bcm7xxx_suspend(struct phy_device *phydev)
307 return 0; 303 return 0;
308} 304}
309 305
310static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
311{
312 return 0;
313}
314
315#define BCM7XXX_28NM_GPHY(_oui, _name) \ 306#define BCM7XXX_28NM_GPHY(_oui, _name) \
316{ \ 307{ \
317 .phy_id = (_oui), \ 308 .phy_id = (_oui), \
@@ -337,7 +328,7 @@ static struct phy_driver bcm7xxx_driver[] = {
337 .phy_id = PHY_ID_BCM7425, 328 .phy_id = PHY_ID_BCM7425,
338 .phy_id_mask = 0xfffffff0, 329 .phy_id_mask = 0xfffffff0,
339 .name = "Broadcom BCM7425", 330 .name = "Broadcom BCM7425",
340 .features = PHY_GBIT_FEATURES | 331 .features = PHY_BASIC_FEATURES |
341 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 332 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
342 .flags = PHY_IS_INTERNAL, 333 .flags = PHY_IS_INTERNAL,
343 .config_init = bcm7xxx_config_init, 334 .config_init = bcm7xxx_config_init,
@@ -349,7 +340,7 @@ static struct phy_driver bcm7xxx_driver[] = {
349 .phy_id = PHY_ID_BCM7429, 340 .phy_id = PHY_ID_BCM7429,
350 .phy_id_mask = 0xfffffff0, 341 .phy_id_mask = 0xfffffff0,
351 .name = "Broadcom BCM7429", 342 .name = "Broadcom BCM7429",
352 .features = PHY_GBIT_FEATURES | 343 .features = PHY_BASIC_FEATURES |
353 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 344 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
354 .flags = PHY_IS_INTERNAL, 345 .flags = PHY_IS_INTERNAL,
355 .config_init = bcm7xxx_config_init, 346 .config_init = bcm7xxx_config_init,
@@ -361,7 +352,7 @@ static struct phy_driver bcm7xxx_driver[] = {
361 .phy_id = PHY_ID_BCM7435, 352 .phy_id = PHY_ID_BCM7435,
362 .phy_id_mask = 0xfffffff0, 353 .phy_id_mask = 0xfffffff0,
363 .name = "Broadcom BCM7435", 354 .name = "Broadcom BCM7435",
364 .features = PHY_GBIT_FEATURES | 355 .features = PHY_BASIC_FEATURES |
365 SUPPORTED_Pause | SUPPORTED_Asym_Pause, 356 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
366 .flags = PHY_IS_INTERNAL, 357 .flags = PHY_IS_INTERNAL,
367 .config_init = bcm7xxx_config_init, 358 .config_init = bcm7xxx_config_init,
@@ -369,30 +360,6 @@ static struct phy_driver bcm7xxx_driver[] = {
369 .read_status = genphy_read_status, 360 .read_status = genphy_read_status,
370 .suspend = bcm7xxx_suspend, 361 .suspend = bcm7xxx_suspend,
371 .resume = bcm7xxx_config_init, 362 .resume = bcm7xxx_config_init,
372}, {
373 .phy_id = PHY_BCM_OUI_4,
374 .phy_id_mask = 0xffff0000,
375 .name = "Broadcom BCM7XXX 40nm",
376 .features = PHY_GBIT_FEATURES |
377 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
378 .flags = PHY_IS_INTERNAL,
379 .config_init = bcm7xxx_config_init,
380 .config_aneg = genphy_config_aneg,
381 .read_status = genphy_read_status,
382 .suspend = bcm7xxx_suspend,
383 .resume = bcm7xxx_config_init,
384}, {
385 .phy_id = PHY_BCM_OUI_5,
386 .phy_id_mask = 0xffffff00,
387 .name = "Broadcom BCM7XXX 65nm",
388 .features = PHY_BASIC_FEATURES |
389 SUPPORTED_Pause | SUPPORTED_Asym_Pause,
390 .flags = PHY_IS_INTERNAL,
391 .config_init = bcm7xxx_dummy_config_init,
392 .config_aneg = genphy_config_aneg,
393 .read_status = genphy_read_status,
394 .suspend = bcm7xxx_suspend,
395 .resume = bcm7xxx_config_init,
396} }; 363} };
397 364
398static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { 365static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
@@ -404,8 +371,6 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
404 { PHY_ID_BCM7439, 0xfffffff0, }, 371 { PHY_ID_BCM7439, 0xfffffff0, },
405 { PHY_ID_BCM7435, 0xfffffff0, }, 372 { PHY_ID_BCM7435, 0xfffffff0, },
406 { PHY_ID_BCM7445, 0xfffffff0, }, 373 { PHY_ID_BCM7445, 0xfffffff0, },
407 { PHY_BCM_OUI_4, 0xffff0000 },
408 { PHY_BCM_OUI_5, 0xffffff00 },
409 { } 374 { }
410}; 375};
411 376
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e3eb96443c97..ab1d0fcaf1d9 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -446,6 +446,12 @@ static int m88e1510_config_aneg(struct phy_device *phydev)
446 if (err < 0) 446 if (err < 0)
447 return err; 447 return err;
448 448
449 return 0;
450}
451
452static int marvell_config_init(struct phy_device *phydev)
453{
454 /* Set registers from marvell,reg-init DT property */
449 return marvell_of_reg_init(phydev); 455 return marvell_of_reg_init(phydev);
450} 456}
451 457
@@ -495,7 +501,7 @@ static int m88e1116r_config_init(struct phy_device *phydev)
495 501
496 mdelay(500); 502 mdelay(500);
497 503
498 return 0; 504 return marvell_config_init(phydev);
499} 505}
500 506
501static int m88e3016_config_init(struct phy_device *phydev) 507static int m88e3016_config_init(struct phy_device *phydev)
@@ -514,7 +520,7 @@ static int m88e3016_config_init(struct phy_device *phydev)
514 if (reg < 0) 520 if (reg < 0)
515 return reg; 521 return reg;
516 522
517 return 0; 523 return marvell_config_init(phydev);
518} 524}
519 525
520static int m88e1111_config_init(struct phy_device *phydev) 526static int m88e1111_config_init(struct phy_device *phydev)
@@ -1078,6 +1084,7 @@ static struct phy_driver marvell_drivers[] = {
1078 .features = PHY_GBIT_FEATURES, 1084 .features = PHY_GBIT_FEATURES,
1079 .probe = marvell_probe, 1085 .probe = marvell_probe,
1080 .flags = PHY_HAS_INTERRUPT, 1086 .flags = PHY_HAS_INTERRUPT,
1087 .config_init = &marvell_config_init,
1081 .config_aneg = &marvell_config_aneg, 1088 .config_aneg = &marvell_config_aneg,
1082 .read_status = &genphy_read_status, 1089 .read_status = &genphy_read_status,
1083 .ack_interrupt = &marvell_ack_interrupt, 1090 .ack_interrupt = &marvell_ack_interrupt,
@@ -1149,6 +1156,7 @@ static struct phy_driver marvell_drivers[] = {
1149 .features = PHY_GBIT_FEATURES, 1156 .features = PHY_GBIT_FEATURES,
1150 .flags = PHY_HAS_INTERRUPT, 1157 .flags = PHY_HAS_INTERRUPT,
1151 .probe = marvell_probe, 1158 .probe = marvell_probe,
1159 .config_init = &marvell_config_init,
1152 .config_aneg = &m88e1121_config_aneg, 1160 .config_aneg = &m88e1121_config_aneg,
1153 .read_status = &marvell_read_status, 1161 .read_status = &marvell_read_status,
1154 .ack_interrupt = &marvell_ack_interrupt, 1162 .ack_interrupt = &marvell_ack_interrupt,
@@ -1167,6 +1175,7 @@ static struct phy_driver marvell_drivers[] = {
1167 .features = PHY_GBIT_FEATURES, 1175 .features = PHY_GBIT_FEATURES,
1168 .flags = PHY_HAS_INTERRUPT, 1176 .flags = PHY_HAS_INTERRUPT,
1169 .probe = marvell_probe, 1177 .probe = marvell_probe,
1178 .config_init = &marvell_config_init,
1170 .config_aneg = &m88e1318_config_aneg, 1179 .config_aneg = &m88e1318_config_aneg,
1171 .read_status = &marvell_read_status, 1180 .read_status = &marvell_read_status,
1172 .ack_interrupt = &marvell_ack_interrupt, 1181 .ack_interrupt = &marvell_ack_interrupt,
@@ -1259,6 +1268,7 @@ static struct phy_driver marvell_drivers[] = {
1259 .features = PHY_GBIT_FEATURES, 1268 .features = PHY_GBIT_FEATURES,
1260 .flags = PHY_HAS_INTERRUPT, 1269 .flags = PHY_HAS_INTERRUPT,
1261 .probe = marvell_probe, 1270 .probe = marvell_probe,
1271 .config_init = &marvell_config_init,
1262 .config_aneg = &m88e1510_config_aneg, 1272 .config_aneg = &m88e1510_config_aneg,
1263 .read_status = &marvell_read_status, 1273 .read_status = &marvell_read_status,
1264 .ack_interrupt = &marvell_ack_interrupt, 1274 .ack_interrupt = &marvell_ack_interrupt,
@@ -1277,6 +1287,7 @@ static struct phy_driver marvell_drivers[] = {
1277 .features = PHY_GBIT_FEATURES, 1287 .features = PHY_GBIT_FEATURES,
1278 .flags = PHY_HAS_INTERRUPT, 1288 .flags = PHY_HAS_INTERRUPT,
1279 .probe = marvell_probe, 1289 .probe = marvell_probe,
1290 .config_init = &marvell_config_init,
1280 .config_aneg = &m88e1510_config_aneg, 1291 .config_aneg = &m88e1510_config_aneg,
1281 .read_status = &marvell_read_status, 1292 .read_status = &marvell_read_status,
1282 .ack_interrupt = &marvell_ack_interrupt, 1293 .ack_interrupt = &marvell_ack_interrupt,
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 03833dbfca67..dc85f7095e51 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -297,6 +297,17 @@ static int kszphy_config_init(struct phy_device *phydev)
297 if (priv->led_mode >= 0) 297 if (priv->led_mode >= 0)
298 kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); 298 kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode);
299 299
300 if (phy_interrupt_is_valid(phydev)) {
301 int ctl = phy_read(phydev, MII_BMCR);
302
303 if (ctl < 0)
304 return ctl;
305
306 ret = phy_write(phydev, MII_BMCR, ctl & ~BMCR_ANENABLE);
307 if (ret < 0)
308 return ret;
309 }
310
300 return 0; 311 return 0;
301} 312}
302 313
@@ -635,6 +646,21 @@ static void kszphy_get_stats(struct phy_device *phydev,
635 data[i] = kszphy_get_stat(phydev, i); 646 data[i] = kszphy_get_stat(phydev, i);
636} 647}
637 648
649static int kszphy_resume(struct phy_device *phydev)
650{
651 int value;
652
653 mutex_lock(&phydev->lock);
654
655 value = phy_read(phydev, MII_BMCR);
656 phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN);
657
658 kszphy_config_intr(phydev);
659 mutex_unlock(&phydev->lock);
660
661 return 0;
662}
663
638static int kszphy_probe(struct phy_device *phydev) 664static int kszphy_probe(struct phy_device *phydev)
639{ 665{
640 const struct kszphy_type *type = phydev->drv->driver_data; 666 const struct kszphy_type *type = phydev->drv->driver_data;
@@ -844,7 +870,7 @@ static struct phy_driver ksphy_driver[] = {
844 .get_strings = kszphy_get_strings, 870 .get_strings = kszphy_get_strings,
845 .get_stats = kszphy_get_stats, 871 .get_stats = kszphy_get_stats,
846 .suspend = genphy_suspend, 872 .suspend = genphy_suspend,
847 .resume = genphy_resume, 873 .resume = kszphy_resume,
848}, { 874}, {
849 .phy_id = PHY_ID_KSZ8061, 875 .phy_id = PHY_ID_KSZ8061,
850 .name = "Micrel KSZ8061", 876 .name = "Micrel KSZ8061",
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bad3f005faee..e551f3a89cfd 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1410,7 +1410,7 @@ int genphy_config_init(struct phy_device *phydev)
1410 1410
1411 features = (SUPPORTED_TP | SUPPORTED_MII 1411 features = (SUPPORTED_TP | SUPPORTED_MII
1412 | SUPPORTED_AUI | SUPPORTED_FIBRE | 1412 | SUPPORTED_AUI | SUPPORTED_FIBRE |
1413 SUPPORTED_BNC); 1413 SUPPORTED_BNC | SUPPORTED_Pause | SUPPORTED_Asym_Pause);
1414 1414
1415 /* Do we support autonegotiation? */ 1415 /* Do we support autonegotiation? */
1416 val = phy_read(phydev, MII_BMSR); 1416 val = phy_read(phydev, MII_BMSR);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fc8ad001bc94..d61da9ece3ba 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -443,9 +443,14 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
443 * network traffic (demand mode). 443 * network traffic (demand mode).
444 */ 444 */
445 struct ppp *ppp = PF_TO_PPP(pf); 445 struct ppp *ppp = PF_TO_PPP(pf);
446
447 ppp_recv_lock(ppp);
446 if (ppp->n_channels == 0 && 448 if (ppp->n_channels == 0 &&
447 (ppp->flags & SC_LOOP_TRAFFIC) == 0) 449 (ppp->flags & SC_LOOP_TRAFFIC) == 0) {
450 ppp_recv_unlock(ppp);
448 break; 451 break;
452 }
453 ppp_recv_unlock(ppp);
449 } 454 }
450 ret = -EAGAIN; 455 ret = -EAGAIN;
451 if (file->f_flags & O_NONBLOCK) 456 if (file->f_flags & O_NONBLOCK)
@@ -532,9 +537,12 @@ static unsigned int ppp_poll(struct file *file, poll_table *wait)
532 else if (pf->kind == INTERFACE) { 537 else if (pf->kind == INTERFACE) {
533 /* see comment in ppp_read */ 538 /* see comment in ppp_read */
534 struct ppp *ppp = PF_TO_PPP(pf); 539 struct ppp *ppp = PF_TO_PPP(pf);
540
541 ppp_recv_lock(ppp);
535 if (ppp->n_channels == 0 && 542 if (ppp->n_channels == 0 &&
536 (ppp->flags & SC_LOOP_TRAFFIC) == 0) 543 (ppp->flags & SC_LOOP_TRAFFIC) == 0)
537 mask |= POLLIN | POLLRDNORM; 544 mask |= POLLIN | POLLRDNORM;
545 ppp_recv_unlock(ppp);
538 } 546 }
539 547
540 return mask; 548 return mask;
@@ -2808,6 +2816,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
2808 2816
2809out2: 2817out2:
2810 mutex_unlock(&pn->all_ppp_mutex); 2818 mutex_unlock(&pn->all_ppp_mutex);
2819 rtnl_unlock();
2811 free_netdev(dev); 2820 free_netdev(dev);
2812out1: 2821out1:
2813 *retp = ret; 2822 *retp = ret;
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index f3c63022eb3c..4ddae8118c85 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
395 395
396 if (!__pppoe_xmit(sk_pppox(relay_po), skb)) 396 if (!__pppoe_xmit(sk_pppox(relay_po), skb))
397 goto abort_put; 397 goto abort_put;
398
399 sock_put(sk_pppox(relay_po));
398 } else { 400 } else {
399 if (sock_queue_rcv_skb(sk, skb)) 401 if (sock_queue_rcv_skb(sk, skb))
400 goto abort_kfree; 402 goto abort_kfree;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 7f83504dfa69..cdde59089f72 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -395,6 +395,10 @@ config USB_NET_RNDIS_HOST
395 The protocol specification is incomplete, and is controlled by 395 The protocol specification is incomplete, and is controlled by
396 (and for) Microsoft; it isn't an "Open" ecosystem or market. 396 (and for) Microsoft; it isn't an "Open" ecosystem or market.
397 397
398config USB_NET_CDC_SUBSET_ENABLE
399 tristate
400 depends on USB_NET_CDC_SUBSET
401
398config USB_NET_CDC_SUBSET 402config USB_NET_CDC_SUBSET
399 tristate "Simple USB Network Links (CDC Ethernet subset)" 403 tristate "Simple USB Network Links (CDC Ethernet subset)"
400 depends on USB_USBNET 404 depends on USB_USBNET
@@ -413,6 +417,7 @@ config USB_NET_CDC_SUBSET
413config USB_ALI_M5632 417config USB_ALI_M5632
414 bool "ALi M5632 based 'USB 2.0 Data Link' cables" 418 bool "ALi M5632 based 'USB 2.0 Data Link' cables"
415 depends on USB_NET_CDC_SUBSET 419 depends on USB_NET_CDC_SUBSET
420 select USB_NET_CDC_SUBSET_ENABLE
416 help 421 help
417 Choose this option if you're using a host-to-host cable 422 Choose this option if you're using a host-to-host cable
418 based on this design, which supports USB 2.0 high speed. 423 based on this design, which supports USB 2.0 high speed.
@@ -420,6 +425,7 @@ config USB_ALI_M5632
420config USB_AN2720 425config USB_AN2720
421 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)" 426 bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
422 depends on USB_NET_CDC_SUBSET 427 depends on USB_NET_CDC_SUBSET
428 select USB_NET_CDC_SUBSET_ENABLE
423 help 429 help
424 Choose this option if you're using a host-to-host cable 430 Choose this option if you're using a host-to-host cable
425 based on this design. Note that AnchorChips is now a 431 based on this design. Note that AnchorChips is now a
@@ -428,6 +434,7 @@ config USB_AN2720
428config USB_BELKIN 434config USB_BELKIN
429 bool "eTEK based host-to-host cables (Advance, Belkin, ...)" 435 bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
430 depends on USB_NET_CDC_SUBSET 436 depends on USB_NET_CDC_SUBSET
437 select USB_NET_CDC_SUBSET_ENABLE
431 default y 438 default y
432 help 439 help
433 Choose this option if you're using a host-to-host cable 440 Choose this option if you're using a host-to-host cable
@@ -437,6 +444,7 @@ config USB_BELKIN
437config USB_ARMLINUX 444config USB_ARMLINUX
438 bool "Embedded ARM Linux links (iPaq, ...)" 445 bool "Embedded ARM Linux links (iPaq, ...)"
439 depends on USB_NET_CDC_SUBSET 446 depends on USB_NET_CDC_SUBSET
447 select USB_NET_CDC_SUBSET_ENABLE
440 default y 448 default y
441 help 449 help
442 Choose this option to support the "usb-eth" networking driver 450 Choose this option to support the "usb-eth" networking driver
@@ -454,6 +462,7 @@ config USB_ARMLINUX
454config USB_EPSON2888 462config USB_EPSON2888
455 bool "Epson 2888 based firmware (DEVELOPMENT)" 463 bool "Epson 2888 based firmware (DEVELOPMENT)"
456 depends on USB_NET_CDC_SUBSET 464 depends on USB_NET_CDC_SUBSET
465 select USB_NET_CDC_SUBSET_ENABLE
457 help 466 help
458 Choose this option to support the usb networking links used 467 Choose this option to support the usb networking links used
459 by some sample firmware from Epson. 468 by some sample firmware from Epson.
@@ -461,6 +470,7 @@ config USB_EPSON2888
461config USB_KC2190 470config USB_KC2190
462 bool "KT Technology KC2190 based cables (InstaNet)" 471 bool "KT Technology KC2190 based cables (InstaNet)"
463 depends on USB_NET_CDC_SUBSET 472 depends on USB_NET_CDC_SUBSET
473 select USB_NET_CDC_SUBSET_ENABLE
464 help 474 help
465 Choose this option if you're using a host-to-host cable 475 Choose this option if you're using a host-to-host cable
466 with one of these chips. 476 with one of these chips.
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b5f04068dbe4..37fb46aee341 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
23obj-$(CONFIG_USB_NET_NET1080) += net1080.o 23obj-$(CONFIG_USB_NET_NET1080) += net1080.o
24obj-$(CONFIG_USB_NET_PLUSB) += plusb.o 24obj-$(CONFIG_USB_NET_PLUSB) += plusb.o
25obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o 25obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o
26obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o 26obj-$(CONFIG_USB_NET_CDC_SUBSET_ENABLE) += cdc_subset.o
27obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o 27obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
28obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o 28obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
29obj-$(CONFIG_USB_USBNET) += usbnet.o 29obj-$(CONFIG_USB_USBNET) += usbnet.o
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 224e7d82de6d..cf77f2dffa69 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -134,7 +134,6 @@ static void ax88172a_remove_mdio(struct usbnet *dev)
134 134
135 netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id); 135 netdev_info(dev->net, "deregistering mdio bus %s\n", priv->mdio->id);
136 mdiobus_unregister(priv->mdio); 136 mdiobus_unregister(priv->mdio);
137 kfree(priv->mdio->irq);
138 mdiobus_free(priv->mdio); 137 mdiobus_free(priv->mdio);
139} 138}
140 139
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index dc0212c3cc28..86ba30ba35e8 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -837,7 +837,11 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
837 837
838 iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; 838 iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
839 839
840 /* reset data interface */ 840 /* Reset data interface. Some devices will not reset properly
841 * unless they are configured first. Toggle the altsetting to
842 * force a reset
843 */
844 usb_set_interface(dev->udev, iface_no, data_altsetting);
841 temp = usb_set_interface(dev->udev, iface_no, 0); 845 temp = usb_set_interface(dev->udev, iface_no, 0);
842 if (temp) { 846 if (temp) {
843 dev_dbg(&intf->dev, "set interface failed\n"); 847 dev_dbg(&intf->dev, "set interface failed\n");
@@ -984,8 +988,6 @@ EXPORT_SYMBOL_GPL(cdc_ncm_select_altsetting);
984 988
985static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf) 989static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
986{ 990{
987 int ret;
988
989 /* MBIM backwards compatible function? */ 991 /* MBIM backwards compatible function? */
990 if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM) 992 if (cdc_ncm_select_altsetting(intf) != CDC_NCM_COMM_ALTSETTING_NCM)
991 return -ENODEV; 993 return -ENODEV;
@@ -994,16 +996,7 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
994 * Additionally, generic NCM devices are assumed to accept arbitrarily 996 * Additionally, generic NCM devices are assumed to accept arbitrarily
995 * placed NDP. 997 * placed NDP.
996 */ 998 */
997 ret = cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0); 999 return cdc_ncm_bind_common(dev, intf, CDC_NCM_DATA_ALTSETTING_NCM, 0);
998
999 /*
1000 * We should get an event when network connection is "connected" or
1001 * "disconnected". Set network connection in "disconnected" state
1002 * (carrier is OFF) during attach, so the IP network stack does not
1003 * start IPv6 negotiation and more.
1004 */
1005 usbnet_link_change(dev, 0, 0);
1006 return ret;
1007} 1000}
1008 1001
1009static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max) 1002static void cdc_ncm_align_tail(struct sk_buff *skb, size_t modulus, size_t remainder, size_t max)
@@ -1586,7 +1579,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1586 1579
1587static const struct driver_info cdc_ncm_info = { 1580static const struct driver_info cdc_ncm_info = {
1588 .description = "CDC NCM", 1581 .description = "CDC NCM",
1589 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET, 1582 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
1583 | FLAG_LINK_INTR,
1590 .bind = cdc_ncm_bind, 1584 .bind = cdc_ncm_bind,
1591 .unbind = cdc_ncm_unbind, 1585 .unbind = cdc_ncm_unbind,
1592 .manage_power = usbnet_manage_power, 1586 .manage_power = usbnet_manage_power,
@@ -1599,7 +1593,7 @@ static const struct driver_info cdc_ncm_info = {
1599static const struct driver_info wwan_info = { 1593static const struct driver_info wwan_info = {
1600 .description = "Mobile Broadband Network Device", 1594 .description = "Mobile Broadband Network Device",
1601 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET 1595 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
1602 | FLAG_WWAN, 1596 | FLAG_LINK_INTR | FLAG_WWAN,
1603 .bind = cdc_ncm_bind, 1597 .bind = cdc_ncm_bind,
1604 .unbind = cdc_ncm_unbind, 1598 .unbind = cdc_ncm_unbind,
1605 .manage_power = usbnet_manage_power, 1599 .manage_power = usbnet_manage_power,
@@ -1612,7 +1606,7 @@ static const struct driver_info wwan_info = {
1612static const struct driver_info wwan_noarp_info = { 1606static const struct driver_info wwan_noarp_info = {
1613 .description = "Mobile Broadband Network Device (NO ARP)", 1607 .description = "Mobile Broadband Network Device (NO ARP)",
1614 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET 1608 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
1615 | FLAG_WWAN | FLAG_NOARP, 1609 | FLAG_LINK_INTR | FLAG_WWAN | FLAG_NOARP,
1616 .bind = cdc_ncm_bind, 1610 .bind = cdc_ncm_bind,
1617 .unbind = cdc_ncm_unbind, 1611 .unbind = cdc_ncm_unbind,
1618 .manage_power = usbnet_manage_power, 1612 .manage_power = usbnet_manage_power,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23e9880791fc..a3a4ccf7cf52 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -637,6 +637,7 @@ static const struct usb_device_id products[] = {
637 637
638 /* 3. Combined interface devices matching on interface number */ 638 /* 3. Combined interface devices matching on interface number */
639 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ 639 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
640 {QMI_FIXED_INTF(0x05c6, 0x6001, 3)}, /* 4G LTE usb-modem U901 */
640 {QMI_FIXED_INTF(0x05c6, 0x7000, 0)}, 641 {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
641 {QMI_FIXED_INTF(0x05c6, 0x7001, 1)}, 642 {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
642 {QMI_FIXED_INTF(0x05c6, 0x7002, 1)}, 643 {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
@@ -860,8 +861,10 @@ static const struct usb_device_id products[] = {
860 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */ 861 {QMI_FIXED_INTF(0x1199, 0x9056, 8)}, /* Sierra Wireless Modem */
861 {QMI_FIXED_INTF(0x1199, 0x9057, 8)}, 862 {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
862 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */ 863 {QMI_FIXED_INTF(0x1199, 0x9061, 8)}, /* Sierra Wireless Modem */
863 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx/EM74xx */ 864 {QMI_FIXED_INTF(0x1199, 0x9071, 8)}, /* Sierra Wireless MC74xx */
864 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx/EM74xx */ 865 {QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
866 {QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
867 {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
865 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 868 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
866 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 869 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
867 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 870 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -884,6 +887,7 @@ static const struct usb_device_id products[] = {
884 {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ 887 {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
885 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 888 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
886 {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ 889 {QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
890 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
887 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ 891 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
888 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ 892 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
889 {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */ 893 {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 0b0ba7ef14e4..10798128c03f 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1769,6 +1769,13 @@ out3:
1769 if (info->unbind) 1769 if (info->unbind)
1770 info->unbind (dev, udev); 1770 info->unbind (dev, udev);
1771out1: 1771out1:
1772 /* subdrivers must undo all they did in bind() if they
1773 * fail it, but we may fail later and a deferred kevent
1774 * may trigger an error resubmitting itself and, worse,
1775 * schedule a timer. So we kill it all just in case.
1776 */
1777 cancel_work_sync(&dev->kevent);
1778 del_timer_sync(&dev->delay);
1772 free_netdev(net); 1779 free_netdev(net);
1773out: 1780out:
1774 return status; 1781 return status;
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 221a53025fd0..72ba8ae7f09a 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -377,7 +377,7 @@ union Vmxnet3_GenericDesc {
377#define VMXNET3_TX_RING_MAX_SIZE 4096 377#define VMXNET3_TX_RING_MAX_SIZE 4096
378#define VMXNET3_TC_RING_MAX_SIZE 4096 378#define VMXNET3_TC_RING_MAX_SIZE 4096
379#define VMXNET3_RX_RING_MAX_SIZE 4096 379#define VMXNET3_RX_RING_MAX_SIZE 4096
380#define VMXNET3_RX_RING2_MAX_SIZE 2048 380#define VMXNET3_RX_RING2_MAX_SIZE 4096
381#define VMXNET3_RC_RING_MAX_SIZE 8192 381#define VMXNET3_RC_RING_MAX_SIZE 8192
382 382
383/* a list of reasons for queue stop */ 383/* a list of reasons for queue stop */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0cbf520cea77..fc895d0e85d9 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -814,7 +814,7 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
814 814
815 815
816/* 816/*
817 * parse and copy relevant protocol headers: 817 * parse relevant protocol headers:
818 * For a tso pkt, relevant headers are L2/3/4 including options 818 * For a tso pkt, relevant headers are L2/3/4 including options
819 * For a pkt requesting csum offloading, they are L2/3 and may include L4 819 * For a pkt requesting csum offloading, they are L2/3 and may include L4
820 * if it's a TCP/UDP pkt 820 * if it's a TCP/UDP pkt
@@ -827,15 +827,14 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
827 * Other effects: 827 * Other effects:
828 * 1. related *ctx fields are updated. 828 * 1. related *ctx fields are updated.
829 * 2. ctx->copy_size is # of bytes copied 829 * 2. ctx->copy_size is # of bytes copied
830 * 3. the portion copied is guaranteed to be in the linear part 830 * 3. the portion to be copied is guaranteed to be in the linear part
831 * 831 *
832 */ 832 */
833static int 833static int
834vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, 834vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
835 struct vmxnet3_tx_ctx *ctx, 835 struct vmxnet3_tx_ctx *ctx,
836 struct vmxnet3_adapter *adapter) 836 struct vmxnet3_adapter *adapter)
837{ 837{
838 struct Vmxnet3_TxDataDesc *tdd;
839 u8 protocol = 0; 838 u8 protocol = 0;
840 839
841 if (ctx->mss) { /* TSO */ 840 if (ctx->mss) { /* TSO */
@@ -892,16 +891,34 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
892 return 0; 891 return 0;
893 } 892 }
894 893
894 return 1;
895err:
896 return -1;
897}
898
899/*
900 * copy relevant protocol headers to the transmit ring:
901 * For a tso pkt, relevant headers are L2/3/4 including options
902 * For a pkt requesting csum offloading, they are L2/3 and may include L4
903 * if it's a TCP/UDP pkt
904 *
905 *
906 * Note that this requires that vmxnet3_parse_hdr be called first to set the
907 * appropriate bits in ctx first
908 */
909static void
910vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
911 struct vmxnet3_tx_ctx *ctx,
912 struct vmxnet3_adapter *adapter)
913{
914 struct Vmxnet3_TxDataDesc *tdd;
915
895 tdd = tq->data_ring.base + tq->tx_ring.next2fill; 916 tdd = tq->data_ring.base + tq->tx_ring.next2fill;
896 917
897 memcpy(tdd->data, skb->data, ctx->copy_size); 918 memcpy(tdd->data, skb->data, ctx->copy_size);
898 netdev_dbg(adapter->netdev, 919 netdev_dbg(adapter->netdev,
899 "copy %u bytes to dataRing[%u]\n", 920 "copy %u bytes to dataRing[%u]\n",
900 ctx->copy_size, tq->tx_ring.next2fill); 921 ctx->copy_size, tq->tx_ring.next2fill);
901 return 1;
902
903err:
904 return -1;
905} 922}
906 923
907 924
@@ -998,22 +1015,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
998 } 1015 }
999 } 1016 }
1000 1017
1001 spin_lock_irqsave(&tq->tx_lock, flags); 1018 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1002
1003 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1004 tq->stats.tx_ring_full++;
1005 netdev_dbg(adapter->netdev,
1006 "tx queue stopped on %s, next2comp %u"
1007 " next2fill %u\n", adapter->netdev->name,
1008 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1009
1010 vmxnet3_tq_stop(tq, adapter);
1011 spin_unlock_irqrestore(&tq->tx_lock, flags);
1012 return NETDEV_TX_BUSY;
1013 }
1014
1015
1016 ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
1017 if (ret >= 0) { 1019 if (ret >= 0) {
1018 BUG_ON(ret <= 0 && ctx.copy_size != 0); 1020 BUG_ON(ret <= 0 && ctx.copy_size != 0);
1019 /* hdrs parsed, check against other limits */ 1021 /* hdrs parsed, check against other limits */
@@ -1033,9 +1035,26 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1033 } 1035 }
1034 } else { 1036 } else {
1035 tq->stats.drop_hdr_inspect_err++; 1037 tq->stats.drop_hdr_inspect_err++;
1036 goto unlock_drop_pkt; 1038 goto drop_pkt;
1037 } 1039 }
1038 1040
1041 spin_lock_irqsave(&tq->tx_lock, flags);
1042
1043 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
1044 tq->stats.tx_ring_full++;
1045 netdev_dbg(adapter->netdev,
1046 "tx queue stopped on %s, next2comp %u"
1047 " next2fill %u\n", adapter->netdev->name,
1048 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
1049
1050 vmxnet3_tq_stop(tq, adapter);
1051 spin_unlock_irqrestore(&tq->tx_lock, flags);
1052 return NETDEV_TX_BUSY;
1053 }
1054
1055
1056 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1057
1039 /* fill tx descs related to addr & len */ 1058 /* fill tx descs related to addr & len */
1040 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) 1059 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1041 goto unlock_drop_pkt; 1060 goto unlock_drop_pkt;
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index bdb8a6c0f8aa..729c344e6774 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
69/* 69/*
70 * Version numbers 70 * Version numbers
71 */ 71 */
72#define VMXNET3_DRIVER_VERSION_STRING "1.4.5.0-k" 72#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k"
73 73
74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 74/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
75#define VMXNET3_DRIVER_VERSION_NUM 0x01040500 75#define VMXNET3_DRIVER_VERSION_NUM 0x01040600
76 76
77#if defined(CONFIG_PCI_MSI) 77#if defined(CONFIG_PCI_MSI)
78 /* RSS only makes sense if MSI-X is supported. */ 78 /* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 66addb7a7911..bdcf617a9d52 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -104,20 +104,23 @@ static struct dst_ops vrf_dst_ops = {
104#if IS_ENABLED(CONFIG_IPV6) 104#if IS_ENABLED(CONFIG_IPV6)
105static bool check_ipv6_frame(const struct sk_buff *skb) 105static bool check_ipv6_frame(const struct sk_buff *skb)
106{ 106{
107 const struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb->data; 107 const struct ipv6hdr *ipv6h;
108 size_t hlen = sizeof(*ipv6h); 108 struct ipv6hdr _ipv6h;
109 bool rc = true; 109 bool rc = true;
110 110
111 if (skb->len < hlen) 111 ipv6h = skb_header_pointer(skb, 0, sizeof(_ipv6h), &_ipv6h);
112 if (!ipv6h)
112 goto out; 113 goto out;
113 114
114 if (ipv6h->nexthdr == NEXTHDR_ICMP) { 115 if (ipv6h->nexthdr == NEXTHDR_ICMP) {
115 const struct icmp6hdr *icmph; 116 const struct icmp6hdr *icmph;
117 struct icmp6hdr _icmph;
116 118
117 if (skb->len < hlen + sizeof(*icmph)) 119 icmph = skb_header_pointer(skb, sizeof(_ipv6h),
120 sizeof(_icmph), &_icmph);
121 if (!icmph)
118 goto out; 122 goto out;
119 123
120 icmph = (struct icmp6hdr *)(skb->data + sizeof(*ipv6h));
121 switch (icmph->icmp6_type) { 124 switch (icmph->icmp6_type) {
122 case NDISC_ROUTER_SOLICITATION: 125 case NDISC_ROUTER_SOLICITATION:
123 case NDISC_ROUTER_ADVERTISEMENT: 126 case NDISC_ROUTER_ADVERTISEMENT:
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 65439188c582..1c32bd104797 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -931,8 +931,10 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
931 cb->nlh->nlmsg_seq, 931 cb->nlh->nlmsg_seq,
932 RTM_NEWNEIGH, 932 RTM_NEWNEIGH,
933 NLM_F_MULTI, rd); 933 NLM_F_MULTI, rd);
934 if (err < 0) 934 if (err < 0) {
935 cb->args[1] = err;
935 goto out; 936 goto out;
937 }
936skip: 938skip:
937 ++idx; 939 ++idx;
938 } 940 }
@@ -1306,8 +1308,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
1306 gbp = (struct vxlanhdr_gbp *)vxh; 1308 gbp = (struct vxlanhdr_gbp *)vxh;
1307 md->gbp = ntohs(gbp->policy_id); 1309 md->gbp = ntohs(gbp->policy_id);
1308 1310
1309 if (tun_dst) 1311 if (tun_dst) {
1310 tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT; 1312 tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
1313 tun_dst->u.tun_info.options_len = sizeof(*md);
1314 }
1311 1315
1312 if (gbp->dont_learn) 1316 if (gbp->dont_learn)
1313 md->gbp |= VXLAN_GBP_DONT_LEARN; 1317 md->gbp |= VXLAN_GBP_DONT_LEARN;
@@ -2171,9 +2175,11 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
2171#endif 2175#endif
2172 } 2176 }
2173 2177
2174 if (vxlan->flags & VXLAN_F_COLLECT_METADATA && 2178 if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
2175 info && info->mode & IP_TUNNEL_INFO_TX) { 2179 if (info && info->mode & IP_TUNNEL_INFO_TX)
2176 vxlan_xmit_one(skb, dev, NULL, false); 2180 vxlan_xmit_one(skb, dev, NULL, false);
2181 else
2182 kfree_skb(skb);
2177 return NETDEV_TX_OK; 2183 return NETDEV_TX_OK;
2178 } 2184 }
2179 2185
@@ -2367,29 +2373,43 @@ static void vxlan_set_multicast_list(struct net_device *dev)
2367{ 2373{
2368} 2374}
2369 2375
2370static int vxlan_change_mtu(struct net_device *dev, int new_mtu) 2376static int __vxlan_change_mtu(struct net_device *dev,
2377 struct net_device *lowerdev,
2378 struct vxlan_rdst *dst, int new_mtu, bool strict)
2371{ 2379{
2372 struct vxlan_dev *vxlan = netdev_priv(dev); 2380 int max_mtu = IP_MAX_MTU;
2373 struct vxlan_rdst *dst = &vxlan->default_dst;
2374 struct net_device *lowerdev;
2375 int max_mtu;
2376 2381
2377 lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex); 2382 if (lowerdev)
2378 if (lowerdev == NULL) 2383 max_mtu = lowerdev->mtu;
2379 return eth_change_mtu(dev, new_mtu);
2380 2384
2381 if (dst->remote_ip.sa.sa_family == AF_INET6) 2385 if (dst->remote_ip.sa.sa_family == AF_INET6)
2382 max_mtu = lowerdev->mtu - VXLAN6_HEADROOM; 2386 max_mtu -= VXLAN6_HEADROOM;
2383 else 2387 else
2384 max_mtu = lowerdev->mtu - VXLAN_HEADROOM; 2388 max_mtu -= VXLAN_HEADROOM;
2385 2389
2386 if (new_mtu < 68 || new_mtu > max_mtu) 2390 if (new_mtu < 68)
2387 return -EINVAL; 2391 return -EINVAL;
2388 2392
2393 if (new_mtu > max_mtu) {
2394 if (strict)
2395 return -EINVAL;
2396
2397 new_mtu = max_mtu;
2398 }
2399
2389 dev->mtu = new_mtu; 2400 dev->mtu = new_mtu;
2390 return 0; 2401 return 0;
2391} 2402}
2392 2403
2404static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
2405{
2406 struct vxlan_dev *vxlan = netdev_priv(dev);
2407 struct vxlan_rdst *dst = &vxlan->default_dst;
2408 struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
2409 dst->remote_ifindex);
2410 return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
2411}
2412
2393static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb, 2413static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
2394 struct ip_tunnel_info *info, 2414 struct ip_tunnel_info *info,
2395 __be16 sport, __be16 dport) 2415 __be16 sport, __be16 dport)
@@ -2523,6 +2543,7 @@ static void vxlan_setup(struct net_device *dev)
2523 dev->hw_features |= NETIF_F_GSO_SOFTWARE; 2543 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2524 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX; 2544 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
2525 netif_keep_dst(dev); 2545 netif_keep_dst(dev);
2546 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
2526 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE; 2547 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
2527 2548
2528 INIT_LIST_HEAD(&vxlan->next); 2549 INIT_LIST_HEAD(&vxlan->next);
@@ -2765,6 +2786,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2765 int err; 2786 int err;
2766 bool use_ipv6 = false; 2787 bool use_ipv6 = false;
2767 __be16 default_port = vxlan->cfg.dst_port; 2788 __be16 default_port = vxlan->cfg.dst_port;
2789 struct net_device *lowerdev = NULL;
2768 2790
2769 vxlan->net = src_net; 2791 vxlan->net = src_net;
2770 2792
@@ -2785,9 +2807,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2785 } 2807 }
2786 2808
2787 if (conf->remote_ifindex) { 2809 if (conf->remote_ifindex) {
2788 struct net_device *lowerdev 2810 lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
2789 = __dev_get_by_index(src_net, conf->remote_ifindex);
2790
2791 dst->remote_ifindex = conf->remote_ifindex; 2811 dst->remote_ifindex = conf->remote_ifindex;
2792 2812
2793 if (!lowerdev) { 2813 if (!lowerdev) {
@@ -2811,6 +2831,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
2811 needed_headroom = lowerdev->hard_header_len; 2831 needed_headroom = lowerdev->hard_header_len;
2812 } 2832 }
2813 2833
2834 if (conf->mtu) {
2835 err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
2836 if (err)
2837 return err;
2838 }
2839
2814 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA) 2840 if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
2815 needed_headroom += VXLAN6_HEADROOM; 2841 needed_headroom += VXLAN6_HEADROOM;
2816 else 2842 else
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 7a72407208b1..629225980463 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1626,7 +1626,7 @@ try:
1626 if (state & Xpr) { 1626 if (state & Xpr) {
1627 void __iomem *scc_addr; 1627 void __iomem *scc_addr;
1628 unsigned long ring; 1628 unsigned long ring;
1629 int i; 1629 unsigned int i;
1630 1630
1631 /* 1631 /*
1632 * - the busy condition happens (sometimes); 1632 * - the busy condition happens (sometimes);
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 866067789330..7438fbeef744 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -53,7 +53,6 @@ config IWLWIFI_LEDS
53 53
54config IWLDVM 54config IWLDVM
55 tristate "Intel Wireless WiFi DVM Firmware support" 55 tristate "Intel Wireless WiFi DVM Firmware support"
56 depends on m
57 help 56 help
58 This is the driver that supports the DVM firmware. The list 57 This is the driver that supports the DVM firmware. The list
59 of the devices that use this firmware is available here: 58 of the devices that use this firmware is available here:
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index c84a0299d43e..bce9b3420a13 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -7,6 +7,7 @@
7 * 7 *
8 * Copyright(c) 2014 Intel Corporation. All rights reserved. 8 * Copyright(c) 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH 9 * Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of version 2 of the GNU General Public License as 13 * it under the terms of version 2 of the GNU General Public License as
@@ -70,12 +71,15 @@
70 71
71/* Highest firmware API version supported */ 72/* Highest firmware API version supported */
72#define IWL8000_UCODE_API_MAX 20 73#define IWL8000_UCODE_API_MAX 20
74#define IWL8265_UCODE_API_MAX 20
73 75
74/* Oldest version we won't warn about */ 76/* Oldest version we won't warn about */
75#define IWL8000_UCODE_API_OK 13 77#define IWL8000_UCODE_API_OK 13
78#define IWL8265_UCODE_API_OK 20
76 79
77/* Lowest firmware API version supported */ 80/* Lowest firmware API version supported */
78#define IWL8000_UCODE_API_MIN 13 81#define IWL8000_UCODE_API_MIN 13
82#define IWL8265_UCODE_API_MIN 20
79 83
80/* NVM versions */ 84/* NVM versions */
81#define IWL8000_NVM_VERSION 0x0a1d 85#define IWL8000_NVM_VERSION 0x0a1d
@@ -93,6 +97,10 @@
93#define IWL8000_MODULE_FIRMWARE(api) \ 97#define IWL8000_MODULE_FIRMWARE(api) \
94 IWL8000_FW_PRE "-" __stringify(api) ".ucode" 98 IWL8000_FW_PRE "-" __stringify(api) ".ucode"
95 99
100#define IWL8265_FW_PRE "iwlwifi-8265-"
101#define IWL8265_MODULE_FIRMWARE(api) \
102 IWL8265_FW_PRE __stringify(api) ".ucode"
103
96#define NVM_HW_SECTION_NUM_FAMILY_8000 10 104#define NVM_HW_SECTION_NUM_FAMILY_8000 10
97#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B" 105#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B"
98#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C" 106#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
@@ -144,10 +152,7 @@ static const struct iwl_tt_params iwl8000_tt_params = {
144 .support_tx_backoff = true, 152 .support_tx_backoff = true,
145}; 153};
146 154
147#define IWL_DEVICE_8000 \ 155#define IWL_DEVICE_8000_COMMON \
148 .ucode_api_max = IWL8000_UCODE_API_MAX, \
149 .ucode_api_ok = IWL8000_UCODE_API_OK, \
150 .ucode_api_min = IWL8000_UCODE_API_MIN, \
151 .device_family = IWL_DEVICE_FAMILY_8000, \ 156 .device_family = IWL_DEVICE_FAMILY_8000, \
152 .max_inst_size = IWL60_RTC_INST_SIZE, \ 157 .max_inst_size = IWL60_RTC_INST_SIZE, \
153 .max_data_size = IWL60_RTC_DATA_SIZE, \ 158 .max_data_size = IWL60_RTC_DATA_SIZE, \
@@ -167,10 +172,28 @@ static const struct iwl_tt_params iwl8000_tt_params = {
167 .thermal_params = &iwl8000_tt_params, \ 172 .thermal_params = &iwl8000_tt_params, \
168 .apmg_not_supported = true 173 .apmg_not_supported = true
169 174
175#define IWL_DEVICE_8000 \
176 IWL_DEVICE_8000_COMMON, \
177 .ucode_api_max = IWL8000_UCODE_API_MAX, \
178 .ucode_api_ok = IWL8000_UCODE_API_OK, \
179 .ucode_api_min = IWL8000_UCODE_API_MIN \
180
181#define IWL_DEVICE_8260 \
182 IWL_DEVICE_8000_COMMON, \
183 .ucode_api_max = IWL8000_UCODE_API_MAX, \
184 .ucode_api_ok = IWL8000_UCODE_API_OK, \
185 .ucode_api_min = IWL8000_UCODE_API_MIN \
186
187#define IWL_DEVICE_8265 \
188 IWL_DEVICE_8000_COMMON, \
189 .ucode_api_max = IWL8265_UCODE_API_MAX, \
190 .ucode_api_ok = IWL8265_UCODE_API_OK, \
191 .ucode_api_min = IWL8265_UCODE_API_MIN \
192
170const struct iwl_cfg iwl8260_2n_cfg = { 193const struct iwl_cfg iwl8260_2n_cfg = {
171 .name = "Intel(R) Dual Band Wireless N 8260", 194 .name = "Intel(R) Dual Band Wireless N 8260",
172 .fw_name_pre = IWL8000_FW_PRE, 195 .fw_name_pre = IWL8000_FW_PRE,
173 IWL_DEVICE_8000, 196 IWL_DEVICE_8260,
174 .ht_params = &iwl8000_ht_params, 197 .ht_params = &iwl8000_ht_params,
175 .nvm_ver = IWL8000_NVM_VERSION, 198 .nvm_ver = IWL8000_NVM_VERSION,
176 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 199 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -179,7 +202,7 @@ const struct iwl_cfg iwl8260_2n_cfg = {
179const struct iwl_cfg iwl8260_2ac_cfg = { 202const struct iwl_cfg iwl8260_2ac_cfg = {
180 .name = "Intel(R) Dual Band Wireless AC 8260", 203 .name = "Intel(R) Dual Band Wireless AC 8260",
181 .fw_name_pre = IWL8000_FW_PRE, 204 .fw_name_pre = IWL8000_FW_PRE,
182 IWL_DEVICE_8000, 205 IWL_DEVICE_8260,
183 .ht_params = &iwl8000_ht_params, 206 .ht_params = &iwl8000_ht_params,
184 .nvm_ver = IWL8000_NVM_VERSION, 207 .nvm_ver = IWL8000_NVM_VERSION,
185 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 208 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -188,8 +211,8 @@ const struct iwl_cfg iwl8260_2ac_cfg = {
188 211
189const struct iwl_cfg iwl8265_2ac_cfg = { 212const struct iwl_cfg iwl8265_2ac_cfg = {
190 .name = "Intel(R) Dual Band Wireless AC 8265", 213 .name = "Intel(R) Dual Band Wireless AC 8265",
191 .fw_name_pre = IWL8000_FW_PRE, 214 .fw_name_pre = IWL8265_FW_PRE,
192 IWL_DEVICE_8000, 215 IWL_DEVICE_8265,
193 .ht_params = &iwl8000_ht_params, 216 .ht_params = &iwl8000_ht_params,
194 .nvm_ver = IWL8000_NVM_VERSION, 217 .nvm_ver = IWL8000_NVM_VERSION,
195 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 218 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -209,7 +232,7 @@ const struct iwl_cfg iwl4165_2ac_cfg = {
209const struct iwl_cfg iwl8260_2ac_sdio_cfg = { 232const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
210 .name = "Intel(R) Dual Band Wireless-AC 8260", 233 .name = "Intel(R) Dual Band Wireless-AC 8260",
211 .fw_name_pre = IWL8000_FW_PRE, 234 .fw_name_pre = IWL8000_FW_PRE,
212 IWL_DEVICE_8000, 235 IWL_DEVICE_8260,
213 .ht_params = &iwl8000_ht_params, 236 .ht_params = &iwl8000_ht_params,
214 .nvm_ver = IWL8000_NVM_VERSION, 237 .nvm_ver = IWL8000_NVM_VERSION,
215 .nvm_calib_ver = IWL8000_TX_POWER_VERSION, 238 .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -236,3 +259,4 @@ const struct iwl_cfg iwl4165_2ac_sdio_cfg = {
236}; 259};
237 260
238MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK)); 261MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
262MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_OK));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 7acb49075683..ab4c2a0470b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -243,8 +243,10 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
243 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 243 if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
244 char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev); 244 char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
245 245
246 snprintf(drv->firmware_name, sizeof(drv->firmware_name), 246 if (rev_step != 'A')
247 "%s%c-%s.ucode", name_pre, rev_step, tag); 247 snprintf(drv->firmware_name,
248 sizeof(drv->firmware_name), "%s%c-%s.ucode",
249 name_pre, rev_step, tag);
248 } 250 }
249 251
250 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n", 252 IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 4ed5180c547b..0ccc697fef76 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -107,7 +107,7 @@ static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
107 sizeof(tx_ant_cmd), &tx_ant_cmd); 107 sizeof(tx_ant_cmd), &tx_ant_cmd);
108} 108}
109 109
110static void iwl_free_fw_paging(struct iwl_mvm *mvm) 110void iwl_free_fw_paging(struct iwl_mvm *mvm)
111{ 111{
112 int i; 112 int i;
113 113
@@ -127,6 +127,8 @@ static void iwl_free_fw_paging(struct iwl_mvm *mvm)
127 get_order(mvm->fw_paging_db[i].fw_paging_size)); 127 get_order(mvm->fw_paging_db[i].fw_paging_size));
128 } 128 }
129 kfree(mvm->trans->paging_download_buf); 129 kfree(mvm->trans->paging_download_buf);
130 mvm->trans->paging_download_buf = NULL;
131
130 memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db)); 132 memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
131} 133}
132 134
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 5f3ac8cccf49..ff7c6df9f941 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -1225,6 +1225,9 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
1225void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, 1225void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
1226 struct iwl_rx_cmd_buffer *rxb); 1226 struct iwl_rx_cmd_buffer *rxb);
1227 1227
1228/* Paging */
1229void iwl_free_fw_paging(struct iwl_mvm *mvm);
1230
1228/* MVM debugfs */ 1231/* MVM debugfs */
1229#ifdef CONFIG_IWLWIFI_DEBUGFS 1232#ifdef CONFIG_IWLWIFI_DEBUGFS
1230int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir); 1233int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 89ea70deeb84..e80be9a59520 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -684,6 +684,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
684 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++) 684 for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
685 kfree(mvm->nvm_sections[i].data); 685 kfree(mvm->nvm_sections[i].data);
686 686
687 iwl_free_fw_paging(mvm);
688
687 iwl_mvm_tof_clean(mvm); 689 iwl_mvm_tof_clean(mvm);
688 690
689 ieee80211_free_hw(mvm->hw); 691 ieee80211_free_hw(mvm->hw);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 9a15642f80dd..ea1e177c2ea1 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1298,6 +1298,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
1298 return -EBUSY; 1298 return -EBUSY;
1299 } 1299 }
1300 1300
1301 /* we don't support "match all" in the firmware */
1302 if (!req->n_match_sets)
1303 return -EOPNOTSUPP;
1304
1301 ret = iwl_mvm_check_running_scans(mvm, type); 1305 ret = iwl_mvm_check_running_scans(mvm, type);
1302 if (ret) 1306 if (ret)
1303 return ret; 1307 return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 0914ec2fd574..a040edc55057 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -423,6 +423,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
423 return -1; 423 return -1;
424 } 424 }
425 425
426 /*
427 * Increase the pending frames counter, so that later when a reply comes
428 * in and the counter is decreased - we don't start getting negative
429 * values.
430 * Note that we don't need to make sure it isn't agg'd, since we're
431 * TXing non-sta
432 */
433 atomic_inc(&mvm->pending_frames[sta_id]);
434
426 return 0; 435 return 0;
427} 436}
428 437
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index cc3888e2700d..73c95594eabe 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -490,6 +490,15 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
490 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask); 490 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
491} 491}
492 492
493static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
494{
495 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
496
497 IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
498 trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
499 iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
500}
501
493static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) 502static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
494{ 503{
495 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 504 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index ccafbd8cf4b3..152cf9ad9566 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1438,9 +1438,11 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1438 inta & ~trans_pcie->inta_mask); 1438 inta & ~trans_pcie->inta_mask);
1439 } 1439 }
1440 1440
1441 /* Re-enable all interrupts */ 1441 /* we are loading the firmware, enable FH_TX interrupt only */
1442 /* only Re-enable if disabled by irq */ 1442 if (handled & CSR_INT_BIT_FH_TX)
1443 if (test_bit(STATUS_INT_ENABLED, &trans->status)) 1443 iwl_enable_fw_load_int(trans);
1444 /* only Re-enable all interrupt if disabled by irq */
1445 else if (test_bit(STATUS_INT_ENABLED, &trans->status))
1444 iwl_enable_interrupts(trans); 1446 iwl_enable_interrupts(trans);
1445 /* Re-enable RF_KILL if it occurred */ 1447 /* Re-enable RF_KILL if it occurred */
1446 else if (handled & CSR_INT_BIT_RF_KILL) 1448 else if (handled & CSR_INT_BIT_RF_KILL)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index d60a467a983c..5a854c609477 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1021,82 +1021,6 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
1021 &first_ucode_section); 1021 &first_ucode_section);
1022} 1022}
1023 1023
1024static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1025 const struct fw_img *fw, bool run_in_rfkill)
1026{
1027 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1028 bool hw_rfkill;
1029 int ret;
1030
1031 mutex_lock(&trans_pcie->mutex);
1032
1033 /* Someone called stop_device, don't try to start_fw */
1034 if (trans_pcie->is_down) {
1035 IWL_WARN(trans,
1036 "Can't start_fw since the HW hasn't been started\n");
1037 ret = EIO;
1038 goto out;
1039 }
1040
1041 /* This may fail if AMT took ownership of the device */
1042 if (iwl_pcie_prepare_card_hw(trans)) {
1043 IWL_WARN(trans, "Exit HW not ready\n");
1044 ret = -EIO;
1045 goto out;
1046 }
1047
1048 iwl_enable_rfkill_int(trans);
1049
1050 /* If platform's RF_KILL switch is NOT set to KILL */
1051 hw_rfkill = iwl_is_rfkill_set(trans);
1052 if (hw_rfkill)
1053 set_bit(STATUS_RFKILL, &trans->status);
1054 else
1055 clear_bit(STATUS_RFKILL, &trans->status);
1056 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1057 if (hw_rfkill && !run_in_rfkill) {
1058 ret = -ERFKILL;
1059 goto out;
1060 }
1061
1062 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1063
1064 ret = iwl_pcie_nic_init(trans);
1065 if (ret) {
1066 IWL_ERR(trans, "Unable to init nic\n");
1067 goto out;
1068 }
1069
1070 /* make sure rfkill handshake bits are cleared */
1071 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1072 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1073 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1074
1075 /* clear (again), then enable host interrupts */
1076 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1077 iwl_enable_interrupts(trans);
1078
1079 /* really make sure rfkill handshake bits are cleared */
1080 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1081 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1082
1083 /* Load the given image to the HW */
1084 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1085 ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1086 else
1087 ret = iwl_pcie_load_given_ucode(trans, fw);
1088
1089out:
1090 mutex_unlock(&trans_pcie->mutex);
1091 return ret;
1092}
1093
1094static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1095{
1096 iwl_pcie_reset_ict(trans);
1097 iwl_pcie_tx_start(trans, scd_addr);
1098}
1099
1100static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1024static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1101{ 1025{
1102 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1026 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1127,7 +1051,8 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1127 * already dead. 1051 * already dead.
1128 */ 1052 */
1129 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) { 1053 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
1130 IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n"); 1054 IWL_DEBUG_INFO(trans,
1055 "DEVICE_ENABLED bit was set and is now cleared\n");
1131 iwl_pcie_tx_stop(trans); 1056 iwl_pcie_tx_stop(trans);
1132 iwl_pcie_rx_stop(trans); 1057 iwl_pcie_rx_stop(trans);
1133 1058
@@ -1161,7 +1086,6 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1161 iwl_disable_interrupts(trans); 1086 iwl_disable_interrupts(trans);
1162 spin_unlock(&trans_pcie->irq_lock); 1087 spin_unlock(&trans_pcie->irq_lock);
1163 1088
1164
1165 /* clear all status bits */ 1089 /* clear all status bits */
1166 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 1090 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1167 clear_bit(STATUS_INT_ENABLED, &trans->status); 1091 clear_bit(STATUS_INT_ENABLED, &trans->status);
@@ -1194,10 +1118,116 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1194 if (hw_rfkill != was_hw_rfkill) 1118 if (hw_rfkill != was_hw_rfkill)
1195 iwl_trans_pcie_rf_kill(trans, hw_rfkill); 1119 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1196 1120
1197 /* re-take ownership to prevent other users from stealing the deivce */ 1121 /* re-take ownership to prevent other users from stealing the device */
1198 iwl_pcie_prepare_card_hw(trans); 1122 iwl_pcie_prepare_card_hw(trans);
1199} 1123}
1200 1124
1125static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1126 const struct fw_img *fw, bool run_in_rfkill)
1127{
1128 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1129 bool hw_rfkill;
1130 int ret;
1131
1132 /* This may fail if AMT took ownership of the device */
1133 if (iwl_pcie_prepare_card_hw(trans)) {
1134 IWL_WARN(trans, "Exit HW not ready\n");
1135 ret = -EIO;
1136 goto out;
1137 }
1138
1139 iwl_enable_rfkill_int(trans);
1140
1141 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1142
1143 /*
1144 * We enabled the RF-Kill interrupt and the handler may very
1145 * well be running. Disable the interrupts to make sure no other
1146 * interrupt can be fired.
1147 */
1148 iwl_disable_interrupts(trans);
1149
1150 /* Make sure it finished running */
1151 synchronize_irq(trans_pcie->pci_dev->irq);
1152
1153 mutex_lock(&trans_pcie->mutex);
1154
1155 /* If platform's RF_KILL switch is NOT set to KILL */
1156 hw_rfkill = iwl_is_rfkill_set(trans);
1157 if (hw_rfkill)
1158 set_bit(STATUS_RFKILL, &trans->status);
1159 else
1160 clear_bit(STATUS_RFKILL, &trans->status);
1161 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1162 if (hw_rfkill && !run_in_rfkill) {
1163 ret = -ERFKILL;
1164 goto out;
1165 }
1166
1167 /* Someone called stop_device, don't try to start_fw */
1168 if (trans_pcie->is_down) {
1169 IWL_WARN(trans,
1170 "Can't start_fw since the HW hasn't been started\n");
1171 ret = -EIO;
1172 goto out;
1173 }
1174
1175 /* make sure rfkill handshake bits are cleared */
1176 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1177 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1178 CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1179
1180 /* clear (again), then enable host interrupts */
1181 iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1182
1183 ret = iwl_pcie_nic_init(trans);
1184 if (ret) {
1185 IWL_ERR(trans, "Unable to init nic\n");
1186 goto out;
1187 }
1188
1189 /*
1190 * Now, we load the firmware and don't want to be interrupted, even
1191 * by the RF-Kill interrupt (hence mask all the interrupt besides the
1192 * FH_TX interrupt which is needed to load the firmware). If the
1193 * RF-Kill switch is toggled, we will find out after having loaded
1194 * the firmware and return the proper value to the caller.
1195 */
1196 iwl_enable_fw_load_int(trans);
1197
1198 /* really make sure rfkill handshake bits are cleared */
1199 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1200 iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1201
1202 /* Load the given image to the HW */
1203 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
1204 ret = iwl_pcie_load_given_ucode_8000(trans, fw);
1205 else
1206 ret = iwl_pcie_load_given_ucode(trans, fw);
1207 iwl_enable_interrupts(trans);
1208
1209 /* re-check RF-Kill state since we may have missed the interrupt */
1210 hw_rfkill = iwl_is_rfkill_set(trans);
1211 if (hw_rfkill)
1212 set_bit(STATUS_RFKILL, &trans->status);
1213 else
1214 clear_bit(STATUS_RFKILL, &trans->status);
1215
1216 iwl_trans_pcie_rf_kill(trans, hw_rfkill);
1217 if (hw_rfkill && !run_in_rfkill)
1218 ret = -ERFKILL;
1219
1220out:
1221 mutex_unlock(&trans_pcie->mutex);
1222 return ret;
1223}
1224
1225static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
1226{
1227 iwl_pcie_reset_ict(trans);
1228 iwl_pcie_tx_start(trans, scd_addr);
1229}
1230
1201static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power) 1231static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
1202{ 1232{
1203 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1233 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 74c14ce28238..28f7010e7108 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -138,6 +138,11 @@ static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
138 ((wireless_mode == WIRELESS_MODE_N_5G) || 138 ((wireless_mode == WIRELESS_MODE_N_5G) ||
139 (wireless_mode == WIRELESS_MODE_N_24G))) 139 (wireless_mode == WIRELESS_MODE_N_24G)))
140 rate->flags |= IEEE80211_TX_RC_MCS; 140 rate->flags |= IEEE80211_TX_RC_MCS;
141 if (sta && sta->vht_cap.vht_supported &&
142 (wireless_mode == WIRELESS_MODE_AC_5G ||
143 wireless_mode == WIRELESS_MODE_AC_24G ||
144 wireless_mode == WIRELESS_MODE_AC_ONLY))
145 rate->flags |= IEEE80211_TX_RC_VHT_MCS;
141 } 146 }
142} 147}
143 148
diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c
index 9ac118e727e9..564ca750c5ee 100644
--- a/drivers/net/wireless/ti/wlcore/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -175,14 +175,14 @@ int wlcore_set_partition(struct wl1271 *wl,
175 if (ret < 0) 175 if (ret < 0)
176 goto out; 176 goto out;
177 177
178 /* We don't need the size of the last partition, as it is
179 * automatically calculated based on the total memory size and
180 * the sizes of the previous partitions.
181 */
178 ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start); 182 ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
179 if (ret < 0) 183 if (ret < 0)
180 goto out; 184 goto out;
181 185
182 ret = wlcore_raw_write32(wl, HW_PART3_SIZE_ADDR, p->mem3.size);
183 if (ret < 0)
184 goto out;
185
186out: 186out:
187 return ret; 187 return ret;
188} 188}
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 6c257b54f415..10cf3747694d 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -36,8 +36,8 @@
36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12) 36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16) 37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20) 38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
39#define HW_PART3_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 24) 39#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
40#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 28) 40
41#define HW_ACCESS_REGISTER_SIZE 4 41#define HW_ACCESS_REGISTER_SIZE 4
42 42
43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000 43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 7e2c43f701bc..5d28e9405f32 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -382,18 +382,18 @@ static const struct nd_cmd_desc __nd_cmd_bus_descs[] = {
382 [ND_CMD_ARS_CAP] = { 382 [ND_CMD_ARS_CAP] = {
383 .in_num = 2, 383 .in_num = 2,
384 .in_sizes = { 8, 8, }, 384 .in_sizes = { 8, 8, },
385 .out_num = 2, 385 .out_num = 4,
386 .out_sizes = { 4, 4, }, 386 .out_sizes = { 4, 4, 4, 4, },
387 }, 387 },
388 [ND_CMD_ARS_START] = { 388 [ND_CMD_ARS_START] = {
389 .in_num = 4, 389 .in_num = 5,
390 .in_sizes = { 8, 8, 2, 6, }, 390 .in_sizes = { 8, 8, 2, 1, 5, },
391 .out_num = 1, 391 .out_num = 2,
392 .out_sizes = { 4, }, 392 .out_sizes = { 4, 4, },
393 }, 393 },
394 [ND_CMD_ARS_STATUS] = { 394 [ND_CMD_ARS_STATUS] = {
395 .out_num = 2, 395 .out_num = 3,
396 .out_sizes = { 4, UINT_MAX, }, 396 .out_sizes = { 4, 4, UINT_MAX, },
397 }, 397 },
398}; 398};
399 399
@@ -442,8 +442,8 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
442 return in_field[1]; 442 return in_field[1];
443 else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2) 443 else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
444 return out_field[1]; 444 return out_field[1];
445 else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 1) 445 else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
446 return ND_CMD_ARS_STATUS_MAX; 446 return out_field[1] - 8;
447 447
448 return UINT_MAX; 448 return UINT_MAX;
449} 449}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7edf31671dab..8d0b54670184 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -41,7 +41,7 @@ struct pmem_device {
41 phys_addr_t phys_addr; 41 phys_addr_t phys_addr;
42 /* when non-zero this device is hosting a 'pfn' instance */ 42 /* when non-zero this device is hosting a 'pfn' instance */
43 phys_addr_t data_offset; 43 phys_addr_t data_offset;
44 unsigned long pfn_flags; 44 u64 pfn_flags;
45 void __pmem *virt_addr; 45 void __pmem *virt_addr;
46 size_t size; 46 size_t size;
47 struct badblocks bb; 47 struct badblocks bb;
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 5d6237391dcd..b586d84f2518 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -17,5 +17,6 @@ config BLK_DEV_NVME_SCSI
17 and block devices nodes, as well a a translation for a small 17 and block devices nodes, as well a a translation for a small
18 number of selected SCSI commands to NVMe commands to the NVMe 18 number of selected SCSI commands to NVMe commands to the NVMe
19 driver. If you don't know what this means you probably want 19 driver. If you don't know what this means you probably want
20 to say N here, and if you know what it means you probably 20 to say N here, unless you run a distro that abuses the SCSI
21 want to say N as well. 21 emulation to provide stable device names for mount by id, like
22 some OpenSuSE and SLES versions.
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c5bf001af559..03c46412fff4 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -55,8 +55,9 @@ static void nvme_free_ns(struct kref *kref)
55 ns->disk->private_data = NULL; 55 ns->disk->private_data = NULL;
56 spin_unlock(&dev_list_lock); 56 spin_unlock(&dev_list_lock);
57 57
58 nvme_put_ctrl(ns->ctrl);
59 put_disk(ns->disk); 58 put_disk(ns->disk);
59 ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
60 nvme_put_ctrl(ns->ctrl);
60 kfree(ns); 61 kfree(ns);
61} 62}
62 63
@@ -183,7 +184,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
183 goto out_unmap; 184 goto out_unmap;
184 } 185 }
185 186
186 if (meta_buffer) { 187 if (meta_buffer && meta_len) {
187 struct bio_integrity_payload *bip; 188 struct bio_integrity_payload *bip;
188 189
189 meta = kmalloc(meta_len, GFP_KERNEL); 190 meta = kmalloc(meta_len, GFP_KERNEL);
@@ -373,6 +374,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
373 374
374 if (copy_from_user(&io, uio, sizeof(io))) 375 if (copy_from_user(&io, uio, sizeof(io)))
375 return -EFAULT; 376 return -EFAULT;
377 if (io.flags)
378 return -EINVAL;
376 379
377 switch (io.opcode) { 380 switch (io.opcode) {
378 case nvme_cmd_write: 381 case nvme_cmd_write:
@@ -424,6 +427,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
424 return -EACCES; 427 return -EACCES;
425 if (copy_from_user(&cmd, ucmd, sizeof(cmd))) 428 if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
426 return -EFAULT; 429 return -EFAULT;
430 if (cmd.flags)
431 return -EINVAL;
427 432
428 memset(&c, 0, sizeof(c)); 433 memset(&c, 0, sizeof(c));
429 c.common.opcode = cmd.opcode; 434 c.common.opcode = cmd.opcode;
@@ -556,6 +561,10 @@ static int nvme_revalidate_disk(struct gendisk *disk)
556 u16 old_ms; 561 u16 old_ms;
557 unsigned short bs; 562 unsigned short bs;
558 563
564 if (test_bit(NVME_NS_DEAD, &ns->flags)) {
565 set_capacity(disk, 0);
566 return -ENODEV;
567 }
559 if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) { 568 if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) {
560 dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n", 569 dev_warn(ns->ctrl->dev, "%s: Identify failure nvme%dn%d\n",
561 __func__, ns->ctrl->instance, ns->ns_id); 570 __func__, ns->ctrl->instance, ns->ns_id);
@@ -831,6 +840,23 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl)
831 return ret; 840 return ret;
832} 841}
833 842
843static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
844 struct request_queue *q)
845{
846 if (ctrl->max_hw_sectors) {
847 u32 max_segments =
848 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
849
850 blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
851 blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
852 }
853 if (ctrl->stripe_size)
854 blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
855 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
856 blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
857 blk_queue_virt_boundary(q, ctrl->page_size - 1);
858}
859
834/* 860/*
835 * Initialize the cached copies of the Identify data and various controller 861 * Initialize the cached copies of the Identify data and various controller
836 * register in our nvme_ctrl structure. This should be called as soon as 862 * register in our nvme_ctrl structure. This should be called as soon as
@@ -888,6 +914,8 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
888 } 914 }
889 } 915 }
890 916
917 nvme_set_queue_limits(ctrl, ctrl->admin_q);
918
891 kfree(id); 919 kfree(id);
892 return 0; 920 return 0;
893} 921}
@@ -1118,10 +1146,13 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1118 if (!ns) 1146 if (!ns)
1119 return; 1147 return;
1120 1148
1149 ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
1150 if (ns->instance < 0)
1151 goto out_free_ns;
1152
1121 ns->queue = blk_mq_init_queue(ctrl->tagset); 1153 ns->queue = blk_mq_init_queue(ctrl->tagset);
1122 if (IS_ERR(ns->queue)) 1154 if (IS_ERR(ns->queue))
1123 goto out_free_ns; 1155 goto out_release_instance;
1124 queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
1125 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); 1156 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
1126 ns->queue->queuedata = ns; 1157 ns->queue->queuedata = ns;
1127 ns->ctrl = ctrl; 1158 ns->ctrl = ctrl;
@@ -1135,17 +1166,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1135 ns->disk = disk; 1166 ns->disk = disk;
1136 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ 1167 ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */
1137 1168
1169
1138 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); 1170 blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
1139 if (ctrl->max_hw_sectors) { 1171 nvme_set_queue_limits(ctrl, ns->queue);
1140 blk_queue_max_hw_sectors(ns->queue, ctrl->max_hw_sectors);
1141 blk_queue_max_segments(ns->queue,
1142 (ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1);
1143 }
1144 if (ctrl->stripe_size)
1145 blk_queue_chunk_sectors(ns->queue, ctrl->stripe_size >> 9);
1146 if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
1147 blk_queue_flush(ns->queue, REQ_FLUSH | REQ_FUA);
1148 blk_queue_virt_boundary(ns->queue, ctrl->page_size - 1);
1149 1172
1150 disk->major = nvme_major; 1173 disk->major = nvme_major;
1151 disk->first_minor = 0; 1174 disk->first_minor = 0;
@@ -1154,7 +1177,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1154 disk->queue = ns->queue; 1177 disk->queue = ns->queue;
1155 disk->driverfs_dev = ctrl->device; 1178 disk->driverfs_dev = ctrl->device;
1156 disk->flags = GENHD_FL_EXT_DEVT; 1179 disk->flags = GENHD_FL_EXT_DEVT;
1157 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, nsid); 1180 sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance);
1158 1181
1159 if (nvme_revalidate_disk(ns->disk)) 1182 if (nvme_revalidate_disk(ns->disk))
1160 goto out_free_disk; 1183 goto out_free_disk;
@@ -1174,40 +1197,29 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
1174 kfree(disk); 1197 kfree(disk);
1175 out_free_queue: 1198 out_free_queue:
1176 blk_cleanup_queue(ns->queue); 1199 blk_cleanup_queue(ns->queue);
1200 out_release_instance:
1201 ida_simple_remove(&ctrl->ns_ida, ns->instance);
1177 out_free_ns: 1202 out_free_ns:
1178 kfree(ns); 1203 kfree(ns);
1179} 1204}
1180 1205
1181static void nvme_ns_remove(struct nvme_ns *ns) 1206static void nvme_ns_remove(struct nvme_ns *ns)
1182{ 1207{
1183 bool kill = nvme_io_incapable(ns->ctrl) && 1208 if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
1184 !blk_queue_dying(ns->queue); 1209 return;
1185
1186 lockdep_assert_held(&ns->ctrl->namespaces_mutex);
1187
1188 if (kill) {
1189 blk_set_queue_dying(ns->queue);
1190 1210
1191 /*
1192 * The controller was shutdown first if we got here through
1193 * device removal. The shutdown may requeue outstanding
1194 * requests. These need to be aborted immediately so
1195 * del_gendisk doesn't block indefinitely for their completion.
1196 */
1197 blk_mq_abort_requeue_list(ns->queue);
1198 }
1199 if (ns->disk->flags & GENHD_FL_UP) { 1211 if (ns->disk->flags & GENHD_FL_UP) {
1200 if (blk_get_integrity(ns->disk)) 1212 if (blk_get_integrity(ns->disk))
1201 blk_integrity_unregister(ns->disk); 1213 blk_integrity_unregister(ns->disk);
1202 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, 1214 sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
1203 &nvme_ns_attr_group); 1215 &nvme_ns_attr_group);
1204 del_gendisk(ns->disk); 1216 del_gendisk(ns->disk);
1205 }
1206 if (kill || !blk_queue_dying(ns->queue)) {
1207 blk_mq_abort_requeue_list(ns->queue); 1217 blk_mq_abort_requeue_list(ns->queue);
1208 blk_cleanup_queue(ns->queue); 1218 blk_cleanup_queue(ns->queue);
1209 } 1219 }
1220 mutex_lock(&ns->ctrl->namespaces_mutex);
1210 list_del_init(&ns->list); 1221 list_del_init(&ns->list);
1222 mutex_unlock(&ns->ctrl->namespaces_mutex);
1211 nvme_put_ns(ns); 1223 nvme_put_ns(ns);
1212} 1224}
1213 1225
@@ -1301,10 +1313,8 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
1301{ 1313{
1302 struct nvme_ns *ns, *next; 1314 struct nvme_ns *ns, *next;
1303 1315
1304 mutex_lock(&ctrl->namespaces_mutex);
1305 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) 1316 list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
1306 nvme_ns_remove(ns); 1317 nvme_ns_remove(ns);
1307 mutex_unlock(&ctrl->namespaces_mutex);
1308} 1318}
1309 1319
1310static DEFINE_IDA(nvme_instance_ida); 1320static DEFINE_IDA(nvme_instance_ida);
@@ -1351,6 +1361,7 @@ static void nvme_free_ctrl(struct kref *kref)
1351 1361
1352 put_device(ctrl->device); 1362 put_device(ctrl->device);
1353 nvme_release_instance(ctrl); 1363 nvme_release_instance(ctrl);
1364 ida_destroy(&ctrl->ns_ida);
1354 1365
1355 ctrl->ops->free_ctrl(ctrl); 1366 ctrl->ops->free_ctrl(ctrl);
1356} 1367}
@@ -1391,6 +1402,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
1391 } 1402 }
1392 get_device(ctrl->device); 1403 get_device(ctrl->device);
1393 dev_set_drvdata(ctrl->device, ctrl); 1404 dev_set_drvdata(ctrl->device, ctrl);
1405 ida_init(&ctrl->ns_ida);
1394 1406
1395 spin_lock(&dev_list_lock); 1407 spin_lock(&dev_list_lock);
1396 list_add_tail(&ctrl->node, &nvme_ctrl_list); 1408 list_add_tail(&ctrl->node, &nvme_ctrl_list);
@@ -1403,6 +1415,38 @@ out:
1403 return ret; 1415 return ret;
1404} 1416}
1405 1417
1418/**
1419 * nvme_kill_queues(): Ends all namespace queues
1420 * @ctrl: the dead controller that needs to end
1421 *
1422 * Call this function when the driver determines it is unable to get the
1423 * controller in a state capable of servicing IO.
1424 */
1425void nvme_kill_queues(struct nvme_ctrl *ctrl)
1426{
1427 struct nvme_ns *ns;
1428
1429 mutex_lock(&ctrl->namespaces_mutex);
1430 list_for_each_entry(ns, &ctrl->namespaces, list) {
1431 if (!kref_get_unless_zero(&ns->kref))
1432 continue;
1433
1434 /*
1435 * Revalidating a dead namespace sets capacity to 0. This will
1436 * end buffered writers dirtying pages that can't be synced.
1437 */
1438 if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
1439 revalidate_disk(ns->disk);
1440
1441 blk_set_queue_dying(ns->queue);
1442 blk_mq_abort_requeue_list(ns->queue);
1443 blk_mq_start_stopped_hw_queues(ns->queue, true);
1444
1445 nvme_put_ns(ns);
1446 }
1447 mutex_unlock(&ctrl->namespaces_mutex);
1448}
1449
1406void nvme_stop_queues(struct nvme_ctrl *ctrl) 1450void nvme_stop_queues(struct nvme_ctrl *ctrl)
1407{ 1451{
1408 struct nvme_ns *ns; 1452 struct nvme_ns *ns;
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 5cd3725e2fa4..6bb15e4926dc 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,9 +146,10 @@ struct nvme_nvm_command {
146 }; 146 };
147}; 147};
148 148
149#define NVME_NVM_LP_MLC_PAIRS 886
149struct nvme_nvm_lp_mlc { 150struct nvme_nvm_lp_mlc {
150 __u16 num_pairs; 151 __u16 num_pairs;
151 __u8 pairs[886]; 152 __u8 pairs[NVME_NVM_LP_MLC_PAIRS];
152}; 153};
153 154
154struct nvme_nvm_lp_tbl { 155struct nvme_nvm_lp_tbl {
@@ -282,9 +283,14 @@ static int init_grps(struct nvm_id *nvm_id, struct nvme_nvm_id *nvme_nvm_id)
282 memcpy(dst->lptbl.id, src->lptbl.id, 8); 283 memcpy(dst->lptbl.id, src->lptbl.id, 8);
283 dst->lptbl.mlc.num_pairs = 284 dst->lptbl.mlc.num_pairs =
284 le16_to_cpu(src->lptbl.mlc.num_pairs); 285 le16_to_cpu(src->lptbl.mlc.num_pairs);
285 /* 4 bits per pair */ 286
287 if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
288 pr_err("nvm: number of MLC pairs not supported\n");
289 return -EINVAL;
290 }
291
286 memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs, 292 memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
287 dst->lptbl.mlc.num_pairs >> 1); 293 dst->lptbl.mlc.num_pairs);
288 } 294 }
289 } 295 }
290 296
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 4fb5bb737868..fb15ba5f5d19 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -72,6 +72,7 @@ struct nvme_ctrl {
72 struct mutex namespaces_mutex; 72 struct mutex namespaces_mutex;
73 struct device *device; /* char device */ 73 struct device *device; /* char device */
74 struct list_head node; 74 struct list_head node;
75 struct ida ns_ida;
75 76
76 char name[12]; 77 char name[12];
77 char serial[20]; 78 char serial[20];
@@ -102,6 +103,7 @@ struct nvme_ns {
102 struct request_queue *queue; 103 struct request_queue *queue;
103 struct gendisk *disk; 104 struct gendisk *disk;
104 struct kref kref; 105 struct kref kref;
106 int instance;
105 107
106 u8 eui[8]; 108 u8 eui[8];
107 u8 uuid[16]; 109 u8 uuid[16];
@@ -112,6 +114,11 @@ struct nvme_ns {
112 bool ext; 114 bool ext;
113 u8 pi_type; 115 u8 pi_type;
114 int type; 116 int type;
117 unsigned long flags;
118
119#define NVME_NS_REMOVING 0
120#define NVME_NS_DEAD 1
121
115 u64 mode_select_num_blocks; 122 u64 mode_select_num_blocks;
116 u32 mode_select_block_len; 123 u32 mode_select_block_len;
117}; 124};
@@ -139,9 +146,9 @@ static inline bool nvme_io_incapable(struct nvme_ctrl *ctrl)
139 u32 val = 0; 146 u32 val = 0;
140 147
141 if (ctrl->ops->io_incapable(ctrl)) 148 if (ctrl->ops->io_incapable(ctrl))
142 return false; 149 return true;
143 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val)) 150 if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
144 return false; 151 return true;
145 return val & NVME_CSTS_CFS; 152 return val & NVME_CSTS_CFS;
146} 153}
147 154
@@ -240,6 +247,7 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
240 247
241void nvme_stop_queues(struct nvme_ctrl *ctrl); 248void nvme_stop_queues(struct nvme_ctrl *ctrl);
242void nvme_start_queues(struct nvme_ctrl *ctrl); 249void nvme_start_queues(struct nvme_ctrl *ctrl);
250void nvme_kill_queues(struct nvme_ctrl *ctrl);
243 251
244struct request *nvme_alloc_request(struct request_queue *q, 252struct request *nvme_alloc_request(struct request_queue *q,
245 struct nvme_command *cmd, unsigned int flags); 253 struct nvme_command *cmd, unsigned int flags);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 72ef8322d32a..680f5780750c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -86,7 +86,6 @@ struct nvme_queue;
86 86
87static int nvme_reset(struct nvme_dev *dev); 87static int nvme_reset(struct nvme_dev *dev);
88static void nvme_process_cq(struct nvme_queue *nvmeq); 88static void nvme_process_cq(struct nvme_queue *nvmeq);
89static void nvme_remove_dead_ctrl(struct nvme_dev *dev);
90static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown); 89static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown);
91 90
92/* 91/*
@@ -120,6 +119,7 @@ struct nvme_dev {
120 unsigned long flags; 119 unsigned long flags;
121 120
122#define NVME_CTRL_RESETTING 0 121#define NVME_CTRL_RESETTING 0
122#define NVME_CTRL_REMOVING 1
123 123
124 struct nvme_ctrl ctrl; 124 struct nvme_ctrl ctrl;
125 struct completion ioq_wait; 125 struct completion ioq_wait;
@@ -286,6 +286,17 @@ static int nvme_init_request(void *data, struct request *req,
286 return 0; 286 return 0;
287} 287}
288 288
289static void nvme_queue_scan(struct nvme_dev *dev)
290{
291 /*
292 * Do not queue new scan work when a controller is reset during
293 * removal.
294 */
295 if (test_bit(NVME_CTRL_REMOVING, &dev->flags))
296 return;
297 queue_work(nvme_workq, &dev->scan_work);
298}
299
289static void nvme_complete_async_event(struct nvme_dev *dev, 300static void nvme_complete_async_event(struct nvme_dev *dev,
290 struct nvme_completion *cqe) 301 struct nvme_completion *cqe)
291{ 302{
@@ -300,7 +311,7 @@ static void nvme_complete_async_event(struct nvme_dev *dev,
300 switch (result & 0xff07) { 311 switch (result & 0xff07) {
301 case NVME_AER_NOTICE_NS_CHANGED: 312 case NVME_AER_NOTICE_NS_CHANGED:
302 dev_info(dev->dev, "rescanning\n"); 313 dev_info(dev->dev, "rescanning\n");
303 queue_work(nvme_workq, &dev->scan_work); 314 nvme_queue_scan(dev);
304 default: 315 default:
305 dev_warn(dev->dev, "async event result %08x\n", result); 316 dev_warn(dev->dev, "async event result %08x\n", result);
306 } 317 }
@@ -678,6 +689,14 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
678 blk_mq_start_request(req); 689 blk_mq_start_request(req);
679 690
680 spin_lock_irq(&nvmeq->q_lock); 691 spin_lock_irq(&nvmeq->q_lock);
692 if (unlikely(nvmeq->cq_vector < 0)) {
693 if (ns && !test_bit(NVME_NS_DEAD, &ns->flags))
694 ret = BLK_MQ_RQ_QUEUE_BUSY;
695 else
696 ret = BLK_MQ_RQ_QUEUE_ERROR;
697 spin_unlock_irq(&nvmeq->q_lock);
698 goto out;
699 }
681 __nvme_submit_cmd(nvmeq, &cmnd); 700 __nvme_submit_cmd(nvmeq, &cmnd);
682 nvme_process_cq(nvmeq); 701 nvme_process_cq(nvmeq);
683 spin_unlock_irq(&nvmeq->q_lock); 702 spin_unlock_irq(&nvmeq->q_lock);
@@ -999,7 +1018,7 @@ static void nvme_cancel_queue_ios(struct request *req, void *data, bool reserved
999 if (!blk_mq_request_started(req)) 1018 if (!blk_mq_request_started(req))
1000 return; 1019 return;
1001 1020
1002 dev_warn(nvmeq->q_dmadev, 1021 dev_dbg_ratelimited(nvmeq->q_dmadev,
1003 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid); 1022 "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
1004 1023
1005 status = NVME_SC_ABORT_REQ; 1024 status = NVME_SC_ABORT_REQ;
@@ -1245,6 +1264,12 @@ static struct blk_mq_ops nvme_mq_ops = {
1245static void nvme_dev_remove_admin(struct nvme_dev *dev) 1264static void nvme_dev_remove_admin(struct nvme_dev *dev)
1246{ 1265{
1247 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) { 1266 if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q)) {
1267 /*
1268 * If the controller was reset during removal, it's possible
1269 * user requests may be waiting on a stopped queue. Start the
1270 * queue to flush these to completion.
1271 */
1272 blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true);
1248 blk_cleanup_queue(dev->ctrl.admin_q); 1273 blk_cleanup_queue(dev->ctrl.admin_q);
1249 blk_mq_free_tag_set(&dev->admin_tagset); 1274 blk_mq_free_tag_set(&dev->admin_tagset);
1250 } 1275 }
@@ -1685,14 +1710,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
1685 return 0; 1710 return 0;
1686 dev->ctrl.tagset = &dev->tagset; 1711 dev->ctrl.tagset = &dev->tagset;
1687 } 1712 }
1688 queue_work(nvme_workq, &dev->scan_work); 1713 nvme_queue_scan(dev);
1689 return 0; 1714 return 0;
1690} 1715}
1691 1716
1692static int nvme_dev_map(struct nvme_dev *dev) 1717static int nvme_pci_enable(struct nvme_dev *dev)
1693{ 1718{
1694 u64 cap; 1719 u64 cap;
1695 int bars, result = -ENOMEM; 1720 int result = -ENOMEM;
1696 struct pci_dev *pdev = to_pci_dev(dev->dev); 1721 struct pci_dev *pdev = to_pci_dev(dev->dev);
1697 1722
1698 if (pci_enable_device_mem(pdev)) 1723 if (pci_enable_device_mem(pdev))
@@ -1700,24 +1725,14 @@ static int nvme_dev_map(struct nvme_dev *dev)
1700 1725
1701 dev->entry[0].vector = pdev->irq; 1726 dev->entry[0].vector = pdev->irq;
1702 pci_set_master(pdev); 1727 pci_set_master(pdev);
1703 bars = pci_select_bars(pdev, IORESOURCE_MEM);
1704 if (!bars)
1705 goto disable_pci;
1706
1707 if (pci_request_selected_regions(pdev, bars, "nvme"))
1708 goto disable_pci;
1709 1728
1710 if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) && 1729 if (dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)) &&
1711 dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32))) 1730 dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(32)))
1712 goto disable; 1731 goto disable;
1713 1732
1714 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
1715 if (!dev->bar)
1716 goto disable;
1717
1718 if (readl(dev->bar + NVME_REG_CSTS) == -1) { 1733 if (readl(dev->bar + NVME_REG_CSTS) == -1) {
1719 result = -ENODEV; 1734 result = -ENODEV;
1720 goto unmap; 1735 goto disable;
1721 } 1736 }
1722 1737
1723 /* 1738 /*
@@ -1727,7 +1742,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
1727 if (!pdev->irq) { 1742 if (!pdev->irq) {
1728 result = pci_enable_msix(pdev, dev->entry, 1); 1743 result = pci_enable_msix(pdev, dev->entry, 1);
1729 if (result < 0) 1744 if (result < 0)
1730 goto unmap; 1745 goto disable;
1731 } 1746 }
1732 1747
1733 cap = lo_hi_readq(dev->bar + NVME_REG_CAP); 1748 cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
@@ -1754,18 +1769,20 @@ static int nvme_dev_map(struct nvme_dev *dev)
1754 pci_save_state(pdev); 1769 pci_save_state(pdev);
1755 return 0; 1770 return 0;
1756 1771
1757 unmap:
1758 iounmap(dev->bar);
1759 dev->bar = NULL;
1760 disable: 1772 disable:
1761 pci_release_regions(pdev);
1762 disable_pci:
1763 pci_disable_device(pdev); 1773 pci_disable_device(pdev);
1764 return result; 1774 return result;
1765} 1775}
1766 1776
1767static void nvme_dev_unmap(struct nvme_dev *dev) 1777static void nvme_dev_unmap(struct nvme_dev *dev)
1768{ 1778{
1779 if (dev->bar)
1780 iounmap(dev->bar);
1781 pci_release_regions(to_pci_dev(dev->dev));
1782}
1783
1784static void nvme_pci_disable(struct nvme_dev *dev)
1785{
1769 struct pci_dev *pdev = to_pci_dev(dev->dev); 1786 struct pci_dev *pdev = to_pci_dev(dev->dev);
1770 1787
1771 if (pdev->msi_enabled) 1788 if (pdev->msi_enabled)
@@ -1773,12 +1790,6 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
1773 else if (pdev->msix_enabled) 1790 else if (pdev->msix_enabled)
1774 pci_disable_msix(pdev); 1791 pci_disable_msix(pdev);
1775 1792
1776 if (dev->bar) {
1777 iounmap(dev->bar);
1778 dev->bar = NULL;
1779 pci_release_regions(pdev);
1780 }
1781
1782 if (pci_is_enabled(pdev)) { 1793 if (pci_is_enabled(pdev)) {
1783 pci_disable_pcie_error_reporting(pdev); 1794 pci_disable_pcie_error_reporting(pdev);
1784 pci_disable_device(pdev); 1795 pci_disable_device(pdev);
@@ -1837,7 +1848,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
1837 nvme_dev_list_remove(dev); 1848 nvme_dev_list_remove(dev);
1838 1849
1839 mutex_lock(&dev->shutdown_lock); 1850 mutex_lock(&dev->shutdown_lock);
1840 if (dev->bar) { 1851 if (pci_is_enabled(to_pci_dev(dev->dev))) {
1841 nvme_stop_queues(&dev->ctrl); 1852 nvme_stop_queues(&dev->ctrl);
1842 csts = readl(dev->bar + NVME_REG_CSTS); 1853 csts = readl(dev->bar + NVME_REG_CSTS);
1843 } 1854 }
@@ -1850,7 +1861,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
1850 nvme_disable_io_queues(dev); 1861 nvme_disable_io_queues(dev);
1851 nvme_disable_admin_queue(dev, shutdown); 1862 nvme_disable_admin_queue(dev, shutdown);
1852 } 1863 }
1853 nvme_dev_unmap(dev); 1864 nvme_pci_disable(dev);
1854 1865
1855 for (i = dev->queue_count - 1; i >= 0; i--) 1866 for (i = dev->queue_count - 1; i >= 0; i--)
1856 nvme_clear_queue(dev->queues[i]); 1867 nvme_clear_queue(dev->queues[i]);
@@ -1894,10 +1905,20 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
1894 kfree(dev); 1905 kfree(dev);
1895} 1906}
1896 1907
1908static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
1909{
1910 dev_warn(dev->dev, "Removing after probe failure status: %d\n", status);
1911
1912 kref_get(&dev->ctrl.kref);
1913 nvme_dev_disable(dev, false);
1914 if (!schedule_work(&dev->remove_work))
1915 nvme_put_ctrl(&dev->ctrl);
1916}
1917
1897static void nvme_reset_work(struct work_struct *work) 1918static void nvme_reset_work(struct work_struct *work)
1898{ 1919{
1899 struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work); 1920 struct nvme_dev *dev = container_of(work, struct nvme_dev, reset_work);
1900 int result; 1921 int result = -ENODEV;
1901 1922
1902 if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags))) 1923 if (WARN_ON(test_bit(NVME_CTRL_RESETTING, &dev->flags)))
1903 goto out; 1924 goto out;
@@ -1906,37 +1927,37 @@ static void nvme_reset_work(struct work_struct *work)
1906 * If we're called to reset a live controller first shut it down before 1927 * If we're called to reset a live controller first shut it down before
1907 * moving on. 1928 * moving on.
1908 */ 1929 */
1909 if (dev->bar) 1930 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
1910 nvme_dev_disable(dev, false); 1931 nvme_dev_disable(dev, false);
1911 1932
1912 set_bit(NVME_CTRL_RESETTING, &dev->flags); 1933 set_bit(NVME_CTRL_RESETTING, &dev->flags);
1913 1934
1914 result = nvme_dev_map(dev); 1935 result = nvme_pci_enable(dev);
1915 if (result) 1936 if (result)
1916 goto out; 1937 goto out;
1917 1938
1918 result = nvme_configure_admin_queue(dev); 1939 result = nvme_configure_admin_queue(dev);
1919 if (result) 1940 if (result)
1920 goto unmap; 1941 goto out;
1921 1942
1922 nvme_init_queue(dev->queues[0], 0); 1943 nvme_init_queue(dev->queues[0], 0);
1923 result = nvme_alloc_admin_tags(dev); 1944 result = nvme_alloc_admin_tags(dev);
1924 if (result) 1945 if (result)
1925 goto disable; 1946 goto out;
1926 1947
1927 result = nvme_init_identify(&dev->ctrl); 1948 result = nvme_init_identify(&dev->ctrl);
1928 if (result) 1949 if (result)
1929 goto free_tags; 1950 goto out;
1930 1951
1931 result = nvme_setup_io_queues(dev); 1952 result = nvme_setup_io_queues(dev);
1932 if (result) 1953 if (result)
1933 goto free_tags; 1954 goto out;
1934 1955
1935 dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS; 1956 dev->ctrl.event_limit = NVME_NR_AEN_COMMANDS;
1936 1957
1937 result = nvme_dev_list_add(dev); 1958 result = nvme_dev_list_add(dev);
1938 if (result) 1959 if (result)
1939 goto remove; 1960 goto out;
1940 1961
1941 /* 1962 /*
1942 * Keep the controller around but remove all namespaces if we don't have 1963 * Keep the controller around but remove all namespaces if we don't have
@@ -1953,19 +1974,8 @@ static void nvme_reset_work(struct work_struct *work)
1953 clear_bit(NVME_CTRL_RESETTING, &dev->flags); 1974 clear_bit(NVME_CTRL_RESETTING, &dev->flags);
1954 return; 1975 return;
1955 1976
1956 remove:
1957 nvme_dev_list_remove(dev);
1958 free_tags:
1959 nvme_dev_remove_admin(dev);
1960 blk_put_queue(dev->ctrl.admin_q);
1961 dev->ctrl.admin_q = NULL;
1962 dev->queues[0]->tags = NULL;
1963 disable:
1964 nvme_disable_admin_queue(dev, false);
1965 unmap:
1966 nvme_dev_unmap(dev);
1967 out: 1977 out:
1968 nvme_remove_dead_ctrl(dev); 1978 nvme_remove_dead_ctrl(dev, result);
1969} 1979}
1970 1980
1971static void nvme_remove_dead_ctrl_work(struct work_struct *work) 1981static void nvme_remove_dead_ctrl_work(struct work_struct *work)
@@ -1973,19 +1983,12 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
1973 struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work); 1983 struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
1974 struct pci_dev *pdev = to_pci_dev(dev->dev); 1984 struct pci_dev *pdev = to_pci_dev(dev->dev);
1975 1985
1986 nvme_kill_queues(&dev->ctrl);
1976 if (pci_get_drvdata(pdev)) 1987 if (pci_get_drvdata(pdev))
1977 pci_stop_and_remove_bus_device_locked(pdev); 1988 pci_stop_and_remove_bus_device_locked(pdev);
1978 nvme_put_ctrl(&dev->ctrl); 1989 nvme_put_ctrl(&dev->ctrl);
1979} 1990}
1980 1991
1981static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
1982{
1983 dev_warn(dev->dev, "Removing after probe failure\n");
1984 kref_get(&dev->ctrl.kref);
1985 if (!schedule_work(&dev->remove_work))
1986 nvme_put_ctrl(&dev->ctrl);
1987}
1988
1989static int nvme_reset(struct nvme_dev *dev) 1992static int nvme_reset(struct nvme_dev *dev)
1990{ 1993{
1991 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q)) 1994 if (!dev->ctrl.admin_q || blk_queue_dying(dev->ctrl.admin_q))
@@ -2037,6 +2040,27 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = {
2037 .free_ctrl = nvme_pci_free_ctrl, 2040 .free_ctrl = nvme_pci_free_ctrl,
2038}; 2041};
2039 2042
2043static int nvme_dev_map(struct nvme_dev *dev)
2044{
2045 int bars;
2046 struct pci_dev *pdev = to_pci_dev(dev->dev);
2047
2048 bars = pci_select_bars(pdev, IORESOURCE_MEM);
2049 if (!bars)
2050 return -ENODEV;
2051 if (pci_request_selected_regions(pdev, bars, "nvme"))
2052 return -ENODEV;
2053
2054 dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
2055 if (!dev->bar)
2056 goto release;
2057
2058 return 0;
2059 release:
2060 pci_release_regions(pdev);
2061 return -ENODEV;
2062}
2063
2040static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) 2064static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2041{ 2065{
2042 int node, result = -ENOMEM; 2066 int node, result = -ENOMEM;
@@ -2061,6 +2085,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2061 dev->dev = get_device(&pdev->dev); 2085 dev->dev = get_device(&pdev->dev);
2062 pci_set_drvdata(pdev, dev); 2086 pci_set_drvdata(pdev, dev);
2063 2087
2088 result = nvme_dev_map(dev);
2089 if (result)
2090 goto free;
2091
2064 INIT_LIST_HEAD(&dev->node); 2092 INIT_LIST_HEAD(&dev->node);
2065 INIT_WORK(&dev->scan_work, nvme_dev_scan); 2093 INIT_WORK(&dev->scan_work, nvme_dev_scan);
2066 INIT_WORK(&dev->reset_work, nvme_reset_work); 2094 INIT_WORK(&dev->reset_work, nvme_reset_work);
@@ -2084,6 +2112,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2084 nvme_release_prp_pools(dev); 2112 nvme_release_prp_pools(dev);
2085 put_pci: 2113 put_pci:
2086 put_device(dev->dev); 2114 put_device(dev->dev);
2115 nvme_dev_unmap(dev);
2087 free: 2116 free:
2088 kfree(dev->queues); 2117 kfree(dev->queues);
2089 kfree(dev->entry); 2118 kfree(dev->entry);
@@ -2107,24 +2136,27 @@ static void nvme_shutdown(struct pci_dev *pdev)
2107 nvme_dev_disable(dev, true); 2136 nvme_dev_disable(dev, true);
2108} 2137}
2109 2138
2139/*
2140 * The driver's remove may be called on a device in a partially initialized
2141 * state. This function must not have any dependencies on the device state in
2142 * order to proceed.
2143 */
2110static void nvme_remove(struct pci_dev *pdev) 2144static void nvme_remove(struct pci_dev *pdev)
2111{ 2145{
2112 struct nvme_dev *dev = pci_get_drvdata(pdev); 2146 struct nvme_dev *dev = pci_get_drvdata(pdev);
2113 2147
2114 spin_lock(&dev_list_lock); 2148 set_bit(NVME_CTRL_REMOVING, &dev->flags);
2115 list_del_init(&dev->node);
2116 spin_unlock(&dev_list_lock);
2117
2118 pci_set_drvdata(pdev, NULL); 2149 pci_set_drvdata(pdev, NULL);
2119 flush_work(&dev->reset_work);
2120 flush_work(&dev->scan_work); 2150 flush_work(&dev->scan_work);
2121 nvme_remove_namespaces(&dev->ctrl); 2151 nvme_remove_namespaces(&dev->ctrl);
2122 nvme_uninit_ctrl(&dev->ctrl); 2152 nvme_uninit_ctrl(&dev->ctrl);
2123 nvme_dev_disable(dev, true); 2153 nvme_dev_disable(dev, true);
2154 flush_work(&dev->reset_work);
2124 nvme_dev_remove_admin(dev); 2155 nvme_dev_remove_admin(dev);
2125 nvme_free_queues(dev, 0); 2156 nvme_free_queues(dev, 0);
2126 nvme_release_cmb(dev); 2157 nvme_release_cmb(dev);
2127 nvme_release_prp_pools(dev); 2158 nvme_release_prp_pools(dev);
2159 nvme_dev_unmap(dev);
2128 nvme_put_ctrl(&dev->ctrl); 2160 nvme_put_ctrl(&dev->ctrl);
2129} 2161}
2130 2162
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 6fd4e5a5ef4a..9d11d9837312 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -70,6 +70,9 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
70 if (pos >= nvmem->size) 70 if (pos >= nvmem->size)
71 return 0; 71 return 0;
72 72
73 if (count < nvmem->word_size)
74 return -EINVAL;
75
73 if (pos + count > nvmem->size) 76 if (pos + count > nvmem->size)
74 count = nvmem->size - pos; 77 count = nvmem->size - pos;
75 78
@@ -95,6 +98,9 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
95 if (pos >= nvmem->size) 98 if (pos >= nvmem->size)
96 return 0; 99 return 0;
97 100
101 if (count < nvmem->word_size)
102 return -EINVAL;
103
98 if (pos + count > nvmem->size) 104 if (pos + count > nvmem->size)
99 count = nvmem->size - pos; 105 count = nvmem->size - pos;
100 106
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index afb67e7eeee4..3829e5fbf8c3 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -21,6 +21,7 @@ static struct regmap_config qfprom_regmap_config = {
21 .reg_bits = 32, 21 .reg_bits = 32,
22 .val_bits = 8, 22 .val_bits = 8,
23 .reg_stride = 1, 23 .reg_stride = 1,
24 .val_format_endian = REGMAP_ENDIAN_LITTLE,
24}; 25};
25 26
26static struct nvmem_config econfig = { 27static struct nvmem_config econfig = {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 7ee21ae305ae..e7bfc175b8e1 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -635,6 +635,13 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
635 msi_base = be32_to_cpup(msi_map + 2); 635 msi_base = be32_to_cpup(msi_map + 2);
636 rid_len = be32_to_cpup(msi_map + 3); 636 rid_len = be32_to_cpup(msi_map + 3);
637 637
638 if (rid_base & ~map_mask) {
639 dev_err(parent_dev,
640 "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
641 map_mask, rid_base);
642 return rid_out;
643 }
644
638 msi_controller_node = of_find_node_by_phandle(phandle); 645 msi_controller_node = of_find_node_by_phandle(phandle);
639 646
640 matched = (masked_rid >= rid_base && 647 matched = (masked_rid >= rid_base &&
@@ -654,7 +661,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
654 if (!matched) 661 if (!matched)
655 return rid_out; 662 return rid_out;
656 663
657 rid_out = masked_rid + msi_base; 664 rid_out = masked_rid - rid_base + msi_base;
658 dev_dbg(dev, 665 dev_dbg(dev,
659 "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n", 666 "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
660 dev_name(parent_dev), map_mask, rid_base, msi_base, 667 dev_name(parent_dev), map_mask, rid_base, msi_base,
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 5648317d355f..365dc7e83ab4 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -154,6 +154,7 @@ static const struct of_device_id whitelist_phys[] = {
154 { .compatible = "marvell,88E1111", }, 154 { .compatible = "marvell,88E1111", },
155 { .compatible = "marvell,88e1116", }, 155 { .compatible = "marvell,88e1116", },
156 { .compatible = "marvell,88e1118", }, 156 { .compatible = "marvell,88e1118", },
157 { .compatible = "marvell,88e1145", },
157 { .compatible = "marvell,88e1149r", }, 158 { .compatible = "marvell,88e1149r", },
158 { .compatible = "marvell,88e1310", }, 159 { .compatible = "marvell,88e1310", },
159 { .compatible = "marvell,88E1510", }, 160 { .compatible = "marvell,88E1510", },
@@ -304,6 +305,7 @@ EXPORT_SYMBOL(of_phy_find_device);
304 * @dev: pointer to net_device claiming the phy 305 * @dev: pointer to net_device claiming the phy
305 * @phy_np: Pointer to device tree node for the PHY 306 * @phy_np: Pointer to device tree node for the PHY
306 * @hndlr: Link state callback for the network device 307 * @hndlr: Link state callback for the network device
308 * @flags: flags to pass to the PHY
307 * @iface: PHY data interface type 309 * @iface: PHY data interface type
308 * 310 *
309 * If successful, returns a pointer to the phy_device with the embedded 311 * If successful, returns a pointer to the phy_device with the embedded
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 75a605426538..d1cdd9c992ac 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -14,6 +14,7 @@ config PCI_DRA7XX
14config PCI_MVEBU 14config PCI_MVEBU
15 bool "Marvell EBU PCIe controller" 15 bool "Marvell EBU PCIe controller"
16 depends on ARCH_MVEBU || ARCH_DOVE 16 depends on ARCH_MVEBU || ARCH_DOVE
17 depends on ARM
17 depends on OF 18 depends on OF
18 19
19config PCIE_DW 20config PCIE_DW
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
index ed34c9520a02..6153853ca9c3 100644
--- a/drivers/pci/host/pci-keystone-dw.c
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -58,11 +58,6 @@
58 58
59#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp) 59#define to_keystone_pcie(x) container_of(x, struct keystone_pcie, pp)
60 60
61static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
62{
63 return sys->private_data;
64}
65
66static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, 61static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
67 u32 *bit_pos) 62 u32 *bit_pos)
68{ 63{
@@ -108,7 +103,7 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
108 struct pcie_port *pp; 103 struct pcie_port *pp;
109 104
110 msi = irq_data_get_msi_desc(d); 105 msi = irq_data_get_msi_desc(d);
111 pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 106 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
112 ks_pcie = to_keystone_pcie(pp); 107 ks_pcie = to_keystone_pcie(pp);
113 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 108 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
114 update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos); 109 update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
@@ -146,7 +141,7 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
146 u32 offset; 141 u32 offset;
147 142
148 msi = irq_data_get_msi_desc(d); 143 msi = irq_data_get_msi_desc(d);
149 pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 144 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
150 ks_pcie = to_keystone_pcie(pp); 145 ks_pcie = to_keystone_pcie(pp);
151 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 146 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
152 147
@@ -167,7 +162,7 @@ static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
167 u32 offset; 162 u32 offset;
168 163
169 msi = irq_data_get_msi_desc(d); 164 msi = irq_data_get_msi_desc(d);
170 pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); 165 pp = (struct pcie_port *) msi_desc_to_pci_sysdata(msi);
171 ks_pcie = to_keystone_pcie(pp); 166 ks_pcie = to_keystone_pcie(pp);
172 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); 167 offset = d->irq - irq_linear_revmap(pp->irq_domain, 0);
173 168
diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c
index 3923bed93c7e..f39961bcf7aa 100644
--- a/drivers/pci/host/pci-layerscape.c
+++ b/drivers/pci/host/pci-layerscape.c
@@ -77,6 +77,16 @@ static void ls_pcie_fix_class(struct ls_pcie *pcie)
77 iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE); 77 iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
78} 78}
79 79
80/* Drop MSG TLP except for Vendor MSG */
81static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
82{
83 u32 val;
84
85 val = ioread32(pcie->dbi + PCIE_STRFMR1);
86 val &= 0xDFFFFFFF;
87 iowrite32(val, pcie->dbi + PCIE_STRFMR1);
88}
89
80static int ls1021_pcie_link_up(struct pcie_port *pp) 90static int ls1021_pcie_link_up(struct pcie_port *pp)
81{ 91{
82 u32 state; 92 u32 state;
@@ -97,7 +107,7 @@ static int ls1021_pcie_link_up(struct pcie_port *pp)
97static void ls1021_pcie_host_init(struct pcie_port *pp) 107static void ls1021_pcie_host_init(struct pcie_port *pp)
98{ 108{
99 struct ls_pcie *pcie = to_ls_pcie(pp); 109 struct ls_pcie *pcie = to_ls_pcie(pp);
100 u32 val, index[2]; 110 u32 index[2];
101 111
102 pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node, 112 pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node,
103 "fsl,pcie-scfg"); 113 "fsl,pcie-scfg");
@@ -116,13 +126,7 @@ static void ls1021_pcie_host_init(struct pcie_port *pp)
116 126
117 dw_pcie_setup_rc(pp); 127 dw_pcie_setup_rc(pp);
118 128
119 /* 129 ls_pcie_drop_msg_tlp(pcie);
120 * LS1021A Workaround for internal TKT228622
121 * to fix the INTx hang issue
122 */
123 val = ioread32(pcie->dbi + PCIE_STRFMR1);
124 val &= 0xffff;
125 iowrite32(val, pcie->dbi + PCIE_STRFMR1);
126} 130}
127 131
128static int ls_pcie_link_up(struct pcie_port *pp) 132static int ls_pcie_link_up(struct pcie_port *pp)
@@ -147,6 +151,7 @@ static void ls_pcie_host_init(struct pcie_port *pp)
147 iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN); 151 iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
148 ls_pcie_fix_class(pcie); 152 ls_pcie_fix_class(pcie);
149 ls_pcie_clear_multifunction(pcie); 153 ls_pcie_clear_multifunction(pcie);
154 ls_pcie_drop_msg_tlp(pcie);
150 iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); 155 iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
151} 156}
152 157
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 5816bceddb65..a576aeeb22da 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -64,7 +64,6 @@
64#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT) 64#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
65 65
66#define MAX_NUM_OB_WINDOWS 2 66#define MAX_NUM_OB_WINDOWS 2
67#define MAX_NUM_PAXC_PF 4
68 67
69#define IPROC_PCIE_REG_INVALID 0xffff 68#define IPROC_PCIE_REG_INVALID 0xffff
70 69
@@ -170,20 +169,6 @@ static inline void iproc_pcie_ob_write(struct iproc_pcie *pcie,
170 writel(val, pcie->base + offset + (window * 8)); 169 writel(val, pcie->base + offset + (window * 8));
171} 170}
172 171
173static inline bool iproc_pcie_device_is_valid(struct iproc_pcie *pcie,
174 unsigned int slot,
175 unsigned int fn)
176{
177 if (slot > 0)
178 return false;
179
180 /* PAXC can only support limited number of functions */
181 if (pcie->type == IPROC_PCIE_PAXC && fn >= MAX_NUM_PAXC_PF)
182 return false;
183
184 return true;
185}
186
187/** 172/**
188 * Note access to the configuration registers are protected at the higher layer 173 * Note access to the configuration registers are protected at the higher layer
189 * by 'pci_lock' in drivers/pci/access.c 174 * by 'pci_lock' in drivers/pci/access.c
@@ -199,11 +184,11 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
199 u32 val; 184 u32 val;
200 u16 offset; 185 u16 offset;
201 186
202 if (!iproc_pcie_device_is_valid(pcie, slot, fn))
203 return NULL;
204
205 /* root complex access */ 187 /* root complex access */
206 if (busno == 0) { 188 if (busno == 0) {
189 if (slot > 0 || fn > 0)
190 return NULL;
191
207 iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR, 192 iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
208 where & CFG_IND_ADDR_MASK); 193 where & CFG_IND_ADDR_MASK);
209 offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA); 194 offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
@@ -213,6 +198,14 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus,
213 return (pcie->base + offset); 198 return (pcie->base + offset);
214 } 199 }
215 200
201 /*
202 * PAXC is connected to an internally emulated EP within the SoC. It
203 * allows only one device.
204 */
205 if (pcie->type == IPROC_PCIE_PAXC)
206 if (slot > 0)
207 return NULL;
208
216 /* EP device access */ 209 /* EP device access */
217 val = (busno << CFG_ADDR_BUS_NUM_SHIFT) | 210 val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
218 (slot << CFG_ADDR_DEV_NUM_SHIFT) | 211 (slot << CFG_ADDR_DEV_NUM_SHIFT) |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 602eb4223510..f89db3af0607 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4772,8 +4772,10 @@ int pci_get_new_domain_nr(void)
4772void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent) 4772void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
4773{ 4773{
4774 static int use_dt_domains = -1; 4774 static int use_dt_domains = -1;
4775 int domain = of_get_pci_domain_nr(parent->of_node); 4775 int domain = -1;
4776 4776
4777 if (parent)
4778 domain = of_get_pci_domain_nr(parent->of_node);
4777 /* 4779 /*
4778 * Check DT domain and use_dt_domains values. 4780 * Check DT domain and use_dt_domains values.
4779 * 4781 *
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 0bf82a20a0fb..48d21e0edd56 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
262 rpc->rpd = dev; 262 rpc->rpd = dev;
263 INIT_WORK(&rpc->dpc_handler, aer_isr); 263 INIT_WORK(&rpc->dpc_handler, aer_isr);
264 mutex_init(&rpc->rpc_mutex); 264 mutex_init(&rpc->rpc_mutex);
265 init_waitqueue_head(&rpc->wait_release);
266 265
267 /* Use PCIe bus function to store rpc into PCIe device */ 266 /* Use PCIe bus function to store rpc into PCIe device */
268 set_service_data(dev, rpc); 267 set_service_data(dev, rpc);
@@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
285 if (rpc->isr) 284 if (rpc->isr)
286 free_irq(dev->irq, dev); 285 free_irq(dev->irq, dev);
287 286
288 wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx); 287 flush_work(&rpc->dpc_handler);
289
290 aer_disable_rootport(rpc); 288 aer_disable_rootport(rpc);
291 kfree(rpc); 289 kfree(rpc);
292 set_service_data(dev, NULL); 290 set_service_data(dev, NULL);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 84420b7c9456..945c939a86c5 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -72,7 +72,6 @@ struct aer_rpc {
72 * recovery on the same 72 * recovery on the same
73 * root port hierarchy 73 * root port hierarchy
74 */ 74 */
75 wait_queue_head_t wait_release;
76}; 75};
77 76
78struct aer_broadcast_data { 77struct aer_broadcast_data {
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 712392504ed9..521e39c1b66d 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work)
811 while (get_e_source(rpc, &e_src)) 811 while (get_e_source(rpc, &e_src))
812 aer_isr_one_error(p_device, &e_src); 812 aer_isr_one_error(p_device, &e_src);
813 mutex_unlock(&rpc->rpc_mutex); 813 mutex_unlock(&rpc->rpc_mutex);
814
815 wake_up(&rpc->wait_release);
816} 814}
817 815
818/** 816/**
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index c777b97207d5..5f70fee59a94 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -53,7 +53,7 @@ struct pcifront_device {
53}; 53};
54 54
55struct pcifront_sd { 55struct pcifront_sd {
56 int domain; 56 struct pci_sysdata sd;
57 struct pcifront_device *pdev; 57 struct pcifront_device *pdev;
58}; 58};
59 59
@@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
67 unsigned int domain, unsigned int bus, 67 unsigned int domain, unsigned int bus,
68 struct pcifront_device *pdev) 68 struct pcifront_device *pdev)
69{ 69{
70 sd->domain = domain; 70 /* Because we do not expose that information via XenBus. */
71 sd->sd.node = first_online_node;
72 sd->sd.domain = domain;
71 sd->pdev = pdev; 73 sd->pdev = pdev;
72} 74}
73 75
@@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
468 dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n", 470 dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
469 domain, bus); 471 domain, bus);
470 472
471 bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL); 473 bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
472 sd = kmalloc(sizeof(*sd), GFP_KERNEL); 474 sd = kzalloc(sizeof(*sd), GFP_KERNEL);
473 if (!bus_entry || !sd) { 475 if (!bus_entry || !sd) {
474 err = -ENOMEM; 476 err = -ENOMEM;
475 goto err_out; 477 goto err_out;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index e7e117d5dbbe..0124d17bd9fe 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -224,6 +224,7 @@ config PHY_MT65XX_USB3
224 224
225config PHY_HI6220_USB 225config PHY_HI6220_USB
226 tristate "hi6220 USB PHY support" 226 tristate "hi6220 USB PHY support"
227 depends on (ARCH_HISI && ARM64) || COMPILE_TEST
227 select GENERIC_PHY 228 select GENERIC_PHY
228 select MFD_SYSCON 229 select MFD_SYSCON
229 help 230 help
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 8c7f27db6ad3..e7e574dc667a 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit);
275 275
276int phy_power_on(struct phy *phy) 276int phy_power_on(struct phy *phy)
277{ 277{
278 int ret; 278 int ret = 0;
279 279
280 if (!phy) 280 if (!phy)
281 return 0; 281 goto out;
282 282
283 if (phy->pwr) { 283 if (phy->pwr) {
284 ret = regulator_enable(phy->pwr); 284 ret = regulator_enable(phy->pwr);
285 if (ret) 285 if (ret)
286 return ret; 286 goto out;
287 } 287 }
288 288
289 ret = phy_pm_runtime_get_sync(phy); 289 ret = phy_pm_runtime_get_sync(phy);
290 if (ret < 0 && ret != -ENOTSUPP) 290 if (ret < 0 && ret != -ENOTSUPP)
291 return ret; 291 goto err_pm_sync;
292
292 ret = 0; /* Override possible ret == -ENOTSUPP */ 293 ret = 0; /* Override possible ret == -ENOTSUPP */
293 294
294 mutex_lock(&phy->mutex); 295 mutex_lock(&phy->mutex);
@@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy)
296 ret = phy->ops->power_on(phy); 297 ret = phy->ops->power_on(phy);
297 if (ret < 0) { 298 if (ret < 0) {
298 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret); 299 dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
299 goto out; 300 goto err_pwr_on;
300 } 301 }
301 } 302 }
302 ++phy->power_count; 303 ++phy->power_count;
303 mutex_unlock(&phy->mutex); 304 mutex_unlock(&phy->mutex);
304 return 0; 305 return 0;
305 306
306out: 307err_pwr_on:
307 mutex_unlock(&phy->mutex); 308 mutex_unlock(&phy->mutex);
308 phy_pm_runtime_put_sync(phy); 309 phy_pm_runtime_put_sync(phy);
310err_pm_sync:
309 if (phy->pwr) 311 if (phy->pwr)
310 regulator_disable(phy->pwr); 312 regulator_disable(phy->pwr);
311 313out:
312 return ret; 314 return ret;
313} 315}
314EXPORT_SYMBOL_GPL(phy_power_on); 316EXPORT_SYMBOL_GPL(phy_power_on);
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 4a3fc6e59f8e..840f3eae428b 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -715,6 +715,7 @@ static int twl4030_usb_probe(struct platform_device *pdev)
715 pm_runtime_use_autosuspend(&pdev->dev); 715 pm_runtime_use_autosuspend(&pdev->dev);
716 pm_runtime_set_autosuspend_delay(&pdev->dev, 2000); 716 pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
717 pm_runtime_enable(&pdev->dev); 717 pm_runtime_enable(&pdev->dev);
718 pm_runtime_get_sync(&pdev->dev);
718 719
719 /* Our job is to use irqs and status from the power module 720 /* Our job is to use irqs and status from the power module
720 * to keep the transceiver disabled when nothing's connected. 721 * to keep the transceiver disabled when nothing's connected.
@@ -750,6 +751,7 @@ static int twl4030_usb_remove(struct platform_device *pdev)
750 struct twl4030_usb *twl = platform_get_drvdata(pdev); 751 struct twl4030_usb *twl = platform_get_drvdata(pdev);
751 int val; 752 int val;
752 753
754 usb_remove_phy(&twl->phy);
753 pm_runtime_get_sync(twl->dev); 755 pm_runtime_get_sync(twl->dev);
754 cancel_delayed_work(&twl->id_workaround_work); 756 cancel_delayed_work(&twl->id_workaround_work);
755 device_remove_file(twl->dev, &dev_attr_vbus); 757 device_remove_file(twl->dev, &dev_attr_vbus);
@@ -757,6 +759,13 @@ static int twl4030_usb_remove(struct platform_device *pdev)
757 /* set transceiver mode to power on defaults */ 759 /* set transceiver mode to power on defaults */
758 twl4030_usb_set_mode(twl, -1); 760 twl4030_usb_set_mode(twl, -1);
759 761
762 /* idle ulpi before powering off */
763 if (cable_present(twl->linkstat))
764 pm_runtime_put_noidle(twl->dev);
765 pm_runtime_mark_last_busy(twl->dev);
766 pm_runtime_put_sync_suspend(twl->dev);
767 pm_runtime_disable(twl->dev);
768
760 /* autogate 60MHz ULPI clock, 769 /* autogate 60MHz ULPI clock,
761 * clear dpll clock request for i2c access, 770 * clear dpll clock request for i2c access,
762 * disable 32KHz 771 * disable 32KHz
@@ -771,11 +780,6 @@ static int twl4030_usb_remove(struct platform_device *pdev)
771 /* disable complete OTG block */ 780 /* disable complete OTG block */
772 twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB); 781 twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
773 782
774 if (cable_present(twl->linkstat))
775 pm_runtime_put_noidle(twl->dev);
776 pm_runtime_mark_last_busy(twl->dev);
777 pm_runtime_put(twl->dev);
778
779 return 0; 783 return 0;
780} 784}
781 785
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 16d48a4ed225..e96e86d2e745 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -347,6 +347,7 @@ static int mtk_pconf_parse_conf(struct pinctrl_dev *pctldev,
347 ret = mtk_pconf_set_pull_select(pctl, pin, true, false, arg); 347 ret = mtk_pconf_set_pull_select(pctl, pin, true, false, arg);
348 break; 348 break;
349 case PIN_CONFIG_INPUT_ENABLE: 349 case PIN_CONFIG_INPUT_ENABLE:
350 mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
350 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param); 351 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
351 break; 352 break;
352 case PIN_CONFIG_OUTPUT: 353 case PIN_CONFIG_OUTPUT:
@@ -354,6 +355,7 @@ static int mtk_pconf_parse_conf(struct pinctrl_dev *pctldev,
354 ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false); 355 ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false);
355 break; 356 break;
356 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 357 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
358 mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
357 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param); 359 ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
358 break; 360 break;
359 case PIN_CONFIG_DRIVE_STRENGTH: 361 case PIN_CONFIG_DRIVE_STRENGTH:
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index e4d473811bb3..3ef798fac81b 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -666,16 +666,19 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
666 struct mvebu_mpp_ctrl_setting *set = &mode->settings[0]; 666 struct mvebu_mpp_ctrl_setting *set = &mode->settings[0];
667 struct mvebu_pinctrl_group *grp; 667 struct mvebu_pinctrl_group *grp;
668 unsigned num_settings; 668 unsigned num_settings;
669 unsigned supp_settings;
669 670
670 for (num_settings = 0; ; set++) { 671 for (num_settings = 0, supp_settings = 0; ; set++) {
671 if (!set->name) 672 if (!set->name)
672 break; 673 break;
673 674
675 num_settings++;
676
674 /* skip unsupported settings for this variant */ 677 /* skip unsupported settings for this variant */
675 if (pctl->variant && !(pctl->variant & set->variant)) 678 if (pctl->variant && !(pctl->variant & set->variant))
676 continue; 679 continue;
677 680
678 num_settings++; 681 supp_settings++;
679 682
680 /* find gpio/gpo/gpi settings */ 683 /* find gpio/gpo/gpi settings */
681 if (strcmp(set->name, "gpio") == 0) 684 if (strcmp(set->name, "gpio") == 0)
@@ -688,7 +691,7 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
688 } 691 }
689 692
690 /* skip modes with no settings for this variant */ 693 /* skip modes with no settings for this variant */
691 if (!num_settings) 694 if (!supp_settings)
692 continue; 695 continue;
693 696
694 grp = mvebu_pinctrl_find_group_by_pid(pctl, mode->pid); 697 grp = mvebu_pinctrl_find_group_by_pid(pctl, mode->pid);
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 085e60106ec2..1f7469c9857d 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -191,6 +191,7 @@ static void abx500_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
191 dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret); 191 dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret);
192} 192}
193 193
194#ifdef CONFIG_DEBUG_FS
194static int abx500_get_pull_updown(struct abx500_pinctrl *pct, int offset, 195static int abx500_get_pull_updown(struct abx500_pinctrl *pct, int offset,
195 enum abx500_gpio_pull_updown *pull_updown) 196 enum abx500_gpio_pull_updown *pull_updown)
196{ 197{
@@ -226,6 +227,7 @@ out:
226 227
227 return ret; 228 return ret;
228} 229}
230#endif
229 231
230static int abx500_set_pull_updown(struct abx500_pinctrl *pct, 232static int abx500_set_pull_updown(struct abx500_pinctrl *pct,
231 int offset, enum abx500_gpio_pull_updown val) 233 int offset, enum abx500_gpio_pull_updown val)
@@ -468,6 +470,7 @@ out:
468 return ret; 470 return ret;
469} 471}
470 472
473#ifdef CONFIG_DEBUG_FS
471static int abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip, 474static int abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
472 unsigned gpio) 475 unsigned gpio)
473{ 476{
@@ -553,8 +556,6 @@ out:
553 return ret; 556 return ret;
554} 557}
555 558
556#ifdef CONFIG_DEBUG_FS
557
558#include <linux/seq_file.h> 559#include <linux/seq_file.h>
559 560
560static void abx500_gpio_dbg_show_one(struct seq_file *s, 561static void abx500_gpio_dbg_show_one(struct seq_file *s,
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index d90e205cf809..216f227c6009 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -426,6 +426,7 @@ int pxa2xx_pinctrl_init(struct platform_device *pdev,
426 426
427 return 0; 427 return 0;
428} 428}
429EXPORT_SYMBOL(pxa2xx_pinctrl_init);
429 430
430int pxa2xx_pinctrl_exit(struct platform_device *pdev) 431int pxa2xx_pinctrl_exit(struct platform_device *pdev)
431{ 432{
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index f67b1e958589..5cc97f85db02 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -514,25 +514,35 @@ static const struct pinconf_ops samsung_pinconf_ops = {
514 .pin_config_group_set = samsung_pinconf_group_set, 514 .pin_config_group_set = samsung_pinconf_group_set,
515}; 515};
516 516
517/* gpiolib gpio_set callback function */ 517/*
518static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value) 518 * The samsung_gpio_set_vlaue() should be called with "bank->slock" held
519 * to avoid race condition.
520 */
521static void samsung_gpio_set_value(struct gpio_chip *gc,
522 unsigned offset, int value)
519{ 523{
520 struct samsung_pin_bank *bank = gpiochip_get_data(gc); 524 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
521 const struct samsung_pin_bank_type *type = bank->type; 525 const struct samsung_pin_bank_type *type = bank->type;
522 unsigned long flags;
523 void __iomem *reg; 526 void __iomem *reg;
524 u32 data; 527 u32 data;
525 528
526 reg = bank->drvdata->virt_base + bank->pctl_offset; 529 reg = bank->drvdata->virt_base + bank->pctl_offset;
527 530
528 spin_lock_irqsave(&bank->slock, flags);
529
530 data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]); 531 data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]);
531 data &= ~(1 << offset); 532 data &= ~(1 << offset);
532 if (value) 533 if (value)
533 data |= 1 << offset; 534 data |= 1 << offset;
534 writel(data, reg + type->reg_offset[PINCFG_TYPE_DAT]); 535 writel(data, reg + type->reg_offset[PINCFG_TYPE_DAT]);
536}
537
538/* gpiolib gpio_set callback function */
539static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
540{
541 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
542 unsigned long flags;
535 543
544 spin_lock_irqsave(&bank->slock, flags);
545 samsung_gpio_set_value(gc, offset, value);
536 spin_unlock_irqrestore(&bank->slock, flags); 546 spin_unlock_irqrestore(&bank->slock, flags);
537} 547}
538 548
@@ -553,6 +563,8 @@ static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
553} 563}
554 564
555/* 565/*
566 * The samsung_gpio_set_direction() should be called with "bank->slock" held
567 * to avoid race condition.
556 * The calls to gpio_direction_output() and gpio_direction_input() 568 * The calls to gpio_direction_output() and gpio_direction_input()
557 * leads to this function call. 569 * leads to this function call.
558 */ 570 */
@@ -564,7 +576,6 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
564 struct samsung_pinctrl_drv_data *drvdata; 576 struct samsung_pinctrl_drv_data *drvdata;
565 void __iomem *reg; 577 void __iomem *reg;
566 u32 data, mask, shift; 578 u32 data, mask, shift;
567 unsigned long flags;
568 579
569 bank = gpiochip_get_data(gc); 580 bank = gpiochip_get_data(gc);
570 type = bank->type; 581 type = bank->type;
@@ -581,31 +592,42 @@ static int samsung_gpio_set_direction(struct gpio_chip *gc,
581 reg += 4; 592 reg += 4;
582 } 593 }
583 594
584 spin_lock_irqsave(&bank->slock, flags);
585
586 data = readl(reg); 595 data = readl(reg);
587 data &= ~(mask << shift); 596 data &= ~(mask << shift);
588 if (!input) 597 if (!input)
589 data |= FUNC_OUTPUT << shift; 598 data |= FUNC_OUTPUT << shift;
590 writel(data, reg); 599 writel(data, reg);
591 600
592 spin_unlock_irqrestore(&bank->slock, flags);
593
594 return 0; 601 return 0;
595} 602}
596 603
597/* gpiolib gpio_direction_input callback function. */ 604/* gpiolib gpio_direction_input callback function. */
598static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset) 605static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
599{ 606{
600 return samsung_gpio_set_direction(gc, offset, true); 607 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
608 unsigned long flags;
609 int ret;
610
611 spin_lock_irqsave(&bank->slock, flags);
612 ret = samsung_gpio_set_direction(gc, offset, true);
613 spin_unlock_irqrestore(&bank->slock, flags);
614 return ret;
601} 615}
602 616
603/* gpiolib gpio_direction_output callback function. */ 617/* gpiolib gpio_direction_output callback function. */
604static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset, 618static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
605 int value) 619 int value)
606{ 620{
607 samsung_gpio_set(gc, offset, value); 621 struct samsung_pin_bank *bank = gpiochip_get_data(gc);
608 return samsung_gpio_set_direction(gc, offset, false); 622 unsigned long flags;
623 int ret;
624
625 spin_lock_irqsave(&bank->slock, flags);
626 samsung_gpio_set_value(gc, offset, value);
627 ret = samsung_gpio_set_direction(gc, offset, false);
628 spin_unlock_irqrestore(&bank->slock, flags);
629
630 return ret;
609} 631}
610 632
611/* 633/*
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
index 77d4cf047cee..11760bbe9d51 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
@@ -492,6 +492,7 @@ static const struct sunxi_pinctrl_desc sun8i_h3_pinctrl_data = {
492 .pins = sun8i_h3_pins, 492 .pins = sun8i_h3_pins,
493 .npins = ARRAY_SIZE(sun8i_h3_pins), 493 .npins = ARRAY_SIZE(sun8i_h3_pins),
494 .irq_banks = 2, 494 .irq_banks = 2,
495 .irq_read_needs_mux = true
495}; 496};
496 497
497static int sun8i_h3_pinctrl_probe(struct platform_device *pdev) 498static int sun8i_h3_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 20f0ad9bb9f3..e20f23e04c24 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -41,8 +41,7 @@ static const struct key_entry intel_hid_keymap[] = {
41 { KE_KEY, 4, { KEY_HOME } }, 41 { KE_KEY, 4, { KEY_HOME } },
42 { KE_KEY, 5, { KEY_END } }, 42 { KE_KEY, 5, { KEY_END } },
43 { KE_KEY, 6, { KEY_PAGEUP } }, 43 { KE_KEY, 6, { KEY_PAGEUP } },
44 { KE_KEY, 4, { KEY_PAGEDOWN } }, 44 { KE_KEY, 7, { KEY_PAGEDOWN } },
45 { KE_KEY, 4, { KEY_HOME } },
46 { KE_KEY, 8, { KEY_RFKILL } }, 45 { KE_KEY, 8, { KEY_RFKILL } },
47 { KE_KEY, 9, { KEY_POWER } }, 46 { KE_KEY, 9, { KEY_POWER } },
48 { KE_KEY, 11, { KEY_SLEEP } }, 47 { KE_KEY, 11, { KEY_SLEEP } },
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index 02bc5a6343c3..aa454241489c 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -49,7 +49,7 @@ struct scu_ipc_data {
49 49
50static int scu_reg_access(u32 cmd, struct scu_ipc_data *data) 50static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
51{ 51{
52 int count = data->count; 52 unsigned int count = data->count;
53 53
54 if (count == 0 || count == 3 || count > 4) 54 if (count == 0 || count == 3 || count > 4)
55 return -EINVAL; 55 return -EINVAL;
diff --git a/drivers/power/bq27xxx_battery_i2c.c b/drivers/power/bq27xxx_battery_i2c.c
index 9429e66be096..8eafc6f0df88 100644
--- a/drivers/power/bq27xxx_battery_i2c.c
+++ b/drivers/power/bq27xxx_battery_i2c.c
@@ -21,6 +21,9 @@
21 21
22#include <linux/power/bq27xxx_battery.h> 22#include <linux/power/bq27xxx_battery.h>
23 23
24static DEFINE_IDR(battery_id);
25static DEFINE_MUTEX(battery_mutex);
26
24static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data) 27static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data)
25{ 28{
26 struct bq27xxx_device_info *di = data; 29 struct bq27xxx_device_info *di = data;
@@ -70,19 +73,33 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
70{ 73{
71 struct bq27xxx_device_info *di; 74 struct bq27xxx_device_info *di;
72 int ret; 75 int ret;
76 char *name;
77 int num;
78
79 /* Get new ID for the new battery device */
80 mutex_lock(&battery_mutex);
81 num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
82 mutex_unlock(&battery_mutex);
83 if (num < 0)
84 return num;
85
86 name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
87 if (!name)
88 goto err_mem;
73 89
74 di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL); 90 di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
75 if (!di) 91 if (!di)
76 return -ENOMEM; 92 goto err_mem;
77 93
94 di->id = num;
78 di->dev = &client->dev; 95 di->dev = &client->dev;
79 di->chip = id->driver_data; 96 di->chip = id->driver_data;
80 di->name = id->name; 97 di->name = name;
81 di->bus.read = bq27xxx_battery_i2c_read; 98 di->bus.read = bq27xxx_battery_i2c_read;
82 99
83 ret = bq27xxx_battery_setup(di); 100 ret = bq27xxx_battery_setup(di);
84 if (ret) 101 if (ret)
85 return ret; 102 goto err_failed;
86 103
87 /* Schedule a polling after about 1 min */ 104 /* Schedule a polling after about 1 min */
88 schedule_delayed_work(&di->work, 60 * HZ); 105 schedule_delayed_work(&di->work, 60 * HZ);
@@ -103,6 +120,16 @@ static int bq27xxx_battery_i2c_probe(struct i2c_client *client,
103 } 120 }
104 121
105 return 0; 122 return 0;
123
124err_mem:
125 ret = -ENOMEM;
126
127err_failed:
128 mutex_lock(&battery_mutex);
129 idr_remove(&battery_id, num);
130 mutex_unlock(&battery_mutex);
131
132 return ret;
106} 133}
107 134
108static int bq27xxx_battery_i2c_remove(struct i2c_client *client) 135static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
@@ -111,6 +138,10 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
111 138
112 bq27xxx_battery_teardown(di); 139 bq27xxx_battery_teardown(di);
113 140
141 mutex_lock(&battery_mutex);
142 idr_remove(&battery_id, di->id);
143 mutex_unlock(&battery_mutex);
144
114 return 0; 145 return 0;
115} 146}
116 147
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 41605dac8309..c78db05e75b1 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3035,6 +3035,7 @@ static void dasd_setup_queue(struct dasd_block *block)
3035 max = block->base->discipline->max_blocks << block->s2b_shift; 3035 max = block->base->discipline->max_blocks << block->s2b_shift;
3036 } 3036 }
3037 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue); 3037 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
3038 block->request_queue->limits.max_dev_sectors = max;
3038 blk_queue_logical_block_size(block->request_queue, 3039 blk_queue_logical_block_size(block->request_queue,
3039 block->bp_block); 3040 block->bp_block);
3040 blk_queue_max_hw_sectors(block->request_queue, max); 3041 blk_queue_max_hw_sectors(block->request_queue, max);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 184b1dbeb554..286782c60da4 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
264 spin_unlock_irqrestore(&lcu->lock, flags); 264 spin_unlock_irqrestore(&lcu->lock, flags);
265 cancel_work_sync(&lcu->suc_data.worker); 265 cancel_work_sync(&lcu->suc_data.worker);
266 spin_lock_irqsave(&lcu->lock, flags); 266 spin_lock_irqsave(&lcu->lock, flags);
267 if (device == lcu->suc_data.device) 267 if (device == lcu->suc_data.device) {
268 dasd_put_device(device);
268 lcu->suc_data.device = NULL; 269 lcu->suc_data.device = NULL;
270 }
269 } 271 }
270 was_pending = 0; 272 was_pending = 0;
271 if (device == lcu->ruac_data.device) { 273 if (device == lcu->ruac_data.device) {
@@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
273 was_pending = 1; 275 was_pending = 1;
274 cancel_delayed_work_sync(&lcu->ruac_data.dwork); 276 cancel_delayed_work_sync(&lcu->ruac_data.dwork);
275 spin_lock_irqsave(&lcu->lock, flags); 277 spin_lock_irqsave(&lcu->lock, flags);
276 if (device == lcu->ruac_data.device) 278 if (device == lcu->ruac_data.device) {
279 dasd_put_device(device);
277 lcu->ruac_data.device = NULL; 280 lcu->ruac_data.device = NULL;
281 }
278 } 282 }
279 private->lcu = NULL; 283 private->lcu = NULL;
280 spin_unlock_irqrestore(&lcu->lock, flags); 284 spin_unlock_irqrestore(&lcu->lock, flags);
@@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
549 if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) { 553 if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
550 DBF_DEV_EVENT(DBF_WARNING, device, "could not update" 554 DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
551 " alias data in lcu (rc = %d), retry later", rc); 555 " alias data in lcu (rc = %d), retry later", rc);
552 schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); 556 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
557 dasd_put_device(device);
553 } else { 558 } else {
559 dasd_put_device(device);
554 lcu->ruac_data.device = NULL; 560 lcu->ruac_data.device = NULL;
555 lcu->flags &= ~UPDATE_PENDING; 561 lcu->flags &= ~UPDATE_PENDING;
556 } 562 }
@@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
593 */ 599 */
594 if (!usedev) 600 if (!usedev)
595 return -EINVAL; 601 return -EINVAL;
602 dasd_get_device(usedev);
596 lcu->ruac_data.device = usedev; 603 lcu->ruac_data.device = usedev;
597 schedule_delayed_work(&lcu->ruac_data.dwork, 0); 604 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
605 dasd_put_device(usedev);
598 return 0; 606 return 0;
599} 607}
600 608
@@ -723,7 +731,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
723 ASCEBC((char *) &cqr->magic, 4); 731 ASCEBC((char *) &cqr->magic, 4);
724 ccw = cqr->cpaddr; 732 ccw = cqr->cpaddr;
725 ccw->cmd_code = DASD_ECKD_CCW_RSCK; 733 ccw->cmd_code = DASD_ECKD_CCW_RSCK;
726 ccw->flags = 0 ; 734 ccw->flags = CCW_FLAG_SLI;
727 ccw->count = 16; 735 ccw->count = 16;
728 ccw->cda = (__u32)(addr_t) cqr->data; 736 ccw->cda = (__u32)(addr_t) cqr->data;
729 ((char *)cqr->data)[0] = reason; 737 ((char *)cqr->data)[0] = reason;
@@ -930,6 +938,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
930 /* 3. read new alias configuration */ 938 /* 3. read new alias configuration */
931 _schedule_lcu_update(lcu, device); 939 _schedule_lcu_update(lcu, device);
932 lcu->suc_data.device = NULL; 940 lcu->suc_data.device = NULL;
941 dasd_put_device(device);
933 spin_unlock_irqrestore(&lcu->lock, flags); 942 spin_unlock_irqrestore(&lcu->lock, flags);
934} 943}
935 944
@@ -989,6 +998,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
989 } 998 }
990 lcu->suc_data.reason = reason; 999 lcu->suc_data.reason = reason;
991 lcu->suc_data.device = device; 1000 lcu->suc_data.device = device;
1001 dasd_get_device(device);
992 spin_unlock(&lcu->lock); 1002 spin_unlock(&lcu->lock);
993 schedule_work(&lcu->suc_data.worker); 1003 if (!schedule_work(&lcu->suc_data.worker))
1004 dasd_put_device(device);
994}; 1005};
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index cb61f300f8b5..277b5c8c825c 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -67,7 +67,7 @@ static const u8 DASD_DIAG_CMS1[] = { 0xc3, 0xd4, 0xe2, 0xf1 };/* EBCDIC CMS1 */
67 * and function code cmd. 67 * and function code cmd.
68 * In case of an exception return 3. Otherwise return result of bitwise OR of 68 * In case of an exception return 3. Otherwise return result of bitwise OR of
69 * resulting condition code and DIAG return code. */ 69 * resulting condition code and DIAG return code. */
70static inline int dia250(void *iob, int cmd) 70static inline int __dia250(void *iob, int cmd)
71{ 71{
72 register unsigned long reg2 asm ("2") = (unsigned long) iob; 72 register unsigned long reg2 asm ("2") = (unsigned long) iob;
73 typedef union { 73 typedef union {
@@ -77,7 +77,6 @@ static inline int dia250(void *iob, int cmd)
77 int rc; 77 int rc;
78 78
79 rc = 3; 79 rc = 3;
80 diag_stat_inc(DIAG_STAT_X250);
81 asm volatile( 80 asm volatile(
82 " diag 2,%2,0x250\n" 81 " diag 2,%2,0x250\n"
83 "0: ipm %0\n" 82 "0: ipm %0\n"
@@ -91,6 +90,12 @@ static inline int dia250(void *iob, int cmd)
91 return rc; 90 return rc;
92} 91}
93 92
93static inline int dia250(void *iob, int cmd)
94{
95 diag_stat_inc(DIAG_STAT_X250);
96 return __dia250(iob, cmd);
97}
98
94/* Initialize block I/O to DIAG device using the specified blocksize and 99/* Initialize block I/O to DIAG device using the specified blocksize and
95 * block offset. On success, return zero and set end_block to contain the 100 * block offset. On success, return zero and set end_block to contain the
96 * number of blocks on the device minus the specified offset. Return non-zero 101 * number of blocks on the device minus the specified offset. Return non-zero
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 361358134315..93880ed6291c 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -562,7 +562,7 @@ static int mode_select_handle_sense(struct scsi_device *sdev,
562 /* 562 /*
563 * Command Lock contention 563 * Command Lock contention
564 */ 564 */
565 err = SCSI_DH_RETRY; 565 err = SCSI_DH_IMM_RETRY;
566 break; 566 break;
567 default: 567 default:
568 break; 568 break;
@@ -612,6 +612,8 @@ retry:
612 err = mode_select_handle_sense(sdev, h->sense); 612 err = mode_select_handle_sense(sdev, h->sense);
613 if (err == SCSI_DH_RETRY && retry_cnt--) 613 if (err == SCSI_DH_RETRY && retry_cnt--)
614 goto retry; 614 goto retry;
615 if (err == SCSI_DH_IMM_RETRY)
616 goto retry;
615 } 617 }
616 if (err == SCSI_DH_OK) { 618 if (err == SCSI_DH_OK) {
617 h->state = RDAC_STATE_ACTIVE; 619 h->state = RDAC_STATE_ACTIVE;
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index b67661836c9f..d1dd1616f983 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -1,6 +1,6 @@
1config SCSI_HISI_SAS 1config SCSI_HISI_SAS
2 tristate "HiSilicon SAS" 2 tristate "HiSilicon SAS"
3 depends on HAS_DMA 3 depends on HAS_DMA && HAS_IOMEM
4 depends on ARM64 || COMPILE_TEST 4 depends on ARM64 || COMPILE_TEST
5 select SCSI_SAS_LIBSAS 5 select SCSI_SAS_LIBSAS
6 select BLK_DEV_INTEGRITY 6 select BLK_DEV_INTEGRITY
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 057fdeb720ac..eea24d7531cf 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1289,13 +1289,10 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
1289 goto out; 1289 goto out;
1290 } 1290 }
1291 1291
1292 if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK) { 1292 if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK &&
1293 if (!(cmplt_hdr_data & CMPLT_HDR_CMD_CMPLT_MSK) || 1293 !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
1294 !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK))
1295 ts->stat = SAS_DATA_OVERRUN;
1296 else
1297 slot_err_v1_hw(hisi_hba, task, slot);
1298 1294
1295 slot_err_v1_hw(hisi_hba, task, slot);
1299 goto out; 1296 goto out;
1300 } 1297 }
1301 1298
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 3b3e0998fa6e..d6a691e27d33 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4002,6 +4002,7 @@ static ssize_t ipr_store_update_fw(struct device *dev,
4002 struct ipr_sglist *sglist; 4002 struct ipr_sglist *sglist;
4003 char fname[100]; 4003 char fname[100];
4004 char *src; 4004 char *src;
4005 char *endline;
4005 int result, dnld_size; 4006 int result, dnld_size;
4006 4007
4007 if (!capable(CAP_SYS_ADMIN)) 4008 if (!capable(CAP_SYS_ADMIN))
@@ -4009,6 +4010,10 @@ static ssize_t ipr_store_update_fw(struct device *dev,
4009 4010
4010 snprintf(fname, sizeof(fname), "%s", buf); 4011 snprintf(fname, sizeof(fname), "%s", buf);
4011 4012
4013 endline = strchr(fname, '\n');
4014 if (endline)
4015 *endline = '\0';
4016
4012 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { 4017 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
4013 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); 4018 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4014 return -EIO; 4019 return -EIO;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 52a87657c7dd..692a7570b5e1 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2204,7 +2204,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2204 /* Clear outstanding commands array. */ 2204 /* Clear outstanding commands array. */
2205 for (que = 0; que < ha->max_req_queues; que++) { 2205 for (que = 0; que < ha->max_req_queues; que++) {
2206 req = ha->req_q_map[que]; 2206 req = ha->req_q_map[que];
2207 if (!req) 2207 if (!req || !test_bit(que, ha->req_qid_map))
2208 continue; 2208 continue;
2209 req->out_ptr = (void *)(req->ring + req->length); 2209 req->out_ptr = (void *)(req->ring + req->length);
2210 *req->out_ptr = 0; 2210 *req->out_ptr = 0;
@@ -2221,7 +2221,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
2221 2221
2222 for (que = 0; que < ha->max_rsp_queues; que++) { 2222 for (que = 0; que < ha->max_rsp_queues; que++) {
2223 rsp = ha->rsp_q_map[que]; 2223 rsp = ha->rsp_q_map[que];
2224 if (!rsp) 2224 if (!rsp || !test_bit(que, ha->rsp_qid_map))
2225 continue; 2225 continue;
2226 rsp->in_ptr = (void *)(rsp->ring + rsp->length); 2226 rsp->in_ptr = (void *)(rsp->ring + rsp->length);
2227 *rsp->in_ptr = 0; 2227 *rsp->in_ptr = 0;
@@ -4981,7 +4981,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4981 4981
4982 for (i = 1; i < ha->max_rsp_queues; i++) { 4982 for (i = 1; i < ha->max_rsp_queues; i++) {
4983 rsp = ha->rsp_q_map[i]; 4983 rsp = ha->rsp_q_map[i];
4984 if (rsp) { 4984 if (rsp && test_bit(i, ha->rsp_qid_map)) {
4985 rsp->options &= ~BIT_0; 4985 rsp->options &= ~BIT_0;
4986 ret = qla25xx_init_rsp_que(base_vha, rsp); 4986 ret = qla25xx_init_rsp_que(base_vha, rsp);
4987 if (ret != QLA_SUCCESS) 4987 if (ret != QLA_SUCCESS)
@@ -4996,8 +4996,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
4996 } 4996 }
4997 for (i = 1; i < ha->max_req_queues; i++) { 4997 for (i = 1; i < ha->max_req_queues; i++) {
4998 req = ha->req_q_map[i]; 4998 req = ha->req_q_map[i];
4999 if (req) { 4999 if (req && test_bit(i, ha->req_qid_map)) {
5000 /* Clear outstanding commands array. */ 5000 /* Clear outstanding commands array. */
5001 req->options &= ~BIT_0; 5001 req->options &= ~BIT_0;
5002 ret = qla25xx_init_req_que(base_vha, req); 5002 ret = qla25xx_init_req_que(base_vha, req);
5003 if (ret != QLA_SUCCESS) 5003 if (ret != QLA_SUCCESS)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d4d65eb0e9b4..4af95479a9db 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3063,9 +3063,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
3063 "MSI-X: Failed to enable support " 3063 "MSI-X: Failed to enable support "
3064 "-- %d/%d\n Retry with %d vectors.\n", 3064 "-- %d/%d\n Retry with %d vectors.\n",
3065 ha->msix_count, ret, ret); 3065 ha->msix_count, ret, ret);
3066 ha->msix_count = ret;
3067 ha->max_rsp_queues = ha->msix_count - 1;
3066 } 3068 }
3067 ha->msix_count = ret;
3068 ha->max_rsp_queues = ha->msix_count - 1;
3069 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) * 3069 ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
3070 ha->msix_count, GFP_KERNEL); 3070 ha->msix_count, GFP_KERNEL);
3071 if (!ha->msix_entries) { 3071 if (!ha->msix_entries) {
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c5dd594f6c31..cf7ba52bae66 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
600 /* Delete request queues */ 600 /* Delete request queues */
601 for (cnt = 1; cnt < ha->max_req_queues; cnt++) { 601 for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
602 req = ha->req_q_map[cnt]; 602 req = ha->req_q_map[cnt];
603 if (req) { 603 if (req && test_bit(cnt, ha->req_qid_map)) {
604 ret = qla25xx_delete_req_que(vha, req); 604 ret = qla25xx_delete_req_que(vha, req);
605 if (ret != QLA_SUCCESS) { 605 if (ret != QLA_SUCCESS) {
606 ql_log(ql_log_warn, vha, 0x00ea, 606 ql_log(ql_log_warn, vha, 0x00ea,
@@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
614 /* Delete response queues */ 614 /* Delete response queues */
615 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { 615 for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
616 rsp = ha->rsp_q_map[cnt]; 616 rsp = ha->rsp_q_map[cnt];
617 if (rsp) { 617 if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
618 ret = qla25xx_delete_rsp_que(vha, rsp); 618 ret = qla25xx_delete_rsp_que(vha, rsp);
619 if (ret != QLA_SUCCESS) { 619 if (ret != QLA_SUCCESS) {
620 ql_log(ql_log_warn, vha, 0x00eb, 620 ql_log(ql_log_warn, vha, 0x00eb,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f1788db43195..f6c7ce35b542 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -409,6 +409,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
409 int cnt; 409 int cnt;
410 410
411 for (cnt = 0; cnt < ha->max_req_queues; cnt++) { 411 for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
412 if (!test_bit(cnt, ha->req_qid_map))
413 continue;
414
412 req = ha->req_q_map[cnt]; 415 req = ha->req_q_map[cnt];
413 qla2x00_free_req_que(ha, req); 416 qla2x00_free_req_que(ha, req);
414 } 417 }
@@ -416,6 +419,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
416 ha->req_q_map = NULL; 419 ha->req_q_map = NULL;
417 420
418 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) { 421 for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
422 if (!test_bit(cnt, ha->rsp_qid_map))
423 continue;
424
419 rsp = ha->rsp_q_map[cnt]; 425 rsp = ha->rsp_q_map[cnt];
420 qla2x00_free_rsp_que(ha, rsp); 426 qla2x00_free_rsp_que(ha, rsp);
421 } 427 }
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8075a4cdb45c..ee967becd257 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -105,7 +105,7 @@ static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
105static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun, 105static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
106 int fn, void *iocb, int flags); 106 int fn, void *iocb, int flags);
107static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd 107static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
108 *cmd, struct atio_from_isp *atio, int ha_locked); 108 *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
109static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha, 109static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
110 struct qla_tgt_srr_imm *imm, int ha_lock); 110 struct qla_tgt_srr_imm *imm, int ha_lock);
111static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, 111static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
@@ -1756,7 +1756,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
1756 qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy, 1756 qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
1757 0, 0, 0, 0, 0, 0); 1757 0, 0, 0, 0, 0, 0);
1758 else { 1758 else {
1759 if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK) 1759 if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
1760 qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts, 1760 qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
1761 mcmd->fc_tm_rsp, false); 1761 mcmd->fc_tm_rsp, false);
1762 else 1762 else
@@ -2665,7 +2665,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2665 /* no need to terminate. FW already freed exchange. */ 2665 /* no need to terminate. FW already freed exchange. */
2666 qlt_abort_cmd_on_host_reset(cmd->vha, cmd); 2666 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2667 else 2667 else
2668 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 2668 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
2669 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2669 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2670 return 0; 2670 return 0;
2671 } 2671 }
@@ -3173,7 +3173,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
3173} 3173}
3174 3174
3175static void qlt_send_term_exchange(struct scsi_qla_host *vha, 3175static void qlt_send_term_exchange(struct scsi_qla_host *vha,
3176 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) 3176 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
3177 int ul_abort)
3177{ 3178{
3178 unsigned long flags = 0; 3179 unsigned long flags = 0;
3179 int rc; 3180 int rc;
@@ -3193,8 +3194,7 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
3193 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3194 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
3194 3195
3195done: 3196done:
3196 if (cmd && (!cmd->aborted || 3197 if (cmd && !ul_abort && !cmd->aborted) {
3197 !cmd->cmd_sent_to_fw)) {
3198 if (cmd->sg_mapped) 3198 if (cmd->sg_mapped)
3199 qlt_unmap_sg(vha, cmd); 3199 qlt_unmap_sg(vha, cmd);
3200 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3200 vha->hw->tgt.tgt_ops->free_cmd(cmd);
@@ -3253,21 +3253,38 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
3253 3253
3254} 3254}
3255 3255
3256void qlt_abort_cmd(struct qla_tgt_cmd *cmd) 3256int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3257{ 3257{
3258 struct qla_tgt *tgt = cmd->tgt; 3258 struct qla_tgt *tgt = cmd->tgt;
3259 struct scsi_qla_host *vha = tgt->vha; 3259 struct scsi_qla_host *vha = tgt->vha;
3260 struct se_cmd *se_cmd = &cmd->se_cmd; 3260 struct se_cmd *se_cmd = &cmd->se_cmd;
3261 unsigned long flags;
3261 3262
3262 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014, 3263 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3263 "qla_target(%d): terminating exchange for aborted cmd=%p " 3264 "qla_target(%d): terminating exchange for aborted cmd=%p "
3264 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd, 3265 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3265 se_cmd->tag); 3266 se_cmd->tag);
3266 3267
3268 spin_lock_irqsave(&cmd->cmd_lock, flags);
3269 if (cmd->aborted) {
3270 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3271 /*
3272 * It's normal to see 2 calls in this path:
3273 * 1) XFER Rdy completion + CMD_T_ABORT
3274 * 2) TCM TMR - drain_state_list
3275 */
3276 ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
3277 "multiple abort. %p transport_state %x, t_state %x,"
3278 " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state,
3279 cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags);
3280 return EIO;
3281 }
3267 cmd->aborted = 1; 3282 cmd->aborted = 1;
3268 cmd->cmd_flags |= BIT_6; 3283 cmd->cmd_flags |= BIT_6;
3284 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
3269 3285
3270 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0); 3286 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
3287 return 0;
3271} 3288}
3272EXPORT_SYMBOL(qlt_abort_cmd); 3289EXPORT_SYMBOL(qlt_abort_cmd);
3273 3290
@@ -3282,6 +3299,9 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
3282 3299
3283 BUG_ON(cmd->cmd_in_wq); 3300 BUG_ON(cmd->cmd_in_wq);
3284 3301
3302 if (cmd->sg_mapped)
3303 qlt_unmap_sg(cmd->vha, cmd);
3304
3285 if (!cmd->q_full) 3305 if (!cmd->q_full)
3286 qlt_decr_num_pend_cmds(cmd->vha); 3306 qlt_decr_num_pend_cmds(cmd->vha);
3287 3307
@@ -3399,7 +3419,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
3399 term = 1; 3419 term = 1;
3400 3420
3401 if (term) 3421 if (term)
3402 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 3422 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
3403 3423
3404 return term; 3424 return term;
3405} 3425}
@@ -3580,12 +3600,13 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3580 case CTIO_PORT_LOGGED_OUT: 3600 case CTIO_PORT_LOGGED_OUT:
3581 case CTIO_PORT_UNAVAILABLE: 3601 case CTIO_PORT_UNAVAILABLE:
3582 { 3602 {
3583 int logged_out = (status & 0xFFFF); 3603 int logged_out =
3604 (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
3605
3584 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059, 3606 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
3585 "qla_target(%d): CTIO with %s status %x " 3607 "qla_target(%d): CTIO with %s status %x "
3586 "received (state %x, se_cmd %p)\n", vha->vp_idx, 3608 "received (state %x, se_cmd %p)\n", vha->vp_idx,
3587 (logged_out == CTIO_PORT_LOGGED_OUT) ? 3609 logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3588 "PORT LOGGED OUT" : "PORT UNAVAILABLE",
3589 status, cmd->state, se_cmd); 3610 status, cmd->state, se_cmd);
3590 3611
3591 if (logged_out && cmd->sess) { 3612 if (logged_out && cmd->sess) {
@@ -3754,6 +3775,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3754 goto out_term; 3775 goto out_term;
3755 } 3776 }
3756 3777
3778 spin_lock_init(&cmd->cmd_lock);
3757 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 3779 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3758 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 3780 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
3759 cmd->unpacked_lun = scsilun_to_int( 3781 cmd->unpacked_lun = scsilun_to_int(
@@ -3796,7 +3818,7 @@ out_term:
3796 */ 3818 */
3797 cmd->cmd_flags |= BIT_2; 3819 cmd->cmd_flags |= BIT_2;
3798 spin_lock_irqsave(&ha->hardware_lock, flags); 3820 spin_lock_irqsave(&ha->hardware_lock, flags);
3799 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1); 3821 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
3800 3822
3801 qlt_decr_num_pend_cmds(vha); 3823 qlt_decr_num_pend_cmds(vha);
3802 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag); 3824 percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
@@ -3918,7 +3940,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
3918 3940
3919out_term: 3941out_term:
3920 spin_lock_irqsave(&ha->hardware_lock, flags); 3942 spin_lock_irqsave(&ha->hardware_lock, flags);
3921 qlt_send_term_exchange(vha, NULL, &op->atio, 1); 3943 qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
3922 spin_unlock_irqrestore(&ha->hardware_lock, flags); 3944 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3923 kfree(op); 3945 kfree(op);
3924 3946
@@ -3982,7 +4004,8 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3982 4004
3983 cmd->cmd_in_wq = 1; 4005 cmd->cmd_in_wq = 1;
3984 cmd->cmd_flags |= BIT_0; 4006 cmd->cmd_flags |= BIT_0;
3985 cmd->se_cmd.cpuid = -1; 4007 cmd->se_cmd.cpuid = ha->msix_count ?
4008 ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
3986 4009
3987 spin_lock(&vha->cmd_list_lock); 4010 spin_lock(&vha->cmd_list_lock);
3988 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list); 4011 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
@@ -3990,7 +4013,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3990 4013
3991 INIT_WORK(&cmd->work, qlt_do_work); 4014 INIT_WORK(&cmd->work, qlt_do_work);
3992 if (ha->msix_count) { 4015 if (ha->msix_count) {
3993 cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid;
3994 if (cmd->atio.u.isp24.fcp_cmnd.rddata) 4016 if (cmd->atio.u.isp24.fcp_cmnd.rddata)
3995 queue_work_on(smp_processor_id(), qla_tgt_wq, 4017 queue_work_on(smp_processor_id(), qla_tgt_wq,
3996 &cmd->work); 4018 &cmd->work);
@@ -4771,7 +4793,7 @@ out_reject:
4771 dump_stack(); 4793 dump_stack();
4772 } else { 4794 } else {
4773 cmd->cmd_flags |= BIT_9; 4795 cmd->cmd_flags |= BIT_9;
4774 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1); 4796 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
4775 } 4797 }
4776 spin_unlock_irqrestore(&ha->hardware_lock, flags); 4798 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4777} 4799}
@@ -4950,7 +4972,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
4950 sctio, sctio->srr_id); 4972 sctio, sctio->srr_id);
4951 list_del(&sctio->srr_list_entry); 4973 list_del(&sctio->srr_list_entry);
4952 qlt_send_term_exchange(vha, sctio->cmd, 4974 qlt_send_term_exchange(vha, sctio->cmd,
4953 &sctio->cmd->atio, 1); 4975 &sctio->cmd->atio, 1, 0);
4954 kfree(sctio); 4976 kfree(sctio);
4955 } 4977 }
4956 } 4978 }
@@ -5123,7 +5145,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
5123 atio->u.isp24.fcp_hdr.s_id); 5145 atio->u.isp24.fcp_hdr.s_id);
5124 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5146 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
5125 if (!sess) { 5147 if (!sess) {
5126 qlt_send_term_exchange(vha, NULL, atio, 1); 5148 qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5127 return 0; 5149 return 0;
5128 } 5150 }
5129 /* Sending marker isn't necessary, since we called from ISR */ 5151 /* Sending marker isn't necessary, since we called from ISR */
@@ -5406,7 +5428,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
5406#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 5428#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5407 qlt_send_busy(vha, atio, SAM_STAT_BUSY); 5429 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
5408#else 5430#else
5409 qlt_send_term_exchange(vha, NULL, atio, 1); 5431 qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5410#endif 5432#endif
5411 5433
5412 if (!ha_locked) 5434 if (!ha_locked)
@@ -5523,7 +5545,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
5523#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */ 5545#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
5524 qlt_send_busy(vha, atio, 0); 5546 qlt_send_busy(vha, atio, 0);
5525#else 5547#else
5526 qlt_send_term_exchange(vha, NULL, atio, 1); 5548 qlt_send_term_exchange(vha, NULL, atio, 1, 0);
5527#endif 5549#endif
5528 } else { 5550 } else {
5529 if (tgt->tgt_stop) { 5551 if (tgt->tgt_stop) {
@@ -5532,7 +5554,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
5532 "command to target, sending TERM " 5554 "command to target, sending TERM "
5533 "EXCHANGE for rsp\n"); 5555 "EXCHANGE for rsp\n");
5534 qlt_send_term_exchange(vha, NULL, 5556 qlt_send_term_exchange(vha, NULL,
5535 atio, 1); 5557 atio, 1, 0);
5536 } else { 5558 } else {
5537 ql_dbg(ql_dbg_tgt, vha, 0xe060, 5559 ql_dbg(ql_dbg_tgt, vha, 0xe060,
5538 "qla_target(%d): Unable to send " 5560 "qla_target(%d): Unable to send "
@@ -5960,7 +5982,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5960 return; 5982 return;
5961 5983
5962out_term: 5984out_term:
5963 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 0); 5985 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
5964 if (sess) 5986 if (sess)
5965 ha->tgt.tgt_ops->put_sess(sess); 5987 ha->tgt.tgt_ops->put_sess(sess);
5966 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); 5988 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 71b2865ba3c8..22a6a767fe07 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -943,6 +943,36 @@ struct qla_tgt_sess {
943 qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; 943 qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
944}; 944};
945 945
946typedef enum {
947 /*
948 * BIT_0 - Atio Arrival / schedule to work
949 * BIT_1 - qlt_do_work
950 * BIT_2 - qlt_do work failed
951 * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
952 * BIT_4 - read respond/tcm_qla2xx_queue_data_in
953 * BIT_5 - status respond / tcm_qla2xx_queue_status
954 * BIT_6 - tcm request to abort/Term exchange.
955 * pre_xmit_response->qlt_send_term_exchange
956 * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
957 * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
958 * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
959 * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
960
961 * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
962 * BIT_13 - Bad completion -
963 * qlt_ctio_do_completion --> qlt_term_ctio_exchange
964 * BIT_14 - Back end data received/sent.
965 * BIT_15 - SRR prepare ctio
966 * BIT_16 - complete free
967 * BIT_17 - flush - qlt_abort_cmd_on_host_reset
968 * BIT_18 - completion w/abort status
969 * BIT_19 - completion w/unknown status
970 * BIT_20 - tcm_qla2xxx_free_cmd
971 */
972 CMD_FLAG_DATA_WORK = BIT_11,
973 CMD_FLAG_DATA_WORK_FREE = BIT_21,
974} cmd_flags_t;
975
946struct qla_tgt_cmd { 976struct qla_tgt_cmd {
947 struct se_cmd se_cmd; 977 struct se_cmd se_cmd;
948 struct qla_tgt_sess *sess; 978 struct qla_tgt_sess *sess;
@@ -952,6 +982,7 @@ struct qla_tgt_cmd {
952 /* Sense buffer that will be mapped into outgoing status */ 982 /* Sense buffer that will be mapped into outgoing status */
953 unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER]; 983 unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
954 984
985 spinlock_t cmd_lock;
955 /* to save extra sess dereferences */ 986 /* to save extra sess dereferences */
956 unsigned int conf_compl_supported:1; 987 unsigned int conf_compl_supported:1;
957 unsigned int sg_mapped:1; 988 unsigned int sg_mapped:1;
@@ -986,30 +1017,8 @@ struct qla_tgt_cmd {
986 1017
987 uint64_t jiffies_at_alloc; 1018 uint64_t jiffies_at_alloc;
988 uint64_t jiffies_at_free; 1019 uint64_t jiffies_at_free;
989 /* BIT_0 - Atio Arrival / schedule to work 1020
990 * BIT_1 - qlt_do_work 1021 cmd_flags_t cmd_flags;
991 * BIT_2 - qlt_do work failed
992 * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
993 * BIT_4 - read respond/tcm_qla2xx_queue_data_in
994 * BIT_5 - status respond / tcm_qla2xx_queue_status
995 * BIT_6 - tcm request to abort/Term exchange.
996 * pre_xmit_response->qlt_send_term_exchange
997 * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
998 * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
999 * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
1000 * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
1001 * BIT_11 - Data actually going to TCM : tcm_qla2xx_handle_data_work
1002 * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
1003 * BIT_13 - Bad completion -
1004 * qlt_ctio_do_completion --> qlt_term_ctio_exchange
1005 * BIT_14 - Back end data received/sent.
1006 * BIT_15 - SRR prepare ctio
1007 * BIT_16 - complete free
1008 * BIT_17 - flush - qlt_abort_cmd_on_host_reset
1009 * BIT_18 - completion w/abort status
1010 * BIT_19 - completion w/unknown status
1011 */
1012 uint32_t cmd_flags;
1013}; 1022};
1014 1023
1015struct qla_tgt_sess_work_param { 1024struct qla_tgt_sess_work_param {
@@ -1148,7 +1157,7 @@ static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p)
1148extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); 1157extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
1149extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); 1158extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
1150extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); 1159extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
1151extern void qlt_abort_cmd(struct qla_tgt_cmd *); 1160extern int qlt_abort_cmd(struct qla_tgt_cmd *);
1152extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); 1161extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
1153extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); 1162extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
1154extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); 1163extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index ddbe2e7ac14d..c3e622524604 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
395 if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) { 395 if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
396 for (i = 0; i < vha->hw->max_req_queues; i++) { 396 for (i = 0; i < vha->hw->max_req_queues; i++) {
397 struct req_que *req = vha->hw->req_q_map[i]; 397 struct req_que *req = vha->hw->req_q_map[i];
398
399 if (!test_bit(i, vha->hw->req_qid_map))
400 continue;
401
398 if (req || !buf) { 402 if (req || !buf) {
399 length = req ? 403 length = req ?
400 req->length : REQUEST_ENTRY_CNT_24XX; 404 req->length : REQUEST_ENTRY_CNT_24XX;
@@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
408 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { 412 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
409 for (i = 0; i < vha->hw->max_rsp_queues; i++) { 413 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
410 struct rsp_que *rsp = vha->hw->rsp_q_map[i]; 414 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
415
416 if (!test_bit(i, vha->hw->rsp_qid_map))
417 continue;
418
411 if (rsp || !buf) { 419 if (rsp || !buf) {
412 length = rsp ? 420 length = rsp ?
413 rsp->length : RESPONSE_ENTRY_CNT_MQ; 421 rsp->length : RESPONSE_ENTRY_CNT_MQ;
@@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
634 if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) { 642 if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
635 for (i = 0; i < vha->hw->max_req_queues; i++) { 643 for (i = 0; i < vha->hw->max_req_queues; i++) {
636 struct req_que *req = vha->hw->req_q_map[i]; 644 struct req_que *req = vha->hw->req_q_map[i];
645
646 if (!test_bit(i, vha->hw->req_qid_map))
647 continue;
648
637 if (req || !buf) { 649 if (req || !buf) {
638 qla27xx_insert16(i, buf, len); 650 qla27xx_insert16(i, buf, len);
639 qla27xx_insert16(1, buf, len); 651 qla27xx_insert16(1, buf, len);
@@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
645 } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { 657 } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
646 for (i = 0; i < vha->hw->max_rsp_queues; i++) { 658 for (i = 0; i < vha->hw->max_rsp_queues; i++) {
647 struct rsp_que *rsp = vha->hw->rsp_q_map[i]; 659 struct rsp_que *rsp = vha->hw->rsp_q_map[i];
660
661 if (!test_bit(i, vha->hw->rsp_qid_map))
662 continue;
663
648 if (rsp || !buf) { 664 if (rsp || !buf) {
649 qla27xx_insert16(i, buf, len); 665 qla27xx_insert16(i, buf, len);
650 qla27xx_insert16(1, buf, len); 666 qla27xx_insert16(1, buf, len);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index faf0a126627f..1808a01cfb7e 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -298,6 +298,10 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
298{ 298{
299 cmd->vha->tgt_counters.core_qla_free_cmd++; 299 cmd->vha->tgt_counters.core_qla_free_cmd++;
300 cmd->cmd_in_wq = 1; 300 cmd->cmd_in_wq = 1;
301
302 BUG_ON(cmd->cmd_flags & BIT_20);
303 cmd->cmd_flags |= BIT_20;
304
301 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free); 305 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
302 queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work); 306 queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
303} 307}
@@ -374,6 +378,20 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
374{ 378{
375 struct qla_tgt_cmd *cmd = container_of(se_cmd, 379 struct qla_tgt_cmd *cmd = container_of(se_cmd,
376 struct qla_tgt_cmd, se_cmd); 380 struct qla_tgt_cmd, se_cmd);
381
382 if (cmd->aborted) {
383 /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
384 * can get ahead of this cmd. tcm_qla2xxx_aborted_task
385 * already kick start the free.
386 */
387 pr_debug("write_pending aborted cmd[%p] refcount %d "
388 "transport_state %x, t_state %x, se_cmd_flags %x\n",
389 cmd,cmd->se_cmd.cmd_kref.refcount.counter,
390 cmd->se_cmd.transport_state,
391 cmd->se_cmd.t_state,
392 cmd->se_cmd.se_cmd_flags);
393 return 0;
394 }
377 cmd->cmd_flags |= BIT_3; 395 cmd->cmd_flags |= BIT_3;
378 cmd->bufflen = se_cmd->data_length; 396 cmd->bufflen = se_cmd->data_length;
379 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 397 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -405,7 +423,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
405 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { 423 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
406 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 424 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
407 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, 425 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
408 3 * HZ); 426 50);
409 return 0; 427 return 0;
410 } 428 }
411 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 429 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -444,6 +462,9 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
444 if (bidi) 462 if (bidi)
445 flags |= TARGET_SCF_BIDI_OP; 463 flags |= TARGET_SCF_BIDI_OP;
446 464
465 if (se_cmd->cpuid != WORK_CPU_UNBOUND)
466 flags |= TARGET_SCF_USE_CPUID;
467
447 sess = cmd->sess; 468 sess = cmd->sess;
448 if (!sess) { 469 if (!sess) {
449 pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n"); 470 pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
@@ -465,13 +486,25 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
465static void tcm_qla2xxx_handle_data_work(struct work_struct *work) 486static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
466{ 487{
467 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 488 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
489 unsigned long flags;
468 490
469 /* 491 /*
470 * Ensure that the complete FCP WRITE payload has been received. 492 * Ensure that the complete FCP WRITE payload has been received.
471 * Otherwise return an exception via CHECK_CONDITION status. 493 * Otherwise return an exception via CHECK_CONDITION status.
472 */ 494 */
473 cmd->cmd_in_wq = 0; 495 cmd->cmd_in_wq = 0;
474 cmd->cmd_flags |= BIT_11; 496
497 spin_lock_irqsave(&cmd->cmd_lock, flags);
498 cmd->cmd_flags |= CMD_FLAG_DATA_WORK;
499 if (cmd->aborted) {
500 cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
501 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
502
503 tcm_qla2xxx_free_cmd(cmd);
504 return;
505 }
506 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
507
475 cmd->vha->tgt_counters.qla_core_ret_ctio++; 508 cmd->vha->tgt_counters.qla_core_ret_ctio++;
476 if (!cmd->write_data_transferred) { 509 if (!cmd->write_data_transferred) {
477 /* 510 /*
@@ -546,6 +579,20 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
546 struct qla_tgt_cmd *cmd = container_of(se_cmd, 579 struct qla_tgt_cmd *cmd = container_of(se_cmd,
547 struct qla_tgt_cmd, se_cmd); 580 struct qla_tgt_cmd, se_cmd);
548 581
582 if (cmd->aborted) {
583 /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
584 * can get ahead of this cmd. tcm_qla2xxx_aborted_task
585 * already kick start the free.
586 */
587 pr_debug("queue_data_in aborted cmd[%p] refcount %d "
588 "transport_state %x, t_state %x, se_cmd_flags %x\n",
589 cmd,cmd->se_cmd.cmd_kref.refcount.counter,
590 cmd->se_cmd.transport_state,
591 cmd->se_cmd.t_state,
592 cmd->se_cmd.se_cmd_flags);
593 return 0;
594 }
595
549 cmd->cmd_flags |= BIT_4; 596 cmd->cmd_flags |= BIT_4;
550 cmd->bufflen = se_cmd->data_length; 597 cmd->bufflen = se_cmd->data_length;
551 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 598 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -637,11 +684,34 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
637 qlt_xmit_tm_rsp(mcmd); 684 qlt_xmit_tm_rsp(mcmd);
638} 685}
639 686
687
688#define DATA_WORK_NOT_FREE(_flags) \
689 (( _flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \
690 CMD_FLAG_DATA_WORK)
640static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd) 691static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
641{ 692{
642 struct qla_tgt_cmd *cmd = container_of(se_cmd, 693 struct qla_tgt_cmd *cmd = container_of(se_cmd,
643 struct qla_tgt_cmd, se_cmd); 694 struct qla_tgt_cmd, se_cmd);
644 qlt_abort_cmd(cmd); 695 unsigned long flags;
696
697 if (qlt_abort_cmd(cmd))
698 return;
699
700 spin_lock_irqsave(&cmd->cmd_lock, flags);
701 if ((cmd->state == QLA_TGT_STATE_NEW)||
702 ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
703 DATA_WORK_NOT_FREE(cmd->cmd_flags)) ) {
704
705 cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
706 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
707 /* Cmd have not reached firmware.
708 * Use this trigger to free it. */
709 tcm_qla2xxx_free_cmd(cmd);
710 return;
711 }
712 spin_unlock_irqrestore(&cmd->cmd_lock, flags);
713 return;
714
645} 715}
646 716
647static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, 717static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 47b9d13f97b8..bbfbfd9e5aa3 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -205,6 +205,8 @@ static struct {
205 {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC}, 205 {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
206 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 206 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
207 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, 207 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
208 {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
209 {"Marvell", "91xx Config", "1.01", BLIST_SKIP_VPD_PAGES},
208 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 210 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
209 {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 211 {"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
210 {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 212 {"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index fa6b2c4eb7a2..8c6e31874171 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1344,6 +1344,7 @@ scsi_prep_return(struct request_queue *q, struct request *req, int ret)
1344 1344
1345 switch (ret) { 1345 switch (ret) {
1346 case BLKPREP_KILL: 1346 case BLKPREP_KILL:
1347 case BLKPREP_INVALID:
1347 req->errors = DID_NO_CONNECT << 16; 1348 req->errors = DID_NO_CONNECT << 16;
1348 /* release the command and kill it */ 1349 /* release the command and kill it */
1349 if (req->special) { 1350 if (req->special) {
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 4f18a851e2c7..00bc7218a7f8 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1272,16 +1272,18 @@ static void __scsi_remove_target(struct scsi_target *starget)
1272void scsi_remove_target(struct device *dev) 1272void scsi_remove_target(struct device *dev)
1273{ 1273{
1274 struct Scsi_Host *shost = dev_to_shost(dev->parent); 1274 struct Scsi_Host *shost = dev_to_shost(dev->parent);
1275 struct scsi_target *starget; 1275 struct scsi_target *starget, *last_target = NULL;
1276 unsigned long flags; 1276 unsigned long flags;
1277 1277
1278restart: 1278restart:
1279 spin_lock_irqsave(shost->host_lock, flags); 1279 spin_lock_irqsave(shost->host_lock, flags);
1280 list_for_each_entry(starget, &shost->__targets, siblings) { 1280 list_for_each_entry(starget, &shost->__targets, siblings) {
1281 if (starget->state == STARGET_DEL) 1281 if (starget->state == STARGET_DEL ||
1282 starget == last_target)
1282 continue; 1283 continue;
1283 if (starget->dev.parent == dev || &starget->dev == dev) { 1284 if (starget->dev.parent == dev || &starget->dev == dev) {
1284 kref_get(&starget->reap_ref); 1285 kref_get(&starget->reap_ref);
1286 last_target = starget;
1285 spin_unlock_irqrestore(shost->host_lock, flags); 1287 spin_unlock_irqrestore(shost->host_lock, flags);
1286 __scsi_remove_target(starget); 1288 __scsi_remove_target(starget);
1287 scsi_target_reap(starget); 1289 scsi_target_reap(starget);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bb669d32ccd0..d749da765df1 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -761,7 +761,7 @@ static int sd_setup_discard_cmnd(struct scsi_cmnd *cmd)
761 break; 761 break;
762 762
763 default: 763 default:
764 ret = BLKPREP_KILL; 764 ret = BLKPREP_INVALID;
765 goto out; 765 goto out;
766 } 766 }
767 767
@@ -839,7 +839,7 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
839 int ret; 839 int ret;
840 840
841 if (sdkp->device->no_write_same) 841 if (sdkp->device->no_write_same)
842 return BLKPREP_KILL; 842 return BLKPREP_INVALID;
843 843
844 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size); 844 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
845 845
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 55627d097873..292c04eec9ad 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -42,6 +42,7 @@
42#include <scsi/scsi_devinfo.h> 42#include <scsi/scsi_devinfo.h>
43#include <scsi/scsi_dbg.h> 43#include <scsi/scsi_dbg.h>
44#include <scsi/scsi_transport_fc.h> 44#include <scsi/scsi_transport_fc.h>
45#include <scsi/scsi_transport.h>
45 46
46/* 47/*
47 * All wire protocol details (storage protocol between the guest and the host) 48 * All wire protocol details (storage protocol between the guest and the host)
@@ -477,19 +478,18 @@ struct hv_host_device {
477struct storvsc_scan_work { 478struct storvsc_scan_work {
478 struct work_struct work; 479 struct work_struct work;
479 struct Scsi_Host *host; 480 struct Scsi_Host *host;
480 uint lun; 481 u8 lun;
482 u8 tgt_id;
481}; 483};
482 484
483static void storvsc_device_scan(struct work_struct *work) 485static void storvsc_device_scan(struct work_struct *work)
484{ 486{
485 struct storvsc_scan_work *wrk; 487 struct storvsc_scan_work *wrk;
486 uint lun;
487 struct scsi_device *sdev; 488 struct scsi_device *sdev;
488 489
489 wrk = container_of(work, struct storvsc_scan_work, work); 490 wrk = container_of(work, struct storvsc_scan_work, work);
490 lun = wrk->lun;
491 491
492 sdev = scsi_device_lookup(wrk->host, 0, 0, lun); 492 sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
493 if (!sdev) 493 if (!sdev)
494 goto done; 494 goto done;
495 scsi_rescan_device(&sdev->sdev_gendev); 495 scsi_rescan_device(&sdev->sdev_gendev);
@@ -540,7 +540,7 @@ static void storvsc_remove_lun(struct work_struct *work)
540 if (!scsi_host_get(wrk->host)) 540 if (!scsi_host_get(wrk->host))
541 goto done; 541 goto done;
542 542
543 sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun); 543 sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
544 544
545 if (sdev) { 545 if (sdev) {
546 scsi_remove_device(sdev); 546 scsi_remove_device(sdev);
@@ -940,6 +940,7 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
940 940
941 wrk->host = host; 941 wrk->host = host;
942 wrk->lun = vm_srb->lun; 942 wrk->lun = vm_srb->lun;
943 wrk->tgt_id = vm_srb->target_id;
943 INIT_WORK(&wrk->work, process_err_fn); 944 INIT_WORK(&wrk->work, process_err_fn);
944 schedule_work(&wrk->work); 945 schedule_work(&wrk->work);
945} 946}
@@ -1770,6 +1771,11 @@ static int __init storvsc_drv_init(void)
1770 fc_transport_template = fc_attach_transport(&fc_transport_functions); 1771 fc_transport_template = fc_attach_transport(&fc_transport_functions);
1771 if (!fc_transport_template) 1772 if (!fc_transport_template)
1772 return -ENODEV; 1773 return -ENODEV;
1774
1775 /*
1776 * Install Hyper-V specific timeout handler.
1777 */
1778 fc_transport_template->eh_timed_out = storvsc_eh_timed_out;
1773#endif 1779#endif
1774 1780
1775 ret = vmbus_driver_register(&storvsc_drv); 1781 ret = vmbus_driver_register(&storvsc_drv);
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 91a003011acf..a9bac3bf20de 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,7 +34,7 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
34 34
35static int __init sh_pm_runtime_init(void) 35static int __init sh_pm_runtime_init(void)
36{ 36{
37 if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) { 37 if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
38 if (!of_find_compatible_node(NULL, NULL, 38 if (!of_find_compatible_node(NULL, NULL,
39 "renesas,cpg-mstp-clocks")) 39 "renesas,cpg-mstp-clocks"))
40 return 0; 40 return 0;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index aebad36391c9..8feac599e9ab 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
1571 1571
1572 as->use_cs_gpios = true; 1572 as->use_cs_gpios = true;
1573 if (atmel_spi_is_v2(as) && 1573 if (atmel_spi_is_v2(as) &&
1574 pdev->dev.of_node &&
1574 !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) { 1575 !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
1575 as->use_cs_gpios = false; 1576 as->use_cs_gpios = false;
1576 master->num_chipselect = 4; 1577 master->num_chipselect = 4;
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 7de6f8472a81..ecc73c0a97cf 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -73,8 +73,8 @@
73 73
74/* Bitfields in CNTL1 */ 74/* Bitfields in CNTL1 */
75#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700 75#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700
76#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000080 76#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000080
77#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000040 77#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000040
78#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002 78#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002
79#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001 79#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001
80 80
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7fd6a4c009d2..7cb0c1921495 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -84,7 +84,7 @@ struct fsl_espi_transfer {
84/* SPCOM register values */ 84/* SPCOM register values */
85#define SPCOM_CS(x) ((x) << 30) 85#define SPCOM_CS(x) ((x) << 30)
86#define SPCOM_TRANLEN(x) ((x) << 0) 86#define SPCOM_TRANLEN(x) ((x) << 0)
87#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ 87#define SPCOM_TRANLEN_MAX 0x10000 /* Max transaction length */
88 88
89#define AUTOSUSPEND_TIMEOUT 2000 89#define AUTOSUSPEND_TIMEOUT 2000
90 90
@@ -233,7 +233,7 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
233 reinit_completion(&mpc8xxx_spi->done); 233 reinit_completion(&mpc8xxx_spi->done);
234 234
235 /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ 235 /* Set SPCOM[CS] and SPCOM[TRANLEN] field */
236 if ((t->len - 1) > SPCOM_TRANLEN_MAX) { 236 if (t->len > SPCOM_TRANLEN_MAX) {
237 dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" 237 dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
238 " beyond the SPCOM[TRANLEN] field\n", t->len); 238 " beyond the SPCOM[TRANLEN] field\n", t->len);
239 return -EINVAL; 239 return -EINVAL;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index d98c33cb64f9..c688efa95e29 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -204,8 +204,8 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
204{ 204{
205 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 205 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
206 206
207 if (spi_imx->dma_is_inited && 207 if (spi_imx->dma_is_inited && transfer->len >= spi_imx->wml &&
208 transfer->len > spi_imx->wml * sizeof(u32)) 208 (transfer->len % spi_imx->wml) == 0)
209 return true; 209 return true;
210 return false; 210 return false;
211} 211}
@@ -919,8 +919,6 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
919 struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL; 919 struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
920 int ret; 920 int ret;
921 unsigned long timeout; 921 unsigned long timeout;
922 u32 dma;
923 int left;
924 struct spi_master *master = spi_imx->bitbang.master; 922 struct spi_master *master = spi_imx->bitbang.master;
925 struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg; 923 struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
926 924
@@ -929,7 +927,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
929 tx->sgl, tx->nents, DMA_MEM_TO_DEV, 927 tx->sgl, tx->nents, DMA_MEM_TO_DEV,
930 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 928 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
931 if (!desc_tx) 929 if (!desc_tx)
932 goto no_dma; 930 goto tx_nodma;
933 931
934 desc_tx->callback = spi_imx_dma_tx_callback; 932 desc_tx->callback = spi_imx_dma_tx_callback;
935 desc_tx->callback_param = (void *)spi_imx; 933 desc_tx->callback_param = (void *)spi_imx;
@@ -941,7 +939,7 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
941 rx->sgl, rx->nents, DMA_DEV_TO_MEM, 939 rx->sgl, rx->nents, DMA_DEV_TO_MEM,
942 DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 940 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
943 if (!desc_rx) 941 if (!desc_rx)
944 goto no_dma; 942 goto rx_nodma;
945 943
946 desc_rx->callback = spi_imx_dma_rx_callback; 944 desc_rx->callback = spi_imx_dma_rx_callback;
947 desc_rx->callback_param = (void *)spi_imx; 945 desc_rx->callback_param = (void *)spi_imx;
@@ -954,13 +952,6 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
954 /* Trigger the cspi module. */ 952 /* Trigger the cspi module. */
955 spi_imx->dma_finished = 0; 953 spi_imx->dma_finished = 0;
956 954
957 dma = readl(spi_imx->base + MX51_ECSPI_DMA);
958 dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
959 /* Change RX_DMA_LENGTH trigger dma fetch tail data */
960 left = transfer->len % spi_imx->wml;
961 if (left)
962 writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
963 spi_imx->base + MX51_ECSPI_DMA);
964 /* 955 /*
965 * Set these order to avoid potential RX overflow. The overflow may 956 * Set these order to avoid potential RX overflow. The overflow may
966 * happen if we enable SPI HW before starting RX DMA due to rescheduling 957 * happen if we enable SPI HW before starting RX DMA due to rescheduling
@@ -992,10 +983,6 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
992 spi_imx->devtype_data->reset(spi_imx); 983 spi_imx->devtype_data->reset(spi_imx);
993 dmaengine_terminate_all(master->dma_rx); 984 dmaengine_terminate_all(master->dma_rx);
994 } 985 }
995 dma &= ~MX51_ECSPI_DMA_RXT_WML_MASK;
996 writel(dma |
997 spi_imx->wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
998 spi_imx->base + MX51_ECSPI_DMA);
999 } 986 }
1000 987
1001 spi_imx->dma_finished = 1; 988 spi_imx->dma_finished = 1;
@@ -1008,7 +995,9 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
1008 995
1009 return ret; 996 return ret;
1010 997
1011no_dma: 998rx_nodma:
999 dmaengine_terminate_all(master->dma_tx);
1000tx_nodma:
1012 pr_warn_once("%s %s: DMA not available, falling back to PIO\n", 1001 pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
1013 dev_driver_string(&master->dev), 1002 dev_driver_string(&master->dev),
1014 dev_name(&master->dev)); 1003 dev_name(&master->dev));
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 894616f687b0..cf4bb36bee25 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -761,6 +761,7 @@ static int spi_test_run_iter(struct spi_device *spi,
761 test.iterate_transfer_mask = 1; 761 test.iterate_transfer_mask = 1;
762 762
763 /* count number of transfers with tx/rx_buf != NULL */ 763 /* count number of transfers with tx/rx_buf != NULL */
764 rx_count = tx_count = 0;
764 for (i = 0; i < test.transfer_count; i++) { 765 for (i = 0; i < test.transfer_count; i++) {
765 if (test.transfers[i].tx_buf) 766 if (test.transfers[i].tx_buf)
766 tx_count++; 767 tx_count++;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7273820275e9..0caa3c8bef46 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1490,6 +1490,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
1490 return status; 1490 return status;
1491 1491
1492disable_pm: 1492disable_pm:
1493 pm_runtime_dont_use_autosuspend(&pdev->dev);
1494 pm_runtime_put_sync(&pdev->dev);
1493 pm_runtime_disable(&pdev->dev); 1495 pm_runtime_disable(&pdev->dev);
1494free_master: 1496free_master:
1495 spi_master_put(master); 1497 spi_master_put(master);
@@ -1501,6 +1503,7 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
1501 struct spi_master *master = platform_get_drvdata(pdev); 1503 struct spi_master *master = platform_get_drvdata(pdev);
1502 struct omap2_mcspi *mcspi = spi_master_get_devdata(master); 1504 struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
1503 1505
1506 pm_runtime_dont_use_autosuspend(mcspi->dev);
1504 pm_runtime_put_sync(mcspi->dev); 1507 pm_runtime_put_sync(mcspi->dev);
1505 pm_runtime_disable(&pdev->dev); 1508 pm_runtime_disable(&pdev->dev);
1506 1509
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index 79a8bc4f6cec..7cb1b2d710c1 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -749,6 +749,7 @@ static int rockchip_spi_probe(struct platform_device *pdev)
749 return 0; 749 return 0;
750 750
751err_register_master: 751err_register_master:
752 pm_runtime_disable(&pdev->dev);
752 if (rs->dma_tx.ch) 753 if (rs->dma_tx.ch)
753 dma_release_channel(rs->dma_tx.ch); 754 dma_release_channel(rs->dma_tx.ch);
754 if (rs->dma_rx.ch) 755 if (rs->dma_rx.ch)
@@ -778,6 +779,8 @@ static int rockchip_spi_remove(struct platform_device *pdev)
778 if (rs->dma_rx.ch) 779 if (rs->dma_rx.ch)
779 dma_release_channel(rs->dma_rx.ch); 780 dma_release_channel(rs->dma_rx.ch);
780 781
782 spi_master_put(master);
783
781 return 0; 784 return 0;
782} 785}
783 786
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 0c675861623f..d8e4219c2324 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -83,6 +83,7 @@ config SSB_SDIOHOST
83config SSB_HOST_SOC 83config SSB_HOST_SOC
84 bool "Support for SSB bus on SoC" 84 bool "Support for SSB bus on SoC"
85 depends on SSB && BCM47XX_NVRAM 85 depends on SSB && BCM47XX_NVRAM
86 select SSB_SPROM
86 help 87 help
87 Host interface for a SSB directly mapped into memory. This is 88 Host interface for a SSB directly mapped into memory. This is
88 for some Broadcom SoCs from the BCM47xx and BCM53xx lines. 89 for some Broadcom SoCs from the BCM47xx and BCM53xx lines.
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 3327c49674d3..713c63d9681b 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -898,7 +898,7 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
898 da->unmap_zeroes_data = flag; 898 da->unmap_zeroes_data = flag;
899 pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n", 899 pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
900 da->da_dev, flag); 900 da->da_dev, flag);
901 return 0; 901 return count;
902} 902}
903 903
904/* 904/*
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index cacd97a8cbd0..da457e25717a 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -828,6 +828,50 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
828 return dev; 828 return dev;
829} 829}
830 830
831/*
832 * Check if the underlying struct block_device request_queue supports
833 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
834 * in ATA and we need to set TPE=1
835 */
836bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
837 struct request_queue *q, int block_size)
838{
839 if (!blk_queue_discard(q))
840 return false;
841
842 attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
843 block_size;
844 /*
845 * Currently hardcoded to 1 in Linux/SCSI code..
846 */
847 attrib->max_unmap_block_desc_count = 1;
848 attrib->unmap_granularity = q->limits.discard_granularity / block_size;
849 attrib->unmap_granularity_alignment = q->limits.discard_alignment /
850 block_size;
851 attrib->unmap_zeroes_data = q->limits.discard_zeroes_data;
852 return true;
853}
854EXPORT_SYMBOL(target_configure_unmap_from_queue);
855
856/*
857 * Convert from blocksize advertised to the initiator to the 512 byte
858 * units unconditionally used by the Linux block layer.
859 */
860sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
861{
862 switch (dev->dev_attrib.block_size) {
863 case 4096:
864 return lb << 3;
865 case 2048:
866 return lb << 2;
867 case 1024:
868 return lb << 1;
869 default:
870 return lb;
871 }
872}
873EXPORT_SYMBOL(target_to_linux_sector);
874
831int target_configure_device(struct se_device *dev) 875int target_configure_device(struct se_device *dev)
832{ 876{
833 struct se_hba *hba = dev->se_hba; 877 struct se_hba *hba = dev->se_hba;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index e3195700211a..75f0f08b2a34 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -160,25 +160,11 @@ static int fd_configure_device(struct se_device *dev)
160 " block_device blocks: %llu logical_block_size: %d\n", 160 " block_device blocks: %llu logical_block_size: %d\n",
161 dev_size, div_u64(dev_size, fd_dev->fd_block_size), 161 dev_size, div_u64(dev_size, fd_dev->fd_block_size),
162 fd_dev->fd_block_size); 162 fd_dev->fd_block_size);
163 /* 163
164 * Check if the underlying struct block_device request_queue supports 164 if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
165 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM 165 fd_dev->fd_block_size))
166 * in ATA and we need to set TPE=1
167 */
168 if (blk_queue_discard(q)) {
169 dev->dev_attrib.max_unmap_lba_count =
170 q->limits.max_discard_sectors;
171 /*
172 * Currently hardcoded to 1 in Linux/SCSI code..
173 */
174 dev->dev_attrib.max_unmap_block_desc_count = 1;
175 dev->dev_attrib.unmap_granularity =
176 q->limits.discard_granularity >> 9;
177 dev->dev_attrib.unmap_granularity_alignment =
178 q->limits.discard_alignment;
179 pr_debug("IFILE: BLOCK Discard support available," 166 pr_debug("IFILE: BLOCK Discard support available,"
180 " disabled by default\n"); 167 " disabled by default\n");
181 }
182 /* 168 /*
183 * Enable write same emulation for IBLOCK and use 0xFFFF as 169 * Enable write same emulation for IBLOCK and use 0xFFFF as
184 * the smaller WRITE_SAME(10) only has a two-byte block count. 170 * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -490,9 +476,12 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
490 if (S_ISBLK(inode->i_mode)) { 476 if (S_ISBLK(inode->i_mode)) {
491 /* The backend is block device, use discard */ 477 /* The backend is block device, use discard */
492 struct block_device *bdev = inode->i_bdev; 478 struct block_device *bdev = inode->i_bdev;
479 struct se_device *dev = cmd->se_dev;
493 480
494 ret = blkdev_issue_discard(bdev, lba, 481 ret = blkdev_issue_discard(bdev,
495 nolb, GFP_KERNEL, 0); 482 target_to_linux_sector(dev, lba),
483 target_to_linux_sector(dev, nolb),
484 GFP_KERNEL, 0);
496 if (ret < 0) { 485 if (ret < 0) {
497 pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n", 486 pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
498 ret); 487 ret);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 5a2899f9f50b..abe4eb997a84 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -121,29 +121,11 @@ static int iblock_configure_device(struct se_device *dev)
121 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); 121 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
122 dev->dev_attrib.hw_queue_depth = q->nr_requests; 122 dev->dev_attrib.hw_queue_depth = q->nr_requests;
123 123
124 /* 124 if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
125 * Check if the underlying struct block_device request_queue supports 125 dev->dev_attrib.hw_block_size))
126 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
127 * in ATA and we need to set TPE=1
128 */
129 if (blk_queue_discard(q)) {
130 dev->dev_attrib.max_unmap_lba_count =
131 q->limits.max_discard_sectors;
132
133 /*
134 * Currently hardcoded to 1 in Linux/SCSI code..
135 */
136 dev->dev_attrib.max_unmap_block_desc_count = 1;
137 dev->dev_attrib.unmap_granularity =
138 q->limits.discard_granularity >> 9;
139 dev->dev_attrib.unmap_granularity_alignment =
140 q->limits.discard_alignment;
141 dev->dev_attrib.unmap_zeroes_data =
142 q->limits.discard_zeroes_data;
143
144 pr_debug("IBLOCK: BLOCK Discard support available," 126 pr_debug("IBLOCK: BLOCK Discard support available,"
145 " disabled by default\n"); 127 " disabled by default\n");
146 } 128
147 /* 129 /*
148 * Enable write same emulation for IBLOCK and use 0xFFFF as 130 * Enable write same emulation for IBLOCK and use 0xFFFF as
149 * the smaller WRITE_SAME(10) only has a two-byte block count. 131 * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -415,9 +397,13 @@ static sense_reason_t
415iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) 397iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
416{ 398{
417 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd; 399 struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
400 struct se_device *dev = cmd->se_dev;
418 int ret; 401 int ret;
419 402
420 ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0); 403 ret = blkdev_issue_discard(bdev,
404 target_to_linux_sector(dev, lba),
405 target_to_linux_sector(dev, nolb),
406 GFP_KERNEL, 0);
421 if (ret < 0) { 407 if (ret < 0) {
422 pr_err("blkdev_issue_discard() failed: %d\n", ret); 408 pr_err("blkdev_issue_discard() failed: %d\n", ret);
423 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 409 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -433,8 +419,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
433 struct scatterlist *sg; 419 struct scatterlist *sg;
434 struct bio *bio; 420 struct bio *bio;
435 struct bio_list list; 421 struct bio_list list;
436 sector_t block_lba = cmd->t_task_lba; 422 struct se_device *dev = cmd->se_dev;
437 sector_t sectors = sbc_get_write_same_sectors(cmd); 423 sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
424 sector_t sectors = target_to_linux_sector(dev,
425 sbc_get_write_same_sectors(cmd));
438 426
439 if (cmd->prot_op) { 427 if (cmd->prot_op) {
440 pr_err("WRITE_SAME: Protection information with IBLOCK" 428 pr_err("WRITE_SAME: Protection information with IBLOCK"
@@ -648,12 +636,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
648 enum dma_data_direction data_direction) 636 enum dma_data_direction data_direction)
649{ 637{
650 struct se_device *dev = cmd->se_dev; 638 struct se_device *dev = cmd->se_dev;
639 sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
651 struct iblock_req *ibr; 640 struct iblock_req *ibr;
652 struct bio *bio, *bio_start; 641 struct bio *bio, *bio_start;
653 struct bio_list list; 642 struct bio_list list;
654 struct scatterlist *sg; 643 struct scatterlist *sg;
655 u32 sg_num = sgl_nents; 644 u32 sg_num = sgl_nents;
656 sector_t block_lba;
657 unsigned bio_cnt; 645 unsigned bio_cnt;
658 int rw = 0; 646 int rw = 0;
659 int i; 647 int i;
@@ -679,24 +667,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
679 rw = READ; 667 rw = READ;
680 } 668 }
681 669
682 /*
683 * Convert the blocksize advertised to the initiator to the 512 byte
684 * units unconditionally used by the Linux block layer.
685 */
686 if (dev->dev_attrib.block_size == 4096)
687 block_lba = (cmd->t_task_lba << 3);
688 else if (dev->dev_attrib.block_size == 2048)
689 block_lba = (cmd->t_task_lba << 2);
690 else if (dev->dev_attrib.block_size == 1024)
691 block_lba = (cmd->t_task_lba << 1);
692 else if (dev->dev_attrib.block_size == 512)
693 block_lba = cmd->t_task_lba;
694 else {
695 pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
696 " %u\n", dev->dev_attrib.block_size);
697 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
698 }
699
700 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); 670 ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
701 if (!ibr) 671 if (!ibr)
702 goto fail; 672 goto fail;
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index dae0750c2032..db4412fe6b8a 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -141,7 +141,6 @@ void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
141int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int); 141int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
142int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int); 142int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
143int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int); 143int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
144bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
145void transport_clear_lun_ref(struct se_lun *); 144void transport_clear_lun_ref(struct se_lun *);
146void transport_send_task_abort(struct se_cmd *); 145void transport_send_task_abort(struct se_cmd *);
147sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); 146sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index fcdcb117c60d..82a663ba9800 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
68 68
69 if (dev) { 69 if (dev) {
70 spin_lock_irqsave(&dev->se_tmr_lock, flags); 70 spin_lock_irqsave(&dev->se_tmr_lock, flags);
71 list_del(&tmr->tmr_list); 71 list_del_init(&tmr->tmr_list);
72 spin_unlock_irqrestore(&dev->se_tmr_lock, flags); 72 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
73 } 73 }
74 74
75 kfree(tmr); 75 kfree(tmr);
76} 76}
77 77
78static void core_tmr_handle_tas_abort( 78static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
79 struct se_node_acl *tmr_nacl,
80 struct se_cmd *cmd,
81 int tas)
82{ 79{
83 bool remove = true; 80 unsigned long flags;
81 bool remove = true, send_tas;
84 /* 82 /*
85 * TASK ABORTED status (TAS) bit support 83 * TASK ABORTED status (TAS) bit support
86 */ 84 */
87 if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) { 85 spin_lock_irqsave(&cmd->t_state_lock, flags);
86 send_tas = (cmd->transport_state & CMD_T_TAS);
87 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
88
89 if (send_tas) {
88 remove = false; 90 remove = false;
89 transport_send_task_abort(cmd); 91 transport_send_task_abort(cmd);
90 } 92 }
@@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list,
107 return 1; 109 return 1;
108} 110}
109 111
112static bool __target_check_io_state(struct se_cmd *se_cmd,
113 struct se_session *tmr_sess, int tas)
114{
115 struct se_session *sess = se_cmd->se_sess;
116
117 assert_spin_locked(&sess->sess_cmd_lock);
118 WARN_ON_ONCE(!irqs_disabled());
119 /*
120 * If command already reached CMD_T_COMPLETE state within
121 * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
122 * this se_cmd has been passed to fabric driver and will
123 * not be aborted.
124 *
125 * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
126 * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
127 * long as se_cmd->cmd_kref is still active unless zero.
128 */
129 spin_lock(&se_cmd->t_state_lock);
130 if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
131 pr_debug("Attempted to abort io tag: %llu already complete or"
132 " fabric stop, skipping\n", se_cmd->tag);
133 spin_unlock(&se_cmd->t_state_lock);
134 return false;
135 }
136 if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
137 pr_debug("Attempted to abort io tag: %llu already shutdown,"
138 " skipping\n", se_cmd->tag);
139 spin_unlock(&se_cmd->t_state_lock);
140 return false;
141 }
142 se_cmd->transport_state |= CMD_T_ABORTED;
143
144 if ((tmr_sess != se_cmd->se_sess) && tas)
145 se_cmd->transport_state |= CMD_T_TAS;
146
147 spin_unlock(&se_cmd->t_state_lock);
148
149 return kref_get_unless_zero(&se_cmd->cmd_kref);
150}
151
110void core_tmr_abort_task( 152void core_tmr_abort_task(
111 struct se_device *dev, 153 struct se_device *dev,
112 struct se_tmr_req *tmr, 154 struct se_tmr_req *tmr,
@@ -130,34 +172,22 @@ void core_tmr_abort_task(
130 if (tmr->ref_task_tag != ref_tag) 172 if (tmr->ref_task_tag != ref_tag)
131 continue; 173 continue;
132 174
133 if (!kref_get_unless_zero(&se_cmd->cmd_kref))
134 continue;
135
136 printk("ABORT_TASK: Found referenced %s task_tag: %llu\n", 175 printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
137 se_cmd->se_tfo->get_fabric_name(), ref_tag); 176 se_cmd->se_tfo->get_fabric_name(), ref_tag);
138 177
139 spin_lock(&se_cmd->t_state_lock); 178 if (!__target_check_io_state(se_cmd, se_sess, 0)) {
140 if (se_cmd->transport_state & CMD_T_COMPLETE) {
141 printk("ABORT_TASK: ref_tag: %llu already complete,"
142 " skipping\n", ref_tag);
143 spin_unlock(&se_cmd->t_state_lock);
144 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 179 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
145
146 target_put_sess_cmd(se_cmd); 180 target_put_sess_cmd(se_cmd);
147
148 goto out; 181 goto out;
149 } 182 }
150 se_cmd->transport_state |= CMD_T_ABORTED;
151 spin_unlock(&se_cmd->t_state_lock);
152
153 list_del_init(&se_cmd->se_cmd_list); 183 list_del_init(&se_cmd->se_cmd_list);
154 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 184 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
155 185
156 cancel_work_sync(&se_cmd->work); 186 cancel_work_sync(&se_cmd->work);
157 transport_wait_for_tasks(se_cmd); 187 transport_wait_for_tasks(se_cmd);
158 188
159 target_put_sess_cmd(se_cmd);
160 transport_cmd_finish_abort(se_cmd, true); 189 transport_cmd_finish_abort(se_cmd, true);
190 target_put_sess_cmd(se_cmd);
161 191
162 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for" 192 printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
163 " ref_tag: %llu\n", ref_tag); 193 " ref_tag: %llu\n", ref_tag);
@@ -178,9 +208,11 @@ static void core_tmr_drain_tmr_list(
178 struct list_head *preempt_and_abort_list) 208 struct list_head *preempt_and_abort_list)
179{ 209{
180 LIST_HEAD(drain_tmr_list); 210 LIST_HEAD(drain_tmr_list);
211 struct se_session *sess;
181 struct se_tmr_req *tmr_p, *tmr_pp; 212 struct se_tmr_req *tmr_p, *tmr_pp;
182 struct se_cmd *cmd; 213 struct se_cmd *cmd;
183 unsigned long flags; 214 unsigned long flags;
215 bool rc;
184 /* 216 /*
185 * Release all pending and outgoing TMRs aside from the received 217 * Release all pending and outgoing TMRs aside from the received
186 * LUN_RESET tmr.. 218 * LUN_RESET tmr..
@@ -206,17 +238,39 @@ static void core_tmr_drain_tmr_list(
206 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd)) 238 if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
207 continue; 239 continue;
208 240
241 sess = cmd->se_sess;
242 if (WARN_ON_ONCE(!sess))
243 continue;
244
245 spin_lock(&sess->sess_cmd_lock);
209 spin_lock(&cmd->t_state_lock); 246 spin_lock(&cmd->t_state_lock);
210 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 247 if (!(cmd->transport_state & CMD_T_ACTIVE) ||
248 (cmd->transport_state & CMD_T_FABRIC_STOP)) {
211 spin_unlock(&cmd->t_state_lock); 249 spin_unlock(&cmd->t_state_lock);
250 spin_unlock(&sess->sess_cmd_lock);
212 continue; 251 continue;
213 } 252 }
214 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { 253 if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
215 spin_unlock(&cmd->t_state_lock); 254 spin_unlock(&cmd->t_state_lock);
255 spin_unlock(&sess->sess_cmd_lock);
216 continue; 256 continue;
217 } 257 }
258 if (sess->sess_tearing_down || cmd->cmd_wait_set) {
259 spin_unlock(&cmd->t_state_lock);
260 spin_unlock(&sess->sess_cmd_lock);
261 continue;
262 }
263 cmd->transport_state |= CMD_T_ABORTED;
218 spin_unlock(&cmd->t_state_lock); 264 spin_unlock(&cmd->t_state_lock);
219 265
266 rc = kref_get_unless_zero(&cmd->cmd_kref);
267 if (!rc) {
268 printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
269 spin_unlock(&sess->sess_cmd_lock);
270 continue;
271 }
272 spin_unlock(&sess->sess_cmd_lock);
273
220 list_move_tail(&tmr_p->tmr_list, &drain_tmr_list); 274 list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
221 } 275 }
222 spin_unlock_irqrestore(&dev->se_tmr_lock, flags); 276 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
@@ -230,20 +284,26 @@ static void core_tmr_drain_tmr_list(
230 (preempt_and_abort_list) ? "Preempt" : "", tmr_p, 284 (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
231 tmr_p->function, tmr_p->response, cmd->t_state); 285 tmr_p->function, tmr_p->response, cmd->t_state);
232 286
287 cancel_work_sync(&cmd->work);
288 transport_wait_for_tasks(cmd);
289
233 transport_cmd_finish_abort(cmd, 1); 290 transport_cmd_finish_abort(cmd, 1);
291 target_put_sess_cmd(cmd);
234 } 292 }
235} 293}
236 294
237static void core_tmr_drain_state_list( 295static void core_tmr_drain_state_list(
238 struct se_device *dev, 296 struct se_device *dev,
239 struct se_cmd *prout_cmd, 297 struct se_cmd *prout_cmd,
240 struct se_node_acl *tmr_nacl, 298 struct se_session *tmr_sess,
241 int tas, 299 int tas,
242 struct list_head *preempt_and_abort_list) 300 struct list_head *preempt_and_abort_list)
243{ 301{
244 LIST_HEAD(drain_task_list); 302 LIST_HEAD(drain_task_list);
303 struct se_session *sess;
245 struct se_cmd *cmd, *next; 304 struct se_cmd *cmd, *next;
246 unsigned long flags; 305 unsigned long flags;
306 int rc;
247 307
248 /* 308 /*
249 * Complete outstanding commands with TASK_ABORTED SAM status. 309 * Complete outstanding commands with TASK_ABORTED SAM status.
@@ -282,6 +342,16 @@ static void core_tmr_drain_state_list(
282 if (prout_cmd == cmd) 342 if (prout_cmd == cmd)
283 continue; 343 continue;
284 344
345 sess = cmd->se_sess;
346 if (WARN_ON_ONCE(!sess))
347 continue;
348
349 spin_lock(&sess->sess_cmd_lock);
350 rc = __target_check_io_state(cmd, tmr_sess, tas);
351 spin_unlock(&sess->sess_cmd_lock);
352 if (!rc)
353 continue;
354
285 list_move_tail(&cmd->state_list, &drain_task_list); 355 list_move_tail(&cmd->state_list, &drain_task_list);
286 cmd->state_active = false; 356 cmd->state_active = false;
287 } 357 }
@@ -289,7 +359,7 @@ static void core_tmr_drain_state_list(
289 359
290 while (!list_empty(&drain_task_list)) { 360 while (!list_empty(&drain_task_list)) {
291 cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); 361 cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
292 list_del(&cmd->state_list); 362 list_del_init(&cmd->state_list);
293 363
294 pr_debug("LUN_RESET: %s cmd: %p" 364 pr_debug("LUN_RESET: %s cmd: %p"
295 " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d" 365 " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
@@ -313,16 +383,11 @@ static void core_tmr_drain_state_list(
313 * loop above, but we do it down here given that 383 * loop above, but we do it down here given that
314 * cancel_work_sync may block. 384 * cancel_work_sync may block.
315 */ 385 */
316 if (cmd->t_state == TRANSPORT_COMPLETE) 386 cancel_work_sync(&cmd->work);
317 cancel_work_sync(&cmd->work); 387 transport_wait_for_tasks(cmd);
318
319 spin_lock_irqsave(&cmd->t_state_lock, flags);
320 target_stop_cmd(cmd, &flags);
321
322 cmd->transport_state |= CMD_T_ABORTED;
323 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
324 388
325 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas); 389 core_tmr_handle_tas_abort(cmd, tas);
390 target_put_sess_cmd(cmd);
326 } 391 }
327} 392}
328 393
@@ -334,6 +399,7 @@ int core_tmr_lun_reset(
334{ 399{
335 struct se_node_acl *tmr_nacl = NULL; 400 struct se_node_acl *tmr_nacl = NULL;
336 struct se_portal_group *tmr_tpg = NULL; 401 struct se_portal_group *tmr_tpg = NULL;
402 struct se_session *tmr_sess = NULL;
337 int tas; 403 int tas;
338 /* 404 /*
339 * TASK_ABORTED status bit, this is configurable via ConfigFS 405 * TASK_ABORTED status bit, this is configurable via ConfigFS
@@ -352,8 +418,9 @@ int core_tmr_lun_reset(
352 * or struct se_device passthrough.. 418 * or struct se_device passthrough..
353 */ 419 */
354 if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { 420 if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
355 tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; 421 tmr_sess = tmr->task_cmd->se_sess;
356 tmr_tpg = tmr->task_cmd->se_sess->se_tpg; 422 tmr_nacl = tmr_sess->se_node_acl;
423 tmr_tpg = tmr_sess->se_tpg;
357 if (tmr_nacl && tmr_tpg) { 424 if (tmr_nacl && tmr_tpg) {
358 pr_debug("LUN_RESET: TMR caller fabric: %s" 425 pr_debug("LUN_RESET: TMR caller fabric: %s"
359 " initiator port %s\n", 426 " initiator port %s\n",
@@ -366,7 +433,7 @@ int core_tmr_lun_reset(
366 dev->transport->name, tas); 433 dev->transport->name, tas);
367 434
368 core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); 435 core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
369 core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas, 436 core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
370 preempt_and_abort_list); 437 preempt_and_abort_list);
371 438
372 /* 439 /*
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 9f3608e10f25..867bc6d0a68a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -534,9 +534,6 @@ void transport_deregister_session(struct se_session *se_sess)
534} 534}
535EXPORT_SYMBOL(transport_deregister_session); 535EXPORT_SYMBOL(transport_deregister_session);
536 536
537/*
538 * Called with cmd->t_state_lock held.
539 */
540static void target_remove_from_state_list(struct se_cmd *cmd) 537static void target_remove_from_state_list(struct se_cmd *cmd)
541{ 538{
542 struct se_device *dev = cmd->se_dev; 539 struct se_device *dev = cmd->se_dev;
@@ -561,10 +558,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
561{ 558{
562 unsigned long flags; 559 unsigned long flags;
563 560
564 spin_lock_irqsave(&cmd->t_state_lock, flags);
565 if (write_pending)
566 cmd->t_state = TRANSPORT_WRITE_PENDING;
567
568 if (remove_from_lists) { 561 if (remove_from_lists) {
569 target_remove_from_state_list(cmd); 562 target_remove_from_state_list(cmd);
570 563
@@ -574,6 +567,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
574 cmd->se_lun = NULL; 567 cmd->se_lun = NULL;
575 } 568 }
576 569
570 spin_lock_irqsave(&cmd->t_state_lock, flags);
571 if (write_pending)
572 cmd->t_state = TRANSPORT_WRITE_PENDING;
573
577 /* 574 /*
578 * Determine if frontend context caller is requesting the stopping of 575 * Determine if frontend context caller is requesting the stopping of
579 * this command for frontend exceptions. 576 * this command for frontend exceptions.
@@ -627,6 +624,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
627 624
628void transport_cmd_finish_abort(struct se_cmd *cmd, int remove) 625void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
629{ 626{
627 bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
628
630 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) 629 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
631 transport_lun_remove_cmd(cmd); 630 transport_lun_remove_cmd(cmd);
632 /* 631 /*
@@ -638,7 +637,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
638 637
639 if (transport_cmd_check_stop_to_fabric(cmd)) 638 if (transport_cmd_check_stop_to_fabric(cmd))
640 return; 639 return;
641 if (remove) 640 if (remove && ack_kref)
642 transport_put_cmd(cmd); 641 transport_put_cmd(cmd);
643} 642}
644 643
@@ -694,19 +693,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
694 } 693 }
695 694
696 /* 695 /*
697 * See if we are waiting to complete for an exception condition.
698 */
699 if (cmd->transport_state & CMD_T_REQUEST_STOP) {
700 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
701 complete(&cmd->task_stop_comp);
702 return;
703 }
704
705 /*
706 * Check for case where an explicit ABORT_TASK has been received 696 * Check for case where an explicit ABORT_TASK has been received
707 * and transport_wait_for_tasks() will be waiting for completion.. 697 * and transport_wait_for_tasks() will be waiting for completion..
708 */ 698 */
709 if (cmd->transport_state & CMD_T_ABORTED && 699 if (cmd->transport_state & CMD_T_ABORTED ||
710 cmd->transport_state & CMD_T_STOP) { 700 cmd->transport_state & CMD_T_STOP) {
711 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 701 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
712 complete_all(&cmd->t_transport_stop_comp); 702 complete_all(&cmd->t_transport_stop_comp);
@@ -721,10 +711,10 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
721 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); 711 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
722 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 712 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
723 713
724 if (cmd->cpuid == -1) 714 if (cmd->se_cmd_flags & SCF_USE_CPUID)
725 queue_work(target_completion_wq, &cmd->work);
726 else
727 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work); 715 queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
716 else
717 queue_work(target_completion_wq, &cmd->work);
728} 718}
729EXPORT_SYMBOL(target_complete_cmd); 719EXPORT_SYMBOL(target_complete_cmd);
730 720
@@ -1203,7 +1193,6 @@ void transport_init_se_cmd(
1203 INIT_LIST_HEAD(&cmd->state_list); 1193 INIT_LIST_HEAD(&cmd->state_list);
1204 init_completion(&cmd->t_transport_stop_comp); 1194 init_completion(&cmd->t_transport_stop_comp);
1205 init_completion(&cmd->cmd_wait_comp); 1195 init_completion(&cmd->cmd_wait_comp);
1206 init_completion(&cmd->task_stop_comp);
1207 spin_lock_init(&cmd->t_state_lock); 1196 spin_lock_init(&cmd->t_state_lock);
1208 kref_init(&cmd->cmd_kref); 1197 kref_init(&cmd->cmd_kref);
1209 cmd->transport_state = CMD_T_DEV_ACTIVE; 1198 cmd->transport_state = CMD_T_DEV_ACTIVE;
@@ -1437,6 +1426,12 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
1437 */ 1426 */
1438 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 1427 transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1439 data_length, data_dir, task_attr, sense); 1428 data_length, data_dir, task_attr, sense);
1429
1430 if (flags & TARGET_SCF_USE_CPUID)
1431 se_cmd->se_cmd_flags |= SCF_USE_CPUID;
1432 else
1433 se_cmd->cpuid = WORK_CPU_UNBOUND;
1434
1440 if (flags & TARGET_SCF_UNKNOWN_SIZE) 1435 if (flags & TARGET_SCF_UNKNOWN_SIZE)
1441 se_cmd->unknown_data_length = 1; 1436 se_cmd->unknown_data_length = 1;
1442 /* 1437 /*
@@ -1635,33 +1630,6 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1635EXPORT_SYMBOL(target_submit_tmr); 1630EXPORT_SYMBOL(target_submit_tmr);
1636 1631
1637/* 1632/*
1638 * If the cmd is active, request it to be stopped and sleep until it
1639 * has completed.
1640 */
1641bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
1642 __releases(&cmd->t_state_lock)
1643 __acquires(&cmd->t_state_lock)
1644{
1645 bool was_active = false;
1646
1647 if (cmd->transport_state & CMD_T_BUSY) {
1648 cmd->transport_state |= CMD_T_REQUEST_STOP;
1649 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
1650
1651 pr_debug("cmd %p waiting to complete\n", cmd);
1652 wait_for_completion(&cmd->task_stop_comp);
1653 pr_debug("cmd %p stopped successfully\n", cmd);
1654
1655 spin_lock_irqsave(&cmd->t_state_lock, *flags);
1656 cmd->transport_state &= ~CMD_T_REQUEST_STOP;
1657 cmd->transport_state &= ~CMD_T_BUSY;
1658 was_active = true;
1659 }
1660
1661 return was_active;
1662}
1663
1664/*
1665 * Handle SAM-esque emulation for generic transport request failures. 1633 * Handle SAM-esque emulation for generic transport request failures.
1666 */ 1634 */
1667void transport_generic_request_failure(struct se_cmd *cmd, 1635void transport_generic_request_failure(struct se_cmd *cmd,
@@ -1859,19 +1827,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1859 return true; 1827 return true;
1860} 1828}
1861 1829
1830static int __transport_check_aborted_status(struct se_cmd *, int);
1831
1862void target_execute_cmd(struct se_cmd *cmd) 1832void target_execute_cmd(struct se_cmd *cmd)
1863{ 1833{
1864 /* 1834 /*
1865 * If the received CDB has aleady been aborted stop processing it here.
1866 */
1867 if (transport_check_aborted_status(cmd, 1))
1868 return;
1869
1870 /*
1871 * Determine if frontend context caller is requesting the stopping of 1835 * Determine if frontend context caller is requesting the stopping of
1872 * this command for frontend exceptions. 1836 * this command for frontend exceptions.
1837 *
1838 * If the received CDB has aleady been aborted stop processing it here.
1873 */ 1839 */
1874 spin_lock_irq(&cmd->t_state_lock); 1840 spin_lock_irq(&cmd->t_state_lock);
1841 if (__transport_check_aborted_status(cmd, 1)) {
1842 spin_unlock_irq(&cmd->t_state_lock);
1843 return;
1844 }
1875 if (cmd->transport_state & CMD_T_STOP) { 1845 if (cmd->transport_state & CMD_T_STOP) {
1876 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", 1846 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
1877 __func__, __LINE__, cmd->tag); 1847 __func__, __LINE__, cmd->tag);
@@ -2222,20 +2192,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
2222} 2192}
2223 2193
2224/** 2194/**
2225 * transport_release_cmd - free a command 2195 * transport_put_cmd - release a reference to a command
2226 * @cmd: command to free 2196 * @cmd: command to release
2227 * 2197 *
2228 * This routine unconditionally frees a command, and reference counting 2198 * This routine releases our reference to the command and frees it if possible.
2229 * or list removal must be done in the caller.
2230 */ 2199 */
2231static int transport_release_cmd(struct se_cmd *cmd) 2200static int transport_put_cmd(struct se_cmd *cmd)
2232{ 2201{
2233 BUG_ON(!cmd->se_tfo); 2202 BUG_ON(!cmd->se_tfo);
2234
2235 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2236 core_tmr_release_req(cmd->se_tmr_req);
2237 if (cmd->t_task_cdb != cmd->__t_task_cdb)
2238 kfree(cmd->t_task_cdb);
2239 /* 2203 /*
2240 * If this cmd has been setup with target_get_sess_cmd(), drop 2204 * If this cmd has been setup with target_get_sess_cmd(), drop
2241 * the kref and call ->release_cmd() in kref callback. 2205 * the kref and call ->release_cmd() in kref callback.
@@ -2243,18 +2207,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
2243 return target_put_sess_cmd(cmd); 2207 return target_put_sess_cmd(cmd);
2244} 2208}
2245 2209
2246/**
2247 * transport_put_cmd - release a reference to a command
2248 * @cmd: command to release
2249 *
2250 * This routine releases our reference to the command and frees it if possible.
2251 */
2252static int transport_put_cmd(struct se_cmd *cmd)
2253{
2254 transport_free_pages(cmd);
2255 return transport_release_cmd(cmd);
2256}
2257
2258void *transport_kmap_data_sg(struct se_cmd *cmd) 2210void *transport_kmap_data_sg(struct se_cmd *cmd)
2259{ 2211{
2260 struct scatterlist *sg = cmd->t_data_sg; 2212 struct scatterlist *sg = cmd->t_data_sg;
@@ -2450,34 +2402,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
2450 } 2402 }
2451} 2403}
2452 2404
2453int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) 2405static bool
2406__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2407 unsigned long *flags);
2408
2409static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2454{ 2410{
2455 unsigned long flags; 2411 unsigned long flags;
2412
2413 spin_lock_irqsave(&cmd->t_state_lock, flags);
2414 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2415 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2416}
2417
2418int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2419{
2456 int ret = 0; 2420 int ret = 0;
2421 bool aborted = false, tas = false;
2457 2422
2458 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { 2423 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
2459 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) 2424 if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2460 transport_wait_for_tasks(cmd); 2425 target_wait_free_cmd(cmd, &aborted, &tas);
2461 2426
2462 ret = transport_release_cmd(cmd); 2427 if (!aborted || tas)
2428 ret = transport_put_cmd(cmd);
2463 } else { 2429 } else {
2464 if (wait_for_tasks) 2430 if (wait_for_tasks)
2465 transport_wait_for_tasks(cmd); 2431 target_wait_free_cmd(cmd, &aborted, &tas);
2466 /* 2432 /*
2467 * Handle WRITE failure case where transport_generic_new_cmd() 2433 * Handle WRITE failure case where transport_generic_new_cmd()
2468 * has already added se_cmd to state_list, but fabric has 2434 * has already added se_cmd to state_list, but fabric has
2469 * failed command before I/O submission. 2435 * failed command before I/O submission.
2470 */ 2436 */
2471 if (cmd->state_active) { 2437 if (cmd->state_active)
2472 spin_lock_irqsave(&cmd->t_state_lock, flags);
2473 target_remove_from_state_list(cmd); 2438 target_remove_from_state_list(cmd);
2474 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2475 }
2476 2439
2477 if (cmd->se_lun) 2440 if (cmd->se_lun)
2478 transport_lun_remove_cmd(cmd); 2441 transport_lun_remove_cmd(cmd);
2479 2442
2480 ret = transport_put_cmd(cmd); 2443 if (!aborted || tas)
2444 ret = transport_put_cmd(cmd);
2445 }
2446 /*
2447 * If the task has been internally aborted due to TMR ABORT_TASK
2448 * or LUN_RESET, target_core_tmr.c is responsible for performing
2449 * the remaining calls to target_put_sess_cmd(), and not the
2450 * callers of this function.
2451 */
2452 if (aborted) {
2453 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2454 wait_for_completion(&cmd->cmd_wait_comp);
2455 cmd->se_tfo->release_cmd(cmd);
2456 ret = 1;
2481 } 2457 }
2482 return ret; 2458 return ret;
2483} 2459}
@@ -2517,26 +2493,46 @@ out:
2517} 2493}
2518EXPORT_SYMBOL(target_get_sess_cmd); 2494EXPORT_SYMBOL(target_get_sess_cmd);
2519 2495
2496static void target_free_cmd_mem(struct se_cmd *cmd)
2497{
2498 transport_free_pages(cmd);
2499
2500 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2501 core_tmr_release_req(cmd->se_tmr_req);
2502 if (cmd->t_task_cdb != cmd->__t_task_cdb)
2503 kfree(cmd->t_task_cdb);
2504}
2505
2520static void target_release_cmd_kref(struct kref *kref) 2506static void target_release_cmd_kref(struct kref *kref)
2521{ 2507{
2522 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2508 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2523 struct se_session *se_sess = se_cmd->se_sess; 2509 struct se_session *se_sess = se_cmd->se_sess;
2524 unsigned long flags; 2510 unsigned long flags;
2511 bool fabric_stop;
2525 2512
2526 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2513 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2527 if (list_empty(&se_cmd->se_cmd_list)) { 2514 if (list_empty(&se_cmd->se_cmd_list)) {
2528 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2515 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2516 target_free_cmd_mem(se_cmd);
2529 se_cmd->se_tfo->release_cmd(se_cmd); 2517 se_cmd->se_tfo->release_cmd(se_cmd);
2530 return; 2518 return;
2531 } 2519 }
2532 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 2520
2521 spin_lock(&se_cmd->t_state_lock);
2522 fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
2523 spin_unlock(&se_cmd->t_state_lock);
2524
2525 if (se_cmd->cmd_wait_set || fabric_stop) {
2526 list_del_init(&se_cmd->se_cmd_list);
2533 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2527 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2528 target_free_cmd_mem(se_cmd);
2534 complete(&se_cmd->cmd_wait_comp); 2529 complete(&se_cmd->cmd_wait_comp);
2535 return; 2530 return;
2536 } 2531 }
2537 list_del(&se_cmd->se_cmd_list); 2532 list_del_init(&se_cmd->se_cmd_list);
2538 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2533 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2539 2534
2535 target_free_cmd_mem(se_cmd);
2540 se_cmd->se_tfo->release_cmd(se_cmd); 2536 se_cmd->se_tfo->release_cmd(se_cmd);
2541} 2537}
2542 2538
@@ -2548,6 +2544,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
2548 struct se_session *se_sess = se_cmd->se_sess; 2544 struct se_session *se_sess = se_cmd->se_sess;
2549 2545
2550 if (!se_sess) { 2546 if (!se_sess) {
2547 target_free_cmd_mem(se_cmd);
2551 se_cmd->se_tfo->release_cmd(se_cmd); 2548 se_cmd->se_tfo->release_cmd(se_cmd);
2552 return 1; 2549 return 1;
2553 } 2550 }
@@ -2564,6 +2561,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2564{ 2561{
2565 struct se_cmd *se_cmd; 2562 struct se_cmd *se_cmd;
2566 unsigned long flags; 2563 unsigned long flags;
2564 int rc;
2567 2565
2568 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 2566 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2569 if (se_sess->sess_tearing_down) { 2567 if (se_sess->sess_tearing_down) {
@@ -2573,8 +2571,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
2573 se_sess->sess_tearing_down = 1; 2571 se_sess->sess_tearing_down = 1;
2574 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); 2572 list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
2575 2573
2576 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) 2574 list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
2577 se_cmd->cmd_wait_set = 1; 2575 rc = kref_get_unless_zero(&se_cmd->cmd_kref);
2576 if (rc) {
2577 se_cmd->cmd_wait_set = 1;
2578 spin_lock(&se_cmd->t_state_lock);
2579 se_cmd->transport_state |= CMD_T_FABRIC_STOP;
2580 spin_unlock(&se_cmd->t_state_lock);
2581 }
2582 }
2578 2583
2579 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2584 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2580} 2585}
@@ -2587,15 +2592,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
2587{ 2592{
2588 struct se_cmd *se_cmd, *tmp_cmd; 2593 struct se_cmd *se_cmd, *tmp_cmd;
2589 unsigned long flags; 2594 unsigned long flags;
2595 bool tas;
2590 2596
2591 list_for_each_entry_safe(se_cmd, tmp_cmd, 2597 list_for_each_entry_safe(se_cmd, tmp_cmd,
2592 &se_sess->sess_wait_list, se_cmd_list) { 2598 &se_sess->sess_wait_list, se_cmd_list) {
2593 list_del(&se_cmd->se_cmd_list); 2599 list_del_init(&se_cmd->se_cmd_list);
2594 2600
2595 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" 2601 pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
2596 " %d\n", se_cmd, se_cmd->t_state, 2602 " %d\n", se_cmd, se_cmd->t_state,
2597 se_cmd->se_tfo->get_cmd_state(se_cmd)); 2603 se_cmd->se_tfo->get_cmd_state(se_cmd));
2598 2604
2605 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
2606 tas = (se_cmd->transport_state & CMD_T_TAS);
2607 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
2608
2609 if (!target_put_sess_cmd(se_cmd)) {
2610 if (tas)
2611 target_put_sess_cmd(se_cmd);
2612 }
2613
2599 wait_for_completion(&se_cmd->cmd_wait_comp); 2614 wait_for_completion(&se_cmd->cmd_wait_comp);
2600 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" 2615 pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
2601 " fabric state: %d\n", se_cmd, se_cmd->t_state, 2616 " fabric state: %d\n", se_cmd, se_cmd->t_state,
@@ -2617,53 +2632,75 @@ void transport_clear_lun_ref(struct se_lun *lun)
2617 wait_for_completion(&lun->lun_ref_comp); 2632 wait_for_completion(&lun->lun_ref_comp);
2618} 2633}
2619 2634
2620/** 2635static bool
2621 * transport_wait_for_tasks - wait for completion to occur 2636__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
2622 * @cmd: command to wait 2637 bool *aborted, bool *tas, unsigned long *flags)
2623 * 2638 __releases(&cmd->t_state_lock)
2624 * Called from frontend fabric context to wait for storage engine 2639 __acquires(&cmd->t_state_lock)
2625 * to pause and/or release frontend generated struct se_cmd.
2626 */
2627bool transport_wait_for_tasks(struct se_cmd *cmd)
2628{ 2640{
2629 unsigned long flags;
2630 2641
2631 spin_lock_irqsave(&cmd->t_state_lock, flags); 2642 assert_spin_locked(&cmd->t_state_lock);
2643 WARN_ON_ONCE(!irqs_disabled());
2644
2645 if (fabric_stop)
2646 cmd->transport_state |= CMD_T_FABRIC_STOP;
2647
2648 if (cmd->transport_state & CMD_T_ABORTED)
2649 *aborted = true;
2650
2651 if (cmd->transport_state & CMD_T_TAS)
2652 *tas = true;
2653
2632 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && 2654 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
2633 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2655 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2634 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2635 return false; 2656 return false;
2636 }
2637 2657
2638 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && 2658 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
2639 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { 2659 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
2640 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2641 return false; 2660 return false;
2642 }
2643 2661
2644 if (!(cmd->transport_state & CMD_T_ACTIVE)) { 2662 if (!(cmd->transport_state & CMD_T_ACTIVE))
2645 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2663 return false;
2664
2665 if (fabric_stop && *aborted)
2646 return false; 2666 return false;
2647 }
2648 2667
2649 cmd->transport_state |= CMD_T_STOP; 2668 cmd->transport_state |= CMD_T_STOP;
2650 2669
2651 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n", 2670 pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
2652 cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); 2671 " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
2672 cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
2653 2673
2654 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2674 spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
2655 2675
2656 wait_for_completion(&cmd->t_transport_stop_comp); 2676 wait_for_completion(&cmd->t_transport_stop_comp);
2657 2677
2658 spin_lock_irqsave(&cmd->t_state_lock, flags); 2678 spin_lock_irqsave(&cmd->t_state_lock, *flags);
2659 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); 2679 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
2660 2680
2661 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n", 2681 pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
2662 cmd->tag); 2682 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
2663 2683
2684 return true;
2685}
2686
2687/**
2688 * transport_wait_for_tasks - wait for completion to occur
2689 * @cmd: command to wait
2690 *
2691 * Called from frontend fabric context to wait for storage engine
2692 * to pause and/or release frontend generated struct se_cmd.
2693 */
2694bool transport_wait_for_tasks(struct se_cmd *cmd)
2695{
2696 unsigned long flags;
2697 bool ret, aborted = false, tas = false;
2698
2699 spin_lock_irqsave(&cmd->t_state_lock, flags);
2700 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
2664 spin_unlock_irqrestore(&cmd->t_state_lock, flags); 2701 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2665 2702
2666 return true; 2703 return ret;
2667} 2704}
2668EXPORT_SYMBOL(transport_wait_for_tasks); 2705EXPORT_SYMBOL(transport_wait_for_tasks);
2669 2706
@@ -2845,28 +2882,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
2845} 2882}
2846EXPORT_SYMBOL(transport_send_check_condition_and_sense); 2883EXPORT_SYMBOL(transport_send_check_condition_and_sense);
2847 2884
2848int transport_check_aborted_status(struct se_cmd *cmd, int send_status) 2885static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2886 __releases(&cmd->t_state_lock)
2887 __acquires(&cmd->t_state_lock)
2849{ 2888{
2889 assert_spin_locked(&cmd->t_state_lock);
2890 WARN_ON_ONCE(!irqs_disabled());
2891
2850 if (!(cmd->transport_state & CMD_T_ABORTED)) 2892 if (!(cmd->transport_state & CMD_T_ABORTED))
2851 return 0; 2893 return 0;
2852
2853 /* 2894 /*
2854 * If cmd has been aborted but either no status is to be sent or it has 2895 * If cmd has been aborted but either no status is to be sent or it has
2855 * already been sent, just return 2896 * already been sent, just return
2856 */ 2897 */
2857 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) 2898 if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
2899 if (send_status)
2900 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
2858 return 1; 2901 return 1;
2902 }
2859 2903
2860 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n", 2904 pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
2861 cmd->t_task_cdb[0], cmd->tag); 2905 " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
2862 2906
2863 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS; 2907 cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
2864 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2908 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
2865 trace_target_cmd_complete(cmd); 2909 trace_target_cmd_complete(cmd);
2910
2911 spin_unlock_irq(&cmd->t_state_lock);
2866 cmd->se_tfo->queue_status(cmd); 2912 cmd->se_tfo->queue_status(cmd);
2913 spin_lock_irq(&cmd->t_state_lock);
2867 2914
2868 return 1; 2915 return 1;
2869} 2916}
2917
2918int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
2919{
2920 int ret;
2921
2922 spin_lock_irq(&cmd->t_state_lock);
2923 ret = __transport_check_aborted_status(cmd, send_status);
2924 spin_unlock_irq(&cmd->t_state_lock);
2925
2926 return ret;
2927}
2870EXPORT_SYMBOL(transport_check_aborted_status); 2928EXPORT_SYMBOL(transport_check_aborted_status);
2871 2929
2872void transport_send_task_abort(struct se_cmd *cmd) 2930void transport_send_task_abort(struct se_cmd *cmd)
@@ -2888,11 +2946,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
2888 */ 2946 */
2889 if (cmd->data_direction == DMA_TO_DEVICE) { 2947 if (cmd->data_direction == DMA_TO_DEVICE) {
2890 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2948 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
2891 cmd->transport_state |= CMD_T_ABORTED; 2949 spin_lock_irqsave(&cmd->t_state_lock, flags);
2950 if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
2951 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2952 goto send_abort;
2953 }
2892 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 2954 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
2955 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2893 return; 2956 return;
2894 } 2957 }
2895 } 2958 }
2959send_abort:
2896 cmd->scsi_status = SAM_STAT_TASK_ABORTED; 2960 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
2897 2961
2898 transport_lun_remove_cmd(cmd); 2962 transport_lun_remove_cmd(cmd);
@@ -2909,8 +2973,17 @@ static void target_tmr_work(struct work_struct *work)
2909 struct se_cmd *cmd = container_of(work, struct se_cmd, work); 2973 struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2910 struct se_device *dev = cmd->se_dev; 2974 struct se_device *dev = cmd->se_dev;
2911 struct se_tmr_req *tmr = cmd->se_tmr_req; 2975 struct se_tmr_req *tmr = cmd->se_tmr_req;
2976 unsigned long flags;
2912 int ret; 2977 int ret;
2913 2978
2979 spin_lock_irqsave(&cmd->t_state_lock, flags);
2980 if (cmd->transport_state & CMD_T_ABORTED) {
2981 tmr->response = TMR_FUNCTION_REJECTED;
2982 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2983 goto check_stop;
2984 }
2985 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2986
2914 switch (tmr->function) { 2987 switch (tmr->function) {
2915 case TMR_ABORT_TASK: 2988 case TMR_ABORT_TASK:
2916 core_tmr_abort_task(dev, tmr, cmd->se_sess); 2989 core_tmr_abort_task(dev, tmr, cmd->se_sess);
@@ -2943,9 +3016,17 @@ static void target_tmr_work(struct work_struct *work)
2943 break; 3016 break;
2944 } 3017 }
2945 3018
3019 spin_lock_irqsave(&cmd->t_state_lock, flags);
3020 if (cmd->transport_state & CMD_T_ABORTED) {
3021 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3022 goto check_stop;
3023 }
2946 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; 3024 cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
3025 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
3026
2947 cmd->se_tfo->queue_tm_rsp(cmd); 3027 cmd->se_tfo->queue_tm_rsp(cmd);
2948 3028
3029check_stop:
2949 transport_cmd_check_stop_to_fabric(cmd); 3030 transport_cmd_check_stop_to_fabric(cmd);
2950} 3031}
2951 3032
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index dd600e5ead71..94f5154ac788 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -903,7 +903,7 @@ static int tcmu_configure_device(struct se_device *dev)
903 info->version = __stringify(TCMU_MAILBOX_VERSION); 903 info->version = __stringify(TCMU_MAILBOX_VERSION);
904 904
905 info->mem[0].name = "tcm-user command & data buffer"; 905 info->mem[0].name = "tcm-user command & data buffer";
906 info->mem[0].addr = (phys_addr_t) udev->mb_addr; 906 info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
907 info->mem[0].size = TCMU_RING_SIZE; 907 info->mem[0].size = TCMU_RING_SIZE;
908 info->mem[0].memtype = UIO_MEM_VIRTUAL; 908 info->mem[0].memtype = UIO_MEM_VIRTUAL;
909 909
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 8cc4ac64a91c..7c92c09be213 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -195,7 +195,7 @@ config IMX_THERMAL
195 passive trip is crossed. 195 passive trip is crossed.
196 196
197config SPEAR_THERMAL 197config SPEAR_THERMAL
198 bool "SPEAr thermal sensor driver" 198 tristate "SPEAr thermal sensor driver"
199 depends on PLAT_SPEAR || COMPILE_TEST 199 depends on PLAT_SPEAR || COMPILE_TEST
200 depends on OF 200 depends on OF
201 help 201 help
@@ -237,8 +237,8 @@ config DOVE_THERMAL
237 framework. 237 framework.
238 238
239config DB8500_THERMAL 239config DB8500_THERMAL
240 bool "DB8500 thermal management" 240 tristate "DB8500 thermal management"
241 depends on ARCH_U8500 241 depends on MFD_DB8500_PRCMU
242 default y 242 default y
243 help 243 help
244 Adds DB8500 thermal management implementation according to the thermal 244 Adds DB8500 thermal management implementation according to the thermal
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index e3fbc5a5d88f..6ceac4f2d4b2 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -377,26 +377,28 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device,
377 * get_load() - get load for a cpu since last updated 377 * get_load() - get load for a cpu since last updated
378 * @cpufreq_device: &struct cpufreq_cooling_device for this cpu 378 * @cpufreq_device: &struct cpufreq_cooling_device for this cpu
379 * @cpu: cpu number 379 * @cpu: cpu number
380 * @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus
380 * 381 *
381 * Return: The average load of cpu @cpu in percentage since this 382 * Return: The average load of cpu @cpu in percentage since this
382 * function was last called. 383 * function was last called.
383 */ 384 */
384static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu) 385static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
386 int cpu_idx)
385{ 387{
386 u32 load; 388 u32 load;
387 u64 now, now_idle, delta_time, delta_idle; 389 u64 now, now_idle, delta_time, delta_idle;
388 390
389 now_idle = get_cpu_idle_time(cpu, &now, 0); 391 now_idle = get_cpu_idle_time(cpu, &now, 0);
390 delta_idle = now_idle - cpufreq_device->time_in_idle[cpu]; 392 delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx];
391 delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu]; 393 delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx];
392 394
393 if (delta_time <= delta_idle) 395 if (delta_time <= delta_idle)
394 load = 0; 396 load = 0;
395 else 397 else
396 load = div64_u64(100 * (delta_time - delta_idle), delta_time); 398 load = div64_u64(100 * (delta_time - delta_idle), delta_time);
397 399
398 cpufreq_device->time_in_idle[cpu] = now_idle; 400 cpufreq_device->time_in_idle[cpu_idx] = now_idle;
399 cpufreq_device->time_in_idle_timestamp[cpu] = now; 401 cpufreq_device->time_in_idle_timestamp[cpu_idx] = now;
400 402
401 return load; 403 return load;
402} 404}
@@ -598,7 +600,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
598 u32 load; 600 u32 load;
599 601
600 if (cpu_online(cpu)) 602 if (cpu_online(cpu))
601 load = get_load(cpufreq_device, cpu); 603 load = get_load(cpufreq_device, cpu, i);
602 else 604 else
603 load = 0; 605 load = 0;
604 606
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index be4eedcb839a..9043f8f91852 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -475,14 +475,10 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
475 475
476 sensor_np = of_node_get(dev->of_node); 476 sensor_np = of_node_get(dev->of_node);
477 477
478 for_each_child_of_node(np, child) { 478 for_each_available_child_of_node(np, child) {
479 struct of_phandle_args sensor_specs; 479 struct of_phandle_args sensor_specs;
480 int ret, id; 480 int ret, id;
481 481
482 /* Check whether child is enabled or not */
483 if (!of_device_is_available(child))
484 continue;
485
486 /* For now, thermal framework supports only 1 sensor per zone */ 482 /* For now, thermal framework supports only 1 sensor per zone */
487 ret = of_parse_phandle_with_args(child, "thermal-sensors", 483 ret = of_parse_phandle_with_args(child, "thermal-sensors",
488 "#thermal-sensor-cells", 484 "#thermal-sensor-cells",
@@ -881,16 +877,12 @@ int __init of_parse_thermal_zones(void)
881 return 0; /* Run successfully on systems without thermal DT */ 877 return 0; /* Run successfully on systems without thermal DT */
882 } 878 }
883 879
884 for_each_child_of_node(np, child) { 880 for_each_available_child_of_node(np, child) {
885 struct thermal_zone_device *zone; 881 struct thermal_zone_device *zone;
886 struct thermal_zone_params *tzp; 882 struct thermal_zone_params *tzp;
887 int i, mask = 0; 883 int i, mask = 0;
888 u32 prop; 884 u32 prop;
889 885
890 /* Check whether child is enabled or not */
891 if (!of_device_is_available(child))
892 continue;
893
894 tz = thermal_of_build_thermal_zone(child); 886 tz = thermal_of_build_thermal_zone(child);
895 if (IS_ERR(tz)) { 887 if (IS_ERR(tz)) {
896 pr_err("failed to build thermal zone %s: %ld\n", 888 pr_err("failed to build thermal zone %s: %ld\n",
@@ -968,13 +960,9 @@ void of_thermal_destroy_zones(void)
968 return; 960 return;
969 } 961 }
970 962
971 for_each_child_of_node(np, child) { 963 for_each_available_child_of_node(np, child) {
972 struct thermal_zone_device *zone; 964 struct thermal_zone_device *zone;
973 965
974 /* Check whether child is enabled or not */
975 if (!of_device_is_available(child))
976 continue;
977
978 zone = thermal_zone_get_zone_by_name(child->name); 966 zone = thermal_zone_get_zone_by_name(child->name);
979 if (IS_ERR(zone)) 967 if (IS_ERR(zone))
980 continue; 968 continue;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 44b9c485157d..0e735acea33a 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -23,6 +23,7 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/io.h> 24#include <linux/io.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/of_device.h>
26#include <linux/platform_device.h> 27#include <linux/platform_device.h>
27#include <linux/pm_runtime.h> 28#include <linux/pm_runtime.h>
28#include <linux/reboot.h> 29#include <linux/reboot.h>
@@ -75,8 +76,10 @@ struct rcar_thermal_priv {
75#define rcar_has_irq_support(priv) ((priv)->common->base) 76#define rcar_has_irq_support(priv) ((priv)->common->base)
76#define rcar_id_to_shift(priv) ((priv)->id * 8) 77#define rcar_id_to_shift(priv) ((priv)->id * 8)
77 78
79#define USE_OF_THERMAL 1
78static const struct of_device_id rcar_thermal_dt_ids[] = { 80static const struct of_device_id rcar_thermal_dt_ids[] = {
79 { .compatible = "renesas,rcar-thermal", }, 81 { .compatible = "renesas,rcar-thermal", },
82 { .compatible = "renesas,rcar-gen2-thermal", .data = (void *)USE_OF_THERMAL },
80 {}, 83 {},
81}; 84};
82MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids); 85MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);
@@ -200,9 +203,9 @@ err_out_unlock:
200 return ret; 203 return ret;
201} 204}
202 205
203static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp) 206static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
207 int *temp)
204{ 208{
205 struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
206 int tmp; 209 int tmp;
207 int ret; 210 int ret;
208 211
@@ -226,6 +229,20 @@ static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
226 return 0; 229 return 0;
227} 230}
228 231
232static int rcar_thermal_of_get_temp(void *data, int *temp)
233{
234 struct rcar_thermal_priv *priv = data;
235
236 return rcar_thermal_get_current_temp(priv, temp);
237}
238
239static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
240{
241 struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
242
243 return rcar_thermal_get_current_temp(priv, temp);
244}
245
229static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone, 246static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
230 int trip, enum thermal_trip_type *type) 247 int trip, enum thermal_trip_type *type)
231{ 248{
@@ -282,6 +299,10 @@ static int rcar_thermal_notify(struct thermal_zone_device *zone,
282 return 0; 299 return 0;
283} 300}
284 301
302static const struct thermal_zone_of_device_ops rcar_thermal_zone_of_ops = {
303 .get_temp = rcar_thermal_of_get_temp,
304};
305
285static struct thermal_zone_device_ops rcar_thermal_zone_ops = { 306static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
286 .get_temp = rcar_thermal_get_temp, 307 .get_temp = rcar_thermal_get_temp,
287 .get_trip_type = rcar_thermal_get_trip_type, 308 .get_trip_type = rcar_thermal_get_trip_type,
@@ -318,14 +339,20 @@ static void rcar_thermal_work(struct work_struct *work)
318 339
319 priv = container_of(work, struct rcar_thermal_priv, work.work); 340 priv = container_of(work, struct rcar_thermal_priv, work.work);
320 341
321 rcar_thermal_get_temp(priv->zone, &cctemp); 342 ret = rcar_thermal_get_current_temp(priv, &cctemp);
343 if (ret < 0)
344 return;
345
322 ret = rcar_thermal_update_temp(priv); 346 ret = rcar_thermal_update_temp(priv);
323 if (ret < 0) 347 if (ret < 0)
324 return; 348 return;
325 349
326 rcar_thermal_irq_enable(priv); 350 rcar_thermal_irq_enable(priv);
327 351
328 rcar_thermal_get_temp(priv->zone, &nctemp); 352 ret = rcar_thermal_get_current_temp(priv, &nctemp);
353 if (ret < 0)
354 return;
355
329 if (nctemp != cctemp) 356 if (nctemp != cctemp)
330 thermal_zone_device_update(priv->zone); 357 thermal_zone_device_update(priv->zone);
331} 358}
@@ -403,6 +430,8 @@ static int rcar_thermal_probe(struct platform_device *pdev)
403 struct rcar_thermal_priv *priv; 430 struct rcar_thermal_priv *priv;
404 struct device *dev = &pdev->dev; 431 struct device *dev = &pdev->dev;
405 struct resource *res, *irq; 432 struct resource *res, *irq;
433 const struct of_device_id *of_id = of_match_device(rcar_thermal_dt_ids, dev);
434 unsigned long of_data = (unsigned long)of_id->data;
406 int mres = 0; 435 int mres = 0;
407 int i; 436 int i;
408 int ret = -ENODEV; 437 int ret = -ENODEV;
@@ -463,7 +492,13 @@ static int rcar_thermal_probe(struct platform_device *pdev)
463 if (ret < 0) 492 if (ret < 0)
464 goto error_unregister; 493 goto error_unregister;
465 494
466 priv->zone = thermal_zone_device_register("rcar_thermal", 495 if (of_data == USE_OF_THERMAL)
496 priv->zone = thermal_zone_of_sensor_register(
497 dev, i, priv,
498 &rcar_thermal_zone_of_ops);
499 else
500 priv->zone = thermal_zone_device_register(
501 "rcar_thermal",
467 1, 0, priv, 502 1, 0, priv,
468 &rcar_thermal_zone_ops, NULL, 0, 503 &rcar_thermal_zone_ops, NULL, 0,
469 idle); 504 idle);
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 534dd9136662..81b35aace9de 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -54,8 +54,7 @@ static struct thermal_zone_device_ops ops = {
54 .get_temp = thermal_get_temp, 54 .get_temp = thermal_get_temp,
55}; 55};
56 56
57#ifdef CONFIG_PM 57static int __maybe_unused spear_thermal_suspend(struct device *dev)
58static int spear_thermal_suspend(struct device *dev)
59{ 58{
60 struct platform_device *pdev = to_platform_device(dev); 59 struct platform_device *pdev = to_platform_device(dev);
61 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); 60 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -72,7 +71,7 @@ static int spear_thermal_suspend(struct device *dev)
72 return 0; 71 return 0;
73} 72}
74 73
75static int spear_thermal_resume(struct device *dev) 74static int __maybe_unused spear_thermal_resume(struct device *dev)
76{ 75{
77 struct platform_device *pdev = to_platform_device(dev); 76 struct platform_device *pdev = to_platform_device(dev);
78 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev); 77 struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -94,7 +93,6 @@ static int spear_thermal_resume(struct device *dev)
94 93
95 return 0; 94 return 0;
96} 95}
97#endif
98 96
99static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend, 97static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
100 spear_thermal_resume); 98 spear_thermal_resume);
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index b3110040164a..2348fa613707 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -681,7 +681,14 @@ static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
681/* this is called once with whichever end is closed last */ 681/* this is called once with whichever end is closed last */
682static void pty_unix98_shutdown(struct tty_struct *tty) 682static void pty_unix98_shutdown(struct tty_struct *tty)
683{ 683{
684 devpts_kill_index(tty->driver_data, tty->index); 684 struct inode *ptmx_inode;
685
686 if (tty->driver->subtype == PTY_TYPE_MASTER)
687 ptmx_inode = tty->driver_data;
688 else
689 ptmx_inode = tty->link->driver_data;
690 devpts_kill_index(ptmx_inode, tty->index);
691 devpts_del_ref(ptmx_inode);
685} 692}
686 693
687static const struct tty_operations ptm_unix98_ops = { 694static const struct tty_operations ptm_unix98_ops = {
@@ -773,6 +780,18 @@ static int ptmx_open(struct inode *inode, struct file *filp)
773 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 780 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
774 tty->driver_data = inode; 781 tty->driver_data = inode;
775 782
783 /*
784 * In the case where all references to ptmx inode are dropped and we
785 * still have /dev/tty opened pointing to the master/slave pair (ptmx
786 * is closed/released before /dev/tty), we must make sure that the inode
787 * is still valid when we call the final pty_unix98_shutdown, thus we
788 * hold an additional reference to the ptmx inode. For the same /dev/tty
789 * last close case, we also need to make sure the super_block isn't
790 * destroyed (devpts instance unmounted), before /dev/tty is closed and
791 * on its release devpts_kill_index is called.
792 */
793 devpts_add_ref(inode);
794
776 tty_add_file(tty, filp); 795 tty_add_file(tty, filp);
777 796
778 slave_inode = devpts_pty_new(inode, 797 slave_inode = devpts_pty_new(inode,
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e71ec78fc11e..7cd6f9a90542 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1941,6 +1941,7 @@ pci_wch_ch38x_setup(struct serial_private *priv,
1941#define PCIE_VENDOR_ID_WCH 0x1c00 1941#define PCIE_VENDOR_ID_WCH 0x1c00
1942#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 1942#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
1943#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 1943#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
1944#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
1944 1945
1945#define PCI_VENDOR_ID_PERICOM 0x12D8 1946#define PCI_VENDOR_ID_PERICOM 0x12D8
1946#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951 1947#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
@@ -2637,6 +2638,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
2637 .subdevice = PCI_ANY_ID, 2638 .subdevice = PCI_ANY_ID,
2638 .setup = pci_wch_ch353_setup, 2639 .setup = pci_wch_ch353_setup,
2639 }, 2640 },
2641 /* WCH CH382 2S card (16850 clone) */
2642 {
2643 .vendor = PCIE_VENDOR_ID_WCH,
2644 .device = PCIE_DEVICE_ID_WCH_CH382_2S,
2645 .subvendor = PCI_ANY_ID,
2646 .subdevice = PCI_ANY_ID,
2647 .setup = pci_wch_ch38x_setup,
2648 },
2640 /* WCH CH382 2S1P card (16850 clone) */ 2649 /* WCH CH382 2S1P card (16850 clone) */
2641 { 2650 {
2642 .vendor = PCIE_VENDOR_ID_WCH, 2651 .vendor = PCIE_VENDOR_ID_WCH,
@@ -2955,6 +2964,7 @@ enum pci_board_num_t {
2955 pbn_fintek_4, 2964 pbn_fintek_4,
2956 pbn_fintek_8, 2965 pbn_fintek_8,
2957 pbn_fintek_12, 2966 pbn_fintek_12,
2967 pbn_wch382_2,
2958 pbn_wch384_4, 2968 pbn_wch384_4,
2959 pbn_pericom_PI7C9X7951, 2969 pbn_pericom_PI7C9X7951,
2960 pbn_pericom_PI7C9X7952, 2970 pbn_pericom_PI7C9X7952,
@@ -3775,6 +3785,13 @@ static struct pciserial_board pci_boards[] = {
3775 .base_baud = 115200, 3785 .base_baud = 115200,
3776 .first_offset = 0x40, 3786 .first_offset = 0x40,
3777 }, 3787 },
3788 [pbn_wch382_2] = {
3789 .flags = FL_BASE0,
3790 .num_ports = 2,
3791 .base_baud = 115200,
3792 .uart_offset = 8,
3793 .first_offset = 0xC0,
3794 },
3778 [pbn_wch384_4] = { 3795 [pbn_wch384_4] = {
3779 .flags = FL_BASE0, 3796 .flags = FL_BASE0,
3780 .num_ports = 4, 3797 .num_ports = 4,
@@ -5574,6 +5591,10 @@ static struct pci_device_id serial_pci_tbl[] = {
5574 PCI_ANY_ID, PCI_ANY_ID, 5591 PCI_ANY_ID, PCI_ANY_ID,
5575 0, 0, pbn_b0_bt_2_115200 }, 5592 0, 0, pbn_b0_bt_2_115200 },
5576 5593
5594 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
5595 PCI_ANY_ID, PCI_ANY_ID,
5596 0, 0, pbn_wch382_2 },
5597
5577 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, 5598 { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
5578 PCI_ANY_ID, PCI_ANY_ID, 5599 PCI_ANY_ID, PCI_ANY_ID,
5579 0, 0, pbn_wch384_4 }, 5600 0, 0, pbn_wch384_4 },
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index b645f9228ed7..fa49eb1e2fa2 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1165,7 +1165,7 @@ serial_omap_type(struct uart_port *port)
1165 1165
1166#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) 1166#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
1167 1167
1168static void wait_for_xmitr(struct uart_omap_port *up) 1168static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
1169{ 1169{
1170 unsigned int status, tmout = 10000; 1170 unsigned int status, tmout = 10000;
1171 1171
@@ -1343,7 +1343,7 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
1343 1343
1344/* Enable or disable the rs485 support */ 1344/* Enable or disable the rs485 support */
1345static int 1345static int
1346serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf) 1346serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
1347{ 1347{
1348 struct uart_omap_port *up = to_uart_omap_port(port); 1348 struct uart_omap_port *up = to_uart_omap_port(port);
1349 unsigned int mode; 1349 unsigned int mode;
@@ -1356,8 +1356,12 @@ serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
1356 up->ier = 0; 1356 up->ier = 0;
1357 serial_out(up, UART_IER, 0); 1357 serial_out(up, UART_IER, 0);
1358 1358
1359 /* Clamp the delays to [0, 100ms] */
1360 rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
1361 rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
1362
1359 /* store new config */ 1363 /* store new config */
1360 port->rs485 = *rs485conf; 1364 port->rs485 = *rs485;
1361 1365
1362 /* 1366 /*
1363 * Just as a precaution, only allow rs485 1367 * Just as a precaution, only allow rs485
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 5cec01c75691..a7eacef1bd22 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2066,13 +2066,12 @@ retry_open:
2066 if (tty) { 2066 if (tty) {
2067 mutex_unlock(&tty_mutex); 2067 mutex_unlock(&tty_mutex);
2068 retval = tty_lock_interruptible(tty); 2068 retval = tty_lock_interruptible(tty);
2069 tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
2069 if (retval) { 2070 if (retval) {
2070 if (retval == -EINTR) 2071 if (retval == -EINTR)
2071 retval = -ERESTARTSYS; 2072 retval = -ERESTARTSYS;
2072 goto err_unref; 2073 goto err_unref;
2073 } 2074 }
2074 /* safe to drop the kref from tty_driver_lookup_tty() */
2075 tty_kref_put(tty);
2076 retval = tty_reopen(tty); 2075 retval = tty_reopen(tty);
2077 if (retval < 0) { 2076 if (retval < 0) {
2078 tty_unlock(tty); 2077 tty_unlock(tty);
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index d2f3c4cd697f..dfa9ec03fa8e 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -21,10 +21,15 @@ EXPORT_SYMBOL(tty_lock);
21 21
22int tty_lock_interruptible(struct tty_struct *tty) 22int tty_lock_interruptible(struct tty_struct *tty)
23{ 23{
24 int ret;
25
24 if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty)) 26 if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
25 return -EIO; 27 return -EIO;
26 tty_kref_get(tty); 28 tty_kref_get(tty);
27 return mutex_lock_interruptible(&tty->legacy_mutex); 29 ret = mutex_lock_interruptible(&tty->legacy_mutex);
30 if (ret)
31 tty_kref_put(tty);
32 return ret;
28} 33}
29 34
30void __lockfunc tty_unlock(struct tty_struct *tty) 35void __lockfunc tty_unlock(struct tty_struct *tty)
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index b59195edf636..b635ab67490d 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -85,8 +85,8 @@ static int ci_hdrc_pci_probe(struct pci_dev *pdev,
85 85
86 /* register a nop PHY */ 86 /* register a nop PHY */
87 ci->phy = usb_phy_generic_register(); 87 ci->phy = usb_phy_generic_register();
88 if (!ci->phy) 88 if (IS_ERR(ci->phy))
89 return -ENOMEM; 89 return PTR_ERR(ci->phy);
90 90
91 memset(res, 0, sizeof(res)); 91 memset(res, 0, sizeof(res));
92 res[0].start = pci_resource_start(pdev, 0); 92 res[0].start = pci_resource_start(pdev, 0);
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index a4f7db2e18dd..df47110bad2d 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -100,6 +100,9 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
100 if (sscanf(buf, "%u", &mode) != 1) 100 if (sscanf(buf, "%u", &mode) != 1)
101 return -EINVAL; 101 return -EINVAL;
102 102
103 if (mode > 255)
104 return -EBADRQC;
105
103 pm_runtime_get_sync(ci->dev); 106 pm_runtime_get_sync(ci->dev);
104 spin_lock_irqsave(&ci->lock, flags); 107 spin_lock_irqsave(&ci->lock, flags);
105 ret = hw_port_test_set(ci, mode); 108 ret = hw_port_test_set(ci, mode);
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 45f86da1d6d3..03b6743461d1 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -158,7 +158,7 @@ static void ci_otg_work(struct work_struct *work)
158int ci_hdrc_otg_init(struct ci_hdrc *ci) 158int ci_hdrc_otg_init(struct ci_hdrc *ci)
159{ 159{
160 INIT_WORK(&ci->work, ci_otg_work); 160 INIT_WORK(&ci->work, ci_otg_work);
161 ci->wq = create_singlethread_workqueue("ci_otg"); 161 ci->wq = create_freezable_workqueue("ci_otg");
162 if (!ci->wq) { 162 if (!ci->wq) {
163 dev_err(ci->dev, "can't create workqueue\n"); 163 dev_err(ci->dev, "can't create workqueue\n");
164 return -ENODEV; 164 return -ENODEV;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 350dcd9af5d8..51b436918f78 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5401,6 +5401,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
5401 } 5401 }
5402 5402
5403 bos = udev->bos; 5403 bos = udev->bos;
5404 udev->bos = NULL;
5404 5405
5405 for (i = 0; i < SET_CONFIG_TRIES; ++i) { 5406 for (i = 0; i < SET_CONFIG_TRIES; ++i) {
5406 5407
@@ -5493,11 +5494,8 @@ done:
5493 usb_set_usb2_hardware_lpm(udev, 1); 5494 usb_set_usb2_hardware_lpm(udev, 1);
5494 usb_unlocked_enable_lpm(udev); 5495 usb_unlocked_enable_lpm(udev);
5495 usb_enable_ltm(udev); 5496 usb_enable_ltm(udev);
5496 /* release the new BOS descriptor allocated by hub_port_init() */ 5497 usb_release_bos_descriptor(udev);
5497 if (udev->bos != bos) { 5498 udev->bos = bos;
5498 usb_release_bos_descriptor(udev);
5499 udev->bos = bos;
5500 }
5501 return 0; 5499 return 0;
5502 5500
5503re_enumerate: 5501re_enumerate:
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index fd95ba6ec317..f0decc0d69b5 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -1,5 +1,6 @@
1config USB_DWC2 1config USB_DWC2
2 tristate "DesignWare USB2 DRD Core Support" 2 tristate "DesignWare USB2 DRD Core Support"
3 depends on HAS_DMA
3 depends on USB || USB_GADGET 4 depends on USB || USB_GADGET
4 help 5 help
5 Say Y here if your system has a Dual Role Hi-Speed USB 6 Say Y here if your system has a Dual Role Hi-Speed USB
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index e991d55914db..46c4ba75dc2a 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -619,6 +619,12 @@ void dwc2_force_dr_mode(struct dwc2_hsotg *hsotg)
619 __func__, hsotg->dr_mode); 619 __func__, hsotg->dr_mode);
620 break; 620 break;
621 } 621 }
622
623 /*
624 * NOTE: This is required for some rockchip soc based
625 * platforms.
626 */
627 msleep(50);
622} 628}
623 629
624/* 630/*
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 36606fc33c0d..a41274aa52ad 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -1174,14 +1174,11 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
1174 failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc, 1174 failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
1175 halt_status, n_bytes, 1175 halt_status, n_bytes,
1176 xfer_done); 1176 xfer_done);
1177 if (*xfer_done && urb->status != -EINPROGRESS) 1177 if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
1178 failed = 1;
1179
1180 if (failed) {
1181 dwc2_host_complete(hsotg, qtd, urb->status); 1178 dwc2_host_complete(hsotg, qtd, urb->status);
1182 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh); 1179 dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1183 dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n", 1180 dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
1184 failed, *xfer_done, urb->status); 1181 failed, *xfer_done);
1185 return failed; 1182 return failed;
1186 } 1183 }
1187 1184
@@ -1236,21 +1233,23 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1236 1233
1237 list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) { 1234 list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
1238 int i; 1235 int i;
1236 int qtd_desc_count;
1239 1237
1240 qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry); 1238 qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
1241 xfer_done = 0; 1239 xfer_done = 0;
1240 qtd_desc_count = qtd->n_desc;
1242 1241
1243 for (i = 0; i < qtd->n_desc; i++) { 1242 for (i = 0; i < qtd_desc_count; i++) {
1244 if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd, 1243 if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
1245 desc_num, halt_status, 1244 desc_num, halt_status,
1246 &xfer_done)) { 1245 &xfer_done))
1247 qtd = NULL; 1246 goto stop_scan;
1248 break; 1247
1249 }
1250 desc_num++; 1248 desc_num++;
1251 } 1249 }
1252 } 1250 }
1253 1251
1252stop_scan:
1254 if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) { 1253 if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
1255 /* 1254 /*
1256 * Resetting the data toggle for bulk and interrupt endpoints 1255 * Resetting the data toggle for bulk and interrupt endpoints
@@ -1258,7 +1257,7 @@ static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1258 */ 1257 */
1259 if (halt_status == DWC2_HC_XFER_STALL) 1258 if (halt_status == DWC2_HC_XFER_STALL)
1260 qh->data_toggle = DWC2_HC_PID_DATA0; 1259 qh->data_toggle = DWC2_HC_PID_DATA0;
1261 else if (qtd) 1260 else
1262 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd); 1261 dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1263 } 1262 }
1264 1263
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index f8253803a050..cadba8b13c48 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -525,11 +525,19 @@ void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
525 u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT; 525 u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
526 526
527 if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) { 527 if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
528 if (WARN(!chan || !chan->qh,
529 "chan->qh must be specified for non-control eps\n"))
530 return;
531
528 if (pid == TSIZ_SC_MC_PID_DATA0) 532 if (pid == TSIZ_SC_MC_PID_DATA0)
529 chan->qh->data_toggle = DWC2_HC_PID_DATA0; 533 chan->qh->data_toggle = DWC2_HC_PID_DATA0;
530 else 534 else
531 chan->qh->data_toggle = DWC2_HC_PID_DATA1; 535 chan->qh->data_toggle = DWC2_HC_PID_DATA1;
532 } else { 536 } else {
537 if (WARN(!qtd,
538 "qtd must be specified for control eps\n"))
539 return;
540
533 if (pid == TSIZ_SC_MC_PID_DATA0) 541 if (pid == TSIZ_SC_MC_PID_DATA0)
534 qtd->data_toggle = DWC2_HC_PID_DATA0; 542 qtd->data_toggle = DWC2_HC_PID_DATA0;
535 else 543 else
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 29130682e547..e4f8b90d9627 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -856,7 +856,6 @@ struct dwc3 {
856 unsigned pullups_connected:1; 856 unsigned pullups_connected:1;
857 unsigned resize_fifos:1; 857 unsigned resize_fifos:1;
858 unsigned setup_packet_pending:1; 858 unsigned setup_packet_pending:1;
859 unsigned start_config_issued:1;
860 unsigned three_stage_setup:1; 859 unsigned three_stage_setup:1;
861 unsigned usb3_lpm_capable:1; 860 unsigned usb3_lpm_capable:1;
862 861
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 3a9354abcb68..8d6b75c2f53b 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
555 int ret; 555 int ret;
556 u32 reg; 556 u32 reg;
557 557
558 dwc->start_config_issued = false;
559 cfg = le16_to_cpu(ctrl->wValue); 558 cfg = le16_to_cpu(ctrl->wValue);
560 559
561 switch (state) { 560 switch (state) {
@@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
737 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); 736 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
738 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); 737 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
739 break; 738 break;
740 case USB_REQ_SET_INTERFACE:
741 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
742 dwc->start_config_issued = false;
743 /* Fall through */
744 default: 739 default:
745 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); 740 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
746 ret = dwc3_ep0_delegate_req(dwc, ctrl); 741 ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7d1dd82a95ac..2363bad45af8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -385,24 +385,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
385 dep->trb_pool_dma = 0; 385 dep->trb_pool_dma = 0;
386} 386}
387 387
388static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
389
390/**
391 * dwc3_gadget_start_config - Configure EP resources
392 * @dwc: pointer to our controller context structure
393 * @dep: endpoint that is being enabled
394 *
395 * The assignment of transfer resources cannot perfectly follow the
396 * data book due to the fact that the controller driver does not have
397 * all knowledge of the configuration in advance. It is given this
398 * information piecemeal by the composite gadget framework after every
399 * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
400 * programming model in this scenario can cause errors. For two
401 * reasons:
402 *
403 * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
404 * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
405 * multiple interfaces.
406 *
407 * 2) The databook does not mention doing more DEPXFERCFG for new
408 * endpoint on alt setting (8.1.6).
409 *
410 * The following simplified method is used instead:
411 *
412 * All hardware endpoints can be assigned a transfer resource and this
413 * setting will stay persistent until either a core reset or
414 * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
415 * do DEPXFERCFG for every hardware endpoint as well. We are
416 * guaranteed that there are as many transfer resources as endpoints.
417 *
418 * This function is called for each endpoint when it is being enabled
419 * but is triggered only when called for EP0-out, which always happens
420 * first, and which should only happen in one of the above conditions.
421 */
388static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep) 422static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
389{ 423{
390 struct dwc3_gadget_ep_cmd_params params; 424 struct dwc3_gadget_ep_cmd_params params;
391 u32 cmd; 425 u32 cmd;
426 int i;
427 int ret;
428
429 if (dep->number)
430 return 0;
392 431
393 memset(&params, 0x00, sizeof(params)); 432 memset(&params, 0x00, sizeof(params));
433 cmd = DWC3_DEPCMD_DEPSTARTCFG;
394 434
395 if (dep->number != 1) { 435 ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
396 cmd = DWC3_DEPCMD_DEPSTARTCFG; 436 if (ret)
397 /* XferRscIdx == 0 for ep0 and 2 for the remaining */ 437 return ret;
398 if (dep->number > 1) {
399 if (dwc->start_config_issued)
400 return 0;
401 dwc->start_config_issued = true;
402 cmd |= DWC3_DEPCMD_PARAM(2);
403 }
404 438
405 return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params); 439 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
440 struct dwc3_ep *dep = dwc->eps[i];
441
442 if (!dep)
443 continue;
444
445 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
446 if (ret)
447 return ret;
406 } 448 }
407 449
408 return 0; 450 return 0;
@@ -516,10 +558,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
516 struct dwc3_trb *trb_st_hw; 558 struct dwc3_trb *trb_st_hw;
517 struct dwc3_trb *trb_link; 559 struct dwc3_trb *trb_link;
518 560
519 ret = dwc3_gadget_set_xfer_resource(dwc, dep);
520 if (ret)
521 return ret;
522
523 dep->endpoint.desc = desc; 561 dep->endpoint.desc = desc;
524 dep->comp_desc = comp_desc; 562 dep->comp_desc = comp_desc;
525 dep->type = usb_endpoint_type(desc); 563 dep->type = usb_endpoint_type(desc);
@@ -1636,8 +1674,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
1636 } 1674 }
1637 dwc3_writel(dwc->regs, DWC3_DCFG, reg); 1675 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1638 1676
1639 dwc->start_config_issued = false;
1640
1641 /* Start with SuperSpeed Default */ 1677 /* Start with SuperSpeed Default */
1642 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); 1678 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1643 1679
@@ -2237,7 +2273,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2237 dwc3_writel(dwc->regs, DWC3_DCTL, reg); 2273 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2238 2274
2239 dwc3_disconnect_gadget(dwc); 2275 dwc3_disconnect_gadget(dwc);
2240 dwc->start_config_issued = false;
2241 2276
2242 dwc->gadget.speed = USB_SPEED_UNKNOWN; 2277 dwc->gadget.speed = USB_SPEED_UNKNOWN;
2243 dwc->setup_packet_pending = false; 2278 dwc->setup_packet_pending = false;
@@ -2288,7 +2323,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2288 2323
2289 dwc3_stop_active_transfers(dwc); 2324 dwc3_stop_active_transfers(dwc);
2290 dwc3_clear_stall_all_ep(dwc); 2325 dwc3_clear_stall_all_ep(dwc);
2291 dwc->start_config_issued = false;
2292 2326
2293 /* Reset device address to zero */ 2327 /* Reset device address to zero */
2294 reg = dwc3_readl(dwc->regs, DWC3_DCFG); 2328 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 7e179f81d05c..87fb0fd6aaab 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -130,7 +130,8 @@ struct dev_data {
130 setup_can_stall : 1, 130 setup_can_stall : 1,
131 setup_out_ready : 1, 131 setup_out_ready : 1,
132 setup_out_error : 1, 132 setup_out_error : 1,
133 setup_abort : 1; 133 setup_abort : 1,
134 gadget_registered : 1;
134 unsigned setup_wLength; 135 unsigned setup_wLength;
135 136
136 /* the rest is basically write-once */ 137 /* the rest is basically write-once */
@@ -1179,7 +1180,8 @@ dev_release (struct inode *inode, struct file *fd)
1179 1180
1180 /* closing ep0 === shutdown all */ 1181 /* closing ep0 === shutdown all */
1181 1182
1182 usb_gadget_unregister_driver (&gadgetfs_driver); 1183 if (dev->gadget_registered)
1184 usb_gadget_unregister_driver (&gadgetfs_driver);
1183 1185
1184 /* at this point "good" hardware has disconnected the 1186 /* at this point "good" hardware has disconnected the
1185 * device from USB; the host won't see it any more. 1187 * device from USB; the host won't see it any more.
@@ -1847,6 +1849,7 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
1847 * kick in after the ep0 descriptor is closed. 1849 * kick in after the ep0 descriptor is closed.
1848 */ 1850 */
1849 value = len; 1851 value = len;
1852 dev->gadget_registered = true;
1850 } 1853 }
1851 return value; 1854 return value;
1852 1855
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 53c0692f1b09..93d28cb00b76 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2340,7 +2340,7 @@ static struct qe_udc *qe_udc_config(struct platform_device *ofdev)
2340{ 2340{
2341 struct qe_udc *udc; 2341 struct qe_udc *udc;
2342 struct device_node *np = ofdev->dev.of_node; 2342 struct device_node *np = ofdev->dev.of_node;
2343 unsigned int tmp_addr = 0; 2343 unsigned long tmp_addr = 0;
2344 struct usb_device_para __iomem *usbpram; 2344 struct usb_device_para __iomem *usbpram;
2345 unsigned int i; 2345 unsigned int i;
2346 u64 size; 2346 u64 size;
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 4dff60d34f73..0d32052bf16f 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -369,9 +369,20 @@ static inline void set_max_speed(struct net2280_ep *ep, u32 max)
369 static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80, 369 static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80,
370 0x50, 0x20, 0x70, 0x40, 0x90 }; 370 0x50, 0x20, 0x70, 0x40, 0x90 };
371 371
372 if (ep->dev->enhanced_mode) 372 if (ep->dev->enhanced_mode) {
373 reg = ep_enhanced[ep->num]; 373 reg = ep_enhanced[ep->num];
374 else{ 374 switch (ep->dev->gadget.speed) {
375 case USB_SPEED_SUPER:
376 reg += 2;
377 break;
378 case USB_SPEED_FULL:
379 reg += 1;
380 break;
381 case USB_SPEED_HIGH:
382 default:
383 break;
384 }
385 } else {
375 reg = (ep->num + 1) * 0x10; 386 reg = (ep->num + 1) * 0x10;
376 if (ep->dev->gadget.speed != USB_SPEED_HIGH) 387 if (ep->dev->gadget.speed != USB_SPEED_HIGH)
377 reg += 1; 388 reg += 1;
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index fd73a3ea07c2..b86a6f03592e 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -413,9 +413,10 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
413 if (!driver->udc_name || strcmp(driver->udc_name, 413 if (!driver->udc_name || strcmp(driver->udc_name,
414 dev_name(&udc->dev)) == 0) { 414 dev_name(&udc->dev)) == 0) {
415 ret = udc_bind_to_driver(udc, driver); 415 ret = udc_bind_to_driver(udc, driver);
416 if (ret != -EPROBE_DEFER)
417 list_del(&driver->pending);
416 if (ret) 418 if (ret)
417 goto err4; 419 goto err4;
418 list_del(&driver->pending);
419 break; 420 break;
420 } 421 }
421 } 422 }
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 795a45b1b25b..58487a473521 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -662,7 +662,7 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
662 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE); 662 csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
663 csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */ 663 csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
664 } 664 }
665 channel->desired_mode = mode; 665 channel->desired_mode = *mode;
666 musb_writew(epio, MUSB_TXCSR, csr); 666 musb_writew(epio, MUSB_TXCSR, csr);
667 667
668 return 0; 668 return 0;
@@ -2003,10 +2003,8 @@ void musb_host_rx(struct musb *musb, u8 epnum)
2003 qh->offset, 2003 qh->offset,
2004 urb->transfer_buffer_length); 2004 urb->transfer_buffer_length);
2005 2005
2006 done = musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, 2006 if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
2007 urb, xfer_len, 2007 xfer_len, iso_err))
2008 iso_err);
2009 if (done)
2010 goto finish; 2008 goto finish;
2011 else 2009 else
2012 dev_err(musb->controller, "error: rx_dma failed\n"); 2010 dev_err(musb->controller, "error: rx_dma failed\n");
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 970a30e155cb..72b387d592c2 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -757,14 +757,8 @@ static int msm_otg_set_host(struct usb_otg *otg, struct usb_bus *host)
757 otg->host = host; 757 otg->host = host;
758 dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n"); 758 dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n");
759 759
760 /* 760 pm_runtime_get_sync(otg->usb_phy->dev);
761 * Kick the state machine work, if peripheral is not supported 761 schedule_work(&motg->sm_work);
762 * or peripheral is already registered with us.
763 */
764 if (motg->pdata->mode == USB_DR_MODE_HOST || otg->gadget) {
765 pm_runtime_get_sync(otg->usb_phy->dev);
766 schedule_work(&motg->sm_work);
767 }
768 762
769 return 0; 763 return 0;
770} 764}
@@ -827,14 +821,8 @@ static int msm_otg_set_peripheral(struct usb_otg *otg,
827 dev_dbg(otg->usb_phy->dev, 821 dev_dbg(otg->usb_phy->dev,
828 "peripheral driver registered w/ tranceiver\n"); 822 "peripheral driver registered w/ tranceiver\n");
829 823
830 /* 824 pm_runtime_get_sync(otg->usb_phy->dev);
831 * Kick the state machine work, if host is not supported 825 schedule_work(&motg->sm_work);
832 * or host is already registered with us.
833 */
834 if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL || otg->host) {
835 pm_runtime_get_sync(otg->usb_phy->dev);
836 schedule_work(&motg->sm_work);
837 }
838 826
839 return 0; 827 return 0;
840} 828}
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index f612dda9c977..56ecb8b5115d 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -475,22 +475,6 @@ config USB_SERIAL_MOS7840
475 To compile this driver as a module, choose M here: the 475 To compile this driver as a module, choose M here: the
476 module will be called mos7840. If unsure, choose N. 476 module will be called mos7840. If unsure, choose N.
477 477
478config USB_SERIAL_MXUPORT11
479 tristate "USB Moxa UPORT 11x0 Serial Driver"
480 ---help---
481 Say Y here if you want to use a MOXA UPort 11x0 Serial hub.
482
483 This driver supports:
484
485 - UPort 1110 : 1 port RS-232 USB to Serial Hub.
486 - UPort 1130 : 1 port RS-422/485 USB to Serial Hub.
487 - UPort 1130I : 1 port RS-422/485 USB to Serial Hub with Isolation.
488 - UPort 1150 : 1 port RS-232/422/485 USB to Serial Hub.
489 - UPort 1150I : 1 port RS-232/422/485 USB to Serial Hub with Isolation.
490
491 To compile this driver as a module, choose M here: the
492 module will be called mxu11x0.
493
494config USB_SERIAL_MXUPORT 478config USB_SERIAL_MXUPORT
495 tristate "USB Moxa UPORT Serial Driver" 479 tristate "USB Moxa UPORT Serial Driver"
496 ---help--- 480 ---help---
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index f3fa5e53702d..349d9df0895f 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -38,7 +38,6 @@ obj-$(CONFIG_USB_SERIAL_METRO) += metro-usb.o
38obj-$(CONFIG_USB_SERIAL_MOS7720) += mos7720.o 38obj-$(CONFIG_USB_SERIAL_MOS7720) += mos7720.o
39obj-$(CONFIG_USB_SERIAL_MOS7840) += mos7840.o 39obj-$(CONFIG_USB_SERIAL_MOS7840) += mos7840.o
40obj-$(CONFIG_USB_SERIAL_MXUPORT) += mxuport.o 40obj-$(CONFIG_USB_SERIAL_MXUPORT) += mxuport.o
41obj-$(CONFIG_USB_SERIAL_MXUPORT11) += mxu11x0.o
42obj-$(CONFIG_USB_SERIAL_NAVMAN) += navman.o 41obj-$(CONFIG_USB_SERIAL_NAVMAN) += navman.o
43obj-$(CONFIG_USB_SERIAL_OMNINET) += omninet.o 42obj-$(CONFIG_USB_SERIAL_OMNINET) += omninet.o
44obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o 43obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 987813b8a7f9..73a366de5102 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -163,6 +163,9 @@ static const struct usb_device_id id_table[] = {
163 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 163 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
164 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 164 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
165 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ 165 { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
166 { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
167 { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
168 { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
166 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ 169 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
167 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ 170 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
168 { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ 171 { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
diff --git a/drivers/usb/serial/mxu11x0.c b/drivers/usb/serial/mxu11x0.c
deleted file mode 100644
index 619607323bfd..000000000000
--- a/drivers/usb/serial/mxu11x0.c
+++ /dev/null
@@ -1,1006 +0,0 @@
1/*
2 * USB Moxa UPORT 11x0 Serial Driver
3 *
4 * Copyright (C) 2007 MOXA Technologies Co., Ltd.
5 * Copyright (C) 2015 Mathieu Othacehe <m.othacehe@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 *
13 * Supports the following Moxa USB to serial converters:
14 * UPort 1110, 1 port RS-232 USB to Serial Hub.
15 * UPort 1130, 1 port RS-422/485 USB to Serial Hub.
16 * UPort 1130I, 1 port RS-422/485 USB to Serial Hub with isolation
17 * protection.
18 * UPort 1150, 1 port RS-232/422/485 USB to Serial Hub.
19 * UPort 1150I, 1 port RS-232/422/485 USB to Serial Hub with isolation
20 * protection.
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/firmware.h>
26#include <linux/jiffies.h>
27#include <linux/serial.h>
28#include <linux/serial_reg.h>
29#include <linux/slab.h>
30#include <linux/spinlock.h>
31#include <linux/mutex.h>
32#include <linux/tty.h>
33#include <linux/tty_driver.h>
34#include <linux/tty_flip.h>
35#include <linux/uaccess.h>
36#include <linux/usb.h>
37#include <linux/usb/serial.h>
38
39/* Vendor and product ids */
40#define MXU1_VENDOR_ID 0x110a
41#define MXU1_1110_PRODUCT_ID 0x1110
42#define MXU1_1130_PRODUCT_ID 0x1130
43#define MXU1_1150_PRODUCT_ID 0x1150
44#define MXU1_1151_PRODUCT_ID 0x1151
45#define MXU1_1131_PRODUCT_ID 0x1131
46
47/* Commands */
48#define MXU1_GET_VERSION 0x01
49#define MXU1_GET_PORT_STATUS 0x02
50#define MXU1_GET_PORT_DEV_INFO 0x03
51#define MXU1_GET_CONFIG 0x04
52#define MXU1_SET_CONFIG 0x05
53#define MXU1_OPEN_PORT 0x06
54#define MXU1_CLOSE_PORT 0x07
55#define MXU1_START_PORT 0x08
56#define MXU1_STOP_PORT 0x09
57#define MXU1_TEST_PORT 0x0A
58#define MXU1_PURGE_PORT 0x0B
59#define MXU1_RESET_EXT_DEVICE 0x0C
60#define MXU1_GET_OUTQUEUE 0x0D
61#define MXU1_WRITE_DATA 0x80
62#define MXU1_READ_DATA 0x81
63#define MXU1_REQ_TYPE_CLASS 0x82
64
65/* Module identifiers */
66#define MXU1_I2C_PORT 0x01
67#define MXU1_IEEE1284_PORT 0x02
68#define MXU1_UART1_PORT 0x03
69#define MXU1_UART2_PORT 0x04
70#define MXU1_RAM_PORT 0x05
71
72/* Modem status */
73#define MXU1_MSR_DELTA_CTS 0x01
74#define MXU1_MSR_DELTA_DSR 0x02
75#define MXU1_MSR_DELTA_RI 0x04
76#define MXU1_MSR_DELTA_CD 0x08
77#define MXU1_MSR_CTS 0x10
78#define MXU1_MSR_DSR 0x20
79#define MXU1_MSR_RI 0x40
80#define MXU1_MSR_CD 0x80
81#define MXU1_MSR_DELTA_MASK 0x0F
82#define MXU1_MSR_MASK 0xF0
83
84/* Line status */
85#define MXU1_LSR_OVERRUN_ERROR 0x01
86#define MXU1_LSR_PARITY_ERROR 0x02
87#define MXU1_LSR_FRAMING_ERROR 0x04
88#define MXU1_LSR_BREAK 0x08
89#define MXU1_LSR_ERROR 0x0F
90#define MXU1_LSR_RX_FULL 0x10
91#define MXU1_LSR_TX_EMPTY 0x20
92
93/* Modem control */
94#define MXU1_MCR_LOOP 0x04
95#define MXU1_MCR_DTR 0x10
96#define MXU1_MCR_RTS 0x20
97
98/* Mask settings */
99#define MXU1_UART_ENABLE_RTS_IN 0x0001
100#define MXU1_UART_DISABLE_RTS 0x0002
101#define MXU1_UART_ENABLE_PARITY_CHECKING 0x0008
102#define MXU1_UART_ENABLE_DSR_OUT 0x0010
103#define MXU1_UART_ENABLE_CTS_OUT 0x0020
104#define MXU1_UART_ENABLE_X_OUT 0x0040
105#define MXU1_UART_ENABLE_XA_OUT 0x0080
106#define MXU1_UART_ENABLE_X_IN 0x0100
107#define MXU1_UART_ENABLE_DTR_IN 0x0800
108#define MXU1_UART_DISABLE_DTR 0x1000
109#define MXU1_UART_ENABLE_MS_INTS 0x2000
110#define MXU1_UART_ENABLE_AUTO_START_DMA 0x4000
111#define MXU1_UART_SEND_BREAK_SIGNAL 0x8000
112
113/* Parity */
114#define MXU1_UART_NO_PARITY 0x00
115#define MXU1_UART_ODD_PARITY 0x01
116#define MXU1_UART_EVEN_PARITY 0x02
117#define MXU1_UART_MARK_PARITY 0x03
118#define MXU1_UART_SPACE_PARITY 0x04
119
120/* Stop bits */
121#define MXU1_UART_1_STOP_BITS 0x00
122#define MXU1_UART_1_5_STOP_BITS 0x01
123#define MXU1_UART_2_STOP_BITS 0x02
124
125/* Bits per character */
126#define MXU1_UART_5_DATA_BITS 0x00
127#define MXU1_UART_6_DATA_BITS 0x01
128#define MXU1_UART_7_DATA_BITS 0x02
129#define MXU1_UART_8_DATA_BITS 0x03
130
131/* Operation modes */
132#define MXU1_UART_232 0x00
133#define MXU1_UART_485_RECEIVER_DISABLED 0x01
134#define MXU1_UART_485_RECEIVER_ENABLED 0x02
135
136/* Pipe transfer mode and timeout */
137#define MXU1_PIPE_MODE_CONTINUOUS 0x01
138#define MXU1_PIPE_MODE_MASK 0x03
139#define MXU1_PIPE_TIMEOUT_MASK 0x7C
140#define MXU1_PIPE_TIMEOUT_ENABLE 0x80
141
142/* Config struct */
143struct mxu1_uart_config {
144 __be16 wBaudRate;
145 __be16 wFlags;
146 u8 bDataBits;
147 u8 bParity;
148 u8 bStopBits;
149 char cXon;
150 char cXoff;
151 u8 bUartMode;
152} __packed;
153
154/* Purge modes */
155#define MXU1_PURGE_OUTPUT 0x00
156#define MXU1_PURGE_INPUT 0x80
157
158/* Read/Write data */
159#define MXU1_RW_DATA_ADDR_SFR 0x10
160#define MXU1_RW_DATA_ADDR_IDATA 0x20
161#define MXU1_RW_DATA_ADDR_XDATA 0x30
162#define MXU1_RW_DATA_ADDR_CODE 0x40
163#define MXU1_RW_DATA_ADDR_GPIO 0x50
164#define MXU1_RW_DATA_ADDR_I2C 0x60
165#define MXU1_RW_DATA_ADDR_FLASH 0x70
166#define MXU1_RW_DATA_ADDR_DSP 0x80
167
168#define MXU1_RW_DATA_UNSPECIFIED 0x00
169#define MXU1_RW_DATA_BYTE 0x01
170#define MXU1_RW_DATA_WORD 0x02
171#define MXU1_RW_DATA_DOUBLE_WORD 0x04
172
173struct mxu1_write_data_bytes {
174 u8 bAddrType;
175 u8 bDataType;
176 u8 bDataCounter;
177 __be16 wBaseAddrHi;
178 __be16 wBaseAddrLo;
179 u8 bData[0];
180} __packed;
181
182/* Interrupt codes */
183#define MXU1_CODE_HARDWARE_ERROR 0xFF
184#define MXU1_CODE_DATA_ERROR 0x03
185#define MXU1_CODE_MODEM_STATUS 0x04
186
187static inline int mxu1_get_func_from_code(unsigned char code)
188{
189 return code & 0x0f;
190}
191
192/* Download firmware max packet size */
193#define MXU1_DOWNLOAD_MAX_PACKET_SIZE 64
194
195/* Firmware image header */
196struct mxu1_firmware_header {
197 __le16 wLength;
198 u8 bCheckSum;
199} __packed;
200
201#define MXU1_UART_BASE_ADDR 0xFFA0
202#define MXU1_UART_OFFSET_MCR 0x0004
203
204#define MXU1_BAUD_BASE 923077
205
206#define MXU1_TRANSFER_TIMEOUT 2
207#define MXU1_DOWNLOAD_TIMEOUT 1000
208#define MXU1_DEFAULT_CLOSING_WAIT 4000 /* in .01 secs */
209
210struct mxu1_port {
211 u8 msr;
212 u8 mcr;
213 u8 uart_mode;
214 spinlock_t spinlock; /* Protects msr */
215 struct mutex mutex; /* Protects mcr */
216 bool send_break;
217};
218
219struct mxu1_device {
220 u16 mxd_model;
221};
222
223static const struct usb_device_id mxu1_idtable[] = {
224 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1110_PRODUCT_ID) },
225 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1130_PRODUCT_ID) },
226 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1150_PRODUCT_ID) },
227 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1151_PRODUCT_ID) },
228 { USB_DEVICE(MXU1_VENDOR_ID, MXU1_1131_PRODUCT_ID) },
229 { }
230};
231
232MODULE_DEVICE_TABLE(usb, mxu1_idtable);
233
234/* Write the given buffer out to the control pipe. */
235static int mxu1_send_ctrl_data_urb(struct usb_serial *serial,
236 u8 request,
237 u16 value, u16 index,
238 void *data, size_t size)
239{
240 int status;
241
242 status = usb_control_msg(serial->dev,
243 usb_sndctrlpipe(serial->dev, 0),
244 request,
245 (USB_DIR_OUT | USB_TYPE_VENDOR |
246 USB_RECIP_DEVICE), value, index,
247 data, size,
248 USB_CTRL_SET_TIMEOUT);
249 if (status < 0) {
250 dev_err(&serial->interface->dev,
251 "%s - usb_control_msg failed: %d\n",
252 __func__, status);
253 return status;
254 }
255
256 if (status != size) {
257 dev_err(&serial->interface->dev,
258 "%s - short write (%d / %zd)\n",
259 __func__, status, size);
260 return -EIO;
261 }
262
263 return 0;
264}
265
266/* Send a vendor request without any data */
267static int mxu1_send_ctrl_urb(struct usb_serial *serial,
268 u8 request, u16 value, u16 index)
269{
270 return mxu1_send_ctrl_data_urb(serial, request, value, index,
271 NULL, 0);
272}
273
274static int mxu1_download_firmware(struct usb_serial *serial,
275 const struct firmware *fw_p)
276{
277 int status = 0;
278 int buffer_size;
279 int pos;
280 int len;
281 int done;
282 u8 cs = 0;
283 u8 *buffer;
284 struct usb_device *dev = serial->dev;
285 struct mxu1_firmware_header *header;
286 unsigned int pipe;
287
288 pipe = usb_sndbulkpipe(dev, serial->port[0]->bulk_out_endpointAddress);
289
290 buffer_size = fw_p->size + sizeof(*header);
291 buffer = kmalloc(buffer_size, GFP_KERNEL);
292 if (!buffer)
293 return -ENOMEM;
294
295 memcpy(buffer, fw_p->data, fw_p->size);
296 memset(buffer + fw_p->size, 0xff, buffer_size - fw_p->size);
297
298 for (pos = sizeof(*header); pos < buffer_size; pos++)
299 cs = (u8)(cs + buffer[pos]);
300
301 header = (struct mxu1_firmware_header *)buffer;
302 header->wLength = cpu_to_le16(buffer_size - sizeof(*header));
303 header->bCheckSum = cs;
304
305 dev_dbg(&dev->dev, "%s - downloading firmware\n", __func__);
306
307 for (pos = 0; pos < buffer_size; pos += done) {
308 len = min(buffer_size - pos, MXU1_DOWNLOAD_MAX_PACKET_SIZE);
309
310 status = usb_bulk_msg(dev, pipe, buffer + pos, len, &done,
311 MXU1_DOWNLOAD_TIMEOUT);
312 if (status)
313 break;
314 }
315
316 kfree(buffer);
317
318 if (status) {
319 dev_err(&dev->dev, "failed to download firmware: %d\n", status);
320 return status;
321 }
322
323 msleep_interruptible(100);
324 usb_reset_device(dev);
325
326 dev_dbg(&dev->dev, "%s - download successful\n", __func__);
327
328 return 0;
329}
330
331static int mxu1_port_probe(struct usb_serial_port *port)
332{
333 struct mxu1_port *mxport;
334 struct mxu1_device *mxdev;
335
336 if (!port->interrupt_in_urb) {
337 dev_err(&port->dev, "no interrupt urb\n");
338 return -ENODEV;
339 }
340
341 mxport = kzalloc(sizeof(struct mxu1_port), GFP_KERNEL);
342 if (!mxport)
343 return -ENOMEM;
344
345 spin_lock_init(&mxport->spinlock);
346 mutex_init(&mxport->mutex);
347
348 mxdev = usb_get_serial_data(port->serial);
349
350 switch (mxdev->mxd_model) {
351 case MXU1_1110_PRODUCT_ID:
352 case MXU1_1150_PRODUCT_ID:
353 case MXU1_1151_PRODUCT_ID:
354 mxport->uart_mode = MXU1_UART_232;
355 break;
356 case MXU1_1130_PRODUCT_ID:
357 case MXU1_1131_PRODUCT_ID:
358 mxport->uart_mode = MXU1_UART_485_RECEIVER_DISABLED;
359 break;
360 }
361
362 usb_set_serial_port_data(port, mxport);
363
364 port->port.closing_wait =
365 msecs_to_jiffies(MXU1_DEFAULT_CLOSING_WAIT * 10);
366 port->port.drain_delay = 1;
367
368 return 0;
369}
370
371static int mxu1_port_remove(struct usb_serial_port *port)
372{
373 struct mxu1_port *mxport;
374
375 mxport = usb_get_serial_port_data(port);
376 kfree(mxport);
377
378 return 0;
379}
380
381static int mxu1_startup(struct usb_serial *serial)
382{
383 struct mxu1_device *mxdev;
384 struct usb_device *dev = serial->dev;
385 struct usb_host_interface *cur_altsetting;
386 char fw_name[32];
387 const struct firmware *fw_p = NULL;
388 int err;
389
390 dev_dbg(&serial->interface->dev, "%s - product 0x%04X, num configurations %d, configuration value %d\n",
391 __func__, le16_to_cpu(dev->descriptor.idProduct),
392 dev->descriptor.bNumConfigurations,
393 dev->actconfig->desc.bConfigurationValue);
394
395 /* create device structure */
396 mxdev = kzalloc(sizeof(struct mxu1_device), GFP_KERNEL);
397 if (!mxdev)
398 return -ENOMEM;
399
400 usb_set_serial_data(serial, mxdev);
401
402 mxdev->mxd_model = le16_to_cpu(dev->descriptor.idProduct);
403
404 cur_altsetting = serial->interface->cur_altsetting;
405
406 /* if we have only 1 configuration, download firmware */
407 if (cur_altsetting->desc.bNumEndpoints == 1) {
408
409 snprintf(fw_name,
410 sizeof(fw_name),
411 "moxa/moxa-%04x.fw",
412 mxdev->mxd_model);
413
414 err = request_firmware(&fw_p, fw_name, &serial->interface->dev);
415 if (err) {
416 dev_err(&serial->interface->dev, "failed to request firmware: %d\n",
417 err);
418 goto err_free_mxdev;
419 }
420
421 err = mxu1_download_firmware(serial, fw_p);
422 if (err)
423 goto err_release_firmware;
424
425 /* device is being reset */
426 err = -ENODEV;
427 goto err_release_firmware;
428 }
429
430 return 0;
431
432err_release_firmware:
433 release_firmware(fw_p);
434err_free_mxdev:
435 kfree(mxdev);
436
437 return err;
438}
439
440static void mxu1_release(struct usb_serial *serial)
441{
442 struct mxu1_device *mxdev;
443
444 mxdev = usb_get_serial_data(serial);
445 kfree(mxdev);
446}
447
448static int mxu1_write_byte(struct usb_serial_port *port, u32 addr,
449 u8 mask, u8 byte)
450{
451 int status;
452 size_t size;
453 struct mxu1_write_data_bytes *data;
454
455 dev_dbg(&port->dev, "%s - addr 0x%08X, mask 0x%02X, byte 0x%02X\n",
456 __func__, addr, mask, byte);
457
458 size = sizeof(struct mxu1_write_data_bytes) + 2;
459 data = kzalloc(size, GFP_KERNEL);
460 if (!data)
461 return -ENOMEM;
462
463 data->bAddrType = MXU1_RW_DATA_ADDR_XDATA;
464 data->bDataType = MXU1_RW_DATA_BYTE;
465 data->bDataCounter = 1;
466 data->wBaseAddrHi = cpu_to_be16(addr >> 16);
467 data->wBaseAddrLo = cpu_to_be16(addr);
468 data->bData[0] = mask;
469 data->bData[1] = byte;
470
471 status = mxu1_send_ctrl_data_urb(port->serial, MXU1_WRITE_DATA, 0,
472 MXU1_RAM_PORT, data, size);
473 if (status < 0)
474 dev_err(&port->dev, "%s - failed: %d\n", __func__, status);
475
476 kfree(data);
477
478 return status;
479}
480
481static int mxu1_set_mcr(struct usb_serial_port *port, unsigned int mcr)
482{
483 int status;
484
485 status = mxu1_write_byte(port,
486 MXU1_UART_BASE_ADDR + MXU1_UART_OFFSET_MCR,
487 MXU1_MCR_RTS | MXU1_MCR_DTR | MXU1_MCR_LOOP,
488 mcr);
489 return status;
490}
491
/*
 * Apply the tty's termios settings to the device.
 *
 * Builds an MXU1_SET_CONFIG request from the termios flags (word size,
 * parity, stop bits, hard/soft flow control, baud divisor) and sends it
 * to the port, then updates DTR/RTS in the modem-control register for
 * B0 transitions. Called with old_termios == NULL from open() and
 * break_ctl() to force a full reconfiguration.
 */
static void mxu1_set_termios(struct tty_struct *tty,
			     struct usb_serial_port *port,
			     struct ktermios *old_termios)
{
	struct mxu1_port *mxport = usb_get_serial_port_data(port);
	struct mxu1_uart_config *config;
	tcflag_t cflag, iflag;
	speed_t baud;
	int status;
	unsigned int mcr;

	cflag = tty->termios.c_cflag;
	iflag = tty->termios.c_iflag;

	/* Skip the device round-trip when nothing relevant changed. */
	if (old_termios &&
	    !tty_termios_hw_change(&tty->termios, old_termios) &&
	    tty->termios.c_iflag == old_termios->c_iflag) {
		dev_dbg(&port->dev, "%s - nothing to change\n", __func__);
		return;
	}

	dev_dbg(&port->dev,
		"%s - cflag 0x%08x, iflag 0x%08x\n", __func__, cflag, iflag);

	if (old_termios) {
		dev_dbg(&port->dev, "%s - old cflag 0x%08x, old iflag 0x%08x\n",
			__func__,
			old_termios->c_cflag,
			old_termios->c_iflag);
	}

	/* set_termios returns void, so an allocation failure is silent. */
	config = kzalloc(sizeof(*config), GFP_KERNEL);
	if (!config)
		return;

	/* these flags must be set */
	config->wFlags |= MXU1_UART_ENABLE_MS_INTS;
	config->wFlags |= MXU1_UART_ENABLE_AUTO_START_DMA;
	if (mxport->send_break)
		config->wFlags |= MXU1_UART_SEND_BREAK_SIGNAL;
	config->bUartMode = mxport->uart_mode;

	switch (C_CSIZE(tty)) {
	case CS5:
		config->bDataBits = MXU1_UART_5_DATA_BITS;
		break;
	case CS6:
		config->bDataBits = MXU1_UART_6_DATA_BITS;
		break;
	case CS7:
		config->bDataBits = MXU1_UART_7_DATA_BITS;
		break;
	default:
	case CS8:
		config->bDataBits = MXU1_UART_8_DATA_BITS;
		break;
	}

	/* CMSPAR selects mark/space ("stick") parity; PARODD picks which. */
	if (C_PARENB(tty)) {
		config->wFlags |= MXU1_UART_ENABLE_PARITY_CHECKING;
		if (C_CMSPAR(tty)) {
			if (C_PARODD(tty))
				config->bParity = MXU1_UART_MARK_PARITY;
			else
				config->bParity = MXU1_UART_SPACE_PARITY;
		} else {
			if (C_PARODD(tty))
				config->bParity = MXU1_UART_ODD_PARITY;
			else
				config->bParity = MXU1_UART_EVEN_PARITY;
		}
	} else {
		config->bParity = MXU1_UART_NO_PARITY;
	}

	if (C_CSTOPB(tty))
		config->bStopBits = MXU1_UART_2_STOP_BITS;
	else
		config->bStopBits = MXU1_UART_1_STOP_BITS;

	if (C_CRTSCTS(tty)) {
		/* RTS flow control must be off to drop RTS for baud rate B0 */
		if (C_BAUD(tty) != B0)
			config->wFlags |= MXU1_UART_ENABLE_RTS_IN;
		config->wFlags |= MXU1_UART_ENABLE_CTS_OUT;
	}

	if (I_IXOFF(tty) || I_IXON(tty)) {
		config->cXon = START_CHAR(tty);
		config->cXoff = STOP_CHAR(tty);

		if (I_IXOFF(tty))
			config->wFlags |= MXU1_UART_ENABLE_X_IN;

		if (I_IXON(tty))
			config->wFlags |= MXU1_UART_ENABLE_X_OUT;
	}

	/* Device takes a divisor of its base clock; fall back to 9600. */
	baud = tty_get_baud_rate(tty);
	if (!baud)
		baud = 9600;
	config->wBaudRate = MXU1_BAUD_BASE / baud;

	dev_dbg(&port->dev, "%s - BaudRate=%d, wBaudRate=%d, wFlags=0x%04X, bDataBits=%d, bParity=%d, bStopBits=%d, cXon=%d, cXoff=%d, bUartMode=%d\n",
		__func__, baud, config->wBaudRate, config->wFlags,
		config->bDataBits, config->bParity, config->bStopBits,
		config->cXon, config->cXoff, config->bUartMode);

	/* Multi-byte fields go over the wire big-endian; swap in place
	 * after the debug print so the log shows host-order values. */
	cpu_to_be16s(&config->wBaudRate);
	cpu_to_be16s(&config->wFlags);

	status = mxu1_send_ctrl_data_urb(port->serial, MXU1_SET_CONFIG, 0,
					 MXU1_UART1_PORT, config,
					 sizeof(*config));
	if (status)
		dev_err(&port->dev, "cannot set config: %d\n", status);

	/* mcr is cached under the mutex; drop DTR/RTS on B0, raise them
	 * again when leaving B0. */
	mutex_lock(&mxport->mutex);
	mcr = mxport->mcr;

	if (C_BAUD(tty) == B0)
		mcr &= ~(MXU1_MCR_DTR | MXU1_MCR_RTS);
	else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
		mcr |= MXU1_MCR_DTR | MXU1_MCR_RTS;

	status = mxu1_set_mcr(port, mcr);
	if (status)
		dev_err(&port->dev, "cannot set modem control: %d\n", status);
	else
		mxport->mcr = mcr;

	mutex_unlock(&mxport->mutex);

	kfree(config);
}
627
628static int mxu1_get_serial_info(struct usb_serial_port *port,
629 struct serial_struct __user *ret_arg)
630{
631 struct serial_struct ret_serial;
632 unsigned cwait;
633
634 if (!ret_arg)
635 return -EFAULT;
636
637 cwait = port->port.closing_wait;
638 if (cwait != ASYNC_CLOSING_WAIT_NONE)
639 cwait = jiffies_to_msecs(cwait) / 10;
640
641 memset(&ret_serial, 0, sizeof(ret_serial));
642
643 ret_serial.type = PORT_16550A;
644 ret_serial.line = port->minor;
645 ret_serial.port = 0;
646 ret_serial.xmit_fifo_size = port->bulk_out_size;
647 ret_serial.baud_base = MXU1_BAUD_BASE;
648 ret_serial.close_delay = 5*HZ;
649 ret_serial.closing_wait = cwait;
650
651 if (copy_to_user(ret_arg, &ret_serial, sizeof(*ret_arg)))
652 return -EFAULT;
653
654 return 0;
655}
656
657
658static int mxu1_set_serial_info(struct usb_serial_port *port,
659 struct serial_struct __user *new_arg)
660{
661 struct serial_struct new_serial;
662 unsigned cwait;
663
664 if (copy_from_user(&new_serial, new_arg, sizeof(new_serial)))
665 return -EFAULT;
666
667 cwait = new_serial.closing_wait;
668 if (cwait != ASYNC_CLOSING_WAIT_NONE)
669 cwait = msecs_to_jiffies(10 * new_serial.closing_wait);
670
671 port->port.closing_wait = cwait;
672
673 return 0;
674}
675
676static int mxu1_ioctl(struct tty_struct *tty,
677 unsigned int cmd, unsigned long arg)
678{
679 struct usb_serial_port *port = tty->driver_data;
680
681 switch (cmd) {
682 case TIOCGSERIAL:
683 return mxu1_get_serial_info(port,
684 (struct serial_struct __user *)arg);
685 case TIOCSSERIAL:
686 return mxu1_set_serial_info(port,
687 (struct serial_struct __user *)arg);
688 }
689
690 return -ENOIOCTLCMD;
691}
692
693static int mxu1_tiocmget(struct tty_struct *tty)
694{
695 struct usb_serial_port *port = tty->driver_data;
696 struct mxu1_port *mxport = usb_get_serial_port_data(port);
697 unsigned int result;
698 unsigned int msr;
699 unsigned int mcr;
700 unsigned long flags;
701
702 mutex_lock(&mxport->mutex);
703 spin_lock_irqsave(&mxport->spinlock, flags);
704
705 msr = mxport->msr;
706 mcr = mxport->mcr;
707
708 spin_unlock_irqrestore(&mxport->spinlock, flags);
709 mutex_unlock(&mxport->mutex);
710
711 result = ((mcr & MXU1_MCR_DTR) ? TIOCM_DTR : 0) |
712 ((mcr & MXU1_MCR_RTS) ? TIOCM_RTS : 0) |
713 ((mcr & MXU1_MCR_LOOP) ? TIOCM_LOOP : 0) |
714 ((msr & MXU1_MSR_CTS) ? TIOCM_CTS : 0) |
715 ((msr & MXU1_MSR_CD) ? TIOCM_CAR : 0) |
716 ((msr & MXU1_MSR_RI) ? TIOCM_RI : 0) |
717 ((msr & MXU1_MSR_DSR) ? TIOCM_DSR : 0);
718
719 dev_dbg(&port->dev, "%s - 0x%04X\n", __func__, result);
720
721 return result;
722}
723
724static int mxu1_tiocmset(struct tty_struct *tty,
725 unsigned int set, unsigned int clear)
726{
727 struct usb_serial_port *port = tty->driver_data;
728 struct mxu1_port *mxport = usb_get_serial_port_data(port);
729 int err;
730 unsigned int mcr;
731
732 mutex_lock(&mxport->mutex);
733 mcr = mxport->mcr;
734
735 if (set & TIOCM_RTS)
736 mcr |= MXU1_MCR_RTS;
737 if (set & TIOCM_DTR)
738 mcr |= MXU1_MCR_DTR;
739 if (set & TIOCM_LOOP)
740 mcr |= MXU1_MCR_LOOP;
741
742 if (clear & TIOCM_RTS)
743 mcr &= ~MXU1_MCR_RTS;
744 if (clear & TIOCM_DTR)
745 mcr &= ~MXU1_MCR_DTR;
746 if (clear & TIOCM_LOOP)
747 mcr &= ~MXU1_MCR_LOOP;
748
749 err = mxu1_set_mcr(port, mcr);
750 if (!err)
751 mxport->mcr = mcr;
752
753 mutex_unlock(&mxport->mutex);
754
755 return err;
756}
757
758static void mxu1_break(struct tty_struct *tty, int break_state)
759{
760 struct usb_serial_port *port = tty->driver_data;
761 struct mxu1_port *mxport = usb_get_serial_port_data(port);
762
763 if (break_state == -1)
764 mxport->send_break = true;
765 else
766 mxport->send_break = false;
767
768 mxu1_set_termios(tty, port, NULL);
769}
770
/*
 * Open the port: start modem-status interrupts, configure the UART,
 * bring the port up, flush both directions, then bring it up a second
 * time before handing off to the generic bulk I/O code.
 *
 * NOTE(review): the open/start sequence is deliberately performed twice,
 * with purges and a data-toggle reset in between — presumably required
 * by the device firmware (the TI 3410/5052 driver uses the same dance);
 * confirm before restructuring. Returns 0 or a negative error code.
 */
static int mxu1_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	struct mxu1_port *mxport = usb_get_serial_port_data(port);
	struct usb_serial *serial = port->serial;
	int status;
	u16 open_settings;

	open_settings = (MXU1_PIPE_MODE_CONTINUOUS |
			 MXU1_PIPE_TIMEOUT_ENABLE |
			 (MXU1_TRANSFER_TIMEOUT << 2));

	/* Start from a clean modem-status snapshot. */
	mxport->msr = 0;

	status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
	if (status) {
		dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
			status);
		return status;
	}

	if (tty)
		mxu1_set_termios(tty, port, NULL);

	status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT,
				    open_settings, MXU1_UART1_PORT);
	if (status) {
		dev_err(&port->dev, "cannot send open command: %d\n", status);
		goto unlink_int_urb;
	}

	status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT,
				    0, MXU1_UART1_PORT);
	if (status) {
		dev_err(&port->dev, "cannot send start command: %d\n", status);
		goto unlink_int_urb;
	}

	/* Discard anything queued in either direction before use. */
	status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT,
				    MXU1_PURGE_INPUT, MXU1_UART1_PORT);
	if (status) {
		dev_err(&port->dev, "cannot clear input buffers: %d\n",
			status);

		goto unlink_int_urb;
	}

	status = mxu1_send_ctrl_urb(serial, MXU1_PURGE_PORT,
				    MXU1_PURGE_OUTPUT, MXU1_UART1_PORT);
	if (status) {
		dev_err(&port->dev, "cannot clear output buffers: %d\n",
			status);

		goto unlink_int_urb;
	}

	/*
	 * reset the data toggle on the bulk endpoints to work around bug in
	 * host controllers where things get out of sync some times
	 */
	usb_clear_halt(serial->dev, port->write_urb->pipe);
	usb_clear_halt(serial->dev, port->read_urb->pipe);

	/* Reconfigure and re-open after the purges (see NOTE above). */
	if (tty)
		mxu1_set_termios(tty, port, NULL);

	status = mxu1_send_ctrl_urb(serial, MXU1_OPEN_PORT,
				    open_settings, MXU1_UART1_PORT);
	if (status) {
		dev_err(&port->dev, "cannot send open command: %d\n", status);
		goto unlink_int_urb;
	}

	status = mxu1_send_ctrl_urb(serial, MXU1_START_PORT,
				    0, MXU1_UART1_PORT);
	if (status) {
		dev_err(&port->dev, "cannot send start command: %d\n", status);
		goto unlink_int_urb;
	}

	status = usb_serial_generic_open(tty, port);
	if (status)
		goto unlink_int_urb;

	return 0;

unlink_int_urb:
	usb_kill_urb(port->interrupt_in_urb);

	return status;
}
861
862static void mxu1_close(struct usb_serial_port *port)
863{
864 int status;
865
866 usb_serial_generic_close(port);
867 usb_kill_urb(port->interrupt_in_urb);
868
869 status = mxu1_send_ctrl_urb(port->serial, MXU1_CLOSE_PORT,
870 0, MXU1_UART1_PORT);
871 if (status) {
872 dev_err(&port->dev, "failed to send close port command: %d\n",
873 status);
874 }
875}
876
877static void mxu1_handle_new_msr(struct usb_serial_port *port, u8 msr)
878{
879 struct mxu1_port *mxport = usb_get_serial_port_data(port);
880 struct async_icount *icount;
881 unsigned long flags;
882
883 dev_dbg(&port->dev, "%s - msr 0x%02X\n", __func__, msr);
884
885 spin_lock_irqsave(&mxport->spinlock, flags);
886 mxport->msr = msr & MXU1_MSR_MASK;
887 spin_unlock_irqrestore(&mxport->spinlock, flags);
888
889 if (msr & MXU1_MSR_DELTA_MASK) {
890 icount = &port->icount;
891 if (msr & MXU1_MSR_DELTA_CTS)
892 icount->cts++;
893 if (msr & MXU1_MSR_DELTA_DSR)
894 icount->dsr++;
895 if (msr & MXU1_MSR_DELTA_CD)
896 icount->dcd++;
897 if (msr & MXU1_MSR_DELTA_RI)
898 icount->rng++;
899
900 wake_up_interruptible(&port->port.delta_msr_wait);
901 }
902}
903
/*
 * Completion handler for the interrupt-in URB.
 *
 * Packets are two bytes: a code byte and a data byte. Hardware errors
 * are logged; modem-status codes update the cached MSR. The URB is
 * resubmitted on every path except permanent unlink/shutdown statuses,
 * so the interrupt pipe keeps running for the lifetime of the open port.
 */
static void mxu1_interrupt_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	unsigned char *data = urb->transfer_buffer;
	int length = urb->actual_length;
	int function;
	int status;
	u8 msr;

	switch (urb->status) {
	case 0:
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
		/* URB was killed or device went away: do not resubmit. */
		dev_dbg(&port->dev, "%s - urb shutting down: %d\n",
			__func__, urb->status);
		return;
	default:
		/* Transient error: log and try to keep the pipe alive. */
		dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
			__func__, urb->status);
		goto exit;
	}

	if (length != 2) {
		dev_dbg(&port->dev, "%s - bad packet size: %d\n",
			__func__, length);
		goto exit;
	}

	if (data[0] == MXU1_CODE_HARDWARE_ERROR) {
		dev_err(&port->dev, "hardware error: %d\n", data[1]);
		goto exit;
	}

	function = mxu1_get_func_from_code(data[0]);

	dev_dbg(&port->dev, "%s - function %d, data 0x%02X\n",
		__func__, function, data[1]);

	switch (function) {
	case MXU1_CODE_DATA_ERROR:
		/* Line errors are only reported at debug level. */
		dev_dbg(&port->dev, "%s - DATA ERROR, data 0x%02X\n",
			__func__, data[1]);
		break;

	case MXU1_CODE_MODEM_STATUS:
		msr = data[1];
		mxu1_handle_new_msr(port, msr);
		break;

	default:
		dev_err(&port->dev, "unknown interrupt code: 0x%02X\n",
			data[1]);
		break;
	}

exit:
	/* GFP_ATOMIC: we are in URB completion (interrupt) context. */
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		dev_err(&port->dev, "resubmit interrupt urb failed: %d\n",
			status);
	}
}
968
/*
 * usb_serial driver description for the single-port MOXA UPort 11x0.
 * Bulk read/write use the usb-serial generic implementation; only the
 * control-path operations are provided by this driver.
 */
static struct usb_serial_driver mxu11x0_device = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "mxu11x0",
	},
	.description = "MOXA UPort 11x0",
	.id_table = mxu1_idtable,
	.num_ports = 1,
	.port_probe = mxu1_port_probe,
	.port_remove = mxu1_port_remove,
	.attach = mxu1_startup,
	.release = mxu1_release,
	.open = mxu1_open,
	.close = mxu1_close,
	.ioctl = mxu1_ioctl,
	.set_termios = mxu1_set_termios,
	.tiocmget = mxu1_tiocmget,
	.tiocmset = mxu1_tiocmset,
	.tiocmiwait = usb_serial_generic_tiocmiwait,
	.get_icount = usb_serial_generic_get_icount,
	.break_ctl = mxu1_break,
	.read_int_callback = mxu1_interrupt_callback,
};
992
/* NULL-terminated driver list required by module_usb_serial_driver(). */
static struct usb_serial_driver *const serial_drivers[] = {
	&mxu11x0_device, NULL
};

module_usb_serial_driver(serial_drivers, mxu1_idtable);

MODULE_AUTHOR("Mathieu Othacehe <m.othacehe@gmail.com>");
MODULE_DESCRIPTION("MOXA UPort 11x0 USB to Serial Hub Driver");
MODULE_LICENSE("GPL");
/* Firmware images requested at attach time, one per device model. */
MODULE_FIRMWARE("moxa/moxa-1110.fw");
MODULE_FIRMWARE("moxa/moxa-1130.fw");
MODULE_FIRMWARE("moxa/moxa-1131.fw");
MODULE_FIRMWARE("moxa/moxa-1150.fw");
MODULE_FIRMWARE("moxa/moxa-1151.fw");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index db86e512e0fc..348e19834b83 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -270,6 +270,7 @@ static void option_instat_callback(struct urb *urb);
270#define TELIT_PRODUCT_UE910_V2 0x1012 270#define TELIT_PRODUCT_UE910_V2 0x1012
271#define TELIT_PRODUCT_LE922_USBCFG0 0x1042 271#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
272#define TELIT_PRODUCT_LE922_USBCFG3 0x1043 272#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
273#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
273#define TELIT_PRODUCT_LE920 0x1200 274#define TELIT_PRODUCT_LE920 0x1200
274#define TELIT_PRODUCT_LE910 0x1201 275#define TELIT_PRODUCT_LE910 0x1201
275 276
@@ -315,6 +316,7 @@ static void option_instat_callback(struct urb *urb);
315#define TOSHIBA_PRODUCT_G450 0x0d45 316#define TOSHIBA_PRODUCT_G450 0x0d45
316 317
317#define ALINK_VENDOR_ID 0x1e0e 318#define ALINK_VENDOR_ID 0x1e0e
319#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
318#define ALINK_PRODUCT_PH300 0x9100 320#define ALINK_PRODUCT_PH300 0x9100
319#define ALINK_PRODUCT_3GU 0x9200 321#define ALINK_PRODUCT_3GU 0x9200
320 322
@@ -607,6 +609,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
607 .reserved = BIT(3) | BIT(4), 609 .reserved = BIT(3) | BIT(4),
608}; 610};
609 611
612static const struct option_blacklist_info simcom_sim7100e_blacklist = {
613 .reserved = BIT(5) | BIT(6),
614};
615
610static const struct option_blacklist_info telit_le910_blacklist = { 616static const struct option_blacklist_info telit_le910_blacklist = {
611 .sendsetup = BIT(0), 617 .sendsetup = BIT(0),
612 .reserved = BIT(1) | BIT(2), 618 .reserved = BIT(1) | BIT(2),
@@ -1122,9 +1128,13 @@ static const struct usb_device_id option_ids[] = {
1122 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) }, 1128 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
1123 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, 1129 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
1124 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ 1130 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
1131 { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
1132 .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
1125 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1133 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1126 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1134 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1127 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1135 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1136 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
1137 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1128 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1138 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1129 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1139 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1130 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1140 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1176,6 +1186,8 @@ static const struct usb_device_id option_ids[] = {
1176 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, 1186 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1177 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), 1187 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
1178 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, 1188 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
1189 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
1190 .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
1179 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), 1191 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1180 .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, 1192 .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
1181 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), 1193 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
@@ -1645,6 +1657,8 @@ static const struct usb_device_id option_ids[] = {
1645 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, 1657 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
1646 { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) }, 1658 { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
1647 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, 1659 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
1660 { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
1661 .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
1648 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), 1662 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
1649 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist 1663 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
1650 }, 1664 },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9919d2a9faf2..1bc6089b9008 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -157,14 +157,17 @@ static const struct usb_device_id id_table[] = {
157 {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */ 157 {DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
158 {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */ 158 {DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
159 {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */ 159 {DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
160 {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx/EM74xx */ 160 {DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
161 {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx/EM74xx */ 161 {DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
162 {DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
163 {DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
162 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ 164 {DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
163 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ 165 {DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
164 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 166 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
165 {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ 167 {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
166 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 168 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
167 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */ 169 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
170 {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
168 171
169 /* Huawei devices */ 172 /* Huawei devices */
170 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ 173 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 2760a7ba3f30..8c80a48e3233 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -446,7 +446,8 @@ static long vfio_pci_ioctl(void *device_data,
446 info.num_regions = VFIO_PCI_NUM_REGIONS; 446 info.num_regions = VFIO_PCI_NUM_REGIONS;
447 info.num_irqs = VFIO_PCI_NUM_IRQS; 447 info.num_irqs = VFIO_PCI_NUM_IRQS;
448 448
449 return copy_to_user((void __user *)arg, &info, minsz); 449 return copy_to_user((void __user *)arg, &info, minsz) ?
450 -EFAULT : 0;
450 451
451 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { 452 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
452 struct pci_dev *pdev = vdev->pdev; 453 struct pci_dev *pdev = vdev->pdev;
@@ -520,7 +521,8 @@ static long vfio_pci_ioctl(void *device_data,
520 return -EINVAL; 521 return -EINVAL;
521 } 522 }
522 523
523 return copy_to_user((void __user *)arg, &info, minsz); 524 return copy_to_user((void __user *)arg, &info, minsz) ?
525 -EFAULT : 0;
524 526
525 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { 527 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
526 struct vfio_irq_info info; 528 struct vfio_irq_info info;
@@ -555,7 +557,8 @@ static long vfio_pci_ioctl(void *device_data,
555 else 557 else
556 info.flags |= VFIO_IRQ_INFO_NORESIZE; 558 info.flags |= VFIO_IRQ_INFO_NORESIZE;
557 559
558 return copy_to_user((void __user *)arg, &info, minsz); 560 return copy_to_user((void __user *)arg, &info, minsz) ?
561 -EFAULT : 0;
559 562
560 } else if (cmd == VFIO_DEVICE_SET_IRQS) { 563 } else if (cmd == VFIO_DEVICE_SET_IRQS) {
561 struct vfio_irq_set hdr; 564 struct vfio_irq_set hdr;
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 418cdd9ba3f4..e65b142d3422 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -219,7 +219,8 @@ static long vfio_platform_ioctl(void *device_data,
219 info.num_regions = vdev->num_regions; 219 info.num_regions = vdev->num_regions;
220 info.num_irqs = vdev->num_irqs; 220 info.num_irqs = vdev->num_irqs;
221 221
222 return copy_to_user((void __user *)arg, &info, minsz); 222 return copy_to_user((void __user *)arg, &info, minsz) ?
223 -EFAULT : 0;
223 224
224 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) { 225 } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
225 struct vfio_region_info info; 226 struct vfio_region_info info;
@@ -240,7 +241,8 @@ static long vfio_platform_ioctl(void *device_data,
240 info.size = vdev->regions[info.index].size; 241 info.size = vdev->regions[info.index].size;
241 info.flags = vdev->regions[info.index].flags; 242 info.flags = vdev->regions[info.index].flags;
242 243
243 return copy_to_user((void __user *)arg, &info, minsz); 244 return copy_to_user((void __user *)arg, &info, minsz) ?
245 -EFAULT : 0;
244 246
245 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) { 247 } else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
246 struct vfio_irq_info info; 248 struct vfio_irq_info info;
@@ -259,7 +261,8 @@ static long vfio_platform_ioctl(void *device_data,
259 info.flags = vdev->irqs[info.index].flags; 261 info.flags = vdev->irqs[info.index].flags;
260 info.count = vdev->irqs[info.index].count; 262 info.count = vdev->irqs[info.index].count;
261 263
262 return copy_to_user((void __user *)arg, &info, minsz); 264 return copy_to_user((void __user *)arg, &info, minsz) ?
265 -EFAULT : 0;
263 266
264 } else if (cmd == VFIO_DEVICE_SET_IRQS) { 267 } else if (cmd == VFIO_DEVICE_SET_IRQS) {
265 struct vfio_irq_set hdr; 268 struct vfio_irq_set hdr;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 6f1ea3dddbad..75b24e93cedb 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -999,7 +999,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
999 999
1000 info.iova_pgsizes = vfio_pgsize_bitmap(iommu); 1000 info.iova_pgsizes = vfio_pgsize_bitmap(iommu);
1001 1001
1002 return copy_to_user((void __user *)arg, &info, minsz); 1002 return copy_to_user((void __user *)arg, &info, minsz) ?
1003 -EFAULT : 0;
1003 1004
1004 } else if (cmd == VFIO_IOMMU_MAP_DMA) { 1005 } else if (cmd == VFIO_IOMMU_MAP_DMA) {
1005 struct vfio_iommu_type1_dma_map map; 1006 struct vfio_iommu_type1_dma_map map;
@@ -1032,7 +1033,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
1032 if (ret) 1033 if (ret)
1033 return ret; 1034 return ret;
1034 1035
1035 return copy_to_user((void __user *)arg, &unmap, minsz); 1036 return copy_to_user((void __user *)arg, &unmap, minsz) ?
1037 -EFAULT : 0;
1036 } 1038 }
1037 1039
1038 return -ENOTTY; 1040 return -ENOTTY;
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ad2146a9ab2d..236553e81027 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1156,6 +1156,8 @@ int vhost_init_used(struct vhost_virtqueue *vq)
1156{ 1156{
1157 __virtio16 last_used_idx; 1157 __virtio16 last_used_idx;
1158 int r; 1158 int r;
1159 bool is_le = vq->is_le;
1160
1159 if (!vq->private_data) { 1161 if (!vq->private_data) {
1160 vq->is_le = virtio_legacy_is_little_endian(); 1162 vq->is_le = virtio_legacy_is_little_endian();
1161 return 0; 1163 return 0;
@@ -1165,15 +1167,20 @@ int vhost_init_used(struct vhost_virtqueue *vq)
1165 1167
1166 r = vhost_update_used_flags(vq); 1168 r = vhost_update_used_flags(vq);
1167 if (r) 1169 if (r)
1168 return r; 1170 goto err;
1169 vq->signalled_used_valid = false; 1171 vq->signalled_used_valid = false;
1170 if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) 1172 if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx)) {
1171 return -EFAULT; 1173 r = -EFAULT;
1174 goto err;
1175 }
1172 r = __get_user(last_used_idx, &vq->used->idx); 1176 r = __get_user(last_used_idx, &vq->used->idx);
1173 if (r) 1177 if (r)
1174 return r; 1178 goto err;
1175 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx); 1179 vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
1176 return 0; 1180 return 0;
1181err:
1182 vq->is_le = is_le;
1183 return r;
1177} 1184}
1178EXPORT_SYMBOL_GPL(vhost_init_used); 1185EXPORT_SYMBOL_GPL(vhost_init_used);
1179 1186
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 92f394927f24..6e92917ba77a 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -709,6 +709,7 @@ static int con2fb_acquire_newinfo(struct vc_data *vc, struct fb_info *info,
709 } 709 }
710 710
711 if (!err) { 711 if (!err) {
712 ops->cur_blink_jiffies = HZ / 5;
712 info->fbcon_par = ops; 713 info->fbcon_par = ops;
713 714
714 if (vc) 715 if (vc)
@@ -956,6 +957,7 @@ static const char *fbcon_startup(void)
956 ops->currcon = -1; 957 ops->currcon = -1;
957 ops->graphics = 1; 958 ops->graphics = 1;
958 ops->cur_rotate = -1; 959 ops->cur_rotate = -1;
960 ops->cur_blink_jiffies = HZ / 5;
959 info->fbcon_par = ops; 961 info->fbcon_par = ops;
960 p->con_rotate = initial_rotation; 962 p->con_rotate = initial_rotation;
961 set_blitting_type(vc, info); 963 set_blitting_type(vc, info);
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 0081725c6b5b..6b2a06d09f2b 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -152,7 +152,7 @@ static void lcdc_write(unsigned int val, unsigned int addr)
152 152
153struct da8xx_fb_par { 153struct da8xx_fb_par {
154 struct device *dev; 154 struct device *dev;
155 resource_size_t p_palette_base; 155 dma_addr_t p_palette_base;
156 unsigned char *v_palette_base; 156 unsigned char *v_palette_base;
157 dma_addr_t vram_phys; 157 dma_addr_t vram_phys;
158 unsigned long vram_size; 158 unsigned long vram_size;
@@ -1428,7 +1428,7 @@ static int fb_probe(struct platform_device *device)
1428 1428
1429 par->vram_virt = dma_alloc_coherent(NULL, 1429 par->vram_virt = dma_alloc_coherent(NULL,
1430 par->vram_size, 1430 par->vram_size,
1431 (resource_size_t *) &par->vram_phys, 1431 &par->vram_phys,
1432 GFP_KERNEL | GFP_DMA); 1432 GFP_KERNEL | GFP_DMA);
1433 if (!par->vram_virt) { 1433 if (!par->vram_virt) {
1434 dev_err(&device->dev, 1434 dev_err(&device->dev,
@@ -1448,7 +1448,7 @@ static int fb_probe(struct platform_device *device)
1448 1448
1449 /* allocate palette buffer */ 1449 /* allocate palette buffer */
1450 par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE, 1450 par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE,
1451 (resource_size_t *)&par->p_palette_base, 1451 &par->p_palette_base,
1452 GFP_KERNEL | GFP_DMA); 1452 GFP_KERNEL | GFP_DMA);
1453 if (!par->v_palette_base) { 1453 if (!par->v_palette_base) {
1454 dev_err(&device->dev, 1454 dev_err(&device->dev,
diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c
index 95873f26e39c..de2f3e793786 100644
--- a/drivers/video/fbdev/exynos/s6e8ax0.c
+++ b/drivers/video/fbdev/exynos/s6e8ax0.c
@@ -829,8 +829,7 @@ static int s6e8ax0_probe(struct mipi_dsim_lcd_device *dsim_dev)
829 return 0; 829 return 0;
830} 830}
831 831
832#ifdef CONFIG_PM 832static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
833static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
834{ 833{
835 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); 834 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
836 835
@@ -843,7 +842,7 @@ static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
843 return 0; 842 return 0;
844} 843}
845 844
846static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev) 845static int __maybe_unused s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
847{ 846{
848 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev); 847 struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
849 848
@@ -855,10 +854,6 @@ static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
855 854
856 return 0; 855 return 0;
857} 856}
858#else
859#define s6e8ax0_suspend NULL
860#define s6e8ax0_resume NULL
861#endif
862 857
863static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = { 858static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
864 .name = "s6e8ax0", 859 .name = "s6e8ax0",
@@ -867,8 +862,8 @@ static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
867 .power_on = s6e8ax0_power_on, 862 .power_on = s6e8ax0_power_on,
868 .set_sequence = s6e8ax0_set_sequence, 863 .set_sequence = s6e8ax0_set_sequence,
869 .probe = s6e8ax0_probe, 864 .probe = s6e8ax0_probe,
870 .suspend = s6e8ax0_suspend, 865 .suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL,
871 .resume = s6e8ax0_resume, 866 .resume = IS_ENABLED(CONFIG_PM) ? s6e8ax0_resume : NULL,
872}; 867};
873 868
874static int s6e8ax0_init(void) 869static int s6e8ax0_init(void)
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index cee88603efc9..bb2f1e866020 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -902,6 +902,21 @@ static int imxfb_probe(struct platform_device *pdev)
902 goto failed_getclock; 902 goto failed_getclock;
903 } 903 }
904 904
905 /*
906 * The LCDC controller does not have an enable bit. The
907 * controller starts directly when the clocks are enabled.
908 * If the clocks are enabled when the controller is not yet
909 * programmed with proper register values (enabled at the
910 * bootloader, for example) then it just goes into some undefined
911 * state.
912 * To avoid this issue, let's enable and disable LCDC IPG clock
913 * so that we force some kind of 'reset' to the LCDC block.
914 */
915 ret = clk_prepare_enable(fbi->clk_ipg);
916 if (ret)
917 goto failed_getclock;
918 clk_disable_unprepare(fbi->clk_ipg);
919
905 fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 920 fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
906 if (IS_ERR(fbi->clk_ahb)) { 921 if (IS_ERR(fbi->clk_ahb)) {
907 ret = PTR_ERR(fbi->clk_ahb); 922 ret = PTR_ERR(fbi->clk_ahb);
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index de54a4748065..b6f83d5df9fd 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -503,8 +503,7 @@ static int mmphw_probe(struct platform_device *pdev)
503 ctrl->reg_base = devm_ioremap_nocache(ctrl->dev, 503 ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
504 res->start, resource_size(res)); 504 res->start, resource_size(res));
505 if (ctrl->reg_base == NULL) { 505 if (ctrl->reg_base == NULL) {
506 dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__, 506 dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res);
507 res->start, res->end);
508 ret = -ENOMEM; 507 ret = -ENOMEM;
509 goto failed; 508 goto failed;
510 } 509 }
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c
index c9293aea8ec3..a970edc2a6f8 100644
--- a/drivers/video/fbdev/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
@@ -123,11 +123,11 @@ static int ocfb_setupfb(struct ocfb_dev *fbdev)
123 123
124 /* Horizontal timings */ 124 /* Horizontal timings */
125 ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 | 125 ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 |
126 (var->right_margin - 1) << 16 | (var->xres - 1)); 126 (var->left_margin - 1) << 16 | (var->xres - 1));
127 127
128 /* Vertical timings */ 128 /* Vertical timings */
129 ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 | 129 ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 |
130 (var->lower_margin - 1) << 16 | (var->yres - 1)); 130 (var->upper_margin - 1) << 16 | (var->yres - 1));
131 131
132 /* Total length of frame */ 132 /* Total length of frame */
133 hlen = var->left_margin + var->right_margin + var->hsync_len + 133 hlen = var->left_margin + var->right_margin + var->hsync_len +
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index c0c11fad4611..7760fc1a2218 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -679,7 +679,7 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev)
679 679
680 pci_read_config_dword(pci_dev, 680 pci_read_config_dword(pci_dev,
681 notify + offsetof(struct virtio_pci_notify_cap, 681 notify + offsetof(struct virtio_pci_notify_cap,
682 cap.length), 682 cap.offset),
683 &notify_offset); 683 &notify_offset);
684 684
685 /* We don't know how many VQs we'll map, ahead of the time. 685 /* We don't know how many VQs we'll map, ahead of the time.
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 0f6d8515ba4f..80825a7e8e48 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1569,6 +1569,17 @@ config WATCHDOG_RIO
1569 machines. The watchdog timeout period is normally one minute but 1569 machines. The watchdog timeout period is normally one minute but
1570 can be changed with a boot-time parameter. 1570 can be changed with a boot-time parameter.
1571 1571
1572config WATCHDOG_SUN4V
1573 tristate "Sun4v Watchdog support"
1574 select WATCHDOG_CORE
1575 depends on SPARC64
1576 help
1577 Say Y here to support the hypervisor watchdog capability embedded
1578 in the SPARC sun4v architecture.
1579
1580 To compile this driver as a module, choose M here. The module will
1581 be called sun4v_wdt.
1582
1572# XTENSA Architecture 1583# XTENSA Architecture
1573 1584
1574# Xen Architecture 1585# Xen Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index f566753256ab..f6a6a387c6c7 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -179,6 +179,7 @@ obj-$(CONFIG_SH_WDT) += shwdt.o
179 179
180obj-$(CONFIG_WATCHDOG_RIO) += riowd.o 180obj-$(CONFIG_WATCHDOG_RIO) += riowd.o
181obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o 181obj-$(CONFIG_WATCHDOG_CP1XXX) += cpwd.o
182obj-$(CONFIG_WATCHDOG_SUN4V) += sun4v_wdt.o
182 183
183# XTENSA Architecture 184# XTENSA Architecture
184 185
diff --git a/drivers/watchdog/sun4v_wdt.c b/drivers/watchdog/sun4v_wdt.c
new file mode 100644
index 000000000000..1467fe50a76f
--- /dev/null
+++ b/drivers/watchdog/sun4v_wdt.c
@@ -0,0 +1,191 @@
1/*
2 * sun4v watchdog timer
3 * (c) Copyright 2016 Oracle Corporation
4 *
5 * Implement a simple watchdog driver using the built-in sun4v hypervisor
6 * watchdog support. If time expires, the hypervisor stops or bounces
7 * the guest domain.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/errno.h>
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/watchdog.h>
23#include <asm/hypervisor.h>
24#include <asm/mdesc.h>
25
26#define WDT_TIMEOUT 60
27#define WDT_MAX_TIMEOUT 31536000
28#define WDT_MIN_TIMEOUT 1
29#define WDT_DEFAULT_RESOLUTION_MS 1000 /* 1 second */
30
31static unsigned int timeout;
32module_param(timeout, uint, 0);
33MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
34 __MODULE_STRING(WDT_TIMEOUT) ")");
35
36static bool nowayout = WATCHDOG_NOWAYOUT;
37module_param(nowayout, bool, S_IRUGO);
38MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
39 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
40
41static int sun4v_wdt_stop(struct watchdog_device *wdd)
42{
43 sun4v_mach_set_watchdog(0, NULL);
44
45 return 0;
46}
47
48static int sun4v_wdt_ping(struct watchdog_device *wdd)
49{
50 int hverr;
51
52 /*
53 * HV watchdog timer will round up the timeout
54 * passed in to the nearest multiple of the
55 * watchdog resolution in milliseconds.
56 */
57 hverr = sun4v_mach_set_watchdog(wdd->timeout * 1000, NULL);
58 if (hverr == HV_EINVAL)
59 return -EINVAL;
60
61 return 0;
62}
63
64static int sun4v_wdt_set_timeout(struct watchdog_device *wdd,
65 unsigned int timeout)
66{
67 wdd->timeout = timeout;
68
69 return 0;
70}
71
72static const struct watchdog_info sun4v_wdt_ident = {
73 .options = WDIOF_SETTIMEOUT |
74 WDIOF_MAGICCLOSE |
75 WDIOF_KEEPALIVEPING,
76 .identity = "sun4v hypervisor watchdog",
77 .firmware_version = 0,
78};
79
80static struct watchdog_ops sun4v_wdt_ops = {
81 .owner = THIS_MODULE,
82 .start = sun4v_wdt_ping,
83 .stop = sun4v_wdt_stop,
84 .ping = sun4v_wdt_ping,
85 .set_timeout = sun4v_wdt_set_timeout,
86};
87
88static struct watchdog_device wdd = {
89 .info = &sun4v_wdt_ident,
90 .ops = &sun4v_wdt_ops,
91 .min_timeout = WDT_MIN_TIMEOUT,
92 .max_timeout = WDT_MAX_TIMEOUT,
93 .timeout = WDT_TIMEOUT,
94};
95
96static int __init sun4v_wdt_init(void)
97{
98 struct mdesc_handle *handle;
99 u64 node;
100 const u64 *value;
101 int err = 0;
102 unsigned long major = 1, minor = 1;
103
104 /*
105 * There are 2 properties that can be set from the control
106 * domain for the watchdog.
107 * watchdog-resolution
108 * watchdog-max-timeout
109 *
110 * We can expect a handle to be returned otherwise something
111 * serious is wrong. Correct to return -ENODEV here.
112 */
113
114 handle = mdesc_grab();
115 if (!handle)
116 return -ENODEV;
117
118 node = mdesc_node_by_name(handle, MDESC_NODE_NULL, "platform");
119 err = -ENODEV;
120 if (node == MDESC_NODE_NULL)
121 goto out_release;
122
123 /*
124 * This is a safe way to validate if we are on the right
125 * platform.
126 */
127 if (sun4v_hvapi_register(HV_GRP_CORE, major, &minor))
128 goto out_hv_unreg;
129
130 /* Allow value of watchdog-resolution up to 1s (default) */
131 value = mdesc_get_property(handle, node, "watchdog-resolution", NULL);
132 err = -EINVAL;
133 if (value) {
134 if (*value == 0 ||
135 *value > WDT_DEFAULT_RESOLUTION_MS)
136 goto out_hv_unreg;
137 }
138
139 value = mdesc_get_property(handle, node, "watchdog-max-timeout", NULL);
140 if (value) {
141 /*
142 * If the property value (in ms) is smaller than
143 * min_timeout, return -EINVAL.
144 */
145 if (*value < wdd.min_timeout * 1000)
146 goto out_hv_unreg;
147
148 /*
149 * If the property value is smaller than
150 * default max_timeout then set watchdog max_timeout to
151 * the value of the property in seconds.
152 */
153 if (*value < wdd.max_timeout * 1000)
154 wdd.max_timeout = *value / 1000;
155 }
156
157 watchdog_init_timeout(&wdd, timeout, NULL);
158
159 watchdog_set_nowayout(&wdd, nowayout);
160
161 err = watchdog_register_device(&wdd);
162 if (err)
163 goto out_hv_unreg;
164
165 pr_info("initialized (timeout=%ds, nowayout=%d)\n",
166 wdd.timeout, nowayout);
167
168 mdesc_release(handle);
169
170 return 0;
171
172out_hv_unreg:
173 sun4v_hvapi_unregister(HV_GRP_CORE);
174
175out_release:
176 mdesc_release(handle);
177 return err;
178}
179
180static void __exit sun4v_wdt_exit(void)
181{
182 sun4v_hvapi_unregister(HV_GRP_CORE);
183 watchdog_unregister_device(&wdd);
184}
185
186module_init(sun4v_wdt_init);
187module_exit(sun4v_wdt_exit);
188
189MODULE_AUTHOR("Wim Coekaerts <wim.coekaerts@oracle.com>");
190MODULE_DESCRIPTION("sun4v watchdog driver");
191MODULE_LICENSE("GPL");
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 73dafdc494aa..fb0221434f81 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
227 /* 227 /*
228 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able 228 * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
229 * to access the BARs where the MSI-X entries reside. 229 * to access the BARs where the MSI-X entries reside.
230 * But VF devices are unique in which the PF needs to be checked.
230 */ 231 */
231 pci_read_config_word(dev, PCI_COMMAND, &cmd); 232 pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
232 if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY)) 233 if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
233 return -ENXIO; 234 return -ENXIO;
234 235
@@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
332 struct xen_pcibk_dev_data *dev_data = NULL; 333 struct xen_pcibk_dev_data *dev_data = NULL;
333 struct xen_pci_op *op = &pdev->op; 334 struct xen_pci_op *op = &pdev->op;
334 int test_intx = 0; 335 int test_intx = 0;
336#ifdef CONFIG_PCI_MSI
337 unsigned int nr = 0;
338#endif
335 339
336 *op = pdev->sh_info->op; 340 *op = pdev->sh_info->op;
337 barrier(); 341 barrier();
@@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
360 op->err = xen_pcibk_disable_msi(pdev, dev, op); 364 op->err = xen_pcibk_disable_msi(pdev, dev, op);
361 break; 365 break;
362 case XEN_PCI_OP_enable_msix: 366 case XEN_PCI_OP_enable_msix:
367 nr = op->value;
363 op->err = xen_pcibk_enable_msix(pdev, dev, op); 368 op->err = xen_pcibk_enable_msix(pdev, dev, op);
364 break; 369 break;
365 case XEN_PCI_OP_disable_msix: 370 case XEN_PCI_OP_disable_msix:
@@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
382 if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) { 387 if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
383 unsigned int i; 388 unsigned int i;
384 389
385 for (i = 0; i < op->value; i++) 390 for (i = 0; i < nr; i++)
386 pdev->sh_info->op.msix_entries[i].vector = 391 pdev->sh_info->op.msix_entries[i].vector =
387 op->msix_entries[i].vector; 392 op->msix_entries[i].vector;
388 } 393 }
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ad4eb1024d1f..c46ee189466f 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -849,15 +849,31 @@ static int scsiback_map(struct vscsibk_info *info)
849} 849}
850 850
851/* 851/*
852 Check for a translation entry being present
853*/
854static struct v2p_entry *scsiback_chk_translation_entry(
855 struct vscsibk_info *info, struct ids_tuple *v)
856{
857 struct list_head *head = &(info->v2p_entry_lists);
858 struct v2p_entry *entry;
859
860 list_for_each_entry(entry, head, l)
861 if ((entry->v.chn == v->chn) &&
862 (entry->v.tgt == v->tgt) &&
863 (entry->v.lun == v->lun))
864 return entry;
865
866 return NULL;
867}
868
869/*
852 Add a new translation entry 870 Add a new translation entry
853*/ 871*/
854static int scsiback_add_translation_entry(struct vscsibk_info *info, 872static int scsiback_add_translation_entry(struct vscsibk_info *info,
855 char *phy, struct ids_tuple *v) 873 char *phy, struct ids_tuple *v)
856{ 874{
857 int err = 0; 875 int err = 0;
858 struct v2p_entry *entry;
859 struct v2p_entry *new; 876 struct v2p_entry *new;
860 struct list_head *head = &(info->v2p_entry_lists);
861 unsigned long flags; 877 unsigned long flags;
862 char *lunp; 878 char *lunp;
863 unsigned long long unpacked_lun; 879 unsigned long long unpacked_lun;
@@ -917,15 +933,10 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
917 spin_lock_irqsave(&info->v2p_lock, flags); 933 spin_lock_irqsave(&info->v2p_lock, flags);
918 934
919 /* Check double assignment to identical virtual ID */ 935 /* Check double assignment to identical virtual ID */
920 list_for_each_entry(entry, head, l) { 936 if (scsiback_chk_translation_entry(info, v)) {
921 if ((entry->v.chn == v->chn) && 937 pr_warn("Virtual ID is already used. Assignment was not performed.\n");
922 (entry->v.tgt == v->tgt) && 938 err = -EEXIST;
923 (entry->v.lun == v->lun)) { 939 goto out;
924 pr_warn("Virtual ID is already used. Assignment was not performed.\n");
925 err = -EEXIST;
926 goto out;
927 }
928
929 } 940 }
930 941
931 /* Create a new translation entry and add to the list */ 942 /* Create a new translation entry and add to the list */
@@ -933,18 +944,18 @@ static int scsiback_add_translation_entry(struct vscsibk_info *info,
933 new->v = *v; 944 new->v = *v;
934 new->tpg = tpg; 945 new->tpg = tpg;
935 new->lun = unpacked_lun; 946 new->lun = unpacked_lun;
936 list_add_tail(&new->l, head); 947 list_add_tail(&new->l, &info->v2p_entry_lists);
937 948
938out: 949out:
939 spin_unlock_irqrestore(&info->v2p_lock, flags); 950 spin_unlock_irqrestore(&info->v2p_lock, flags);
940 951
941out_free: 952out_free:
942 mutex_lock(&tpg->tv_tpg_mutex); 953 if (err) {
943 tpg->tv_tpg_fe_count--; 954 mutex_lock(&tpg->tv_tpg_mutex);
944 mutex_unlock(&tpg->tv_tpg_mutex); 955 tpg->tv_tpg_fe_count--;
945 956 mutex_unlock(&tpg->tv_tpg_mutex);
946 if (err)
947 kfree(new); 957 kfree(new);
958 }
948 959
949 return err; 960 return err;
950} 961}
@@ -956,39 +967,40 @@ static void __scsiback_del_translation_entry(struct v2p_entry *entry)
956} 967}
957 968
958/* 969/*
959 Delete the translation entry specfied 970 Delete the translation entry specified
960*/ 971*/
961static int scsiback_del_translation_entry(struct vscsibk_info *info, 972static int scsiback_del_translation_entry(struct vscsibk_info *info,
962 struct ids_tuple *v) 973 struct ids_tuple *v)
963{ 974{
964 struct v2p_entry *entry; 975 struct v2p_entry *entry;
965 struct list_head *head = &(info->v2p_entry_lists);
966 unsigned long flags; 976 unsigned long flags;
977 int ret = 0;
967 978
968 spin_lock_irqsave(&info->v2p_lock, flags); 979 spin_lock_irqsave(&info->v2p_lock, flags);
969 /* Find out the translation entry specified */ 980 /* Find out the translation entry specified */
970 list_for_each_entry(entry, head, l) { 981 entry = scsiback_chk_translation_entry(info, v);
971 if ((entry->v.chn == v->chn) && 982 if (entry)
972 (entry->v.tgt == v->tgt) && 983 __scsiback_del_translation_entry(entry);
973 (entry->v.lun == v->lun)) { 984 else
974 goto found; 985 ret = -ENOENT;
975 }
976 }
977
978 spin_unlock_irqrestore(&info->v2p_lock, flags);
979 return 1;
980
981found:
982 /* Delete the translation entry specfied */
983 __scsiback_del_translation_entry(entry);
984 986
985 spin_unlock_irqrestore(&info->v2p_lock, flags); 987 spin_unlock_irqrestore(&info->v2p_lock, flags);
986 return 0; 988 return ret;
987} 989}
988 990
989static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state, 991static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
990 char *phy, struct ids_tuple *vir, int try) 992 char *phy, struct ids_tuple *vir, int try)
991{ 993{
994 struct v2p_entry *entry;
995 unsigned long flags;
996
997 if (try) {
998 spin_lock_irqsave(&info->v2p_lock, flags);
999 entry = scsiback_chk_translation_entry(info, vir);
1000 spin_unlock_irqrestore(&info->v2p_lock, flags);
1001 if (entry)
1002 return;
1003 }
992 if (!scsiback_add_translation_entry(info, phy, vir)) { 1004 if (!scsiback_add_translation_entry(info, phy, vir)) {
993 if (xenbus_printf(XBT_NIL, info->dev->nodename, state, 1005 if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
994 "%d", XenbusStateInitialised)) { 1006 "%d", XenbusStateInitialised)) {
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 9433e46518c8..912b64edb42b 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -188,6 +188,8 @@ static int queue_reply(struct list_head *queue, const void *data, size_t len)
188 188
189 if (len == 0) 189 if (len == 0)
190 return 0; 190 return 0;
191 if (len > XENSTORE_PAYLOAD_MAX)
192 return -EINVAL;
191 193
192 rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL); 194 rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
193 if (rb == NULL) 195 if (rb == NULL)