aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpica/evgpe.c6
-rw-r--r--drivers/acpi/bus.c3
-rw-r--r--drivers/acpi/cppc_acpi.c9
-rw-r--r--drivers/acpi/nfit/core.c8
-rw-r--r--drivers/acpi/numa.c1
-rw-r--r--drivers/acpi/utils.c1
-rw-r--r--drivers/amba/bus.c45
-rw-r--r--drivers/android/binder.c3
-rw-r--r--drivers/android/binder_alloc.c18
-rw-r--r--drivers/ata/libata-zpodd.c34
-rw-r--r--drivers/auxdisplay/Kconfig38
-rw-r--r--drivers/auxdisplay/Makefile2
-rw-r--r--drivers/auxdisplay/charlcd.c55
-rw-r--r--drivers/auxdisplay/hd44780.c4
-rw-r--r--drivers/auxdisplay/panel.c4
-rw-r--r--drivers/base/memory.c1
-rw-r--r--drivers/base/power/domain.c13
-rw-r--r--drivers/base/swnode.c4
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/paride/pcd.c6
-rw-r--r--drivers/block/paride/pf.c16
-rw-r--r--drivers/block/rbd.c28
-rw-r--r--drivers/block/xen-blkback/xenbus.c99
-rw-r--r--drivers/block/zram/zram_drv.c32
-rw-r--r--drivers/char/Kconfig2
-rw-r--r--drivers/clocksource/arm_arch_timer.c11
-rw-r--r--drivers/clocksource/clps711x-timer.c44
-rw-r--r--drivers/clocksource/mips-gic-timer.c2
-rw-r--r--drivers/clocksource/tcb_clksrc.c4
-rw-r--r--drivers/clocksource/timer-riscv.c5
-rw-r--r--drivers/clocksource/timer-ti-dm.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c10
-rw-r--r--drivers/cpufreq/scpi-cpufreq.c2
-rw-r--r--drivers/dax/Kconfig28
-rw-r--r--drivers/dax/Makefile6
-rw-r--r--drivers/dax/bus.c503
-rw-r--r--drivers/dax/bus.h61
-rw-r--r--drivers/dax/dax-private.h34
-rw-r--r--drivers/dax/dax.h18
-rw-r--r--drivers/dax/device-dax.h25
-rw-r--r--drivers/dax/device.c363
-rw-r--r--drivers/dax/kmem.c108
-rw-r--r--drivers/dax/pmem.c153
-rw-r--r--drivers/dax/pmem/Makefile7
-rw-r--r--drivers/dax/pmem/compat.c73
-rw-r--r--drivers/dax/pmem/core.c71
-rw-r--r--drivers/dax/pmem/pmem.c40
-rw-r--r--drivers/dax/super.c41
-rw-r--r--drivers/dma/stm32-mdma.c4
-rw-r--r--drivers/gpio/gpio-adnp.c6
-rw-r--r--drivers/gpio/gpio-aspeed.c2
-rw-r--r--drivers/gpio/gpio-exar.c2
-rw-r--r--drivers/gpio/gpio-mockup.c10
-rw-r--r--drivers/gpio/gpiolib-of.c17
-rw-r--r--drivers/gpio/gpiolib.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v3_1.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c52
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c47
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c22
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c7
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c30
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c240
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h8
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c17
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c6
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c6
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c59
-rw-r--r--drivers/gpu/drm/drm_drv.c6
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_file.c6
-rw-r--r--drivers/gpu/drm/drm_ioc32.c6
-rw-r--r--drivers/gpu/drm/etnaviv/Kconfig1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_dump.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.h1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c2
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_perfmon.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c110
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/dmabuf.c8
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c16
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c33
-rw-r--r--drivers/gpu/drm/i915/i915_active.c36
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c35
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c18
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h4
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c27
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c1
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c18
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c14
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_context.c69
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_evict.c2
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c9
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c12
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c18
-rw-r--r--drivers/gpu/drm/tegra/hub.c4
-rw-r--r--drivers/gpu/drm/tegra/vic.c2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c72
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c6
-rw-r--r--drivers/gpu/drm/vkms/vkms_gem.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c12
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c2
-rw-r--r--drivers/gpu/vga/vgaarb.c49
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/hid-debug.c5
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-input.c1
-rw-r--r--drivers/hid/hid-logitech-hidpp.c13
-rw-r--r--drivers/hid/hid-quirks.c11
-rw-r--r--drivers/hid/hid-steam.c26
-rw-r--r--drivers/hid/hid-uclogic-params.c4
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c2
-rw-r--r--drivers/hwmon/Kconfig1
-rw-r--r--drivers/hwmon/ntc_thermistor.c2
-rw-r--r--drivers/hwmon/occ/common.c6
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x.c44
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c21
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h40
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c14
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c30
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c7
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-imx.c4
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c8
-rw-r--r--drivers/i2c/busses/i2c-rcar.c15
-rw-r--r--drivers/i2c/busses/i2c-sis630.c4
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c2
-rw-r--r--drivers/i2c/i2c-core-base.c9
-rw-r--r--drivers/infiniband/hw/i40iw/i40iw_utils.c12
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c2
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c34
-rw-r--r--drivers/infiniband/hw/mlx5/main.c7
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c4
-rw-r--r--drivers/iommu/amd_iommu.c24
-rw-r--r--drivers/iommu/amd_iommu_init.c7
-rw-r--r--drivers/iommu/amd_iommu_types.h2
-rw-r--r--drivers/iommu/intel-iommu.c5
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c19
-rw-r--r--drivers/iommu/iommu.c8
-rw-r--r--drivers/iommu/iova.c5
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-gic.c45
-rw-r--r--drivers/irqchip/irq-imx-irqsteer.c8
-rw-r--r--drivers/irqchip/irq-mbigen.c3
-rw-r--r--drivers/irqchip/irq-mmp.c2
-rw-r--r--drivers/irqchip/irq-mvebu-sei.c2
-rw-r--r--drivers/irqchip/irq-stm32-exti.c10
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c3
-rw-r--r--drivers/leds/leds-pca9532.c8
-rw-r--r--drivers/leds/trigger/ledtrig-netdev.c16
-rw-r--r--drivers/lightnvm/pblk-rl.c7
-rw-r--r--drivers/md/dm-core.h1
-rw-r--r--drivers/md/dm-init.c2
-rw-r--r--drivers/md/dm-integrity.c16
-rw-r--r--drivers/md/dm-rq.c11
-rw-r--r--drivers/md/dm-table.c39
-rw-r--r--drivers/md/dm.c30
-rw-r--r--drivers/md/raid10.c3
-rw-r--r--drivers/md/raid5-log.h1
-rw-r--r--drivers/md/raid5-ppl.c63
-rw-r--r--drivers/md/raid5.c3
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/sprd-sc27xx-spi.c42
-rw-r--r--drivers/mfd/twl-core.c23
-rw-r--r--drivers/misc/habanalabs/command_submission.c6
-rw-r--r--drivers/misc/habanalabs/debugfs.c7
-rw-r--r--drivers/misc/habanalabs/device.c71
-rw-r--r--drivers/misc/habanalabs/goya/goya.c65
-rw-r--r--drivers/misc/habanalabs/habanalabs.h21
-rw-r--r--drivers/misc/habanalabs/hw_queue.c5
-rw-r--r--drivers/misc/habanalabs/memory.c38
-rw-r--r--drivers/misc/habanalabs/mmu.c6
-rw-r--r--drivers/mmc/host/alcor.c25
-rw-r--r--drivers/mmc/host/davinci_mmc.c2
-rw-r--r--drivers/mmc/host/mxcmmc.c16
-rw-r--r--drivers/mmc/host/pxamci.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c8
-rw-r--r--drivers/mmc/host/sdhci-omap.c3
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c6
-rw-r--r--drivers/net/Kconfig4
-rw-r--r--drivers/net/bonding/bond_sysfs_slave.c4
-rw-r--r--drivers/net/dsa/mv88e6xxx/port.c24
-rw-r--r--drivers/net/dsa/qca8k.c174
-rw-r--r--drivers/net/dsa/qca8k.h13
-rw-r--r--drivers/net/ethernet/3com/3c515.c2
-rw-r--r--drivers/net/ethernet/8390/mac8390.c19
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c5
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c10
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c20
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c30
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c9
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c15
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c53
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile2
-rw-r--r--drivers/net/ethernet/hisilicon/hns_mdio.c18
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c5
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_main.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h16
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ethtool.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c28
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_ptp.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c57
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c82
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/qp.c72
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c2
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c42
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h93
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c317
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c27
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c2
-rw-r--r--drivers/net/ethernet/realtek/atp.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c10
-rw-r--r--drivers/net/ethernet/sis/sis900.c10
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/descs_com.h22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c22
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c48
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c6
-rw-r--r--drivers/net/hyperv/netvsc_drv.c32
-rw-r--r--drivers/net/ieee802154/adf7242.c4
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c2
-rw-r--r--drivers/net/phy/Kconfig3
-rw-r--r--drivers/net/phy/broadcom.c13
-rw-r--r--drivers/net/phy/dp83822.c34
-rw-r--r--drivers/net/phy/meson-gxl.c6
-rw-r--r--drivers/net/phy/phy_device.c2
-rw-r--r--drivers/net/tun.c16
-rw-r--r--drivers/net/usb/aqc111.c15
-rw-r--r--drivers/net/usb/cdc_ether.c8
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/vrf.c1
-rw-r--r--drivers/net/vxlan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c18
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/beacon.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mcu.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/soc.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c27
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c67
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c82
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_phy.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c21
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/phy.c30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c6
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/usb.c4
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.c25
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.h5
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen3.c33
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c20
-rw-r--r--drivers/ntb/ntb_transport.c31
-rw-r--r--drivers/nvdimm/e820.c1
-rw-r--r--drivers/nvdimm/nd.h2
-rw-r--r--drivers/nvdimm/of_pmem.c1
-rw-r--r--drivers/nvdimm/region_devs.c1
-rw-r--r--drivers/nvme/host/core.c28
-rw-r--r--drivers/nvme/host/fc.c36
-rw-r--r--drivers/nvme/host/multipath.c5
-rw-r--r--drivers/nvme/host/nvme.h5
-rw-r--r--drivers/nvme/host/pci.c3
-rw-r--r--drivers/nvme/host/tcp.c32
-rw-r--r--drivers/nvme/host/trace.c14
-rw-r--r--drivers/nvme/host/trace.h2
-rw-r--r--drivers/nvme/target/core.c24
-rw-r--r--drivers/nvme/target/fc.c42
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c8
-rw-r--r--drivers/nvme/target/io-cmd-file.c22
-rw-r--r--drivers/parisc/iosapic.c6
-rw-r--r--drivers/parport/daisy.c32
-rw-r--r--drivers/parport/probe.c2
-rw-r--r--drivers/parport/share.c10
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pci/pcie/bw_notification.c23
-rw-r--r--drivers/pci/probe.c2
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c5
-rw-r--r--drivers/platform/chrome/cros_ec_debugfs.c10
-rw-r--r--drivers/platform/chrome/wilco_ec/mailbox.c2
-rw-r--r--drivers/rtc/Kconfig4
-rw-r--r--drivers/rtc/rtc-cros-ec.c4
-rw-r--r--drivers/rtc/rtc-da9063.c7
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/s390/cio/chsc.c50
-rw-r--r--drivers/s390/cio/chsc.h1
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c8
-rw-r--r--drivers/s390/crypto/ap_bus.c19
-rw-r--r--drivers/s390/crypto/ap_bus.h2
-rw-r--r--drivers/s390/crypto/ap_queue.c26
-rw-r--r--drivers/s390/crypto/zcrypt_api.c30
-rw-r--r--drivers/s390/net/qeth_core_main.c5
-rw-r--r--drivers/s390/net/qeth_l2_main.c7
-rw-r--r--drivers/s390/net/qeth_l3_main.c8
-rw-r--r--drivers/s390/scsi/zfcp_erp.c17
-rw-r--r--drivers/s390/scsi/zfcp_ext.h2
-rw-r--r--drivers/s390/scsi/zfcp_fc.c21
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c4
-rw-r--r--drivers/scsi/aacraid/aacraid.h7
-rw-r--r--drivers/scsi/aacraid/commsup.c4
-rw-r--r--drivers/scsi/aacraid/linit.c13
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h8
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c63
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c89
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c39
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.h7
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c23
-rw-r--r--drivers/scsi/libiscsi.c22
-rw-r--r--drivers/scsi/libiscsi_tcp.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c21
-rw-r--r--drivers/scsi/lpfc/lpfc_nvme.c3
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c6
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c12
-rw-r--r--drivers/scsi/qedi/qedi_main.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_dfs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c2
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_dh.c1
-rw-r--r--drivers/scsi/scsi_lib.c15
-rw-r--r--drivers/scsi/scsi_sysfs.c6
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/sd.c22
-rw-r--r--drivers/scsi/smartpqi/smartpqi_init.c6
-rw-r--r--drivers/scsi/storvsc_drv.c15
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c11
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.h2
-rw-r--r--drivers/scsi/ufs/ufshcd.h2
-rw-r--r--drivers/scsi/virtio_scsi.c2
-rw-r--r--drivers/soc/bcm/bcm2835-power.c49
-rw-r--r--drivers/staging/Kconfig2
-rw-r--r--drivers/staging/Makefile1
-rw-r--r--drivers/staging/axis-fifo/Kconfig1
-rw-r--r--drivers/staging/comedi/comedidev.h2
-rw-r--r--drivers/staging/comedi/drivers.c33
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c10
-rw-r--r--drivers/staging/erofs/dir.c45
-rw-r--r--drivers/staging/erofs/unzip_vle.c45
-rw-r--r--drivers/staging/erofs/unzip_vle_lz4.c7
-rw-r--r--drivers/staging/mt7621-dts/gbpc1.dts29
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi73
-rw-r--r--drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt48
-rw-r--r--drivers/staging/mt7621-eth/Kconfig39
-rw-r--r--drivers/staging/mt7621-eth/Makefile14
-rw-r--r--drivers/staging/mt7621-eth/TODO13
-rw-r--r--drivers/staging/mt7621-eth/ethtool.c250
-rw-r--r--drivers/staging/mt7621-eth/ethtool.h15
-rw-r--r--drivers/staging/mt7621-eth/gsw_mt7620.h277
-rw-r--r--drivers/staging/mt7621-eth/gsw_mt7621.c297
-rw-r--r--drivers/staging/mt7621-eth/mdio.c275
-rw-r--r--drivers/staging/mt7621-eth/mdio.h27
-rw-r--r--drivers/staging/mt7621-eth/mdio_mt7620.c173
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.c2176
-rw-r--r--drivers/staging/mt7621-eth/mtk_eth_soc.h716
-rw-r--r--drivers/staging/mt7621-eth/soc_mt7621.c161
-rw-r--r--drivers/staging/mt7621-pci/Kconfig1
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c2
-rw-r--r--drivers/staging/octeon/ethernet.c40
-rw-r--r--drivers/staging/octeon/octeon-ethernet.h4
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c2
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c9
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c10
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.h2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c14
-rw-r--r--drivers/staging/rtl8723bs/include/rtw_xmit.h2
-rw-r--r--drivers/staging/rtlwifi/phydm/rtl_phydm.c2
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/fw.c2
-rw-r--r--drivers/staging/speakup/speakup_soft.c16
-rw-r--r--drivers/staging/speakup/spk_priv.h1
-rw-r--r--drivers/staging/speakup/synth.c6
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c8
-rw-r--r--drivers/staging/vt6655/device_main.c11
-rw-r--r--drivers/target/target_core_user.c19
-rw-r--r--drivers/thermal/broadcom/bcm2835_thermal.c9
-rw-r--r--drivers/thermal/cpu_cooling.c3
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c21
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c4
-rw-r--r--drivers/thermal/mtk_thermal.c7
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c2
-rw-r--r--drivers/tty/serial/ar933x_uart.c24
-rw-r--r--drivers/tty/serial/atmel_serial.c52
-rw-r--r--drivers/tty/serial/kgdboc.c4
-rw-r--r--drivers/tty/serial/max310x.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c3
-rw-r--r--drivers/tty/serial/mxs-auart.c4
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c2
-rw-r--r--drivers/tty/serial/sc16is7xx.c12
-rw-r--r--drivers/tty/serial/sh-sci.c12
-rw-r--r--drivers/tty/tty_port.c10
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/common/common.c2
-rw-r--r--drivers/usb/core/hcd.c3
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/gadget/function/f_hid.c6
-rw-r--r--drivers/usb/gadget/udc/net2272.c1
-rw-r--r--drivers/usb/gadget/udc/net2280.c8
-rw-r--r--drivers/usb/host/u132-hcd.c3
-rw-r--r--drivers/usb/host/xhci-dbgcap.c5
-rw-r--r--drivers/usb/host/xhci-hub.c19
-rw-r--r--drivers/usb/host/xhci-rcar.c1
-rw-r--r--drivers/usb/host/xhci-ring.c9
-rw-r--r--drivers/usb/host/xhci.h8
-rw-r--r--drivers/usb/misc/usb251xb.c4
-rw-r--r--drivers/usb/mtu3/Kconfig1
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h4
-rw-r--r--drivers/usb/serial/mos7720.c4
-rw-r--r--drivers/usb/serial/option.c17
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c27
-rw-r--r--drivers/usb/typec/tcpm/wcove.c9
-rw-r--r--drivers/vfio/pci/vfio_pci.c4
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c2
-rw-r--r--drivers/vfio/vfio_iommu_type1.c14
-rw-r--r--drivers/video/fbdev/aty/radeon_pm.c6
-rw-r--r--drivers/video/fbdev/cg14.c4
-rw-r--r--drivers/video/fbdev/cg3.c2
-rw-r--r--drivers/video/fbdev/chipsfb.c3
-rw-r--r--drivers/video/fbdev/core/fb_cmdline.c23
-rw-r--r--drivers/video/fbdev/core/fbcon.c14
-rw-r--r--drivers/video/fbdev/core/fbmem.c3
-rw-r--r--drivers/video/fbdev/core/fbmon.c2
-rw-r--r--drivers/video/fbdev/ffb.c2
-rw-r--r--drivers/video/fbdev/geode/gxfb_core.c13
-rw-r--r--drivers/video/fbdev/geode/lxfb_core.c13
-rw-r--r--drivers/video/fbdev/imsttfb.c4
-rw-r--r--drivers/video/fbdev/mbx/mbxdebugfs.c40
-rw-r--r--drivers/video/fbdev/mbx/mbxfb.c2
-rw-r--r--drivers/video/fbdev/offb.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c34
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.h2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c2
-rw-r--r--drivers/video/fbdev/ssd1307fb.c4
-rw-r--r--drivers/video/fbdev/via/viafbdev.c2
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c106
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.h15
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c26
-rw-r--r--drivers/virt/vboxguest/vboxguest_utils.c32
-rw-r--r--drivers/virt/vboxguest/vboxguest_version.h9
-rw-r--r--drivers/virt/vboxguest/vmmdev.h8
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c4
536 files changed, 5446 insertions, 7757 deletions
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 62d3aa74277b..5e9d7348c16f 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
81 81
82 ACPI_FUNCTION_TRACE(ev_enable_gpe); 82 ACPI_FUNCTION_TRACE(ev_enable_gpe);
83 83
84 /* Enable the requested GPE */ 84 /* Clear the GPE status */
85 status = acpi_hw_clear_gpe(gpe_event_info);
86 if (ACPI_FAILURE(status))
87 return_ACPI_STATUS(status);
85 88
89 /* Enable the requested GPE */
86 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); 90 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
87 return_ACPI_STATUS(status); 91 return_ACPI_STATUS(status);
88} 92}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 6ecbbabf1233..eec263c9019e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1043,9 +1043,6 @@ void __init acpi_early_init(void)
1043 1043
1044 acpi_permanent_mmap = true; 1044 acpi_permanent_mmap = true;
1045 1045
1046 /* Initialize debug output. Linux does not use ACPICA defaults */
1047 acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
1048
1049#ifdef CONFIG_X86 1046#ifdef CONFIG_X86
1050 /* 1047 /*
1051 * If the machine falls into the DMI check table, 1048 * If the machine falls into the DMI check table,
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 1b207fca1420..d4244e7d0e38 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -1150,8 +1150,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1150 cpc_read(cpunum, nominal_reg, &nom); 1150 cpc_read(cpunum, nominal_reg, &nom);
1151 perf_caps->nominal_perf = nom; 1151 perf_caps->nominal_perf = nom;
1152 1152
1153 cpc_read(cpunum, guaranteed_reg, &guaranteed); 1153 if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
1154 perf_caps->guaranteed_perf = guaranteed; 1154 IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
1155 perf_caps->guaranteed_perf = 0;
1156 } else {
1157 cpc_read(cpunum, guaranteed_reg, &guaranteed);
1158 perf_caps->guaranteed_perf = guaranteed;
1159 }
1155 1160
1156 cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear); 1161 cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
1157 perf_caps->lowest_nonlinear_perf = min_nonlinear; 1162 perf_caps->lowest_nonlinear_perf = min_nonlinear;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index df8979008dd4..5a389a4f4f65 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -2956,11 +2956,15 @@ static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
2956 ndr_desc->res = &res; 2956 ndr_desc->res = &res;
2957 ndr_desc->provider_data = nfit_spa; 2957 ndr_desc->provider_data = nfit_spa;
2958 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups; 2958 ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
2959 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) 2959 if (spa->flags & ACPI_NFIT_PROXIMITY_VALID) {
2960 ndr_desc->numa_node = acpi_map_pxm_to_online_node( 2960 ndr_desc->numa_node = acpi_map_pxm_to_online_node(
2961 spa->proximity_domain); 2961 spa->proximity_domain);
2962 else 2962 ndr_desc->target_node = acpi_map_pxm_to_node(
2963 spa->proximity_domain);
2964 } else {
2963 ndr_desc->numa_node = NUMA_NO_NODE; 2965 ndr_desc->numa_node = NUMA_NO_NODE;
2966 ndr_desc->target_node = NUMA_NO_NODE;
2967 }
2964 2968
2965 /* 2969 /*
2966 * Persistence domain bits are hierarchical, if 2970 * Persistence domain bits are hierarchical, if
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 7bbbf8256a41..867f6e3f2b4f 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -84,6 +84,7 @@ int acpi_map_pxm_to_node(int pxm)
84 84
85 return node; 85 return node;
86} 86}
87EXPORT_SYMBOL(acpi_map_pxm_to_node);
87 88
88/** 89/**
89 * acpi_map_pxm_to_online_node - Map proximity ID to online node 90 * acpi_map_pxm_to_online_node - Map proximity ID to online node
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 78db97687f26..c4b06cc075f9 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -800,6 +800,7 @@ bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
800 match.hrv = hrv; 800 match.hrv = hrv;
801 801
802 dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb); 802 dev = bus_find_device(&acpi_bus_type, NULL, &match, acpi_dev_match_cb);
803 put_device(dev);
803 return !!dev; 804 return !!dev;
804} 805}
805EXPORT_SYMBOL(acpi_dev_present); 806EXPORT_SYMBOL(acpi_dev_present);
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 41b706403ef7..b4dae624b9af 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -26,19 +26,36 @@
26 26
27#define to_amba_driver(d) container_of(d, struct amba_driver, drv) 27#define to_amba_driver(d) container_of(d, struct amba_driver, drv)
28 28
29static const struct amba_id * 29/* called on periphid match and class 0x9 coresight device. */
30amba_lookup(const struct amba_id *table, struct amba_device *dev) 30static int
31amba_cs_uci_id_match(const struct amba_id *table, struct amba_device *dev)
31{ 32{
32 int ret = 0; 33 int ret = 0;
34 struct amba_cs_uci_id *uci;
35
36 uci = table->data;
33 37
38 /* no table data or zero mask - return match on periphid */
39 if (!uci || (uci->devarch_mask == 0))
40 return 1;
41
42 /* test against read devtype and masked devarch value */
43 ret = (dev->uci.devtype == uci->devtype) &&
44 ((dev->uci.devarch & uci->devarch_mask) == uci->devarch);
45 return ret;
46}
47
48static const struct amba_id *
49amba_lookup(const struct amba_id *table, struct amba_device *dev)
50{
34 while (table->mask) { 51 while (table->mask) {
35 ret = (dev->periphid & table->mask) == table->id; 52 if (((dev->periphid & table->mask) == table->id) &&
36 if (ret) 53 ((dev->cid != CORESIGHT_CID) ||
37 break; 54 (amba_cs_uci_id_match(table, dev))))
55 return table;
38 table++; 56 table++;
39 } 57 }
40 58 return NULL;
41 return ret ? table : NULL;
42} 59}
43 60
44static int amba_match(struct device *dev, struct device_driver *drv) 61static int amba_match(struct device *dev, struct device_driver *drv)
@@ -399,10 +416,22 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
399 cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) << 416 cid |= (readl(tmp + size - 0x10 + 4 * i) & 255) <<
400 (i * 8); 417 (i * 8);
401 418
419 if (cid == CORESIGHT_CID) {
420 /* set the base to the start of the last 4k block */
421 void __iomem *csbase = tmp + size - 4096;
422
423 dev->uci.devarch =
424 readl(csbase + UCI_REG_DEVARCH_OFFSET);
425 dev->uci.devtype =
426 readl(csbase + UCI_REG_DEVTYPE_OFFSET) & 0xff;
427 }
428
402 amba_put_disable_pclk(dev); 429 amba_put_disable_pclk(dev);
403 430
404 if (cid == AMBA_CID || cid == CORESIGHT_CID) 431 if (cid == AMBA_CID || cid == CORESIGHT_CID) {
405 dev->periphid = pid; 432 dev->periphid = pid;
433 dev->cid = cid;
434 }
406 435
407 if (!dev->periphid) 436 if (!dev->periphid)
408 ret = -ENODEV; 437 ret = -ENODEV;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 8685882da64c..4b9c7ca492e6 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
2057 size_t object_size = 0; 2057 size_t object_size = 0;
2058 2058
2059 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); 2059 read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
2060 if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32))) 2060 if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
2061 !IS_ALIGNED(offset, sizeof(u32)))
2061 return 0; 2062 return 0;
2062 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, 2063 binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
2063 offset, read_size); 2064 offset, read_size);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 6389467670a0..195f120c4e8c 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
927 927
928 index = page - alloc->pages; 928 index = page - alloc->pages;
929 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; 929 page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
930
931 mm = alloc->vma_vm_mm;
932 if (!mmget_not_zero(mm))
933 goto err_mmget;
934 if (!down_write_trylock(&mm->mmap_sem))
935 goto err_down_write_mmap_sem_failed;
930 vma = binder_alloc_get_vma(alloc); 936 vma = binder_alloc_get_vma(alloc);
931 if (vma) {
932 if (!mmget_not_zero(alloc->vma_vm_mm))
933 goto err_mmget;
934 mm = alloc->vma_vm_mm;
935 if (!down_read_trylock(&mm->mmap_sem))
936 goto err_down_write_mmap_sem_failed;
937 }
938 937
939 list_lru_isolate(lru, item); 938 list_lru_isolate(lru, item);
940 spin_unlock(lock); 939 spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
945 zap_page_range(vma, page_addr, PAGE_SIZE); 944 zap_page_range(vma, page_addr, PAGE_SIZE);
946 945
947 trace_binder_unmap_user_end(alloc, index); 946 trace_binder_unmap_user_end(alloc, index);
948
949 up_read(&mm->mmap_sem);
950 mmput(mm);
951 } 947 }
948 up_write(&mm->mmap_sem);
949 mmput(mm);
952 950
953 trace_binder_unmap_kernel_start(alloc, index); 951 trace_binder_unmap_kernel_start(alloc, index);
954 952
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index b3ed8f9953a8..173e6f2dd9af 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
52/* Per the spec, only slot type and drawer type ODD can be supported */ 52/* Per the spec, only slot type and drawer type ODD can be supported */
53static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev) 53static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
54{ 54{
55 char buf[16]; 55 char *buf;
56 unsigned int ret; 56 unsigned int ret;
57 struct rm_feature_desc *desc = (void *)(buf + 8); 57 struct rm_feature_desc *desc;
58 struct ata_taskfile tf; 58 struct ata_taskfile tf;
59 static const char cdb[] = { GPCMD_GET_CONFIGURATION, 59 static const char cdb[] = { GPCMD_GET_CONFIGURATION,
60 2, /* only 1 feature descriptor requested */ 60 2, /* only 1 feature descriptor requested */
61 0, 3, /* 3, removable medium feature */ 61 0, 3, /* 3, removable medium feature */
62 0, 0, 0,/* reserved */ 62 0, 0, 0,/* reserved */
63 0, sizeof(buf), 63 0, 16,
64 0, 0, 0, 64 0, 0, 0,
65 }; 65 };
66 66
67 buf = kzalloc(16, GFP_KERNEL);
68 if (!buf)
69 return ODD_MECH_TYPE_UNSUPPORTED;
70 desc = (void *)(buf + 8);
71
67 ata_tf_init(dev, &tf); 72 ata_tf_init(dev, &tf);
68 tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 73 tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
69 tf.command = ATA_CMD_PACKET; 74 tf.command = ATA_CMD_PACKET;
70 tf.protocol = ATAPI_PROT_PIO; 75 tf.protocol = ATAPI_PROT_PIO;
71 tf.lbam = sizeof(buf); 76 tf.lbam = 16;
72 77
73 ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE, 78 ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
74 buf, sizeof(buf), 0); 79 buf, 16, 0);
75 if (ret) 80 if (ret) {
81 kfree(buf);
76 return ODD_MECH_TYPE_UNSUPPORTED; 82 return ODD_MECH_TYPE_UNSUPPORTED;
83 }
77 84
78 if (be16_to_cpu(desc->feature_code) != 3) 85 if (be16_to_cpu(desc->feature_code) != 3) {
86 kfree(buf);
79 return ODD_MECH_TYPE_UNSUPPORTED; 87 return ODD_MECH_TYPE_UNSUPPORTED;
88 }
80 89
81 if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) 90 if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
91 kfree(buf);
82 return ODD_MECH_TYPE_SLOT; 92 return ODD_MECH_TYPE_SLOT;
83 else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1) 93 } else if (desc->mech_type == 1 && desc->load == 0 &&
94 desc->eject == 1) {
95 kfree(buf);
84 return ODD_MECH_TYPE_DRAWER; 96 return ODD_MECH_TYPE_DRAWER;
85 else 97 } else {
98 kfree(buf);
86 return ODD_MECH_TYPE_UNSUPPORTED; 99 return ODD_MECH_TYPE_UNSUPPORTED;
100 }
87} 101}
88 102
89/* Test if ODD is zero power ready by sense code */ 103/* Test if ODD is zero power ready by sense code */
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 57410f9c5d44..c52c738e554a 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -164,9 +164,7 @@ config ARM_CHARLCD
164 line and the Linux version on the second line, but that's 164 line and the Linux version on the second line, but that's
165 still useful. 165 still useful.
166 166
167endif # AUXDISPLAY 167menuconfig PARPORT_PANEL
168
169menuconfig PANEL
170 tristate "Parallel port LCD/Keypad Panel support" 168 tristate "Parallel port LCD/Keypad Panel support"
171 depends on PARPORT 169 depends on PARPORT
172 select CHARLCD 170 select CHARLCD
@@ -178,7 +176,7 @@ menuconfig PANEL
178 compiled as a module, or linked into the kernel and started at boot. 176 compiled as a module, or linked into the kernel and started at boot.
179 If you don't understand what all this is about, say N. 177 If you don't understand what all this is about, say N.
180 178
181if PANEL 179if PARPORT_PANEL
182 180
183config PANEL_PARPORT 181config PANEL_PARPORT
184 int "Default parallel port number (0=LPT1)" 182 int "Default parallel port number (0=LPT1)"
@@ -419,8 +417,11 @@ config PANEL_LCD_PIN_BL
419 417
420 Default for the 'BL' pin in custom profile is '0' (uncontrolled). 418 Default for the 'BL' pin in custom profile is '0' (uncontrolled).
421 419
420endif # PARPORT_PANEL
421
422config PANEL_CHANGE_MESSAGE 422config PANEL_CHANGE_MESSAGE
423 bool "Change LCD initialization message ?" 423 bool "Change LCD initialization message ?"
424 depends on CHARLCD
424 default "n" 425 default "n"
425 ---help--- 426 ---help---
426 This allows you to replace the boot message indicating the kernel version 427 This allows you to replace the boot message indicating the kernel version
@@ -444,7 +445,34 @@ config PANEL_BOOT_MESSAGE
444 An empty message will only clear the display at driver init time. Any other 445 An empty message will only clear the display at driver init time. Any other
445 printf()-formatted message is valid with newline and escape codes. 446 printf()-formatted message is valid with newline and escape codes.
446 447
447endif # PANEL 448choice
449 prompt "Backlight initial state"
450 default CHARLCD_BL_FLASH
451
452 config CHARLCD_BL_OFF
453 bool "Off"
454 help
455 Backlight is initially turned off
456
457 config CHARLCD_BL_ON
458 bool "On"
459 help
460 Backlight is initially turned on
461
462 config CHARLCD_BL_FLASH
463 bool "Flash"
464 help
465 Backlight is flashed briefly on init
466
467endchoice
468
469endif # AUXDISPLAY
470
471config PANEL
472 tristate "Parallel port LCD/Keypad Panel support (OLD OPTION)"
473 depends on PARPORT
474 select AUXDISPLAY
475 select PARPORT_PANEL
448 476
449config CHARLCD 477config CHARLCD
450 tristate "Character LCD core support" if COMPILE_TEST 478 tristate "Character LCD core support" if COMPILE_TEST
diff --git a/drivers/auxdisplay/Makefile b/drivers/auxdisplay/Makefile
index 7ac6776ca3f6..cf54b5efb07e 100644
--- a/drivers/auxdisplay/Makefile
+++ b/drivers/auxdisplay/Makefile
@@ -10,4 +10,4 @@ obj-$(CONFIG_CFAG12864B) += cfag12864b.o cfag12864bfb.o
10obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o 10obj-$(CONFIG_IMG_ASCII_LCD) += img-ascii-lcd.o
11obj-$(CONFIG_HD44780) += hd44780.o 11obj-$(CONFIG_HD44780) += hd44780.o
12obj-$(CONFIG_HT16K33) += ht16k33.o 12obj-$(CONFIG_HT16K33) += ht16k33.o
13obj-$(CONFIG_PANEL) += panel.o 13obj-$(CONFIG_PARPORT_PANEL) += panel.o
diff --git a/drivers/auxdisplay/charlcd.c b/drivers/auxdisplay/charlcd.c
index 60e0b772673f..92745efefb54 100644
--- a/drivers/auxdisplay/charlcd.c
+++ b/drivers/auxdisplay/charlcd.c
@@ -91,7 +91,7 @@ struct charlcd_priv {
91 unsigned long long drvdata[0]; 91 unsigned long long drvdata[0];
92}; 92};
93 93
94#define to_priv(p) container_of(p, struct charlcd_priv, lcd) 94#define charlcd_to_priv(p) container_of(p, struct charlcd_priv, lcd)
95 95
96/* Device single-open policy control */ 96/* Device single-open policy control */
97static atomic_t charlcd_available = ATOMIC_INIT(1); 97static atomic_t charlcd_available = ATOMIC_INIT(1);
@@ -105,7 +105,7 @@ static void long_sleep(int ms)
105/* turn the backlight on or off */ 105/* turn the backlight on or off */
106static void charlcd_backlight(struct charlcd *lcd, int on) 106static void charlcd_backlight(struct charlcd *lcd, int on)
107{ 107{
108 struct charlcd_priv *priv = to_priv(lcd); 108 struct charlcd_priv *priv = charlcd_to_priv(lcd);
109 109
110 if (!lcd->ops->backlight) 110 if (!lcd->ops->backlight)
111 return; 111 return;
@@ -134,7 +134,7 @@ static void charlcd_bl_off(struct work_struct *work)
134/* turn the backlight on for a little while */ 134/* turn the backlight on for a little while */
135void charlcd_poke(struct charlcd *lcd) 135void charlcd_poke(struct charlcd *lcd)
136{ 136{
137 struct charlcd_priv *priv = to_priv(lcd); 137 struct charlcd_priv *priv = charlcd_to_priv(lcd);
138 138
139 if (!lcd->ops->backlight) 139 if (!lcd->ops->backlight)
140 return; 140 return;
@@ -152,7 +152,7 @@ EXPORT_SYMBOL_GPL(charlcd_poke);
152 152
153static void charlcd_gotoxy(struct charlcd *lcd) 153static void charlcd_gotoxy(struct charlcd *lcd)
154{ 154{
155 struct charlcd_priv *priv = to_priv(lcd); 155 struct charlcd_priv *priv = charlcd_to_priv(lcd);
156 unsigned int addr; 156 unsigned int addr;
157 157
158 /* 158 /*
@@ -170,7 +170,7 @@ static void charlcd_gotoxy(struct charlcd *lcd)
170 170
171static void charlcd_home(struct charlcd *lcd) 171static void charlcd_home(struct charlcd *lcd)
172{ 172{
173 struct charlcd_priv *priv = to_priv(lcd); 173 struct charlcd_priv *priv = charlcd_to_priv(lcd);
174 174
175 priv->addr.x = 0; 175 priv->addr.x = 0;
176 priv->addr.y = 0; 176 priv->addr.y = 0;
@@ -179,7 +179,7 @@ static void charlcd_home(struct charlcd *lcd)
179 179
180static void charlcd_print(struct charlcd *lcd, char c) 180static void charlcd_print(struct charlcd *lcd, char c)
181{ 181{
182 struct charlcd_priv *priv = to_priv(lcd); 182 struct charlcd_priv *priv = charlcd_to_priv(lcd);
183 183
184 if (priv->addr.x < lcd->bwidth) { 184 if (priv->addr.x < lcd->bwidth) {
185 if (lcd->char_conv) 185 if (lcd->char_conv)
@@ -211,7 +211,7 @@ static void charlcd_clear_fast(struct charlcd *lcd)
211/* clears the display and resets X/Y */ 211/* clears the display and resets X/Y */
212static void charlcd_clear_display(struct charlcd *lcd) 212static void charlcd_clear_display(struct charlcd *lcd)
213{ 213{
214 struct charlcd_priv *priv = to_priv(lcd); 214 struct charlcd_priv *priv = charlcd_to_priv(lcd);
215 215
216 lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CLEAR); 216 lcd->ops->write_cmd(lcd, LCD_CMD_DISPLAY_CLEAR);
217 priv->addr.x = 0; 217 priv->addr.x = 0;
@@ -223,7 +223,7 @@ static void charlcd_clear_display(struct charlcd *lcd)
223static int charlcd_init_display(struct charlcd *lcd) 223static int charlcd_init_display(struct charlcd *lcd)
224{ 224{
225 void (*write_cmd_raw)(struct charlcd *lcd, int cmd); 225 void (*write_cmd_raw)(struct charlcd *lcd, int cmd);
226 struct charlcd_priv *priv = to_priv(lcd); 226 struct charlcd_priv *priv = charlcd_to_priv(lcd);
227 u8 init; 227 u8 init;
228 228
229 if (lcd->ifwidth != 4 && lcd->ifwidth != 8) 229 if (lcd->ifwidth != 4 && lcd->ifwidth != 8)
@@ -369,7 +369,7 @@ static bool parse_xy(const char *s, unsigned long *x, unsigned long *y)
369 369
370static inline int handle_lcd_special_code(struct charlcd *lcd) 370static inline int handle_lcd_special_code(struct charlcd *lcd)
371{ 371{
372 struct charlcd_priv *priv = to_priv(lcd); 372 struct charlcd_priv *priv = charlcd_to_priv(lcd);
373 373
374 /* LCD special codes */ 374 /* LCD special codes */
375 375
@@ -580,7 +580,7 @@ static inline int handle_lcd_special_code(struct charlcd *lcd)
580 580
581static void charlcd_write_char(struct charlcd *lcd, char c) 581static void charlcd_write_char(struct charlcd *lcd, char c)
582{ 582{
583 struct charlcd_priv *priv = to_priv(lcd); 583 struct charlcd_priv *priv = charlcd_to_priv(lcd);
584 584
585 /* first, we'll test if we're in escape mode */ 585 /* first, we'll test if we're in escape mode */
586 if ((c != '\n') && priv->esc_seq.len >= 0) { 586 if ((c != '\n') && priv->esc_seq.len >= 0) {
@@ -705,7 +705,7 @@ static ssize_t charlcd_write(struct file *file, const char __user *buf,
705 705
706static int charlcd_open(struct inode *inode, struct file *file) 706static int charlcd_open(struct inode *inode, struct file *file)
707{ 707{
708 struct charlcd_priv *priv = to_priv(the_charlcd); 708 struct charlcd_priv *priv = charlcd_to_priv(the_charlcd);
709 int ret; 709 int ret;
710 710
711 ret = -EBUSY; 711 ret = -EBUSY;
@@ -763,10 +763,24 @@ static void charlcd_puts(struct charlcd *lcd, const char *s)
763 } 763 }
764} 764}
765 765
766#ifdef CONFIG_PANEL_BOOT_MESSAGE
767#define LCD_INIT_TEXT CONFIG_PANEL_BOOT_MESSAGE
768#else
769#define LCD_INIT_TEXT "Linux-" UTS_RELEASE "\n"
770#endif
771
772#ifdef CONFIG_CHARLCD_BL_ON
773#define LCD_INIT_BL "\x1b[L+"
774#elif defined(CONFIG_CHARLCD_BL_FLASH)
775#define LCD_INIT_BL "\x1b[L*"
776#else
777#define LCD_INIT_BL "\x1b[L-"
778#endif
779
766/* initialize the LCD driver */ 780/* initialize the LCD driver */
767static int charlcd_init(struct charlcd *lcd) 781static int charlcd_init(struct charlcd *lcd)
768{ 782{
769 struct charlcd_priv *priv = to_priv(lcd); 783 struct charlcd_priv *priv = charlcd_to_priv(lcd);
770 int ret; 784 int ret;
771 785
772 if (lcd->ops->backlight) { 786 if (lcd->ops->backlight) {
@@ -784,13 +798,8 @@ static int charlcd_init(struct charlcd *lcd)
784 return ret; 798 return ret;
785 799
786 /* display a short message */ 800 /* display a short message */
787#ifdef CONFIG_PANEL_CHANGE_MESSAGE 801 charlcd_puts(lcd, "\x1b[Lc\x1b[Lb" LCD_INIT_BL LCD_INIT_TEXT);
788#ifdef CONFIG_PANEL_BOOT_MESSAGE 802
789 charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*" CONFIG_PANEL_BOOT_MESSAGE);
790#endif
791#else
792 charlcd_puts(lcd, "\x1b[Lc\x1b[Lb\x1b[L*Linux-" UTS_RELEASE "\n");
793#endif
794 /* clear the display on the next device opening */ 803 /* clear the display on the next device opening */
795 priv->must_clear = true; 804 priv->must_clear = true;
796 charlcd_home(lcd); 805 charlcd_home(lcd);
@@ -818,6 +827,12 @@ struct charlcd *charlcd_alloc(unsigned int drvdata_size)
818} 827}
819EXPORT_SYMBOL_GPL(charlcd_alloc); 828EXPORT_SYMBOL_GPL(charlcd_alloc);
820 829
830void charlcd_free(struct charlcd *lcd)
831{
832 kfree(charlcd_to_priv(lcd));
833}
834EXPORT_SYMBOL_GPL(charlcd_free);
835
821static int panel_notify_sys(struct notifier_block *this, unsigned long code, 836static int panel_notify_sys(struct notifier_block *this, unsigned long code,
822 void *unused) 837 void *unused)
823{ 838{
@@ -866,7 +881,7 @@ EXPORT_SYMBOL_GPL(charlcd_register);
866 881
867int charlcd_unregister(struct charlcd *lcd) 882int charlcd_unregister(struct charlcd *lcd)
868{ 883{
869 struct charlcd_priv *priv = to_priv(lcd); 884 struct charlcd_priv *priv = charlcd_to_priv(lcd);
870 885
871 unregister_reboot_notifier(&panel_notifier); 886 unregister_reboot_notifier(&panel_notifier);
872 charlcd_puts(lcd, "\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-"); 887 charlcd_puts(lcd, "\x0cLCD driver unloaded.\x1b[Lc\x1b[Lb\x1b[L-");
diff --git a/drivers/auxdisplay/hd44780.c b/drivers/auxdisplay/hd44780.c
index 9ad93ea42fdc..ab15b64707ad 100644
--- a/drivers/auxdisplay/hd44780.c
+++ b/drivers/auxdisplay/hd44780.c
@@ -271,7 +271,7 @@ static int hd44780_probe(struct platform_device *pdev)
271 return 0; 271 return 0;
272 272
273fail: 273fail:
274 kfree(lcd); 274 charlcd_free(lcd);
275 return ret; 275 return ret;
276} 276}
277 277
@@ -280,6 +280,8 @@ static int hd44780_remove(struct platform_device *pdev)
280 struct charlcd *lcd = platform_get_drvdata(pdev); 280 struct charlcd *lcd = platform_get_drvdata(pdev);
281 281
282 charlcd_unregister(lcd); 282 charlcd_unregister(lcd);
283
284 charlcd_free(lcd);
283 return 0; 285 return 0;
284} 286}
285 287
diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c
index 21b9b2f2470a..e06de63497cf 100644
--- a/drivers/auxdisplay/panel.c
+++ b/drivers/auxdisplay/panel.c
@@ -1620,7 +1620,7 @@ err_lcd_unreg:
1620 if (lcd.enabled) 1620 if (lcd.enabled)
1621 charlcd_unregister(lcd.charlcd); 1621 charlcd_unregister(lcd.charlcd);
1622err_unreg_device: 1622err_unreg_device:
1623 kfree(lcd.charlcd); 1623 charlcd_free(lcd.charlcd);
1624 lcd.charlcd = NULL; 1624 lcd.charlcd = NULL;
1625 parport_unregister_device(pprt); 1625 parport_unregister_device(pprt);
1626 pprt = NULL; 1626 pprt = NULL;
@@ -1647,7 +1647,7 @@ static void panel_detach(struct parport *port)
1647 if (lcd.enabled) { 1647 if (lcd.enabled) {
1648 charlcd_unregister(lcd.charlcd); 1648 charlcd_unregister(lcd.charlcd);
1649 lcd.initialized = false; 1649 lcd.initialized = false;
1650 kfree(lcd.charlcd); 1650 charlcd_free(lcd.charlcd);
1651 lcd.charlcd = NULL; 1651 lcd.charlcd = NULL;
1652 } 1652 }
1653 1653
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 048cbf7d5233..cb8347500ce2 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -88,6 +88,7 @@ unsigned long __weak memory_block_size_bytes(void)
88{ 88{
89 return MIN_MEMORY_BLOCK_SIZE; 89 return MIN_MEMORY_BLOCK_SIZE;
90} 90}
91EXPORT_SYMBOL_GPL(memory_block_size_bytes);
91 92
92static unsigned long get_memory_block_size(void) 93static unsigned long get_memory_block_size(void)
93{ 94{
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 76c9969b7124..96a6dc9d305c 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1469,12 +1469,12 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1469 if (IS_ERR(gpd_data)) 1469 if (IS_ERR(gpd_data))
1470 return PTR_ERR(gpd_data); 1470 return PTR_ERR(gpd_data);
1471 1471
1472 genpd_lock(genpd);
1473
1474 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0; 1472 ret = genpd->attach_dev ? genpd->attach_dev(genpd, dev) : 0;
1475 if (ret) 1473 if (ret)
1476 goto out; 1474 goto out;
1477 1475
1476 genpd_lock(genpd);
1477
1478 dev_pm_domain_set(dev, &genpd->domain); 1478 dev_pm_domain_set(dev, &genpd->domain);
1479 1479
1480 genpd->device_count++; 1480 genpd->device_count++;
@@ -1482,9 +1482,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1482 1482
1483 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1483 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1484 1484
1485 out:
1486 genpd_unlock(genpd); 1485 genpd_unlock(genpd);
1487 1486 out:
1488 if (ret) 1487 if (ret)
1489 genpd_free_dev_data(dev, gpd_data); 1488 genpd_free_dev_data(dev, gpd_data);
1490 else 1489 else
@@ -1533,15 +1532,15 @@ static int genpd_remove_device(struct generic_pm_domain *genpd,
1533 genpd->device_count--; 1532 genpd->device_count--;
1534 genpd->max_off_time_changed = true; 1533 genpd->max_off_time_changed = true;
1535 1534
1536 if (genpd->detach_dev)
1537 genpd->detach_dev(genpd, dev);
1538
1539 dev_pm_domain_set(dev, NULL); 1535 dev_pm_domain_set(dev, NULL);
1540 1536
1541 list_del_init(&pdd->list_node); 1537 list_del_init(&pdd->list_node);
1542 1538
1543 genpd_unlock(genpd); 1539 genpd_unlock(genpd);
1544 1540
1541 if (genpd->detach_dev)
1542 genpd->detach_dev(genpd, dev);
1543
1545 genpd_free_dev_data(dev, gpd_data); 1544 genpd_free_dev_data(dev, gpd_data);
1546 1545
1547 return 0; 1546 return 0;
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 1fad9291f6aa..7fc5a18e02ad 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -472,7 +472,7 @@ static int software_node_read_string_array(const struct fwnode_handle *fwnode,
472 val, nval); 472 val, nval);
473} 473}
474 474
475struct fwnode_handle * 475static struct fwnode_handle *
476software_node_get_parent(const struct fwnode_handle *fwnode) 476software_node_get_parent(const struct fwnode_handle *fwnode)
477{ 477{
478 struct software_node *swnode = to_software_node(fwnode); 478 struct software_node *swnode = to_software_node(fwnode);
@@ -481,7 +481,7 @@ software_node_get_parent(const struct fwnode_handle *fwnode)
481 NULL; 481 NULL;
482} 482}
483 483
484struct fwnode_handle * 484static struct fwnode_handle *
485software_node_get_next_child(const struct fwnode_handle *fwnode, 485software_node_get_next_child(const struct fwnode_handle *fwnode,
486 struct fwnode_handle *child) 486 struct fwnode_handle *child)
487{ 487{
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1e6edd568214..bf1c61cab8eb 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -656,7 +656,7 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
656 return -EBADF; 656 return -EBADF;
657 657
658 l = f->f_mapping->host->i_bdev->bd_disk->private_data; 658 l = f->f_mapping->host->i_bdev->bd_disk->private_data;
659 if (l->lo_state == Lo_unbound) { 659 if (l->lo_state != Lo_bound) {
660 return -EINVAL; 660 return -EINVAL;
661 } 661 }
662 f = l->lo_backing_file; 662 f = l->lo_backing_file;
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 96670eefaeb2..377a694dc228 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -749,8 +749,12 @@ static int pcd_detect(void)
749 return 0; 749 return 0;
750 750
751 printk("%s: No CD-ROM drive found\n", name); 751 printk("%s: No CD-ROM drive found\n", name);
752 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) 752 for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
753 blk_cleanup_queue(cd->disk->queue);
754 cd->disk->queue = NULL;
755 blk_mq_free_tag_set(&cd->tag_set);
753 put_disk(cd->disk); 756 put_disk(cd->disk);
757 }
754 pi_unregister_driver(par_drv); 758 pi_unregister_driver(par_drv);
755 return -1; 759 return -1;
756} 760}
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index e92e7a8eeeb2..103b617cdc31 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -761,8 +761,12 @@ static int pf_detect(void)
761 return 0; 761 return 0;
762 762
763 printk("%s: No ATAPI disk detected\n", name); 763 printk("%s: No ATAPI disk detected\n", name);
764 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) 764 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
765 blk_cleanup_queue(pf->disk->queue);
766 pf->disk->queue = NULL;
767 blk_mq_free_tag_set(&pf->tag_set);
765 put_disk(pf->disk); 768 put_disk(pf->disk);
769 }
766 pi_unregister_driver(par_drv); 770 pi_unregister_driver(par_drv);
767 return -1; 771 return -1;
768} 772}
@@ -1047,13 +1051,15 @@ static void __exit pf_exit(void)
1047 int unit; 1051 int unit;
1048 unregister_blkdev(major, name); 1052 unregister_blkdev(major, name);
1049 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { 1053 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
1050 if (!pf->present) 1054 if (pf->present)
1051 continue; 1055 del_gendisk(pf->disk);
1052 del_gendisk(pf->disk); 1056
1053 blk_cleanup_queue(pf->disk->queue); 1057 blk_cleanup_queue(pf->disk->queue);
1054 blk_mq_free_tag_set(&pf->tag_set); 1058 blk_mq_free_tag_set(&pf->tag_set);
1055 put_disk(pf->disk); 1059 put_disk(pf->disk);
1056 pi_release(pf->pi); 1060
1061 if (pf->present)
1062 pi_release(pf->pi);
1057 } 1063 }
1058} 1064}
1059 1065
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4ba967d65cf9..2210c1b9491b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -833,7 +833,7 @@ static int parse_rbd_opts_token(char *c, void *private)
833 pctx->opts->queue_depth = intval; 833 pctx->opts->queue_depth = intval;
834 break; 834 break;
835 case Opt_alloc_size: 835 case Opt_alloc_size:
836 if (intval < 1) { 836 if (intval < SECTOR_SIZE) {
837 pr_err("alloc_size out of range\n"); 837 pr_err("alloc_size out of range\n");
838 return -EINVAL; 838 return -EINVAL;
839 } 839 }
@@ -924,23 +924,6 @@ static void rbd_put_client(struct rbd_client *rbdc)
924 kref_put(&rbdc->kref, rbd_client_release); 924 kref_put(&rbdc->kref, rbd_client_release);
925} 925}
926 926
927static int wait_for_latest_osdmap(struct ceph_client *client)
928{
929 u64 newest_epoch;
930 int ret;
931
932 ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
933 if (ret)
934 return ret;
935
936 if (client->osdc.osdmap->epoch >= newest_epoch)
937 return 0;
938
939 ceph_osdc_maybe_request_map(&client->osdc);
940 return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
941 client->options->mount_timeout);
942}
943
944/* 927/*
945 * Get a ceph client with specific addr and configuration, if one does 928 * Get a ceph client with specific addr and configuration, if one does
946 * not exist create it. Either way, ceph_opts is consumed by this 929 * not exist create it. Either way, ceph_opts is consumed by this
@@ -960,7 +943,8 @@ static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
960 * Using an existing client. Make sure ->pg_pools is up to 943 * Using an existing client. Make sure ->pg_pools is up to
961 * date before we look up the pool id in do_rbd_add(). 944 * date before we look up the pool id in do_rbd_add().
962 */ 945 */
963 ret = wait_for_latest_osdmap(rbdc->client); 946 ret = ceph_wait_for_latest_osdmap(rbdc->client,
947 rbdc->client->options->mount_timeout);
964 if (ret) { 948 if (ret) {
965 rbd_warn(NULL, "failed to get latest osdmap: %d", ret); 949 rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
966 rbd_put_client(rbdc); 950 rbd_put_client(rbdc);
@@ -4203,12 +4187,12 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
4203 q->limits.max_sectors = queue_max_hw_sectors(q); 4187 q->limits.max_sectors = queue_max_hw_sectors(q);
4204 blk_queue_max_segments(q, USHRT_MAX); 4188 blk_queue_max_segments(q, USHRT_MAX);
4205 blk_queue_max_segment_size(q, UINT_MAX); 4189 blk_queue_max_segment_size(q, UINT_MAX);
4206 blk_queue_io_min(q, objset_bytes); 4190 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
4207 blk_queue_io_opt(q, objset_bytes); 4191 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
4208 4192
4209 if (rbd_dev->opts->trim) { 4193 if (rbd_dev->opts->trim) {
4210 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); 4194 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
4211 q->limits.discard_granularity = objset_bytes; 4195 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
4212 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); 4196 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
4213 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); 4197 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
4214 } 4198 }
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index a4bc74e72c39..24896ffb04ed 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -926,7 +926,7 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
926 int err, i, j; 926 int err, i, j;
927 struct xen_blkif *blkif = ring->blkif; 927 struct xen_blkif *blkif = ring->blkif;
928 struct xenbus_device *dev = blkif->be->dev; 928 struct xenbus_device *dev = blkif->be->dev;
929 unsigned int ring_page_order, nr_grefs, evtchn; 929 unsigned int nr_grefs, evtchn;
930 930
931 err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u", 931 err = xenbus_scanf(XBT_NIL, dir, "event-channel", "%u",
932 &evtchn); 932 &evtchn);
@@ -936,43 +936,42 @@ static int read_per_ring_refs(struct xen_blkif_ring *ring, const char *dir)
936 return err; 936 return err;
937 } 937 }
938 938
939 err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-page-order", "%u", 939 nr_grefs = blkif->nr_ring_pages;
940 &ring_page_order); 940
941 if (err != 1) { 941 if (unlikely(!nr_grefs)) {
942 err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u", &ring_ref[0]); 942 WARN_ON(true);
943 return -EINVAL;
944 }
945
946 for (i = 0; i < nr_grefs; i++) {
947 char ring_ref_name[RINGREF_NAME_LEN];
948
949 snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
950 err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
951 "%u", &ring_ref[i]);
952
943 if (err != 1) { 953 if (err != 1) {
954 if (nr_grefs == 1)
955 break;
956
944 err = -EINVAL; 957 err = -EINVAL;
945 xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir); 958 xenbus_dev_fatal(dev, err, "reading %s/%s",
959 dir, ring_ref_name);
946 return err; 960 return err;
947 } 961 }
948 nr_grefs = 1; 962 }
949 } else {
950 unsigned int i;
951 963
952 if (ring_page_order > xen_blkif_max_ring_order) { 964 if (err != 1) {
965 WARN_ON(nr_grefs != 1);
966
967 err = xenbus_scanf(XBT_NIL, dir, "ring-ref", "%u",
968 &ring_ref[0]);
969 if (err != 1) {
953 err = -EINVAL; 970 err = -EINVAL;
954 xenbus_dev_fatal(dev, err, "%s/request %d ring page order exceed max:%d", 971 xenbus_dev_fatal(dev, err, "reading %s/ring-ref", dir);
955 dir, ring_page_order,
956 xen_blkif_max_ring_order);
957 return err; 972 return err;
958 } 973 }
959
960 nr_grefs = 1 << ring_page_order;
961 for (i = 0; i < nr_grefs; i++) {
962 char ring_ref_name[RINGREF_NAME_LEN];
963
964 snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
965 err = xenbus_scanf(XBT_NIL, dir, ring_ref_name,
966 "%u", &ring_ref[i]);
967 if (err != 1) {
968 err = -EINVAL;
969 xenbus_dev_fatal(dev, err, "reading %s/%s",
970 dir, ring_ref_name);
971 return err;
972 }
973 }
974 } 974 }
975 blkif->nr_ring_pages = nr_grefs;
976 975
977 for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) { 976 for (i = 0; i < nr_grefs * XEN_BLKIF_REQS_PER_PAGE; i++) {
978 req = kzalloc(sizeof(*req), GFP_KERNEL); 977 req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -1023,6 +1022,7 @@ fail:
1023static int connect_ring(struct backend_info *be) 1022static int connect_ring(struct backend_info *be)
1024{ 1023{
1025 struct xenbus_device *dev = be->dev; 1024 struct xenbus_device *dev = be->dev;
1025 struct xen_blkif *blkif = be->blkif;
1026 unsigned int pers_grants; 1026 unsigned int pers_grants;
1027 char protocol[64] = ""; 1027 char protocol[64] = "";
1028 int err, i; 1028 int err, i;
@@ -1030,28 +1030,29 @@ static int connect_ring(struct backend_info *be)
1030 size_t xspathsize; 1030 size_t xspathsize;
1031 const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */ 1031 const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */
1032 unsigned int requested_num_queues = 0; 1032 unsigned int requested_num_queues = 0;
1033 unsigned int ring_page_order;
1033 1034
1034 pr_debug("%s %s\n", __func__, dev->otherend); 1035 pr_debug("%s %s\n", __func__, dev->otherend);
1035 1036
1036 be->blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT; 1037 blkif->blk_protocol = BLKIF_PROTOCOL_DEFAULT;
1037 err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol", 1038 err = xenbus_scanf(XBT_NIL, dev->otherend, "protocol",
1038 "%63s", protocol); 1039 "%63s", protocol);
1039 if (err <= 0) 1040 if (err <= 0)
1040 strcpy(protocol, "unspecified, assuming default"); 1041 strcpy(protocol, "unspecified, assuming default");
1041 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE)) 1042 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
1042 be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE; 1043 blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
1043 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32)) 1044 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
1044 be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32; 1045 blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
1045 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64)) 1046 else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
1046 be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64; 1047 blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
1047 else { 1048 else {
1048 xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol); 1049 xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
1049 return -ENOSYS; 1050 return -ENOSYS;
1050 } 1051 }
1051 pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent", 1052 pers_grants = xenbus_read_unsigned(dev->otherend, "feature-persistent",
1052 0); 1053 0);
1053 be->blkif->vbd.feature_gnt_persistent = pers_grants; 1054 blkif->vbd.feature_gnt_persistent = pers_grants;
1054 be->blkif->vbd.overflow_max_grants = 0; 1055 blkif->vbd.overflow_max_grants = 0;
1055 1056
1056 /* 1057 /*
1057 * Read the number of hardware queues from frontend. 1058 * Read the number of hardware queues from frontend.
@@ -1067,16 +1068,30 @@ static int connect_ring(struct backend_info *be)
1067 requested_num_queues, xenblk_max_queues); 1068 requested_num_queues, xenblk_max_queues);
1068 return -ENOSYS; 1069 return -ENOSYS;
1069 } 1070 }
1070 be->blkif->nr_rings = requested_num_queues; 1071 blkif->nr_rings = requested_num_queues;
1071 if (xen_blkif_alloc_rings(be->blkif)) 1072 if (xen_blkif_alloc_rings(blkif))
1072 return -ENOMEM; 1073 return -ENOMEM;
1073 1074
1074 pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename, 1075 pr_info("%s: using %d queues, protocol %d (%s) %s\n", dev->nodename,
1075 be->blkif->nr_rings, be->blkif->blk_protocol, protocol, 1076 blkif->nr_rings, blkif->blk_protocol, protocol,
1076 pers_grants ? "persistent grants" : ""); 1077 pers_grants ? "persistent grants" : "");
1077 1078
1078 if (be->blkif->nr_rings == 1) 1079 ring_page_order = xenbus_read_unsigned(dev->otherend,
1079 return read_per_ring_refs(&be->blkif->rings[0], dev->otherend); 1080 "ring-page-order", 0);
1081
1082 if (ring_page_order > xen_blkif_max_ring_order) {
1083 err = -EINVAL;
1084 xenbus_dev_fatal(dev, err,
1085 "requested ring page order %d exceed max:%d",
1086 ring_page_order,
1087 xen_blkif_max_ring_order);
1088 return err;
1089 }
1090
1091 blkif->nr_ring_pages = 1 << ring_page_order;
1092
1093 if (blkif->nr_rings == 1)
1094 return read_per_ring_refs(&blkif->rings[0], dev->otherend);
1080 else { 1095 else {
1081 xspathsize = strlen(dev->otherend) + xenstore_path_ext_size; 1096 xspathsize = strlen(dev->otherend) + xenstore_path_ext_size;
1082 xspath = kmalloc(xspathsize, GFP_KERNEL); 1097 xspath = kmalloc(xspathsize, GFP_KERNEL);
@@ -1085,10 +1100,10 @@ static int connect_ring(struct backend_info *be)
1085 return -ENOMEM; 1100 return -ENOMEM;
1086 } 1101 }
1087 1102
1088 for (i = 0; i < be->blkif->nr_rings; i++) { 1103 for (i = 0; i < blkif->nr_rings; i++) {
1089 memset(xspath, 0, xspathsize); 1104 memset(xspath, 0, xspathsize);
1090 snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i); 1105 snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, i);
1091 err = read_per_ring_refs(&be->blkif->rings[i], xspath); 1106 err = read_per_ring_refs(&blkif->rings[i], xspath);
1092 if (err) { 1107 if (err) {
1093 kfree(xspath); 1108 kfree(xspath);
1094 return err; 1109 return err;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index e7a5f1d1c314..399cad7daae7 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev,
290 struct zram *zram = dev_to_zram(dev); 290 struct zram *zram = dev_to_zram(dev);
291 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT; 291 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
292 int index; 292 int index;
293 char mode_buf[8];
294 ssize_t sz;
295 293
296 sz = strscpy(mode_buf, buf, sizeof(mode_buf)); 294 if (!sysfs_streq(buf, "all"))
297 if (sz <= 0)
298 return -EINVAL;
299
300 /* ignore trailing new line */
301 if (mode_buf[sz - 1] == '\n')
302 mode_buf[sz - 1] = 0x00;
303
304 if (strcmp(mode_buf, "all"))
305 return -EINVAL; 295 return -EINVAL;
306 296
307 down_read(&zram->init_lock); 297 down_read(&zram->init_lock);
@@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev,
635 struct bio bio; 625 struct bio bio;
636 struct bio_vec bio_vec; 626 struct bio_vec bio_vec;
637 struct page *page; 627 struct page *page;
638 ssize_t ret, sz; 628 ssize_t ret;
639 char mode_buf[8]; 629 int mode;
640 int mode = -1;
641 unsigned long blk_idx = 0; 630 unsigned long blk_idx = 0;
642 631
643 sz = strscpy(mode_buf, buf, sizeof(mode_buf)); 632 if (sysfs_streq(buf, "idle"))
644 if (sz <= 0)
645 return -EINVAL;
646
647 /* ignore trailing newline */
648 if (mode_buf[sz - 1] == '\n')
649 mode_buf[sz - 1] = 0x00;
650
651 if (!strcmp(mode_buf, "idle"))
652 mode = IDLE_WRITEBACK; 633 mode = IDLE_WRITEBACK;
653 else if (!strcmp(mode_buf, "huge")) 634 else if (sysfs_streq(buf, "huge"))
654 mode = HUGE_WRITEBACK; 635 mode = HUGE_WRITEBACK;
655 636 else
656 if (mode == -1)
657 return -EINVAL; 637 return -EINVAL;
658 638
659 down_read(&zram->init_lock); 639 down_read(&zram->init_lock);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 72866a004f07..466ebd84ad17 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -348,7 +348,7 @@ config XILINX_HWICAP
348 348
349config R3964 349config R3964
350 tristate "Siemens R3964 line discipline" 350 tristate "Siemens R3964 line discipline"
351 depends on TTY 351 depends on TTY && BROKEN
352 ---help--- 352 ---help---
353 This driver allows synchronous communication with devices using the 353 This driver allows synchronous communication with devices using the
354 Siemens R3964 packet protocol. Unless you are dealing with special 354 Siemens R3964 packet protocol. Unless you are dealing with special
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index a8b20b65bd4b..aa4ec53281ce 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -1261,6 +1261,13 @@ static enum arch_timer_ppi_nr __init arch_timer_select_ppi(void)
1261 return ARCH_TIMER_PHYS_SECURE_PPI; 1261 return ARCH_TIMER_PHYS_SECURE_PPI;
1262} 1262}
1263 1263
1264static void __init arch_timer_populate_kvm_info(void)
1265{
1266 arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI];
1267 if (is_kernel_in_hyp_mode())
1268 arch_timer_kvm_info.physical_irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
1269}
1270
1264static int __init arch_timer_of_init(struct device_node *np) 1271static int __init arch_timer_of_init(struct device_node *np)
1265{ 1272{
1266 int i, ret; 1273 int i, ret;
@@ -1275,7 +1282,7 @@ static int __init arch_timer_of_init(struct device_node *np)
1275 for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++) 1282 for (i = ARCH_TIMER_PHYS_SECURE_PPI; i < ARCH_TIMER_MAX_TIMER_PPI; i++)
1276 arch_timer_ppi[i] = irq_of_parse_and_map(np, i); 1283 arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
1277 1284
1278 arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI]; 1285 arch_timer_populate_kvm_info();
1279 1286
1280 rate = arch_timer_get_cntfrq(); 1287 rate = arch_timer_get_cntfrq();
1281 arch_timer_of_configure_rate(rate, np); 1288 arch_timer_of_configure_rate(rate, np);
@@ -1605,7 +1612,7 @@ static int __init arch_timer_acpi_init(struct acpi_table_header *table)
1605 arch_timer_ppi[ARCH_TIMER_HYP_PPI] = 1612 arch_timer_ppi[ARCH_TIMER_HYP_PPI] =
1606 acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI); 1613 acpi_gtdt_map_ppi(ARCH_TIMER_HYP_PPI);
1607 1614
1608 arch_timer_kvm_info.virtual_irq = arch_timer_ppi[ARCH_TIMER_VIRT_PPI]; 1615 arch_timer_populate_kvm_info();
1609 1616
1610 /* 1617 /*
1611 * When probing via ACPI, we have no mechanism to override the sysreg 1618 * When probing via ACPI, we have no mechanism to override the sysreg
diff --git a/drivers/clocksource/clps711x-timer.c b/drivers/clocksource/clps711x-timer.c
index a8dd80576c95..857f8c086274 100644
--- a/drivers/clocksource/clps711x-timer.c
+++ b/drivers/clocksource/clps711x-timer.c
@@ -31,16 +31,9 @@ static u64 notrace clps711x_sched_clock_read(void)
31 return ~readw(tcd); 31 return ~readw(tcd);
32} 32}
33 33
34static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base) 34static void __init clps711x_clksrc_init(struct clk *clock, void __iomem *base)
35{ 35{
36 unsigned long rate; 36 unsigned long rate = clk_get_rate(clock);
37
38 if (!base)
39 return -ENOMEM;
40 if (IS_ERR(clock))
41 return PTR_ERR(clock);
42
43 rate = clk_get_rate(clock);
44 37
45 tcd = base; 38 tcd = base;
46 39
@@ -48,8 +41,6 @@ static int __init _clps711x_clksrc_init(struct clk *clock, void __iomem *base)
48 clocksource_mmio_readw_down); 41 clocksource_mmio_readw_down);
49 42
50 sched_clock_register(clps711x_sched_clock_read, 16, rate); 43 sched_clock_register(clps711x_sched_clock_read, 16, rate);
51
52 return 0;
53} 44}
54 45
55static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id) 46static irqreturn_t clps711x_timer_interrupt(int irq, void *dev_id)
@@ -67,13 +58,6 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
67 struct clock_event_device *clkevt; 58 struct clock_event_device *clkevt;
68 unsigned long rate; 59 unsigned long rate;
69 60
70 if (!irq)
71 return -EINVAL;
72 if (!base)
73 return -ENOMEM;
74 if (IS_ERR(clock))
75 return PTR_ERR(clock);
76
77 clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL); 61 clkevt = kzalloc(sizeof(*clkevt), GFP_KERNEL);
78 if (!clkevt) 62 if (!clkevt)
79 return -ENOMEM; 63 return -ENOMEM;
@@ -93,31 +77,29 @@ static int __init _clps711x_clkevt_init(struct clk *clock, void __iomem *base,
93 "clps711x-timer", clkevt); 77 "clps711x-timer", clkevt);
94} 78}
95 79
96void __init clps711x_clksrc_init(void __iomem *tc1_base, void __iomem *tc2_base,
97 unsigned int irq)
98{
99 struct clk *tc1 = clk_get_sys("clps711x-timer.0", NULL);
100 struct clk *tc2 = clk_get_sys("clps711x-timer.1", NULL);
101
102 BUG_ON(_clps711x_clksrc_init(tc1, tc1_base));
103 BUG_ON(_clps711x_clkevt_init(tc2, tc2_base, irq));
104}
105
106#ifdef CONFIG_TIMER_OF
107static int __init clps711x_timer_init(struct device_node *np) 80static int __init clps711x_timer_init(struct device_node *np)
108{ 81{
109 unsigned int irq = irq_of_parse_and_map(np, 0); 82 unsigned int irq = irq_of_parse_and_map(np, 0);
110 struct clk *clock = of_clk_get(np, 0); 83 struct clk *clock = of_clk_get(np, 0);
111 void __iomem *base = of_iomap(np, 0); 84 void __iomem *base = of_iomap(np, 0);
112 85
86 if (!base)
87 return -ENOMEM;
88 if (!irq)
89 return -EINVAL;
90 if (IS_ERR(clock))
91 return PTR_ERR(clock);
92
113 switch (of_alias_get_id(np, "timer")) { 93 switch (of_alias_get_id(np, "timer")) {
114 case CLPS711X_CLKSRC_CLOCKSOURCE: 94 case CLPS711X_CLKSRC_CLOCKSOURCE:
115 return _clps711x_clksrc_init(clock, base); 95 clps711x_clksrc_init(clock, base);
96 break;
116 case CLPS711X_CLKSRC_CLOCKEVENT: 97 case CLPS711X_CLKSRC_CLOCKEVENT:
117 return _clps711x_clkevt_init(clock, base, irq); 98 return _clps711x_clkevt_init(clock, base, irq);
118 default: 99 default:
119 return -EINVAL; 100 return -EINVAL;
120 } 101 }
102
103 return 0;
121} 104}
122TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init); 105TIMER_OF_DECLARE(clps711x, "cirrus,ep7209-timer", clps711x_timer_init);
123#endif
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index 54f8a331b53a..37671a5d4ed9 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -67,7 +67,7 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
67 return IRQ_HANDLED; 67 return IRQ_HANDLED;
68} 68}
69 69
70struct irqaction gic_compare_irqaction = { 70static struct irqaction gic_compare_irqaction = {
71 .handler = gic_compare_interrupt, 71 .handler = gic_compare_interrupt,
72 .percpu_dev_id = &gic_clockevent_device, 72 .percpu_dev_id = &gic_clockevent_device,
73 .flags = IRQF_PERCPU | IRQF_TIMER, 73 .flags = IRQF_PERCPU | IRQF_TIMER,
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 43f4d5c4d6fa..f987027ca566 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -71,7 +71,7 @@ static u64 tc_get_cycles32(struct clocksource *cs)
71 return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV)); 71 return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
72} 72}
73 73
74void tc_clksrc_suspend(struct clocksource *cs) 74static void tc_clksrc_suspend(struct clocksource *cs)
75{ 75{
76 int i; 76 int i;
77 77
@@ -86,7 +86,7 @@ void tc_clksrc_suspend(struct clocksource *cs)
86 bmr_cache = readl(tcaddr + ATMEL_TC_BMR); 86 bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
87} 87}
88 88
89void tc_clksrc_resume(struct clocksource *cs) 89static void tc_clksrc_resume(struct clocksource *cs)
90{ 90{
91 int i; 91 int i;
92 92
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index e8163693e936..5e6038fbf115 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -58,7 +58,7 @@ static u64 riscv_sched_clock(void)
58static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = { 58static DEFINE_PER_CPU(struct clocksource, riscv_clocksource) = {
59 .name = "riscv_clocksource", 59 .name = "riscv_clocksource",
60 .rating = 300, 60 .rating = 300,
61 .mask = CLOCKSOURCE_MASK(BITS_PER_LONG), 61 .mask = CLOCKSOURCE_MASK(64),
62 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 62 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
63 .read = riscv_clocksource_rdtime, 63 .read = riscv_clocksource_rdtime,
64}; 64};
@@ -120,8 +120,7 @@ static int __init riscv_timer_init_dt(struct device_node *n)
120 return error; 120 return error;
121 } 121 }
122 122
123 sched_clock_register(riscv_sched_clock, 123 sched_clock_register(riscv_sched_clock, 64, riscv_timebase);
124 BITS_PER_LONG, riscv_timebase);
125 124
126 error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING, 125 error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
127 "clockevents/riscv/timer:starting", 126 "clockevents/riscv/timer:starting",
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index c364027638e1..3352da6ed61f 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -586,8 +586,8 @@ static int omap_dm_timer_set_load(struct omap_dm_timer *timer, int autoreload,
586} 586}
587 587
588/* Optimized set_load which removes costly spin wait in timer_start */ 588/* Optimized set_load which removes costly spin wait in timer_start */
589int omap_dm_timer_set_load_start(struct omap_dm_timer *timer, int autoreload, 589static int omap_dm_timer_set_load_start(struct omap_dm_timer *timer,
590 unsigned int load) 590 int autoreload, unsigned int load)
591{ 591{
592 u32 l; 592 u32 l;
593 593
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index e22f0dbaebb1..2986119dd31f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -385,7 +385,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
385 if (ret) 385 if (ret)
386 return ret; 386 return ret;
387 387
388 return cppc_perf.guaranteed_perf; 388 if (cppc_perf.guaranteed_perf)
389 return cppc_perf.guaranteed_perf;
390
391 return cppc_perf.nominal_perf;
389} 392}
390 393
391#else /* CONFIG_ACPI_CPPC_LIB */ 394#else /* CONFIG_ACPI_CPPC_LIB */
@@ -2593,6 +2596,9 @@ static int __init intel_pstate_init(void)
2593 const struct x86_cpu_id *id; 2596 const struct x86_cpu_id *id;
2594 int rc; 2597 int rc;
2595 2598
2599 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2600 return -ENODEV;
2601
2596 if (no_load) 2602 if (no_load)
2597 return -ENODEV; 2603 return -ENODEV;
2598 2604
@@ -2608,7 +2614,7 @@ static int __init intel_pstate_init(void)
2608 } else { 2614 } else {
2609 id = x86_match_cpu(intel_pstate_cpu_ids); 2615 id = x86_match_cpu(intel_pstate_cpu_ids);
2610 if (!id) { 2616 if (!id) {
2611 pr_info("CPU ID not supported\n"); 2617 pr_info("CPU model not supported\n");
2612 return -ENODEV; 2618 return -ENODEV;
2613 } 2619 }
2614 2620
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 3f49427766b8..2b51e0718c9f 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
189 189
190 clk_put(priv->clk); 190 clk_put(priv->clk);
191 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); 191 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
192 kfree(priv);
193 dev_pm_opp_remove_all_dynamic(priv->cpu_dev); 192 dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
193 kfree(priv);
194 194
195 return 0; 195 return 0;
196} 196}
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index e0700bf4893a..5ef624fe3934 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -23,12 +23,38 @@ config DEV_DAX
23config DEV_DAX_PMEM 23config DEV_DAX_PMEM
24 tristate "PMEM DAX: direct access to persistent memory" 24 tristate "PMEM DAX: direct access to persistent memory"
25 depends on LIBNVDIMM && NVDIMM_DAX && DEV_DAX 25 depends on LIBNVDIMM && NVDIMM_DAX && DEV_DAX
26 depends on m # until we can kill DEV_DAX_PMEM_COMPAT
26 default DEV_DAX 27 default DEV_DAX
27 help 28 help
28 Support raw access to persistent memory. Note that this 29 Support raw access to persistent memory. Note that this
29 driver consumes memory ranges allocated and exported by the 30 driver consumes memory ranges allocated and exported by the
30 libnvdimm sub-system. 31 libnvdimm sub-system.
31 32
32 Say Y if unsure 33 Say M if unsure
34
35config DEV_DAX_KMEM
36 tristate "KMEM DAX: volatile-use of persistent memory"
37 default DEV_DAX
38 depends on DEV_DAX
39 depends on MEMORY_HOTPLUG # for add_memory() and friends
40 help
41 Support access to persistent memory as if it were RAM. This
42 allows easier use of persistent memory by unmodified
43 applications.
44
45 To use this feature, a DAX device must be unbound from the
46 device_dax driver (PMEM DAX) and bound to this kmem driver
47 on each boot.
48
49 Say N if unsure.
50
51config DEV_DAX_PMEM_COMPAT
52 tristate "PMEM DAX: support the deprecated /sys/class/dax interface"
53 depends on DEV_DAX_PMEM
54 default DEV_DAX_PMEM
55 help
56 Older versions of the libdaxctl library expect to find all
57 device-dax instances under /sys/class/dax. If libdaxctl in
58 your distribution is older than v58 say M, otherwise say N.
33 59
34endif 60endif
diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile
index 574286fac87c..81f7d54dadfb 100644
--- a/drivers/dax/Makefile
+++ b/drivers/dax/Makefile
@@ -1,8 +1,10 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_DAX) += dax.o 2obj-$(CONFIG_DAX) += dax.o
3obj-$(CONFIG_DEV_DAX) += device_dax.o 3obj-$(CONFIG_DEV_DAX) += device_dax.o
4obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o 4obj-$(CONFIG_DEV_DAX_KMEM) += kmem.o
5 5
6dax-y := super.o 6dax-y := super.o
7dax_pmem-y := pmem.o 7dax-y += bus.o
8device_dax-y := device.o 8device_dax-y := device.o
9
10obj-y += pmem/
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
new file mode 100644
index 000000000000..2109cfe80219
--- /dev/null
+++ b/drivers/dax/bus.c
@@ -0,0 +1,503 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */
3#include <linux/memremap.h>
4#include <linux/device.h>
5#include <linux/mutex.h>
6#include <linux/list.h>
7#include <linux/slab.h>
8#include <linux/dax.h>
9#include "dax-private.h"
10#include "bus.h"
11
12static struct class *dax_class;
13
14static DEFINE_MUTEX(dax_bus_lock);
15
16#define DAX_NAME_LEN 30
17struct dax_id {
18 struct list_head list;
19 char dev_name[DAX_NAME_LEN];
20};
21
22static int dax_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
23{
24 /*
25 * We only ever expect to handle device-dax instances, i.e. the
26 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
27 */
28 return add_uevent_var(env, "MODALIAS=" DAX_DEVICE_MODALIAS_FMT, 0);
29}
30
31static struct dax_device_driver *to_dax_drv(struct device_driver *drv)
32{
33 return container_of(drv, struct dax_device_driver, drv);
34}
35
36static struct dax_id *__dax_match_id(struct dax_device_driver *dax_drv,
37 const char *dev_name)
38{
39 struct dax_id *dax_id;
40
41 lockdep_assert_held(&dax_bus_lock);
42
43 list_for_each_entry(dax_id, &dax_drv->ids, list)
44 if (sysfs_streq(dax_id->dev_name, dev_name))
45 return dax_id;
46 return NULL;
47}
48
49static int dax_match_id(struct dax_device_driver *dax_drv, struct device *dev)
50{
51 int match;
52
53 mutex_lock(&dax_bus_lock);
54 match = !!__dax_match_id(dax_drv, dev_name(dev));
55 mutex_unlock(&dax_bus_lock);
56
57 return match;
58}
59
60enum id_action {
61 ID_REMOVE,
62 ID_ADD,
63};
64
65static ssize_t do_id_store(struct device_driver *drv, const char *buf,
66 size_t count, enum id_action action)
67{
68 struct dax_device_driver *dax_drv = to_dax_drv(drv);
69 unsigned int region_id, id;
70 char devname[DAX_NAME_LEN];
71 struct dax_id *dax_id;
72 ssize_t rc = count;
73 int fields;
74
75 fields = sscanf(buf, "dax%d.%d", &region_id, &id);
76 if (fields != 2)
77 return -EINVAL;
78 sprintf(devname, "dax%d.%d", region_id, id);
79 if (!sysfs_streq(buf, devname))
80 return -EINVAL;
81
82 mutex_lock(&dax_bus_lock);
83 dax_id = __dax_match_id(dax_drv, buf);
84 if (!dax_id) {
85 if (action == ID_ADD) {
86 dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL);
87 if (dax_id) {
88 strncpy(dax_id->dev_name, buf, DAX_NAME_LEN);
89 list_add(&dax_id->list, &dax_drv->ids);
90 } else
91 rc = -ENOMEM;
92 } else
93 /* nothing to remove */;
94 } else if (action == ID_REMOVE) {
95 list_del(&dax_id->list);
96 kfree(dax_id);
97 } else
98 /* dax_id already added */;
99 mutex_unlock(&dax_bus_lock);
100
101 if (rc < 0)
102 return rc;
103 if (action == ID_ADD)
104 rc = driver_attach(drv);
105 if (rc)
106 return rc;
107 return count;
108}
109
110static ssize_t new_id_store(struct device_driver *drv, const char *buf,
111 size_t count)
112{
113 return do_id_store(drv, buf, count, ID_ADD);
114}
115static DRIVER_ATTR_WO(new_id);
116
117static ssize_t remove_id_store(struct device_driver *drv, const char *buf,
118 size_t count)
119{
120 return do_id_store(drv, buf, count, ID_REMOVE);
121}
122static DRIVER_ATTR_WO(remove_id);
123
124static struct attribute *dax_drv_attrs[] = {
125 &driver_attr_new_id.attr,
126 &driver_attr_remove_id.attr,
127 NULL,
128};
129ATTRIBUTE_GROUPS(dax_drv);
130
131static int dax_bus_match(struct device *dev, struct device_driver *drv);
132
133static struct bus_type dax_bus_type = {
134 .name = "dax",
135 .uevent = dax_bus_uevent,
136 .match = dax_bus_match,
137 .drv_groups = dax_drv_groups,
138};
139
140static int dax_bus_match(struct device *dev, struct device_driver *drv)
141{
142 struct dax_device_driver *dax_drv = to_dax_drv(drv);
143
144 /*
145 * All but the 'device-dax' driver, which has 'match_always'
146 * set, requires an exact id match.
147 */
148 if (dax_drv->match_always)
149 return 1;
150
151 return dax_match_id(dax_drv, dev);
152}
153
154/*
155 * Rely on the fact that drvdata is set before the attributes are
156 * registered, and that the attributes are unregistered before drvdata
157 * is cleared to assume that drvdata is always valid.
158 */
159static ssize_t id_show(struct device *dev,
160 struct device_attribute *attr, char *buf)
161{
162 struct dax_region *dax_region = dev_get_drvdata(dev);
163
164 return sprintf(buf, "%d\n", dax_region->id);
165}
166static DEVICE_ATTR_RO(id);
167
168static ssize_t region_size_show(struct device *dev,
169 struct device_attribute *attr, char *buf)
170{
171 struct dax_region *dax_region = dev_get_drvdata(dev);
172
173 return sprintf(buf, "%llu\n", (unsigned long long)
174 resource_size(&dax_region->res));
175}
176static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
177 region_size_show, NULL);
178
179static ssize_t align_show(struct device *dev,
180 struct device_attribute *attr, char *buf)
181{
182 struct dax_region *dax_region = dev_get_drvdata(dev);
183
184 return sprintf(buf, "%u\n", dax_region->align);
185}
186static DEVICE_ATTR_RO(align);
187
188static struct attribute *dax_region_attributes[] = {
189 &dev_attr_region_size.attr,
190 &dev_attr_align.attr,
191 &dev_attr_id.attr,
192 NULL,
193};
194
195static const struct attribute_group dax_region_attribute_group = {
196 .name = "dax_region",
197 .attrs = dax_region_attributes,
198};
199
200static const struct attribute_group *dax_region_attribute_groups[] = {
201 &dax_region_attribute_group,
202 NULL,
203};
204
205static void dax_region_free(struct kref *kref)
206{
207 struct dax_region *dax_region;
208
209 dax_region = container_of(kref, struct dax_region, kref);
210 kfree(dax_region);
211}
212
213void dax_region_put(struct dax_region *dax_region)
214{
215 kref_put(&dax_region->kref, dax_region_free);
216}
217EXPORT_SYMBOL_GPL(dax_region_put);
218
219static void dax_region_unregister(void *region)
220{
221 struct dax_region *dax_region = region;
222
223 sysfs_remove_groups(&dax_region->dev->kobj,
224 dax_region_attribute_groups);
225 dax_region_put(dax_region);
226}
227
228struct dax_region *alloc_dax_region(struct device *parent, int region_id,
229 struct resource *res, int target_node, unsigned int align,
230 unsigned long pfn_flags)
231{
232 struct dax_region *dax_region;
233
234 /*
235 * The DAX core assumes that it can store its private data in
236 * parent->driver_data. This WARN is a reminder / safeguard for
237 * developers of device-dax drivers.
238 */
239 if (dev_get_drvdata(parent)) {
240 dev_WARN(parent, "dax core failed to setup private data\n");
241 return NULL;
242 }
243
244 if (!IS_ALIGNED(res->start, align)
245 || !IS_ALIGNED(resource_size(res), align))
246 return NULL;
247
248 dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
249 if (!dax_region)
250 return NULL;
251
252 dev_set_drvdata(parent, dax_region);
253 memcpy(&dax_region->res, res, sizeof(*res));
254 dax_region->pfn_flags = pfn_flags;
255 kref_init(&dax_region->kref);
256 dax_region->id = region_id;
257 dax_region->align = align;
258 dax_region->dev = parent;
259 dax_region->target_node = target_node;
260 if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
261 kfree(dax_region);
262 return NULL;
263 }
264
265 kref_get(&dax_region->kref);
266 if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region))
267 return NULL;
268 return dax_region;
269}
270EXPORT_SYMBOL_GPL(alloc_dax_region);
271
272static ssize_t size_show(struct device *dev,
273 struct device_attribute *attr, char *buf)
274{
275 struct dev_dax *dev_dax = to_dev_dax(dev);
276 unsigned long long size = resource_size(&dev_dax->region->res);
277
278 return sprintf(buf, "%llu\n", size);
279}
280static DEVICE_ATTR_RO(size);
281
282static int dev_dax_target_node(struct dev_dax *dev_dax)
283{
284 struct dax_region *dax_region = dev_dax->region;
285
286 return dax_region->target_node;
287}
288
289static ssize_t target_node_show(struct device *dev,
290 struct device_attribute *attr, char *buf)
291{
292 struct dev_dax *dev_dax = to_dev_dax(dev);
293
294 return sprintf(buf, "%d\n", dev_dax_target_node(dev_dax));
295}
296static DEVICE_ATTR_RO(target_node);
297
298static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
299 char *buf)
300{
301 /*
302 * We only ever expect to handle device-dax instances, i.e. the
303 * @type argument to MODULE_ALIAS_DAX_DEVICE() is always zero
304 */
305 return sprintf(buf, DAX_DEVICE_MODALIAS_FMT "\n", 0);
306}
307static DEVICE_ATTR_RO(modalias);
308
309static umode_t dev_dax_visible(struct kobject *kobj, struct attribute *a, int n)
310{
311 struct device *dev = container_of(kobj, struct device, kobj);
312 struct dev_dax *dev_dax = to_dev_dax(dev);
313
314 if (a == &dev_attr_target_node.attr && dev_dax_target_node(dev_dax) < 0)
315 return 0;
316 return a->mode;
317}
318
319static struct attribute *dev_dax_attributes[] = {
320 &dev_attr_modalias.attr,
321 &dev_attr_size.attr,
322 &dev_attr_target_node.attr,
323 NULL,
324};
325
326static const struct attribute_group dev_dax_attribute_group = {
327 .attrs = dev_dax_attributes,
328 .is_visible = dev_dax_visible,
329};
330
331static const struct attribute_group *dax_attribute_groups[] = {
332 &dev_dax_attribute_group,
333 NULL,
334};
335
336void kill_dev_dax(struct dev_dax *dev_dax)
337{
338 struct dax_device *dax_dev = dev_dax->dax_dev;
339 struct inode *inode = dax_inode(dax_dev);
340
341 kill_dax(dax_dev);
342 unmap_mapping_range(inode->i_mapping, 0, 0, 1);
343}
344EXPORT_SYMBOL_GPL(kill_dev_dax);
345
346static void dev_dax_release(struct device *dev)
347{
348 struct dev_dax *dev_dax = to_dev_dax(dev);
349 struct dax_region *dax_region = dev_dax->region;
350 struct dax_device *dax_dev = dev_dax->dax_dev;
351
352 dax_region_put(dax_region);
353 put_dax(dax_dev);
354 kfree(dev_dax);
355}
356
357static void unregister_dev_dax(void *dev)
358{
359 struct dev_dax *dev_dax = to_dev_dax(dev);
360
361 dev_dbg(dev, "%s\n", __func__);
362
363 kill_dev_dax(dev_dax);
364 device_del(dev);
365 put_device(dev);
366}
367
368struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
369 struct dev_pagemap *pgmap, enum dev_dax_subsys subsys)
370{
371 struct device *parent = dax_region->dev;
372 struct dax_device *dax_dev;
373 struct dev_dax *dev_dax;
374 struct inode *inode;
375 struct device *dev;
376 int rc = -ENOMEM;
377
378 if (id < 0)
379 return ERR_PTR(-EINVAL);
380
381 dev_dax = kzalloc(sizeof(*dev_dax), GFP_KERNEL);
382 if (!dev_dax)
383 return ERR_PTR(-ENOMEM);
384
385 memcpy(&dev_dax->pgmap, pgmap, sizeof(*pgmap));
386
387 /*
388 * No 'host' or dax_operations since there is no access to this
389 * device outside of mmap of the resulting character device.
390 */
391 dax_dev = alloc_dax(dev_dax, NULL, NULL);
392 if (!dax_dev)
393 goto err;
394
395 /* a device_dax instance is dead while the driver is not attached */
396 kill_dax(dax_dev);
397
398 /* from here on we're committed to teardown via dax_dev_release() */
399 dev = &dev_dax->dev;
400 device_initialize(dev);
401
402 dev_dax->dax_dev = dax_dev;
403 dev_dax->region = dax_region;
404 dev_dax->target_node = dax_region->target_node;
405 kref_get(&dax_region->kref);
406
407 inode = dax_inode(dax_dev);
408 dev->devt = inode->i_rdev;
409 if (subsys == DEV_DAX_BUS)
410 dev->bus = &dax_bus_type;
411 else
412 dev->class = dax_class;
413 dev->parent = parent;
414 dev->groups = dax_attribute_groups;
415 dev->release = dev_dax_release;
416 dev_set_name(dev, "dax%d.%d", dax_region->id, id);
417
418 rc = device_add(dev);
419 if (rc) {
420 kill_dev_dax(dev_dax);
421 put_device(dev);
422 return ERR_PTR(rc);
423 }
424
425 rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
426 if (rc)
427 return ERR_PTR(rc);
428
429 return dev_dax;
430
431 err:
432 kfree(dev_dax);
433
434 return ERR_PTR(rc);
435}
436EXPORT_SYMBOL_GPL(__devm_create_dev_dax);
437
438static int match_always_count;
439
440int __dax_driver_register(struct dax_device_driver *dax_drv,
441 struct module *module, const char *mod_name)
442{
443 struct device_driver *drv = &dax_drv->drv;
444 int rc = 0;
445
446 INIT_LIST_HEAD(&dax_drv->ids);
447 drv->owner = module;
448 drv->name = mod_name;
449 drv->mod_name = mod_name;
450 drv->bus = &dax_bus_type;
451
452 /* there can only be one default driver */
453 mutex_lock(&dax_bus_lock);
454 match_always_count += dax_drv->match_always;
455 if (match_always_count > 1) {
456 match_always_count--;
457 WARN_ON(1);
458 rc = -EINVAL;
459 }
460 mutex_unlock(&dax_bus_lock);
461 if (rc)
462 return rc;
463 return driver_register(drv);
464}
465EXPORT_SYMBOL_GPL(__dax_driver_register);
466
467void dax_driver_unregister(struct dax_device_driver *dax_drv)
468{
469 struct device_driver *drv = &dax_drv->drv;
470 struct dax_id *dax_id, *_id;
471
472 mutex_lock(&dax_bus_lock);
473 match_always_count -= dax_drv->match_always;
474 list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) {
475 list_del(&dax_id->list);
476 kfree(dax_id);
477 }
478 mutex_unlock(&dax_bus_lock);
479 driver_unregister(drv);
480}
481EXPORT_SYMBOL_GPL(dax_driver_unregister);
482
483int __init dax_bus_init(void)
484{
485 int rc;
486
487 if (IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)) {
488 dax_class = class_create(THIS_MODULE, "dax");
489 if (IS_ERR(dax_class))
490 return PTR_ERR(dax_class);
491 }
492
493 rc = bus_register(&dax_bus_type);
494 if (rc)
495 class_destroy(dax_class);
496 return rc;
497}
498
499void __exit dax_bus_exit(void)
500{
501 bus_unregister(&dax_bus_type);
502 class_destroy(dax_class);
503}
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
new file mode 100644
index 000000000000..8619e3299943
--- /dev/null
+++ b/drivers/dax/bus.h
@@ -0,0 +1,61 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
#ifndef __DAX_BUS_H__
#define __DAX_BUS_H__
#include <linux/device.h>

struct dev_dax;
struct resource;
struct dax_device;
struct dax_region;
/* drop a reference on @dax_region; the region is freed on the final put */
void dax_region_put(struct dax_region *dax_region);
/* allocate a dax_region covering @res under @parent; returns NULL on failure */
struct dax_region *alloc_dax_region(struct device *parent, int region_id,
		struct resource *res, int target_node, unsigned int align,
		unsigned long flags);

/* which device model a dev_dax instance is registered under */
enum dev_dax_subsys {
	DEV_DAX_BUS,	/* native dax-bus device */
	DEV_DAX_CLASS,	/* legacy /sys/class/dax device (compat) */
};

struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
		struct dev_pagemap *pgmap, enum dev_dax_subsys subsys);

/* convenience wrapper: create a dev_dax on the default (bus) subsystem */
static inline struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
		int id, struct dev_pagemap *pgmap)
{
	return __devm_create_dev_dax(dax_region, id, pgmap, DEV_DAX_BUS);
}

/* to be deleted when DEV_DAX_CLASS is removed */
struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys);

/*
 * A driver for dev_dax devices. @ids holds dynamically added device ids;
 * @match_always marks the single default driver that binds any dev_dax
 * device (enforced in __dax_driver_register()).
 */
struct dax_device_driver {
	struct device_driver drv;
	struct list_head ids;
	int match_always;
};

int __dax_driver_register(struct dax_device_driver *dax_drv,
		struct module *module, const char *mod_name);
#define dax_driver_register(driver) \
	__dax_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
void dax_driver_unregister(struct dax_device_driver *dax_drv);
/* shut down the dax_device backing @dev_dax and invalidate its mappings */
void kill_dev_dax(struct dev_dax *dev_dax);

#if IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)
int dev_dax_probe(struct device *dev);
#endif

/*
 * While run_dax() is potentially a generic operation that could be
 * defined in include/linux/dax.h we don't want to grow any users
 * outside of drivers/dax/
 */
void run_dax(struct dax_device *dax_dev);

#define MODULE_ALIAS_DAX_DEVICE(type) \
	MODULE_ALIAS("dax:t" __stringify(type) "*")
#define DAX_DEVICE_MODALIAS_FMT "dax:t%d"

#endif /* __DAX_BUS_H__ */
diff --git a/drivers/dax/dax-private.h b/drivers/dax/dax-private.h
index b6fc4f04636d..a45612148ca0 100644
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -16,10 +16,17 @@
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/cdev.h> 17#include <linux/cdev.h>
18 18
19/* private routines between core files */
20struct dax_device;
21struct dax_device *inode_dax(struct inode *inode);
22struct inode *dax_inode(struct dax_device *dax_dev);
23int dax_bus_init(void);
24void dax_bus_exit(void);
25
19/** 26/**
20 * struct dax_region - mapping infrastructure for dax devices 27 * struct dax_region - mapping infrastructure for dax devices
21 * @id: kernel-wide unique region for a memory range 28 * @id: kernel-wide unique region for a memory range
22 * @base: linear address corresponding to @res 29 * @target_node: effective numa node if this memory range is onlined
23 * @kref: to pin while other agents have a need to do lookups 30 * @kref: to pin while other agents have a need to do lookups
24 * @dev: parent device backing this region 31 * @dev: parent device backing this region
25 * @align: allocation and mapping alignment for child dax devices 32 * @align: allocation and mapping alignment for child dax devices
@@ -28,8 +35,7 @@
28 */ 35 */
29struct dax_region { 36struct dax_region {
30 int id; 37 int id;
31 struct ida ida; 38 int target_node;
32 void *base;
33 struct kref kref; 39 struct kref kref;
34 struct device *dev; 40 struct device *dev;
35 unsigned int align; 41 unsigned int align;
@@ -38,20 +44,28 @@ struct dax_region {
38}; 44};
39 45
40/** 46/**
41 * struct dev_dax - instance data for a subdivision of a dax region 47 * struct dev_dax - instance data for a subdivision of a dax region, and
48 * data while the device is activated in the driver.
42 * @region - parent region 49 * @region - parent region
43 * @dax_dev - core dax functionality 50 * @dax_dev - core dax functionality
51 * @target_node: effective numa node if dev_dax memory range is onlined
44 * @dev - device core 52 * @dev - device core
45 * @id - child id in the region 53 * @pgmap - pgmap for memmap setup / lifetime (driver owned)
46 * @num_resources - number of physical address extents in this device 54 * @ref: pgmap reference count (driver owned)
47 * @res - array of physical address ranges 55 * @cmp: @ref final put completion (driver owned)
48 */ 56 */
49struct dev_dax { 57struct dev_dax {
50 struct dax_region *region; 58 struct dax_region *region;
51 struct dax_device *dax_dev; 59 struct dax_device *dax_dev;
60 int target_node;
52 struct device dev; 61 struct device dev;
53 int id; 62 struct dev_pagemap pgmap;
54 int num_resources; 63 struct percpu_ref ref;
55 struct resource res[0]; 64 struct completion cmp;
56}; 65};
66
67static inline struct dev_dax *to_dev_dax(struct device *dev)
68{
69 return container_of(dev, struct dev_dax, dev);
70}
57#endif 71#endif
diff --git a/drivers/dax/dax.h b/drivers/dax/dax.h
deleted file mode 100644
index f9e5feea742c..000000000000
--- a/drivers/dax/dax.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#ifndef __DAX_H__
14#define __DAX_H__
15struct dax_device;
16struct dax_device *inode_dax(struct inode *inode);
17struct inode *dax_inode(struct dax_device *dax_dev);
18#endif /* __DAX_H__ */
diff --git a/drivers/dax/device-dax.h b/drivers/dax/device-dax.h
deleted file mode 100644
index 688b051750bd..000000000000
--- a/drivers/dax/device-dax.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * Copyright(c) 2016 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#ifndef __DEVICE_DAX_H__
14#define __DEVICE_DAX_H__
15struct device;
16struct dev_dax;
17struct resource;
18struct dax_region;
19void dax_region_put(struct dax_region *dax_region);
20struct dax_region *alloc_dax_region(struct device *parent,
21 int region_id, struct resource *res, unsigned int align,
22 void *addr, unsigned long flags);
23struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
24 int id, struct resource *res, int count);
25#endif /* __DEVICE_DAX_H__ */
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index 948806e57cee..e428468ab661 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -1,15 +1,6 @@
1/* 1// SPDX-License-Identifier: GPL-2.0
2 * Copyright(c) 2016 - 2017 Intel Corporation. All rights reserved. 2/* Copyright(c) 2016-2018 Intel Corporation. All rights reserved. */
3 * 3#include <linux/memremap.h>
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#include <linux/pagemap.h> 4#include <linux/pagemap.h>
14#include <linux/module.h> 5#include <linux/module.h>
15#include <linux/device.h> 6#include <linux/device.h>
@@ -21,161 +12,39 @@
21#include <linux/mm.h> 12#include <linux/mm.h>
22#include <linux/mman.h> 13#include <linux/mman.h>
23#include "dax-private.h" 14#include "dax-private.h"
24#include "dax.h" 15#include "bus.h"
25 16
26static struct class *dax_class; 17static struct dev_dax *ref_to_dev_dax(struct percpu_ref *ref)
27
28/*
29 * Rely on the fact that drvdata is set before the attributes are
30 * registered, and that the attributes are unregistered before drvdata
31 * is cleared to assume that drvdata is always valid.
32 */
33static ssize_t id_show(struct device *dev,
34 struct device_attribute *attr, char *buf)
35{
36 struct dax_region *dax_region = dev_get_drvdata(dev);
37
38 return sprintf(buf, "%d\n", dax_region->id);
39}
40static DEVICE_ATTR_RO(id);
41
42static ssize_t region_size_show(struct device *dev,
43 struct device_attribute *attr, char *buf)
44{
45 struct dax_region *dax_region = dev_get_drvdata(dev);
46
47 return sprintf(buf, "%llu\n", (unsigned long long)
48 resource_size(&dax_region->res));
49}
50static struct device_attribute dev_attr_region_size = __ATTR(size, 0444,
51 region_size_show, NULL);
52
53static ssize_t align_show(struct device *dev,
54 struct device_attribute *attr, char *buf)
55{
56 struct dax_region *dax_region = dev_get_drvdata(dev);
57
58 return sprintf(buf, "%u\n", dax_region->align);
59}
60static DEVICE_ATTR_RO(align);
61
62static struct attribute *dax_region_attributes[] = {
63 &dev_attr_region_size.attr,
64 &dev_attr_align.attr,
65 &dev_attr_id.attr,
66 NULL,
67};
68
69static const struct attribute_group dax_region_attribute_group = {
70 .name = "dax_region",
71 .attrs = dax_region_attributes,
72};
73
74static const struct attribute_group *dax_region_attribute_groups[] = {
75 &dax_region_attribute_group,
76 NULL,
77};
78
79static void dax_region_free(struct kref *kref)
80{
81 struct dax_region *dax_region;
82
83 dax_region = container_of(kref, struct dax_region, kref);
84 kfree(dax_region);
85}
86
87void dax_region_put(struct dax_region *dax_region)
88{ 18{
89 kref_put(&dax_region->kref, dax_region_free); 19 return container_of(ref, struct dev_dax, ref);
90} 20}
91EXPORT_SYMBOL_GPL(dax_region_put);
92 21
93static void dax_region_unregister(void *region) 22static void dev_dax_percpu_release(struct percpu_ref *ref)
94{ 23{
95 struct dax_region *dax_region = region; 24 struct dev_dax *dev_dax = ref_to_dev_dax(ref);
96 25
97 sysfs_remove_groups(&dax_region->dev->kobj, 26 dev_dbg(&dev_dax->dev, "%s\n", __func__);
98 dax_region_attribute_groups); 27 complete(&dev_dax->cmp);
99 dax_region_put(dax_region);
100} 28}
101 29
102struct dax_region *alloc_dax_region(struct device *parent, int region_id, 30static void dev_dax_percpu_exit(void *data)
103 struct resource *res, unsigned int align, void *addr,
104 unsigned long pfn_flags)
105{ 31{
106 struct dax_region *dax_region; 32 struct percpu_ref *ref = data;
107 33 struct dev_dax *dev_dax = ref_to_dev_dax(ref);
108 /*
109 * The DAX core assumes that it can store its private data in
110 * parent->driver_data. This WARN is a reminder / safeguard for
111 * developers of device-dax drivers.
112 */
113 if (dev_get_drvdata(parent)) {
114 dev_WARN(parent, "dax core failed to setup private data\n");
115 return NULL;
116 }
117
118 if (!IS_ALIGNED(res->start, align)
119 || !IS_ALIGNED(resource_size(res), align))
120 return NULL;
121
122 dax_region = kzalloc(sizeof(*dax_region), GFP_KERNEL);
123 if (!dax_region)
124 return NULL;
125
126 dev_set_drvdata(parent, dax_region);
127 memcpy(&dax_region->res, res, sizeof(*res));
128 dax_region->pfn_flags = pfn_flags;
129 kref_init(&dax_region->kref);
130 dax_region->id = region_id;
131 ida_init(&dax_region->ida);
132 dax_region->align = align;
133 dax_region->dev = parent;
134 dax_region->base = addr;
135 if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) {
136 kfree(dax_region);
137 return NULL;
138 }
139 34
140 kref_get(&dax_region->kref); 35 dev_dbg(&dev_dax->dev, "%s\n", __func__);
141 if (devm_add_action_or_reset(parent, dax_region_unregister, dax_region)) 36 wait_for_completion(&dev_dax->cmp);
142 return NULL; 37 percpu_ref_exit(ref);
143 return dax_region;
144} 38}
145EXPORT_SYMBOL_GPL(alloc_dax_region);
146 39
147static struct dev_dax *to_dev_dax(struct device *dev) 40static void dev_dax_percpu_kill(struct percpu_ref *data)
148{ 41{
149 return container_of(dev, struct dev_dax, dev); 42 struct percpu_ref *ref = data;
150} 43 struct dev_dax *dev_dax = ref_to_dev_dax(ref);
151
152static ssize_t size_show(struct device *dev,
153 struct device_attribute *attr, char *buf)
154{
155 struct dev_dax *dev_dax = to_dev_dax(dev);
156 unsigned long long size = 0;
157 int i;
158 44
159 for (i = 0; i < dev_dax->num_resources; i++) 45 dev_dbg(&dev_dax->dev, "%s\n", __func__);
160 size += resource_size(&dev_dax->res[i]); 46 percpu_ref_kill(ref);
161
162 return sprintf(buf, "%llu\n", size);
163} 47}
164static DEVICE_ATTR_RO(size);
165
166static struct attribute *dev_dax_attributes[] = {
167 &dev_attr_size.attr,
168 NULL,
169};
170
171static const struct attribute_group dev_dax_attribute_group = {
172 .attrs = dev_dax_attributes,
173};
174
175static const struct attribute_group *dax_attribute_groups[] = {
176 &dev_dax_attribute_group,
177 NULL,
178};
179 48
180static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma, 49static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
181 const char *func) 50 const char *func)
@@ -226,21 +95,11 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
226__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff, 95__weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
227 unsigned long size) 96 unsigned long size)
228{ 97{
229 struct resource *res; 98 struct resource *res = &dev_dax->region->res;
230 /* gcc-4.6.3-nolibc for i386 complains that this is uninitialized */ 99 phys_addr_t phys;
231 phys_addr_t uninitialized_var(phys);
232 int i;
233
234 for (i = 0; i < dev_dax->num_resources; i++) {
235 res = &dev_dax->res[i];
236 phys = pgoff * PAGE_SIZE + res->start;
237 if (phys >= res->start && phys <= res->end)
238 break;
239 pgoff -= PHYS_PFN(resource_size(res));
240 }
241 100
242 if (i < dev_dax->num_resources) { 101 phys = pgoff * PAGE_SIZE + res->start;
243 res = &dev_dax->res[i]; 102 if (phys >= res->start && phys <= res->end) {
244 if (phys + size - 1 <= res->end) 103 if (phys + size - 1 <= res->end)
245 return phys; 104 return phys;
246 } 105 }
@@ -576,152 +435,100 @@ static const struct file_operations dax_fops = {
576 .mmap_supported_flags = MAP_SYNC, 435 .mmap_supported_flags = MAP_SYNC,
577}; 436};
578 437
579static void dev_dax_release(struct device *dev) 438static void dev_dax_cdev_del(void *cdev)
580{ 439{
581 struct dev_dax *dev_dax = to_dev_dax(dev); 440 cdev_del(cdev);
582 struct dax_region *dax_region = dev_dax->region;
583 struct dax_device *dax_dev = dev_dax->dax_dev;
584
585 if (dev_dax->id >= 0)
586 ida_simple_remove(&dax_region->ida, dev_dax->id);
587 dax_region_put(dax_region);
588 put_dax(dax_dev);
589 kfree(dev_dax);
590} 441}
591 442
592static void kill_dev_dax(struct dev_dax *dev_dax) 443static void dev_dax_kill(void *dev_dax)
593{ 444{
594 struct dax_device *dax_dev = dev_dax->dax_dev; 445 kill_dev_dax(dev_dax);
595 struct inode *inode = dax_inode(dax_dev);
596
597 kill_dax(dax_dev);
598 unmap_mapping_range(inode->i_mapping, 0, 0, 1);
599} 446}
600 447
601static void unregister_dev_dax(void *dev) 448int dev_dax_probe(struct device *dev)
602{ 449{
603 struct dev_dax *dev_dax = to_dev_dax(dev); 450 struct dev_dax *dev_dax = to_dev_dax(dev);
604 struct dax_device *dax_dev = dev_dax->dax_dev; 451 struct dax_device *dax_dev = dev_dax->dax_dev;
605 struct inode *inode = dax_inode(dax_dev); 452 struct resource *res = &dev_dax->region->res;
606 struct cdev *cdev = inode->i_cdev;
607
608 dev_dbg(dev, "trace\n");
609
610 kill_dev_dax(dev_dax);
611 cdev_device_del(cdev, dev);
612 put_device(dev);
613}
614
615struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region,
616 int id, struct resource *res, int count)
617{
618 struct device *parent = dax_region->dev;
619 struct dax_device *dax_dev;
620 struct dev_dax *dev_dax;
621 struct inode *inode; 453 struct inode *inode;
622 struct device *dev;
623 struct cdev *cdev; 454 struct cdev *cdev;
624 int rc, i; 455 void *addr;
625 456 int rc;
626 if (!count) 457
627 return ERR_PTR(-EINVAL); 458 /* 1:1 map region resource range to device-dax instance range */
628 459 if (!devm_request_mem_region(dev, res->start, resource_size(res),
629 dev_dax = kzalloc(struct_size(dev_dax, res, count), GFP_KERNEL); 460 dev_name(dev))) {
630 if (!dev_dax) 461 dev_warn(dev, "could not reserve region %pR\n", res);
631 return ERR_PTR(-ENOMEM); 462 return -EBUSY;
632
633 for (i = 0; i < count; i++) {
634 if (!IS_ALIGNED(res[i].start, dax_region->align)
635 || !IS_ALIGNED(resource_size(&res[i]),
636 dax_region->align)) {
637 rc = -EINVAL;
638 break;
639 }
640 dev_dax->res[i].start = res[i].start;
641 dev_dax->res[i].end = res[i].end;
642 } 463 }
643 464
644 if (i < count) 465 init_completion(&dev_dax->cmp);
645 goto err_id; 466 rc = percpu_ref_init(&dev_dax->ref, dev_dax_percpu_release, 0,
467 GFP_KERNEL);
468 if (rc)
469 return rc;
646 470
647 if (id < 0) { 471 rc = devm_add_action_or_reset(dev, dev_dax_percpu_exit, &dev_dax->ref);
648 id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL); 472 if (rc)
649 dev_dax->id = id; 473 return rc;
650 if (id < 0) {
651 rc = id;
652 goto err_id;
653 }
654 } else {
655 /* region provider owns @id lifetime */
656 dev_dax->id = -1;
657 }
658 474
659 /* 475 dev_dax->pgmap.ref = &dev_dax->ref;
660 * No 'host' or dax_operations since there is no access to this 476 dev_dax->pgmap.kill = dev_dax_percpu_kill;
661 * device outside of mmap of the resulting character device. 477 addr = devm_memremap_pages(dev, &dev_dax->pgmap);
662 */ 478 if (IS_ERR(addr)) {
663 dax_dev = alloc_dax(dev_dax, NULL, NULL); 479 devm_remove_action(dev, dev_dax_percpu_exit, &dev_dax->ref);
664 if (!dax_dev) { 480 percpu_ref_exit(&dev_dax->ref);
665 rc = -ENOMEM; 481 return PTR_ERR(addr);
666 goto err_dax;
667 } 482 }
668 483
669 /* from here on we're committed to teardown via dax_dev_release() */
670 dev = &dev_dax->dev;
671 device_initialize(dev);
672
673 inode = dax_inode(dax_dev); 484 inode = dax_inode(dax_dev);
674 cdev = inode->i_cdev; 485 cdev = inode->i_cdev;
675 cdev_init(cdev, &dax_fops); 486 cdev_init(cdev, &dax_fops);
676 cdev->owner = parent->driver->owner; 487 if (dev->class) {
677 488 /* for the CONFIG_DEV_DAX_PMEM_COMPAT case */
678 dev_dax->num_resources = count; 489 cdev->owner = dev->parent->driver->owner;
679 dev_dax->dax_dev = dax_dev; 490 } else
680 dev_dax->region = dax_region; 491 cdev->owner = dev->driver->owner;
681 kref_get(&dax_region->kref); 492 cdev_set_parent(cdev, &dev->kobj);
682 493 rc = cdev_add(cdev, dev->devt, 1);
683 dev->devt = inode->i_rdev;
684 dev->class = dax_class;
685 dev->parent = parent;
686 dev->groups = dax_attribute_groups;
687 dev->release = dev_dax_release;
688 dev_set_name(dev, "dax%d.%d", dax_region->id, id);
689
690 rc = cdev_device_add(cdev, dev);
691 if (rc) {
692 kill_dev_dax(dev_dax);
693 put_device(dev);
694 return ERR_PTR(rc);
695 }
696
697 rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev);
698 if (rc) 494 if (rc)
699 return ERR_PTR(rc); 495 return rc;
700 496
701 return dev_dax; 497 rc = devm_add_action_or_reset(dev, dev_dax_cdev_del, cdev);
498 if (rc)
499 return rc;
702 500
703 err_dax: 501 run_dax(dax_dev);
704 if (dev_dax->id >= 0) 502 return devm_add_action_or_reset(dev, dev_dax_kill, dev_dax);
705 ida_simple_remove(&dax_region->ida, dev_dax->id); 503}
706 err_id: 504EXPORT_SYMBOL_GPL(dev_dax_probe);
707 kfree(dev_dax);
708 505
709 return ERR_PTR(rc); 506static int dev_dax_remove(struct device *dev)
507{
508 /* all probe actions are unwound by devm */
509 return 0;
710} 510}
711EXPORT_SYMBOL_GPL(devm_create_dev_dax); 511
512static struct dax_device_driver device_dax_driver = {
513 .drv = {
514 .probe = dev_dax_probe,
515 .remove = dev_dax_remove,
516 },
517 .match_always = 1,
518};
712 519
713static int __init dax_init(void) 520static int __init dax_init(void)
714{ 521{
715 dax_class = class_create(THIS_MODULE, "dax"); 522 return dax_driver_register(&device_dax_driver);
716 return PTR_ERR_OR_ZERO(dax_class);
717} 523}
718 524
719static void __exit dax_exit(void) 525static void __exit dax_exit(void)
720{ 526{
721 class_destroy(dax_class); 527 dax_driver_unregister(&device_dax_driver);
722} 528}
723 529
724MODULE_AUTHOR("Intel Corporation"); 530MODULE_AUTHOR("Intel Corporation");
725MODULE_LICENSE("GPL v2"); 531MODULE_LICENSE("GPL v2");
726subsys_initcall(dax_init); 532module_init(dax_init);
727module_exit(dax_exit); 533module_exit(dax_exit);
534MODULE_ALIAS_DAX_DEVICE(0);
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
new file mode 100644
index 000000000000..a02318c6d28a
--- /dev/null
+++ b/drivers/dax/kmem.c
@@ -0,0 +1,108 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2016-2019 Intel Corporation. All rights reserved. */
3#include <linux/memremap.h>
4#include <linux/pagemap.h>
5#include <linux/memory.h>
6#include <linux/module.h>
7#include <linux/device.h>
8#include <linux/pfn_t.h>
9#include <linux/slab.h>
10#include <linux/dax.h>
11#include <linux/fs.h>
12#include <linux/mm.h>
13#include <linux/mman.h>
14#include "dax-private.h"
15#include "bus.h"
16
17int dev_dax_kmem_probe(struct device *dev)
18{
19 struct dev_dax *dev_dax = to_dev_dax(dev);
20 struct resource *res = &dev_dax->region->res;
21 resource_size_t kmem_start;
22 resource_size_t kmem_size;
23 resource_size_t kmem_end;
24 struct resource *new_res;
25 int numa_node;
26 int rc;
27
28 /*
29 * Ensure good NUMA information for the persistent memory.
30 * Without this check, there is a risk that slow memory
31 * could be mixed in a node with faster memory, causing
32 * unavoidable performance issues.
33 */
34 numa_node = dev_dax->target_node;
35 if (numa_node < 0) {
36 dev_warn(dev, "rejecting DAX region %pR with invalid node: %d\n",
37 res, numa_node);
38 return -EINVAL;
39 }
40
41 /* Hotplug starting at the beginning of the next block: */
42 kmem_start = ALIGN(res->start, memory_block_size_bytes());
43
44 kmem_size = resource_size(res);
45 /* Adjust the size down to compensate for moving up kmem_start: */
46 kmem_size -= kmem_start - res->start;
47 /* Align the size down to cover only complete blocks: */
48 kmem_size &= ~(memory_block_size_bytes() - 1);
49 kmem_end = kmem_start + kmem_size;
50
51 /* Region is permanently reserved. Hot-remove not yet implemented. */
52 new_res = request_mem_region(kmem_start, kmem_size, dev_name(dev));
53 if (!new_res) {
54 dev_warn(dev, "could not reserve region [%pa-%pa]\n",
55 &kmem_start, &kmem_end);
56 return -EBUSY;
57 }
58
59 /*
60 * Set flags appropriate for System RAM. Leave ..._BUSY clear
61 * so that add_memory() can add a child resource. Do not
62 * inherit flags from the parent since it may set new flags
63 * unknown to us that will break add_memory() below.
64 */
65 new_res->flags = IORESOURCE_SYSTEM_RAM;
66 new_res->name = dev_name(dev);
67
68 rc = add_memory(numa_node, new_res->start, resource_size(new_res));
69 if (rc)
70 return rc;
71
72 return 0;
73}
74
/* ->remove() stub: hotplugged System RAM cannot be taken back (yet) */
static int dev_dax_kmem_remove(struct device *dev)
{
	/*
	 * Purposely leak the request_mem_region() for the device-dax
	 * range and return '0' to ->remove() attempts. The removal of
	 * the device from the driver always succeeds, but the region
	 * is permanently pinned as reserved by the unreleased
	 * request_mem_region().
	 */
	return 0;
}
86
/*
 * No .match_always here: unlike the default device-dax driver, kmem
 * presumably only binds to devices explicitly assigned to it — see the
 * single-default-driver accounting in __dax_driver_register().
 */
static struct dax_device_driver device_dax_kmem_driver = {
	.drv = {
		.probe = dev_dax_kmem_probe,
		.remove = dev_dax_kmem_remove,
	},
};

/* module entry: register on the dax bus */
static int __init dax_kmem_init(void)
{
	return dax_driver_register(&device_dax_kmem_driver);
}

/* module exit: unregister; any added memory stays (see remove stub) */
static void __exit dax_kmem_exit(void)
{
	dax_driver_unregister(&device_dax_kmem_driver);
}
103
104MODULE_AUTHOR("Intel Corporation");
105MODULE_LICENSE("GPL v2");
106module_init(dax_kmem_init);
107module_exit(dax_kmem_exit);
108MODULE_ALIAS_DAX_DEVICE(0);
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
deleted file mode 100644
index 2c1f459c0c63..000000000000
--- a/drivers/dax/pmem.c
+++ /dev/null
@@ -1,153 +0,0 @@
1/*
2 * Copyright(c) 2016 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13#include <linux/percpu-refcount.h>
14#include <linux/memremap.h>
15#include <linux/module.h>
16#include <linux/pfn_t.h>
17#include "../nvdimm/pfn.h"
18#include "../nvdimm/nd.h"
19#include "device-dax.h"
20
21struct dax_pmem {
22 struct device *dev;
23 struct percpu_ref ref;
24 struct dev_pagemap pgmap;
25 struct completion cmp;
26};
27
28static struct dax_pmem *to_dax_pmem(struct percpu_ref *ref)
29{
30 return container_of(ref, struct dax_pmem, ref);
31}
32
33static void dax_pmem_percpu_release(struct percpu_ref *ref)
34{
35 struct dax_pmem *dax_pmem = to_dax_pmem(ref);
36
37 dev_dbg(dax_pmem->dev, "trace\n");
38 complete(&dax_pmem->cmp);
39}
40
41static void dax_pmem_percpu_exit(void *data)
42{
43 struct percpu_ref *ref = data;
44 struct dax_pmem *dax_pmem = to_dax_pmem(ref);
45
46 dev_dbg(dax_pmem->dev, "trace\n");
47 wait_for_completion(&dax_pmem->cmp);
48 percpu_ref_exit(ref);
49}
50
51static void dax_pmem_percpu_kill(struct percpu_ref *ref)
52{
53 struct dax_pmem *dax_pmem = to_dax_pmem(ref);
54
55 dev_dbg(dax_pmem->dev, "trace\n");
56 percpu_ref_kill(ref);
57}
58
59static int dax_pmem_probe(struct device *dev)
60{
61 void *addr;
62 struct resource res;
63 int rc, id, region_id;
64 struct nd_pfn_sb *pfn_sb;
65 struct dev_dax *dev_dax;
66 struct dax_pmem *dax_pmem;
67 struct nd_namespace_io *nsio;
68 struct dax_region *dax_region;
69 struct nd_namespace_common *ndns;
70 struct nd_dax *nd_dax = to_nd_dax(dev);
71 struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
72
73 ndns = nvdimm_namespace_common_probe(dev);
74 if (IS_ERR(ndns))
75 return PTR_ERR(ndns);
76 nsio = to_nd_namespace_io(&ndns->dev);
77
78 dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL);
79 if (!dax_pmem)
80 return -ENOMEM;
81
82 /* parse the 'pfn' info block via ->rw_bytes */
83 rc = devm_nsio_enable(dev, nsio);
84 if (rc)
85 return rc;
86 rc = nvdimm_setup_pfn(nd_pfn, &dax_pmem->pgmap);
87 if (rc)
88 return rc;
89 devm_nsio_disable(dev, nsio);
90
91 pfn_sb = nd_pfn->pfn_sb;
92
93 if (!devm_request_mem_region(dev, nsio->res.start,
94 resource_size(&nsio->res),
95 dev_name(&ndns->dev))) {
96 dev_warn(dev, "could not reserve region %pR\n", &nsio->res);
97 return -EBUSY;
98 }
99
100 dax_pmem->dev = dev;
101 init_completion(&dax_pmem->cmp);
102 rc = percpu_ref_init(&dax_pmem->ref, dax_pmem_percpu_release, 0,
103 GFP_KERNEL);
104 if (rc)
105 return rc;
106
107 rc = devm_add_action(dev, dax_pmem_percpu_exit, &dax_pmem->ref);
108 if (rc) {
109 percpu_ref_exit(&dax_pmem->ref);
110 return rc;
111 }
112
113 dax_pmem->pgmap.ref = &dax_pmem->ref;
114 dax_pmem->pgmap.kill = dax_pmem_percpu_kill;
115 addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
116 if (IS_ERR(addr))
117 return PTR_ERR(addr);
118
119 /* adjust the dax_region resource to the start of data */
120 memcpy(&res, &dax_pmem->pgmap.res, sizeof(res));
121 res.start += le64_to_cpu(pfn_sb->dataoff);
122
123 rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
124 if (rc != 2)
125 return -EINVAL;
126
127 dax_region = alloc_dax_region(dev, region_id, &res,
128 le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
129 if (!dax_region)
130 return -ENOMEM;
131
132 /* TODO: support for subdividing a dax region... */
133 dev_dax = devm_create_dev_dax(dax_region, id, &res, 1);
134
135 /* child dev_dax instances now own the lifetime of the dax_region */
136 dax_region_put(dax_region);
137
138 return PTR_ERR_OR_ZERO(dev_dax);
139}
140
141static struct nd_device_driver dax_pmem_driver = {
142 .probe = dax_pmem_probe,
143 .drv = {
144 .name = "dax_pmem",
145 },
146 .type = ND_DRIVER_DAX_PMEM,
147};
148
149module_nd_driver(dax_pmem_driver);
150
151MODULE_LICENSE("GPL v2");
152MODULE_AUTHOR("Intel Corporation");
153MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
diff --git a/drivers/dax/pmem/Makefile b/drivers/dax/pmem/Makefile
new file mode 100644
index 000000000000..e2e79bd3fdcf
--- /dev/null
+++ b/drivers/dax/pmem/Makefile
@@ -0,0 +1,7 @@
1obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
2obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o
3obj-$(CONFIG_DEV_DAX_PMEM_COMPAT) += dax_pmem_compat.o
4
5dax_pmem-y := pmem.o
6dax_pmem_core-y := core.o
7dax_pmem_compat-y := compat.o
diff --git a/drivers/dax/pmem/compat.c b/drivers/dax/pmem/compat.c
new file mode 100644
index 000000000000..d7b15e6f30c5
--- /dev/null
+++ b/drivers/dax/pmem/compat.c
@@ -0,0 +1,73 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
3#include <linux/percpu-refcount.h>
4#include <linux/memremap.h>
5#include <linux/module.h>
6#include <linux/pfn_t.h>
7#include <linux/nd.h>
8#include "../bus.h"
9
/* we need the private definitions to implement compat support */
11#include "../dax-private.h"
12
/*
 * Probe a legacy /sys/class/dax device: create the dev_dax through the
 * shared pmem core, then run the common dev_dax probe on it inside a
 * devres group so that compat removal can unwind exactly what this
 * probe acquired (see dax_pmem_compat_remove()).
 */
static int dax_pmem_compat_probe(struct device *dev)
{
	struct dev_dax *dev_dax = __dax_pmem_probe(dev, DEV_DAX_CLASS);
	int rc;

	if (IS_ERR(dev_dax))
		return PTR_ERR(dev_dax);

	/* collect all of the child's devres under one releasable group */
	if (!devres_open_group(&dev_dax->dev, dev_dax, GFP_KERNEL))
		return -ENOMEM;

	/* NOTE(review): probe runs under the device lock here, presumably
	 * to mirror the driver core calling ->probe — confirm */
	device_lock(&dev_dax->dev);
	rc = dev_dax_probe(&dev_dax->dev);
	device_unlock(&dev_dax->dev);

	devres_close_group(&dev_dax->dev, dev_dax);
	/* on failure, immediately unwind everything probe acquired */
	if (rc)
		devres_release_group(&dev_dax->dev, dev_dax);

	return rc;
}
34
/*
 * Per-child callback: release the devres group that was opened (keyed
 * by the dev_dax pointer) in dax_pmem_compat_probe().
 */
static int dax_pmem_compat_release(struct device *dev, void *data)
{
	device_lock(dev);
	devres_release_group(dev, to_dev_dax(dev));
	device_unlock(dev);

	return 0;
}

/* unwind every child dev_dax created by dax_pmem_compat_probe() */
static int dax_pmem_compat_remove(struct device *dev)
{
	device_for_each_child(dev, NULL, dax_pmem_compat_release);
	return 0;
}
49
/*
 * nvdimm-bus driver claiming the same ND_DRIVER_DAX_PMEM device type as
 * the native dax_pmem driver, but presenting the legacy class interface.
 */
static struct nd_device_driver dax_pmem_compat_driver = {
	.probe = dax_pmem_compat_probe,
	.remove = dax_pmem_compat_remove,
	.drv = {
		.name = "dax_pmem_compat",
	},
	.type = ND_DRIVER_DAX_PMEM,
};

/* module entry: register on the nvdimm bus */
static int __init dax_pmem_compat_init(void)
{
	return nd_driver_register(&dax_pmem_compat_driver);
}
module_init(dax_pmem_compat_init);

static void __exit dax_pmem_compat_exit(void)
{
	driver_unregister(&dax_pmem_compat_driver.drv);
}
module_exit(dax_pmem_compat_exit);
70
71MODULE_LICENSE("GPL v2");
72MODULE_AUTHOR("Intel Corporation");
73MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
new file mode 100644
index 000000000000..f71019ce0647
--- /dev/null
+++ b/drivers/dax/pmem/core.c
@@ -0,0 +1,71 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
3#include <linux/memremap.h>
4#include <linux/module.h>
5#include <linux/pfn_t.h>
6#include "../../nvdimm/pfn.h"
7#include "../../nvdimm/nd.h"
8#include "../bus.h"
9
/*
 * __dax_pmem_probe - translate an nvdimm 'dax' namespace into a dev_dax
 * @dev: the nd_dax device being probed on the nvdimm bus
 * @subsys: which registration path the resulting dev_dax takes
 *          (DEV_DAX_BUS for the dax bus, DEV_DAX_CLASS for compat)
 *
 * Validates the namespace, parses the on-media 'pfn' info block to
 * locate the metadata/data split, reserves the metadata range, sets up
 * a dax_region covering the data, and creates the dev_dax child.
 *
 * Returns the new dev_dax on success or an ERR_PTR() on failure.  All
 * acquired resources are device-managed against @dev.
 */
struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
{
	struct resource res;
	int rc, id, region_id;
	resource_size_t offset;
	struct nd_pfn_sb *pfn_sb;
	struct dev_dax *dev_dax;
	struct nd_namespace_io *nsio;
	struct dax_region *dax_region;
	struct dev_pagemap pgmap = { 0 };
	struct nd_namespace_common *ndns;
	struct nd_dax *nd_dax = to_nd_dax(dev);
	struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
	struct nd_region *nd_region = to_nd_region(dev->parent);

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return ERR_CAST(ndns);
	nsio = to_nd_namespace_io(&ndns->dev);

	/* parse the 'pfn' info block via ->rw_bytes */
	rc = devm_nsio_enable(dev, nsio);
	if (rc)
		return ERR_PTR(rc);
	rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
	if (rc)
		return ERR_PTR(rc);
	/* raw access only needed while reading the info block */
	devm_nsio_disable(dev, nsio);

	/* reserve the metadata area, device-dax will reserve the data */
	pfn_sb = nd_pfn->pfn_sb;
	offset = le64_to_cpu(pfn_sb->dataoff);
	if (!devm_request_mem_region(dev, nsio->res.start, offset,
				dev_name(&ndns->dev))) {
		dev_warn(dev, "could not reserve metadata\n");
		return ERR_PTR(-EBUSY);
	}

	/* derive region/device ids from the "namespaceX.Y" device name */
	rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
	if (rc != 2)
		return ERR_PTR(-EINVAL);

	/* adjust the dax_region resource to the start of data */
	memcpy(&res, &pgmap.res, sizeof(res));
	res.start += offset;
	dax_region = alloc_dax_region(dev, region_id, &res,
			nd_region->target_node, le32_to_cpu(pfn_sb->align),
			PFN_DEV|PFN_MAP);
	if (!dax_region)
		return ERR_PTR(-ENOMEM);

	dev_dax = __devm_create_dev_dax(dax_region, id, &pgmap, subsys);

	/* child dev_dax instances now own the lifetime of the dax_region */
	dax_region_put(dax_region);

	return dev_dax;
}
EXPORT_SYMBOL_GPL(__dax_pmem_probe);
69
70MODULE_LICENSE("GPL v2");
71MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/dax/pmem/pmem.c b/drivers/dax/pmem/pmem.c
new file mode 100644
index 000000000000..0ae4238a0ef8
--- /dev/null
+++ b/drivers/dax/pmem/pmem.c
@@ -0,0 +1,40 @@
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
3#include <linux/percpu-refcount.h>
4#include <linux/memremap.h>
5#include <linux/module.h>
6#include <linux/pfn_t.h>
7#include <linux/nd.h>
8#include "../bus.h"
9
10static int dax_pmem_probe(struct device *dev)
11{
12 return PTR_ERR_OR_ZERO(__dax_pmem_probe(dev, DEV_DAX_BUS));
13}
14
/*
 * nvdimm-bus driver that binds ND_DRIVER_DAX_PMEM devices and registers
 * them on the dax bus (DEV_DAX_BUS).  No remove hook: teardown is fully
 * device-managed.
 */
static struct nd_device_driver dax_pmem_driver = {
	.probe = dax_pmem_probe,
	.drv = {
		.name = "dax_pmem",
	},
	.type = ND_DRIVER_DAX_PMEM,
};
22
/* Module init: register the dax_pmem driver with the nvdimm bus. */
static int __init dax_pmem_init(void)
{
	return nd_driver_register(&dax_pmem_driver);
}
module_init(dax_pmem_init);
28
/* Module exit: unregister the dax_pmem driver from the nvdimm bus. */
static void __exit dax_pmem_exit(void)
{
	driver_unregister(&dax_pmem_driver.drv);
}
module_exit(dax_pmem_exit);
34
35MODULE_LICENSE("GPL v2");
36MODULE_AUTHOR("Intel Corporation");
37#if !IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)
38/* For compat builds, don't load this module by default */
39MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
40#endif
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 0cb8c30ea278..0a339b85133e 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -22,6 +22,7 @@
22#include <linux/uio.h> 22#include <linux/uio.h>
23#include <linux/dax.h> 23#include <linux/dax.h>
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include "dax-private.h"
25 26
26static dev_t dax_devt; 27static dev_t dax_devt;
27DEFINE_STATIC_SRCU(dax_srcu); 28DEFINE_STATIC_SRCU(dax_srcu);
@@ -383,11 +384,15 @@ void kill_dax(struct dax_device *dax_dev)
383 spin_lock(&dax_host_lock); 384 spin_lock(&dax_host_lock);
384 hlist_del_init(&dax_dev->list); 385 hlist_del_init(&dax_dev->list);
385 spin_unlock(&dax_host_lock); 386 spin_unlock(&dax_host_lock);
386
387 dax_dev->private = NULL;
388} 387}
389EXPORT_SYMBOL_GPL(kill_dax); 388EXPORT_SYMBOL_GPL(kill_dax);
390 389
390void run_dax(struct dax_device *dax_dev)
391{
392 set_bit(DAXDEV_ALIVE, &dax_dev->flags);
393}
394EXPORT_SYMBOL_GPL(run_dax);
395
391static struct inode *dax_alloc_inode(struct super_block *sb) 396static struct inode *dax_alloc_inode(struct super_block *sb)
392{ 397{
393 struct dax_device *dax_dev; 398 struct dax_device *dax_dev;
@@ -602,6 +607,8 @@ EXPORT_SYMBOL_GPL(dax_inode);
602 607
603void *dax_get_private(struct dax_device *dax_dev) 608void *dax_get_private(struct dax_device *dax_dev)
604{ 609{
610 if (!test_bit(DAXDEV_ALIVE, &dax_dev->flags))
611 return NULL;
605 return dax_dev->private; 612 return dax_dev->private;
606} 613}
607EXPORT_SYMBOL_GPL(dax_get_private); 614EXPORT_SYMBOL_GPL(dax_get_private);
@@ -615,7 +622,7 @@ static void init_once(void *_dax_dev)
615 inode_init_once(inode); 622 inode_init_once(inode);
616} 623}
617 624
618static int __dax_fs_init(void) 625static int dax_fs_init(void)
619{ 626{
620 int rc; 627 int rc;
621 628
@@ -647,35 +654,45 @@ static int __dax_fs_init(void)
647 return rc; 654 return rc;
648} 655}
649 656
650static void __dax_fs_exit(void) 657static void dax_fs_exit(void)
651{ 658{
652 kern_unmount(dax_mnt); 659 kern_unmount(dax_mnt);
653 unregister_filesystem(&dax_fs_type); 660 unregister_filesystem(&dax_fs_type);
654 kmem_cache_destroy(dax_cache); 661 kmem_cache_destroy(dax_cache);
655} 662}
656 663
657static int __init dax_fs_init(void) 664static int __init dax_core_init(void)
658{ 665{
659 int rc; 666 int rc;
660 667
661 rc = __dax_fs_init(); 668 rc = dax_fs_init();
662 if (rc) 669 if (rc)
663 return rc; 670 return rc;
664 671
665 rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax"); 672 rc = alloc_chrdev_region(&dax_devt, 0, MINORMASK+1, "dax");
666 if (rc) 673 if (rc)
667 __dax_fs_exit(); 674 goto err_chrdev;
668 return rc; 675
676 rc = dax_bus_init();
677 if (rc)
678 goto err_bus;
679 return 0;
680
681err_bus:
682 unregister_chrdev_region(dax_devt, MINORMASK+1);
683err_chrdev:
684 dax_fs_exit();
685 return 0;
669} 686}
670 687
671static void __exit dax_fs_exit(void) 688static void __exit dax_core_exit(void)
672{ 689{
673 unregister_chrdev_region(dax_devt, MINORMASK+1); 690 unregister_chrdev_region(dax_devt, MINORMASK+1);
674 ida_destroy(&dax_minor_ida); 691 ida_destroy(&dax_minor_ida);
675 __dax_fs_exit(); 692 dax_fs_exit();
676} 693}
677 694
678MODULE_AUTHOR("Intel Corporation"); 695MODULE_AUTHOR("Intel Corporation");
679MODULE_LICENSE("GPL v2"); 696MODULE_LICENSE("GPL v2");
680subsys_initcall(dax_fs_init); 697subsys_initcall(dax_core_init);
681module_exit(dax_fs_exit); 698module_exit(dax_core_exit);
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 4e0eede599a8..ac0301b69593 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -1578,11 +1578,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
1578 1578
1579 dmadev->nr_channels = nr_channels; 1579 dmadev->nr_channels = nr_channels;
1580 dmadev->nr_requests = nr_requests; 1580 dmadev->nr_requests = nr_requests;
1581 ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks", 1581 device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
1582 dmadev->ahb_addr_masks, 1582 dmadev->ahb_addr_masks,
1583 count); 1583 count);
1584 if (ret)
1585 return ret;
1586 dmadev->nr_ahb_addr_masks = count; 1584 dmadev->nr_ahb_addr_masks = count;
1587 1585
1588 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1586 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
index 91b90c0cea73..12acdac85820 100644
--- a/drivers/gpio/gpio-adnp.c
+++ b/drivers/gpio/gpio-adnp.c
@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
132 if (err < 0) 132 if (err < 0)
133 goto out; 133 goto out;
134 134
135 if (err & BIT(pos)) 135 if (value & BIT(pos)) {
136 err = -EACCES; 136 err = -EPERM;
137 goto out;
138 }
137 139
138 err = 0; 140 err = 0;
139 141
diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
index 854bce4fb9e7..217507002dbc 100644
--- a/drivers/gpio/gpio-aspeed.c
+++ b/drivers/gpio/gpio-aspeed.c
@@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
1224 1224
1225 gpio->offset_timer = 1225 gpio->offset_timer =
1226 devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL); 1226 devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
1227 if (!gpio->offset_timer)
1228 return -ENOMEM;
1227 1229
1228 return aspeed_gpio_setup_irqs(gpio, pdev); 1230 return aspeed_gpio_setup_irqs(gpio, pdev);
1229} 1231}
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index 0ecd2369c2ca..a09d2f9ebacc 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
148 mutex_init(&exar_gpio->lock); 148 mutex_init(&exar_gpio->lock);
149 149
150 index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); 150 index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
151 if (index < 0)
152 goto err_destroy;
151 153
152 sprintf(exar_gpio->name, "exar_gpio%d", index); 154 sprintf(exar_gpio->name, "exar_gpio%d", index);
153 exar_gpio->gpio_chip.label = exar_gpio->name; 155 exar_gpio->gpio_chip.label = exar_gpio->name;
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 154d959e8993..b6a4efce7c92 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -204,8 +204,8 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
204 struct gpio_mockup_chip *chip; 204 struct gpio_mockup_chip *chip;
205 struct seq_file *sfile; 205 struct seq_file *sfile;
206 struct gpio_chip *gc; 206 struct gpio_chip *gc;
207 int val, cnt;
207 char buf[3]; 208 char buf[3];
208 int val, rv;
209 209
210 if (*ppos != 0) 210 if (*ppos != 0)
211 return 0; 211 return 0;
@@ -216,13 +216,9 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
216 gc = &chip->gc; 216 gc = &chip->gc;
217 217
218 val = gpio_mockup_get(gc, priv->offset); 218 val = gpio_mockup_get(gc, priv->offset);
219 snprintf(buf, sizeof(buf), "%d\n", val); 219 cnt = snprintf(buf, sizeof(buf), "%d\n", val);
220 220
221 rv = copy_to_user(usr_buf, buf, sizeof(buf)); 221 return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt);
222 if (rv)
223 return rv;
224
225 return sizeof(buf) - 1;
226} 222}
227 223
228static ssize_t gpio_mockup_debugfs_write(struct file *file, 224static ssize_t gpio_mockup_debugfs_write(struct file *file,
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 8b9c3ab70f6e..6a3ec575a404 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -120,7 +120,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
120 * to determine if the flags should have inverted semantics. 120 * to determine if the flags should have inverted semantics.
121 */ 121 */
122 if (IS_ENABLED(CONFIG_SPI_MASTER) && 122 if (IS_ENABLED(CONFIG_SPI_MASTER) &&
123 of_property_read_bool(np, "cs-gpios")) { 123 of_property_read_bool(np, "cs-gpios") &&
124 !strcmp(propname, "cs-gpios")) {
124 struct device_node *child; 125 struct device_node *child;
125 u32 cs; 126 u32 cs;
126 int ret; 127 int ret;
@@ -142,16 +143,16 @@ static void of_gpio_flags_quirks(struct device_node *np,
142 * conflict and the "spi-cs-high" flag will 143 * conflict and the "spi-cs-high" flag will
143 * take precedence. 144 * take precedence.
144 */ 145 */
145 if (of_property_read_bool(np, "spi-cs-high")) { 146 if (of_property_read_bool(child, "spi-cs-high")) {
146 if (*flags & OF_GPIO_ACTIVE_LOW) { 147 if (*flags & OF_GPIO_ACTIVE_LOW) {
147 pr_warn("%s GPIO handle specifies active low - ignored\n", 148 pr_warn("%s GPIO handle specifies active low - ignored\n",
148 of_node_full_name(np)); 149 of_node_full_name(child));
149 *flags &= ~OF_GPIO_ACTIVE_LOW; 150 *flags &= ~OF_GPIO_ACTIVE_LOW;
150 } 151 }
151 } else { 152 } else {
152 if (!(*flags & OF_GPIO_ACTIVE_LOW)) 153 if (!(*flags & OF_GPIO_ACTIVE_LOW))
153 pr_info("%s enforce active low on chipselect handle\n", 154 pr_info("%s enforce active low on chipselect handle\n",
154 of_node_full_name(np)); 155 of_node_full_name(child));
155 *flags |= OF_GPIO_ACTIVE_LOW; 156 *flags |= OF_GPIO_ACTIVE_LOW;
156 } 157 }
157 break; 158 break;
@@ -717,7 +718,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
717 718
718 of_node_get(chip->of_node); 719 of_node_get(chip->of_node);
719 720
720 return of_gpiochip_scan_gpios(chip); 721 status = of_gpiochip_scan_gpios(chip);
722 if (status) {
723 of_node_put(chip->of_node);
724 gpiochip_remove_pin_ranges(chip);
725 }
726
727 return status;
721} 728}
722 729
723void of_gpiochip_remove(struct gpio_chip *chip) 730void of_gpiochip_remove(struct gpio_chip *chip)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 144af0733581..0495bf1d480a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2776,7 +2776,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
2776 } 2776 }
2777 2777
2778 config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce); 2778 config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
2779 return gpio_set_config(chip, gpio_chip_hwgpio(desc), config); 2779 return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
2780} 2780}
2781EXPORT_SYMBOL_GPL(gpiod_set_debounce); 2781EXPORT_SYMBOL_GPL(gpiod_set_debounce);
2782 2782
@@ -2813,7 +2813,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
2813 packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE, 2813 packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
2814 !transitory); 2814 !transitory);
2815 gpio = gpio_chip_hwgpio(desc); 2815 gpio = gpio_chip_hwgpio(desc);
2816 rc = gpio_set_config(chip, gpio, packed); 2816 rc = chip->set_config(chip, gpio, packed);
2817 if (rc == -ENOTSUPP) { 2817 if (rc == -ENOTSUPP) {
2818 dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n", 2818 dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
2819 gpio); 2819 gpio);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4f8fb4ecde34..ac0d646a7b74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3625,6 +3625,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
3625 struct pci_dev *pdev = adev->pdev; 3625 struct pci_dev *pdev = adev->pdev;
3626 enum pci_bus_speed cur_speed; 3626 enum pci_bus_speed cur_speed;
3627 enum pcie_link_width cur_width; 3627 enum pcie_link_width cur_width;
3628 u32 ret = 1;
3628 3629
3629 *speed = PCI_SPEED_UNKNOWN; 3630 *speed = PCI_SPEED_UNKNOWN;
3630 *width = PCIE_LNK_WIDTH_UNKNOWN; 3631 *width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -3632,6 +3633,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
3632 while (pdev) { 3633 while (pdev) {
3633 cur_speed = pcie_get_speed_cap(pdev); 3634 cur_speed = pcie_get_speed_cap(pdev);
3634 cur_width = pcie_get_width_cap(pdev); 3635 cur_width = pcie_get_width_cap(pdev);
3636 ret = pcie_bandwidth_available(adev->pdev, NULL,
3637 NULL, &cur_width);
3638 if (!ret)
3639 cur_width = PCIE_LNK_WIDTH_RESRV;
3635 3640
3636 if (cur_speed != PCI_SPEED_UNKNOWN) { 3641 if (cur_speed != PCI_SPEED_UNKNOWN) {
3637 if (*speed == PCI_SPEED_UNKNOWN) 3642 if (*speed == PCI_SPEED_UNKNOWN)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ead851413c0a..16fcb56c232b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -700,6 +700,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
700 struct amdgpu_vm_bo_base *bo_base, *tmp; 700 struct amdgpu_vm_bo_base *bo_base, *tmp;
701 int r = 0; 701 int r = 0;
702 702
703 vm->bulk_moveable &= list_empty(&vm->evicted);
704
703 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { 705 list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
704 struct amdgpu_bo *bo = bo_base->bo; 706 struct amdgpu_bo *bo = bo_base->bo;
705 707
@@ -947,10 +949,6 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
947 if (r) 949 if (r)
948 return r; 950 return r;
949 951
950 r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
951 if (r)
952 goto error_free_pt;
953
954 if (vm->use_cpu_for_update) { 952 if (vm->use_cpu_for_update) {
955 r = amdgpu_bo_kmap(pt, NULL); 953 r = amdgpu_bo_kmap(pt, NULL);
956 if (r) 954 if (r)
@@ -963,6 +961,10 @@ int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
963 pt->parent = amdgpu_bo_ref(cursor.parent->base.bo); 961 pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
964 962
965 amdgpu_vm_bo_base_init(&entry->base, vm, pt); 963 amdgpu_vm_bo_base_init(&entry->base, vm, pt);
964
965 r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
966 if (r)
967 goto error_free_pt;
966 } 968 }
967 969
968 return 0; 970 return 0;
@@ -3033,13 +3035,14 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
3033 if (r) 3035 if (r)
3034 goto error_unreserve; 3036 goto error_unreserve;
3035 3037
3038 amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
3039
3036 r = amdgpu_vm_clear_bo(adev, vm, root, 3040 r = amdgpu_vm_clear_bo(adev, vm, root,
3037 adev->vm_manager.root_level, 3041 adev->vm_manager.root_level,
3038 vm->pte_support_ats); 3042 vm->pte_support_ats);
3039 if (r) 3043 if (r)
3040 goto error_unreserve; 3044 goto error_unreserve;
3041 3045
3042 amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
3043 amdgpu_bo_unreserve(vm->root.base.bo); 3046 amdgpu_bo_unreserve(vm->root.base.bo);
3044 3047
3045 if (pasid) { 3048 if (pasid) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 5533f6e4f4a4..a11db2b1a63f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -220,6 +220,7 @@ static const struct soc15_reg_golden golden_settings_gc_9_1_rv2[] =
220 220
221static const struct soc15_reg_golden golden_settings_gc_9_x_common[] = 221static const struct soc15_reg_golden golden_settings_gc_9_x_common[] =
222{ 222{
223 SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_SD_CNTL, 0xffffffff, 0x000001ff),
223 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000), 224 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
224 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382) 225 SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382)
225}; 226};
@@ -2404,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2404 /* disable CG */ 2405 /* disable CG */
2405 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); 2406 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2406 2407
2407 adev->gfx.rlc.funcs->reset(adev);
2408
2409 gfx_v9_0_init_pg(adev); 2408 gfx_v9_0_init_pg(adev);
2410 2409
2411 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 2410 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 600259b4e291..2fe8397241ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -742,7 +742,7 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev)
742 } 742 }
743 743
744 ring->vm_inv_eng = inv_eng - 1; 744 ring->vm_inv_eng = inv_eng - 1;
745 change_bit(inv_eng - 1, (unsigned long *)(&vm_inv_engs[vmhub])); 745 vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng);
746 746
747 dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", 747 dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n",
748 ring->name, ring->vm_inv_eng, ring->funcs->vmhub); 748 ring->name, ring->vm_inv_eng, ring->funcs->vmhub);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index c63de945c021..0487e3a4e9e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -500,9 +500,7 @@ static bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
500 struct amdgpu_device *adev = psp->adev; 500 struct amdgpu_device *adev = psp->adev;
501 uint32_t reg; 501 uint32_t reg;
502 502
503 reg = smnMP1_FIRMWARE_FLAGS | 0x03b00000; 503 reg = RREG32_PCIE(smnMP1_FIRMWARE_FLAGS | 0x03b00000);
504 WREG32_SOC15(NBIO, 0, mmPCIE_INDEX2, reg);
505 reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2);
506 return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false; 504 return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
507} 505}
508 506
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 99ebcf29dcb0..ed89a101f73f 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -461,7 +461,6 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
461 461
462 switch (adev->asic_type) { 462 switch (adev->asic_type) {
463 case CHIP_VEGA10: 463 case CHIP_VEGA10:
464 case CHIP_VEGA20:
465 soc15_asic_get_baco_capability(adev, &baco_reset); 464 soc15_asic_get_baco_capability(adev, &baco_reset);
466 break; 465 break;
467 default: 466 default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 47243165a082..ae90a99909ef 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -323,57 +323,7 @@ static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
323 struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr, 323 struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
324 struct queue_properties *q) 324 struct queue_properties *q)
325{ 325{
326 uint64_t addr; 326 return init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
327 struct cik_mqd *m;
328 int retval;
329
330 retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct cik_mqd),
331 mqd_mem_obj);
332
333 if (retval != 0)
334 return -ENOMEM;
335
336 m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
337 addr = (*mqd_mem_obj)->gpu_addr;
338
339 memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
340
341 m->header = 0xC0310800;
342 m->compute_pipelinestat_enable = 1;
343 m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
344 m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
345 m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
346 m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
347
348 m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
349 PRELOAD_REQ;
350 m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
351 QUANTUM_DURATION(10);
352
353 m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
354 m->cp_mqd_base_addr_lo = lower_32_bits(addr);
355 m->cp_mqd_base_addr_hi = upper_32_bits(addr);
356
357 m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
358
359 /*
360 * Pipe Priority
361 * Identifies the pipe relative priority when this queue is connected
362 * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
363 * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
364 * 0 = CS_LOW (typically below GFX)
365 * 1 = CS_MEDIUM (typically between HP3D and GFX
366 * 2 = CS_HIGH (typically above HP3D)
367 */
368 m->cp_hqd_pipe_priority = 1;
369 m->cp_hqd_queue_priority = 15;
370
371 *mqd = m;
372 if (gart_addr)
373 *gart_addr = addr;
374 retval = mm->update_mqd(mm, m, q);
375
376 return retval;
377} 327}
378 328
379static int update_mqd_hiq(struct mqd_manager *mm, void *mqd, 329static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2f26581b93ff..81127f7d6ed1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -886,6 +886,7 @@ static void emulated_link_detect(struct dc_link *link)
886 return; 886 return;
887 } 887 }
888 888
889 /* dc_sink_create returns a new reference */
889 link->local_sink = sink; 890 link->local_sink = sink;
890 891
891 edid_status = dm_helpers_read_local_edid( 892 edid_status = dm_helpers_read_local_edid(
@@ -952,6 +953,8 @@ static int dm_resume(void *handle)
952 if (aconnector->fake_enable && aconnector->dc_link->local_sink) 953 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
953 aconnector->fake_enable = false; 954 aconnector->fake_enable = false;
954 955
956 if (aconnector->dc_sink)
957 dc_sink_release(aconnector->dc_sink);
955 aconnector->dc_sink = NULL; 958 aconnector->dc_sink = NULL;
956 amdgpu_dm_update_connector_after_detect(aconnector); 959 amdgpu_dm_update_connector_after_detect(aconnector);
957 mutex_unlock(&aconnector->hpd_lock); 960 mutex_unlock(&aconnector->hpd_lock);
@@ -1061,6 +1064,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1061 1064
1062 1065
1063 sink = aconnector->dc_link->local_sink; 1066 sink = aconnector->dc_link->local_sink;
1067 if (sink)
1068 dc_sink_retain(sink);
1064 1069
1065 /* 1070 /*
1066 * Edid mgmt connector gets first update only in mode_valid hook and then 1071 * Edid mgmt connector gets first update only in mode_valid hook and then
@@ -1085,21 +1090,24 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1085 * to it anymore after disconnect, so on next crtc to connector 1090 * to it anymore after disconnect, so on next crtc to connector
1086 * reshuffle by UMD we will get into unwanted dc_sink release 1091 * reshuffle by UMD we will get into unwanted dc_sink release
1087 */ 1092 */
1088 if (aconnector->dc_sink != aconnector->dc_em_sink) 1093 dc_sink_release(aconnector->dc_sink);
1089 dc_sink_release(aconnector->dc_sink);
1090 } 1094 }
1091 aconnector->dc_sink = sink; 1095 aconnector->dc_sink = sink;
1096 dc_sink_retain(aconnector->dc_sink);
1092 amdgpu_dm_update_freesync_caps(connector, 1097 amdgpu_dm_update_freesync_caps(connector,
1093 aconnector->edid); 1098 aconnector->edid);
1094 } else { 1099 } else {
1095 amdgpu_dm_update_freesync_caps(connector, NULL); 1100 amdgpu_dm_update_freesync_caps(connector, NULL);
1096 if (!aconnector->dc_sink) 1101 if (!aconnector->dc_sink) {
1097 aconnector->dc_sink = aconnector->dc_em_sink; 1102 aconnector->dc_sink = aconnector->dc_em_sink;
1098 else if (aconnector->dc_sink != aconnector->dc_em_sink)
1099 dc_sink_retain(aconnector->dc_sink); 1103 dc_sink_retain(aconnector->dc_sink);
1104 }
1100 } 1105 }
1101 1106
1102 mutex_unlock(&dev->mode_config.mutex); 1107 mutex_unlock(&dev->mode_config.mutex);
1108
1109 if (sink)
1110 dc_sink_release(sink);
1103 return; 1111 return;
1104 } 1112 }
1105 1113
@@ -1107,8 +1115,10 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1107 * TODO: temporary guard to look for proper fix 1115 * TODO: temporary guard to look for proper fix
1108 * if this sink is MST sink, we should not do anything 1116 * if this sink is MST sink, we should not do anything
1109 */ 1117 */
1110 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 1118 if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
1119 dc_sink_release(sink);
1111 return; 1120 return;
1121 }
1112 1122
1113 if (aconnector->dc_sink == sink) { 1123 if (aconnector->dc_sink == sink) {
1114 /* 1124 /*
@@ -1117,6 +1127,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1117 */ 1127 */
1118 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", 1128 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
1119 aconnector->connector_id); 1129 aconnector->connector_id);
1130 if (sink)
1131 dc_sink_release(sink);
1120 return; 1132 return;
1121 } 1133 }
1122 1134
@@ -1138,6 +1150,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1138 amdgpu_dm_update_freesync_caps(connector, NULL); 1150 amdgpu_dm_update_freesync_caps(connector, NULL);
1139 1151
1140 aconnector->dc_sink = sink; 1152 aconnector->dc_sink = sink;
1153 dc_sink_retain(aconnector->dc_sink);
1141 if (sink->dc_edid.length == 0) { 1154 if (sink->dc_edid.length == 0) {
1142 aconnector->edid = NULL; 1155 aconnector->edid = NULL;
1143 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); 1156 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
@@ -1158,11 +1171,15 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1158 amdgpu_dm_update_freesync_caps(connector, NULL); 1171 amdgpu_dm_update_freesync_caps(connector, NULL);
1159 drm_connector_update_edid_property(connector, NULL); 1172 drm_connector_update_edid_property(connector, NULL);
1160 aconnector->num_modes = 0; 1173 aconnector->num_modes = 0;
1174 dc_sink_release(aconnector->dc_sink);
1161 aconnector->dc_sink = NULL; 1175 aconnector->dc_sink = NULL;
1162 aconnector->edid = NULL; 1176 aconnector->edid = NULL;
1163 } 1177 }
1164 1178
1165 mutex_unlock(&dev->mode_config.mutex); 1179 mutex_unlock(&dev->mode_config.mutex);
1180
1181 if (sink)
1182 dc_sink_release(sink);
1166} 1183}
1167 1184
1168static void handle_hpd_irq(void *param) 1185static void handle_hpd_irq(void *param)
@@ -2977,6 +2994,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2977 return stream; 2994 return stream;
2978 } else { 2995 } else {
2979 sink = aconnector->dc_sink; 2996 sink = aconnector->dc_sink;
2997 dc_sink_retain(sink);
2980 } 2998 }
2981 2999
2982 stream = dc_create_stream_for_sink(sink); 3000 stream = dc_create_stream_for_sink(sink);
@@ -3042,8 +3060,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
3042 update_stream_signal(stream, sink); 3060 update_stream_signal(stream, sink);
3043 3061
3044finish: 3062finish:
3045 if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON) 3063 dc_sink_release(sink);
3046 dc_sink_release(sink);
3047 3064
3048 return stream; 3065 return stream;
3049} 3066}
@@ -3301,6 +3318,14 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
3301 dm->backlight_dev = NULL; 3318 dm->backlight_dev = NULL;
3302 } 3319 }
3303#endif 3320#endif
3321
3322 if (aconnector->dc_em_sink)
3323 dc_sink_release(aconnector->dc_em_sink);
3324 aconnector->dc_em_sink = NULL;
3325 if (aconnector->dc_sink)
3326 dc_sink_release(aconnector->dc_sink);
3327 aconnector->dc_sink = NULL;
3328
3304 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 3329 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
3305 drm_connector_unregister(connector); 3330 drm_connector_unregister(connector);
3306 drm_connector_cleanup(connector); 3331 drm_connector_cleanup(connector);
@@ -3398,10 +3423,12 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
3398 (edid->extensions + 1) * EDID_LENGTH, 3423 (edid->extensions + 1) * EDID_LENGTH,
3399 &init_params); 3424 &init_params);
3400 3425
3401 if (aconnector->base.force == DRM_FORCE_ON) 3426 if (aconnector->base.force == DRM_FORCE_ON) {
3402 aconnector->dc_sink = aconnector->dc_link->local_sink ? 3427 aconnector->dc_sink = aconnector->dc_link->local_sink ?
3403 aconnector->dc_link->local_sink : 3428 aconnector->dc_link->local_sink :
3404 aconnector->dc_em_sink; 3429 aconnector->dc_em_sink;
3430 dc_sink_retain(aconnector->dc_sink);
3431 }
3405} 3432}
3406 3433
3407static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) 3434static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
@@ -5402,9 +5429,11 @@ static void get_freesync_config_for_crtc(
5402 struct amdgpu_dm_connector *aconnector = 5429 struct amdgpu_dm_connector *aconnector =
5403 to_amdgpu_dm_connector(new_con_state->base.connector); 5430 to_amdgpu_dm_connector(new_con_state->base.connector);
5404 struct drm_display_mode *mode = &new_crtc_state->base.mode; 5431 struct drm_display_mode *mode = &new_crtc_state->base.mode;
5432 int vrefresh = drm_mode_vrefresh(mode);
5405 5433
5406 new_crtc_state->vrr_supported = new_con_state->freesync_capable && 5434 new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
5407 aconnector->min_vfreq <= drm_mode_vrefresh(mode); 5435 vrefresh >= aconnector->min_vfreq &&
5436 vrefresh <= aconnector->max_vfreq;
5408 5437
5409 if (new_crtc_state->vrr_supported) { 5438 if (new_crtc_state->vrr_supported) {
5410 new_crtc_state->stream->ignore_msa_timing_param = true; 5439 new_crtc_state->stream->ignore_msa_timing_param = true;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index f51d52eb52e6..c4ea3a91f17a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -191,6 +191,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
191 &init_params); 191 &init_params);
192 192
193 dc_sink->priv = aconnector; 193 dc_sink->priv = aconnector;
194 /* dc_link_add_remote_sink returns a new reference */
194 aconnector->dc_sink = dc_sink; 195 aconnector->dc_sink = dc_sink;
195 196
196 if (aconnector->dc_sink) 197 if (aconnector->dc_sink)
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 12d1842079ae..eb62d10bb65c 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -1348,12 +1348,12 @@ void dcn_bw_update_from_pplib(struct dc *dc)
1348 struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; 1348 struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0};
1349 bool res; 1349 bool res;
1350 1350
1351 kernel_fpu_begin();
1352
1353 /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ 1351 /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */
1354 res = dm_pp_get_clock_levels_by_type_with_voltage( 1352 res = dm_pp_get_clock_levels_by_type_with_voltage(
1355 ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); 1353 ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks);
1356 1354
1355 kernel_fpu_begin();
1356
1357 if (res) 1357 if (res)
1358 res = verify_clock_values(&fclks); 1358 res = verify_clock_values(&fclks);
1359 1359
@@ -1372,9 +1372,13 @@ void dcn_bw_update_from_pplib(struct dc *dc)
1372 } else 1372 } else
1373 BREAK_TO_DEBUGGER(); 1373 BREAK_TO_DEBUGGER();
1374 1374
1375 kernel_fpu_end();
1376
1375 res = dm_pp_get_clock_levels_by_type_with_voltage( 1377 res = dm_pp_get_clock_levels_by_type_with_voltage(
1376 ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); 1378 ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks);
1377 1379
1380 kernel_fpu_begin();
1381
1378 if (res) 1382 if (res)
1379 res = verify_clock_values(&dcfclks); 1383 res = verify_clock_values(&dcfclks);
1380 1384
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 7f5a947ad31d..ea18e9c2d8ce 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -794,6 +794,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
794 sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock; 794 sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
795 sink->converter_disable_audio = converter_disable_audio; 795 sink->converter_disable_audio = converter_disable_audio;
796 796
797 /* dc_sink_create returns a new reference */
797 link->local_sink = sink; 798 link->local_sink = sink;
798 799
799 edid_status = dm_helpers_read_local_edid( 800 edid_status = dm_helpers_read_local_edid(
@@ -2037,6 +2038,9 @@ static enum dc_status enable_link(
2037 break; 2038 break;
2038 } 2039 }
2039 2040
2041 if (status == DC_OK)
2042 pipe_ctx->stream->link->link_status.link_active = true;
2043
2040 return status; 2044 return status;
2041} 2045}
2042 2046
@@ -2060,6 +2064,14 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
2060 dp_disable_link_phy_mst(link, signal); 2064 dp_disable_link_phy_mst(link, signal);
2061 } else 2065 } else
2062 link->link_enc->funcs->disable_output(link->link_enc, signal); 2066 link->link_enc->funcs->disable_output(link->link_enc, signal);
2067
2068 if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2069 /* MST disable link only when no stream use the link */
2070 if (link->mst_stream_alloc_table.stream_count <= 0)
2071 link->link_status.link_active = false;
2072 } else {
2073 link->link_status.link_active = false;
2074 }
2063} 2075}
2064 2076
2065static bool dp_active_dongle_validate_timing( 2077static bool dp_active_dongle_validate_timing(
@@ -2623,8 +2635,6 @@ void core_link_enable_stream(
2623 } 2635 }
2624 } 2636 }
2625 2637
2626 stream->link->link_status.link_active = true;
2627
2628 core_dc->hwss.enable_audio_stream(pipe_ctx); 2638 core_dc->hwss.enable_audio_stream(pipe_ctx);
2629 2639
2630 /* turn off otg test pattern if enable */ 2640 /* turn off otg test pattern if enable */
@@ -2650,17 +2660,21 @@ void core_link_enable_stream(
2650void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) 2660void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
2651{ 2661{
2652 struct dc *core_dc = pipe_ctx->stream->ctx->dc; 2662 struct dc *core_dc = pipe_ctx->stream->ctx->dc;
2663 struct dc_stream_state *stream = pipe_ctx->stream;
2653 2664
2654 core_dc->hwss.blank_stream(pipe_ctx); 2665 core_dc->hwss.blank_stream(pipe_ctx);
2655 2666
2656 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 2667 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
2657 deallocate_mst_payload(pipe_ctx); 2668 deallocate_mst_payload(pipe_ctx);
2658 2669
2670 if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
2671 dal_ddc_service_write_scdc_data(
2672 stream->link->ddc, 0,
2673 stream->timing.flags.LTE_340MCSC_SCRAMBLE);
2674
2659 core_dc->hwss.disable_stream(pipe_ctx, option); 2675 core_dc->hwss.disable_stream(pipe_ctx, option);
2660 2676
2661 disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); 2677 disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
2662
2663 pipe_ctx->stream->link->link_status.link_active = false;
2664} 2678}
2665 2679
2666void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) 2680void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 94a84bc57c7a..bfd27f10879e 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -724,7 +724,7 @@ static void build_vrr_infopacket_v1(enum signal_type signal,
724 724
725static void build_vrr_infopacket_v2(enum signal_type signal, 725static void build_vrr_infopacket_v2(enum signal_type signal,
726 const struct mod_vrr_params *vrr, 726 const struct mod_vrr_params *vrr,
727 const enum color_transfer_func *app_tf, 727 enum color_transfer_func app_tf,
728 struct dc_info_packet *infopacket) 728 struct dc_info_packet *infopacket)
729{ 729{
730 unsigned int payload_size = 0; 730 unsigned int payload_size = 0;
@@ -732,8 +732,7 @@ static void build_vrr_infopacket_v2(enum signal_type signal,
732 build_vrr_infopacket_header_v2(signal, infopacket, &payload_size); 732 build_vrr_infopacket_header_v2(signal, infopacket, &payload_size);
733 build_vrr_infopacket_data(vrr, infopacket); 733 build_vrr_infopacket_data(vrr, infopacket);
734 734
735 if (app_tf != NULL) 735 build_vrr_infopacket_fs2_data(app_tf, infopacket);
736 build_vrr_infopacket_fs2_data(*app_tf, infopacket);
737 736
738 build_vrr_infopacket_checksum(&payload_size, infopacket); 737 build_vrr_infopacket_checksum(&payload_size, infopacket);
739 738
@@ -757,7 +756,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
757 const struct dc_stream_state *stream, 756 const struct dc_stream_state *stream,
758 const struct mod_vrr_params *vrr, 757 const struct mod_vrr_params *vrr,
759 enum vrr_packet_type packet_type, 758 enum vrr_packet_type packet_type,
760 const enum color_transfer_func *app_tf, 759 enum color_transfer_func app_tf,
761 struct dc_info_packet *infopacket) 760 struct dc_info_packet *infopacket)
762{ 761{
763 /* SPD info packet for FreeSync 762 /* SPD info packet for FreeSync
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
index 4222e403b151..dcef85994c45 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
@@ -145,7 +145,7 @@ void mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync,
145 const struct dc_stream_state *stream, 145 const struct dc_stream_state *stream,
146 const struct mod_vrr_params *vrr, 146 const struct mod_vrr_params *vrr,
147 enum vrr_packet_type packet_type, 147 enum vrr_packet_type packet_type,
148 const enum color_transfer_func *app_tf, 148 enum color_transfer_func app_tf,
149 struct dc_info_packet *infopacket); 149 struct dc_info_packet *infopacket);
150 150
151void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, 151void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
index ce177d7f04cb..6bf48934fdc4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
@@ -277,8 +277,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
277 if (!skip_display_settings) 277 if (!skip_display_settings)
278 phm_notify_smc_display_config_after_ps_adjustment(hwmgr); 278 phm_notify_smc_display_config_after_ps_adjustment(hwmgr);
279 279
280 if ((hwmgr->request_dpm_level != hwmgr->dpm_level) && 280 if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
281 !phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
282 hwmgr->dpm_level = hwmgr->request_dpm_level; 281 hwmgr->dpm_level = hwmgr->request_dpm_level;
283 282
284 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { 283 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 4588bddf8b33..615cf2c09e54 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -489,15 +489,16 @@ int pp_atomfwctrl_get_gpio_information(struct pp_hwmgr *hwmgr,
489} 489}
490 490
491int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, 491int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
492 uint8_t id, uint32_t *frequency) 492 uint8_t clk_id, uint8_t syspll_id,
493 uint32_t *frequency)
493{ 494{
494 struct amdgpu_device *adev = hwmgr->adev; 495 struct amdgpu_device *adev = hwmgr->adev;
495 struct atom_get_smu_clock_info_parameters_v3_1 parameters; 496 struct atom_get_smu_clock_info_parameters_v3_1 parameters;
496 struct atom_get_smu_clock_info_output_parameters_v3_1 *output; 497 struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
497 uint32_t ix; 498 uint32_t ix;
498 499
499 parameters.clk_id = id; 500 parameters.clk_id = clk_id;
500 parameters.syspll_id = 0; 501 parameters.syspll_id = syspll_id;
501 parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; 502 parameters.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
502 parameters.dfsdid = 0; 503 parameters.dfsdid = 0;
503 504
@@ -530,20 +531,23 @@ static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
530 boot_values->ulSocClk = 0; 531 boot_values->ulSocClk = 0;
531 boot_values->ulDCEFClk = 0; 532 boot_values->ulDCEFClk = 0;
532 533
533 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency)) 534 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, SMU11_SYSPLL0_ID, &frequency))
534 boot_values->ulSocClk = frequency; 535 boot_values->ulSocClk = frequency;
535 536
536 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency)) 537 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, SMU11_SYSPLL0_ID, &frequency))
537 boot_values->ulDCEFClk = frequency; 538 boot_values->ulDCEFClk = frequency;
538 539
539 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency)) 540 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, SMU11_SYSPLL0_ID, &frequency))
540 boot_values->ulEClk = frequency; 541 boot_values->ulEClk = frequency;
541 542
542 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency)) 543 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, SMU11_SYSPLL0_ID, &frequency))
543 boot_values->ulVClk = frequency; 544 boot_values->ulVClk = frequency;
544 545
545 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency)) 546 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, SMU11_SYSPLL0_ID, &frequency))
546 boot_values->ulDClk = frequency; 547 boot_values->ulDClk = frequency;
548
549 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL1_0_FCLK_ID, SMU11_SYSPLL1_2_ID, &frequency))
550 boot_values->ulFClk = frequency;
547} 551}
548 552
549static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr, 553static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
@@ -563,19 +567,19 @@ static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
563 boot_values->ulSocClk = 0; 567 boot_values->ulSocClk = 0;
564 boot_values->ulDCEFClk = 0; 568 boot_values->ulDCEFClk = 0;
565 569
566 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency)) 570 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, 0, &frequency))
567 boot_values->ulSocClk = frequency; 571 boot_values->ulSocClk = frequency;
568 572
569 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency)) 573 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, 0, &frequency))
570 boot_values->ulDCEFClk = frequency; 574 boot_values->ulDCEFClk = frequency;
571 575
572 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency)) 576 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, 0, &frequency))
573 boot_values->ulEClk = frequency; 577 boot_values->ulEClk = frequency;
574 578
575 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency)) 579 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, 0, &frequency))
576 boot_values->ulVClk = frequency; 580 boot_values->ulVClk = frequency;
577 581
578 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency)) 582 if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, 0, &frequency))
579 boot_values->ulDClk = frequency; 583 boot_values->ulDClk = frequency;
580} 584}
581 585
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index fe9e8ceef50e..b7e2651b570b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -139,6 +139,7 @@ struct pp_atomfwctrl_bios_boot_up_values {
139 uint32_t ulEClk; 139 uint32_t ulEClk;
140 uint32_t ulVClk; 140 uint32_t ulVClk;
141 uint32_t ulDClk; 141 uint32_t ulDClk;
142 uint32_t ulFClk;
142 uint16_t usVddc; 143 uint16_t usVddc;
143 uint16_t usVddci; 144 uint16_t usVddci;
144 uint16_t usMvddc; 145 uint16_t usMvddc;
@@ -236,7 +237,8 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
236int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr, 237int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
237 struct pp_atomfwctrl_smc_dpm_parameters *param); 238 struct pp_atomfwctrl_smc_dpm_parameters *param);
238int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, 239int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr,
239 uint8_t id, uint32_t *frequency); 240 uint8_t clk_id, uint8_t syspll_id,
241 uint32_t *frequency);
240 242
241#endif 243#endif
242 244
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index 48187acac59e..83d3d935f3ac 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -3491,14 +3491,14 @@ static int smu7_get_gpu_power(struct pp_hwmgr *hwmgr, u32 *query)
3491 3491
3492 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart); 3492 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogStart);
3493 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, 3493 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
3494 ixSMU_PM_STATUS_94, 0); 3494 ixSMU_PM_STATUS_95, 0);
3495 3495
3496 for (i = 0; i < 10; i++) { 3496 for (i = 0; i < 10; i++) {
3497 mdelay(1); 3497 mdelay(500);
3498 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample); 3498 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_PmStatusLogSample);
3499 tmp = cgs_read_ind_register(hwmgr->device, 3499 tmp = cgs_read_ind_register(hwmgr->device,
3500 CGS_IND_REG__SMC, 3500 CGS_IND_REG__SMC,
3501 ixSMU_PM_STATUS_94); 3501 ixSMU_PM_STATUS_95);
3502 if (tmp != 0) 3502 if (tmp != 0)
3503 break; 3503 break;
3504 } 3504 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 5479125ff4f6..5c4f701939ea 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -2575,10 +2575,10 @@ static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2575 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk; 2575 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2576 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk; 2576 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
2577 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, 2577 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2578 SMU9_SYSPLL0_SOCCLK_ID, &boot_up_values.ulSocClk); 2578 SMU9_SYSPLL0_SOCCLK_ID, 0, &boot_up_values.ulSocClk);
2579 2579
2580 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, 2580 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr,
2581 SMU9_SYSPLL0_DCEFCLK_ID, &boot_up_values.ulDCEFClk); 2581 SMU9_SYSPLL0_DCEFCLK_ID, 0, &boot_up_values.ulDCEFClk);
2582 2582
2583 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk; 2583 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
2584 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk; 2584 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
@@ -4407,9 +4407,9 @@ static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
4407 return ret; 4407 return ret;
4408 4408
4409 features_to_disable = 4409 features_to_disable =
4410 (features_enabled ^ new_ppfeature_masks) & features_enabled; 4410 features_enabled & ~new_ppfeature_masks;
4411 features_to_enable = 4411 features_to_enable =
4412 (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; 4412 ~features_enabled & new_ppfeature_masks;
4413 4413
4414 pr_debug("features_to_disable 0x%llx\n", features_to_disable); 4414 pr_debug("features_to_disable 0x%llx\n", features_to_disable);
4415 pr_debug("features_to_enable 0x%llx\n", features_to_enable); 4415 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 6c8e78611c03..bdb48e94eff6 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -2009,9 +2009,9 @@ static int vega12_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
2009 return ret; 2009 return ret;
2010 2010
2011 features_to_disable = 2011 features_to_disable =
2012 (features_enabled ^ new_ppfeature_masks) & features_enabled; 2012 features_enabled & ~new_ppfeature_masks;
2013 features_to_enable = 2013 features_to_enable =
2014 (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; 2014 ~features_enabled & new_ppfeature_masks;
2015 2015
2016 pr_debug("features_to_disable 0x%llx\n", features_to_disable); 2016 pr_debug("features_to_disable 0x%llx\n", features_to_disable);
2017 pr_debug("features_to_enable 0x%llx\n", features_to_enable); 2017 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index aad79affb081..23b5b94a4939 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
91 * MP0CLK DS 91 * MP0CLK DS
92 */ 92 */
93 data->registry_data.disallowed_features = 0xE0041C00; 93 data->registry_data.disallowed_features = 0xE0041C00;
94 /* ECC feature should be disabled on old SMUs */
95 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
96 hwmgr->smu_version = smum_get_argument(hwmgr);
97 if (hwmgr->smu_version < 0x282100)
98 data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
99
94 data->registry_data.od_state_in_dc_support = 0; 100 data->registry_data.od_state_in_dc_support = 0;
95 data->registry_data.thermal_support = 1; 101 data->registry_data.thermal_support = 1;
96 data->registry_data.skip_baco_hardware = 0; 102 data->registry_data.skip_baco_hardware = 0;
@@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
357 data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT; 363 data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
358 data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT; 364 data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
359 data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT; 365 data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
366 data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
360 367
361 for (i = 0; i < GNLD_FEATURES_MAX; i++) { 368 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
362 data->smu_features[i].smu_feature_bitmap = 369 data->smu_features[i].smu_feature_bitmap =
@@ -463,9 +470,9 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
463static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state) 470static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
464{ 471{
465 dpm_state->soft_min_level = 0x0; 472 dpm_state->soft_min_level = 0x0;
466 dpm_state->soft_max_level = 0xffff; 473 dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT;
467 dpm_state->hard_min_level = 0x0; 474 dpm_state->hard_min_level = 0x0;
468 dpm_state->hard_max_level = 0xffff; 475 dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT;
469} 476}
470 477
471static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr, 478static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
@@ -711,8 +718,10 @@ static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
711 PP_ASSERT_WITH_CODE(!ret, 718 PP_ASSERT_WITH_CODE(!ret,
712 "[SetupDefaultDpmTable] failed to get fclk dpm levels!", 719 "[SetupDefaultDpmTable] failed to get fclk dpm levels!",
713 return ret); 720 return ret);
714 } else 721 } else {
715 dpm_table->count = 0; 722 dpm_table->count = 1;
723 dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100;
724 }
716 vega20_init_dpm_state(&(dpm_table->dpm_state)); 725 vega20_init_dpm_state(&(dpm_table->dpm_state));
717 726
718 /* save a copy of the default DPM table */ 727 /* save a copy of the default DPM table */
@@ -754,6 +763,7 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
754 data->vbios_boot_state.eclock = boot_up_values.ulEClk; 763 data->vbios_boot_state.eclock = boot_up_values.ulEClk;
755 data->vbios_boot_state.vclock = boot_up_values.ulVClk; 764 data->vbios_boot_state.vclock = boot_up_values.ulVClk;
756 data->vbios_boot_state.dclock = boot_up_values.ulDClk; 765 data->vbios_boot_state.dclock = boot_up_values.ulDClk;
766 data->vbios_boot_state.fclock = boot_up_values.ulFClk;
757 data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID; 767 data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
758 768
759 smum_send_msg_to_smc_with_parameter(hwmgr, 769 smum_send_msg_to_smc_with_parameter(hwmgr,
@@ -780,6 +790,8 @@ static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
780static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr) 790static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
781{ 791{
782 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); 792 struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
793 struct vega20_hwmgr *data =
794 (struct vega20_hwmgr *)(hwmgr->backend);
783 uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg; 795 uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
784 int ret; 796 int ret;
785 797
@@ -816,6 +828,10 @@ static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
816 "[OverridePcieParameters] Attempt to override pcie params failed!", 828 "[OverridePcieParameters] Attempt to override pcie params failed!",
817 return ret); 829 return ret);
818 830
831 data->pcie_parameters_override = 1;
832 data->pcie_gen_level1 = pcie_gen;
833 data->pcie_width_level1 = pcie_width;
834
819 return 0; 835 return 0;
820} 836}
821 837
@@ -979,6 +995,8 @@ static int vega20_od8_set_feature_capabilities(
979 } 995 }
980 996
981 if (data->smu_features[GNLD_DPM_UCLK].enabled) { 997 if (data->smu_features[GNLD_DPM_UCLK].enabled) {
998 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] =
999 data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value;
982 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] && 1000 if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
983 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 && 1001 pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
984 pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 && 1002 pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
@@ -2314,32 +2332,8 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
2314 2332
2315static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) 2333static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
2316{ 2334{
2317 struct vega20_hwmgr *data =
2318 (struct vega20_hwmgr *)(hwmgr->backend);
2319 uint32_t soft_min_level, soft_max_level;
2320 int ret = 0; 2335 int ret = 0;
2321 2336
2322 soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
2323 soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));
2324 data->dpm_table.gfx_table.dpm_state.soft_min_level =
2325 data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
2326 data->dpm_table.gfx_table.dpm_state.soft_max_level =
2327 data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;
2328
2329 soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
2330 soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));
2331 data->dpm_table.mem_table.dpm_state.soft_min_level =
2332 data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
2333 data->dpm_table.mem_table.dpm_state.soft_max_level =
2334 data->dpm_table.mem_table.dpm_levels[soft_max_level].value;
2335
2336 soft_min_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
2337 soft_max_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));
2338 data->dpm_table.soc_table.dpm_state.soft_min_level =
2339 data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
2340 data->dpm_table.soc_table.dpm_state.soft_max_level =
2341 data->dpm_table.soc_table.dpm_levels[soft_max_level].value;
2342
2343 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); 2337 ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF);
2344 PP_ASSERT_WITH_CODE(!ret, 2338 PP_ASSERT_WITH_CODE(!ret,
2345 "Failed to upload DPM Bootup Levels!", 2339 "Failed to upload DPM Bootup Levels!",
@@ -2641,9 +2635,8 @@ static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
2641 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table); 2635 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2642 int i, count; 2636 int i, count;
2643 2637
2644 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled, 2638 if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
2645 "[GetSclks]: gfxclk dpm not enabled!\n", 2639 return -1;
2646 return -EPERM);
2647 2640
2648 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2641 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2649 clocks->num_levels = count; 2642 clocks->num_levels = count;
@@ -2670,9 +2663,8 @@ static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
2670 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table); 2663 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table);
2671 int i, count; 2664 int i, count;
2672 2665
2673 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled, 2666 if (!data->smu_features[GNLD_DPM_UCLK].enabled)
2674 "[GetMclks]: uclk dpm not enabled!\n", 2667 return -1;
2675 return -EPERM);
2676 2668
2677 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2669 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2678 clocks->num_levels = data->mclk_latency_table.count = count; 2670 clocks->num_levels = data->mclk_latency_table.count = count;
@@ -2696,9 +2688,8 @@ static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
2696 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table); 2688 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table);
2697 int i, count; 2689 int i, count;
2698 2690
2699 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_DCEFCLK].enabled, 2691 if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
2700 "[GetDcfclocks]: dcefclk dpm not enabled!\n", 2692 return -1;
2701 return -EPERM);
2702 2693
2703 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2694 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2704 clocks->num_levels = count; 2695 clocks->num_levels = count;
@@ -2719,9 +2710,8 @@ static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
2719 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table); 2710 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table);
2720 int i, count; 2711 int i, count;
2721 2712
2722 PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_SOCCLK].enabled, 2713 if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
2723 "[GetSocclks]: socclk dpm not enabled!\n", 2714 return -1;
2724 return -EPERM);
2725 2715
2726 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count; 2716 count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
2727 clocks->num_levels = count; 2717 clocks->num_levels = count;
@@ -2799,7 +2789,6 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
2799 data->od8_settings.od8_settings_array; 2789 data->od8_settings.od8_settings_array;
2800 OverDriveTable_t *od_table = 2790 OverDriveTable_t *od_table =
2801 &(data->smc_state_table.overdrive_table); 2791 &(data->smc_state_table.overdrive_table);
2802 struct pp_clock_levels_with_latency clocks;
2803 int32_t input_index, input_clk, input_vol, i; 2792 int32_t input_index, input_clk, input_vol, i;
2804 int od8_id; 2793 int od8_id;
2805 int ret; 2794 int ret;
@@ -2858,11 +2847,6 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
2858 return -EOPNOTSUPP; 2847 return -EOPNOTSUPP;
2859 } 2848 }
2860 2849
2861 ret = vega20_get_memclocks(hwmgr, &clocks);
2862 PP_ASSERT_WITH_CODE(!ret,
2863 "Attempt to get memory clk levels failed!",
2864 return ret);
2865
2866 for (i = 0; i < size; i += 2) { 2850 for (i = 0; i < size; i += 2) {
2867 if (i + 2 > size) { 2851 if (i + 2 > size) {
2868 pr_info("invalid number of input parameters %d\n", 2852 pr_info("invalid number of input parameters %d\n",
@@ -2879,11 +2863,11 @@ static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
2879 return -EINVAL; 2863 return -EINVAL;
2880 } 2864 }
2881 2865
2882 if (input_clk < clocks.data[0].clocks_in_khz / 1000 || 2866 if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
2883 input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) { 2867 input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
2884 pr_info("clock freq %d is not within allowed range [%d - %d]\n", 2868 pr_info("clock freq %d is not within allowed range [%d - %d]\n",
2885 input_clk, 2869 input_clk,
2886 clocks.data[0].clocks_in_khz / 1000, 2870 od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
2887 od8_settings[OD8_SETTING_UCLK_FMAX].max_value); 2871 od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
2888 return -EINVAL; 2872 return -EINVAL;
2889 } 2873 }
@@ -3043,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
3043 "FCLK_DS", 3027 "FCLK_DS",
3044 "MP1CLK_DS", 3028 "MP1CLK_DS",
3045 "MP0CLK_DS", 3029 "MP0CLK_DS",
3046 "XGMI"}; 3030 "XGMI",
3031 "ECC"};
3047 static const char *output_title[] = { 3032 static const char *output_title[] = {
3048 "FEATURES", 3033 "FEATURES",
3049 "BITMASK", 3034 "BITMASK",
@@ -3088,9 +3073,9 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
3088 return ret; 3073 return ret;
3089 3074
3090 features_to_disable = 3075 features_to_disable =
3091 (features_enabled ^ new_ppfeature_masks) & features_enabled; 3076 features_enabled & ~new_ppfeature_masks;
3092 features_to_enable = 3077 features_to_enable =
3093 (features_enabled ^ new_ppfeature_masks) ^ features_to_disable; 3078 ~features_enabled & new_ppfeature_masks;
3094 3079
3095 pr_debug("features_to_disable 0x%llx\n", features_to_disable); 3080 pr_debug("features_to_disable 0x%llx\n", features_to_disable);
3096 pr_debug("features_to_enable 0x%llx\n", features_to_enable); 3081 pr_debug("features_to_enable 0x%llx\n", features_to_enable);
@@ -3128,7 +3113,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
3128 &(data->dpm_table.fclk_table); 3113 &(data->dpm_table.fclk_table);
3129 int i, now, size = 0; 3114 int i, now, size = 0;
3130 int ret = 0; 3115 int ret = 0;
3131 uint32_t gen_speed, lane_width; 3116 uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;
3132 3117
3133 switch (type) { 3118 switch (type) {
3134 case PP_SCLK: 3119 case PP_SCLK:
@@ -3137,10 +3122,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
3137 "Attempt to get current gfx clk Failed!", 3122 "Attempt to get current gfx clk Failed!",
3138 return ret); 3123 return ret);
3139 3124
3140 ret = vega20_get_sclks(hwmgr, &clocks); 3125 if (vega20_get_sclks(hwmgr, &clocks)) {
3141 PP_ASSERT_WITH_CODE(!ret, 3126 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3142 "Attempt to get gfx clk levels Failed!", 3127 now / 100);
3143 return ret); 3128 break;
3129 }
3144 3130
3145 for (i = 0; i < clocks.num_levels; i++) 3131 for (i = 0; i < clocks.num_levels; i++)
3146 size += sprintf(buf + size, "%d: %uMhz %s\n", 3132 size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3154,10 +3140,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
3154 "Attempt to get current mclk freq Failed!", 3140 "Attempt to get current mclk freq Failed!",
3155 return ret); 3141 return ret);
3156 3142
3157 ret = vega20_get_memclocks(hwmgr, &clocks); 3143 if (vega20_get_memclocks(hwmgr, &clocks)) {
3158 PP_ASSERT_WITH_CODE(!ret, 3144 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3159 "Attempt to get memory clk levels Failed!", 3145 now / 100);
3160 return ret); 3146 break;
3147 }
3161 3148
3162 for (i = 0; i < clocks.num_levels; i++) 3149 for (i = 0; i < clocks.num_levels; i++)
3163 size += sprintf(buf + size, "%d: %uMhz %s\n", 3150 size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3171,10 +3158,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
3171 "Attempt to get current socclk freq Failed!", 3158 "Attempt to get current socclk freq Failed!",
3172 return ret); 3159 return ret);
3173 3160
3174 ret = vega20_get_socclocks(hwmgr, &clocks); 3161 if (vega20_get_socclocks(hwmgr, &clocks)) {
3175 PP_ASSERT_WITH_CODE(!ret, 3162 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3176 "Attempt to get soc clk levels Failed!", 3163 now / 100);
3177 return ret); 3164 break;
3165 }
3178 3166
3179 for (i = 0; i < clocks.num_levels; i++) 3167 for (i = 0; i < clocks.num_levels; i++)
3180 size += sprintf(buf + size, "%d: %uMhz %s\n", 3168 size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3200,10 +3188,11 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
3200 "Attempt to get current dcefclk freq Failed!", 3188 "Attempt to get current dcefclk freq Failed!",
3201 return ret); 3189 return ret);
3202 3190
3203 ret = vega20_get_dcefclocks(hwmgr, &clocks); 3191 if (vega20_get_dcefclocks(hwmgr, &clocks)) {
3204 PP_ASSERT_WITH_CODE(!ret, 3192 size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
3205 "Attempt to get dcefclk levels Failed!", 3193 now / 100);
3206 return ret); 3194 break;
3195 }
3207 3196
3208 for (i = 0; i < clocks.num_levels; i++) 3197 for (i = 0; i < clocks.num_levels; i++)
3209 size += sprintf(buf + size, "%d: %uMhz %s\n", 3198 size += sprintf(buf + size, "%d: %uMhz %s\n",
@@ -3212,28 +3201,36 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
3212 break; 3201 break;
3213 3202
3214 case PP_PCIE: 3203 case PP_PCIE:
3215 gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & 3204 current_gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
3216 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) 3205 PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
3217 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; 3206 >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
3218 lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & 3207 current_lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
3219 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) 3208 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
3220 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; 3209 >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
3221 for (i = 0; i < NUM_LINK_LEVELS; i++) 3210 for (i = 0; i < NUM_LINK_LEVELS; i++) {
3211 if (i == 1 && data->pcie_parameters_override) {
3212 gen_speed = data->pcie_gen_level1;
3213 lane_width = data->pcie_width_level1;
3214 } else {
3215 gen_speed = pptable->PcieGenSpeed[i];
3216 lane_width = pptable->PcieLaneCount[i];
3217 }
3222 size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, 3218 size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
3223 (pptable->PcieGenSpeed[i] == 0) ? "2.5GT/s," : 3219 (gen_speed == 0) ? "2.5GT/s," :
3224 (pptable->PcieGenSpeed[i] == 1) ? "5.0GT/s," : 3220 (gen_speed == 1) ? "5.0GT/s," :
3225 (pptable->PcieGenSpeed[i] == 2) ? "8.0GT/s," : 3221 (gen_speed == 2) ? "8.0GT/s," :
3226 (pptable->PcieGenSpeed[i] == 3) ? "16.0GT/s," : "", 3222 (gen_speed == 3) ? "16.0GT/s," : "",
3227 (pptable->PcieLaneCount[i] == 1) ? "x1" : 3223 (lane_width == 1) ? "x1" :
3228 (pptable->PcieLaneCount[i] == 2) ? "x2" : 3224 (lane_width == 2) ? "x2" :
3229 (pptable->PcieLaneCount[i] == 3) ? "x4" : 3225 (lane_width == 3) ? "x4" :
3230 (pptable->PcieLaneCount[i] == 4) ? "x8" : 3226 (lane_width == 4) ? "x8" :
3231 (pptable->PcieLaneCount[i] == 5) ? "x12" : 3227 (lane_width == 5) ? "x12" :
3232 (pptable->PcieLaneCount[i] == 6) ? "x16" : "", 3228 (lane_width == 6) ? "x16" : "",
3233 pptable->LclkFreq[i], 3229 pptable->LclkFreq[i],
3234 (gen_speed == pptable->PcieGenSpeed[i]) && 3230 (current_gen_speed == gen_speed) &&
3235 (lane_width == pptable->PcieLaneCount[i]) ? 3231 (current_lane_width == lane_width) ?
3236 "*" : ""); 3232 "*" : "");
3233 }
3237 break; 3234 break;
3238 3235
3239 case OD_SCLK: 3236 case OD_SCLK:
@@ -3288,13 +3285,8 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
3288 } 3285 }
3289 3286
3290 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) { 3287 if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
3291 ret = vega20_get_memclocks(hwmgr, &clocks);
3292 PP_ASSERT_WITH_CODE(!ret,
3293 "Fail to get memory clk levels!",
3294 return ret);
3295
3296 size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", 3288 size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
3297 clocks.data[0].clocks_in_khz / 1000, 3289 od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
3298 od8_settings[OD8_SETTING_UCLK_FMAX].max_value); 3290 od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
3299 } 3291 }
3300 3292
@@ -3356,6 +3348,31 @@ static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
3356 return ret; 3348 return ret;
3357} 3349}
3358 3350
3351static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
3352{
3353 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
3354 struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table);
3355 int ret = 0;
3356
3357 if (data->smu_features[GNLD_DPM_FCLK].enabled) {
3358 PP_ASSERT_WITH_CODE(dpm_table->count > 0,
3359 "[SetFclkToHightestDpmLevel] Dpm table has no entry!",
3360 return -EINVAL);
3361 PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS,
3362 "[SetFclkToHightestDpmLevel] Dpm table has too many entries!",
3363 return -EINVAL);
3364
3365 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3366 PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
3367 PPSMC_MSG_SetSoftMinByFreq,
3368 (PPCLK_FCLK << 16 ) | dpm_table->dpm_state.soft_min_level)),
3369 "[SetFclkToHightestDpmLevel] Set soft min fclk failed!",
3370 return ret);
3371 }
3372
3373 return ret;
3374}
3375
3359static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 3376static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
3360{ 3377{
3361 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend); 3378 struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
@@ -3366,8 +3383,10 @@ static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
3366 3383
3367 ret = vega20_set_uclk_to_highest_dpm_level(hwmgr, 3384 ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
3368 &data->dpm_table.mem_table); 3385 &data->dpm_table.mem_table);
3386 if (ret)
3387 return ret;
3369 3388
3370 return ret; 3389 return vega20_set_fclk_to_highest_dpm_level(hwmgr);
3371} 3390}
3372 3391
3373static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr) 3392static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
@@ -3451,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3451 struct vega20_single_dpm_table *dpm_table; 3470 struct vega20_single_dpm_table *dpm_table;
3452 bool vblank_too_short = false; 3471 bool vblank_too_short = false;
3453 bool disable_mclk_switching; 3472 bool disable_mclk_switching;
3473 bool disable_fclk_switching;
3454 uint32_t i, latency; 3474 uint32_t i, latency;
3455 3475
3456 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && 3476 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
@@ -3461,9 +3481,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3461 /* gfxclk */ 3481 /* gfxclk */
3462 dpm_table = &(data->dpm_table.gfx_table); 3482 dpm_table = &(data->dpm_table.gfx_table);
3463 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3483 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3464 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3484 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3465 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3485 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3466 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3486 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3467 3487
3468 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3488 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3469 if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) { 3489 if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
@@ -3485,9 +3505,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3485 /* memclk */ 3505 /* memclk */
3486 dpm_table = &(data->dpm_table.mem_table); 3506 dpm_table = &(data->dpm_table.mem_table);
3487 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3507 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3488 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3508 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3489 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3509 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3490 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3510 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3491 3511
3492 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3512 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3493 if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) { 3513 if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
@@ -3526,12 +3546,28 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3526 if (hwmgr->display_config->nb_pstate_switch_disable) 3546 if (hwmgr->display_config->nb_pstate_switch_disable)
3527 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3547 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3528 3548
3549 if ((disable_mclk_switching &&
3550 (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
3551 hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
3552 disable_fclk_switching = true;
3553 else
3554 disable_fclk_switching = false;
3555
3556 /* fclk */
3557 dpm_table = &(data->dpm_table.fclk_table);
3558 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3559 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3560 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3561 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3562 if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
3563 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3564
3529 /* vclk */ 3565 /* vclk */
3530 dpm_table = &(data->dpm_table.vclk_table); 3566 dpm_table = &(data->dpm_table.vclk_table);
3531 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3567 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3532 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3568 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3533 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3569 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3534 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3570 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3535 3571
3536 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3572 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3537 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { 3573 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
@@ -3548,9 +3584,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3548 /* dclk */ 3584 /* dclk */
3549 dpm_table = &(data->dpm_table.dclk_table); 3585 dpm_table = &(data->dpm_table.dclk_table);
3550 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3586 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3551 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3587 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3552 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3588 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3553 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3589 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3554 3590
3555 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3591 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3556 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) { 3592 if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
@@ -3567,9 +3603,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3567 /* socclk */ 3603 /* socclk */
3568 dpm_table = &(data->dpm_table.soc_table); 3604 dpm_table = &(data->dpm_table.soc_table);
3569 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3605 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3570 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3606 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3571 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3607 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3572 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3608 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3573 3609
3574 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3610 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3575 if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) { 3611 if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
@@ -3586,9 +3622,9 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3586 /* eclk */ 3622 /* eclk */
3587 dpm_table = &(data->dpm_table.eclk_table); 3623 dpm_table = &(data->dpm_table.eclk_table);
3588 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3624 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3589 dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3625 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3590 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3626 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3591 dpm_table->dpm_state.hard_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3627 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3592 3628
3593 if (PP_CAP(PHM_PlatformCaps_UMDPState)) { 3629 if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
3594 if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) { 3630 if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index 37f5f5e657da..ac2a3118a0ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -42,6 +42,8 @@
42#define AVFS_CURVE 0 42#define AVFS_CURVE 0
43#define OD8_HOTCURVE_TEMPERATURE 85 43#define OD8_HOTCURVE_TEMPERATURE 85
44 44
45#define VG20_CLOCK_MAX_DEFAULT 0xFFFF
46
45typedef uint32_t PP_Clock; 47typedef uint32_t PP_Clock;
46 48
47enum { 49enum {
@@ -78,6 +80,7 @@ enum {
78 GNLD_DS_MP1CLK, 80 GNLD_DS_MP1CLK,
79 GNLD_DS_MP0CLK, 81 GNLD_DS_MP0CLK,
80 GNLD_XGMI, 82 GNLD_XGMI,
83 GNLD_ECC,
81 84
82 GNLD_FEATURES_MAX 85 GNLD_FEATURES_MAX
83}; 86};
@@ -219,6 +222,7 @@ struct vega20_vbios_boot_state {
219 uint32_t eclock; 222 uint32_t eclock;
220 uint32_t dclock; 223 uint32_t dclock;
221 uint32_t vclock; 224 uint32_t vclock;
225 uint32_t fclock;
222}; 226};
223 227
224#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 228#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
@@ -523,6 +527,10 @@ struct vega20_hwmgr {
523 527
524 unsigned long metrics_time; 528 unsigned long metrics_time;
525 SmuMetrics_t metrics_table; 529 SmuMetrics_t metrics_table;
530
531 bool pcie_parameters_override;
532 uint32_t pcie_gen_level1;
533 uint32_t pcie_width_level1;
526}; 534};
527 535
528#define VEGA20_DPM2_NEAR_TDP_DEC 10 536#define VEGA20_DPM2_NEAR_TDP_DEC 10
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
index 97f8a1a970c3..7a7f15d0c53a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_processpptables.c
@@ -32,6 +32,8 @@
32#include "cgs_common.h" 32#include "cgs_common.h"
33#include "vega20_pptable.h" 33#include "vega20_pptable.h"
34 34
35#define VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE 105
36
35static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable, 37static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
36 enum phm_platform_caps cap) 38 enum phm_platform_caps cap)
37{ 39{
@@ -798,6 +800,17 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
798 return 0; 800 return 0;
799} 801}
800 802
803static int override_powerplay_table_fantargettemperature(struct pp_hwmgr *hwmgr)
804{
805 struct phm_ppt_v3_information *pptable_information =
806 (struct phm_ppt_v3_information *)hwmgr->pptable;
807 PPTable_t *ppsmc_pptable = (PPTable_t *)(pptable_information->smc_pptable);
808
809 ppsmc_pptable->FanTargetTemperature = VEGA20_FAN_TARGET_TEMPERATURE_OVERRIDE;
810
811 return 0;
812}
813
801#define VEGA20_ENGINECLOCK_HARDMAX 198000 814#define VEGA20_ENGINECLOCK_HARDMAX 198000
802static int init_powerplay_table_information( 815static int init_powerplay_table_information(
803 struct pp_hwmgr *hwmgr, 816 struct pp_hwmgr *hwmgr,
@@ -887,6 +900,10 @@ static int init_powerplay_table_information(
887 900
888 901
889 result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable)); 902 result = append_vbios_pptable(hwmgr, (pptable_information->smc_pptable));
903 if (result)
904 return result;
905
906 result = override_powerplay_table_fantargettemperature(hwmgr);
890 907
891 return result; 908 return result;
892} 909}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 63d5cf691549..195c4ae67058 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -99,7 +99,7 @@
99#define FEATURE_DS_MP1CLK_BIT 30 99#define FEATURE_DS_MP1CLK_BIT 30
100#define FEATURE_DS_MP0CLK_BIT 31 100#define FEATURE_DS_MP0CLK_BIT 31
101#define FEATURE_XGMI_BIT 32 101#define FEATURE_XGMI_BIT 32
102#define FEATURE_SPARE_33_BIT 33 102#define FEATURE_ECC_BIT 33
103#define FEATURE_SPARE_34_BIT 34 103#define FEATURE_SPARE_34_BIT 34
104#define FEATURE_SPARE_35_BIT 35 104#define FEATURE_SPARE_35_BIT 35
105#define FEATURE_SPARE_36_BIT 36 105#define FEATURE_SPARE_36_BIT 36
@@ -165,7 +165,8 @@
165#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) 165#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
166#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT ) 166#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT )
167#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT ) 167#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT )
168#define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT ) 168#define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT )
169#define FEATURE_ECC_MASK (1ULL << FEATURE_ECC_BIT )
169 170
170#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 171#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001
171#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 172#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 52abca065764..2d4cfe14f72e 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -2330,6 +2330,7 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
2330 case DRAM_LOG_BUFF_SIZE: 2330 case DRAM_LOG_BUFF_SIZE:
2331 return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE); 2331 return offsetof(SMU74_SoftRegisters, DRAM_LOG_BUFF_SIZE);
2332 } 2332 }
2333 break;
2333 case SMU_Discrete_DpmTable: 2334 case SMU_Discrete_DpmTable:
2334 switch (member) { 2335 switch (member) {
2335 case UvdBootLevel: 2336 case UvdBootLevel:
@@ -2339,6 +2340,7 @@ static uint32_t polaris10_get_offsetof(uint32_t type, uint32_t member)
2339 case LowSclkInterruptThreshold: 2340 case LowSclkInterruptThreshold:
2340 return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold); 2341 return offsetof(SMU74_Discrete_DpmTable, LowSclkInterruptThreshold);
2341 } 2342 }
2343 break;
2342 } 2344 }
2343 pr_warn("can't get the offset of type %x member %x\n", type, member); 2345 pr_warn("can't get the offset of type %x member %x\n", type, member);
2344 return 0; 2346 return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
index 079fc8e8f709..742b3dc1f6cb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu9_smumgr.c
@@ -40,10 +40,8 @@ bool smu9_is_smc_ram_running(struct pp_hwmgr *hwmgr)
40 struct amdgpu_device *adev = hwmgr->adev; 40 struct amdgpu_device *adev = hwmgr->adev;
41 uint32_t mp1_fw_flags; 41 uint32_t mp1_fw_flags;
42 42
43 WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, 43 mp1_fw_flags = RREG32_PCIE(MP1_Public |
44 (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); 44 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
45
46 mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
47 45
48 if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) 46 if (mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
49 return true; 47 return true;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index b7ff7d4d6f44..ba00744c3413 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -49,10 +49,8 @@ static bool vega20_is_smc_ram_running(struct pp_hwmgr *hwmgr)
49 struct amdgpu_device *adev = hwmgr->adev; 49 struct amdgpu_device *adev = hwmgr->adev;
50 uint32_t mp1_fw_flags; 50 uint32_t mp1_fw_flags;
51 51
52 WREG32_SOC15(NBIF, 0, mmPCIE_INDEX2, 52 mp1_fw_flags = RREG32_PCIE(MP1_Public |
53 (MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff))); 53 (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
54
55 mp1_fw_flags = RREG32_SOC15(NBIF, 0, mmPCIE_DATA2);
56 54
57 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 55 if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
58 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 56 MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 540a77a2ade9..40ac19848034 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3039,9 +3039,31 @@ commit:
3039 return 0; 3039 return 0;
3040} 3040}
3041 3041
3042static int __drm_atomic_helper_disable_all(struct drm_device *dev, 3042/**
3043 struct drm_modeset_acquire_ctx *ctx, 3043 * drm_atomic_helper_disable_all - disable all currently active outputs
3044 bool clean_old_fbs) 3044 * @dev: DRM device
3045 * @ctx: lock acquisition context
3046 *
3047 * Loops through all connectors, finding those that aren't turned off and then
3048 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
3049 * that they are connected to.
3050 *
3051 * This is used for example in suspend/resume to disable all currently active
3052 * functions when suspending. If you just want to shut down everything at e.g.
3053 * driver unload, look at drm_atomic_helper_shutdown().
3054 *
3055 * Note that if callers haven't already acquired all modeset locks this might
3056 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3057 *
3058 * Returns:
3059 * 0 on success or a negative error code on failure.
3060 *
3061 * See also:
3062 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
3063 * drm_atomic_helper_shutdown().
3064 */
3065int drm_atomic_helper_disable_all(struct drm_device *dev,
3066 struct drm_modeset_acquire_ctx *ctx)
3045{ 3067{
3046 struct drm_atomic_state *state; 3068 struct drm_atomic_state *state;
3047 struct drm_connector_state *conn_state; 3069 struct drm_connector_state *conn_state;
@@ -3099,35 +3121,6 @@ free:
3099 drm_atomic_state_put(state); 3121 drm_atomic_state_put(state);
3100 return ret; 3122 return ret;
3101} 3123}
3102
3103/**
3104 * drm_atomic_helper_disable_all - disable all currently active outputs
3105 * @dev: DRM device
3106 * @ctx: lock acquisition context
3107 *
3108 * Loops through all connectors, finding those that aren't turned off and then
3109 * turns them off by setting their DPMS mode to OFF and deactivating the CRTC
3110 * that they are connected to.
3111 *
3112 * This is used for example in suspend/resume to disable all currently active
3113 * functions when suspending. If you just want to shut down everything at e.g.
3114 * driver unload, look at drm_atomic_helper_shutdown().
3115 *
3116 * Note that if callers haven't already acquired all modeset locks this might
3117 * return -EDEADLK, which must be handled by calling drm_modeset_backoff().
3118 *
3119 * Returns:
3120 * 0 on success or a negative error code on failure.
3121 *
3122 * See also:
3123 * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and
3124 * drm_atomic_helper_shutdown().
3125 */
3126int drm_atomic_helper_disable_all(struct drm_device *dev,
3127 struct drm_modeset_acquire_ctx *ctx)
3128{
3129 return __drm_atomic_helper_disable_all(dev, ctx, false);
3130}
3131EXPORT_SYMBOL(drm_atomic_helper_disable_all); 3124EXPORT_SYMBOL(drm_atomic_helper_disable_all);
3132 3125
3133/** 3126/**
@@ -3148,7 +3141,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
3148 3141
3149 DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); 3142 DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
3150 3143
3151 ret = __drm_atomic_helper_disable_all(dev, &ctx, true); 3144 ret = drm_atomic_helper_disable_all(dev, &ctx);
3152 if (ret) 3145 if (ret)
3153 DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret); 3146 DRM_ERROR("Disabling all crtc's during unload failed with %i\n", ret);
3154 3147
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 381581b01d48..05bbc2b622fc 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev)
376 synchronize_srcu(&drm_unplug_srcu); 376 synchronize_srcu(&drm_unplug_srcu);
377 377
378 drm_dev_unregister(dev); 378 drm_dev_unregister(dev);
379 379 drm_dev_put(dev);
380 mutex_lock(&drm_global_mutex);
381 if (dev->open_count == 0)
382 drm_dev_put(dev);
383 mutex_unlock(&drm_global_mutex);
384} 380}
385EXPORT_SYMBOL(drm_dev_unplug); 381EXPORT_SYMBOL(drm_dev_unplug);
386 382
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0e9349ff2d16..af2ab640cadb 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
1963 best_depth = fmt->depth; 1963 best_depth = fmt->depth;
1964 } 1964 }
1965 } 1965 }
1966 if (sizes.surface_depth != best_depth) { 1966 if (sizes.surface_depth != best_depth && best_depth) {
1967 DRM_INFO("requested bpp %d, scaled depth down to %d", 1967 DRM_INFO("requested bpp %d, scaled depth down to %d",
1968 sizes.surface_bpp, best_depth); 1968 sizes.surface_bpp, best_depth);
1969 sizes.surface_depth = best_depth; 1969 sizes.surface_depth = best_depth;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 83a5bbca6e7e..7caa3c7ed978 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp)
489 489
490 drm_close_helper(filp); 490 drm_close_helper(filp);
491 491
492 if (!--dev->open_count) { 492 if (!--dev->open_count)
493 drm_lastclose(dev); 493 drm_lastclose(dev);
494 if (drm_dev_is_unplugged(dev)) 494
495 drm_put_dev(dev);
496 }
497 mutex_unlock(&drm_global_mutex); 495 mutex_unlock(&drm_global_mutex);
498 496
499 drm_minor_release(minor); 497 drm_minor_release(minor);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 67b1fca39aa6..0e3043e08c69 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -185,7 +185,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
185 m32.size = map.size; 185 m32.size = map.size;
186 m32.type = map.type; 186 m32.type = map.type;
187 m32.flags = map.flags; 187 m32.flags = map.flags;
188 m32.handle = ptr_to_compat(map.handle); 188 m32.handle = ptr_to_compat((void __user *)map.handle);
189 m32.mtrr = map.mtrr; 189 m32.mtrr = map.mtrr;
190 if (copy_to_user(argp, &m32, sizeof(m32))) 190 if (copy_to_user(argp, &m32, sizeof(m32)))
191 return -EFAULT; 191 return -EFAULT;
@@ -216,7 +216,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
216 216
217 m32.offset = map.offset; 217 m32.offset = map.offset;
218 m32.mtrr = map.mtrr; 218 m32.mtrr = map.mtrr;
219 m32.handle = ptr_to_compat(map.handle); 219 m32.handle = ptr_to_compat((void __user *)map.handle);
220 if (map.handle != compat_ptr(m32.handle)) 220 if (map.handle != compat_ptr(m32.handle))
221 pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n", 221 pr_err_ratelimited("compat_drm_addmap truncated handle %p for type %d offset %x\n",
222 map.handle, m32.type, m32.offset); 222 map.handle, m32.type, m32.offset);
@@ -526,7 +526,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
526 if (err) 526 if (err)
527 return err; 527 return err;
528 528
529 req32.handle = ptr_to_compat(req.handle); 529 req32.handle = ptr_to_compat((void __user *)req.handle);
530 if (copy_to_user(argp, &req32, sizeof(req32))) 530 if (copy_to_user(argp, &req32, sizeof(req32)))
531 return -EFAULT; 531 return -EFAULT;
532 532
diff --git a/drivers/gpu/drm/etnaviv/Kconfig b/drivers/gpu/drm/etnaviv/Kconfig
index 041a77e400d4..21df44b78df3 100644
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -2,7 +2,6 @@
2config DRM_ETNAVIV 2config DRM_ETNAVIV
3 tristate "ETNAVIV (DRM support for Vivante GPU IP cores)" 3 tristate "ETNAVIV (DRM support for Vivante GPU IP cores)"
4 depends on DRM 4 depends on DRM
5 depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST)
6 depends on MMU 5 depends on MMU
7 select SHMEM 6 select SHMEM
8 select SYNC_FILE 7 select SYNC_FILE
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
index acb68c698363..4d5d1a77eb2a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_cmdbuf.h
@@ -15,8 +15,6 @@ struct etnaviv_perfmon_request;
15struct etnaviv_cmdbuf { 15struct etnaviv_cmdbuf {
16 /* suballocator this cmdbuf is allocated from */ 16 /* suballocator this cmdbuf is allocated from */
17 struct etnaviv_cmdbuf_suballoc *suballoc; 17 struct etnaviv_cmdbuf_suballoc *suballoc;
18 /* user context key, must be unique between all active users */
19 struct etnaviv_file_private *ctx;
20 /* cmdbuf properties */ 18 /* cmdbuf properties */
21 int suballoc_offset; 19 int suballoc_offset;
22 void *vaddr; 20 void *vaddr;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
index 3fbb4855396c..33854c94cb85 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
@@ -215,7 +215,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
215 mutex_lock(&obj->lock); 215 mutex_lock(&obj->lock);
216 pages = etnaviv_gem_get_pages(obj); 216 pages = etnaviv_gem_get_pages(obj);
217 mutex_unlock(&obj->lock); 217 mutex_unlock(&obj->lock);
218 if (pages) { 218 if (!IS_ERR(pages)) {
219 int j; 219 int j;
220 220
221 iter.hdr->data[0] = bomap - bomap_start; 221 iter.hdr->data[0] = bomap - bomap_start;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.h b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
index 76079c2291f8..f0abb744ef95 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -95,6 +95,7 @@ struct etnaviv_gem_submit_bo {
95struct etnaviv_gem_submit { 95struct etnaviv_gem_submit {
96 struct drm_sched_job sched_job; 96 struct drm_sched_job sched_job;
97 struct kref refcount; 97 struct kref refcount;
98 struct etnaviv_file_private *ctx;
98 struct etnaviv_gpu *gpu; 99 struct etnaviv_gpu *gpu;
99 struct dma_fence *out_fence, *in_fence; 100 struct dma_fence *out_fence, *in_fence;
100 int out_fence_id; 101 int out_fence_id;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
index 0566171f8df2..f21529e635e3 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
@@ -15,7 +15,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
15 int npages = obj->size >> PAGE_SHIFT; 15 int npages = obj->size >> PAGE_SHIFT;
16 16
17 if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */ 17 if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
18 return NULL; 18 return ERR_PTR(-EINVAL);
19 19
20 return drm_prime_pages_to_sg(etnaviv_obj->pages, npages); 20 return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
21} 21}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 30875f8f2933..b2fe3446bfbc 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -506,7 +506,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
506 if (ret) 506 if (ret)
507 goto err_submit_objects; 507 goto err_submit_objects;
508 508
509 submit->cmdbuf.ctx = file->driver_priv; 509 submit->ctx = file->driver_priv;
510 submit->exec_state = args->exec_state; 510 submit->exec_state = args->exec_state;
511 submit->flags = args->flags; 511 submit->flags = args->flags;
512 512
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index f1c88d8ad5ba..f794e04be9e6 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -320,8 +320,8 @@ etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu)
320 domain = &etnaviv_domain->base; 320 domain = &etnaviv_domain->base;
321 321
322 domain->dev = gpu->dev; 322 domain->dev = gpu->dev;
323 domain->base = 0; 323 domain->base = SZ_4K;
324 domain->size = (u64)SZ_1G * 4; 324 domain->size = (u64)SZ_1G * 4 - SZ_4K;
325 domain->ops = &etnaviv_iommuv2_ops; 325 domain->ops = &etnaviv_iommuv2_ops;
326 326
327 ret = etnaviv_iommuv2_init(etnaviv_domain); 327 ret = etnaviv_iommuv2_init(etnaviv_domain);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
index 9980d81a26e3..4227a4006c34 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
@@ -113,7 +113,7 @@ static const struct etnaviv_pm_domain doms_3d[] = {
113 .name = "PE", 113 .name = "PE",
114 .profile_read = VIVS_MC_PROFILE_PE_READ, 114 .profile_read = VIVS_MC_PROFILE_PE_READ,
115 .profile_config = VIVS_MC_PROFILE_CONFIG0, 115 .profile_config = VIVS_MC_PROFILE_CONFIG0,
116 .nr_signals = 5, 116 .nr_signals = 4,
117 .signal = (const struct etnaviv_pm_signal[]) { 117 .signal = (const struct etnaviv_pm_signal[]) {
118 { 118 {
119 "PIXEL_COUNT_KILLED_BY_COLOR_PIPE", 119 "PIXEL_COUNT_KILLED_BY_COLOR_PIPE",
@@ -435,7 +435,7 @@ int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
435 435
436 dom = meta->domains + signal->domain; 436 dom = meta->domains + signal->domain;
437 437
438 if (signal->iter > dom->nr_signals) 438 if (signal->iter >= dom->nr_signals)
439 return -EINVAL; 439 return -EINVAL;
440 440
441 sig = &dom->signal[signal->iter]; 441 sig = &dom->signal[signal->iter];
@@ -461,7 +461,7 @@ int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
461 461
462 dom = meta->domains + r->domain; 462 dom = meta->domains + r->domain;
463 463
464 if (r->signal > dom->nr_signals) 464 if (r->signal >= dom->nr_signals)
465 return -EINVAL; 465 return -EINVAL;
466 466
467 return 0; 467 return 0;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 67ae26602024..6d24fea1766b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -153,7 +153,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
153 mutex_lock(&submit->gpu->fence_lock); 153 mutex_lock(&submit->gpu->fence_lock);
154 154
155 ret = drm_sched_job_init(&submit->sched_job, sched_entity, 155 ret = drm_sched_job_init(&submit->sched_job, sched_entity,
156 submit->cmdbuf.ctx); 156 submit->ctx);
157 if (ret) 157 if (ret)
158 goto out_unlock; 158 goto out_unlock;
159 159
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 0573eab0e190..f35e4ab55b27 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -20,6 +20,7 @@
20#include "regs-vp.h" 20#include "regs-vp.h"
21 21
22#include <linux/kernel.h> 22#include <linux/kernel.h>
23#include <linux/ktime.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
24#include <linux/wait.h> 25#include <linux/wait.h>
25#include <linux/i2c.h> 26#include <linux/i2c.h>
@@ -352,15 +353,62 @@ static void mixer_cfg_vp_blend(struct mixer_context *ctx, unsigned int alpha)
352 mixer_reg_write(ctx, MXR_VIDEO_CFG, val); 353 mixer_reg_write(ctx, MXR_VIDEO_CFG, val);
353} 354}
354 355
355static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) 356static bool mixer_is_synced(struct mixer_context *ctx)
356{ 357{
357 /* block update on vsync */ 358 u32 base, shadow;
358 mixer_reg_writemask(ctx, MXR_STATUS, enable ?
359 MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
360 359
360 if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
361 ctx->mxr_ver == MXR_VER_128_0_0_184)
362 return !(mixer_reg_read(ctx, MXR_CFG) &
363 MXR_CFG_LAYER_UPDATE_COUNT_MASK);
364
365 if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) &&
366 vp_reg_read(ctx, VP_SHADOW_UPDATE))
367 return false;
368
369 base = mixer_reg_read(ctx, MXR_CFG);
370 shadow = mixer_reg_read(ctx, MXR_CFG_S);
371 if (base != shadow)
372 return false;
373
374 base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
375 shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
376 if (base != shadow)
377 return false;
378
379 base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
380 shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
381 if (base != shadow)
382 return false;
383
384 return true;
385}
386
387static int mixer_wait_for_sync(struct mixer_context *ctx)
388{
389 ktime_t timeout = ktime_add_us(ktime_get(), 100000);
390
391 while (!mixer_is_synced(ctx)) {
392 usleep_range(1000, 2000);
393 if (ktime_compare(ktime_get(), timeout) > 0)
394 return -ETIMEDOUT;
395 }
396 return 0;
397}
398
399static void mixer_disable_sync(struct mixer_context *ctx)
400{
401 mixer_reg_writemask(ctx, MXR_STATUS, 0, MXR_STATUS_SYNC_ENABLE);
402}
403
404static void mixer_enable_sync(struct mixer_context *ctx)
405{
406 if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
407 ctx->mxr_ver == MXR_VER_128_0_0_184)
408 mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
409 mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SYNC_ENABLE);
361 if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags)) 410 if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags))
362 vp_reg_write(ctx, VP_SHADOW_UPDATE, enable ? 411 vp_reg_write(ctx, VP_SHADOW_UPDATE, VP_SHADOW_UPDATE_ENABLE);
363 VP_SHADOW_UPDATE_ENABLE : 0);
364} 412}
365 413
366static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height) 414static void mixer_cfg_scan(struct mixer_context *ctx, int width, int height)
@@ -498,7 +546,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
498 546
499 spin_lock_irqsave(&ctx->reg_slock, flags); 547 spin_lock_irqsave(&ctx->reg_slock, flags);
500 548
501 vp_reg_write(ctx, VP_SHADOW_UPDATE, 1);
502 /* interlace or progressive scan mode */ 549 /* interlace or progressive scan mode */
503 val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0); 550 val = (test_bit(MXR_BIT_INTERLACE, &ctx->flags) ? ~0 : 0);
504 vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP); 551 vp_reg_writemask(ctx, VP_MODE, val, VP_MODE_LINE_SKIP);
@@ -553,11 +600,6 @@ static void vp_video_buffer(struct mixer_context *ctx,
553 vp_regs_dump(ctx); 600 vp_regs_dump(ctx);
554} 601}
555 602
556static void mixer_layer_update(struct mixer_context *ctx)
557{
558 mixer_reg_writemask(ctx, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
559}
560
561static void mixer_graph_buffer(struct mixer_context *ctx, 603static void mixer_graph_buffer(struct mixer_context *ctx,
562 struct exynos_drm_plane *plane) 604 struct exynos_drm_plane *plane)
563{ 605{
@@ -640,11 +682,6 @@ static void mixer_graph_buffer(struct mixer_context *ctx,
640 mixer_cfg_layer(ctx, win, priority, true); 682 mixer_cfg_layer(ctx, win, priority, true);
641 mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha); 683 mixer_cfg_gfx_blend(ctx, win, pixel_alpha, state->base.alpha);
642 684
643 /* layer update mandatory for mixer 16.0.33.0 */
644 if (ctx->mxr_ver == MXR_VER_16_0_33_0 ||
645 ctx->mxr_ver == MXR_VER_128_0_0_184)
646 mixer_layer_update(ctx);
647
648 spin_unlock_irqrestore(&ctx->reg_slock, flags); 685 spin_unlock_irqrestore(&ctx->reg_slock, flags);
649 686
650 mixer_regs_dump(ctx); 687 mixer_regs_dump(ctx);
@@ -709,7 +746,7 @@ static void mixer_win_reset(struct mixer_context *ctx)
709static irqreturn_t mixer_irq_handler(int irq, void *arg) 746static irqreturn_t mixer_irq_handler(int irq, void *arg)
710{ 747{
711 struct mixer_context *ctx = arg; 748 struct mixer_context *ctx = arg;
712 u32 val, base, shadow; 749 u32 val;
713 750
714 spin_lock(&ctx->reg_slock); 751 spin_lock(&ctx->reg_slock);
715 752
@@ -723,26 +760,9 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
723 val &= ~MXR_INT_STATUS_VSYNC; 760 val &= ~MXR_INT_STATUS_VSYNC;
724 761
725 /* interlace scan need to check shadow register */ 762 /* interlace scan need to check shadow register */
726 if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)) { 763 if (test_bit(MXR_BIT_INTERLACE, &ctx->flags)
727 if (test_bit(MXR_BIT_VP_ENABLED, &ctx->flags) && 764 && !mixer_is_synced(ctx))
728 vp_reg_read(ctx, VP_SHADOW_UPDATE)) 765 goto out;
729 goto out;
730
731 base = mixer_reg_read(ctx, MXR_CFG);
732 shadow = mixer_reg_read(ctx, MXR_CFG_S);
733 if (base != shadow)
734 goto out;
735
736 base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(0));
737 shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(0));
738 if (base != shadow)
739 goto out;
740
741 base = mixer_reg_read(ctx, MXR_GRAPHIC_BASE(1));
742 shadow = mixer_reg_read(ctx, MXR_GRAPHIC_BASE_S(1));
743 if (base != shadow)
744 goto out;
745 }
746 766
747 drm_crtc_handle_vblank(&ctx->crtc->base); 767 drm_crtc_handle_vblank(&ctx->crtc->base);
748 } 768 }
@@ -917,12 +937,14 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
917 937
918static void mixer_atomic_begin(struct exynos_drm_crtc *crtc) 938static void mixer_atomic_begin(struct exynos_drm_crtc *crtc)
919{ 939{
920 struct mixer_context *mixer_ctx = crtc->ctx; 940 struct mixer_context *ctx = crtc->ctx;
921 941
922 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) 942 if (!test_bit(MXR_BIT_POWERED, &ctx->flags))
923 return; 943 return;
924 944
925 mixer_vsync_set_update(mixer_ctx, false); 945 if (mixer_wait_for_sync(ctx))
946 dev_err(ctx->dev, "timeout waiting for VSYNC\n");
947 mixer_disable_sync(ctx);
926} 948}
927 949
928static void mixer_update_plane(struct exynos_drm_crtc *crtc, 950static void mixer_update_plane(struct exynos_drm_crtc *crtc,
@@ -964,7 +986,7 @@ static void mixer_atomic_flush(struct exynos_drm_crtc *crtc)
964 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags)) 986 if (!test_bit(MXR_BIT_POWERED, &mixer_ctx->flags))
965 return; 987 return;
966 988
967 mixer_vsync_set_update(mixer_ctx, true); 989 mixer_enable_sync(mixer_ctx);
968 exynos_crtc_handle_event(crtc); 990 exynos_crtc_handle_event(crtc);
969} 991}
970 992
@@ -979,7 +1001,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
979 1001
980 exynos_drm_pipe_clk_enable(crtc, true); 1002 exynos_drm_pipe_clk_enable(crtc, true);
981 1003
982 mixer_vsync_set_update(ctx, false); 1004 mixer_disable_sync(ctx);
983 1005
984 mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); 1006 mixer_reg_writemask(ctx, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
985 1007
@@ -992,7 +1014,7 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
992 1014
993 mixer_commit(ctx); 1015 mixer_commit(ctx);
994 1016
995 mixer_vsync_set_update(ctx, true); 1017 mixer_enable_sync(ctx);
996 1018
997 set_bit(MXR_BIT_POWERED, &ctx->flags); 1019 set_bit(MXR_BIT_POWERED, &ctx->flags);
998} 1020}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 35b4ec3f7618..3592d04c33b2 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
1441 } 1441 }
1442 1442
1443 if (index_mode) { 1443 if (index_mode) {
1444 if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) { 1444 if (guest_gma >= I915_GTT_PAGE_SIZE) {
1445 ret = -EFAULT; 1445 ret = -EFAULT;
1446 goto err; 1446 goto err;
1447 } 1447 }
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 035479e273be..e3f9caa7839f 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
448/** 448/**
449 * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU 449 * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
450 * @vgpu: a vGPU 450 * @vgpu: a vGPU
451 * @conncted: link state 451 * @connected: link state
452 * 452 *
453 * This function is used to trigger hotplug interrupt for vGPU 453 * This function is used to trigger hotplug interrupt for vGPU
454 * 454 *
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 3e7e2b80c857..5d887f7cc0d5 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -238,9 +238,6 @@ static int vgpu_get_plane_info(struct drm_device *dev,
238 default: 238 default:
239 gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); 239 gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
240 } 240 }
241
242 info->size = (((p.stride * p.height * p.bpp) / 8) +
243 (PAGE_SIZE - 1)) >> PAGE_SHIFT;
244 } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { 241 } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
245 ret = intel_vgpu_decode_cursor_plane(vgpu, &c); 242 ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
246 if (ret) 243 if (ret)
@@ -262,14 +259,13 @@ static int vgpu_get_plane_info(struct drm_device *dev,
262 info->x_hot = UINT_MAX; 259 info->x_hot = UINT_MAX;
263 info->y_hot = UINT_MAX; 260 info->y_hot = UINT_MAX;
264 } 261 }
265
266 info->size = (((info->stride * c.height * c.bpp) / 8)
267 + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
268 } else { 262 } else {
269 gvt_vgpu_err("invalid plane id:%d\n", plane_id); 263 gvt_vgpu_err("invalid plane id:%d\n", plane_id);
270 return -EINVAL; 264 return -EINVAL;
271 } 265 }
272 266
267 info->size = (info->stride * info->height + PAGE_SIZE - 1)
268 >> PAGE_SHIFT;
273 if (info->size == 0) { 269 if (info->size == 0) {
274 gvt_vgpu_err("fb size is zero\n"); 270 gvt_vgpu_err("fb size is zero\n");
275 return -EINVAL; 271 return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index c7103dd2d8d5..cf133ef03873 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1882,7 +1882,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1882 } 1882 }
1883 1883
1884 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head); 1884 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1885
1886 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
1885 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head); 1887 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
1888 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
1889
1886 return mm; 1890 return mm;
1887} 1891}
1888 1892
@@ -1942,7 +1946,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
1942 */ 1946 */
1943void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) 1947void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
1944{ 1948{
1945 atomic_dec(&mm->pincount); 1949 atomic_dec_if_positive(&mm->pincount);
1946} 1950}
1947 1951
1948/** 1952/**
@@ -1967,9 +1971,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
1967 if (ret) 1971 if (ret)
1968 return ret; 1972 return ret;
1969 1973
1974 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1970 list_move_tail(&mm->ppgtt_mm.lru_list, 1975 list_move_tail(&mm->ppgtt_mm.lru_list,
1971 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head); 1976 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
1972 1977 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
1973 } 1978 }
1974 1979
1975 return 0; 1980 return 0;
@@ -1980,6 +1985,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
1980 struct intel_vgpu_mm *mm; 1985 struct intel_vgpu_mm *mm;
1981 struct list_head *pos, *n; 1986 struct list_head *pos, *n;
1982 1987
1988 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
1989
1983 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) { 1990 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
1984 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list); 1991 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
1985 1992
@@ -1987,9 +1994,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
1987 continue; 1994 continue;
1988 1995
1989 list_del_init(&mm->ppgtt_mm.lru_list); 1996 list_del_init(&mm->ppgtt_mm.lru_list);
1997 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
1990 invalidate_ppgtt_mm(mm); 1998 invalidate_ppgtt_mm(mm);
1991 return 1; 1999 return 1;
1992 } 2000 }
2001 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
1993 return 0; 2002 return 0;
1994} 2003}
1995 2004
@@ -2659,6 +2668,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
2659 } 2668 }
2660 } 2669 }
2661 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head); 2670 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
2671 mutex_init(&gvt->gtt.ppgtt_mm_lock);
2662 return 0; 2672 return 0;
2663} 2673}
2664 2674
@@ -2699,7 +2709,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
2699 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) { 2709 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2700 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list); 2710 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2701 if (mm->type == INTEL_GVT_MM_PPGTT) { 2711 if (mm->type == INTEL_GVT_MM_PPGTT) {
2712 mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2702 list_del_init(&mm->ppgtt_mm.lru_list); 2713 list_del_init(&mm->ppgtt_mm.lru_list);
2714 mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2703 if (mm->ppgtt_mm.shadowed) 2715 if (mm->ppgtt_mm.shadowed)
2704 invalidate_ppgtt_mm(mm); 2716 invalidate_ppgtt_mm(mm);
2705 } 2717 }
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index d8cb04cc946d..edb610dc5d86 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
88 void (*mm_free_page_table)(struct intel_vgpu_mm *mm); 88 void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
89 struct list_head oos_page_use_list_head; 89 struct list_head oos_page_use_list_head;
90 struct list_head oos_page_free_list_head; 90 struct list_head oos_page_free_list_head;
91 struct mutex ppgtt_mm_lock;
91 struct list_head ppgtt_mm_lru_list_head; 92 struct list_head ppgtt_mm_lru_list_head;
92 93
93 struct page *scratch_page; 94 struct page *scratch_page;
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 7d84cfb9051a..7902fb162d09 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
132 132
133 {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */ 133 {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
134 {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */ 134 {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
135 {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
135 136
136 {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ 137 {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
137 {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ 138 {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1bb8f936fdaa..05b953793316 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
346 int i = 0; 346 int i = 0;
347 347
348 if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed) 348 if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
349 return -1; 349 return -EINVAL;
350 350
351 if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { 351 if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
352 px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0]; 352 px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
410 if (workload->shadow) 410 if (workload->shadow)
411 return 0; 411 return 0;
412 412
413 ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
414 if (ret < 0) {
415 gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
416 return ret;
417 }
418
419 /* pin shadow context by gvt even the shadow context will be pinned 413 /* pin shadow context by gvt even the shadow context will be pinned
420 * when i915 alloc request. That is because gvt will update the guest 414 * when i915 alloc request. That is because gvt will update the guest
421 * context from shadow context when workload is completed, and at that 415 * context from shadow context when workload is completed, and at that
@@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
678{ 672{
679 struct intel_vgpu *vgpu = workload->vgpu; 673 struct intel_vgpu *vgpu = workload->vgpu;
680 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 674 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
675 struct intel_vgpu_submission *s = &vgpu->submission;
676 struct i915_gem_context *shadow_ctx = s->shadow_ctx;
677 struct i915_request *rq;
681 int ring_id = workload->ring_id; 678 int ring_id = workload->ring_id;
682 int ret; 679 int ret;
683 680
@@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
687 mutex_lock(&vgpu->vgpu_lock); 684 mutex_lock(&vgpu->vgpu_lock);
688 mutex_lock(&dev_priv->drm.struct_mutex); 685 mutex_lock(&dev_priv->drm.struct_mutex);
689 686
687 ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
688 if (ret < 0) {
689 gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
690 goto err_req;
691 }
692
690 ret = intel_gvt_workload_req_alloc(workload); 693 ret = intel_gvt_workload_req_alloc(workload);
691 if (ret) 694 if (ret)
692 goto err_req; 695 goto err_req;
@@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
703 706
704 ret = prepare_workload(workload); 707 ret = prepare_workload(workload);
705out: 708out:
709 if (ret) {
710 /* We might still need to add request with
711 * clean ctx to retire it properly..
712 */
713 rq = fetch_and_zero(&workload->req);
714 i915_request_put(rq);
715 }
716
706 if (!IS_ERR_OR_NULL(workload->req)) { 717 if (!IS_ERR_OR_NULL(workload->req)) {
707 gvt_dbg_sched("ring id %d submit workload to i915 %p\n", 718 gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
708 ring_id, workload->req); 719 ring_id, workload->req);
@@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload(
739 goto out; 750 goto out;
740 } 751 }
741 752
742 if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id))) 753 if (!scheduler->current_vgpu->active ||
754 list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
743 goto out; 755 goto out;
744 756
745 /* 757 /*
@@ -1474,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1474 intel_runtime_pm_put_unchecked(dev_priv); 1486 intel_runtime_pm_put_unchecked(dev_priv);
1475 } 1487 }
1476 1488
1477 if (ret && (vgpu_is_vm_unhealthy(ret))) { 1489 if (ret) {
1478 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); 1490 if (vgpu_is_vm_unhealthy(ret))
1491 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1479 intel_vgpu_destroy_workload(workload); 1492 intel_vgpu_destroy_workload(workload);
1480 return ERR_PTR(ret); 1493 return ERR_PTR(ret);
1481 } 1494 }
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 215b6ff8aa73..db7bb5bd5add 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -163,17 +163,25 @@ int i915_active_ref(struct i915_active *ref,
163 struct i915_request *rq) 163 struct i915_request *rq)
164{ 164{
165 struct i915_active_request *active; 165 struct i915_active_request *active;
166 int err = 0;
167
168 /* Prevent reaping in case we malloc/wait while building the tree */
169 i915_active_acquire(ref);
166 170
167 active = active_instance(ref, timeline); 171 active = active_instance(ref, timeline);
168 if (IS_ERR(active)) 172 if (IS_ERR(active)) {
169 return PTR_ERR(active); 173 err = PTR_ERR(active);
174 goto out;
175 }
170 176
171 if (!i915_active_request_isset(active)) 177 if (!i915_active_request_isset(active))
172 ref->count++; 178 ref->count++;
173 __i915_active_request_set(active, rq); 179 __i915_active_request_set(active, rq);
174 180
175 GEM_BUG_ON(!ref->count); 181 GEM_BUG_ON(!ref->count);
176 return 0; 182out:
183 i915_active_release(ref);
184 return err;
177} 185}
178 186
179bool i915_active_acquire(struct i915_active *ref) 187bool i915_active_acquire(struct i915_active *ref)
@@ -223,19 +231,25 @@ int i915_request_await_active_request(struct i915_request *rq,
223int i915_request_await_active(struct i915_request *rq, struct i915_active *ref) 231int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
224{ 232{
225 struct active_node *it, *n; 233 struct active_node *it, *n;
226 int ret; 234 int err = 0;
227 235
228 ret = i915_request_await_active_request(rq, &ref->last); 236 /* await allocates and so we need to avoid hitting the shrinker */
229 if (ret) 237 if (i915_active_acquire(ref))
230 return ret; 238 goto out; /* was idle */
239
240 err = i915_request_await_active_request(rq, &ref->last);
241 if (err)
242 goto out;
231 243
232 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { 244 rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
233 ret = i915_request_await_active_request(rq, &it->base); 245 err = i915_request_await_active_request(rq, &it->base);
234 if (ret) 246 if (err)
235 return ret; 247 goto out;
236 } 248 }
237 249
238 return 0; 250out:
251 i915_active_release(ref);
252 return err;
239} 253}
240 254
241#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) 255#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0bd890c04fe4..f6f6e5b78e97 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
4830 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, 4830 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4831 &ctx); 4831 &ctx);
4832 if (ret) { 4832 if (ret) {
4833 ret = -EINTR; 4833 if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
4834 try_again = true;
4835 continue;
4836 }
4834 break; 4837 break;
4835 } 4838 }
4836 crtc = connector->state->crtc; 4839 crtc = connector->state->crtc;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 6630212f2faf..9df65d386d11 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -757,39 +757,6 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
757 return ret; 757 return ret;
758} 758}
759 759
760#if !defined(CONFIG_VGA_CONSOLE)
761static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
762{
763 return 0;
764}
765#elif !defined(CONFIG_DUMMY_CONSOLE)
766static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
767{
768 return -ENODEV;
769}
770#else
771static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
772{
773 int ret = 0;
774
775 DRM_INFO("Replacing VGA console driver\n");
776
777 console_lock();
778 if (con_is_bound(&vga_con))
779 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
780 if (ret == 0) {
781 ret = do_unregister_con_driver(&vga_con);
782
783 /* Ignore "already unregistered". */
784 if (ret == -ENODEV)
785 ret = 0;
786 }
787 console_unlock();
788
789 return ret;
790}
791#endif
792
793static void intel_init_dpio(struct drm_i915_private *dev_priv) 760static void intel_init_dpio(struct drm_i915_private *dev_priv)
794{ 761{
795 /* 762 /*
@@ -1420,7 +1387,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1420 goto err_ggtt; 1387 goto err_ggtt;
1421 } 1388 }
1422 1389
1423 ret = i915_kick_out_vgacon(dev_priv); 1390 ret = vga_remove_vgacon(pdev);
1424 if (ret) { 1391 if (ret) {
1425 DRM_ERROR("failed to remove conflicting VGA console\n"); 1392 DRM_ERROR("failed to remove conflicting VGA console\n");
1426 goto err_ggtt; 1393 goto err_ggtt;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9adc7bb9e69c..a67a63b5aa84 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void)
2346 INTEL_DEVID(dev_priv) == 0x5915 || \ 2346 INTEL_DEVID(dev_priv) == 0x5915 || \
2347 INTEL_DEVID(dev_priv) == 0x591E) 2347 INTEL_DEVID(dev_priv) == 0x591E)
2348#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \ 2348#define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
2349 INTEL_DEVID(dev_priv) == 0x87C0) 2349 INTEL_DEVID(dev_priv) == 0x87C0 || \
2350 INTEL_DEVID(dev_priv) == 0x87CA)
2350#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2351#define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
2351 INTEL_INFO(dev_priv)->gt == 2) 2352 INTEL_INFO(dev_priv)->gt == 2)
2352#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \ 2353#define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6728ea5c71d4..8558e81fdc2a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1688,7 +1688,8 @@ __vma_matches(struct vm_area_struct *vma, struct file *filp,
1688 if (vma->vm_file != filp) 1688 if (vma->vm_file != filp)
1689 return false; 1689 return false;
1690 1690
1691 return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size; 1691 return vma->vm_start == addr &&
1692 (vma->vm_end - vma->vm_start) == PAGE_ALIGN(size);
1692} 1693}
1693 1694
1694/** 1695/**
@@ -1733,8 +1734,13 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1733 * pages from. 1734 * pages from.
1734 */ 1735 */
1735 if (!obj->base.filp) { 1736 if (!obj->base.filp) {
1736 i915_gem_object_put(obj); 1737 addr = -ENXIO;
1737 return -ENXIO; 1738 goto err;
1739 }
1740
1741 if (range_overflows(args->offset, args->size, (u64)obj->base.size)) {
1742 addr = -EINVAL;
1743 goto err;
1738 } 1744 }
1739 1745
1740 addr = vm_mmap(obj->base.filp, 0, args->size, 1746 addr = vm_mmap(obj->base.filp, 0, args->size,
@@ -1748,8 +1754,8 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1748 struct vm_area_struct *vma; 1754 struct vm_area_struct *vma;
1749 1755
1750 if (down_write_killable(&mm->mmap_sem)) { 1756 if (down_write_killable(&mm->mmap_sem)) {
1751 i915_gem_object_put(obj); 1757 addr = -EINTR;
1752 return -EINTR; 1758 goto err;
1753 } 1759 }
1754 vma = find_vma(mm, addr); 1760 vma = find_vma(mm, addr);
1755 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) 1761 if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
@@ -1767,12 +1773,10 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1767 i915_gem_object_put(obj); 1773 i915_gem_object_put(obj);
1768 1774
1769 args->addr_ptr = (u64)addr; 1775 args->addr_ptr = (u64)addr;
1770
1771 return 0; 1776 return 0;
1772 1777
1773err: 1778err:
1774 i915_gem_object_put(obj); 1779 i915_gem_object_put(obj);
1775
1776 return addr; 1780 return addr;
1777} 1781}
1778 1782
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9a65341fec09..aa6791255252 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1721,7 +1721,7 @@ error_msg(struct i915_gpu_state *error, unsigned long engines, const char *msg)
1721 i915_error_generate_code(error, engines)); 1721 i915_error_generate_code(error, engines));
1722 if (engines) { 1722 if (engines) {
1723 /* Just show the first executing process, more is confusing */ 1723 /* Just show the first executing process, more is confusing */
1724 i = ffs(engines); 1724 i = __ffs(engines);
1725 len += scnprintf(error->error_msg + len, 1725 len += scnprintf(error->error_msg + len,
1726 sizeof(error->error_msg) - len, 1726 sizeof(error->error_msg) - len,
1727 ", in %s [%d]", 1727 ", in %s [%d]",
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 638a586469f9..047855dd8c6b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2863,7 +2863,7 @@ enum i915_power_well_id {
2863#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140) 2863#define GEN11_GT_VEBOX_VDBOX_DISABLE _MMIO(0x9140)
2864#define GEN11_GT_VDBOX_DISABLE_MASK 0xff 2864#define GEN11_GT_VDBOX_DISABLE_MASK 0xff
2865#define GEN11_GT_VEBOX_DISABLE_SHIFT 16 2865#define GEN11_GT_VEBOX_DISABLE_SHIFT 16
2866#define GEN11_GT_VEBOX_DISABLE_MASK (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT) 2866#define GEN11_GT_VEBOX_DISABLE_MASK (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
2867 2867
2868#define GEN11_EU_DISABLE _MMIO(0x9134) 2868#define GEN11_EU_DISABLE _MMIO(0x9134)
2869#define GEN11_EU_DIS_MASK 0xFF 2869#define GEN11_EU_DIS_MASK 0xFF
@@ -9243,7 +9243,7 @@ enum skl_power_gate {
9243#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \ 9243#define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
9244 _TRANS_DDI_FUNC_CTL2_A) 9244 _TRANS_DDI_FUNC_CTL2_A)
9245#define PORT_SYNC_MODE_ENABLE (1 << 4) 9245#define PORT_SYNC_MODE_ENABLE (1 << 4)
9246#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) < 0) 9246#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
9247#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0) 9247#define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
9248#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0 9248#define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
9249 9249
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index d01683167c77..8bc042551692 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -223,8 +223,14 @@ out:
223 return &p->requests[idx]; 223 return &p->requests[idx];
224} 224}
225 225
226struct sched_cache {
227 struct list_head *priolist;
228};
229
226static struct intel_engine_cs * 230static struct intel_engine_cs *
227sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked) 231sched_lock_engine(const struct i915_sched_node *node,
232 struct intel_engine_cs *locked,
233 struct sched_cache *cache)
228{ 234{
229 struct intel_engine_cs *engine = node_to_request(node)->engine; 235 struct intel_engine_cs *engine = node_to_request(node)->engine;
230 236
@@ -232,6 +238,7 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
232 238
233 if (engine != locked) { 239 if (engine != locked) {
234 spin_unlock(&locked->timeline.lock); 240 spin_unlock(&locked->timeline.lock);
241 memset(cache, 0, sizeof(*cache));
235 spin_lock(&engine->timeline.lock); 242 spin_lock(&engine->timeline.lock);
236 } 243 }
237 244
@@ -253,11 +260,11 @@ static bool inflight(const struct i915_request *rq,
253static void __i915_schedule(struct i915_request *rq, 260static void __i915_schedule(struct i915_request *rq,
254 const struct i915_sched_attr *attr) 261 const struct i915_sched_attr *attr)
255{ 262{
256 struct list_head *uninitialized_var(pl); 263 struct intel_engine_cs *engine;
257 struct intel_engine_cs *engine, *last;
258 struct i915_dependency *dep, *p; 264 struct i915_dependency *dep, *p;
259 struct i915_dependency stack; 265 struct i915_dependency stack;
260 const int prio = attr->priority; 266 const int prio = attr->priority;
267 struct sched_cache cache;
261 LIST_HEAD(dfs); 268 LIST_HEAD(dfs);
262 269
263 /* Needed in order to use the temporary link inside i915_dependency */ 270 /* Needed in order to use the temporary link inside i915_dependency */
@@ -328,7 +335,7 @@ static void __i915_schedule(struct i915_request *rq,
328 __list_del_entry(&stack.dfs_link); 335 __list_del_entry(&stack.dfs_link);
329 } 336 }
330 337
331 last = NULL; 338 memset(&cache, 0, sizeof(cache));
332 engine = rq->engine; 339 engine = rq->engine;
333 spin_lock_irq(&engine->timeline.lock); 340 spin_lock_irq(&engine->timeline.lock);
334 341
@@ -338,7 +345,7 @@ static void __i915_schedule(struct i915_request *rq,
338 345
339 INIT_LIST_HEAD(&dep->dfs_link); 346 INIT_LIST_HEAD(&dep->dfs_link);
340 347
341 engine = sched_lock_engine(node, engine); 348 engine = sched_lock_engine(node, engine, &cache);
342 lockdep_assert_held(&engine->timeline.lock); 349 lockdep_assert_held(&engine->timeline.lock);
343 350
344 /* Recheck after acquiring the engine->timeline.lock */ 351 /* Recheck after acquiring the engine->timeline.lock */
@@ -347,11 +354,11 @@ static void __i915_schedule(struct i915_request *rq,
347 354
348 node->attr.priority = prio; 355 node->attr.priority = prio;
349 if (!list_empty(&node->link)) { 356 if (!list_empty(&node->link)) {
350 if (last != engine) { 357 if (!cache.priolist)
351 pl = i915_sched_lookup_priolist(engine, prio); 358 cache.priolist =
352 last = engine; 359 i915_sched_lookup_priolist(engine,
353 } 360 prio);
354 list_move_tail(&node->link, pl); 361 list_move_tail(&node->link, cache.priolist);
355 } else { 362 } else {
356 /* 363 /*
357 * If the request is not in the priolist queue because 364 * If the request is not in the priolist queue because
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b508d8a735e0..4364f42cac6b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1673,6 +1673,7 @@ init_vbt_missing_defaults(struct drm_i915_private *dev_priv)
1673 info->supports_dvi = (port != PORT_A && port != PORT_E); 1673 info->supports_dvi = (port != PORT_A && port != PORT_E);
1674 info->supports_hdmi = info->supports_dvi; 1674 info->supports_hdmi = info->supports_dvi;
1675 info->supports_dp = (port != PORT_E); 1675 info->supports_dp = (port != PORT_E);
1676 info->supports_edp = (port == PORT_A);
1676 } 1677 }
1677} 1678}
1678 1679
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index cacaa1d04d17..09ed90c0ba00 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -106,16 +106,6 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
106 106
107 GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL, 107 GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_SIGNAL,
108 &rq->fence.flags)); 108 &rq->fence.flags));
109 clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
110
111 /*
112 * We may race with direct invocation of
113 * dma_fence_signal(), e.g. i915_request_retire(),
114 * in which case we can skip processing it ourselves.
115 */
116 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
117 &rq->fence.flags))
118 continue;
119 109
120 /* 110 /*
121 * Queue for execution after dropping the signaling 111 * Queue for execution after dropping the signaling
@@ -123,6 +113,14 @@ bool intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine)
123 * more signalers to the same context or engine. 113 * more signalers to the same context or engine.
124 */ 114 */
125 i915_request_get(rq); 115 i915_request_get(rq);
116
117 /*
118 * We may race with direct invocation of
119 * dma_fence_signal(), e.g. i915_request_retire(),
120 * so we need to acquire our reference to the request
121 * before we cancel the breadcrumb.
122 */
123 clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags);
126 list_add_tail(&rq->signal_link, &signal); 124 list_add_tail(&rq->signal_link, &signal);
127 } 125 }
128 126
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index ca705546a0ab..14d580cdefd3 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -3568,6 +3568,13 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
3568{ 3568{
3569 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 3569 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
3570 intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state); 3570 intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
3571
3572 if (conn_state->content_protection ==
3573 DRM_MODE_CONTENT_PROTECTION_DESIRED)
3574 intel_hdcp_enable(to_intel_connector(conn_state->connector));
3575 else if (conn_state->content_protection ==
3576 DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
3577 intel_hdcp_disable(to_intel_connector(conn_state->connector));
3571} 3578}
3572 3579
3573static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder, 3580static void intel_ddi_set_fia_lane_count(struct intel_encoder *encoder,
@@ -3962,12 +3969,7 @@ static int modeset_pipe(struct drm_crtc *crtc,
3962 goto out; 3969 goto out;
3963 3970
3964 ret = drm_atomic_commit(state); 3971 ret = drm_atomic_commit(state);
3965 if (ret) 3972out:
3966 goto out;
3967
3968 return 0;
3969
3970 out:
3971 drm_atomic_state_put(state); 3973 drm_atomic_state_put(state);
3972 3974
3973 return ret; 3975 return ret;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index d00d0bb07784..7eb58a9d1319 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -710,47 +710,45 @@ __sseu_prepare(struct drm_i915_private *i915,
710 unsigned int flags, 710 unsigned int flags,
711 struct i915_gem_context *ctx, 711 struct i915_gem_context *ctx,
712 struct intel_engine_cs *engine, 712 struct intel_engine_cs *engine,
713 struct igt_spinner **spin_out) 713 struct igt_spinner **spin)
714{ 714{
715 int ret = 0; 715 struct i915_request *rq;
716 716 int ret;
717 if (flags & (TEST_BUSY | TEST_RESET)) {
718 struct igt_spinner *spin;
719 struct i915_request *rq;
720 717
721 spin = kzalloc(sizeof(*spin), GFP_KERNEL); 718 *spin = NULL;
722 if (!spin) { 719 if (!(flags & (TEST_BUSY | TEST_RESET)))
723 ret = -ENOMEM; 720 return 0;
724 goto out;
725 }
726 721
727 ret = igt_spinner_init(spin, i915); 722 *spin = kzalloc(sizeof(**spin), GFP_KERNEL);
728 if (ret) 723 if (!*spin)
729 return ret; 724 return -ENOMEM;
730 725
731 rq = igt_spinner_create_request(spin, ctx, engine, MI_NOOP); 726 ret = igt_spinner_init(*spin, i915);
732 if (IS_ERR(rq)) { 727 if (ret)
733 ret = PTR_ERR(rq); 728 goto err_free;
734 igt_spinner_fini(spin);
735 kfree(spin);
736 goto out;
737 }
738 729
739 i915_request_add(rq); 730 rq = igt_spinner_create_request(*spin, ctx, engine, MI_NOOP);
731 if (IS_ERR(rq)) {
732 ret = PTR_ERR(rq);
733 goto err_fini;
734 }
740 735
741 if (!igt_wait_for_spinner(spin, rq)) { 736 i915_request_add(rq);
742 pr_err("%s: Spinner failed to start!\n", name);
743 igt_spinner_end(spin);
744 igt_spinner_fini(spin);
745 kfree(spin);
746 ret = -ETIMEDOUT;
747 goto out;
748 }
749 737
750 *spin_out = spin; 738 if (!igt_wait_for_spinner(*spin, rq)) {
739 pr_err("%s: Spinner failed to start!\n", name);
740 ret = -ETIMEDOUT;
741 goto err_end;
751 } 742 }
752 743
753out: 744 return 0;
745
746err_end:
747 igt_spinner_end(*spin);
748err_fini:
749 igt_spinner_fini(*spin);
750err_free:
751 kfree(fetch_and_zero(spin));
754 return ret; 752 return ret;
755} 753}
756 754
@@ -897,22 +895,23 @@ __sseu_test(struct drm_i915_private *i915,
897 895
898 ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin); 896 ret = __sseu_prepare(i915, name, flags, ctx, engine, &spin);
899 if (ret) 897 if (ret)
900 goto out; 898 goto out_context;
901 899
902 ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu); 900 ret = __i915_gem_context_reconfigure_sseu(ctx, engine, sseu);
903 if (ret) 901 if (ret)
904 goto out; 902 goto out_spin;
905 903
906 ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj, 904 ret = __sseu_finish(i915, name, flags, ctx, kctx, engine, obj,
907 hweight32(sseu.slice_mask), spin); 905 hweight32(sseu.slice_mask), spin);
908 906
909out: 907out_spin:
910 if (spin) { 908 if (spin) {
911 igt_spinner_end(spin); 909 igt_spinner_end(spin);
912 igt_spinner_fini(spin); 910 igt_spinner_fini(spin);
913 kfree(spin); 911 kfree(spin);
914 } 912 }
915 913
914out_context:
916 kernel_context_close(kctx); 915 kernel_context_close(kctx);
917 916
918 return ret; 917 return ret;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index 32dce7176f63..b9b0ea4e2404 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
455 struct i915_gem_context *ctx; 455 struct i915_gem_context *ctx;
456 456
457 ctx = live_context(i915, file); 457 ctx = live_context(i915, file);
458 if (!ctx) 458 if (IS_ERR(ctx))
459 break; 459 break;
460 460
461 /* We will need some GGTT space for the rq's context */ 461 /* We will need some GGTT space for the rq's context */
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 2281ed3eb774..8a4ebcb6405c 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
337 337
338 ret = drm_dev_register(drm, 0); 338 ret = drm_dev_register(drm, 0);
339 if (ret) 339 if (ret)
340 goto free_drm; 340 goto uninstall_irq;
341 341
342 drm_fbdev_generic_setup(drm, 32); 342 drm_fbdev_generic_setup(drm, 32);
343 343
344 return 0; 344 return 0;
345 345
346uninstall_irq:
347 drm_irq_uninstall(drm);
346free_drm: 348free_drm:
347 drm_dev_put(drm); 349 drm_dev_put(drm);
348 350
@@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev)
356 358
357static void meson_drv_unbind(struct device *dev) 359static void meson_drv_unbind(struct device *dev)
358{ 360{
359 struct drm_device *drm = dev_get_drvdata(dev); 361 struct meson_drm *priv = dev_get_drvdata(dev);
360 struct meson_drm *priv = drm->dev_private; 362 struct drm_device *drm = priv->drm;
361 363
362 if (priv->canvas) { 364 if (priv->canvas) {
363 meson_canvas_free(priv->canvas, priv->canvas_id_osd1); 365 meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
@@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev)
367 } 369 }
368 370
369 drm_dev_unregister(drm); 371 drm_dev_unregister(drm);
372 drm_irq_uninstall(drm);
370 drm_kms_helper_poll_fini(drm); 373 drm_kms_helper_poll_fini(drm);
371 drm_mode_config_cleanup(drm); 374 drm_mode_config_cleanup(drm);
372 drm_dev_put(drm); 375 drm_dev_put(drm);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index e28814f4ea6c..563953ec6ad0 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
569 DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode)); 569 DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
570 570
571 /* If sink max TMDS clock, we reject the mode */ 571 /* If sink max TMDS clock, we reject the mode */
572 if (mode->clock > connector->display_info.max_tmds_clock) 572 if (connector->display_info.max_tmds_clock &&
573 mode->clock > connector->display_info.max_tmds_clock)
573 return MODE_BAD; 574 return MODE_BAD;
574 575
575 /* Check against non-VIC supported modes */ 576 /* Check against non-VIC supported modes */
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 88a52f6b39fe..7dfbbbc1beea 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -181,7 +181,7 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
181 } 181 }
182 182
183 ret = pm_runtime_get_sync(drm->dev); 183 ret = pm_runtime_get_sync(drm->dev);
184 if (IS_ERR_VALUE(ret) && ret != -EACCES) 184 if (ret < 0 && ret != -EACCES)
185 return ret; 185 return ret;
186 ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args)); 186 ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
187 pm_runtime_put_autosuspend(drm->dev); 187 pm_runtime_put_autosuspend(drm->dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index aa9fec80492d..40c47d6a7d78 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -100,12 +100,10 @@ static void
100nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page) 100nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
101{ 101{
102 struct nouveau_dmem_chunk *chunk; 102 struct nouveau_dmem_chunk *chunk;
103 struct nouveau_drm *drm;
104 unsigned long idx; 103 unsigned long idx;
105 104
106 chunk = (void *)hmm_devmem_page_get_drvdata(page); 105 chunk = (void *)hmm_devmem_page_get_drvdata(page);
107 idx = page_to_pfn(page) - chunk->pfn_first; 106 idx = page_to_pfn(page) - chunk->pfn_first;
108 drm = chunk->drm;
109 107
110 /* 108 /*
111 * FIXME: 109 * FIXME:
@@ -456,11 +454,6 @@ nouveau_dmem_resume(struct nouveau_drm *drm)
456 /* FIXME handle pin failure */ 454 /* FIXME handle pin failure */
457 WARN_ON(ret); 455 WARN_ON(ret);
458 } 456 }
459 list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
460 ret = nouveau_bo_pin(chunk->bo, TTM_PL_FLAG_VRAM, false);
461 /* FIXME handle pin failure */
462 WARN_ON(ret);
463 }
464 mutex_unlock(&drm->dmem->mutex); 457 mutex_unlock(&drm->dmem->mutex);
465} 458}
466 459
@@ -479,9 +472,6 @@ nouveau_dmem_suspend(struct nouveau_drm *drm)
479 list_for_each_entry (chunk, &drm->dmem->chunk_full, list) { 472 list_for_each_entry (chunk, &drm->dmem->chunk_full, list) {
480 nouveau_bo_unpin(chunk->bo); 473 nouveau_bo_unpin(chunk->bo);
481 } 474 }
482 list_for_each_entry (chunk, &drm->dmem->chunk_empty, list) {
483 nouveau_bo_unpin(chunk->bo);
484 }
485 mutex_unlock(&drm->dmem->mutex); 475 mutex_unlock(&drm->dmem->mutex);
486} 476}
487 477
@@ -623,7 +613,7 @@ nouveau_dmem_init(struct nouveau_drm *drm)
623 */ 613 */
624 drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops, 614 drm->dmem->devmem = hmm_devmem_add(&nouveau_dmem_devmem_ops,
625 device, size); 615 device, size);
626 if (drm->dmem->devmem == NULL) { 616 if (IS_ERR(drm->dmem->devmem)) {
627 kfree(drm->dmem); 617 kfree(drm->dmem);
628 drm->dmem = NULL; 618 drm->dmem = NULL;
629 return; 619 return;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index bb81e310eb6d..578d867a81d5 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -79,6 +79,10 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
79 if (ret) 79 if (ret)
80 goto free_dev; 80 goto free_dev;
81 81
82 ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
83 if (ret)
84 goto disable_pci;
85
82 ret = qxl_device_init(qdev, &qxl_driver, pdev); 86 ret = qxl_device_init(qdev, &qxl_driver, pdev);
83 if (ret) 87 if (ret)
84 goto disable_pci; 88 goto disable_pci;
@@ -94,7 +98,6 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
94 if (ret) 98 if (ret)
95 goto modeset_cleanup; 99 goto modeset_cleanup;
96 100
97 drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl");
98 drm_fbdev_generic_setup(&qdev->ddev, 32); 101 drm_fbdev_generic_setup(&qdev->ddev, 32);
99 return 0; 102 return 0;
100 103
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index c7d4c6073ea5..0d4ade9d4722 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop)
541 clk_disable(vop->hclk); 541 clk_disable(vop->hclk);
542} 542}
543 543
544static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
545{
546 if (win->phy->scl && win->phy->scl->ext) {
547 VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
548 VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
549 VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
550 VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
551 }
552
553 VOP_WIN_SET(vop, win, enable, 0);
554}
555
544static int vop_enable(struct drm_crtc *crtc) 556static int vop_enable(struct drm_crtc *crtc)
545{ 557{
546 struct vop *vop = to_vop(crtc); 558 struct vop *vop = to_vop(crtc);
@@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc)
586 struct vop_win *vop_win = &vop->win[i]; 598 struct vop_win *vop_win = &vop->win[i];
587 const struct vop_win_data *win = vop_win->data; 599 const struct vop_win_data *win = vop_win->data;
588 600
589 VOP_WIN_SET(vop, win, enable, 0); 601 vop_win_disable(vop, win);
590 } 602 }
591 spin_unlock(&vop->reg_lock); 603 spin_unlock(&vop->reg_lock);
592 604
@@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
735 747
736 spin_lock(&vop->reg_lock); 748 spin_lock(&vop->reg_lock);
737 749
738 VOP_WIN_SET(vop, win, enable, 0); 750 vop_win_disable(vop, win);
739 751
740 spin_unlock(&vop->reg_lock); 752 spin_unlock(&vop->reg_lock);
741} 753}
@@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop)
1622 int channel = i * 2 + 1; 1634 int channel = i * 2 + 1;
1623 1635
1624 VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel); 1636 VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
1625 VOP_WIN_SET(vop, win, enable, 0); 1637 vop_win_disable(vop, win);
1626 VOP_WIN_SET(vop, win, gate, 1); 1638 VOP_WIN_SET(vop, win, gate, 1);
1627 } 1639 }
1628 1640
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index ba9b3cfb8c3d..b3436c2aed68 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
378static void tegra_shared_plane_atomic_disable(struct drm_plane *plane, 378static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
379 struct drm_plane_state *old_state) 379 struct drm_plane_state *old_state)
380{ 380{
381 struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
382 struct tegra_plane *p = to_tegra_plane(plane); 381 struct tegra_plane *p = to_tegra_plane(plane);
382 struct tegra_dc *dc;
383 u32 value; 383 u32 value;
384 384
385 /* rien ne va plus */ 385 /* rien ne va plus */
386 if (!old_state || !old_state->crtc) 386 if (!old_state || !old_state->crtc)
387 return; 387 return;
388 388
389 dc = to_tegra_dc(old_state->crtc);
390
389 /* 391 /*
390 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even 392 * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
391 * on planes that are already disabled. Make sure we fallback to the 393 * on planes that are already disabled. Make sure we fallback to the
diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c
index 39bfed9623de..982ce37ecde1 100644
--- a/drivers/gpu/drm/tegra/vic.c
+++ b/drivers/gpu/drm/tegra/vic.c
@@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic)
106 if (vic->booted) 106 if (vic->booted)
107 return 0; 107 return 0;
108 108
109#ifdef CONFIG_IOMMU_API
109 if (vic->config->supports_sid) { 110 if (vic->config->supports_sid) {
110 struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev); 111 struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
111 u32 value; 112 u32 value;
@@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic)
121 vic_writel(vic, value, VIC_THI_STREAMID1); 122 vic_writel(vic, value, VIC_THI_STREAMID1);
122 } 123 }
123 } 124 }
125#endif
124 126
125 /* setup clockgating registers */ 127 /* setup clockgating registers */
126 vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) | 128 vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index 66885c24590f..c1bd5e3d9e4a 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -18,18 +18,19 @@
18#include "udl_connector.h" 18#include "udl_connector.h"
19#include "udl_drv.h" 19#include "udl_drv.h"
20 20
21static bool udl_get_edid_block(struct udl_device *udl, int block_idx, 21static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
22 u8 *buff) 22 size_t len)
23{ 23{
24 int ret, i; 24 int ret, i;
25 u8 *read_buff; 25 u8 *read_buff;
26 struct udl_device *udl = data;
26 27
27 read_buff = kmalloc(2, GFP_KERNEL); 28 read_buff = kmalloc(2, GFP_KERNEL);
28 if (!read_buff) 29 if (!read_buff)
29 return false; 30 return -1;
30 31
31 for (i = 0; i < EDID_LENGTH; i++) { 32 for (i = 0; i < len; i++) {
32 int bval = (i + block_idx * EDID_LENGTH) << 8; 33 int bval = (i + block * EDID_LENGTH) << 8;
33 ret = usb_control_msg(udl->udev, 34 ret = usb_control_msg(udl->udev,
34 usb_rcvctrlpipe(udl->udev, 0), 35 usb_rcvctrlpipe(udl->udev, 0),
35 (0x02), (0x80 | (0x02 << 5)), bval, 36 (0x02), (0x80 | (0x02 << 5)), bval,
@@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
37 if (ret < 1) { 38 if (ret < 1) {
38 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret); 39 DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
39 kfree(read_buff); 40 kfree(read_buff);
40 return false; 41 return -1;
41 } 42 }
42 buff[i] = read_buff[1]; 43 buf[i] = read_buff[1];
43 } 44 }
44 45
45 kfree(read_buff); 46 kfree(read_buff);
46 return true; 47 return 0;
47}
48
49static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
50 int *result_buff_size)
51{
52 int i, extensions;
53 u8 *block_buff = NULL, *buff_ptr;
54
55 block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
56 if (block_buff == NULL)
57 return false;
58
59 if (udl_get_edid_block(udl, 0, block_buff) &&
60 memchr_inv(block_buff, 0, EDID_LENGTH)) {
61 extensions = ((struct edid *)block_buff)->extensions;
62 if (extensions > 0) {
63 /* we have to read all extensions one by one */
64 *result_buff_size = EDID_LENGTH * (extensions + 1);
65 *result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
66 buff_ptr = *result_buff;
67 if (buff_ptr == NULL) {
68 kfree(block_buff);
69 return false;
70 }
71 memcpy(buff_ptr, block_buff, EDID_LENGTH);
72 kfree(block_buff);
73 buff_ptr += EDID_LENGTH;
74 for (i = 1; i < extensions; ++i) {
75 if (udl_get_edid_block(udl, i, buff_ptr)) {
76 buff_ptr += EDID_LENGTH;
77 } else {
78 kfree(*result_buff);
79 *result_buff = NULL;
80 return false;
81 }
82 }
83 return true;
84 }
85 /* we have only base edid block */
86 *result_buff = block_buff;
87 *result_buff_size = EDID_LENGTH;
88 return true;
89 }
90
91 kfree(block_buff);
92
93 return false;
94} 48}
95 49
96static int udl_get_modes(struct drm_connector *connector) 50static int udl_get_modes(struct drm_connector *connector)
@@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
122static enum drm_connector_status 76static enum drm_connector_status
123udl_detect(struct drm_connector *connector, bool force) 77udl_detect(struct drm_connector *connector, bool force)
124{ 78{
125 u8 *edid_buff = NULL;
126 int edid_buff_size = 0;
127 struct udl_device *udl = connector->dev->dev_private; 79 struct udl_device *udl = connector->dev->dev_private;
128 struct udl_drm_connector *udl_connector = 80 struct udl_drm_connector *udl_connector =
129 container_of(connector, 81 container_of(connector,
@@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force)
136 udl_connector->edid = NULL; 88 udl_connector->edid = NULL;
137 } 89 }
138 90
139 91 udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
140 if (!udl_get_edid(udl, &edid_buff, &edid_buff_size)) 92 if (!udl_connector->edid)
141 return connector_status_disconnected; 93 return connector_status_disconnected;
142 94
143 udl_connector->edid = (struct edid *)edid_buff;
144
145 return connector_status_connected; 95 return connector_status_connected;
146} 96}
147 97
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index d5a23295dd80..bb7b58407039 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -224,7 +224,7 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
224 *offset = drm_vma_node_offset_addr(&gobj->base.vma_node); 224 *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
225 225
226out: 226out:
227 drm_gem_object_put(&gobj->base); 227 drm_gem_object_put_unlocked(&gobj->base);
228unlock: 228unlock:
229 mutex_unlock(&udl->gem_lock); 229 mutex_unlock(&udl->gem_lock);
230 return ret; 230 return ret;
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 5930facd6d2d..11a8f99ba18c 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
191 ret = drm_gem_handle_create(file, &obj->base, handle); 191 ret = drm_gem_handle_create(file, &obj->base, handle);
192 drm_gem_object_put_unlocked(&obj->base); 192 drm_gem_object_put_unlocked(&obj->base);
193 if (ret) 193 if (ret)
194 goto err; 194 return ERR_PTR(ret);
195 195
196 return &obj->base; 196 return &obj->base;
197
198err:
199 __vgem_gem_destroy(obj);
200 return ERR_PTR(ret);
201} 197}
202 198
203static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 199static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index 138b0bb325cf..69048e73377d 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
111 111
112 ret = drm_gem_handle_create(file, &obj->gem, handle); 112 ret = drm_gem_handle_create(file, &obj->gem, handle);
113 drm_gem_object_put_unlocked(&obj->gem); 113 drm_gem_object_put_unlocked(&obj->gem);
114 if (ret) { 114 if (ret)
115 drm_gem_object_release(&obj->gem);
116 kfree(obj);
117 return ERR_PTR(ret); 115 return ERR_PTR(ret);
118 }
119 116
120 return &obj->gem; 117 return &obj->gem;
121} 118}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index b913a56f3426..2a9112515f46 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -564,11 +564,9 @@ static int vmw_fb_set_par(struct fb_info *info)
564 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 564 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
565 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) 565 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
566 }; 566 };
567 struct drm_display_mode *old_mode;
568 struct drm_display_mode *mode; 567 struct drm_display_mode *mode;
569 int ret; 568 int ret;
570 569
571 old_mode = par->set_mode;
572 mode = drm_mode_duplicate(vmw_priv->dev, &new_mode); 570 mode = drm_mode_duplicate(vmw_priv->dev, &new_mode);
573 if (!mode) { 571 if (!mode) {
574 DRM_ERROR("Could not create new fb mode.\n"); 572 DRM_ERROR("Could not create new fb mode.\n");
@@ -579,11 +577,7 @@ static int vmw_fb_set_par(struct fb_info *info)
579 mode->vdisplay = var->yres; 577 mode->vdisplay = var->yres;
580 vmw_guess_mode_timing(mode); 578 vmw_guess_mode_timing(mode);
581 579
582 if (old_mode && drm_mode_equal(old_mode, mode)) { 580 if (!vmw_kms_validate_mode_vram(vmw_priv,
583 drm_mode_destroy(vmw_priv->dev, mode);
584 mode = old_mode;
585 old_mode = NULL;
586 } else if (!vmw_kms_validate_mode_vram(vmw_priv,
587 mode->hdisplay * 581 mode->hdisplay *
588 DIV_ROUND_UP(var->bits_per_pixel, 8), 582 DIV_ROUND_UP(var->bits_per_pixel, 8),
589 mode->vdisplay)) { 583 mode->vdisplay)) {
@@ -620,8 +614,8 @@ static int vmw_fb_set_par(struct fb_info *info)
620 schedule_delayed_work(&par->local_work, 0); 614 schedule_delayed_work(&par->local_work, 0);
621 615
622out_unlock: 616out_unlock:
623 if (old_mode) 617 if (par->set_mode)
624 drm_mode_destroy(vmw_priv->dev, old_mode); 618 drm_mode_destroy(vmw_priv->dev, par->set_mode);
625 par->set_mode = mode; 619 par->set_mode = mode;
626 620
627 mutex_unlock(&par->bo_mutex); 621 mutex_unlock(&par->bo_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index b93c558dd86e..7da752ca1c34 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -57,7 +57,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
57 57
58 id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); 58 id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
59 if (id < 0) 59 if (id < 0)
60 return id; 60 return (id != -ENOMEM ? 0 : id);
61 61
62 spin_lock(&gman->lock); 62 spin_lock(&gman->lock);
63 63
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index dc8e039bfab5..f2f3ef8af271 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -48,6 +48,8 @@
48#include <linux/miscdevice.h> 48#include <linux/miscdevice.h>
49#include <linux/slab.h> 49#include <linux/slab.h>
50#include <linux/screen_info.h> 50#include <linux/screen_info.h>
51#include <linux/vt.h>
52#include <linux/console.h>
51 53
52#include <linux/uaccess.h> 54#include <linux/uaccess.h>
53 55
@@ -168,6 +170,53 @@ void vga_set_default_device(struct pci_dev *pdev)
168 vga_default = pci_dev_get(pdev); 170 vga_default = pci_dev_get(pdev);
169} 171}
170 172
173/**
174 * vga_remove_vgacon - deactivete vga console
175 *
176 * Unbind and unregister vgacon in case pdev is the default vga
177 * device. Can be called by gpu drivers on initialization to make
178 * sure vga register access done by vgacon will not disturb the
179 * device.
180 *
181 * @pdev: pci device.
182 */
183#if !defined(CONFIG_VGA_CONSOLE)
184int vga_remove_vgacon(struct pci_dev *pdev)
185{
186 return 0;
187}
188#elif !defined(CONFIG_DUMMY_CONSOLE)
189int vga_remove_vgacon(struct pci_dev *pdev)
190{
191 return -ENODEV;
192}
193#else
194int vga_remove_vgacon(struct pci_dev *pdev)
195{
196 int ret = 0;
197
198 if (pdev != vga_default)
199 return 0;
200 vgaarb_info(&pdev->dev, "deactivate vga console\n");
201
202 console_lock();
203 if (con_is_bound(&vga_con))
204 ret = do_take_over_console(&dummy_con, 0,
205 MAX_NR_CONSOLES - 1, 1);
206 if (ret == 0) {
207 ret = do_unregister_con_driver(&vga_con);
208
209 /* Ignore "already unregistered". */
210 if (ret == -ENODEV)
211 ret = 0;
212 }
213 console_unlock();
214
215 return ret;
216}
217#endif
218EXPORT_SYMBOL(vga_remove_vgacon);
219
171static inline void vga_irq_set_state(struct vga_device *vgadev, bool state) 220static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
172{ 221{
173 if (vgadev->irq_set_state) 222 if (vgadev->irq_set_state)
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 6ca8d322b487..4ca0cdfa6b33 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -150,6 +150,7 @@ config HID_ASUS
150 tristate "Asus" 150 tristate "Asus"
151 depends on LEDS_CLASS 151 depends on LEDS_CLASS
152 depends on ASUS_WMI || ASUS_WMI=n 152 depends on ASUS_WMI || ASUS_WMI=n
153 select POWER_SUPPLY
153 ---help--- 154 ---help---
154 Support for Asus notebook built-in keyboard and touchpad via i2c, and 155 Support for Asus notebook built-in keyboard and touchpad via i2c, and
155 the Asus Republic of Gamers laptop keyboard special keys. 156 the Asus Republic of Gamers laptop keyboard special keys.
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9993b692598f..860e21ec6a49 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
1301u32 hid_field_extract(const struct hid_device *hid, u8 *report, 1301u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1302 unsigned offset, unsigned n) 1302 unsigned offset, unsigned n)
1303{ 1303{
1304 if (n > 32) { 1304 if (n > 256) {
1305 hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n", 1305 hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
1306 n, current->comm); 1306 n, current->comm);
1307 n = 32; 1307 n = 256;
1308 } 1308 }
1309 1309
1310 return __extract(report, offset, n); 1310 return __extract(report, offset, n);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index ac9fda1b5a72..1384e57182af 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
1060 seq_printf(f, "\n\n"); 1060 seq_printf(f, "\n\n");
1061 1061
1062 /* dump parsed data and input mappings */ 1062 /* dump parsed data and input mappings */
1063 if (down_interruptible(&hdev->driver_input_lock))
1064 return 0;
1065
1063 hid_dump_device(hdev, f); 1066 hid_dump_device(hdev, f);
1064 seq_printf(f, "\n"); 1067 seq_printf(f, "\n");
1065 hid_dump_input_mapping(hdev, f); 1068 hid_dump_input_mapping(hdev, f);
1066 1069
1070 up(&hdev->driver_input_lock);
1071
1067 return 0; 1072 return 0;
1068} 1073}
1069 1074
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b6d93f4ad037..adce58f24f76 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1083,6 +1083,7 @@
1083#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 1083#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
1084#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 1084#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
1085#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 1085#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
1086#define I2C_DEVICE_ID_SYNAPTICS_7E7E 0x7e7e
1086 1087
1087#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 1088#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
1088#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855 1089#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b10b1922c5bd..1fce0076e7dc 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -998,6 +998,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
998 case 0x1b8: map_key_clear(KEY_VIDEO); break; 998 case 0x1b8: map_key_clear(KEY_VIDEO); break;
999 case 0x1bc: map_key_clear(KEY_MESSENGER); break; 999 case 0x1bc: map_key_clear(KEY_MESSENGER); break;
1000 case 0x1bd: map_key_clear(KEY_INFO); break; 1000 case 0x1bd: map_key_clear(KEY_INFO); break;
1001 case 0x1cb: map_key_clear(KEY_ASSISTANT); break;
1001 case 0x201: map_key_clear(KEY_NEW); break; 1002 case 0x201: map_key_clear(KEY_NEW); break;
1002 case 0x202: map_key_clear(KEY_OPEN); break; 1003 case 0x202: map_key_clear(KEY_OPEN); break;
1003 case 0x203: map_key_clear(KEY_CLOSE); break; 1004 case 0x203: map_key_clear(KEY_CLOSE); break;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 15ed6177a7a3..199cc256e9d9 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
2111 kfree(data); 2111 kfree(data);
2112 return -ENOMEM; 2112 return -ENOMEM;
2113 } 2113 }
2114 data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
2115 if (!data->wq) {
2116 kfree(data->effect_ids);
2117 kfree(data);
2118 return -ENOMEM;
2119 }
2120
2114 data->hidpp = hidpp; 2121 data->hidpp = hidpp;
2115 data->feature_index = feature_index; 2122 data->feature_index = feature_index;
2116 data->version = version; 2123 data->version = version;
@@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
2155 /* ignore boost value at response.fap.params[2] */ 2162 /* ignore boost value at response.fap.params[2] */
2156 2163
2157 /* init the hardware command queue */ 2164 /* init the hardware command queue */
2158 data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
2159 atomic_set(&data->workqueue_size, 0); 2165 atomic_set(&data->workqueue_size, 0);
2160 2166
2161 /* initialize with zero autocenter to get wheel in usable state */ 2167 /* initialize with zero autocenter to get wheel in usable state */
@@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
2608 input_report_rel(mydata->input, REL_Y, v); 2614 input_report_rel(mydata->input, REL_Y, v);
2609 2615
2610 v = hid_snto32(data[6], 8); 2616 v = hid_snto32(data[6], 8);
2611 hidpp_scroll_counter_handle_scroll( 2617 if (v != 0)
2612 &hidpp->vertical_wheel_counter, v); 2618 hidpp_scroll_counter_handle_scroll(
2619 &hidpp->vertical_wheel_counter, v);
2613 2620
2614 input_sync(mydata->input); 2621 input_sync(mydata->input);
2615 } 2622 }
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 953908f2267c..77ffba48cc73 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = {
715 { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, 715 { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
716 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, 716 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
717 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, 717 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
718 { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
719 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, 718 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
720 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, 719 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
721 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, 720 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = {
855 { } 854 { }
856}; 855};
857 856
858/** 857/*
859 * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer 858 * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
860 * 859 *
861 * There are composite devices for which we want to ignore only a certain 860 * There are composite devices for which we want to ignore only a certain
@@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev)
996 if (hdev->product == 0x0401 && 995 if (hdev->product == 0x0401 &&
997 strncmp(hdev->name, "ELAN0800", 8) != 0) 996 strncmp(hdev->name, "ELAN0800", 8) != 0)
998 return true; 997 return true;
998 /* Same with product id 0x0400 */
999 if (hdev->product == 0x0400 &&
1000 strncmp(hdev->name, "QTEC0001", 8) != 0)
1001 return true;
999 break; 1002 break;
1000 } 1003 }
1001 1004
@@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
1042 } 1045 }
1043 1046
1044 if (bl_entry != NULL) 1047 if (bl_entry != NULL)
1045 dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n", 1048 dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n",
1046 bl_entry->driver_data, bl_entry->vendor, 1049 bl_entry->driver_data, bl_entry->vendor,
1047 bl_entry->product); 1050 bl_entry->product);
1048 1051
@@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
1209 quirks |= bl_entry->driver_data; 1212 quirks |= bl_entry->driver_data;
1210 1213
1211 if (quirks) 1214 if (quirks)
1212 dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n", 1215 dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n",
1213 quirks, hdev->vendor, hdev->product); 1216 quirks, hdev->vendor, hdev->product);
1214 return quirks; 1217 return quirks;
1215} 1218}
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 8141cadfca0e..8dae0f9b819e 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam)
499static int steam_register(struct steam_device *steam) 499static int steam_register(struct steam_device *steam)
500{ 500{
501 int ret; 501 int ret;
502 bool client_opened;
502 503
503 /* 504 /*
504 * This function can be called several times in a row with the 505 * This function can be called several times in a row with the
@@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam)
511 * Unlikely, but getting the serial could fail, and it is not so 512 * Unlikely, but getting the serial could fail, and it is not so
512 * important, so make up a serial number and go on. 513 * important, so make up a serial number and go on.
513 */ 514 */
515 mutex_lock(&steam->mutex);
514 if (steam_get_serial(steam) < 0) 516 if (steam_get_serial(steam) < 0)
515 strlcpy(steam->serial_no, "XXXXXXXXXX", 517 strlcpy(steam->serial_no, "XXXXXXXXXX",
516 sizeof(steam->serial_no)); 518 sizeof(steam->serial_no));
519 mutex_unlock(&steam->mutex);
517 520
518 hid_info(steam->hdev, "Steam Controller '%s' connected", 521 hid_info(steam->hdev, "Steam Controller '%s' connected",
519 steam->serial_no); 522 steam->serial_no);
@@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam)
528 } 531 }
529 532
530 mutex_lock(&steam->mutex); 533 mutex_lock(&steam->mutex);
531 if (!steam->client_opened) { 534 client_opened = steam->client_opened;
535 if (!client_opened)
532 steam_set_lizard_mode(steam, lizard_mode); 536 steam_set_lizard_mode(steam, lizard_mode);
537 mutex_unlock(&steam->mutex);
538
539 if (!client_opened)
533 ret = steam_input_register(steam); 540 ret = steam_input_register(steam);
534 } else { 541 else
535 ret = 0; 542 ret = 0;
536 }
537 mutex_unlock(&steam->mutex);
538 543
539 return ret; 544 return ret;
540} 545}
@@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev)
630{ 635{
631 struct steam_device *steam = hdev->driver_data; 636 struct steam_device *steam = hdev->driver_data;
632 637
638 unsigned long flags;
639 bool connected;
640
641 spin_lock_irqsave(&steam->lock, flags);
642 connected = steam->connected;
643 spin_unlock_irqrestore(&steam->lock, flags);
644
633 mutex_lock(&steam->mutex); 645 mutex_lock(&steam->mutex);
634 steam->client_opened = false; 646 steam->client_opened = false;
647 if (connected)
648 steam_set_lizard_mode(steam, lizard_mode);
635 mutex_unlock(&steam->mutex); 649 mutex_unlock(&steam->mutex);
636 650
637 if (steam->connected) { 651 if (connected)
638 steam_set_lizard_mode(steam, lizard_mode);
639 steam_input_register(steam); 652 steam_input_register(steam);
640 }
641} 653}
642 654
643static int steam_client_ll_raw_request(struct hid_device *hdev, 655static int steam_client_ll_raw_request(struct hid_device *hdev,
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 7710d9f957da..0187c9f8fc22 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
735 goto cleanup; 735 goto cleanup;
736 } 736 }
737 rc = usb_string(udev, 201, ver_ptr, ver_len); 737 rc = usb_string(udev, 201, ver_ptr, ver_len);
738 if (ver_ptr == NULL) {
739 rc = -ENOMEM;
740 goto cleanup;
741 }
742 if (rc == -EPIPE) { 738 if (rc == -EPIPE) {
743 *ver_ptr = '\0'; 739 *ver_ptr = '\0';
744 } else if (rc < 0) { 740 } else if (rc < 0) {
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 90164fed08d3..4d1f24ee249c 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -184,6 +184,8 @@ static const struct i2c_hid_quirks {
184 I2C_HID_QUIRK_NO_RUNTIME_PM }, 184 I2C_HID_QUIRK_NO_RUNTIME_PM },
185 { USB_VENDOR_ID_ELAN, HID_ANY_ID, 185 { USB_VENDOR_ID_ELAN, HID_ANY_ID,
186 I2C_HID_QUIRK_BOGUS_IRQ }, 186 I2C_HID_QUIRK_BOGUS_IRQ },
187 { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E,
188 I2C_HID_QUIRK_NO_RUNTIME_PM },
187 { 0, 0 } 189 { 0, 0 }
188}; 190};
189 191
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6f929bfa9fcd..d0f1dfe2bcbb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1759,6 +1759,7 @@ config SENSORS_VT8231
1759config SENSORS_W83773G 1759config SENSORS_W83773G
1760 tristate "Nuvoton W83773G" 1760 tristate "Nuvoton W83773G"
1761 depends on I2C 1761 depends on I2C
1762 select REGMAP_I2C
1762 help 1763 help
1763 If you say yes here you get support for the Nuvoton W83773G hardware 1764 If you say yes here you get support for the Nuvoton W83773G hardware
1764 monitoring chip. 1765 monitoring chip.
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index e4f9f7ce92fa..f9abeeeead9e 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = {
640}; 640};
641 641
642static const u32 ntc_temp_config[] = { 642static const u32 ntc_temp_config[] = {
643 HWMON_T_INPUT, HWMON_T_TYPE, 643 HWMON_T_INPUT | HWMON_T_TYPE,
644 0 644 0
645}; 645};
646 646
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index b91a80abf724..4679acb4918e 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
890 s++; 890 s++;
891 } 891 }
892 } 892 }
893
894 s = (sensors->power.num_sensors * 4) + 1;
893 } else { 895 } else {
894 for (i = 0; i < sensors->power.num_sensors; ++i) { 896 for (i = 0; i < sensors->power.num_sensors; ++i) {
895 s = i + 1; 897 s = i + 1;
@@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
918 show_power, NULL, 3, i); 920 show_power, NULL, 3, i);
919 attr++; 921 attr++;
920 } 922 }
921 }
922 923
923 if (sensors->caps.num_sensors >= 1) {
924 s = sensors->power.num_sensors + 1; 924 s = sensors->power.num_sensors + 1;
925 }
925 926
927 if (sensors->caps.num_sensors >= 1) {
926 snprintf(attr->name, sizeof(attr->name), "power%d_label", s); 928 snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
927 attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 929 attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
928 0, 0); 930 0, 0);
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 9a63e87ea5f3..be302ec5f66b 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -871,7 +871,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
871 } 871 }
872 872
873 pm_runtime_put(&adev->dev); 873 pm_runtime_put(&adev->dev);
874 dev_info(dev, "%s initialized\n", (char *)id->data); 874 dev_info(dev, "%s initialized\n", (char *)coresight_get_uci_data(id));
875 if (boot_enable) { 875 if (boot_enable) {
876 coresight_enable(drvdata->csdev); 876 coresight_enable(drvdata->csdev);
877 drvdata->boot_enable = true; 877 drvdata->boot_enable = true;
@@ -915,36 +915,18 @@ static const struct dev_pm_ops etm_dev_pm_ops = {
915}; 915};
916 916
917static const struct amba_id etm_ids[] = { 917static const struct amba_id etm_ids[] = {
918 { /* ETM 3.3 */ 918 /* ETM 3.3 */
919 .id = 0x000bb921, 919 CS_AMBA_ID_DATA(0x000bb921, "ETM 3.3"),
920 .mask = 0x000fffff, 920 /* ETM 3.5 - Cortex-A5 */
921 .data = "ETM 3.3", 921 CS_AMBA_ID_DATA(0x000bb955, "ETM 3.5"),
922 }, 922 /* ETM 3.5 */
923 { /* ETM 3.5 - Cortex-A5 */ 923 CS_AMBA_ID_DATA(0x000bb956, "ETM 3.5"),
924 .id = 0x000bb955, 924 /* PTM 1.0 */
925 .mask = 0x000fffff, 925 CS_AMBA_ID_DATA(0x000bb950, "PTM 1.0"),
926 .data = "ETM 3.5", 926 /* PTM 1.1 */
927 }, 927 CS_AMBA_ID_DATA(0x000bb95f, "PTM 1.1"),
928 { /* ETM 3.5 */ 928 /* PTM 1.1 Qualcomm */
929 .id = 0x000bb956, 929 CS_AMBA_ID_DATA(0x000b006f, "PTM 1.1"),
930 .mask = 0x000fffff,
931 .data = "ETM 3.5",
932 },
933 { /* PTM 1.0 */
934 .id = 0x000bb950,
935 .mask = 0x000fffff,
936 .data = "PTM 1.0",
937 },
938 { /* PTM 1.1 */
939 .id = 0x000bb95f,
940 .mask = 0x000fffff,
941 .data = "PTM 1.1",
942 },
943 { /* PTM 1.1 Qualcomm */
944 .id = 0x000b006f,
945 .mask = 0x000fffff,
946 .data = "PTM 1.1",
947 },
948 { 0, 0}, 930 { 0, 0},
949}; 931};
950 932
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index fe76b176974a..08ce37c9475d 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1068,18 +1068,21 @@ err_arch_supported:
1068 return ret; 1068 return ret;
1069} 1069}
1070 1070
1071#define ETM4x_AMBA_ID(pid) \ 1071static struct amba_cs_uci_id uci_id_etm4[] = {
1072 { \ 1072 {
1073 .id = pid, \ 1073 /* ETMv4 UCI data */
1074 .mask = 0x000fffff, \ 1074 .devarch = 0x47704a13,
1075 .devarch_mask = 0xfff0ffff,
1076 .devtype = 0x00000013,
1075 } 1077 }
1078};
1076 1079
1077static const struct amba_id etm4_ids[] = { 1080static const struct amba_id etm4_ids[] = {
1078 ETM4x_AMBA_ID(0x000bb95d), /* Cortex-A53 */ 1081 CS_AMBA_ID(0x000bb95d), /* Cortex-A53 */
1079 ETM4x_AMBA_ID(0x000bb95e), /* Cortex-A57 */ 1082 CS_AMBA_ID(0x000bb95e), /* Cortex-A57 */
1080 ETM4x_AMBA_ID(0x000bb95a), /* Cortex-A72 */ 1083 CS_AMBA_ID(0x000bb95a), /* Cortex-A72 */
1081 ETM4x_AMBA_ID(0x000bb959), /* Cortex-A73 */ 1084 CS_AMBA_ID(0x000bb959), /* Cortex-A73 */
1082 ETM4x_AMBA_ID(0x000bb9da), /* Cortex-A35 */ 1085 CS_AMBA_UCI_ID(0x000bb9da, uci_id_etm4), /* Cortex-A35 */
1083 {}, 1086 {},
1084}; 1087};
1085 1088
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index b936c6d7e13f..e0684d06e9ee 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -6,6 +6,7 @@
6#ifndef _CORESIGHT_PRIV_H 6#ifndef _CORESIGHT_PRIV_H
7#define _CORESIGHT_PRIV_H 7#define _CORESIGHT_PRIV_H
8 8
9#include <linux/amba/bus.h>
9#include <linux/bitops.h> 10#include <linux/bitops.h>
10#include <linux/io.h> 11#include <linux/io.h>
11#include <linux/coresight.h> 12#include <linux/coresight.h>
@@ -160,4 +161,43 @@ static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; }
160static inline int etm_writel_cp14(u32 off, u32 val) { return 0; } 161static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
161#endif 162#endif
162 163
164/*
165 * Macros and inline functions to handle CoreSight UCI data and driver
166 * private data in AMBA ID table entries, and extract data values.
167 */
168
169/* coresight AMBA ID, no UCI, no driver data: id table entry */
170#define CS_AMBA_ID(pid) \
171 { \
172 .id = pid, \
173 .mask = 0x000fffff, \
174 }
175
176/* coresight AMBA ID, UCI with driver data only: id table entry. */
177#define CS_AMBA_ID_DATA(pid, dval) \
178 { \
179 .id = pid, \
180 .mask = 0x000fffff, \
181 .data = (void *)&(struct amba_cs_uci_id) \
182 { \
183 .data = (void *)dval, \
184 } \
185 }
186
187/* coresight AMBA ID, full UCI structure: id table entry. */
188#define CS_AMBA_UCI_ID(pid, uci_ptr) \
189 { \
190 .id = pid, \
191 .mask = 0x000fffff, \
192 .data = uci_ptr \
193 }
194
195/* extract the data value from a UCI structure given amba_id pointer. */
196static inline void *coresight_get_uci_data(const struct amba_id *id)
197{
198 if (id->data)
199 return ((struct amba_cs_uci_id *)(id->data))->data;
200 return 0;
201}
202
163#endif 203#endif
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index f07825df5c7a..9f8a844ed7aa 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -870,7 +870,7 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
870 870
871 pm_runtime_put(&adev->dev); 871 pm_runtime_put(&adev->dev);
872 872
873 dev_info(dev, "%s initialized\n", (char *)id->data); 873 dev_info(dev, "%s initialized\n", (char *)coresight_get_uci_data(id));
874 return 0; 874 return 0;
875 875
876stm_unregister: 876stm_unregister:
@@ -905,16 +905,8 @@ static const struct dev_pm_ops stm_dev_pm_ops = {
905}; 905};
906 906
907static const struct amba_id stm_ids[] = { 907static const struct amba_id stm_ids[] = {
908 { 908 CS_AMBA_ID_DATA(0x000bb962, "STM32"),
909 .id = 0x000bb962, 909 CS_AMBA_ID_DATA(0x000bb963, "STM500"),
910 .mask = 0x000fffff,
911 .data = "STM32",
912 },
913 {
914 .id = 0x000bb963,
915 .mask = 0x000fffff,
916 .data = "STM500",
917 },
918 { 0, 0}, 910 { 0, 0},
919}; 911};
920 912
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index ea249f0bcd73..2a02da3d630f 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -443,7 +443,8 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
443 desc.type = CORESIGHT_DEV_TYPE_SINK; 443 desc.type = CORESIGHT_DEV_TYPE_SINK;
444 desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER; 444 desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
445 desc.ops = &tmc_etr_cs_ops; 445 desc.ops = &tmc_etr_cs_ops;
446 ret = tmc_etr_setup_caps(drvdata, devid, id->data); 446 ret = tmc_etr_setup_caps(drvdata, devid,
447 coresight_get_uci_data(id));
447 if (ret) 448 if (ret)
448 goto out; 449 goto out;
449 break; 450 break;
@@ -475,26 +476,13 @@ out:
475} 476}
476 477
477static const struct amba_id tmc_ids[] = { 478static const struct amba_id tmc_ids[] = {
478 { 479 CS_AMBA_ID(0x000bb961),
479 .id = 0x000bb961, 480 /* Coresight SoC 600 TMC-ETR/ETS */
480 .mask = 0x000fffff, 481 CS_AMBA_ID_DATA(0x000bb9e8, (unsigned long)CORESIGHT_SOC_600_ETR_CAPS),
481 }, 482 /* Coresight SoC 600 TMC-ETB */
482 { 483 CS_AMBA_ID(0x000bb9e9),
483 /* Coresight SoC 600 TMC-ETR/ETS */ 484 /* Coresight SoC 600 TMC-ETF */
484 .id = 0x000bb9e8, 485 CS_AMBA_ID(0x000bb9ea),
485 .mask = 0x000fffff,
486 .data = (void *)(unsigned long)CORESIGHT_SOC_600_ETR_CAPS,
487 },
488 {
489 /* Coresight SoC 600 TMC-ETB */
490 .id = 0x000bb9e9,
491 .mask = 0x000fffff,
492 },
493 {
494 /* Coresight SoC 600 TMC-ETF */
495 .id = 0x000bb9ea,
496 .mask = 0x000fffff,
497 },
498 { 0, 0}, 486 { 0, 0},
499}; 487};
500 488
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f2c681971201..f8979abb9a19 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -131,6 +131,7 @@ config I2C_I801
131 Cannon Lake (PCH) 131 Cannon Lake (PCH)
132 Cedar Fork (PCH) 132 Cedar Fork (PCH)
133 Ice Lake (PCH) 133 Ice Lake (PCH)
134 Comet Lake (PCH)
134 135
135 This driver can also be built as a module. If so, the module 136 This driver can also be built as a module. If so, the module
136 will be called i2c-i801. 137 will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index ead5e7de3e4d..416f89b8f881 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -86,7 +86,6 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
86 struct i2c_timings *t = &dev->timings; 86 struct i2c_timings *t = &dev->timings;
87 u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0; 87 u32 ss_ht = 0, fp_ht = 0, hs_ht = 0, fs_ht = 0;
88 88
89 dev->adapter.nr = -1;
90 dev->tx_fifo_depth = 32; 89 dev->tx_fifo_depth = 32;
91 dev->rx_fifo_depth = 32; 90 dev->rx_fifo_depth = 32;
92 91
@@ -219,7 +218,7 @@ static void i2c_dw_configure_slave(struct dw_i2c_dev *dev)
219 dev->mode = DW_IC_SLAVE; 218 dev->mode = DW_IC_SLAVE;
220} 219}
221 220
222static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id) 221static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev)
223{ 222{
224 u32 param, tx_fifo_depth, rx_fifo_depth; 223 u32 param, tx_fifo_depth, rx_fifo_depth;
225 224
@@ -233,7 +232,6 @@ static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id)
233 if (!dev->tx_fifo_depth) { 232 if (!dev->tx_fifo_depth) {
234 dev->tx_fifo_depth = tx_fifo_depth; 233 dev->tx_fifo_depth = tx_fifo_depth;
235 dev->rx_fifo_depth = rx_fifo_depth; 234 dev->rx_fifo_depth = rx_fifo_depth;
236 dev->adapter.nr = id;
237 } else if (tx_fifo_depth >= 2) { 235 } else if (tx_fifo_depth >= 2) {
238 dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth, 236 dev->tx_fifo_depth = min_t(u32, dev->tx_fifo_depth,
239 tx_fifo_depth); 237 tx_fifo_depth);
@@ -358,13 +356,14 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
358 div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000); 356 div_u64(clk_khz * t->sda_hold_ns + 500000, 1000000);
359 } 357 }
360 358
361 dw_i2c_set_fifo_size(dev, pdev->id); 359 dw_i2c_set_fifo_size(dev);
362 360
363 adap = &dev->adapter; 361 adap = &dev->adapter;
364 adap->owner = THIS_MODULE; 362 adap->owner = THIS_MODULE;
365 adap->class = I2C_CLASS_DEPRECATED; 363 adap->class = I2C_CLASS_DEPRECATED;
366 ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); 364 ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
367 adap->dev.of_node = pdev->dev.of_node; 365 adap->dev.of_node = pdev->dev.of_node;
366 adap->nr = -1;
368 367
369 dev_pm_set_driver_flags(&pdev->dev, 368 dev_pm_set_driver_flags(&pdev->dev,
370 DPM_FLAG_SMART_PREPARE | 369 DPM_FLAG_SMART_PREPARE |
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c91e145ef5a5..679c6c41f64b 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -71,6 +71,7 @@
71 * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes 71 * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes
72 * Cedar Fork (PCH) 0x18df 32 hard yes yes yes 72 * Cedar Fork (PCH) 0x18df 32 hard yes yes yes
73 * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes 73 * Ice Lake-LP (PCH) 0x34a3 32 hard yes yes yes
74 * Comet Lake (PCH) 0x02a3 32 hard yes yes yes
74 * 75 *
75 * Features supported by this driver: 76 * Features supported by this driver:
76 * Software PEC no 77 * Software PEC no
@@ -240,6 +241,7 @@
240#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 241#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223
241#define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3 242#define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3
242#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323 243#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323
244#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS 0x02a3
243 245
244struct i801_mux_config { 246struct i801_mux_config {
245 char *gpio_chip; 247 char *gpio_chip;
@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = {
1038 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) }, 1040 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
1039 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) }, 1041 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
1040 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) }, 1042 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
1043 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
1041 { 0, } 1044 { 0, }
1042}; 1045};
1043 1046
@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1534 case PCI_DEVICE_ID_INTEL_DNV_SMBUS: 1537 case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
1535 case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS: 1538 case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
1536 case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS: 1539 case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
1540 case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
1537 priv->features |= FEATURE_I2C_BLOCK_READ; 1541 priv->features |= FEATURE_I2C_BLOCK_READ;
1538 priv->features |= FEATURE_IRQ; 1542 priv->features |= FEATURE_IRQ;
1539 priv->features |= FEATURE_SMBUS_PEC; 1543 priv->features |= FEATURE_SMBUS_PEC;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 42fed40198a0..c0c3043b5d61 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1169,11 +1169,13 @@ static int i2c_imx_probe(struct platform_device *pdev)
1169 /* Init DMA config if supported */ 1169 /* Init DMA config if supported */
1170 ret = i2c_imx_dma_request(i2c_imx, phy_addr); 1170 ret = i2c_imx_dma_request(i2c_imx, phy_addr);
1171 if (ret < 0) 1171 if (ret < 0)
1172 goto clk_notifier_unregister; 1172 goto del_adapter;
1173 1173
1174 dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); 1174 dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
1175 return 0; /* Return OK */ 1175 return 0; /* Return OK */
1176 1176
1177del_adapter:
1178 i2c_del_adapter(&i2c_imx->adapter);
1177clk_notifier_unregister: 1179clk_notifier_unregister:
1178 clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb); 1180 clk_notifier_unregister(i2c_imx->clk, &i2c_imx->clk_change_nb);
1179rpm_disable: 1181rpm_disable:
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 660de1ee68ed..684d651612b3 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -503,7 +503,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
503 writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG); 503 writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG);
504 writel(I2C_DMA_CON_RX, i2c->pdmabase + OFFSET_CON); 504 writel(I2C_DMA_CON_RX, i2c->pdmabase + OFFSET_CON);
505 505
506 dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 0); 506 dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
507 if (!dma_rd_buf) 507 if (!dma_rd_buf)
508 return -ENOMEM; 508 return -ENOMEM;
509 509
@@ -526,7 +526,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
526 writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG); 526 writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG);
527 writel(I2C_DMA_CON_TX, i2c->pdmabase + OFFSET_CON); 527 writel(I2C_DMA_CON_TX, i2c->pdmabase + OFFSET_CON);
528 528
529 dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 0); 529 dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
530 if (!dma_wr_buf) 530 if (!dma_wr_buf)
531 return -ENOMEM; 531 return -ENOMEM;
532 532
@@ -549,7 +549,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
549 writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_INT_FLAG); 549 writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_INT_FLAG);
550 writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_CON); 550 writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_CON);
551 551
552 dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 0); 552 dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1);
553 if (!dma_wr_buf) 553 if (!dma_wr_buf)
554 return -ENOMEM; 554 return -ENOMEM;
555 555
@@ -561,7 +561,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs,
561 return -ENOMEM; 561 return -ENOMEM;
562 } 562 }
563 563
564 dma_rd_buf = i2c_get_dma_safe_msg_buf((msgs + 1), 0); 564 dma_rd_buf = i2c_get_dma_safe_msg_buf((msgs + 1), 1);
565 if (!dma_rd_buf) { 565 if (!dma_rd_buf) {
566 dma_unmap_single(i2c->dev, wpaddr, 566 dma_unmap_single(i2c->dev, wpaddr,
567 msgs->len, DMA_TO_DEVICE); 567 msgs->len, DMA_TO_DEVICE);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index dd52a068b140..a7578f6da979 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -363,9 +363,6 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
363 struct dma_chan *chan = priv->dma_direction == DMA_FROM_DEVICE 363 struct dma_chan *chan = priv->dma_direction == DMA_FROM_DEVICE
364 ? priv->dma_rx : priv->dma_tx; 364 ? priv->dma_rx : priv->dma_tx;
365 365
366 /* Disable DMA Master Received/Transmitted */
367 rcar_i2c_write(priv, ICDMAER, 0);
368
369 dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), 366 dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
370 sg_dma_len(&priv->sg), priv->dma_direction); 367 sg_dma_len(&priv->sg), priv->dma_direction);
371 368
@@ -375,6 +372,9 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
375 priv->flags |= ID_P_NO_RXDMA; 372 priv->flags |= ID_P_NO_RXDMA;
376 373
377 priv->dma_direction = DMA_NONE; 374 priv->dma_direction = DMA_NONE;
375
376 /* Disable DMA Master Received/Transmitted, must be last! */
377 rcar_i2c_write(priv, ICDMAER, 0);
378} 378}
379 379
380static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv) 380static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv)
@@ -611,6 +611,15 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
611 return true; 611 return true;
612} 612}
613 613
614/*
615 * This driver has a lock-free design because there are IP cores (at least
616 * R-Car Gen2) which have an inherent race condition in their hardware design.
617 * There, we need to clear RCAR_BUS_MASK_DATA bits as soon as possible after
618 * the interrupt was generated, otherwise an unwanted repeated message gets
619 * generated. It turned out that taking a spinlock at the beginning of the ISR
620 * was already causing repeated messages. Thus, this driver was converted to
621 * the now lockless behaviour. Please keep this in mind when hacking the driver.
622 */
614static irqreturn_t rcar_i2c_irq(int irq, void *ptr) 623static irqreturn_t rcar_i2c_irq(int irq, void *ptr)
615{ 624{
616 struct rcar_i2c_priv *priv = ptr; 625 struct rcar_i2c_priv *priv = ptr;
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 1e6805b5cef2..a57aa4fe51a4 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -478,7 +478,7 @@ static int sis630_setup(struct pci_dev *sis630_dev)
478 if (!request_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION, 478 if (!request_region(smbus_base + SMB_STS, SIS630_SMB_IOREGION,
479 sis630_driver.name)) { 479 sis630_driver.name)) {
480 dev_err(&sis630_dev->dev, 480 dev_err(&sis630_dev->dev,
481 "I/O Region 0x%04hx-0x%04hx for SMBus already in use.\n", 481 "I/O Region 0x%04x-0x%04x for SMBus already in use.\n",
482 smbus_base + SMB_STS, 482 smbus_base + SMB_STS,
483 smbus_base + SMB_STS + SIS630_SMB_IOREGION - 1); 483 smbus_base + SMB_STS + SIS630_SMB_IOREGION - 1);
484 retval = -EBUSY; 484 retval = -EBUSY;
@@ -528,7 +528,7 @@ static int sis630_probe(struct pci_dev *dev, const struct pci_device_id *id)
528 sis630_adapter.dev.parent = &dev->dev; 528 sis630_adapter.dev.parent = &dev->dev;
529 529
530 snprintf(sis630_adapter.name, sizeof(sis630_adapter.name), 530 snprintf(sis630_adapter.name, sizeof(sis630_adapter.name),
531 "SMBus SIS630 adapter at %04hx", smbus_base + SMB_STS); 531 "SMBus SIS630 adapter at %04x", smbus_base + SMB_STS);
532 532
533 return i2c_add_adapter(&sis630_adapter); 533 return i2c_add_adapter(&sis630_adapter);
534} 534}
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 13e1213561d4..4284fc991cfd 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -432,7 +432,7 @@ static int stm32f7_i2c_compute_timing(struct stm32f7_i2c_dev *i2c_dev,
432 STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0); 432 STM32F7_I2C_ANALOG_FILTER_DELAY_MAX : 0);
433 dnf_delay = setup->dnf * i2cclk; 433 dnf_delay = setup->dnf * i2cclk;
434 434
435 sdadel_min = setup->fall_time - i2c_specs[setup->speed].hddat_min - 435 sdadel_min = i2c_specs[setup->speed].hddat_min + setup->fall_time -
436 af_delay_min - (setup->dnf + 3) * i2cclk; 436 af_delay_min - (setup->dnf + 3) * i2cclk;
437 437
438 sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time - 438 sdadel_max = i2c_specs[setup->speed].vddat_max - setup->rise_time -
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index cb6c5cb0df0b..38af18645133 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -2258,7 +2258,8 @@ EXPORT_SYMBOL(i2c_put_adapter);
2258/** 2258/**
2259 * i2c_get_dma_safe_msg_buf() - get a DMA safe buffer for the given i2c_msg 2259 * i2c_get_dma_safe_msg_buf() - get a DMA safe buffer for the given i2c_msg
2260 * @msg: the message to be checked 2260 * @msg: the message to be checked
2261 * @threshold: the minimum number of bytes for which using DMA makes sense 2261 * @threshold: the minimum number of bytes for which using DMA makes sense.
2262 * Should at least be 1.
2262 * 2263 *
2263 * Return: NULL if a DMA safe buffer was not obtained. Use msg->buf with PIO. 2264 * Return: NULL if a DMA safe buffer was not obtained. Use msg->buf with PIO.
2264 * Or a valid pointer to be used with DMA. After use, release it by 2265 * Or a valid pointer to be used with DMA. After use, release it by
@@ -2268,7 +2269,11 @@ EXPORT_SYMBOL(i2c_put_adapter);
2268 */ 2269 */
2269u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold) 2270u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold)
2270{ 2271{
2271 if (msg->len < threshold) 2272 /* also skip 0-length msgs for bogus thresholds of 0 */
2273 if (!threshold)
2274 pr_debug("DMA buffer for addr=0x%02x with length 0 is bogus\n",
2275 msg->addr);
2276 if (msg->len < threshold || msg->len == 0)
2272 return NULL; 2277 return NULL;
2273 2278
2274 if (msg->flags & I2C_M_DMA_SAFE) 2279 if (msg->flags & I2C_M_DMA_SAFE)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c
index c5a881172524..337410f40860 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_utils.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c
@@ -173,7 +173,12 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
173 173
174 rcu_read_lock(); 174 rcu_read_lock();
175 in = __in_dev_get_rcu(upper_dev); 175 in = __in_dev_get_rcu(upper_dev);
176 local_ipaddr = ntohl(in->ifa_list->ifa_address); 176
177 if (!in->ifa_list)
178 local_ipaddr = 0;
179 else
180 local_ipaddr = ntohl(in->ifa_list->ifa_address);
181
177 rcu_read_unlock(); 182 rcu_read_unlock();
178 } else { 183 } else {
179 local_ipaddr = ntohl(ifa->ifa_address); 184 local_ipaddr = ntohl(ifa->ifa_address);
@@ -185,6 +190,11 @@ int i40iw_inetaddr_event(struct notifier_block *notifier,
185 case NETDEV_UP: 190 case NETDEV_UP:
186 /* Fall through */ 191 /* Fall through */
187 case NETDEV_CHANGEADDR: 192 case NETDEV_CHANGEADDR:
193
194 /* Just skip if no need to handle ARP cache */
195 if (!local_ipaddr)
196 break;
197
188 i40iw_manage_arp_cache(iwdev, 198 i40iw_manage_arp_cache(iwdev,
189 netdev->dev_addr, 199 netdev->dev_addr,
190 &local_ipaddr, 200 &local_ipaddr,
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 782499abcd98..2a0b59a4b6eb 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -804,8 +804,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
804 unsigned long flags; 804 unsigned long flags;
805 805
806 for (i = 0 ; i < dev->num_ports; i++) { 806 for (i = 0 ; i < dev->num_ports; i++) {
807 cancel_delayed_work(&dev->sriov.alias_guid.ports_guid[i].alias_guid_work);
808 det = &sriov->alias_guid.ports_guid[i]; 807 det = &sriov->alias_guid.ports_guid[i];
808 cancel_delayed_work_sync(&det->alias_guid_work);
809 spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags); 809 spin_lock_irqsave(&sriov->alias_guid.ag_work_lock, flags);
810 while (!list_empty(&det->cb_list)) { 810 while (!list_empty(&det->cb_list)) {
811 cb_ctx = list_entry(det->cb_list.next, 811 cb_ctx = list_entry(det->cb_list.next,
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index eaa055007f28..9e08df7914aa 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -20,6 +20,7 @@
20 20
21enum devx_obj_flags { 21enum devx_obj_flags {
22 DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0, 22 DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
23 DEVX_OBJ_FLAGS_DCT = 1 << 1,
23}; 24};
24 25
25struct devx_async_data { 26struct devx_async_data {
@@ -39,7 +40,10 @@ struct devx_obj {
39 u32 dinlen; /* destroy inbox length */ 40 u32 dinlen; /* destroy inbox length */
40 u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW]; 41 u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
41 u32 flags; 42 u32 flags;
42 struct mlx5_ib_devx_mr devx_mr; 43 union {
44 struct mlx5_ib_devx_mr devx_mr;
45 struct mlx5_core_dct core_dct;
46 };
43}; 47};
44 48
45struct devx_umem { 49struct devx_umem {
@@ -347,7 +351,6 @@ static u64 devx_get_obj_id(const void *in)
347 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ, 351 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
348 MLX5_GET(arm_rq_in, in, srq_number)); 352 MLX5_GET(arm_rq_in, in, srq_number));
349 break; 353 break;
350 case MLX5_CMD_OP_DRAIN_DCT:
351 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: 354 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
352 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT, 355 obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
353 MLX5_GET(drain_dct_in, in, dctn)); 356 MLX5_GET(drain_dct_in, in, dctn));
@@ -618,7 +621,6 @@ static bool devx_is_obj_modify_cmd(const void *in)
618 case MLX5_CMD_OP_2RST_QP: 621 case MLX5_CMD_OP_2RST_QP:
619 case MLX5_CMD_OP_ARM_XRC_SRQ: 622 case MLX5_CMD_OP_ARM_XRC_SRQ:
620 case MLX5_CMD_OP_ARM_RQ: 623 case MLX5_CMD_OP_ARM_RQ:
621 case MLX5_CMD_OP_DRAIN_DCT:
622 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION: 624 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
623 case MLX5_CMD_OP_ARM_XRQ: 625 case MLX5_CMD_OP_ARM_XRQ:
624 case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY: 626 case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
@@ -1124,7 +1126,11 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
1124 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) 1126 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
1125 devx_cleanup_mkey(obj); 1127 devx_cleanup_mkey(obj);
1126 1128
1127 ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); 1129 if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1130 ret = mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
1131 else
1132 ret = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
1133 sizeof(out));
1128 if (ib_is_destroy_retryable(ret, why, uobject)) 1134 if (ib_is_destroy_retryable(ret, why, uobject))
1129 return ret; 1135 return ret;
1130 1136
@@ -1185,9 +1191,17 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
1185 devx_set_umem_valid(cmd_in); 1191 devx_set_umem_valid(cmd_in);
1186 } 1192 }
1187 1193
1188 err = mlx5_cmd_exec(dev->mdev, cmd_in, 1194 if (opcode == MLX5_CMD_OP_CREATE_DCT) {
1189 cmd_in_len, 1195 obj->flags |= DEVX_OBJ_FLAGS_DCT;
1190 cmd_out, cmd_out_len); 1196 err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
1197 cmd_in, cmd_in_len,
1198 cmd_out, cmd_out_len);
1199 } else {
1200 err = mlx5_cmd_exec(dev->mdev, cmd_in,
1201 cmd_in_len,
1202 cmd_out, cmd_out_len);
1203 }
1204
1191 if (err) 1205 if (err)
1192 goto obj_free; 1206 goto obj_free;
1193 1207
@@ -1214,7 +1228,11 @@ err_copy:
1214 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) 1228 if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
1215 devx_cleanup_mkey(obj); 1229 devx_cleanup_mkey(obj);
1216obj_destroy: 1230obj_destroy:
1217 mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out)); 1231 if (obj->flags & DEVX_OBJ_FLAGS_DCT)
1232 mlx5_core_destroy_dct(obj->mdev, &obj->core_dct);
1233 else
1234 mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out,
1235 sizeof(out));
1218obj_free: 1236obj_free:
1219 kfree(obj); 1237 kfree(obj);
1220 return err; 1238 return err;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 994c19d01211..531ff20b32ad 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -415,10 +415,17 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed,
415 *active_speed = IB_SPEED_EDR; 415 *active_speed = IB_SPEED_EDR;
416 break; 416 break;
417 case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2): 417 case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
418 *active_width = IB_WIDTH_2X;
419 *active_speed = IB_SPEED_EDR;
420 break;
418 case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR): 421 case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
419 *active_width = IB_WIDTH_1X; 422 *active_width = IB_WIDTH_1X;
420 *active_speed = IB_SPEED_HDR; 423 *active_speed = IB_SPEED_HDR;
421 break; 424 break;
425 case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
426 *active_width = IB_WIDTH_4X;
427 *active_speed = IB_SPEED_EDR;
428 break;
422 case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2): 429 case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
423 *active_width = IB_WIDTH_2X; 430 *active_width = IB_WIDTH_2X;
424 *active_speed = IB_SPEED_HDR; 431 *active_speed = IB_SPEED_HDR;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 6b1f0e76900b..7cd006da1dae 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3729,6 +3729,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3729 3729
3730 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { 3730 } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
3731 struct mlx5_ib_modify_qp_resp resp = {}; 3731 struct mlx5_ib_modify_qp_resp resp = {};
3732 u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
3732 u32 min_resp_len = offsetof(typeof(resp), dctn) + 3733 u32 min_resp_len = offsetof(typeof(resp), dctn) +
3733 sizeof(resp.dctn); 3734 sizeof(resp.dctn);
3734 3735
@@ -3747,7 +3748,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
3747 MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit); 3748 MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
3748 3749
3749 err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in, 3750 err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
3750 MLX5_ST_SZ_BYTES(create_dct_in)); 3751 MLX5_ST_SZ_BYTES(create_dct_in), out,
3752 sizeof(out));
3751 if (err) 3753 if (err)
3752 return err; 3754 return err;
3753 resp.dctn = qp->dct.mdct.mqp.qpn; 3755 resp.dctn = qp->dct.mdct.mqp.qpn;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 6b0760dafb3e..f7cdd2ab7f11 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -140,10 +140,14 @@ static struct lock_class_key reserved_rbtree_key;
140static inline int match_hid_uid(struct device *dev, 140static inline int match_hid_uid(struct device *dev,
141 struct acpihid_map_entry *entry) 141 struct acpihid_map_entry *entry)
142{ 142{
143 struct acpi_device *adev = ACPI_COMPANION(dev);
143 const char *hid, *uid; 144 const char *hid, *uid;
144 145
145 hid = acpi_device_hid(ACPI_COMPANION(dev)); 146 if (!adev)
146 uid = acpi_device_uid(ACPI_COMPANION(dev)); 147 return -ENODEV;
148
149 hid = acpi_device_hid(adev);
150 uid = acpi_device_uid(adev);
147 151
148 if (!hid || !(*hid)) 152 if (!hid || !(*hid))
149 return -ENODEV; 153 return -ENODEV;
@@ -2604,7 +2608,12 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
2604 2608
2605 /* Everything is mapped - write the right values into s->dma_address */ 2609 /* Everything is mapped - write the right values into s->dma_address */
2606 for_each_sg(sglist, s, nelems, i) { 2610 for_each_sg(sglist, s, nelems, i) {
2607 s->dma_address += address + s->offset; 2611 /*
2612 * Add in the remaining piece of the scatter-gather offset that
2613 * was masked out when we were determining the physical address
2614 * via (sg_phys(s) & PAGE_MASK) earlier.
2615 */
2616 s->dma_address += address + (s->offset & ~PAGE_MASK);
2608 s->dma_length = s->length; 2617 s->dma_length = s->length;
2609 } 2618 }
2610 2619
@@ -3160,21 +3169,24 @@ static void amd_iommu_get_resv_regions(struct device *dev,
3160 return; 3169 return;
3161 3170
3162 list_for_each_entry(entry, &amd_iommu_unity_map, list) { 3171 list_for_each_entry(entry, &amd_iommu_unity_map, list) {
3172 int type, prot = 0;
3163 size_t length; 3173 size_t length;
3164 int prot = 0;
3165 3174
3166 if (devid < entry->devid_start || devid > entry->devid_end) 3175 if (devid < entry->devid_start || devid > entry->devid_end)
3167 continue; 3176 continue;
3168 3177
3178 type = IOMMU_RESV_DIRECT;
3169 length = entry->address_end - entry->address_start; 3179 length = entry->address_end - entry->address_start;
3170 if (entry->prot & IOMMU_PROT_IR) 3180 if (entry->prot & IOMMU_PROT_IR)
3171 prot |= IOMMU_READ; 3181 prot |= IOMMU_READ;
3172 if (entry->prot & IOMMU_PROT_IW) 3182 if (entry->prot & IOMMU_PROT_IW)
3173 prot |= IOMMU_WRITE; 3183 prot |= IOMMU_WRITE;
3184 if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
3185 /* Exclusion range */
3186 type = IOMMU_RESV_RESERVED;
3174 3187
3175 region = iommu_alloc_resv_region(entry->address_start, 3188 region = iommu_alloc_resv_region(entry->address_start,
3176 length, prot, 3189 length, prot, type);
3177 IOMMU_RESV_DIRECT);
3178 if (!region) { 3190 if (!region) {
3179 dev_err(dev, "Out of memory allocating dm-regions\n"); 3191 dev_err(dev, "Out of memory allocating dm-regions\n");
3180 return; 3192 return;
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index f773792d77fd..1b1378619fc9 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
2013 if (e == NULL) 2013 if (e == NULL)
2014 return -ENOMEM; 2014 return -ENOMEM;
2015 2015
2016 if (m->flags & IVMD_FLAG_EXCL_RANGE)
2017 init_exclusion_range(m);
2018
2016 switch (m->type) { 2019 switch (m->type) {
2017 default: 2020 default:
2018 kfree(e); 2021 kfree(e);
@@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
2059 2062
2060 while (p < end) { 2063 while (p < end) {
2061 m = (struct ivmd_header *)p; 2064 m = (struct ivmd_header *)p;
2062 if (m->flags & IVMD_FLAG_EXCL_RANGE) 2065 if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
2063 init_exclusion_range(m);
2064 else if (m->flags & IVMD_FLAG_UNITY_MAP)
2065 init_unity_map_range(m); 2066 init_unity_map_range(m);
2066 2067
2067 p += m->length; 2068 p += m->length;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index eae0741f72dc..87965e4d9647 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -374,6 +374,8 @@
374#define IOMMU_PROT_IR 0x01 374#define IOMMU_PROT_IR 0x01
375#define IOMMU_PROT_IW 0x02 375#define IOMMU_PROT_IW 0x02
376 376
377#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE (1 << 2)
378
377/* IOMMU capabilities */ 379/* IOMMU capabilities */
378#define IOMMU_CAP_IOTLB 24 380#define IOMMU_CAP_IOTLB 24
379#define IOMMU_CAP_NPCACHE 26 381#define IOMMU_CAP_NPCACHE 26
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 87274b54febd..28cb713d728c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1538,6 +1538,9 @@ static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu)
1538 u32 pmen; 1538 u32 pmen;
1539 unsigned long flags; 1539 unsigned long flags;
1540 1540
1541 if (!cap_plmr(iommu->cap) && !cap_phmr(iommu->cap))
1542 return;
1543
1541 raw_spin_lock_irqsave(&iommu->register_lock, flags); 1544 raw_spin_lock_irqsave(&iommu->register_lock, flags);
1542 pmen = readl(iommu->reg + DMAR_PMEN_REG); 1545 pmen = readl(iommu->reg + DMAR_PMEN_REG);
1543 pmen &= ~DMA_PMEN_EPM; 1546 pmen &= ~DMA_PMEN_EPM;
@@ -5332,7 +5335,7 @@ int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sd
5332 5335
5333 ctx_lo = context[0].lo; 5336 ctx_lo = context[0].lo;
5334 5337
5335 sdev->did = domain->iommu_did[iommu->seq_id]; 5338 sdev->did = FLPT_DEFAULT_DID;
5336 sdev->sid = PCI_DEVID(info->bus, info->devfn); 5339 sdev->sid = PCI_DEVID(info->bus, info->devfn);
5337 5340
5338 if (!(ctx_lo & CONTEXT_PASIDE)) { 5341 if (!(ctx_lo & CONTEXT_PASIDE)) {
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index f101afc315ab..9a8a8870e267 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -160,6 +160,14 @@
160 160
161#define ARM_V7S_TCR_PD1 BIT(5) 161#define ARM_V7S_TCR_PD1 BIT(5)
162 162
163#ifdef CONFIG_ZONE_DMA32
164#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
165#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
166#else
167#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
168#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
169#endif
170
163typedef u32 arm_v7s_iopte; 171typedef u32 arm_v7s_iopte;
164 172
165static bool selftest_running; 173static bool selftest_running;
@@ -197,13 +205,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
197 void *table = NULL; 205 void *table = NULL;
198 206
199 if (lvl == 1) 207 if (lvl == 1)
200 table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size)); 208 table = (void *)__get_free_pages(
209 __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
201 else if (lvl == 2) 210 else if (lvl == 2)
202 table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA); 211 table = kmem_cache_zalloc(data->l2_tables, gfp);
203 phys = virt_to_phys(table); 212 phys = virt_to_phys(table);
204 if (phys != (arm_v7s_iopte)phys) 213 if (phys != (arm_v7s_iopte)phys) {
205 /* Doesn't fit in PTE */ 214 /* Doesn't fit in PTE */
215 dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
206 goto out_free; 216 goto out_free;
217 }
207 if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { 218 if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
208 dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); 219 dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
209 if (dma_mapping_error(dev, dma)) 220 if (dma_mapping_error(dev, dma))
@@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
733 data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2", 744 data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
734 ARM_V7S_TABLE_SIZE(2), 745 ARM_V7S_TABLE_SIZE(2),
735 ARM_V7S_TABLE_SIZE(2), 746 ARM_V7S_TABLE_SIZE(2),
736 SLAB_CACHE_DMA, NULL); 747 ARM_V7S_TABLE_SLAB_FLAGS, NULL);
737 if (!data->l2_tables) 748 if (!data->l2_tables)
738 goto out_free_data; 749 goto out_free_data;
739 750
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 33a982e33716..109de67d5d72 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1105,10 +1105,12 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
1105 1105
1106 dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type); 1106 dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
1107 if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) { 1107 if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
1108 dev_warn(dev,
1109 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
1110 iommu_def_domain_type);
1111 dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA); 1108 dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
1109 if (dom) {
1110 dev_warn(dev,
1111 "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
1112 iommu_def_domain_type);
1113 }
1112 } 1114 }
1113 1115
1114 group->default_domain = dom; 1116 group->default_domain = dom;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index f8d3ba247523..2de8122e218f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -207,8 +207,10 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
207 curr_iova = rb_entry(curr, struct iova, node); 207 curr_iova = rb_entry(curr, struct iova, node);
208 } while (curr && new_pfn <= curr_iova->pfn_hi); 208 } while (curr && new_pfn <= curr_iova->pfn_hi);
209 209
210 if (limit_pfn < size || new_pfn < iovad->start_pfn) 210 if (limit_pfn < size || new_pfn < iovad->start_pfn) {
211 iovad->max32_alloc_size = size;
211 goto iova32_full; 212 goto iova32_full;
213 }
212 214
213 /* pfn_lo will point to size aligned address if size_aligned is set */ 215 /* pfn_lo will point to size aligned address if size_aligned is set */
214 new->pfn_lo = new_pfn; 216 new->pfn_lo = new_pfn;
@@ -222,7 +224,6 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
222 return 0; 224 return 0;
223 225
224iova32_full: 226iova32_full:
225 iovad->max32_alloc_size = size;
226 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); 227 spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
227 return -ENOMEM; 228 return -ENOMEM;
228} 229}
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 83364fedbf0a..5e4ca139e4ea 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -275,14 +275,14 @@ out_free:
275 return ret; 275 return ret;
276} 276}
277 277
278int __init brcmstb_l2_edge_intc_of_init(struct device_node *np, 278static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
279 struct device_node *parent) 279 struct device_node *parent)
280{ 280{
281 return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init); 281 return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
282} 282}
283IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init); 283IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
284 284
285int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np, 285static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
286 struct device_node *parent) 286 struct device_node *parent)
287{ 287{
288 return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init); 288 return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 2dd1ff0cf558..7577755bdcf4 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1482,7 +1482,7 @@ static int lpi_range_cmp(void *priv, struct list_head *a, struct list_head *b)
1482 ra = container_of(a, struct lpi_range, entry); 1482 ra = container_of(a, struct lpi_range, entry);
1483 rb = container_of(b, struct lpi_range, entry); 1483 rb = container_of(b, struct lpi_range, entry);
1484 1484
1485 return rb->base_id - ra->base_id; 1485 return ra->base_id - rb->base_id;
1486} 1486}
1487 1487
1488static void merge_lpi_ranges(void) 1488static void merge_lpi_ranges(void)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index ba2a37a27a54..fd3110c171ba 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1089,11 +1089,10 @@ static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
1089#endif 1089#endif
1090} 1090}
1091 1091
1092static int gic_init_bases(struct gic_chip_data *gic, int irq_start, 1092static int gic_init_bases(struct gic_chip_data *gic,
1093 struct fwnode_handle *handle) 1093 struct fwnode_handle *handle)
1094{ 1094{
1095 irq_hw_number_t hwirq_base; 1095 int gic_irqs, ret;
1096 int gic_irqs, irq_base, ret;
1097 1096
1098 if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) { 1097 if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
1099 /* Frankein-GIC without banked registers... */ 1098 /* Frankein-GIC without banked registers... */
@@ -1145,28 +1144,21 @@ static int gic_init_bases(struct gic_chip_data *gic, int irq_start,
1145 } else { /* Legacy support */ 1144 } else { /* Legacy support */
1146 /* 1145 /*
1147 * For primary GICs, skip over SGIs. 1146 * For primary GICs, skip over SGIs.
1148 * For secondary GICs, skip over PPIs, too. 1147 * No secondary GIC support whatsoever.
1149 */ 1148 */
1150 if (gic == &gic_data[0] && (irq_start & 31) > 0) { 1149 int irq_base;
1151 hwirq_base = 16;
1152 if (irq_start != -1)
1153 irq_start = (irq_start & ~31) + 16;
1154 } else {
1155 hwirq_base = 32;
1156 }
1157 1150
1158 gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ 1151 gic_irqs -= 16; /* calculate # of irqs to allocate */
1159 1152
1160 irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, 1153 irq_base = irq_alloc_descs(16, 16, gic_irqs,
1161 numa_node_id()); 1154 numa_node_id());
1162 if (irq_base < 0) { 1155 if (irq_base < 0) {
1163 WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", 1156 WARN(1, "Cannot allocate irq_descs @ IRQ16, assuming pre-allocated\n");
1164 irq_start); 1157 irq_base = 16;
1165 irq_base = irq_start;
1166 } 1158 }
1167 1159
1168 gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base, 1160 gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base,
1169 hwirq_base, &gic_irq_domain_ops, gic); 1161 16, &gic_irq_domain_ops, gic);
1170 } 1162 }
1171 1163
1172 if (WARN_ON(!gic->domain)) { 1164 if (WARN_ON(!gic->domain)) {
@@ -1195,7 +1187,6 @@ error:
1195} 1187}
1196 1188
1197static int __init __gic_init_bases(struct gic_chip_data *gic, 1189static int __init __gic_init_bases(struct gic_chip_data *gic,
1198 int irq_start,
1199 struct fwnode_handle *handle) 1190 struct fwnode_handle *handle)
1200{ 1191{
1201 char *name; 1192 char *name;
@@ -1231,32 +1222,28 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
1231 gic_init_chip(gic, NULL, name, false); 1222 gic_init_chip(gic, NULL, name, false);
1232 } 1223 }
1233 1224
1234 ret = gic_init_bases(gic, irq_start, handle); 1225 ret = gic_init_bases(gic, handle);
1235 if (ret) 1226 if (ret)
1236 kfree(name); 1227 kfree(name);
1237 1228
1238 return ret; 1229 return ret;
1239} 1230}
1240 1231
1241void __init gic_init(unsigned int gic_nr, int irq_start, 1232void __init gic_init(void __iomem *dist_base, void __iomem *cpu_base)
1242 void __iomem *dist_base, void __iomem *cpu_base)
1243{ 1233{
1244 struct gic_chip_data *gic; 1234 struct gic_chip_data *gic;
1245 1235
1246 if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
1247 return;
1248
1249 /* 1236 /*
1250 * Non-DT/ACPI systems won't run a hypervisor, so let's not 1237 * Non-DT/ACPI systems won't run a hypervisor, so let's not
1251 * bother with these... 1238 * bother with these...
1252 */ 1239 */
1253 static_branch_disable(&supports_deactivate_key); 1240 static_branch_disable(&supports_deactivate_key);
1254 1241
1255 gic = &gic_data[gic_nr]; 1242 gic = &gic_data[0];
1256 gic->raw_dist_base = dist_base; 1243 gic->raw_dist_base = dist_base;
1257 gic->raw_cpu_base = cpu_base; 1244 gic->raw_cpu_base = cpu_base;
1258 1245
1259 __gic_init_bases(gic, irq_start, NULL); 1246 __gic_init_bases(gic, NULL);
1260} 1247}
1261 1248
1262static void gic_teardown(struct gic_chip_data *gic) 1249static void gic_teardown(struct gic_chip_data *gic)
@@ -1399,7 +1386,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
1399 if (ret) 1386 if (ret)
1400 return ret; 1387 return ret;
1401 1388
1402 ret = gic_init_bases(*gic, -1, &dev->of_node->fwnode); 1389 ret = gic_init_bases(*gic, &dev->of_node->fwnode);
1403 if (ret) { 1390 if (ret) {
1404 gic_teardown(*gic); 1391 gic_teardown(*gic);
1405 return ret; 1392 return ret;
@@ -1459,7 +1446,7 @@ gic_of_init(struct device_node *node, struct device_node *parent)
1459 if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base)) 1446 if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
1460 static_branch_disable(&supports_deactivate_key); 1447 static_branch_disable(&supports_deactivate_key);
1461 1448
1462 ret = __gic_init_bases(gic, -1, &node->fwnode); 1449 ret = __gic_init_bases(gic, &node->fwnode);
1463 if (ret) { 1450 if (ret) {
1464 gic_teardown(gic); 1451 gic_teardown(gic);
1465 return ret; 1452 return ret;
@@ -1650,7 +1637,7 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
1650 return -ENOMEM; 1637 return -ENOMEM;
1651 } 1638 }
1652 1639
1653 ret = __gic_init_bases(gic, -1, domain_handle); 1640 ret = __gic_init_bases(gic, domain_handle);
1654 if (ret) { 1641 if (ret) {
1655 pr_err("Failed to initialise GIC\n"); 1642 pr_err("Failed to initialise GIC\n");
1656 irq_domain_free_fwnode(domain_handle); 1643 irq_domain_free_fwnode(domain_handle);
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index d1098f4da6a4..88df3d00052c 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -169,8 +169,12 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
169 169
170 raw_spin_lock_init(&data->lock); 170 raw_spin_lock_init(&data->lock);
171 171
172 of_property_read_u32(np, "fsl,num-irqs", &irqs_num); 172 ret = of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
173 of_property_read_u32(np, "fsl,channel", &data->channel); 173 if (ret)
174 return ret;
175 ret = of_property_read_u32(np, "fsl,channel", &data->channel);
176 if (ret)
177 return ret;
174 178
175 /* 179 /*
176 * There is one output irq for each group of 64 inputs. 180 * There is one output irq for each group of 64 inputs.
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 567b29c47608..98b6e1d4b1a6 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -161,6 +161,9 @@ static void mbigen_write_msg(struct msi_desc *desc, struct msi_msg *msg)
161 void __iomem *base = d->chip_data; 161 void __iomem *base = d->chip_data;
162 u32 val; 162 u32 val;
163 163
164 if (!msg->address_lo && !msg->address_hi)
165 return;
166
164 base += get_mbigen_vec_reg(d->hwirq); 167 base += get_mbigen_vec_reg(d->hwirq);
165 val = readl_relaxed(base); 168 val = readl_relaxed(base);
166 169
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 3496b61a312a..8eed478f3b7e 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -179,7 +179,7 @@ static int mmp_irq_domain_xlate(struct irq_domain *d, struct device_node *node,
179 return 0; 179 return 0;
180} 180}
181 181
182const struct irq_domain_ops mmp_irq_domain_ops = { 182static const struct irq_domain_ops mmp_irq_domain_ops = {
183 .map = mmp_irq_domain_map, 183 .map = mmp_irq_domain_map,
184 .xlate = mmp_irq_domain_xlate, 184 .xlate = mmp_irq_domain_xlate,
185}; 185};
diff --git a/drivers/irqchip/irq-mvebu-sei.c b/drivers/irqchip/irq-mvebu-sei.c
index add4c9c934c8..18832ccc8ff8 100644
--- a/drivers/irqchip/irq-mvebu-sei.c
+++ b/drivers/irqchip/irq-mvebu-sei.c
@@ -478,7 +478,7 @@ dispose_irq:
478 return ret; 478 return ret;
479} 479}
480 480
481struct mvebu_sei_caps mvebu_sei_ap806_caps = { 481static struct mvebu_sei_caps mvebu_sei_ap806_caps = {
482 .ap_range = { 482 .ap_range = {
483 .first = 0, 483 .first = 0,
484 .size = 21, 484 .size = 21,
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index a93296b9b45d..7bd1d4cb2e19 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -716,7 +716,6 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
716 const struct stm32_exti_bank *stm32_bank; 716 const struct stm32_exti_bank *stm32_bank;
717 struct stm32_exti_chip_data *chip_data; 717 struct stm32_exti_chip_data *chip_data;
718 void __iomem *base = h_data->base; 718 void __iomem *base = h_data->base;
719 u32 irqs_mask;
720 719
721 stm32_bank = h_data->drv_data->exti_banks[bank_idx]; 720 stm32_bank = h_data->drv_data->exti_banks[bank_idx];
722 chip_data = &h_data->chips_data[bank_idx]; 721 chip_data = &h_data->chips_data[bank_idx];
@@ -725,21 +724,12 @@ stm32_exti_chip_data *stm32_exti_chip_init(struct stm32_exti_host_data *h_data,
725 724
726 raw_spin_lock_init(&chip_data->rlock); 725 raw_spin_lock_init(&chip_data->rlock);
727 726
728 /* Determine number of irqs supported */
729 writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
730 irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);
731
732 /* 727 /*
733 * This IP has no reset, so after hot reboot we should 728 * This IP has no reset, so after hot reboot we should
734 * clear registers to avoid residue 729 * clear registers to avoid residue
735 */ 730 */
736 writel_relaxed(0, base + stm32_bank->imr_ofst); 731 writel_relaxed(0, base + stm32_bank->imr_ofst);
737 writel_relaxed(0, base + stm32_bank->emr_ofst); 732 writel_relaxed(0, base + stm32_bank->emr_ofst);
738 writel_relaxed(0, base + stm32_bank->rtsr_ofst);
739 writel_relaxed(0, base + stm32_bank->ftsr_ofst);
740 writel_relaxed(~0UL, base + stm32_bank->rpr_ofst);
741 if (stm32_bank->fpr_ofst != UNDEF_REG)
742 writel_relaxed(~0UL, base + stm32_bank->fpr_ofst);
743 733
744 pr_info("%pOF: bank%d\n", h_data->node, bank_idx); 734 pr_info("%pOF: bank%d\n", h_data->node, bank_idx);
745 735
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 4d85645c87f7..0928fd1f0e0c 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
4365 if (m->clock2) 4365 if (m->clock2)
4366 test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip); 4366 test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
4367 4367
4368 if (ent->device == 0xB410) { 4368 if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
4369 ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
4369 test_and_set_bit(HFC_CHIP_B410P, &hc->chip); 4370 test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
4370 test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip); 4371 test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
4371 test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip); 4372 test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index 7fea18b0c15d..7cb4d685a1f1 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client,
513 const struct i2c_device_id *id) 513 const struct i2c_device_id *id)
514{ 514{
515 int devid; 515 int devid;
516 const struct of_device_id *of_id;
516 struct pca9532_data *data = i2c_get_clientdata(client); 517 struct pca9532_data *data = i2c_get_clientdata(client);
517 struct pca9532_platform_data *pca9532_pdata = 518 struct pca9532_platform_data *pca9532_pdata =
518 dev_get_platdata(&client->dev); 519 dev_get_platdata(&client->dev);
@@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client,
528 dev_err(&client->dev, "no platform data\n"); 529 dev_err(&client->dev, "no platform data\n");
529 return -EINVAL; 530 return -EINVAL;
530 } 531 }
531 devid = (int)(uintptr_t)of_match_device( 532 of_id = of_match_device(of_pca9532_leds_match,
532 of_pca9532_leds_match, &client->dev)->data; 533 &client->dev);
534 if (unlikely(!of_id))
535 return -EINVAL;
536 devid = (int)(uintptr_t) of_id->data;
533 } else { 537 } else {
534 devid = id->driver_data; 538 devid = id->driver_data;
535 } 539 }
diff --git a/drivers/leds/trigger/ledtrig-netdev.c b/drivers/leds/trigger/ledtrig-netdev.c
index 3dd3ed46d473..136f86a1627d 100644
--- a/drivers/leds/trigger/ledtrig-netdev.c
+++ b/drivers/leds/trigger/ledtrig-netdev.c
@@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev,
122 trigger_data->net_dev = NULL; 122 trigger_data->net_dev = NULL;
123 } 123 }
124 124
125 strncpy(trigger_data->device_name, buf, size); 125 memcpy(trigger_data->device_name, buf, size);
126 trigger_data->device_name[size] = 0;
126 if (size > 0 && trigger_data->device_name[size - 1] == '\n') 127 if (size > 0 && trigger_data->device_name[size - 1] == '\n')
127 trigger_data->device_name[size - 1] = 0; 128 trigger_data->device_name[size - 1] = 0;
128 129
@@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb,
301 container_of(nb, struct led_netdev_data, notifier); 302 container_of(nb, struct led_netdev_data, notifier);
302 303
303 if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE 304 if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
304 && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER 305 && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
305 && evt != NETDEV_CHANGENAME)
306 return NOTIFY_DONE; 306 return NOTIFY_DONE;
307 307
308 if (strcmp(dev->name, trigger_data->device_name)) 308 if (!(dev == trigger_data->net_dev ||
309 (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
309 return NOTIFY_DONE; 310 return NOTIFY_DONE;
310 311
311 cancel_delayed_work_sync(&trigger_data->work); 312 cancel_delayed_work_sync(&trigger_data->work);
@@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
320 dev_hold(dev); 321 dev_hold(dev);
321 trigger_data->net_dev = dev; 322 trigger_data->net_dev = dev;
322 break; 323 break;
323 case NETDEV_CHANGENAME:
324 case NETDEV_UNREGISTER: 324 case NETDEV_UNREGISTER:
325 if (trigger_data->net_dev) { 325 dev_put(trigger_data->net_dev);
326 dev_put(trigger_data->net_dev); 326 trigger_data->net_dev = NULL;
327 trigger_data->net_dev = NULL;
328 }
329 break; 327 break;
330 case NETDEV_UP: 328 case NETDEV_UP:
331 case NETDEV_CHANGE: 329 case NETDEV_CHANGE:
diff --git a/drivers/lightnvm/pblk-rl.c b/drivers/lightnvm/pblk-rl.c
index b014957dde0b..a5f8bc2defbc 100644
--- a/drivers/lightnvm/pblk-rl.c
+++ b/drivers/lightnvm/pblk-rl.c
@@ -233,10 +233,15 @@ void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold)
233 /* To start with, all buffer is available to user I/O writers */ 233 /* To start with, all buffer is available to user I/O writers */
234 rl->rb_budget = budget; 234 rl->rb_budget = budget;
235 rl->rb_user_max = budget; 235 rl->rb_user_max = budget;
236 rl->rb_max_io = threshold ? (budget - threshold) : (budget - 1);
237 rl->rb_gc_max = 0; 236 rl->rb_gc_max = 0;
238 rl->rb_state = PBLK_RL_HIGH; 237 rl->rb_state = PBLK_RL_HIGH;
239 238
239 /* Maximize I/O size and ansure that back threshold is respected */
240 if (threshold)
241 rl->rb_max_io = budget - pblk->min_write_pgs_data - threshold;
242 else
243 rl->rb_max_io = budget - pblk->min_write_pgs_data - 1;
244
240 atomic_set(&rl->rb_user_cnt, 0); 245 atomic_set(&rl->rb_user_cnt, 0);
241 atomic_set(&rl->rb_gc_cnt, 0); 246 atomic_set(&rl->rb_gc_cnt, 0);
242 atomic_set(&rl->rb_space, -1); 247 atomic_set(&rl->rb_space, -1);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 95c6d86ab5e8..c4ef1fceead6 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -115,6 +115,7 @@ struct mapped_device {
115 struct srcu_struct io_barrier; 115 struct srcu_struct io_barrier;
116}; 116};
117 117
118void disable_discard(struct mapped_device *md);
118void disable_write_same(struct mapped_device *md); 119void disable_write_same(struct mapped_device *md);
119void disable_write_zeroes(struct mapped_device *md); 120void disable_write_zeroes(struct mapped_device *md);
120 121
diff --git a/drivers/md/dm-init.c b/drivers/md/dm-init.c
index b53f30f16b4d..4b76f84424c3 100644
--- a/drivers/md/dm-init.c
+++ b/drivers/md/dm-init.c
@@ -36,7 +36,7 @@ struct dm_device {
36 struct list_head list; 36 struct list_head list;
37}; 37};
38 38
39const char *dm_allowed_targets[] __initconst = { 39const char * const dm_allowed_targets[] __initconst = {
40 "crypt", 40 "crypt",
41 "delay", 41 "delay",
42 "linear", 42 "linear",
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index d57d997a52c8..7c678f50aaa3 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -913,7 +913,7 @@ static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsig
913static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2) 913static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
914{ 914{
915 return range1->logical_sector < range2->logical_sector + range2->n_sectors && 915 return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
916 range2->logical_sector + range2->n_sectors > range2->logical_sector; 916 range1->logical_sector + range1->n_sectors > range2->logical_sector;
917} 917}
918 918
919static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting) 919static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
@@ -959,8 +959,6 @@ static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity
959 struct dm_integrity_range *last_range = 959 struct dm_integrity_range *last_range =
960 list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry); 960 list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
961 struct task_struct *last_range_task; 961 struct task_struct *last_range_task;
962 if (!ranges_overlap(range, last_range))
963 break;
964 last_range_task = last_range->task; 962 last_range_task = last_range->task;
965 list_del(&last_range->wait_entry); 963 list_del(&last_range->wait_entry);
966 if (!add_new_range(ic, last_range, false)) { 964 if (!add_new_range(ic, last_range, false)) {
@@ -3185,7 +3183,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3185 journal_watermark = val; 3183 journal_watermark = val;
3186 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1) 3184 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
3187 sync_msec = val; 3185 sync_msec = val;
3188 else if (!memcmp(opt_string, "meta_device:", strlen("meta_device:"))) { 3186 else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
3189 if (ic->meta_dev) { 3187 if (ic->meta_dev) {
3190 dm_put_device(ti, ic->meta_dev); 3188 dm_put_device(ti, ic->meta_dev);
3191 ic->meta_dev = NULL; 3189 ic->meta_dev = NULL;
@@ -3204,17 +3202,17 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3204 goto bad; 3202 goto bad;
3205 } 3203 }
3206 ic->sectors_per_block = val >> SECTOR_SHIFT; 3204 ic->sectors_per_block = val >> SECTOR_SHIFT;
3207 } else if (!memcmp(opt_string, "internal_hash:", strlen("internal_hash:"))) { 3205 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
3208 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error, 3206 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
3209 "Invalid internal_hash argument"); 3207 "Invalid internal_hash argument");
3210 if (r) 3208 if (r)
3211 goto bad; 3209 goto bad;
3212 } else if (!memcmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) { 3210 } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
3213 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error, 3211 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
3214 "Invalid journal_crypt argument"); 3212 "Invalid journal_crypt argument");
3215 if (r) 3213 if (r)
3216 goto bad; 3214 goto bad;
3217 } else if (!memcmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { 3215 } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
3218 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, 3216 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
3219 "Invalid journal_mac argument"); 3217 "Invalid journal_mac argument");
3220 if (r) 3218 if (r)
@@ -3616,7 +3614,7 @@ static struct target_type integrity_target = {
3616 .io_hints = dm_integrity_io_hints, 3614 .io_hints = dm_integrity_io_hints,
3617}; 3615};
3618 3616
3619int __init dm_integrity_init(void) 3617static int __init dm_integrity_init(void)
3620{ 3618{
3621 int r; 3619 int r;
3622 3620
@@ -3635,7 +3633,7 @@ int __init dm_integrity_init(void)
3635 return r; 3633 return r;
3636} 3634}
3637 3635
3638void dm_integrity_exit(void) 3636static void __exit dm_integrity_exit(void)
3639{ 3637{
3640 dm_unregister_target(&integrity_target); 3638 dm_unregister_target(&integrity_target);
3641 kmem_cache_destroy(journal_io_cache); 3639 kmem_cache_destroy(journal_io_cache);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 09773636602d..b66745bd08bb 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -222,11 +222,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
222 } 222 }
223 223
224 if (unlikely(error == BLK_STS_TARGET)) { 224 if (unlikely(error == BLK_STS_TARGET)) {
225 if (req_op(clone) == REQ_OP_WRITE_SAME && 225 if (req_op(clone) == REQ_OP_DISCARD &&
226 !clone->q->limits.max_write_same_sectors) 226 !clone->q->limits.max_discard_sectors)
227 disable_discard(tio->md);
228 else if (req_op(clone) == REQ_OP_WRITE_SAME &&
229 !clone->q->limits.max_write_same_sectors)
227 disable_write_same(tio->md); 230 disable_write_same(tio->md);
228 if (req_op(clone) == REQ_OP_WRITE_ZEROES && 231 else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
229 !clone->q->limits.max_write_zeroes_sectors) 232 !clone->q->limits.max_write_zeroes_sectors)
230 disable_write_zeroes(tio->md); 233 disable_write_zeroes(tio->md);
231 } 234 }
232 235
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ba9481f1bf3c..cde3b49b2a91 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1844,6 +1844,36 @@ static bool dm_table_supports_secure_erase(struct dm_table *t)
1844 return true; 1844 return true;
1845} 1845}
1846 1846
1847static int device_requires_stable_pages(struct dm_target *ti,
1848 struct dm_dev *dev, sector_t start,
1849 sector_t len, void *data)
1850{
1851 struct request_queue *q = bdev_get_queue(dev->bdev);
1852
1853 return q && bdi_cap_stable_pages_required(q->backing_dev_info);
1854}
1855
1856/*
1857 * If any underlying device requires stable pages, a table must require
1858 * them as well. Only targets that support iterate_devices are considered:
1859 * don't want error, zero, etc to require stable pages.
1860 */
1861static bool dm_table_requires_stable_pages(struct dm_table *t)
1862{
1863 struct dm_target *ti;
1864 unsigned i;
1865
1866 for (i = 0; i < dm_table_get_num_targets(t); i++) {
1867 ti = dm_table_get_target(t, i);
1868
1869 if (ti->type->iterate_devices &&
1870 ti->type->iterate_devices(ti, device_requires_stable_pages, NULL))
1871 return true;
1872 }
1873
1874 return false;
1875}
1876
1847void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, 1877void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1848 struct queue_limits *limits) 1878 struct queue_limits *limits)
1849{ 1879{
@@ -1897,6 +1927,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
1897 dm_table_verify_integrity(t); 1927 dm_table_verify_integrity(t);
1898 1928
1899 /* 1929 /*
1930 * Some devices don't use blk_integrity but still want stable pages
1931 * because they do their own checksumming.
1932 */
1933 if (dm_table_requires_stable_pages(t))
1934 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
1935 else
1936 q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
1937
1938 /*
1900 * Determine whether or not this queue's I/O timings contribute 1939 * Determine whether or not this queue's I/O timings contribute
1901 * to the entropy pool, Only request-based targets use this. 1940 * to the entropy pool, Only request-based targets use this.
1902 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not 1941 * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 68d24056d0b1..043f0761e4a0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -945,6 +945,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
945 } 945 }
946} 946}
947 947
948void disable_discard(struct mapped_device *md)
949{
950 struct queue_limits *limits = dm_get_queue_limits(md);
951
952 /* device doesn't really support DISCARD, disable it */
953 limits->max_discard_sectors = 0;
954 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
955}
956
948void disable_write_same(struct mapped_device *md) 957void disable_write_same(struct mapped_device *md)
949{ 958{
950 struct queue_limits *limits = dm_get_queue_limits(md); 959 struct queue_limits *limits = dm_get_queue_limits(md);
@@ -970,11 +979,14 @@ static void clone_endio(struct bio *bio)
970 dm_endio_fn endio = tio->ti->type->end_io; 979 dm_endio_fn endio = tio->ti->type->end_io;
971 980
972 if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { 981 if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
973 if (bio_op(bio) == REQ_OP_WRITE_SAME && 982 if (bio_op(bio) == REQ_OP_DISCARD &&
974 !bio->bi_disk->queue->limits.max_write_same_sectors) 983 !bio->bi_disk->queue->limits.max_discard_sectors)
984 disable_discard(md);
985 else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
986 !bio->bi_disk->queue->limits.max_write_same_sectors)
975 disable_write_same(md); 987 disable_write_same(md);
976 if (bio_op(bio) == REQ_OP_WRITE_ZEROES && 988 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
977 !bio->bi_disk->queue->limits.max_write_zeroes_sectors) 989 !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
978 disable_write_zeroes(md); 990 disable_write_zeroes(md);
979 } 991 }
980 992
@@ -1042,15 +1054,7 @@ int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1042 return -EINVAL; 1054 return -EINVAL;
1043 } 1055 }
1044 1056
1045 /* 1057 ti->max_io_len = (uint32_t) len;
1046 * BIO based queue uses its own splitting. When multipage bvecs
1047 * is switched on, size of the incoming bio may be too big to
1048 * be handled in some targets, such as crypt.
1049 *
1050 * When these targets are ready for the big bio, we can remove
1051 * the limit.
1052 */
1053 ti->max_io_len = min_t(uint32_t, len, BIO_MAX_PAGES * PAGE_SIZE);
1054 1058
1055 return 0; 1059 return 0;
1056} 1060}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index abb5d382f64d..3b6880dd648d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3939,6 +3939,8 @@ static int raid10_run(struct mddev *mddev)
3939 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 3939 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
3940 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 3940 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
3941 "reshape"); 3941 "reshape");
3942 if (!mddev->sync_thread)
3943 goto out_free_conf;
3942 } 3944 }
3943 3945
3944 return 0; 3946 return 0;
@@ -4670,7 +4672,6 @@ read_more:
4670 atomic_inc(&r10_bio->remaining); 4672 atomic_inc(&r10_bio->remaining);
4671 read_bio->bi_next = NULL; 4673 read_bio->bi_next = NULL;
4672 generic_make_request(read_bio); 4674 generic_make_request(read_bio);
4673 sector_nr += nr_sectors;
4674 sectors_done += nr_sectors; 4675 sectors_done += nr_sectors;
4675 if (sector_nr <= last) 4676 if (sector_nr <= last)
4676 goto read_more; 4677 goto read_more;
diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
index bfb811407061..43c714a8798c 100644
--- a/drivers/md/raid5-log.h
+++ b/drivers/md/raid5-log.h
@@ -45,6 +45,7 @@ extern void ppl_stripe_write_finished(struct stripe_head *sh);
45extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add); 45extern int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add);
46extern void ppl_quiesce(struct r5conf *conf, int quiesce); 46extern void ppl_quiesce(struct r5conf *conf, int quiesce);
47extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio); 47extern int ppl_handle_flush_request(struct r5l_log *log, struct bio *bio);
48extern struct md_sysfs_entry ppl_write_hint;
48 49
49static inline bool raid5_has_log(struct r5conf *conf) 50static inline bool raid5_has_log(struct r5conf *conf)
50{ 51{
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c
index 0b096ddc9c1e..17e9e7d51097 100644
--- a/drivers/md/raid5-ppl.c
+++ b/drivers/md/raid5-ppl.c
@@ -20,6 +20,7 @@
20#include <linux/raid/md_p.h> 20#include <linux/raid/md_p.h>
21#include "md.h" 21#include "md.h"
22#include "raid5.h" 22#include "raid5.h"
23#include "raid5-log.h"
23 24
24/* 25/*
25 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for 26 * PPL consists of a 4KB header (struct ppl_header) and at least 128KB for
@@ -115,6 +116,8 @@ struct ppl_conf {
115 /* stripes to retry if failed to allocate io_unit */ 116 /* stripes to retry if failed to allocate io_unit */
116 struct list_head no_mem_stripes; 117 struct list_head no_mem_stripes;
117 spinlock_t no_mem_stripes_lock; 118 spinlock_t no_mem_stripes_lock;
119
120 unsigned short write_hint;
118}; 121};
119 122
120struct ppl_log { 123struct ppl_log {
@@ -474,6 +477,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
474 bio_set_dev(bio, log->rdev->bdev); 477 bio_set_dev(bio, log->rdev->bdev);
475 bio->bi_iter.bi_sector = log->next_io_sector; 478 bio->bi_iter.bi_sector = log->next_io_sector;
476 bio_add_page(bio, io->header_page, PAGE_SIZE, 0); 479 bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
480 bio->bi_write_hint = ppl_conf->write_hint;
477 481
478 pr_debug("%s: log->current_io_sector: %llu\n", __func__, 482 pr_debug("%s: log->current_io_sector: %llu\n", __func__,
479 (unsigned long long)log->next_io_sector); 483 (unsigned long long)log->next_io_sector);
@@ -503,6 +507,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
503 bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, 507 bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
504 &ppl_conf->bs); 508 &ppl_conf->bs);
505 bio->bi_opf = prev->bi_opf; 509 bio->bi_opf = prev->bi_opf;
510 bio->bi_write_hint = prev->bi_write_hint;
506 bio_copy_dev(bio, prev); 511 bio_copy_dev(bio, prev);
507 bio->bi_iter.bi_sector = bio_end_sector(prev); 512 bio->bi_iter.bi_sector = bio_end_sector(prev);
508 bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); 513 bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);
@@ -1407,6 +1412,7 @@ int ppl_init_log(struct r5conf *conf)
1407 atomic64_set(&ppl_conf->seq, 0); 1412 atomic64_set(&ppl_conf->seq, 0);
1408 INIT_LIST_HEAD(&ppl_conf->no_mem_stripes); 1413 INIT_LIST_HEAD(&ppl_conf->no_mem_stripes);
1409 spin_lock_init(&ppl_conf->no_mem_stripes_lock); 1414 spin_lock_init(&ppl_conf->no_mem_stripes_lock);
1415 ppl_conf->write_hint = RWF_WRITE_LIFE_NOT_SET;
1410 1416
1411 if (!mddev->external) { 1417 if (!mddev->external) {
1412 ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid)); 1418 ppl_conf->signature = ~crc32c_le(~0, mddev->uuid, sizeof(mddev->uuid));
@@ -1501,3 +1507,60 @@ int ppl_modify_log(struct r5conf *conf, struct md_rdev *rdev, bool add)
1501 1507
1502 return ret; 1508 return ret;
1503} 1509}
1510
1511static ssize_t
1512ppl_write_hint_show(struct mddev *mddev, char *buf)
1513{
1514 size_t ret = 0;
1515 struct r5conf *conf;
1516 struct ppl_conf *ppl_conf = NULL;
1517
1518 spin_lock(&mddev->lock);
1519 conf = mddev->private;
1520 if (conf && raid5_has_ppl(conf))
1521 ppl_conf = conf->log_private;
1522 ret = sprintf(buf, "%d\n", ppl_conf ? ppl_conf->write_hint : 0);
1523 spin_unlock(&mddev->lock);
1524
1525 return ret;
1526}
1527
1528static ssize_t
1529ppl_write_hint_store(struct mddev *mddev, const char *page, size_t len)
1530{
1531 struct r5conf *conf;
1532 struct ppl_conf *ppl_conf;
1533 int err = 0;
1534 unsigned short new;
1535
1536 if (len >= PAGE_SIZE)
1537 return -EINVAL;
1538 if (kstrtou16(page, 10, &new))
1539 return -EINVAL;
1540
1541 err = mddev_lock(mddev);
1542 if (err)
1543 return err;
1544
1545 conf = mddev->private;
1546 if (!conf) {
1547 err = -ENODEV;
1548 } else if (raid5_has_ppl(conf)) {
1549 ppl_conf = conf->log_private;
1550 if (!ppl_conf)
1551 err = -EINVAL;
1552 else
1553 ppl_conf->write_hint = new;
1554 } else {
1555 err = -EINVAL;
1556 }
1557
1558 mddev_unlock(mddev);
1559
1560 return err ?: len;
1561}
1562
1563struct md_sysfs_entry
1564ppl_write_hint = __ATTR(ppl_write_hint, S_IRUGO | S_IWUSR,
1565 ppl_write_hint_show,
1566 ppl_write_hint_store);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 77ffd09be486..c033bfcb209e 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -6650,6 +6650,7 @@ static struct attribute *raid5_attrs[] = {
6650 &raid5_skip_copy.attr, 6650 &raid5_skip_copy.attr,
6651 &raid5_rmw_level.attr, 6651 &raid5_rmw_level.attr,
6652 &r5c_journal_mode.attr, 6652 &r5c_journal_mode.attr,
6653 &ppl_write_hint.attr,
6653 NULL, 6654 NULL,
6654}; 6655};
6655static struct attribute_group raid5_attrs_group = { 6656static struct attribute_group raid5_attrs_group = {
@@ -7393,6 +7394,8 @@ static int raid5_run(struct mddev *mddev)
7393 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7394 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7394 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 7395 mddev->sync_thread = md_register_thread(md_do_sync, mddev,
7395 "reshape"); 7396 "reshape");
7397 if (!mddev->sync_thread)
7398 goto abort;
7396 } 7399 }
7397 7400
7398 /* Ok, everything is just fine now */ 7401 /* Ok, everything is just fine now */
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0ce2d8dfc5f1..26ad6468d13a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1246,7 +1246,7 @@ config MFD_STA2X11
1246 1246
1247config MFD_SUN6I_PRCM 1247config MFD_SUN6I_PRCM
1248 bool "Allwinner A31 PRCM controller" 1248 bool "Allwinner A31 PRCM controller"
1249 depends on ARCH_SUNXI 1249 depends on ARCH_SUNXI || COMPILE_TEST
1250 select MFD_CORE 1250 select MFD_CORE
1251 help 1251 help
1252 Support for the PRCM (Power/Reset/Clock Management) unit available 1252 Support for the PRCM (Power/Reset/Clock Management) unit available
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 69df27769c21..43ac71691fe4 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = {
53static const struct mfd_cell sprd_pmic_devs[] = { 53static const struct mfd_cell sprd_pmic_devs[] = {
54 { 54 {
55 .name = "sc27xx-wdt", 55 .name = "sc27xx-wdt",
56 .of_compatible = "sprd,sc27xx-wdt", 56 .of_compatible = "sprd,sc2731-wdt",
57 }, { 57 }, {
58 .name = "sc27xx-rtc", 58 .name = "sc27xx-rtc",
59 .of_compatible = "sprd,sc27xx-rtc", 59 .of_compatible = "sprd,sc2731-rtc",
60 }, { 60 }, {
61 .name = "sc27xx-charger", 61 .name = "sc27xx-charger",
62 .of_compatible = "sprd,sc27xx-charger", 62 .of_compatible = "sprd,sc2731-charger",
63 }, { 63 }, {
64 .name = "sc27xx-chg-timer", 64 .name = "sc27xx-chg-timer",
65 .of_compatible = "sprd,sc27xx-chg-timer", 65 .of_compatible = "sprd,sc2731-chg-timer",
66 }, { 66 }, {
67 .name = "sc27xx-fast-chg", 67 .name = "sc27xx-fast-chg",
68 .of_compatible = "sprd,sc27xx-fast-chg", 68 .of_compatible = "sprd,sc2731-fast-chg",
69 }, { 69 }, {
70 .name = "sc27xx-chg-wdt", 70 .name = "sc27xx-chg-wdt",
71 .of_compatible = "sprd,sc27xx-chg-wdt", 71 .of_compatible = "sprd,sc2731-chg-wdt",
72 }, { 72 }, {
73 .name = "sc27xx-typec", 73 .name = "sc27xx-typec",
74 .of_compatible = "sprd,sc27xx-typec", 74 .of_compatible = "sprd,sc2731-typec",
75 }, { 75 }, {
76 .name = "sc27xx-flash", 76 .name = "sc27xx-flash",
77 .of_compatible = "sprd,sc27xx-flash", 77 .of_compatible = "sprd,sc2731-flash",
78 }, { 78 }, {
79 .name = "sc27xx-eic", 79 .name = "sc27xx-eic",
80 .of_compatible = "sprd,sc27xx-eic", 80 .of_compatible = "sprd,sc2731-eic",
81 }, { 81 }, {
82 .name = "sc27xx-efuse", 82 .name = "sc27xx-efuse",
83 .of_compatible = "sprd,sc27xx-efuse", 83 .of_compatible = "sprd,sc2731-efuse",
84 }, { 84 }, {
85 .name = "sc27xx-thermal", 85 .name = "sc27xx-thermal",
86 .of_compatible = "sprd,sc27xx-thermal", 86 .of_compatible = "sprd,sc2731-thermal",
87 }, { 87 }, {
88 .name = "sc27xx-adc", 88 .name = "sc27xx-adc",
89 .of_compatible = "sprd,sc27xx-adc", 89 .of_compatible = "sprd,sc2731-adc",
90 }, { 90 }, {
91 .name = "sc27xx-audio-codec", 91 .name = "sc27xx-audio-codec",
92 .of_compatible = "sprd,sc27xx-audio-codec", 92 .of_compatible = "sprd,sc2731-audio-codec",
93 }, { 93 }, {
94 .name = "sc27xx-regulator", 94 .name = "sc27xx-regulator",
95 .of_compatible = "sprd,sc27xx-regulator", 95 .of_compatible = "sprd,sc2731-regulator",
96 }, { 96 }, {
97 .name = "sc27xx-vibrator", 97 .name = "sc27xx-vibrator",
98 .of_compatible = "sprd,sc27xx-vibrator", 98 .of_compatible = "sprd,sc2731-vibrator",
99 }, { 99 }, {
100 .name = "sc27xx-keypad-led", 100 .name = "sc27xx-keypad-led",
101 .of_compatible = "sprd,sc27xx-keypad-led", 101 .of_compatible = "sprd,sc2731-keypad-led",
102 }, { 102 }, {
103 .name = "sc27xx-bltc", 103 .name = "sc27xx-bltc",
104 .of_compatible = "sprd,sc27xx-bltc", 104 .of_compatible = "sprd,sc2731-bltc",
105 }, { 105 }, {
106 .name = "sc27xx-fgu", 106 .name = "sc27xx-fgu",
107 .of_compatible = "sprd,sc27xx-fgu", 107 .of_compatible = "sprd,sc2731-fgu",
108 }, { 108 }, {
109 .name = "sc27xx-7sreset", 109 .name = "sc27xx-7sreset",
110 .of_compatible = "sprd,sc27xx-7sreset", 110 .of_compatible = "sprd,sc2731-7sreset",
111 }, { 111 }, {
112 .name = "sc27xx-poweroff", 112 .name = "sc27xx-poweroff",
113 .of_compatible = "sprd,sc27xx-poweroff", 113 .of_compatible = "sprd,sc2731-poweroff",
114 }, { 114 }, {
115 .name = "sc27xx-syscon", 115 .name = "sc27xx-syscon",
116 .of_compatible = "sprd,sc27xx-syscon", 116 .of_compatible = "sprd,sc2731-syscon",
117 }, 117 },
118}; 118};
119 119
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 299016bc46d9..104477b512a2 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1245,6 +1245,28 @@ free:
1245 return status; 1245 return status;
1246} 1246}
1247 1247
1248static int __maybe_unused twl_suspend(struct device *dev)
1249{
1250 struct i2c_client *client = to_i2c_client(dev);
1251
1252 if (client->irq)
1253 disable_irq(client->irq);
1254
1255 return 0;
1256}
1257
1258static int __maybe_unused twl_resume(struct device *dev)
1259{
1260 struct i2c_client *client = to_i2c_client(dev);
1261
1262 if (client->irq)
1263 enable_irq(client->irq);
1264
1265 return 0;
1266}
1267
1268static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
1269
1248static const struct i2c_device_id twl_ids[] = { 1270static const struct i2c_device_id twl_ids[] = {
1249 { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ 1271 { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */
1250 { "twl5030", 0 }, /* T2 updated */ 1272 { "twl5030", 0 }, /* T2 updated */
@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
1262/* One Client Driver , 4 Clients */ 1284/* One Client Driver , 4 Clients */
1263static struct i2c_driver twl_driver = { 1285static struct i2c_driver twl_driver = {
1264 .driver.name = DRIVER_NAME, 1286 .driver.name = DRIVER_NAME,
1287 .driver.pm = &twl_dev_pm_ops,
1265 .id_table = twl_ids, 1288 .id_table = twl_ids,
1266 .probe = twl_probe, 1289 .probe = twl_probe,
1267 .remove = twl_remove, 1290 .remove = twl_remove,
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index 3525236ed8d9..19c84214a7ea 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
179 179
180 /* We also need to update CI for internal queues */ 180 /* We also need to update CI for internal queues */
181 if (cs->submitted) { 181 if (cs->submitted) {
182 int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
183
184 WARN_ONCE((cs_cnt < 0),
185 "hl%d: error in CS active cnt %d\n",
186 hdev->id, cs_cnt);
187
182 hl_int_hw_queue_update_ci(cs); 188 hl_int_hw_queue_update_ci(cs);
183 189
184 spin_lock(&hdev->hw_queues_mirror_lock); 190 spin_lock(&hdev->hw_queues_mirror_lock);
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index a53c12aff6ad..974a87789bd8 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
232 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; 232 struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
233 enum vm_type_t *vm_type; 233 enum vm_type_t *vm_type;
234 bool once = true; 234 bool once = true;
235 u64 j;
235 int i; 236 int i;
236 237
237 if (!dev_entry->hdev->mmu_enable) 238 if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
260 } else { 261 } else {
261 phys_pg_pack = hnode->ptr; 262 phys_pg_pack = hnode->ptr;
262 seq_printf(s, 263 seq_printf(s,
263 " 0x%-14llx %-10u %-4u\n", 264 " 0x%-14llx %-10llu %-4u\n",
264 hnode->vaddr, phys_pg_pack->total_size, 265 hnode->vaddr, phys_pg_pack->total_size,
265 phys_pg_pack->handle); 266 phys_pg_pack->handle);
266 } 267 }
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
282 phys_pg_pack->page_size); 283 phys_pg_pack->page_size);
283 seq_puts(s, " physical address\n"); 284 seq_puts(s, " physical address\n");
284 seq_puts(s, "---------------------\n"); 285 seq_puts(s, "---------------------\n");
285 for (i = 0 ; i < phys_pg_pack->npages ; i++) { 286 for (j = 0 ; j < phys_pg_pack->npages ; j++) {
286 seq_printf(s, " 0x%-14llx\n", 287 seq_printf(s, " 0x%-14llx\n",
287 phys_pg_pack->pages[i]); 288 phys_pg_pack->pages[j]);
288 } 289 }
289 } 290 }
290 spin_unlock(&vm->idr_lock); 291 spin_unlock(&vm->idr_lock);
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index de46aa6ed154..77d51be66c7e 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -11,6 +11,8 @@
11#include <linux/sched/signal.h> 11#include <linux/sched/signal.h>
12#include <linux/hwmon.h> 12#include <linux/hwmon.h>
13 13
14#define HL_PLDM_PENDING_RESET_PER_SEC (HL_PENDING_RESET_PER_SEC * 10)
15
14bool hl_device_disabled_or_in_reset(struct hl_device *hdev) 16bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
15{ 17{
16 if ((hdev->disabled) || (atomic_read(&hdev->in_reset))) 18 if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
216 spin_lock_init(&hdev->hw_queues_mirror_lock); 218 spin_lock_init(&hdev->hw_queues_mirror_lock);
217 atomic_set(&hdev->in_reset, 0); 219 atomic_set(&hdev->in_reset, 0);
218 atomic_set(&hdev->fd_open_cnt, 0); 220 atomic_set(&hdev->fd_open_cnt, 0);
221 atomic_set(&hdev->cs_active_cnt, 0);
219 222
220 return 0; 223 return 0;
221 224
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
413 416
414 pci_save_state(hdev->pdev); 417 pci_save_state(hdev->pdev);
415 418
419 /* Block future CS/VM/JOB completion operations */
420 rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
421 if (rc) {
422 dev_err(hdev->dev, "Can't suspend while in reset\n");
423 return -EIO;
424 }
425
426 /* This blocks all other stuff that is not blocked by in_reset */
427 hdev->disabled = true;
428
429 /*
430 * Flush anyone that is inside the critical section of enqueue
431 * jobs to the H/W
432 */
433 hdev->asic_funcs->hw_queues_lock(hdev);
434 hdev->asic_funcs->hw_queues_unlock(hdev);
435
436 /* Flush processes that are sending message to CPU */
437 mutex_lock(&hdev->send_cpu_message_lock);
438 mutex_unlock(&hdev->send_cpu_message_lock);
439
416 rc = hdev->asic_funcs->suspend(hdev); 440 rc = hdev->asic_funcs->suspend(hdev);
417 if (rc) 441 if (rc)
418 dev_err(hdev->dev, 442 dev_err(hdev->dev,
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
440 464
441 pci_set_power_state(hdev->pdev, PCI_D0); 465 pci_set_power_state(hdev->pdev, PCI_D0);
442 pci_restore_state(hdev->pdev); 466 pci_restore_state(hdev->pdev);
443 rc = pci_enable_device(hdev->pdev); 467 rc = pci_enable_device_mem(hdev->pdev);
444 if (rc) { 468 if (rc) {
445 dev_err(hdev->dev, 469 dev_err(hdev->dev,
446 "Failed to enable PCI device in resume\n"); 470 "Failed to enable PCI device in resume\n");
447 return rc; 471 return rc;
448 } 472 }
449 473
474 pci_set_master(hdev->pdev);
475
450 rc = hdev->asic_funcs->resume(hdev); 476 rc = hdev->asic_funcs->resume(hdev);
451 if (rc) { 477 if (rc) {
452 dev_err(hdev->dev, 478 dev_err(hdev->dev, "Failed to resume device after suspend\n");
453 "Failed to enable PCI access from device CPU\n"); 479 goto disable_device;
454 return rc; 480 }
481
482
483 hdev->disabled = false;
484 atomic_set(&hdev->in_reset, 0);
485
486 rc = hl_device_reset(hdev, true, false);
487 if (rc) {
488 dev_err(hdev->dev, "Failed to reset device during resume\n");
489 goto disable_device;
455 } 490 }
456 491
457 return 0; 492 return 0;
493
494disable_device:
495 pci_clear_master(hdev->pdev);
496 pci_disable_device(hdev->pdev);
497
498 return rc;
458} 499}
459 500
460static void hl_device_hard_reset_pending(struct work_struct *work) 501static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
462 struct hl_device_reset_work *device_reset_work = 503 struct hl_device_reset_work *device_reset_work =
463 container_of(work, struct hl_device_reset_work, reset_work); 504 container_of(work, struct hl_device_reset_work, reset_work);
464 struct hl_device *hdev = device_reset_work->hdev; 505 struct hl_device *hdev = device_reset_work->hdev;
465 u16 pending_cnt = HL_PENDING_RESET_PER_SEC; 506 u16 pending_total, pending_cnt;
466 struct task_struct *task = NULL; 507 struct task_struct *task = NULL;
467 508
509 if (hdev->pldm)
510 pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
511 else
512 pending_total = HL_PENDING_RESET_PER_SEC;
513
514 pending_cnt = pending_total;
515
468 /* Flush all processes that are inside hl_open */ 516 /* Flush all processes that are inside hl_open */
469 mutex_lock(&hdev->fd_open_cnt_lock); 517 mutex_lock(&hdev->fd_open_cnt_lock);
470 518
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
489 } 537 }
490 } 538 }
491 539
540 pending_cnt = pending_total;
541
542 while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
543
544 pending_cnt--;
545
546 ssleep(1);
547 }
548
549 if (atomic_read(&hdev->fd_open_cnt))
550 dev_crit(hdev->dev,
551 "Going to hard reset with open user contexts\n");
552
492 mutex_unlock(&hdev->fd_open_cnt_lock); 553 mutex_unlock(&hdev->fd_open_cnt_lock);
493 554
494 hl_device_reset(hdev, true, true); 555 hl_device_reset(hdev, true, true);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 238dd57c541b..ea979ebd62fb 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev)
1201 return retval; 1201 return retval;
1202} 1202}
1203 1203
1204static void goya_resume_external_queues(struct hl_device *hdev)
1205{
1206 WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
1207 WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
1208 WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
1209 WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
1210 WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
1211}
1212
1213/* 1204/*
1214 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU 1205 * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
1215 * 1206 *
@@ -2178,36 +2169,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
2178 return retval; 2169 return retval;
2179} 2170}
2180 2171
2181static void goya_resume_internal_queues(struct hl_device *hdev)
2182{
2183 WREG32(mmMME_QM_GLBL_CFG1, 0);
2184 WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
2185
2186 WREG32(mmTPC0_QM_GLBL_CFG1, 0);
2187 WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
2188
2189 WREG32(mmTPC1_QM_GLBL_CFG1, 0);
2190 WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
2191
2192 WREG32(mmTPC2_QM_GLBL_CFG1, 0);
2193 WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
2194
2195 WREG32(mmTPC3_QM_GLBL_CFG1, 0);
2196 WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
2197
2198 WREG32(mmTPC4_QM_GLBL_CFG1, 0);
2199 WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
2200
2201 WREG32(mmTPC5_QM_GLBL_CFG1, 0);
2202 WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
2203
2204 WREG32(mmTPC6_QM_GLBL_CFG1, 0);
2205 WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
2206
2207 WREG32(mmTPC7_QM_GLBL_CFG1, 0);
2208 WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
2209}
2210
2211static void goya_dma_stall(struct hl_device *hdev) 2172static void goya_dma_stall(struct hl_device *hdev)
2212{ 2173{
2213 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT); 2174 WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -2905,20 +2866,6 @@ int goya_suspend(struct hl_device *hdev)
2905{ 2866{
2906 int rc; 2867 int rc;
2907 2868
2908 rc = goya_stop_internal_queues(hdev);
2909
2910 if (rc) {
2911 dev_err(hdev->dev, "failed to stop internal queues\n");
2912 return rc;
2913 }
2914
2915 rc = goya_stop_external_queues(hdev);
2916
2917 if (rc) {
2918 dev_err(hdev->dev, "failed to stop external queues\n");
2919 return rc;
2920 }
2921
2922 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); 2869 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
2923 if (rc) 2870 if (rc)
2924 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); 2871 dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2928,15 +2875,7 @@ int goya_suspend(struct hl_device *hdev)
2928 2875
2929int goya_resume(struct hl_device *hdev) 2876int goya_resume(struct hl_device *hdev)
2930{ 2877{
2931 int rc; 2878 return goya_init_iatu(hdev);
2932
2933 goya_resume_external_queues(hdev);
2934 goya_resume_internal_queues(hdev);
2935
2936 rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
2937 if (rc)
2938 dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
2939 return rc;
2940} 2879}
2941 2880
2942static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, 2881static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
@@ -3070,7 +3009,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
3070 3009
3071 *dma_handle = hdev->asic_prop.sram_base_address; 3010 *dma_handle = hdev->asic_prop.sram_base_address;
3072 3011
3073 base = hdev->pcie_bar[SRAM_CFG_BAR_ID]; 3012 base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
3074 3013
3075 switch (queue_id) { 3014 switch (queue_id) {
3076 case GOYA_QUEUE_ID_MME: 3015 case GOYA_QUEUE_ID_MME:
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index a7c95e9f9b9a..a8ee52c880cd 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
793 * struct hl_vm_phys_pg_pack - physical page pack. 793 * struct hl_vm_phys_pg_pack - physical page pack.
794 * @vm_type: describes the type of the virtual area descriptor. 794 * @vm_type: describes the type of the virtual area descriptor.
795 * @pages: the physical page array. 795 * @pages: the physical page array.
796 * @npages: num physical pages in the pack.
797 * @total_size: total size of all the pages in this list.
796 * @mapping_cnt: number of shared mappings. 798 * @mapping_cnt: number of shared mappings.
797 * @asid: the context related to this list. 799 * @asid: the context related to this list.
798 * @npages: num physical pages in the pack.
799 * @page_size: size of each page in the pack. 800 * @page_size: size of each page in the pack.
800 * @total_size: total size of all the pages in this list.
801 * @flags: HL_MEM_* flags related to this list. 801 * @flags: HL_MEM_* flags related to this list.
802 * @handle: the provided handle related to this list. 802 * @handle: the provided handle related to this list.
803 * @offset: offset from the first page. 803 * @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
807struct hl_vm_phys_pg_pack { 807struct hl_vm_phys_pg_pack {
808 enum vm_type_t vm_type; /* must be first */ 808 enum vm_type_t vm_type; /* must be first */
809 u64 *pages; 809 u64 *pages;
810 u64 npages;
811 u64 total_size;
810 atomic_t mapping_cnt; 812 atomic_t mapping_cnt;
811 u32 asid; 813 u32 asid;
812 u32 npages;
813 u32 page_size; 814 u32 page_size;
814 u32 total_size;
815 u32 flags; 815 u32 flags;
816 u32 handle; 816 u32 handle;
817 u32 offset; 817 u32 offset;
@@ -1056,13 +1056,15 @@ struct hl_device_reset_work {
1056 * @cb_pool_lock: protects the CB pool. 1056 * @cb_pool_lock: protects the CB pool.
1057 * @user_ctx: current user context executing. 1057 * @user_ctx: current user context executing.
1058 * @dram_used_mem: current DRAM memory consumption. 1058 * @dram_used_mem: current DRAM memory consumption.
1059 * @in_reset: is device in reset flow.
1060 * @curr_pll_profile: current PLL profile.
1061 * @fd_open_cnt: number of open user processes.
1062 * @timeout_jiffies: device CS timeout value. 1059 * @timeout_jiffies: device CS timeout value.
1063 * @max_power: the max power of the device, as configured by the sysadmin. This 1060 * @max_power: the max power of the device, as configured by the sysadmin. This
1064 * value is saved so in case of hard-reset, KMD will restore this 1061 * value is saved so in case of hard-reset, KMD will restore this
1065 * value and update the F/W after the re-initialization 1062 * value and update the F/W after the re-initialization
1063 * @in_reset: is device in reset flow.
1064 * @curr_pll_profile: current PLL profile.
1065 * @fd_open_cnt: number of open user processes.
1066 * @cs_active_cnt: number of active command submissions on this device (active
1067 * means already in H/W queues)
1066 * @major: habanalabs KMD major. 1068 * @major: habanalabs KMD major.
1067 * @high_pll: high PLL profile frequency. 1069 * @high_pll: high PLL profile frequency.
1068 * @soft_reset_cnt: number of soft reset since KMD loading. 1070 * @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1128,11 +1130,12 @@ struct hl_device {
1128 struct hl_ctx *user_ctx; 1130 struct hl_ctx *user_ctx;
1129 1131
1130 atomic64_t dram_used_mem; 1132 atomic64_t dram_used_mem;
1133 u64 timeout_jiffies;
1134 u64 max_power;
1131 atomic_t in_reset; 1135 atomic_t in_reset;
1132 atomic_t curr_pll_profile; 1136 atomic_t curr_pll_profile;
1133 atomic_t fd_open_cnt; 1137 atomic_t fd_open_cnt;
1134 u64 timeout_jiffies; 1138 atomic_t cs_active_cnt;
1135 u64 max_power;
1136 u32 major; 1139 u32 major;
1137 u32 high_pll; 1140 u32 high_pll;
1138 u32 soft_reset_cnt; 1141 u32 soft_reset_cnt;
diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c
index 67bece26417c..ef3bb6951360 100644
--- a/drivers/misc/habanalabs/hw_queue.c
+++ b/drivers/misc/habanalabs/hw_queue.c
@@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
370 spin_unlock(&hdev->hw_queues_mirror_lock); 370 spin_unlock(&hdev->hw_queues_mirror_lock);
371 } 371 }
372 372
373 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) { 373 atomic_inc(&hdev->cs_active_cnt);
374
375 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
374 if (job->ext_queue) 376 if (job->ext_queue)
375 ext_hw_queue_schedule_job(job); 377 ext_hw_queue_schedule_job(job);
376 else 378 else
377 int_hw_queue_schedule_job(job); 379 int_hw_queue_schedule_job(job);
378 }
379 380
380 cs->submitted = true; 381 cs->submitted = true;
381 382
diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c
index 3a12fd1a5274..ce1fda40a8b8 100644
--- a/drivers/misc/habanalabs/memory.c
+++ b/drivers/misc/habanalabs/memory.c
@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
56 struct hl_device *hdev = ctx->hdev; 56 struct hl_device *hdev = ctx->hdev;
57 struct hl_vm *vm = &hdev->vm; 57 struct hl_vm *vm = &hdev->vm;
58 struct hl_vm_phys_pg_pack *phys_pg_pack; 58 struct hl_vm_phys_pg_pack *phys_pg_pack;
59 u64 paddr = 0; 59 u64 paddr = 0, total_size, num_pgs, i;
60 u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift; 60 u32 num_curr_pgs, page_size, page_shift;
61 int handle, rc, i; 61 int handle, rc;
62 bool contiguous; 62 bool contiguous;
63 63
64 num_curr_pgs = 0; 64 num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
73 paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size); 73 paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
74 if (!paddr) { 74 if (!paddr) {
75 dev_err(hdev->dev, 75 dev_err(hdev->dev,
76 "failed to allocate %u huge contiguous pages\n", 76 "failed to allocate %llu huge contiguous pages\n",
77 num_pgs); 77 num_pgs);
78 return -ENOMEM; 78 return -ENOMEM;
79 } 79 }
@@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
93 phys_pg_pack->flags = args->flags; 93 phys_pg_pack->flags = args->flags;
94 phys_pg_pack->contiguous = contiguous; 94 phys_pg_pack->contiguous = contiguous;
95 95
96 phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL); 96 phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
97 if (!phys_pg_pack->pages) { 97 if (!phys_pg_pack->pages) {
98 rc = -ENOMEM; 98 rc = -ENOMEM;
99 goto pages_arr_err; 99 goto pages_arr_err;
@@ -148,7 +148,7 @@ page_err:
148 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i], 148 gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
149 page_size); 149 page_size);
150 150
151 kfree(phys_pg_pack->pages); 151 kvfree(phys_pg_pack->pages);
152pages_arr_err: 152pages_arr_err:
153 kfree(phys_pg_pack); 153 kfree(phys_pg_pack);
154pages_pack_err: 154pages_pack_err:
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
267 struct hl_vm_phys_pg_pack *phys_pg_pack) 267 struct hl_vm_phys_pg_pack *phys_pg_pack)
268{ 268{
269 struct hl_vm *vm = &hdev->vm; 269 struct hl_vm *vm = &hdev->vm;
270 int i; 270 u64 i;
271 271
272 if (!phys_pg_pack->created_from_userptr) { 272 if (!phys_pg_pack->created_from_userptr) {
273 if (phys_pg_pack->contiguous) { 273 if (phys_pg_pack->contiguous) {
@@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
288 } 288 }
289 } 289 }
290 290
291 kfree(phys_pg_pack->pages); 291 kvfree(phys_pg_pack->pages);
292 kfree(phys_pg_pack); 292 kfree(phys_pg_pack);
293} 293}
294 294
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
519 * - Return the start address of the virtual block 519 * - Return the start address of the virtual block
520 */ 520 */
521static u64 get_va_block(struct hl_device *hdev, 521static u64 get_va_block(struct hl_device *hdev,
522 struct hl_va_range *va_range, u32 size, u64 hint_addr, 522 struct hl_va_range *va_range, u64 size, u64 hint_addr,
523 bool is_userptr) 523 bool is_userptr)
524{ 524{
525 struct hl_vm_va_block *va_block, *new_va_block = NULL; 525 struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
577 } 577 }
578 578
579 if (!new_va_block) { 579 if (!new_va_block) {
580 dev_err(hdev->dev, "no available va block for size %u\n", size); 580 dev_err(hdev->dev, "no available va block for size %llu\n",
581 size);
581 goto out; 582 goto out;
582 } 583 }
583 584
@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
648 struct hl_vm_phys_pg_pack *phys_pg_pack; 649 struct hl_vm_phys_pg_pack *phys_pg_pack;
649 struct scatterlist *sg; 650 struct scatterlist *sg;
650 dma_addr_t dma_addr; 651 dma_addr_t dma_addr;
651 u64 page_mask; 652 u64 page_mask, total_npages;
652 u32 npages, total_npages, page_size = PAGE_SIZE; 653 u32 npages, page_size = PAGE_SIZE;
653 bool first = true, is_huge_page_opt = true; 654 bool first = true, is_huge_page_opt = true;
654 int rc, i, j; 655 int rc, i, j;
655 656
@@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
691 692
692 page_mask = ~(((u64) page_size) - 1); 693 page_mask = ~(((u64) page_size) - 1);
693 694
694 phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL); 695 phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
696 GFP_KERNEL);
695 if (!phys_pg_pack->pages) { 697 if (!phys_pg_pack->pages) {
696 rc = -ENOMEM; 698 rc = -ENOMEM;
697 goto page_pack_arr_mem_err; 699 goto page_pack_arr_mem_err;
@@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
750 struct hl_vm_phys_pg_pack *phys_pg_pack) 752 struct hl_vm_phys_pg_pack *phys_pg_pack)
751{ 753{
752 struct hl_device *hdev = ctx->hdev; 754 struct hl_device *hdev = ctx->hdev;
753 u64 next_vaddr = vaddr, paddr; 755 u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
754 u32 page_size = phys_pg_pack->page_size; 756 u32 page_size = phys_pg_pack->page_size;
755 int i, rc = 0, mapped_pg_cnt = 0; 757 int rc = 0;
756 758
757 for (i = 0 ; i < phys_pg_pack->npages ; i++) { 759 for (i = 0 ; i < phys_pg_pack->npages ; i++) {
758 paddr = phys_pg_pack->pages[i]; 760 paddr = phys_pg_pack->pages[i];
@@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
764 rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size); 766 rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
765 if (rc) { 767 if (rc) {
766 dev_err(hdev->dev, 768 dev_err(hdev->dev,
767 "map failed for handle %u, npages: %d, mapped: %d", 769 "map failed for handle %u, npages: %llu, mapped: %llu",
768 phys_pg_pack->handle, phys_pg_pack->npages, 770 phys_pg_pack->handle, phys_pg_pack->npages,
769 mapped_pg_cnt); 771 mapped_pg_cnt);
770 goto err; 772 goto err;
@@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
985 struct hl_vm_hash_node *hnode = NULL; 987 struct hl_vm_hash_node *hnode = NULL;
986 struct hl_userptr *userptr = NULL; 988 struct hl_userptr *userptr = NULL;
987 enum vm_type_t *vm_type; 989 enum vm_type_t *vm_type;
988 u64 next_vaddr; 990 u64 next_vaddr, i;
989 u32 page_size; 991 u32 page_size;
990 bool is_userptr; 992 bool is_userptr;
991 int i, rc; 993 int rc;
992 994
993 /* protect from double entrance */ 995 /* protect from double entrance */
994 mutex_lock(&ctx->mem_hash_lock); 996 mutex_lock(&ctx->mem_hash_lock);
diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c
index 2f2e99cb2743..3a5a2cec8305 100644
--- a/drivers/misc/habanalabs/mmu.c
+++ b/drivers/misc/habanalabs/mmu.c
@@ -832,7 +832,7 @@ err:
832int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size) 832int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
833{ 833{
834 struct hl_device *hdev = ctx->hdev; 834 struct hl_device *hdev = ctx->hdev;
835 u64 real_virt_addr; 835 u64 real_virt_addr, real_phys_addr;
836 u32 real_page_size, npages; 836 u32 real_page_size, npages;
837 int i, rc, mapped_cnt = 0; 837 int i, rc, mapped_cnt = 0;
838 838
@@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
857 857
858 npages = page_size / real_page_size; 858 npages = page_size / real_page_size;
859 real_virt_addr = virt_addr; 859 real_virt_addr = virt_addr;
860 real_phys_addr = phys_addr;
860 861
861 for (i = 0 ; i < npages ; i++) { 862 for (i = 0 ; i < npages ; i++) {
862 rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr, 863 rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
863 real_page_size); 864 real_page_size);
864 if (rc) 865 if (rc)
865 goto err; 866 goto err;
866 867
867 real_virt_addr += real_page_size; 868 real_virt_addr += real_page_size;
869 real_phys_addr += real_page_size;
868 mapped_cnt++; 870 mapped_cnt++;
869 } 871 }
870 872
diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
index c712b7deb3a9..82a97866e0cf 100644
--- a/drivers/mmc/host/alcor.c
+++ b/drivers/mmc/host/alcor.c
@@ -1044,14 +1044,27 @@ static void alcor_init_mmc(struct alcor_sdmmc_host *host)
1044 mmc->caps2 = MMC_CAP2_NO_SDIO; 1044 mmc->caps2 = MMC_CAP2_NO_SDIO;
1045 mmc->ops = &alcor_sdc_ops; 1045 mmc->ops = &alcor_sdc_ops;
1046 1046
1047 /* Hardware cannot do scatter lists */ 1047 /* The hardware does DMA data transfer of 4096 bytes to/from a single
1048 * buffer address. Scatterlists are not supported, but upon DMA
1049 * completion (signalled via IRQ), the original vendor driver does
1050 * then immediately set up another DMA transfer of the next 4096
1051 * bytes.
1052 *
1053 * This means that we need to handle the I/O in 4096 byte chunks.
1054 * Lacking a way to limit the sglist entries to 4096 bytes, we instead
1055 * impose that only one segment is provided, with maximum size 4096,
1056 * which also happens to be the minimum size. This means that the
1057 * single-entry sglist handled by this driver can be handed directly
1058 * to the hardware, nice and simple.
1059 *
1060 * Unfortunately though, that means we only do 4096 bytes I/O per
1061 * MMC command. A future improvement would be to make the driver
1062 * accept sg lists and entries of any size, and simply iterate
1063 * through them 4096 bytes at a time.
1064 */
1048 mmc->max_segs = AU6601_MAX_DMA_SEGMENTS; 1065 mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
1049 mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE; 1066 mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
1050 1067 mmc->max_req_size = mmc->max_seg_size;
1051 mmc->max_blk_size = mmc->max_seg_size;
1052 mmc->max_blk_count = mmc->max_segs;
1053
1054 mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
1055} 1068}
1056 1069
1057static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev) 1070static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 49e0daf2ef5e..f37003df1e01 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1117,7 +1117,7 @@ static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host)
1117{ 1117{
1118} 1118}
1119#endif 1119#endif
1120static void __init init_mmcsd_host(struct mmc_davinci_host *host) 1120static void init_mmcsd_host(struct mmc_davinci_host *host)
1121{ 1121{
1122 1122
1123 mmc_davinci_reset_ctrl(host, 1); 1123 mmc_davinci_reset_ctrl(host, 1);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index d54612257b06..45f7b9b53d48 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -290,11 +290,8 @@ static void mxcmci_swap_buffers(struct mmc_data *data)
290 struct scatterlist *sg; 290 struct scatterlist *sg;
291 int i; 291 int i;
292 292
293 for_each_sg(data->sg, sg, data->sg_len, i) { 293 for_each_sg(data->sg, sg, data->sg_len, i)
294 void *buf = kmap_atomic(sg_page(sg) + sg->offset); 294 buffer_swap32(sg_virt(sg), sg->length);
295 buffer_swap32(buf, sg->length);
296 kunmap_atomic(buf);
297 }
298} 295}
299#else 296#else
300static inline void mxcmci_swap_buffers(struct mmc_data *data) {} 297static inline void mxcmci_swap_buffers(struct mmc_data *data) {}
@@ -611,7 +608,6 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
611{ 608{
612 struct mmc_data *data = host->req->data; 609 struct mmc_data *data = host->req->data;
613 struct scatterlist *sg; 610 struct scatterlist *sg;
614 void *buf;
615 int stat, i; 611 int stat, i;
616 612
617 host->data = data; 613 host->data = data;
@@ -619,18 +615,14 @@ static int mxcmci_transfer_data(struct mxcmci_host *host)
619 615
620 if (data->flags & MMC_DATA_READ) { 616 if (data->flags & MMC_DATA_READ) {
621 for_each_sg(data->sg, sg, data->sg_len, i) { 617 for_each_sg(data->sg, sg, data->sg_len, i) {
622 buf = kmap_atomic(sg_page(sg) + sg->offset); 618 stat = mxcmci_pull(host, sg_virt(sg), sg->length);
623 stat = mxcmci_pull(host, buf, sg->length);
624 kunmap(buf);
625 if (stat) 619 if (stat)
626 return stat; 620 return stat;
627 host->datasize += sg->length; 621 host->datasize += sg->length;
628 } 622 }
629 } else { 623 } else {
630 for_each_sg(data->sg, sg, data->sg_len, i) { 624 for_each_sg(data->sg, sg, data->sg_len, i) {
631 buf = kmap_atomic(sg_page(sg) + sg->offset); 625 stat = mxcmci_push(host, sg_virt(sg), sg->length);
632 stat = mxcmci_push(host, buf, sg->length);
633 kunmap(buf);
634 if (stat) 626 if (stat)
635 return stat; 627 return stat;
636 host->datasize += sg->length; 628 host->datasize += sg->length;
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index c907bf502a12..c1d3f0e38921 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -162,7 +162,7 @@ static void pxamci_dma_irq(void *param);
162static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) 162static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data)
163{ 163{
164 struct dma_async_tx_descriptor *tx; 164 struct dma_async_tx_descriptor *tx;
165 enum dma_data_direction direction; 165 enum dma_transfer_direction direction;
166 struct dma_slave_config config; 166 struct dma_slave_config config;
167 struct dma_chan *chan; 167 struct dma_chan *chan;
168 unsigned int nob = data->blocks; 168 unsigned int nob = data->blocks;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 71e13844df6c..8742e27e4e8b 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -641,6 +641,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
641 struct renesas_sdhi *priv; 641 struct renesas_sdhi *priv;
642 struct resource *res; 642 struct resource *res;
643 int irq, ret, i; 643 int irq, ret, i;
644 u16 ver;
644 645
645 of_data = of_device_get_match_data(&pdev->dev); 646 of_data = of_device_get_match_data(&pdev->dev);
646 647
@@ -773,12 +774,17 @@ int renesas_sdhi_probe(struct platform_device *pdev,
773 if (ret) 774 if (ret)
774 goto efree; 775 goto efree;
775 776
777 ver = sd_ctrl_read16(host, CTL_VERSION);
778 /* GEN2_SDR104 is first known SDHI to use 32bit block count */
779 if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
780 mmc_data->max_blk_count = U16_MAX;
781
776 ret = tmio_mmc_host_probe(host); 782 ret = tmio_mmc_host_probe(host);
777 if (ret < 0) 783 if (ret < 0)
778 goto edisclk; 784 goto edisclk;
779 785
780 /* One Gen2 SDHI incarnation does NOT have a CBSY bit */ 786 /* One Gen2 SDHI incarnation does NOT have a CBSY bit */
781 if (sd_ctrl_read16(host, CTL_VERSION) == SDHI_VER_GEN2_SDR50) 787 if (ver == SDHI_VER_GEN2_SDR50)
782 mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY; 788 mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
783 789
784 /* Enable tuning iff we have an SCC and a supported mode */ 790 /* Enable tuning iff we have an SCC and a supported mode */
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index b1a66ca3821a..5bbed477c9b1 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -1056,6 +1056,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
1056 mmc->f_max = 48000000; 1056 mmc->f_max = 48000000;
1057 } 1057 }
1058 1058
1059 if (!mmc_can_gpio_ro(mmc))
1060 mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT;
1061
1059 pltfm_host->clk = devm_clk_get(dev, "fck"); 1062 pltfm_host->clk = devm_clk_get(dev, "fck");
1060 if (IS_ERR(pltfm_host->clk)) { 1063 if (IS_ERR(pltfm_host->clk)) {
1061 ret = PTR_ERR(pltfm_host->clk); 1064 ret = PTR_ERR(pltfm_host->clk);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index 72428b6bfc47..7b7286b4d81e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -1876,7 +1876,11 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1876 continue; 1876 continue;
1877 } 1877 }
1878 1878
1879 if (time_after(jiffies, timeo) && !chip_ready(map, adr)) 1879 /*
1880 * We check "time_after" and "!chip_good" before checking "chip_good" to avoid
1881 * the failure due to scheduling.
1882 */
1883 if (time_after(jiffies, timeo) && !chip_good(map, adr, datum))
1880 break; 1884 break;
1881 1885
1882 if (chip_good(map, adr, datum)) { 1886 if (chip_good(map, adr, datum)) {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 5e4ca082cfcd..7a96d168efc4 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -216,8 +216,8 @@ config GENEVE
216 216
217config GTP 217config GTP
218 tristate "GPRS Tunneling Protocol datapath (GTP-U)" 218 tristate "GPRS Tunneling Protocol datapath (GTP-U)"
219 depends on INET && NET_UDP_TUNNEL 219 depends on INET
220 select NET_IP_TUNNEL 220 select NET_UDP_TUNNEL
221 ---help--- 221 ---help---
222 This allows one to create gtp virtual interfaces that provide 222 This allows one to create gtp virtual interfaces that provide
223 the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol 223 the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2f120b2ffef0..4985268e2273 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
55 55
56static ssize_t perm_hwaddr_show(struct slave *slave, char *buf) 56static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
57{ 57{
58 return sprintf(buf, "%pM\n", slave->perm_hwaddr); 58 return sprintf(buf, "%*phC\n",
59 slave->dev->addr_len,
60 slave->perm_hwaddr);
59} 61}
60static SLAVE_ATTR_RO(perm_hwaddr); 62static SLAVE_ATTR_RO(perm_hwaddr);
61 63
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index dce84a2a65c7..c44b2822e4dd 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
427 return 0; 427 return 0;
428 428
429 lane = mv88e6390x_serdes_get_lane(chip, port); 429 lane = mv88e6390x_serdes_get_lane(chip, port);
430 if (lane < 0) 430 if (lane < 0 && lane != -ENODEV)
431 return lane; 431 return lane;
432 432
433 if (chip->ports[port].serdes_irq) { 433 if (lane >= 0) {
434 err = mv88e6390_serdes_irq_disable(chip, port, lane); 434 if (chip->ports[port].serdes_irq) {
435 err = mv88e6390_serdes_irq_disable(chip, port, lane);
436 if (err)
437 return err;
438 }
439
440 err = mv88e6390x_serdes_power(chip, port, false);
435 if (err) 441 if (err)
436 return err; 442 return err;
437 } 443 }
438 444
439 err = mv88e6390x_serdes_power(chip, port, false); 445 chip->ports[port].cmode = 0;
440 if (err)
441 return err;
442 446
443 if (cmode) { 447 if (cmode) {
444 err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg); 448 err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
452 if (err) 456 if (err)
453 return err; 457 return err;
454 458
459 chip->ports[port].cmode = cmode;
460
461 lane = mv88e6390x_serdes_get_lane(chip, port);
462 if (lane < 0)
463 return lane;
464
455 err = mv88e6390x_serdes_power(chip, port, true); 465 err = mv88e6390x_serdes_power(chip, port, true);
456 if (err) 466 if (err)
457 return err; 467 return err;
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
463 } 473 }
464 } 474 }
465 475
466 chip->ports[port].cmode = cmode;
467
468 return 0; 476 return 0;
469} 477}
470 478
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 576b37d12a63..c4fa400efdcc 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -481,6 +481,155 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
481 qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask); 481 qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
482} 482}
483 483
484static u32
485qca8k_port_to_phy(int port)
486{
487 /* From Andrew Lunn:
488 * Port 0 has no internal phy.
489 * Port 1 has an internal PHY at MDIO address 0.
490 * Port 2 has an internal PHY at MDIO address 1.
491 * ...
492 * Port 5 has an internal PHY at MDIO address 4.
493 * Port 6 has no internal PHY.
494 */
495
496 return port - 1;
497}
498
499static int
500qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data)
501{
502 u32 phy, val;
503
504 if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
505 return -EINVAL;
506
507 /* callee is responsible for not passing bad ports,
508 * but we still would like to make spills impossible.
509 */
510 phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
511 val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
512 QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
513 QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
514 QCA8K_MDIO_MASTER_DATA(data);
515
516 qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
517
518 return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
519 QCA8K_MDIO_MASTER_BUSY);
520}
521
522static int
523qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum)
524{
525 u32 phy, val;
526
527 if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
528 return -EINVAL;
529
530 /* callee is responsible for not passing bad ports,
531 * but we still would like to make spills impossible.
532 */
533 phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
534 val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
535 QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
536 QCA8K_MDIO_MASTER_REG_ADDR(regnum);
537
538 qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
539
540 if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
541 QCA8K_MDIO_MASTER_BUSY))
542 return -ETIMEDOUT;
543
544 val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) &
545 QCA8K_MDIO_MASTER_DATA_MASK);
546
547 return val;
548}
549
550static int
551qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
552{
553 struct qca8k_priv *priv = ds->priv;
554
555 return qca8k_mdio_write(priv, port, regnum, data);
556}
557
558static int
559qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
560{
561 struct qca8k_priv *priv = ds->priv;
562 int ret;
563
564 ret = qca8k_mdio_read(priv, port, regnum);
565
566 if (ret < 0)
567 return 0xffff;
568
569 return ret;
570}
571
572static int
573qca8k_setup_mdio_bus(struct qca8k_priv *priv)
574{
575 u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
576 struct device_node *ports, *port;
577 int err;
578
579 ports = of_get_child_by_name(priv->dev->of_node, "ports");
580 if (!ports)
581 return -EINVAL;
582
583 for_each_available_child_of_node(ports, port) {
584 err = of_property_read_u32(port, "reg", &reg);
585 if (err)
586 return err;
587
588 if (!dsa_is_user_port(priv->ds, reg))
589 continue;
590
591 if (of_property_read_bool(port, "phy-handle"))
592 external_mdio_mask |= BIT(reg);
593 else
594 internal_mdio_mask |= BIT(reg);
595 }
596
597 if (!external_mdio_mask && !internal_mdio_mask) {
598 dev_err(priv->dev, "no PHYs are defined.\n");
599 return -EINVAL;
600 }
601
602 /* The QCA8K_MDIO_MASTER_EN Bit, which grants access to PHYs through
603 * the MDIO_MASTER register also _disconnects_ the external MDC
604 * passthrough to the internal PHYs. It's not possible to use both
605 * configurations at the same time!
606 *
607 * Because this came up during the review process:
608 * If the external mdio-bus driver is capable magically disabling
609 * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
610 * accessors for the time being, it would be possible to pull this
611 * off.
612 */
613 if (!!external_mdio_mask && !!internal_mdio_mask) {
614 dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
615 return -EINVAL;
616 }
617
618 if (external_mdio_mask) {
619 /* Make sure to disable the internal mdio bus in cases
620 * a dt-overlay and driver reload changed the configuration
621 */
622
623 qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
624 QCA8K_MDIO_MASTER_EN);
625 return 0;
626 }
627
628 priv->ops.phy_read = qca8k_phy_read;
629 priv->ops.phy_write = qca8k_phy_write;
630 return 0;
631}
632
484static int 633static int
485qca8k_setup(struct dsa_switch *ds) 634qca8k_setup(struct dsa_switch *ds)
486{ 635{
@@ -502,6 +651,10 @@ qca8k_setup(struct dsa_switch *ds)
502 if (IS_ERR(priv->regmap)) 651 if (IS_ERR(priv->regmap))
503 pr_warn("regmap initialization failed"); 652 pr_warn("regmap initialization failed");
504 653
654 ret = qca8k_setup_mdio_bus(priv);
655 if (ret)
656 return ret;
657
505 /* Initialize CPU port pad mode (xMII type, delays...) */ 658 /* Initialize CPU port pad mode (xMII type, delays...) */
506 phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn); 659 phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
507 if (phy_mode < 0) { 660 if (phy_mode < 0) {
@@ -624,22 +777,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
624 qca8k_port_set_status(priv, port, 1); 777 qca8k_port_set_status(priv, port, 1);
625} 778}
626 779
627static int
628qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
629{
630 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
631
632 return mdiobus_read(priv->bus, phy, regnum);
633}
634
635static int
636qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
637{
638 struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
639
640 return mdiobus_write(priv->bus, phy, regnum, val);
641}
642
643static void 780static void
644qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data) 781qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
645{ 782{
@@ -879,8 +1016,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
879 .setup = qca8k_setup, 1016 .setup = qca8k_setup,
880 .adjust_link = qca8k_adjust_link, 1017 .adjust_link = qca8k_adjust_link,
881 .get_strings = qca8k_get_strings, 1018 .get_strings = qca8k_get_strings,
882 .phy_read = qca8k_phy_read,
883 .phy_write = qca8k_phy_write,
884 .get_ethtool_stats = qca8k_get_ethtool_stats, 1019 .get_ethtool_stats = qca8k_get_ethtool_stats,
885 .get_sset_count = qca8k_get_sset_count, 1020 .get_sset_count = qca8k_get_sset_count,
886 .get_mac_eee = qca8k_get_mac_eee, 1021 .get_mac_eee = qca8k_get_mac_eee,
@@ -923,7 +1058,8 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
923 return -ENOMEM; 1058 return -ENOMEM;
924 1059
925 priv->ds->priv = priv; 1060 priv->ds->priv = priv;
926 priv->ds->ops = &qca8k_switch_ops; 1061 priv->ops = qca8k_switch_ops;
1062 priv->ds->ops = &priv->ops;
927 mutex_init(&priv->reg_mutex); 1063 mutex_init(&priv->reg_mutex);
928 dev_set_drvdata(&mdiodev->dev, priv); 1064 dev_set_drvdata(&mdiodev->dev, priv);
929 1065
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index d146e54c8a6c..249fd62268e5 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -49,6 +49,18 @@
49#define QCA8K_MIB_FLUSH BIT(24) 49#define QCA8K_MIB_FLUSH BIT(24)
50#define QCA8K_MIB_CPU_KEEP BIT(20) 50#define QCA8K_MIB_CPU_KEEP BIT(20)
51#define QCA8K_MIB_BUSY BIT(17) 51#define QCA8K_MIB_BUSY BIT(17)
52#define QCA8K_MDIO_MASTER_CTRL 0x3c
53#define QCA8K_MDIO_MASTER_BUSY BIT(31)
54#define QCA8K_MDIO_MASTER_EN BIT(30)
55#define QCA8K_MDIO_MASTER_READ BIT(27)
56#define QCA8K_MDIO_MASTER_WRITE 0
57#define QCA8K_MDIO_MASTER_SUP_PRE BIT(26)
58#define QCA8K_MDIO_MASTER_PHY_ADDR(x) ((x) << 21)
59#define QCA8K_MDIO_MASTER_REG_ADDR(x) ((x) << 16)
60#define QCA8K_MDIO_MASTER_DATA(x) (x)
61#define QCA8K_MDIO_MASTER_DATA_MASK GENMASK(15, 0)
62#define QCA8K_MDIO_MASTER_MAX_PORTS 5
63#define QCA8K_MDIO_MASTER_MAX_REG 32
52#define QCA8K_GOL_MAC_ADDR0 0x60 64#define QCA8K_GOL_MAC_ADDR0 0x60
53#define QCA8K_GOL_MAC_ADDR1 0x64 65#define QCA8K_GOL_MAC_ADDR1 0x64
54#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) 66#define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4)
@@ -169,6 +181,7 @@ struct qca8k_priv {
169 struct dsa_switch *ds; 181 struct dsa_switch *ds;
170 struct mutex reg_mutex; 182 struct mutex reg_mutex;
171 struct device *dev; 183 struct device *dev;
184 struct dsa_switch_ops ops;
172}; 185};
173 186
174struct qca8k_mib_desc { 187struct qca8k_mib_desc {
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 808abb6b3671..b15752267c8d 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1521,7 +1521,7 @@ static void update_stats(int ioaddr, struct net_device *dev)
1521static void set_rx_mode(struct net_device *dev) 1521static void set_rx_mode(struct net_device *dev)
1522{ 1522{
1523 int ioaddr = dev->base_addr; 1523 int ioaddr = dev->base_addr;
1524 short new_mode; 1524 unsigned short new_mode;
1525 1525
1526 if (dev->flags & IFF_PROMISC) { 1526 if (dev->flags & IFF_PROMISC) {
1527 if (corkscrew_debug > 3) 1527 if (corkscrew_debug > 3)
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 342ae08ec3c2..d60a86aa8aa8 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
153static void dayna_block_output(struct net_device *dev, int count, 153static void dayna_block_output(struct net_device *dev, int count,
154 const unsigned char *buf, int start_page); 154 const unsigned char *buf, int start_page);
155 155
156#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
157
158/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */ 156/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
159static void slow_sane_get_8390_hdr(struct net_device *dev, 157static void slow_sane_get_8390_hdr(struct net_device *dev,
160 struct e8390_pkt_hdr *hdr, int ring_page); 158 struct e8390_pkt_hdr *hdr, int ring_page);
@@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
233 231
234static enum mac8390_access mac8390_testio(unsigned long membase) 232static enum mac8390_access mac8390_testio(unsigned long membase)
235{ 233{
236 unsigned long outdata = 0xA5A0B5B0; 234 u32 outdata = 0xA5A0B5B0;
237 unsigned long indata = 0x00000000; 235 u32 indata = 0;
236
238 /* Try writing 32 bits */ 237 /* Try writing 32 bits */
239 memcpy_toio((void __iomem *)membase, &outdata, 4); 238 nubus_writel(outdata, membase);
240 /* Now compare them */ 239 /* Now read it back */
241 if (memcmp_withio(&outdata, membase, 4) == 0) 240 indata = nubus_readl(membase);
241 if (outdata == indata)
242 return ACCESS_32; 242 return ACCESS_32;
243
244 outdata = 0xC5C0D5D0;
245 indata = 0;
246
243 /* Write 16 bit output */ 247 /* Write 16 bit output */
244 word_memcpy_tocard(membase, &outdata, 4); 248 word_memcpy_tocard(membase, &outdata, 4);
245 /* Now read it back */ 249 /* Now read it back */
246 word_memcpy_fromcard(&indata, membase, 4); 250 word_memcpy_fromcard(&indata, membase, 4);
247 if (outdata == indata) 251 if (outdata == indata)
248 return ACCESS_16; 252 return ACCESS_16;
253
249 return ACCESS_UNKNOWN; 254 return ACCESS_UNKNOWN;
250} 255}
251 256
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 74550ccc7a20..e2ffb159cbe2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self,
186 } 186 }
187 if (buff->is_ip_cso) { 187 if (buff->is_ip_cso) {
188 __skb_incr_checksum_unnecessary(skb); 188 __skb_incr_checksum_unnecessary(skb);
189 if (buff->is_udp_cso || buff->is_tcp_cso)
190 __skb_incr_checksum_unnecessary(skb);
191 } else { 189 } else {
192 skb->ip_summed = CHECKSUM_NONE; 190 skb->ip_summed = CHECKSUM_NONE;
193 } 191 }
192
193 if (buff->is_udp_cso || buff->is_tcp_cso)
194 __skb_incr_checksum_unnecessary(skb);
194} 195}
195 196
196#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) 197#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index ad099fd01b45..1522aee81884 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -3370,14 +3370,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
3370 *hclk = devm_clk_get(&pdev->dev, "hclk"); 3370 *hclk = devm_clk_get(&pdev->dev, "hclk");
3371 } 3371 }
3372 3372
3373 if (IS_ERR(*pclk)) { 3373 if (IS_ERR_OR_NULL(*pclk)) {
3374 err = PTR_ERR(*pclk); 3374 err = PTR_ERR(*pclk);
3375 if (!err)
3376 err = -ENODEV;
3377
3375 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); 3378 dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
3376 return err; 3379 return err;
3377 } 3380 }
3378 3381
3379 if (IS_ERR(*hclk)) { 3382 if (IS_ERR_OR_NULL(*hclk)) {
3380 err = PTR_ERR(*hclk); 3383 err = PTR_ERR(*hclk);
3384 if (!err)
3385 err = -ENODEV;
3386
3381 dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); 3387 dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
3382 return err; 3388 return err;
3383 } 3389 }
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index aa2be4807191..28eac9056211 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1328,10 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
1328 struct nicvf_cq_poll *cq_poll = NULL; 1328 struct nicvf_cq_poll *cq_poll = NULL;
1329 union nic_mbx mbx = {}; 1329 union nic_mbx mbx = {};
1330 1330
1331 cancel_delayed_work_sync(&nic->link_change_work);
1332
1333 /* wait till all queued set_rx_mode tasks completes */ 1331 /* wait till all queued set_rx_mode tasks completes */
1334 drain_workqueue(nic->nicvf_rx_mode_wq); 1332 if (nic->nicvf_rx_mode_wq) {
1333 cancel_delayed_work_sync(&nic->link_change_work);
1334 drain_workqueue(nic->nicvf_rx_mode_wq);
1335 }
1335 1336
1336 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; 1337 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
1337 nicvf_send_msg_to_pf(nic, &mbx); 1338 nicvf_send_msg_to_pf(nic, &mbx);
@@ -1452,7 +1453,8 @@ int nicvf_open(struct net_device *netdev)
1452 struct nicvf_cq_poll *cq_poll = NULL; 1453 struct nicvf_cq_poll *cq_poll = NULL;
1453 1454
1454 /* wait till all queued set_rx_mode tasks completes if any */ 1455 /* wait till all queued set_rx_mode tasks completes if any */
1455 drain_workqueue(nic->nicvf_rx_mode_wq); 1456 if (nic->nicvf_rx_mode_wq)
1457 drain_workqueue(nic->nicvf_rx_mode_wq);
1456 1458
1457 netif_carrier_off(netdev); 1459 netif_carrier_off(netdev);
1458 1460
@@ -1550,10 +1552,12 @@ int nicvf_open(struct net_device *netdev)
1550 /* Send VF config done msg to PF */ 1552 /* Send VF config done msg to PF */
1551 nicvf_send_cfg_done(nic); 1553 nicvf_send_cfg_done(nic);
1552 1554
1553 INIT_DELAYED_WORK(&nic->link_change_work, 1555 if (nic->nicvf_rx_mode_wq) {
1554 nicvf_link_status_check_task); 1556 INIT_DELAYED_WORK(&nic->link_change_work,
1555 queue_delayed_work(nic->nicvf_rx_mode_wq, 1557 nicvf_link_status_check_task);
1556 &nic->link_change_work, 0); 1558 queue_delayed_work(nic->nicvf_rx_mode_wq,
1559 &nic->link_change_work, 0);
1560 }
1557 1561
1558 return 0; 1562 return 0;
1559cleanup: 1563cleanup:
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 5b4d3badcb73..e246f9733bb8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
105 /* Check if page can be recycled */ 105 /* Check if page can be recycled */
106 if (page) { 106 if (page) {
107 ref_count = page_ref_count(page); 107 ref_count = page_ref_count(page);
108 /* Check if this page has been used once i.e 'put_page' 108 /* This page can be recycled if internal ref_count and page's
109 * called after packet transmission i.e internal ref_count 109 * ref_count are equal, indicating that the page has been used
110 * and page's ref_count are equal i.e page can be recycled. 110 * once for packet transmission. For non-XDP mode, internal
111 * ref_count is always '1'.
111 */ 112 */
112 if (rbdr->is_xdp && (ref_count == pgcache->ref_count)) 113 if (rbdr->is_xdp) {
113 pgcache->ref_count--; 114 if (ref_count == pgcache->ref_count)
114 else 115 pgcache->ref_count--;
115 page = NULL; 116 else
116 117 page = NULL;
117 /* In non-XDP mode, page's ref_count needs to be '1' for it 118 } else if (ref_count != 1) {
118 * to be recycled.
119 */
120 if (!rbdr->is_xdp && (ref_count != 1))
121 page = NULL; 119 page = NULL;
120 }
122 } 121 }
123 122
124 if (!page) { 123 if (!page) {
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
365 while (head < rbdr->pgcnt) { 364 while (head < rbdr->pgcnt) {
366 pgcache = &rbdr->pgcache[head]; 365 pgcache = &rbdr->pgcache[head];
367 if (pgcache->page && page_ref_count(pgcache->page) != 0) { 366 if (pgcache->page && page_ref_count(pgcache->page) != 0) {
368 if (!rbdr->is_xdp) { 367 if (rbdr->is_xdp) {
369 put_page(pgcache->page); 368 page_ref_sub(pgcache->page,
370 continue; 369 pgcache->ref_count - 1);
371 } 370 }
372 page_ref_sub(pgcache->page, pgcache->ref_count - 1);
373 put_page(pgcache->page); 371 put_page(pgcache->page);
374 } 372 }
375 head++; 373 head++;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 3130b43bba52..02959035ed3f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2620,7 +2620,7 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
2620 } 2620 }
2621 2621
2622 /* should never happen! */ 2622 /* should never happen! */
2623 BUG_ON(1); 2623 BUG();
2624 return NULL; 2624 return NULL;
2625} 2625}
2626 2626
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 88773ca58e6b..b3da81e90132 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -476,7 +476,7 @@ static inline int get_buf_size(struct adapter *adapter,
476 break; 476 break;
477 477
478 default: 478 default:
479 BUG_ON(1); 479 BUG();
480 } 480 }
481 481
482 return buf_size; 482 return buf_size;
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 74849be5f004..e2919005ead3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
354 ppmax = max; 354 ppmax = max;
355 355
356 /* pool size must be multiple of unsigned long */ 356 /* pool size must be multiple of unsigned long */
357 bmap = BITS_TO_LONGS(ppmax); 357 bmap = ppmax / BITS_PER_TYPE(unsigned long);
358 if (!bmap)
359 return NULL;
360
358 ppmax = (bmap * sizeof(unsigned long)) << 3; 361 ppmax = (bmap * sizeof(unsigned long)) << 3;
359 362
360 alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; 363 alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
402 if (reserve_factor) { 405 if (reserve_factor) {
403 ppmax_pool = ppmax / reserve_factor; 406 ppmax_pool = ppmax / reserve_factor;
404 pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); 407 pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
408 if (!pool) {
409 ppmax_pool = 0;
410 reserve_factor = 0;
411 }
405 412
406 pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", 413 pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
407 ndev->name, ppmax, ppmax_pool, pool_index_max); 414 ndev->name, ppmax, ppmax_pool, pool_index_max);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 2ba49e959c3f..dc339dc1adb2 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -815,6 +815,14 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
815 */ 815 */
816 queue_mapping = skb_get_queue_mapping(skb); 816 queue_mapping = skb_get_queue_mapping(skb);
817 fq = &priv->fq[queue_mapping]; 817 fq = &priv->fq[queue_mapping];
818
819 fd_len = dpaa2_fd_get_len(&fd);
820 nq = netdev_get_tx_queue(net_dev, queue_mapping);
821 netdev_tx_sent_queue(nq, fd_len);
822
823 /* Everything that happens after this enqueues might race with
824 * the Tx confirmation callback for this frame
825 */
818 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { 826 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
819 err = priv->enqueue(priv, fq, &fd, 0); 827 err = priv->enqueue(priv, fq, &fd, 0);
820 if (err != -EBUSY) 828 if (err != -EBUSY)
@@ -825,13 +833,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
825 percpu_stats->tx_errors++; 833 percpu_stats->tx_errors++;
826 /* Clean up everything, including freeing the skb */ 834 /* Clean up everything, including freeing the skb */
827 free_tx_fd(priv, fq, &fd, false); 835 free_tx_fd(priv, fq, &fd, false);
836 netdev_tx_completed_queue(nq, 1, fd_len);
828 } else { 837 } else {
829 fd_len = dpaa2_fd_get_len(&fd);
830 percpu_stats->tx_packets++; 838 percpu_stats->tx_packets++;
831 percpu_stats->tx_bytes += fd_len; 839 percpu_stats->tx_bytes += fd_len;
832
833 nq = netdev_get_tx_queue(net_dev, queue_mapping);
834 netdev_tx_sent_queue(nq, fd_len);
835 } 840 }
836 841
837 return NETDEV_TX_OK; 842 return NETDEV_TX_OK;
@@ -1817,7 +1822,7 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
1817 dpaa2_fd_set_format(&fd, dpaa2_fd_single); 1822 dpaa2_fd_set_format(&fd, dpaa2_fd_single);
1818 dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA); 1823 dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
1819 1824
1820 fq = &priv->fq[smp_processor_id()]; 1825 fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
1821 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { 1826 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
1822 err = priv->enqueue(priv, fq, &fd, 0); 1827 err = priv->enqueue(priv, fq, &fd, 0);
1823 if (err != -EBUSY) 1828 if (err != -EBUSY)
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 79d03f8ee7b1..c7fa97a7e1f4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -150,7 +150,6 @@ out_buffer_fail:
150/* free desc along with its attached buffer */ 150/* free desc along with its attached buffer */
151static void hnae_free_desc(struct hnae_ring *ring) 151static void hnae_free_desc(struct hnae_ring *ring)
152{ 152{
153 hnae_free_buffers(ring);
154 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, 153 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
155 ring->desc_num * sizeof(ring->desc[0]), 154 ring->desc_num * sizeof(ring->desc[0]),
156 ring_to_dma_dir(ring)); 155 ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
183/* fini ring, also free the buffer for the ring */ 182/* fini ring, also free the buffer for the ring */
184static void hnae_fini_ring(struct hnae_ring *ring) 183static void hnae_fini_ring(struct hnae_ring *ring)
185{ 184{
185 if (is_rx_ring(ring))
186 hnae_free_buffers(ring);
187
186 hnae_free_desc(ring); 188 hnae_free_desc(ring);
187 kfree(ring->desc_cb); 189 kfree(ring->desc_cb);
188 ring->desc_cb = NULL; 190 ring->desc_cb = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 08a750fb60c4..d6fb83437230 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
357}; 357};
358 358
359struct hnae_queue { 359struct hnae_queue {
360 void __iomem *io_base; 360 u8 __iomem *io_base;
361 phys_addr_t phy_base; 361 phys_addr_t phy_base;
362 struct hnae_ae_dev *dev; /* the device who use this queue */ 362 struct hnae_ae_dev *dev; /* the device who use this queue */
363 struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; 363 struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a97228c93831..6c0507921623 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
370static void hns_mac_param_get(struct mac_params *param, 370static void hns_mac_param_get(struct mac_params *param,
371 struct hns_mac_cb *mac_cb) 371 struct hns_mac_cb *mac_cb)
372{ 372{
373 param->vaddr = (void *)mac_cb->vaddr; 373 param->vaddr = mac_cb->vaddr;
374 param->mac_mode = hns_get_enet_interface(mac_cb); 374 param->mac_mode = hns_get_enet_interface(mac_cb);
375 ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr); 375 ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
376 param->mac_id = mac_cb->mac_id; 376 param->mac_id = mac_cb->mac_id;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index fbc75341bef7..22589799f1a5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -187,7 +187,7 @@ struct mac_statistics {
187/*mac para struct ,mac get param from nic or dsaf when initialize*/ 187/*mac para struct ,mac get param from nic or dsaf when initialize*/
188struct mac_params { 188struct mac_params {
189 char addr[ETH_ALEN]; 189 char addr[ETH_ALEN];
190 void *vaddr; /*virtual address*/ 190 u8 __iomem *vaddr; /*virtual address*/
191 struct device *dev; 191 struct device *dev;
192 u8 mac_id; 192 u8 mac_id;
193 /**< Ethernet operation mode (MAC-PHY interface and speed) */ 193 /**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
402 enum mac_mode mac_mode; 402 enum mac_mode mac_mode;
403 u8 mac_id; 403 u8 mac_id;
404 struct hns_mac_cb *mac_cb; 404 struct hns_mac_cb *mac_cb;
405 void __iomem *io_base; 405 u8 __iomem *io_base;
406 unsigned int mac_en_flg;/*you'd better don't enable mac twice*/ 406 unsigned int mac_en_flg;/*you'd better don't enable mac twice*/
407 unsigned int virt_dev_num; 407 unsigned int virt_dev_num;
408 struct device *dev; 408 struct device *dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index ac55db065f16..61eea6ac846f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
1602 DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); 1602 DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
1603 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, 1603 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
1604 DSAF_TBL_TCAM_KEY_PORT_S, port); 1604 DSAF_TBL_TCAM_KEY_PORT_S, port);
1605
1606 mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
1607} 1605}
1608 1606
1609/** 1607/**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
1663 /* default config dvc to 0 */ 1661 /* default config dvc to 0 */
1664 mac_data.tbl_ucast_dvc = 0; 1662 mac_data.tbl_ucast_dvc = 0;
1665 mac_data.tbl_ucast_out_port = mac_entry->port_num; 1663 mac_data.tbl_ucast_out_port = mac_entry->port_num;
1666 tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); 1664 tcam_data.tbl_tcam_data_high = mac_key.high.val;
1667 tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); 1665 tcam_data.tbl_tcam_data_low = mac_key.low.val;
1668 1666
1669 hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data); 1667 hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
1670 1668
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
1786 0xff, 1784 0xff,
1787 mc_mask); 1785 mc_mask);
1788 1786
1789 mask_key.high.val = le32_to_cpu(mask_key.high.val);
1790 mask_key.low.val = le32_to_cpu(mask_key.low.val);
1791
1792 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); 1787 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
1793 } 1788 }
1794 1789
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
1840 dsaf_dev->ae_dev.name, mac_key.high.val, 1835 dsaf_dev->ae_dev.name, mac_key.high.val,
1841 mac_key.low.val, entry_index); 1836 mac_key.low.val, entry_index);
1842 1837
1843 tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); 1838 tcam_data.tbl_tcam_data_high = mac_key.high.val;
1844 tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); 1839 tcam_data.tbl_tcam_data_low = mac_key.low.val;
1845 1840
1846 /* config mc entry with mask */ 1841 /* config mc entry with mask */
1847 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, 1842 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
1956 /* config key mask */ 1951 /* config key mask */
1957 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); 1952 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
1958 1953
1959 mask_key.high.val = le32_to_cpu(mask_key.high.val);
1960 mask_key.low.val = le32_to_cpu(mask_key.low.val);
1961
1962 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); 1954 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
1963 } 1955 }
1964 1956
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
2012 soft_mac_entry += entry_index; 2004 soft_mac_entry += entry_index;
2013 soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; 2005 soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
2014 } else { /* not zero, just del port, update */ 2006 } else { /* not zero, just del port, update */
2015 tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); 2007 tcam_data.tbl_tcam_data_high = mac_key.high.val;
2016 tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); 2008 tcam_data.tbl_tcam_data_low = mac_key.low.val;
2017 2009
2018 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, 2010 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
2019 &tcam_data, 2011 &tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
2750 return DSAF_DUMP_REGS_NUM; 2742 return DSAF_DUMP_REGS_NUM;
2751} 2743}
2752 2744
2745static int hns_dsaf_get_port_id(u8 port)
2746{
2747 if (port < DSAF_SERVICE_NW_NUM)
2748 return port;
2749
2750 if (port >= DSAF_BASE_INNER_PORT_NUM)
2751 return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
2752
2753 return -EINVAL;
2754}
2755
2753static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) 2756static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
2754{ 2757{
2755 struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; 2758 struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
2815 memset(&temp_key, 0x0, sizeof(temp_key)); 2818 memset(&temp_key, 0x0, sizeof(temp_key));
2816 mask_entry.addr[0] = 0x01; 2819 mask_entry.addr[0] = 0x01;
2817 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, 2820 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
2818 port, mask_entry.addr); 2821 0xf, mask_entry.addr);
2819 tbl_tcam_mcast.tbl_mcast_item_vld = 1; 2822 tbl_tcam_mcast.tbl_mcast_item_vld = 1;
2820 tbl_tcam_mcast.tbl_mcast_old_en = 0; 2823 tbl_tcam_mcast.tbl_mcast_old_en = 0;
2821 2824
2822 if (port < DSAF_SERVICE_NW_NUM) { 2825 /* set MAC port to handle multicast */
2823 mskid = port; 2826 mskid = hns_dsaf_get_port_id(port);
2824 } else if (port >= DSAF_BASE_INNER_PORT_NUM) { 2827 if (mskid == -EINVAL) {
2825 mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
2826 } else {
2827 dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", 2828 dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
2828 dsaf_dev->ae_dev.name, port, 2829 dsaf_dev->ae_dev.name, port,
2829 mask_key.high.val, mask_key.low.val); 2830 mask_key.high.val, mask_key.low.val);
2830 return; 2831 return;
2831 } 2832 }
2833 dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
2834 mskid % 32, 1);
2832 2835
2836 /* set pool bit map to handle multicast */
2837 mskid = hns_dsaf_get_port_id(port_num);
2838 if (mskid == -EINVAL) {
2839 dev_err(dsaf_dev->dev,
2840 "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
2841 dsaf_dev->ae_dev.name, port_num,
2842 mask_key.high.val, mask_key.low.val);
2843 return;
2844 }
2833 dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], 2845 dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
2834 mskid % 32, 1); 2846 mskid % 32, 1);
2847
2835 memcpy(&temp_key, &mask_key, sizeof(mask_key)); 2848 memcpy(&temp_key, &mask_key, sizeof(mask_key));
2836 hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, 2849 hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
2837 (struct dsaf_tbl_tcam_data *)(&mask_key), 2850 (struct dsaf_tbl_tcam_data *)(&mask_key),
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0e1cd99831a6..76cc8887e1a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
467 u8 mac_id, u8 port_num); 467 u8 mac_id, u8 port_num);
468int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); 468int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
469 469
470int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
471
470#endif /* __HNS_DSAF_MAIN_H__ */ 472#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 16294cd3c954..19b94879691f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
670 dsaf_set_field(origin, 1ull << 10, 10, en); 670 dsaf_set_field(origin, 1ull << 10, 10, en);
671 dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); 671 dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
672 } else { 672 } else {
673 u8 *base_addr = (u8 *)mac_cb->serdes_vaddr + 673 u8 __iomem *base_addr = mac_cb->serdes_vaddr +
674 (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000); 674 (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
675 dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); 675 dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
676 } 676 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 3d07c8a7639d..17c019106e6e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
61 } 61 }
62} 62}
63 63
64static void __iomem * 64static u8 __iomem *
65hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) 65hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
66{ 66{
67 return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; 67 return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
111 dsaf_dev->ppe_common[comm_index] = NULL; 111 dsaf_dev->ppe_common[comm_index] = NULL;
112} 112}
113 113
114static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, 114static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
115 int ppe_idx) 115 int ppe_idx)
116{ 116{
117 return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; 117 return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
118} 118}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index f670e63a5a01..110c6e8222c7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
80 struct hns_ppe_hw_stats hw_stats; 80 struct hns_ppe_hw_stats hw_stats;
81 81
82 u8 index; /* index in a ppe common device */ 82 u8 index; /* index in a ppe common device */
83 void __iomem *io_base; 83 u8 __iomem *io_base;
84 int virq; 84 int virq;
85 u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ 85 u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
86 u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ 86 u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
89struct ppe_common_cb { 89struct ppe_common_cb {
90 struct device *dev; 90 struct device *dev;
91 struct dsaf_device *dsaf_dev; 91 struct dsaf_device *dsaf_dev;
92 void __iomem *io_base; 92 u8 __iomem *io_base;
93 93
94 enum ppe_common_mode ppe_mode; 94 enum ppe_common_mode ppe_mode;
95 95
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 6bf346c11b25..ac3518ca4d7b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
458 mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; 458 mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
459 } else { 459 } else {
460 ring = &q->tx_ring; 460 ring = &q->tx_ring;
461 ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base + 461 ring->io_base = ring_pair_cb->q.io_base +
462 HNS_RCB_TX_REG_OFFSET; 462 HNS_RCB_TX_REG_OFFSET;
463 irq_idx = HNS_RCB_IRQ_IDX_TX; 463 irq_idx = HNS_RCB_IRQ_IDX_TX;
464 mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT : 464 mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
764 } 764 }
765} 765}
766 766
767static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) 767static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
768{ 768{
769 struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; 769 struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
770 770
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index b9733b0b8482..b9e7f11f0896 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1018,7 +1018,7 @@
1018#define XGMAC_PAUSE_CTL_RSP_MODE_B 2 1018#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
1019#define XGMAC_PAUSE_CTL_TX_XOFF_B 3 1019#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
1020 1020
1021static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) 1021static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
1022{ 1022{
1023 writel(value, base + reg); 1023 writel(value, base + reg);
1024} 1024}
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
1053#define dsaf_set_bit(origin, shift, val) \ 1053#define dsaf_set_bit(origin, shift, val) \
1054 dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) 1054 dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
1055 1055
1056static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, 1056static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
1057 u32 shift, u32 val) 1057 u32 shift, u32 val)
1058{ 1058{
1059 u32 origin = dsaf_read_reg(base, reg); 1059 u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
1073#define dsaf_get_bit(origin, shift) \ 1073#define dsaf_get_bit(origin, shift) \
1074 dsaf_get_field((origin), (1ull << (shift)), (shift)) 1074 dsaf_get_field((origin), (1ull << (shift)), (shift))
1075 1075
1076static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, 1076static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
1077 u32 shift) 1077 u32 shift)
1078{ 1078{
1079 u32 origin; 1079 u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
1089 dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit)) 1089 dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
1090 1090
1091#define dsaf_write_b(addr, data)\ 1091#define dsaf_write_b(addr, data)\
1092 writeb((data), (__iomem unsigned char *)(addr)) 1092 writeb((data), (__iomem u8 *)(addr))
1093#define dsaf_read_b(addr)\ 1093#define dsaf_read_b(addr)\
1094 readb((__iomem unsigned char *)(addr)) 1094 readb((__iomem u8 *)(addr))
1095 1095
1096#define hns_mac_reg_read64(drv, offset) \ 1096#define hns_mac_reg_read64(drv, offset) \
1097 readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset)))) 1097 readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
1098 1098
1099#endif /* _DSAF_REG_H */ 1099#endif /* _DSAF_REG_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index ba4316910dea..a60f207768fc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
129 dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); 129 dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
130 dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); 130 dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
131 dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); 131 dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
132 dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); 132 dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
133} 133}
134 134
135/** 135/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 60e7d7ae3787..4cd86ba1f050 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -29,9 +29,6 @@
29 29
30#define SERVICE_TIMER_HZ (1 * HZ) 30#define SERVICE_TIMER_HZ (1 * HZ)
31 31
32#define NIC_TX_CLEAN_MAX_NUM 256
33#define NIC_RX_CLEAN_MAX_NUM 64
34
35#define RCB_IRQ_NOT_INITED 0 32#define RCB_IRQ_NOT_INITED 0
36#define RCB_IRQ_INITED 1 33#define RCB_IRQ_INITED 1
37#define HNS_BUFFER_SIZE_2048 2048 34#define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
376 wmb(); /* commit all data before submit */ 373 wmb(); /* commit all data before submit */
377 assert(skb->queue_mapping < priv->ae_handle->q_num); 374 assert(skb->queue_mapping < priv->ae_handle->q_num);
378 hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); 375 hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
379 ring->stats.tx_pkts++;
380 ring->stats.tx_bytes += skb->len;
381 376
382 return NETDEV_TX_OK; 377 return NETDEV_TX_OK;
383 378
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
999 /* issue prefetch for next Tx descriptor */ 994 /* issue prefetch for next Tx descriptor */
1000 prefetch(&ring->desc_cb[ring->next_to_clean]); 995 prefetch(&ring->desc_cb[ring->next_to_clean]);
1001 } 996 }
997 /* update tx ring statistics. */
998 ring->stats.tx_pkts += pkts;
999 ring->stats.tx_bytes += bytes;
1002 1000
1003 NETIF_TX_UNLOCK(ring); 1001 NETIF_TX_UNLOCK(ring);
1004 1002
@@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
2152 hns_nic_tx_fini_pro_v2; 2150 hns_nic_tx_fini_pro_v2;
2153 2151
2154 netif_napi_add(priv->netdev, &rd->napi, 2152 netif_napi_add(priv->netdev, &rd->napi,
2155 hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); 2153 hns_nic_common_poll, NAPI_POLL_WEIGHT);
2156 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; 2154 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2157 } 2155 }
2158 for (i = h->q_num; i < h->q_num * 2; i++) { 2156 for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
2165 hns_nic_rx_fini_pro_v2; 2163 hns_nic_rx_fini_pro_v2;
2166 2164
2167 netif_napi_add(priv->netdev, &rd->napi, 2165 netif_napi_add(priv->netdev, &rd->napi,
2168 hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); 2166 hns_nic_common_poll, NAPI_POLL_WEIGHT);
2169 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; 2167 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2170 } 2168 }
2171 2169
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 1c1f17ec6be2..162cb9afa0e7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -22,6 +22,7 @@
22#include "hns3_enet.h" 22#include "hns3_enet.h"
23 23
24#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift))) 24#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
25#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
25 26
26static void hns3_clear_all_ring(struct hnae3_handle *h); 27static void hns3_clear_all_ring(struct hnae3_handle *h);
27static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h); 28static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
@@ -1079,7 +1080,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
1079 1080
1080 desc_cb->length = size; 1081 desc_cb->length = size;
1081 1082
1082 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; 1083 frag_buf_num = hns3_tx_bd_count(size);
1083 sizeoflast = size & HNS3_TX_LAST_SIZE_M; 1084 sizeoflast = size & HNS3_TX_LAST_SIZE_M;
1084 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE; 1085 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
1085 1086
@@ -1124,14 +1125,13 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
1124 int i; 1125 int i;
1125 1126
1126 size = skb_headlen(skb); 1127 size = skb_headlen(skb);
1127 buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET; 1128 buf_num = hns3_tx_bd_count(size);
1128 1129
1129 frag_num = skb_shinfo(skb)->nr_frags; 1130 frag_num = skb_shinfo(skb)->nr_frags;
1130 for (i = 0; i < frag_num; i++) { 1131 for (i = 0; i < frag_num; i++) {
1131 frag = &skb_shinfo(skb)->frags[i]; 1132 frag = &skb_shinfo(skb)->frags[i];
1132 size = skb_frag_size(frag); 1133 size = skb_frag_size(frag);
1133 bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >> 1134 bdnum_for_frag = hns3_tx_bd_count(size);
1134 HNS3_MAX_BD_SIZE_OFFSET;
1135 if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)) 1135 if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
1136 return -ENOMEM; 1136 return -ENOMEM;
1137 1137
@@ -1139,8 +1139,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
1139 } 1139 }
1140 1140
1141 if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { 1141 if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
1142 buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >> 1142 buf_num = hns3_tx_bd_count(skb->len);
1143 HNS3_MAX_BD_SIZE_OFFSET;
1144 if (ring_space(ring) < buf_num) 1143 if (ring_space(ring) < buf_num)
1145 return -EBUSY; 1144 return -EBUSY;
1146 /* manual split the send packet */ 1145 /* manual split the send packet */
@@ -1169,7 +1168,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
1169 buf_num = skb_shinfo(skb)->nr_frags + 1; 1168 buf_num = skb_shinfo(skb)->nr_frags + 1;
1170 1169
1171 if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) { 1170 if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
1172 buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE; 1171 buf_num = hns3_tx_bd_count(skb->len);
1173 if (ring_space(ring) < buf_num) 1172 if (ring_space(ring) < buf_num)
1174 return -EBUSY; 1173 return -EBUSY;
1175 /* manual split the send packet */ 1174 /* manual split the send packet */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 1db0bd41d209..75669cd0c311 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -193,7 +193,6 @@ enum hns3_nic_state {
193#define HNS3_VECTOR_INITED 1 193#define HNS3_VECTOR_INITED 1
194 194
195#define HNS3_MAX_BD_SIZE 65535 195#define HNS3_MAX_BD_SIZE 65535
196#define HNS3_MAX_BD_SIZE_OFFSET 16
197#define HNS3_MAX_BD_PER_FRAG 8 196#define HNS3_MAX_BD_PER_FRAG 8
198#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS 197#define HNS3_MAX_BD_PER_PKT MAX_SKB_FRAGS
199 198
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index fffe8c1c45d3..0fb61d440d3b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -3,7 +3,7 @@
3# Makefile for the HISILICON network device drivers. 3# Makefile for the HISILICON network device drivers.
4# 4#
5 5
6ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 6ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
7 7
8obj-$(CONFIG_HNS3_HCLGE) += hclge.o 8obj-$(CONFIG_HNS3_HCLGE) += hclge.o
9hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o 9hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index fb93bbd35845..6193f8fa7cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -3,7 +3,7 @@
3# Makefile for the HISILICON network device drivers. 3# Makefile for the HISILICON network device drivers.
4# 4#
5 5
6ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 6ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
7 7
8obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o 8obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
9hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o \ No newline at end of file 9hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o \ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index baf5cc251f32..8b8a7d00e8e0 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
39}; 39};
40 40
41struct hns_mdio_device { 41struct hns_mdio_device {
42 void *vbase; /* mdio reg base address */ 42 u8 __iomem *vbase; /* mdio reg base address */
43 struct regmap *subctrl_vbase; 43 struct regmap *subctrl_vbase;
44 struct hns_mdio_sc_reg sc_reg; 44 struct hns_mdio_sc_reg sc_reg;
45}; 45};
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
96#define MDIO_SC_CLK_ST 0x531C 96#define MDIO_SC_CLK_ST 0x531C
97#define MDIO_SC_RESET_ST 0x5A1C 97#define MDIO_SC_RESET_ST 0x5A1C
98 98
99static void mdio_write_reg(void *base, u32 reg, u32 value) 99static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
100{ 100{
101 u8 __iomem *reg_addr = (u8 __iomem *)base; 101 writel_relaxed(value, base + reg);
102
103 writel_relaxed(value, reg_addr + reg);
104} 102}
105 103
106#define MDIO_WRITE_REG(a, reg, value) \ 104#define MDIO_WRITE_REG(a, reg, value) \
107 mdio_write_reg((a)->vbase, (reg), (value)) 105 mdio_write_reg((a)->vbase, (reg), (value))
108 106
109static u32 mdio_read_reg(void *base, u32 reg) 107static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
110{ 108{
111 u8 __iomem *reg_addr = (u8 __iomem *)base; 109 return readl_relaxed(base + reg);
112
113 return readl_relaxed(reg_addr + reg);
114} 110}
115 111
116#define mdio_set_field(origin, mask, shift, val) \ 112#define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
121 117
122#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask)) 118#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
123 119
124static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, 120static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
125 u32 val) 121 u32 val)
126{ 122{
127 u32 origin = mdio_read_reg(base, reg); 123 u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
133#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \ 129#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
134 mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val)) 130 mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
135 131
136static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) 132static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
137{ 133{
138 u32 origin; 134 u32 origin;
139 135
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 3baabdc89726..90b62c1412c8 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3160,6 +3160,7 @@ static ssize_t ehea_probe_port(struct device *dev,
3160 3160
3161 if (ehea_add_adapter_mr(adapter)) { 3161 if (ehea_add_adapter_mr(adapter)) {
3162 pr_err("creating MR failed\n"); 3162 pr_err("creating MR failed\n");
3163 of_node_put(eth_dn);
3163 return -EIO; 3164 return -EIO;
3164 } 3165 }
3165 3166
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5ecbb1adcf3b..51cfe95f3e24 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
1885 */ 1885 */
1886 adapter->state = VNIC_PROBED; 1886 adapter->state = VNIC_PROBED;
1887 1887
1888 reinit_completion(&adapter->init_done);
1888 rc = init_crq_queue(adapter); 1889 rc = init_crq_queue(adapter);
1889 if (rc) { 1890 if (rc) {
1890 netdev_err(adapter->netdev, 1891 netdev_err(adapter->netdev,
@@ -4625,7 +4626,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
4625 old_num_rx_queues = adapter->req_rx_queues; 4626 old_num_rx_queues = adapter->req_rx_queues;
4626 old_num_tx_queues = adapter->req_tx_queues; 4627 old_num_tx_queues = adapter->req_tx_queues;
4627 4628
4628 init_completion(&adapter->init_done); 4629 reinit_completion(&adapter->init_done);
4629 adapter->init_done_rc = 0; 4630 adapter->init_done_rc = 0;
4630 ibmvnic_send_crq_init(adapter); 4631 ibmvnic_send_crq_init(adapter);
4631 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 4632 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4680,7 +4681,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4680 4681
4681 adapter->from_passive_init = false; 4682 adapter->from_passive_init = false;
4682 4683
4683 init_completion(&adapter->init_done);
4684 adapter->init_done_rc = 0; 4684 adapter->init_done_rc = 0;
4685 ibmvnic_send_crq_init(adapter); 4685 ibmvnic_send_crq_init(adapter);
4686 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 4686 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4759,6 +4759,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4759 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); 4759 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4760 INIT_LIST_HEAD(&adapter->rwi_list); 4760 INIT_LIST_HEAD(&adapter->rwi_list);
4761 spin_lock_init(&adapter->rwi_lock); 4761 spin_lock_init(&adapter->rwi_lock);
4762 init_completion(&adapter->init_done);
4762 adapter->resetting = false; 4763 adapter->resetting = false;
4763 4764
4764 adapter->mac_change_pending = false; 4765 adapter->mac_change_pending = false;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5a0419421511..ecef949f3baa 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
41 /* create driver workqueue */ 41 /* create driver workqueue */
42 fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, 42 fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
43 fm10k_driver_name); 43 fm10k_driver_name);
44 if (!fm10k_workqueue)
45 return -ENOMEM;
44 46
45 fm10k_dbg_init(); 47 fm10k_dbg_init();
46 48
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d684998ba2b0..d3cc3427caad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -790,6 +790,8 @@ struct i40e_vsi {
790 790
791 /* VSI specific handlers */ 791 /* VSI specific handlers */
792 irqreturn_t (*irq_handler)(int irq, void *data); 792 irqreturn_t (*irq_handler)(int irq, void *data);
793
794 unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
793} ____cacheline_internodealigned_in_smp; 795} ____cacheline_internodealigned_in_smp;
794 796
795struct i40e_netdev_priv { 797struct i40e_netdev_priv {
@@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
1096 return !!vsi->xdp_prog; 1098 return !!vsi->xdp_prog;
1097} 1099}
1098 1100
1099static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
1100{
1101 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
1102 int qid = ring->queue_index;
1103
1104 if (ring_is_xdp(ring))
1105 qid -= ring->vsi->alloc_queue_pairs;
1106
1107 if (!xdp_on)
1108 return NULL;
1109
1110 return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
1111}
1112
1113int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); 1101int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
1114int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); 1102int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
1115int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, 1103int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4c885801fa26..7874d0ec7fb0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2573 return -EOPNOTSUPP; 2573 return -EOPNOTSUPP;
2574 2574
2575 /* only magic packet is supported */ 2575 /* only magic packet is supported */
2576 if (wol->wolopts && (wol->wolopts != WAKE_MAGIC) 2576 if (wol->wolopts & ~WAKE_MAGIC)
2577 | (wol->wolopts != WAKE_FILTER))
2578 return -EOPNOTSUPP; 2577 return -EOPNOTSUPP;
2579 2578
2580 /* is this a new value? */ 2579 /* is this a new value? */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index da62218eb70a..b1c265012c8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3064,6 +3064,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3064} 3064}
3065 3065
3066/** 3066/**
3067 * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
3068 * @ring: The Tx or Rx ring
3069 *
3070 * Returns the UMEM or NULL.
3071 **/
3072static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
3073{
3074 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3075 int qid = ring->queue_index;
3076
3077 if (ring_is_xdp(ring))
3078 qid -= ring->vsi->alloc_queue_pairs;
3079
3080 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3081 return NULL;
3082
3083 return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
3084}
3085
3086/**
3067 * i40e_configure_tx_ring - Configure a transmit ring context and rest 3087 * i40e_configure_tx_ring - Configure a transmit ring context and rest
3068 * @ring: The Tx ring to configure 3088 * @ring: The Tx ring to configure
3069 * 3089 *
@@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10064 hash_init(vsi->mac_filter_hash); 10084 hash_init(vsi->mac_filter_hash);
10065 vsi->irqs_ready = false; 10085 vsi->irqs_ready = false;
10066 10086
10087 if (type == I40E_VSI_MAIN) {
10088 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
10089 if (!vsi->af_xdp_zc_qps)
10090 goto err_rings;
10091 }
10092
10067 ret = i40e_set_num_rings_in_vsi(vsi); 10093 ret = i40e_set_num_rings_in_vsi(vsi);
10068 if (ret) 10094 if (ret)
10069 goto err_rings; 10095 goto err_rings;
@@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10082 goto unlock_pf; 10108 goto unlock_pf;
10083 10109
10084err_rings: 10110err_rings:
10111 bitmap_free(vsi->af_xdp_zc_qps);
10085 pf->next_vsi = i - 1; 10112 pf->next_vsi = i - 1;
10086 kfree(vsi); 10113 kfree(vsi);
10087unlock_pf: 10114unlock_pf:
@@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
10162 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 10189 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10163 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); 10190 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10164 10191
10192 bitmap_free(vsi->af_xdp_zc_qps);
10165 i40e_vsi_free_arrays(vsi, true); 10193 i40e_vsi_free_arrays(vsi, true);
10166 i40e_clear_rss_config_user(vsi); 10194 i40e_clear_rss_config_user(vsi);
10167 10195
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 5fb4353c742b..31575c0bb884 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
146static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 146static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
147{ 147{
148 struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); 148 struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
149 struct timespec64 now; 149 struct timespec64 now, then;
150 150
151 then = ns_to_timespec64(delta);
151 mutex_lock(&pf->tmreg_lock); 152 mutex_lock(&pf->tmreg_lock);
152 153
153 i40e_ptp_read(pf, &now, NULL); 154 i40e_ptp_read(pf, &now, NULL);
154 timespec64_add_ns(&now, delta); 155 now = timespec64_add(now, then);
155 i40e_ptp_write(pf, (const struct timespec64 *)&now); 156 i40e_ptp_write(pf, (const struct timespec64 *)&now);
156 157
157 mutex_unlock(&pf->tmreg_lock); 158 mutex_unlock(&pf->tmreg_lock);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index b5c182e688e3..1b17486543ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
102 if (err) 102 if (err)
103 return err; 103 return err;
104 104
105 set_bit(qid, vsi->af_xdp_zc_qps);
106
105 if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); 107 if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
106 108
107 if (if_running) { 109 if (if_running) {
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
148 return err; 150 return err;
149 } 151 }
150 152
153 clear_bit(qid, vsi->af_xdp_zc_qps);
151 i40e_xsk_umem_dma_unmap(vsi, umem); 154 i40e_xsk_umem_dma_unmap(vsi, umem);
152 155
153 if (if_running) { 156 if (if_running) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 01fcfc6f3415..d2e2c50ce257 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -194,6 +194,8 @@
194/* enable link status from external LINK_0 and LINK_1 pins */ 194/* enable link status from external LINK_0 and LINK_1 pins */
195#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 195#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
196#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 196#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
197#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
198#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
197#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ 199#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
198#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ 200#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
199#define E1000_CTRL_RST 0x04000000 /* Global reset */ 201#define E1000_CTRL_RST 0x04000000 /* Global reset */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 69b230c53fed..3269d8e94744 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8740,9 +8740,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8740 struct e1000_hw *hw = &adapter->hw; 8740 struct e1000_hw *hw = &adapter->hw;
8741 u32 ctrl, rctl, status; 8741 u32 ctrl, rctl, status;
8742 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; 8742 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
8743#ifdef CONFIG_PM 8743 bool wake;
8744 int retval = 0;
8745#endif
8746 8744
8747 rtnl_lock(); 8745 rtnl_lock();
8748 netif_device_detach(netdev); 8746 netif_device_detach(netdev);
@@ -8755,14 +8753,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8755 igb_clear_interrupt_scheme(adapter); 8753 igb_clear_interrupt_scheme(adapter);
8756 rtnl_unlock(); 8754 rtnl_unlock();
8757 8755
8758#ifdef CONFIG_PM
8759 if (!runtime) {
8760 retval = pci_save_state(pdev);
8761 if (retval)
8762 return retval;
8763 }
8764#endif
8765
8766 status = rd32(E1000_STATUS); 8756 status = rd32(E1000_STATUS);
8767 if (status & E1000_STATUS_LU) 8757 if (status & E1000_STATUS_LU)
8768 wufc &= ~E1000_WUFC_LNKC; 8758 wufc &= ~E1000_WUFC_LNKC;
@@ -8779,10 +8769,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8779 } 8769 }
8780 8770
8781 ctrl = rd32(E1000_CTRL); 8771 ctrl = rd32(E1000_CTRL);
8782 /* advertise wake from D3Cold */
8783 #define E1000_CTRL_ADVD3WUC 0x00100000
8784 /* phy power management enable */
8785 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
8786 ctrl |= E1000_CTRL_ADVD3WUC; 8772 ctrl |= E1000_CTRL_ADVD3WUC;
8787 wr32(E1000_CTRL, ctrl); 8773 wr32(E1000_CTRL, ctrl);
8788 8774
@@ -8796,12 +8782,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8796 wr32(E1000_WUFC, 0); 8782 wr32(E1000_WUFC, 0);
8797 } 8783 }
8798 8784
8799 *enable_wake = wufc || adapter->en_mng_pt; 8785 wake = wufc || adapter->en_mng_pt;
8800 if (!*enable_wake) 8786 if (!wake)
8801 igb_power_down_link(adapter); 8787 igb_power_down_link(adapter);
8802 else 8788 else
8803 igb_power_up_link(adapter); 8789 igb_power_up_link(adapter);
8804 8790
8791 if (enable_wake)
8792 *enable_wake = wake;
8793
8805 /* Release control of h/w to f/w. If f/w is AMT enabled, this 8794 /* Release control of h/w to f/w. If f/w is AMT enabled, this
8806 * would have already happened in close and is redundant. 8795 * would have already happened in close and is redundant.
8807 */ 8796 */
@@ -8844,22 +8833,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
8844 8833
8845static int __maybe_unused igb_suspend(struct device *dev) 8834static int __maybe_unused igb_suspend(struct device *dev)
8846{ 8835{
8847 int retval; 8836 return __igb_shutdown(to_pci_dev(dev), NULL, 0);
8848 bool wake;
8849 struct pci_dev *pdev = to_pci_dev(dev);
8850
8851 retval = __igb_shutdown(pdev, &wake, 0);
8852 if (retval)
8853 return retval;
8854
8855 if (wake) {
8856 pci_prepare_to_sleep(pdev);
8857 } else {
8858 pci_wake_from_d3(pdev, false);
8859 pci_set_power_state(pdev, PCI_D3hot);
8860 }
8861
8862 return 0;
8863} 8837}
8864 8838
8865static int __maybe_unused igb_resume(struct device *dev) 8839static int __maybe_unused igb_resume(struct device *dev)
@@ -8930,22 +8904,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
8930 8904
8931static int __maybe_unused igb_runtime_suspend(struct device *dev) 8905static int __maybe_unused igb_runtime_suspend(struct device *dev)
8932{ 8906{
8933 struct pci_dev *pdev = to_pci_dev(dev); 8907 return __igb_shutdown(to_pci_dev(dev), NULL, 1);
8934 int retval;
8935 bool wake;
8936
8937 retval = __igb_shutdown(pdev, &wake, 1);
8938 if (retval)
8939 return retval;
8940
8941 if (wake) {
8942 pci_prepare_to_sleep(pdev);
8943 } else {
8944 pci_wake_from_d3(pdev, false);
8945 pci_set_power_state(pdev, PCI_D3hot);
8946 }
8947
8948 return 0;
8949} 8908}
8950 8909
8951static int __maybe_unused igb_runtime_resume(struct device *dev) 8910static int __maybe_unused igb_runtime_resume(struct device *dev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index cc4907f9ff02..2fb97967961c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
905 struct pci_dev *pdev = adapter->pdev; 905 struct pci_dev *pdev = adapter->pdev;
906 struct device *dev = &adapter->netdev->dev; 906 struct device *dev = &adapter->netdev->dev;
907 struct mii_bus *bus; 907 struct mii_bus *bus;
908 int err = -ENODEV;
908 909
909 adapter->mii_bus = devm_mdiobus_alloc(dev); 910 bus = devm_mdiobus_alloc(dev);
910 if (!adapter->mii_bus) 911 if (!bus)
911 return -ENOMEM; 912 return -ENOMEM;
912 913
913 bus = adapter->mii_bus;
914
915 switch (hw->device_id) { 914 switch (hw->device_id) {
916 /* C3000 SoCs */ 915 /* C3000 SoCs */
917 case IXGBE_DEV_ID_X550EM_A_KR: 916 case IXGBE_DEV_ID_X550EM_A_KR:
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
949 */ 948 */
950 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; 949 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
951 950
952 return mdiobus_register(bus); 951 err = mdiobus_register(bus);
952 if (!err) {
953 adapter->mii_bus = bus;
954 return 0;
955 }
953 956
954ixgbe_no_mii_bus: 957ixgbe_no_mii_bus:
955 devm_mdiobus_free(dev, bus); 958 devm_mdiobus_free(dev, bus);
956 adapter->mii_bus = NULL; 959 return err;
957 return -ENODEV;
958} 960}
959 961
960/** 962/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 122927f3a600..d5e5afbdca6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
96 if (!eproto) 96 if (!eproto)
97 return -EINVAL; 97 return -EINVAL;
98 98
99 if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
100 return -EOPNOTSUPP;
101
102 err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port); 99 err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
103 if (err) 100 if (err)
104 return err; 101 return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index eac245a93f91..4ab0d030b544 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -122,7 +122,9 @@ out:
122 return err; 122 return err;
123} 123}
124 124
125/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */ 125/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
126 * minimum speed value is 40Gbps
127 */
126static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) 128static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
127{ 129{
128 u32 speed; 130 u32 speed;
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
130 int err; 132 int err;
131 133
132 err = mlx5e_port_linkspeed(priv->mdev, &speed); 134 err = mlx5e_port_linkspeed(priv->mdev, &speed);
133 if (err) { 135 if (err)
134 mlx5_core_warn(priv->mdev, "cannot get port speed\n"); 136 speed = SPEED_40000;
135 return 0; 137 speed = max_t(u32, speed, SPEED_40000);
136 }
137 138
138 xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; 139 xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
139 140
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
142} 143}
143 144
144static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, 145static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
145 u32 xoff, unsigned int mtu) 146 u32 xoff, unsigned int max_mtu)
146{ 147{
147 int i; 148 int i;
148 149
@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
154 } 155 }
155 156
156 if (port_buffer->buffer[i].size < 157 if (port_buffer->buffer[i].size <
157 (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) 158 (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
158 return -ENOMEM; 159 return -ENOMEM;
159 160
160 port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; 161 port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
161 port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu; 162 port_buffer->buffer[i].xon =
163 port_buffer->buffer[i].xoff - max_mtu;
162 } 164 }
163 165
164 return 0; 166 return 0;
@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
166 168
167/** 169/**
168 * update_buffer_lossy() 170 * update_buffer_lossy()
169 * mtu: device's MTU 171 * max_mtu: netdev's max_mtu
170 * pfc_en: <input> current pfc configuration 172 * pfc_en: <input> current pfc configuration
171 * buffer: <input> current prio to buffer mapping 173 * buffer: <input> current prio to buffer mapping
172 * xoff: <input> xoff value 174 * xoff: <input> xoff value
@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
183 * Return 0 if no error. 185 * Return 0 if no error.
184 * Set change to true if buffer configuration is modified. 186 * Set change to true if buffer configuration is modified.
185 */ 187 */
186static int update_buffer_lossy(unsigned int mtu, 188static int update_buffer_lossy(unsigned int max_mtu,
187 u8 pfc_en, u8 *buffer, u32 xoff, 189 u8 pfc_en, u8 *buffer, u32 xoff,
188 struct mlx5e_port_buffer *port_buffer, 190 struct mlx5e_port_buffer *port_buffer,
189 bool *change) 191 bool *change)
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
220 } 222 }
221 223
222 if (changed) { 224 if (changed) {
223 err = update_xoff_threshold(port_buffer, xoff, mtu); 225 err = update_xoff_threshold(port_buffer, xoff, max_mtu);
224 if (err) 226 if (err)
225 return err; 227 return err;
226 228
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
230 return 0; 232 return 0;
231} 233}
232 234
235#define MINIMUM_MAX_MTU 9216
233int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, 236int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
234 u32 change, unsigned int mtu, 237 u32 change, unsigned int mtu,
235 struct ieee_pfc *pfc, 238 struct ieee_pfc *pfc,
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
241 bool update_prio2buffer = false; 244 bool update_prio2buffer = false;
242 u8 buffer[MLX5E_MAX_PRIORITY]; 245 u8 buffer[MLX5E_MAX_PRIORITY];
243 bool update_buffer = false; 246 bool update_buffer = false;
247 unsigned int max_mtu;
244 u32 total_used = 0; 248 u32 total_used = 0;
245 u8 curr_pfc_en; 249 u8 curr_pfc_en;
246 int err; 250 int err;
247 int i; 251 int i;
248 252
249 mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); 253 mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
254 max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
250 255
251 err = mlx5e_port_query_buffer(priv, &port_buffer); 256 err = mlx5e_port_query_buffer(priv, &port_buffer);
252 if (err) 257 if (err)
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
254 259
255 if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { 260 if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
256 update_buffer = true; 261 update_buffer = true;
257 err = update_xoff_threshold(&port_buffer, xoff, mtu); 262 err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
258 if (err) 263 if (err)
259 return err; 264 return err;
260 } 265 }
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
264 if (err) 269 if (err)
265 return err; 270 return err;
266 271
267 err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff, 272 err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
268 &port_buffer, &update_buffer); 273 &port_buffer, &update_buffer);
269 if (err) 274 if (err)
270 return err; 275 return err;
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
276 if (err) 281 if (err)
277 return err; 282 return err;
278 283
279 err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff, 284 err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
280 &port_buffer, &update_buffer); 285 xoff, &port_buffer, &update_buffer);
281 if (err) 286 if (err)
282 return err; 287 return err;
283 } 288 }
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
301 return -EINVAL; 306 return -EINVAL;
302 307
303 update_buffer = true; 308 update_buffer = true;
304 err = update_xoff_threshold(&port_buffer, xoff, mtu); 309 err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
305 if (err) 310 if (err)
306 return err; 311 return err;
307 } 312 }
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
309 /* Need to update buffer configuration if xoff value is changed */ 314 /* Need to update buffer configuration if xoff value is changed */
310 if (!update_buffer && xoff != priv->dcbx.xoff) { 315 if (!update_buffer && xoff != priv->dcbx.xoff) {
311 update_buffer = true; 316 update_buffer = true;
312 err = update_xoff_threshold(&port_buffer, xoff, mtu); 317 err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
313 if (err) 318 if (err)
314 return err; 319 return err;
315 } 320 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 3078491cc0d0..1539cf3de5dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
45 if (err) 45 if (err)
46 return err; 46 return err;
47 47
48 mutex_lock(&mdev->mlx5e_res.td.list_lock);
48 list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); 49 list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
50 mutex_unlock(&mdev->mlx5e_res.td.list_lock);
49 51
50 return 0; 52 return 0;
51} 53}
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
53void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, 55void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
54 struct mlx5e_tir *tir) 56 struct mlx5e_tir *tir)
55{ 57{
58 mutex_lock(&mdev->mlx5e_res.td.list_lock);
56 mlx5_core_destroy_tir(mdev, tir->tirn); 59 mlx5_core_destroy_tir(mdev, tir->tirn);
57 list_del(&tir->list); 60 list_del(&tir->list);
61 mutex_unlock(&mdev->mlx5e_res.td.list_lock);
58} 62}
59 63
60static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, 64static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
114 } 118 }
115 119
116 INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); 120 INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
121 mutex_init(&mdev->mlx5e_res.td.list_lock);
117 122
118 return 0; 123 return 0;
119 124
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
141{ 146{
142 struct mlx5_core_dev *mdev = priv->mdev; 147 struct mlx5_core_dev *mdev = priv->mdev;
143 struct mlx5e_tir *tir; 148 struct mlx5e_tir *tir;
144 int err = -ENOMEM; 149 int err = 0;
145 u32 tirn = 0; 150 u32 tirn = 0;
146 int inlen; 151 int inlen;
147 void *in; 152 void *in;
148 153
149 inlen = MLX5_ST_SZ_BYTES(modify_tir_in); 154 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
150 in = kvzalloc(inlen, GFP_KERNEL); 155 in = kvzalloc(inlen, GFP_KERNEL);
151 if (!in) 156 if (!in) {
157 err = -ENOMEM;
152 goto out; 158 goto out;
159 }
153 160
154 if (enable_uc_lb) 161 if (enable_uc_lb)
155 MLX5_SET(modify_tir_in, in, ctx.self_lb_block, 162 MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
157 164
158 MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); 165 MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
159 166
167 mutex_lock(&mdev->mlx5e_res.td.list_lock);
160 list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { 168 list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
161 tirn = tir->tirn; 169 tirn = tir->tirn;
162 err = mlx5_core_modify_tir(mdev, tirn, in, inlen); 170 err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@@ -168,6 +176,7 @@ out:
168 kvfree(in); 176 kvfree(in);
169 if (err) 177 if (err)
170 netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); 178 netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
179 mutex_unlock(&mdev->mlx5e_res.td.list_lock);
171 180
172 return err; 181 return err;
173} 182}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index a0987cc5fe4a..5efce4a3ff79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
603 __ETHTOOL_LINK_MODE_MASK_NBITS); 603 __ETHTOOL_LINK_MODE_MASK_NBITS);
604} 604}
605 605
606static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev, 606static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
607 unsigned long *advertising_modes, 607 u32 eth_proto_cap, bool ext)
608 u32 eth_proto_cap)
609{ 608{
610 unsigned long proto_cap = eth_proto_cap; 609 unsigned long proto_cap = eth_proto_cap;
611 struct ptys2ethtool_config *table; 610 struct ptys2ethtool_config *table;
612 u32 max_size; 611 u32 max_size;
613 int proto; 612 int proto;
614 613
615 mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); 614 table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
615 max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
616 ARRAY_SIZE(ptys2legacy_ethtool_table);
617
616 for_each_set_bit(proto, &proto_cap, max_size) 618 for_each_set_bit(proto, &proto_cap, max_size)
617 bitmap_or(advertising_modes, advertising_modes, 619 bitmap_or(advertising_modes, advertising_modes,
618 table[proto].advertised, 620 table[proto].advertised,
@@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
794 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); 796 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
795} 797}
796 798
797static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap, 799static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
798 u8 tx_pause, u8 rx_pause, 800 struct ethtool_link_ksettings *link_ksettings,
799 struct ethtool_link_ksettings *link_ksettings) 801 bool ext)
800{ 802{
801 unsigned long *advertising = link_ksettings->link_modes.advertising; 803 unsigned long *advertising = link_ksettings->link_modes.advertising;
802 ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap); 804 ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
803 805
804 if (rx_pause) 806 if (rx_pause)
805 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); 807 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
@@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
854 struct ethtool_link_ksettings *link_ksettings) 856 struct ethtool_link_ksettings *link_ksettings)
855{ 857{
856 unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; 858 unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
859 bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
857 860
858 ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp); 861 ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
859} 862}
860 863
861int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, 864int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
872 u8 an_disable_admin; 875 u8 an_disable_admin;
873 u8 an_status; 876 u8 an_status;
874 u8 connector_type; 877 u8 connector_type;
878 bool admin_ext;
875 bool ext; 879 bool ext;
876 int err; 880 int err;
877 881
@@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
886 eth_proto_capability); 890 eth_proto_capability);
887 eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 891 eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
888 eth_proto_admin); 892 eth_proto_admin);
893 /* Fields: eth_proto_admin and ext_eth_proto_admin are
894 * mutually exclusive. Hence try reading legacy advertising
895 * when extended advertising is zero.
896 * admin_ext indicates how eth_proto_admin should be
897 * interpreted
898 */
899 admin_ext = ext;
900 if (ext && !eth_proto_admin) {
901 eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
902 eth_proto_admin);
903 admin_ext = false;
904 }
905
889 eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 906 eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
890 eth_proto_oper); 907 eth_proto_oper);
891 eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); 908 eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
@@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
899 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); 916 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
900 917
901 get_supported(mdev, eth_proto_cap, link_ksettings); 918 get_supported(mdev, eth_proto_cap, link_ksettings);
902 get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings); 919 get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
920 admin_ext);
903 get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); 921 get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
904 922
905 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; 923 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
997 1015
998#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) 1016#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
999 1017
1000 ext_requested = (link_ksettings->link_modes.advertising[0] > 1018 ext_requested = !!(link_ksettings->link_modes.advertising[0] >
1001 MLX5E_PTYS_EXT); 1019 MLX5E_PTYS_EXT ||
1020 link_ksettings->link_modes.advertising[1]);
1002 ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 1021 ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
1003 1022 ext_requested &= ext_supported;
1004 /*when ptys_extended_ethernet is set legacy link modes are deprecated */
1005 if (ext_requested != ext_supported)
1006 return -EPROTONOSUPPORT;
1007 1023
1008 speed = link_ksettings->base.speed; 1024 speed = link_ksettings->base.speed;
1009 ethtool2ptys_adver_func = ext_requested ? 1025 ethtool2ptys_adver_func = ext_requested ?
1010 mlx5e_ethtool2ptys_ext_adver_link : 1026 mlx5e_ethtool2ptys_ext_adver_link :
1011 mlx5e_ethtool2ptys_adver_link; 1027 mlx5e_ethtool2ptys_adver_link;
1012 err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto); 1028 err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
1013 if (err) { 1029 if (err) {
1014 netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", 1030 netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
1015 __func__, err); 1031 __func__, err);
@@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
1037 if (!an_changes && link_modes == eproto.admin) 1053 if (!an_changes && link_modes == eproto.admin)
1038 goto out; 1054 goto out;
1039 1055
1040 mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported); 1056 mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
1041 mlx5_toggle_port_link(mdev); 1057 mlx5_toggle_port_link(mdev);
1042 1058
1043out: 1059out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b4967a0ff8c7..d75dc44eb2ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2158,6 +2158,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
2158 return true; 2158 return true;
2159} 2159}
2160 2160
2161struct ip_ttl_word {
2162 __u8 ttl;
2163 __u8 protocol;
2164 __sum16 check;
2165};
2166
2167struct ipv6_hoplimit_word {
2168 __be16 payload_len;
2169 __u8 nexthdr;
2170 __u8 hop_limit;
2171};
2172
2173static bool is_action_keys_supported(const struct flow_action_entry *act)
2174{
2175 u32 mask, offset;
2176 u8 htype;
2177
2178 htype = act->mangle.htype;
2179 offset = act->mangle.offset;
2180 mask = ~act->mangle.mask;
2181 /* For IPv4 & IPv6 header check 4 byte word,
2182 * to determine that modified fields
2183 * are NOT ttl & hop_limit only.
2184 */
2185 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
2186 struct ip_ttl_word *ttl_word =
2187 (struct ip_ttl_word *)&mask;
2188
2189 if (offset != offsetof(struct iphdr, ttl) ||
2190 ttl_word->protocol ||
2191 ttl_word->check) {
2192 return true;
2193 }
2194 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
2195 struct ipv6_hoplimit_word *hoplimit_word =
2196 (struct ipv6_hoplimit_word *)&mask;
2197
2198 if (offset != offsetof(struct ipv6hdr, payload_len) ||
2199 hoplimit_word->payload_len ||
2200 hoplimit_word->nexthdr) {
2201 return true;
2202 }
2203 }
2204 return false;
2205}
2206
2161static bool modify_header_match_supported(struct mlx5_flow_spec *spec, 2207static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2162 struct flow_action *flow_action, 2208 struct flow_action *flow_action,
2163 u32 actions, 2209 u32 actions,
@@ -2165,9 +2211,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2165{ 2211{
2166 const struct flow_action_entry *act; 2212 const struct flow_action_entry *act;
2167 bool modify_ip_header; 2213 bool modify_ip_header;
2168 u8 htype, ip_proto;
2169 void *headers_v; 2214 void *headers_v;
2170 u16 ethertype; 2215 u16 ethertype;
2216 u8 ip_proto;
2171 int i; 2217 int i;
2172 2218
2173 if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) 2219 if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
@@ -2187,9 +2233,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2187 act->id != FLOW_ACTION_ADD) 2233 act->id != FLOW_ACTION_ADD)
2188 continue; 2234 continue;
2189 2235
2190 htype = act->mangle.htype; 2236 if (is_action_keys_supported(act)) {
2191 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
2192 htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
2193 modify_ip_header = true; 2237 modify_ip_header = true;
2194 break; 2238 break;
2195 } 2239 }
@@ -2340,15 +2384,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
2340 return 0; 2384 return 0;
2341} 2385}
2342 2386
2343static inline int cmp_encap_info(struct ip_tunnel_key *a, 2387struct encap_key {
2344 struct ip_tunnel_key *b) 2388 struct ip_tunnel_key *ip_tun_key;
2389 int tunnel_type;
2390};
2391
2392static inline int cmp_encap_info(struct encap_key *a,
2393 struct encap_key *b)
2345{ 2394{
2346 return memcmp(a, b, sizeof(*a)); 2395 return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
2396 a->tunnel_type != b->tunnel_type;
2347} 2397}
2348 2398
2349static inline int hash_encap_info(struct ip_tunnel_key *key) 2399static inline int hash_encap_info(struct encap_key *key)
2350{ 2400{
2351 return jhash(key, sizeof(*key), 0); 2401 return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
2402 key->tunnel_type);
2352} 2403}
2353 2404
2354 2405
@@ -2379,7 +2430,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2379 struct mlx5_esw_flow_attr *attr = flow->esw_attr; 2430 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
2380 struct mlx5e_tc_flow_parse_attr *parse_attr; 2431 struct mlx5e_tc_flow_parse_attr *parse_attr;
2381 struct ip_tunnel_info *tun_info; 2432 struct ip_tunnel_info *tun_info;
2382 struct ip_tunnel_key *key; 2433 struct encap_key key, e_key;
2383 struct mlx5e_encap_entry *e; 2434 struct mlx5e_encap_entry *e;
2384 unsigned short family; 2435 unsigned short family;
2385 uintptr_t hash_key; 2436 uintptr_t hash_key;
@@ -2389,13 +2440,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2389 parse_attr = attr->parse_attr; 2440 parse_attr = attr->parse_attr;
2390 tun_info = &parse_attr->tun_info[out_index]; 2441 tun_info = &parse_attr->tun_info[out_index];
2391 family = ip_tunnel_info_af(tun_info); 2442 family = ip_tunnel_info_af(tun_info);
2392 key = &tun_info->key; 2443 key.ip_tun_key = &tun_info->key;
2444 key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
2393 2445
2394 hash_key = hash_encap_info(key); 2446 hash_key = hash_encap_info(&key);
2395 2447
2396 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, 2448 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
2397 encap_hlist, hash_key) { 2449 encap_hlist, hash_key) {
2398 if (!cmp_encap_info(&e->tun_info.key, key)) { 2450 e_key.ip_tun_key = &e->tun_info.key;
2451 e_key.tunnel_type = e->tunnel_type;
2452 if (!cmp_encap_info(&e_key, &key)) {
2399 found = true; 2453 found = true;
2400 break; 2454 break;
2401 } 2455 }
@@ -2657,7 +2711,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
2657 2711
2658 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || 2712 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
2659 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { 2713 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
2660 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, 2714 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
2661 parse_attr, hdrs, extack); 2715 parse_attr, hdrs, extack);
2662 if (err) 2716 if (err)
2663 return err; 2717 return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index ecd2c747f726..8a67fd197b79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
105 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); 105 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
106 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); 106 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
107 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); 107 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
108 if (vport) 108 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
109 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
110 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, 109 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
111 in, nic_vport_context); 110 in, nic_vport_context);
112 111
@@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
134 MLX5_SET(modify_esw_vport_context_in, in, opcode, 133 MLX5_SET(modify_esw_vport_context_in, in, opcode,
135 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); 134 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
136 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); 135 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
137 if (vport) 136 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
138 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
139 return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); 137 return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
140} 138}
141 139
@@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
431{ 429{
432 int err; 430 int err;
433 431
432 memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
433
434 err = esw_create_legacy_vepa_table(esw); 434 err = esw_create_legacy_vepa_table(esw);
435 if (err) 435 if (err)
436 return err; 436 return err;
@@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
2157 2157
2158 /* Star rule to forward all traffic to uplink vport */ 2158 /* Star rule to forward all traffic to uplink vport */
2159 memset(spec, 0, sizeof(*spec)); 2159 memset(spec, 0, sizeof(*spec));
2160 memset(&dest, 0, sizeof(dest));
2160 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 2161 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
2161 dest.vport.num = MLX5_VPORT_UPLINK; 2162 dest.vport.num = MLX5_VPORT_UPLINK;
2162 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2163 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f2260391be5b..9b2d78ee22b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1611,6 +1611,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
1611{ 1611{
1612 int err; 1612 int err;
1613 1613
1614 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
1614 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); 1615 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
1615 1616
1616 err = esw_create_offloads_fdb_tables(esw, nvports); 1617 err = esw_create_offloads_fdb_tables(esw, nvports);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 5cf5f2a9d51f..8de64e88c670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
217 void *cmd; 217 void *cmd;
218 int ret; 218 int ret;
219 219
220 rcu_read_lock();
221 flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
222 rcu_read_unlock();
223
224 if (!flow) {
225 WARN_ONCE(1, "Received NULL pointer for handle\n");
226 return -EINVAL;
227 }
228
220 buf = kzalloc(size, GFP_ATOMIC); 229 buf = kzalloc(size, GFP_ATOMIC);
221 if (!buf) 230 if (!buf)
222 return -ENOMEM; 231 return -ENOMEM;
223 232
224 cmd = (buf + 1); 233 cmd = (buf + 1);
225 234
226 rcu_read_lock();
227 flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
228 rcu_read_unlock();
229 mlx5_fpga_tls_flow_to_cmd(flow, cmd); 235 mlx5_fpga_tls_flow_to_cmd(flow, cmd);
230 236
231 MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); 237 MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
@@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
238 buf->complete = mlx_tls_kfree_complete; 244 buf->complete = mlx_tls_kfree_complete;
239 245
240 ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); 246 ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
247 if (ret < 0)
248 kfree(buf);
241 249
242 return ret; 250 return ret;
243} 251}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 70cc906a102b..76716419370d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = {
164 .size = 8, 164 .size = 8,
165 .limit = 4 165 .limit = 4
166 }, 166 },
167 .mr_cache[16] = {
168 .size = 8,
169 .limit = 4
170 },
171 .mr_cache[17] = {
172 .size = 8,
173 .limit = 4
174 },
175 .mr_cache[18] = {
176 .size = 8,
177 .limit = 4
178 },
179 .mr_cache[19] = {
180 .size = 4,
181 .limit = 2
182 },
183 .mr_cache[20] = {
184 .size = 4,
185 .limit = 2
186 },
187 }, 167 },
188}; 168};
189 169
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 370ca94b6775..b8ba74de9555 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -40,6 +40,9 @@
40#include "mlx5_core.h" 40#include "mlx5_core.h"
41#include "lib/eq.h" 41#include "lib/eq.h"
42 42
43static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
44 struct mlx5_core_dct *dct);
45
43static struct mlx5_core_rsc_common * 46static struct mlx5_core_rsc_common *
44mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) 47mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
45{ 48{
@@ -227,20 +230,49 @@ static void destroy_resource_common(struct mlx5_core_dev *dev,
227 wait_for_completion(&qp->common.free); 230 wait_for_completion(&qp->common.free);
228} 231}
229 232
233static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
234 struct mlx5_core_dct *dct, bool need_cleanup)
235{
236 u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
237 u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
238 struct mlx5_core_qp *qp = &dct->mqp;
239 int err;
240
241 err = mlx5_core_drain_dct(dev, dct);
242 if (err) {
243 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
244 goto destroy;
245 } else {
246 mlx5_core_warn(
247 dev, "failed drain DCT 0x%x with error 0x%x\n",
248 qp->qpn, err);
249 return err;
250 }
251 }
252 wait_for_completion(&dct->drained);
253destroy:
254 if (need_cleanup)
255 destroy_resource_common(dev, &dct->mqp);
256 MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
257 MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
258 MLX5_SET(destroy_dct_in, in, uid, qp->uid);
259 err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
260 (void *)&out, sizeof(out));
261 return err;
262}
263
230int mlx5_core_create_dct(struct mlx5_core_dev *dev, 264int mlx5_core_create_dct(struct mlx5_core_dev *dev,
231 struct mlx5_core_dct *dct, 265 struct mlx5_core_dct *dct,
232 u32 *in, int inlen) 266 u32 *in, int inlen,
267 u32 *out, int outlen)
233{ 268{
234 u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {0};
235 u32 din[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
236 u32 dout[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
237 struct mlx5_core_qp *qp = &dct->mqp; 269 struct mlx5_core_qp *qp = &dct->mqp;
238 int err; 270 int err;
239 271
240 init_completion(&dct->drained); 272 init_completion(&dct->drained);
241 MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT); 273 MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
242 274
243 err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); 275 err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
244 if (err) { 276 if (err) {
245 mlx5_core_warn(dev, "create DCT failed, ret %d\n", err); 277 mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
246 return err; 278 return err;
@@ -254,11 +286,7 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
254 286
255 return 0; 287 return 0;
256err_cmd: 288err_cmd:
257 MLX5_SET(destroy_dct_in, din, opcode, MLX5_CMD_OP_DESTROY_DCT); 289 _mlx5_core_destroy_dct(dev, dct, false);
258 MLX5_SET(destroy_dct_in, din, dctn, qp->qpn);
259 MLX5_SET(destroy_dct_in, din, uid, qp->uid);
260 mlx5_cmd_exec(dev, (void *)&in, sizeof(din),
261 (void *)&out, sizeof(dout));
262 return err; 290 return err;
263} 291}
264EXPORT_SYMBOL_GPL(mlx5_core_create_dct); 292EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
@@ -323,29 +351,7 @@ static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
323int mlx5_core_destroy_dct(struct mlx5_core_dev *dev, 351int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
324 struct mlx5_core_dct *dct) 352 struct mlx5_core_dct *dct)
325{ 353{
326 u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0}; 354 return _mlx5_core_destroy_dct(dev, dct, true);
327 u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
328 struct mlx5_core_qp *qp = &dct->mqp;
329 int err;
330
331 err = mlx5_core_drain_dct(dev, dct);
332 if (err) {
333 if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
334 goto destroy;
335 } else {
336 mlx5_core_warn(dev, "failed drain DCT 0x%x with error 0x%x\n", qp->qpn, err);
337 return err;
338 }
339 }
340 wait_for_completion(&dct->drained);
341destroy:
342 destroy_resource_common(dev, &dct->mqp);
343 MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
344 MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
345 MLX5_SET(destroy_dct_in, in, uid, qp->uid);
346 err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
347 (void *)&out, sizeof(out));
348 return err;
349} 355}
350EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct); 356EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
351 357
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 7a15e932ed2f..c1c1965d7acc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -113,7 +113,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
113 return 0; 113 return 0;
114 default: 114 default:
115 /* Do not consider thresholds for zero temperature. */ 115 /* Do not consider thresholds for zero temperature. */
116 if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) { 116 if (MLXSW_REG_MTMP_TEMP_TO_MC(module_temp) == 0) {
117 *temp = 0; 117 *temp = 0;
118 return 0; 118 return 0;
119 } 119 }
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index bd6e9014bc74..7849119d407a 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -142,6 +142,12 @@ struct ks8851_net {
142 142
143static int msg_enable; 143static int msg_enable;
144 144
145/* SPI frame opcodes */
146#define KS_SPIOP_RD (0x00)
147#define KS_SPIOP_WR (0x40)
148#define KS_SPIOP_RXFIFO (0x80)
149#define KS_SPIOP_TXFIFO (0xC0)
150
145/* shift for byte-enable data */ 151/* shift for byte-enable data */
146#define BYTE_EN(_x) ((_x) << 2) 152#define BYTE_EN(_x) ((_x) << 2)
147 153
@@ -535,9 +541,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
535 /* set dma read address */ 541 /* set dma read address */
536 ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00); 542 ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
537 543
538 /* start the packet dma process, and set auto-dequeue rx */ 544 /* start DMA access */
539 ks8851_wrreg16(ks, KS_RXQCR, 545 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
540 ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
541 546
542 if (rxlen > 4) { 547 if (rxlen > 4) {
543 unsigned int rxalign; 548 unsigned int rxalign;
@@ -568,7 +573,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
568 } 573 }
569 } 574 }
570 575
571 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr); 576 /* end DMA access and dequeue packet */
577 ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
572 } 578 }
573} 579}
574 580
@@ -785,6 +791,15 @@ static void ks8851_tx_work(struct work_struct *work)
785static int ks8851_net_open(struct net_device *dev) 791static int ks8851_net_open(struct net_device *dev)
786{ 792{
787 struct ks8851_net *ks = netdev_priv(dev); 793 struct ks8851_net *ks = netdev_priv(dev);
794 int ret;
795
796 ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
797 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
798 dev->name, ks);
799 if (ret < 0) {
800 netdev_err(dev, "failed to get irq\n");
801 return ret;
802 }
788 803
789 /* lock the card, even if we may not actually be doing anything 804 /* lock the card, even if we may not actually be doing anything
790 * else at the moment */ 805 * else at the moment */
@@ -849,6 +864,7 @@ static int ks8851_net_open(struct net_device *dev)
849 netif_dbg(ks, ifup, ks->netdev, "network device up\n"); 864 netif_dbg(ks, ifup, ks->netdev, "network device up\n");
850 865
851 mutex_unlock(&ks->lock); 866 mutex_unlock(&ks->lock);
867 mii_check_link(&ks->mii);
852 return 0; 868 return 0;
853} 869}
854 870
@@ -899,6 +915,8 @@ static int ks8851_net_stop(struct net_device *dev)
899 dev_kfree_skb(txb); 915 dev_kfree_skb(txb);
900 } 916 }
901 917
918 free_irq(dev->irq, ks);
919
902 return 0; 920 return 0;
903} 921}
904 922
@@ -1508,6 +1526,7 @@ static int ks8851_probe(struct spi_device *spi)
1508 1526
1509 spi_set_drvdata(spi, ks); 1527 spi_set_drvdata(spi, ks);
1510 1528
1529 netif_carrier_off(ks->netdev);
1511 ndev->if_port = IF_PORT_100BASET; 1530 ndev->if_port = IF_PORT_100BASET;
1512 ndev->netdev_ops = &ks8851_netdev_ops; 1531 ndev->netdev_ops = &ks8851_netdev_ops;
1513 ndev->irq = spi->irq; 1532 ndev->irq = spi->irq;
@@ -1529,14 +1548,6 @@ static int ks8851_probe(struct spi_device *spi)
1529 ks8851_read_selftest(ks); 1548 ks8851_read_selftest(ks);
1530 ks8851_init_mac(ks); 1549 ks8851_init_mac(ks);
1531 1550
1532 ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
1533 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
1534 ndev->name, ks);
1535 if (ret < 0) {
1536 dev_err(&spi->dev, "failed to get irq\n");
1537 goto err_irq;
1538 }
1539
1540 ret = register_netdev(ndev); 1551 ret = register_netdev(ndev);
1541 if (ret) { 1552 if (ret) {
1542 dev_err(&spi->dev, "failed to register network device\n"); 1553 dev_err(&spi->dev, "failed to register network device\n");
@@ -1549,14 +1560,10 @@ static int ks8851_probe(struct spi_device *spi)
1549 1560
1550 return 0; 1561 return 0;
1551 1562
1552
1553err_netdev: 1563err_netdev:
1554 free_irq(ndev->irq, ks); 1564err_id:
1555
1556err_irq:
1557 if (gpio_is_valid(gpio)) 1565 if (gpio_is_valid(gpio))
1558 gpio_set_value(gpio, 0); 1566 gpio_set_value(gpio, 0);
1559err_id:
1560 regulator_disable(ks->vdd_reg); 1567 regulator_disable(ks->vdd_reg);
1561err_reg: 1568err_reg:
1562 regulator_disable(ks->vdd_io); 1569 regulator_disable(ks->vdd_io);
@@ -1574,7 +1581,6 @@ static int ks8851_remove(struct spi_device *spi)
1574 dev_info(&spi->dev, "remove\n"); 1581 dev_info(&spi->dev, "remove\n");
1575 1582
1576 unregister_netdev(priv->netdev); 1583 unregister_netdev(priv->netdev);
1577 free_irq(spi->irq, priv);
1578 if (gpio_is_valid(priv->gpio)) 1584 if (gpio_is_valid(priv->gpio))
1579 gpio_set_value(priv->gpio, 0); 1585 gpio_set_value(priv->gpio, 0);
1580 regulator_disable(priv->vdd_reg); 1586 regulator_disable(priv->vdd_reg);
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 852256ef1f22..23da1e3ee429 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -11,9 +11,15 @@
11*/ 11*/
12 12
13#define KS_CCR 0x08 13#define KS_CCR 0x08
14#define CCR_LE (1 << 10) /* KSZ8851-16MLL */
14#define CCR_EEPROM (1 << 9) 15#define CCR_EEPROM (1 << 9)
15#define CCR_SPI (1 << 8) 16#define CCR_SPI (1 << 8) /* KSZ8851SNL */
16#define CCR_32PIN (1 << 0) 17#define CCR_8BIT (1 << 7) /* KSZ8851-16MLL */
18#define CCR_16BIT (1 << 6) /* KSZ8851-16MLL */
19#define CCR_32BIT (1 << 5) /* KSZ8851-16MLL */
20#define CCR_SHARED (1 << 4) /* KSZ8851-16MLL */
21#define CCR_48PIN (1 << 1) /* KSZ8851-16MLL */
22#define CCR_32PIN (1 << 0) /* KSZ8851SNL */
17 23
18/* MAC address registers */ 24/* MAC address registers */
19#define KS_MAR(_m) (0x15 - (_m)) 25#define KS_MAR(_m) (0x15 - (_m))
@@ -112,13 +118,13 @@
112#define RXCR1_RXE (1 << 0) 118#define RXCR1_RXE (1 << 0)
113 119
114#define KS_RXCR2 0x76 120#define KS_RXCR2 0x76
115#define RXCR2_SRDBL_MASK (0x7 << 5) 121#define RXCR2_SRDBL_MASK (0x7 << 5) /* KSZ8851SNL */
116#define RXCR2_SRDBL_SHIFT (5) 122#define RXCR2_SRDBL_SHIFT (5) /* KSZ8851SNL */
117#define RXCR2_SRDBL_4B (0x0 << 5) 123#define RXCR2_SRDBL_4B (0x0 << 5) /* KSZ8851SNL */
118#define RXCR2_SRDBL_8B (0x1 << 5) 124#define RXCR2_SRDBL_8B (0x1 << 5) /* KSZ8851SNL */
119#define RXCR2_SRDBL_16B (0x2 << 5) 125#define RXCR2_SRDBL_16B (0x2 << 5) /* KSZ8851SNL */
120#define RXCR2_SRDBL_32B (0x3 << 5) 126#define RXCR2_SRDBL_32B (0x3 << 5) /* KSZ8851SNL */
121#define RXCR2_SRDBL_FRAME (0x4 << 5) 127#define RXCR2_SRDBL_FRAME (0x4 << 5) /* KSZ8851SNL */
122#define RXCR2_IUFFP (1 << 4) 128#define RXCR2_IUFFP (1 << 4)
123#define RXCR2_RXIUFCEZ (1 << 3) 129#define RXCR2_RXIUFCEZ (1 << 3)
124#define RXCR2_UDPLFE (1 << 2) 130#define RXCR2_UDPLFE (1 << 2)
@@ -143,8 +149,10 @@
143#define RXFSHR_RXCE (1 << 0) 149#define RXFSHR_RXCE (1 << 0)
144 150
145#define KS_RXFHBCR 0x7E 151#define KS_RXFHBCR 0x7E
152#define RXFHBCR_CNT_MASK (0xfff << 0)
153
146#define KS_TXQCR 0x80 154#define KS_TXQCR 0x80
147#define TXQCR_AETFE (1 << 2) 155#define TXQCR_AETFE (1 << 2) /* KSZ8851SNL */
148#define TXQCR_TXQMAM (1 << 1) 156#define TXQCR_TXQMAM (1 << 1)
149#define TXQCR_METFE (1 << 0) 157#define TXQCR_METFE (1 << 0)
150 158
@@ -167,6 +175,10 @@
167 175
168#define KS_RXFDPR 0x86 176#define KS_RXFDPR 0x86
169#define RXFDPR_RXFPAI (1 << 14) 177#define RXFDPR_RXFPAI (1 << 14)
178#define RXFDPR_WST (1 << 12) /* KSZ8851-16MLL */
179#define RXFDPR_EMS (1 << 11) /* KSZ8851-16MLL */
180#define RXFDPR_RXFP_MASK (0x7ff << 0)
181#define RXFDPR_RXFP_SHIFT (0)
170 182
171#define KS_RXDTTR 0x8C 183#define KS_RXDTTR 0x8C
172#define KS_RXDBCTR 0x8E 184#define KS_RXDBCTR 0x8E
@@ -184,7 +196,7 @@
184#define IRQ_RXMPDI (1 << 4) 196#define IRQ_RXMPDI (1 << 4)
185#define IRQ_LDI (1 << 3) 197#define IRQ_LDI (1 << 3)
186#define IRQ_EDI (1 << 2) 198#define IRQ_EDI (1 << 2)
187#define IRQ_SPIBEI (1 << 1) 199#define IRQ_SPIBEI (1 << 1) /* KSZ8851SNL */
188#define IRQ_DEDI (1 << 0) 200#define IRQ_DEDI (1 << 0)
189 201
190#define KS_RXFCTR 0x9C 202#define KS_RXFCTR 0x9C
@@ -257,42 +269,37 @@
257#define KS_P1ANLPR 0xEE 269#define KS_P1ANLPR 0xEE
258 270
259#define KS_P1SCLMD 0xF4 271#define KS_P1SCLMD 0xF4
260#define P1SCLMD_LEDOFF (1 << 15)
261#define P1SCLMD_TXIDS (1 << 14)
262#define P1SCLMD_RESTARTAN (1 << 13)
263#define P1SCLMD_DISAUTOMDIX (1 << 10)
264#define P1SCLMD_FORCEMDIX (1 << 9)
265#define P1SCLMD_AUTONEGEN (1 << 7)
266#define P1SCLMD_FORCE100 (1 << 6)
267#define P1SCLMD_FORCEFDX (1 << 5)
268#define P1SCLMD_ADV_FLOW (1 << 4)
269#define P1SCLMD_ADV_100BT_FDX (1 << 3)
270#define P1SCLMD_ADV_100BT_HDX (1 << 2)
271#define P1SCLMD_ADV_10BT_FDX (1 << 1)
272#define P1SCLMD_ADV_10BT_HDX (1 << 0)
273 272
274#define KS_P1CR 0xF6 273#define KS_P1CR 0xF6
275#define P1CR_HP_MDIX (1 << 15) 274#define P1CR_LEDOFF (1 << 15)
276#define P1CR_REV_POL (1 << 13) 275#define P1CR_TXIDS (1 << 14)
277#define P1CR_OP_100M (1 << 10) 276#define P1CR_RESTARTAN (1 << 13)
278#define P1CR_OP_FDX (1 << 9) 277#define P1CR_DISAUTOMDIX (1 << 10)
279#define P1CR_OP_MDI (1 << 7) 278#define P1CR_FORCEMDIX (1 << 9)
280#define P1CR_AN_DONE (1 << 6) 279#define P1CR_AUTONEGEN (1 << 7)
281#define P1CR_LINK_GOOD (1 << 5) 280#define P1CR_FORCE100 (1 << 6)
282#define P1CR_PNTR_FLOW (1 << 4) 281#define P1CR_FORCEFDX (1 << 5)
283#define P1CR_PNTR_100BT_FDX (1 << 3) 282#define P1CR_ADV_FLOW (1 << 4)
284#define P1CR_PNTR_100BT_HDX (1 << 2) 283#define P1CR_ADV_100BT_FDX (1 << 3)
285#define P1CR_PNTR_10BT_FDX (1 << 1) 284#define P1CR_ADV_100BT_HDX (1 << 2)
286#define P1CR_PNTR_10BT_HDX (1 << 0) 285#define P1CR_ADV_10BT_FDX (1 << 1)
286#define P1CR_ADV_10BT_HDX (1 << 0)
287
288#define KS_P1SR 0xF8
289#define P1SR_HP_MDIX (1 << 15)
290#define P1SR_REV_POL (1 << 13)
291#define P1SR_OP_100M (1 << 10)
292#define P1SR_OP_FDX (1 << 9)
293#define P1SR_OP_MDI (1 << 7)
294#define P1SR_AN_DONE (1 << 6)
295#define P1SR_LINK_GOOD (1 << 5)
296#define P1SR_PNTR_FLOW (1 << 4)
297#define P1SR_PNTR_100BT_FDX (1 << 3)
298#define P1SR_PNTR_100BT_HDX (1 << 2)
299#define P1SR_PNTR_10BT_FDX (1 << 1)
300#define P1SR_PNTR_10BT_HDX (1 << 0)
287 301
288/* TX Frame control */ 302/* TX Frame control */
289
290#define TXFR_TXIC (1 << 15) 303#define TXFR_TXIC (1 << 15)
291#define TXFR_TXFID_MASK (0x3f << 0) 304#define TXFR_TXFID_MASK (0x3f << 0)
292#define TXFR_TXFID_SHIFT (0) 305#define TXFR_TXFID_SHIFT (0)
293
294/* SPI frame opcodes */
295#define KS_SPIOP_RD (0x00)
296#define KS_SPIOP_WR (0x40)
297#define KS_SPIOP_RXFIFO (0x80)
298#define KS_SPIOP_TXFIFO (0xC0)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index 35f8c9ef204d..c946841c0a06 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -40,6 +40,8 @@
40#include <linux/of_device.h> 40#include <linux/of_device.h>
41#include <linux/of_net.h> 41#include <linux/of_net.h>
42 42
43#include "ks8851.h"
44
43#define DRV_NAME "ks8851_mll" 45#define DRV_NAME "ks8851_mll"
44 46
45static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 }; 47static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
@@ -48,319 +50,10 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
48#define TX_BUF_SIZE 2000 50#define TX_BUF_SIZE 2000
49#define RX_BUF_SIZE 2000 51#define RX_BUF_SIZE 2000
50 52
51#define KS_CCR 0x08
52#define CCR_EEPROM (1 << 9)
53#define CCR_SPI (1 << 8)
54#define CCR_8BIT (1 << 7)
55#define CCR_16BIT (1 << 6)
56#define CCR_32BIT (1 << 5)
57#define CCR_SHARED (1 << 4)
58#define CCR_32PIN (1 << 0)
59
60/* MAC address registers */
61#define KS_MARL 0x10
62#define KS_MARM 0x12
63#define KS_MARH 0x14
64
65#define KS_OBCR 0x20
66#define OBCR_ODS_16MA (1 << 6)
67
68#define KS_EEPCR 0x22
69#define EEPCR_EESA (1 << 4)
70#define EEPCR_EESB (1 << 3)
71#define EEPCR_EEDO (1 << 2)
72#define EEPCR_EESCK (1 << 1)
73#define EEPCR_EECS (1 << 0)
74
75#define KS_MBIR 0x24
76#define MBIR_TXMBF (1 << 12)
77#define MBIR_TXMBFA (1 << 11)
78#define MBIR_RXMBF (1 << 4)
79#define MBIR_RXMBFA (1 << 3)
80
81#define KS_GRR 0x26
82#define GRR_QMU (1 << 1)
83#define GRR_GSR (1 << 0)
84
85#define KS_WFCR 0x2A
86#define WFCR_MPRXE (1 << 7)
87#define WFCR_WF3E (1 << 3)
88#define WFCR_WF2E (1 << 2)
89#define WFCR_WF1E (1 << 1)
90#define WFCR_WF0E (1 << 0)
91
92#define KS_WF0CRC0 0x30
93#define KS_WF0CRC1 0x32
94#define KS_WF0BM0 0x34
95#define KS_WF0BM1 0x36
96#define KS_WF0BM2 0x38
97#define KS_WF0BM3 0x3A
98
99#define KS_WF1CRC0 0x40
100#define KS_WF1CRC1 0x42
101#define KS_WF1BM0 0x44
102#define KS_WF1BM1 0x46
103#define KS_WF1BM2 0x48
104#define KS_WF1BM3 0x4A
105
106#define KS_WF2CRC0 0x50
107#define KS_WF2CRC1 0x52
108#define KS_WF2BM0 0x54
109#define KS_WF2BM1 0x56
110#define KS_WF2BM2 0x58
111#define KS_WF2BM3 0x5A
112
113#define KS_WF3CRC0 0x60
114#define KS_WF3CRC1 0x62
115#define KS_WF3BM0 0x64
116#define KS_WF3BM1 0x66
117#define KS_WF3BM2 0x68
118#define KS_WF3BM3 0x6A
119
120#define KS_TXCR 0x70
121#define TXCR_TCGICMP (1 << 8)
122#define TXCR_TCGUDP (1 << 7)
123#define TXCR_TCGTCP (1 << 6)
124#define TXCR_TCGIP (1 << 5)
125#define TXCR_FTXQ (1 << 4)
126#define TXCR_TXFCE (1 << 3)
127#define TXCR_TXPE (1 << 2)
128#define TXCR_TXCRC (1 << 1)
129#define TXCR_TXE (1 << 0)
130
131#define KS_TXSR 0x72
132#define TXSR_TXLC (1 << 13)
133#define TXSR_TXMC (1 << 12)
134#define TXSR_TXFID_MASK (0x3f << 0)
135#define TXSR_TXFID_SHIFT (0)
136#define TXSR_TXFID_GET(_v) (((_v) >> 0) & 0x3f)
137
138
139#define KS_RXCR1 0x74
140#define RXCR1_FRXQ (1 << 15)
141#define RXCR1_RXUDPFCC (1 << 14)
142#define RXCR1_RXTCPFCC (1 << 13)
143#define RXCR1_RXIPFCC (1 << 12)
144#define RXCR1_RXPAFMA (1 << 11)
145#define RXCR1_RXFCE (1 << 10)
146#define RXCR1_RXEFE (1 << 9)
147#define RXCR1_RXMAFMA (1 << 8)
148#define RXCR1_RXBE (1 << 7)
149#define RXCR1_RXME (1 << 6)
150#define RXCR1_RXUE (1 << 5)
151#define RXCR1_RXAE (1 << 4)
152#define RXCR1_RXINVF (1 << 1)
153#define RXCR1_RXE (1 << 0)
154#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \ 53#define RXCR1_FILTER_MASK (RXCR1_RXINVF | RXCR1_RXAE | \
155 RXCR1_RXMAFMA | RXCR1_RXPAFMA) 54 RXCR1_RXMAFMA | RXCR1_RXPAFMA)
156
157#define KS_RXCR2 0x76
158#define RXCR2_SRDBL_MASK (0x7 << 5)
159#define RXCR2_SRDBL_SHIFT (5)
160#define RXCR2_SRDBL_4B (0x0 << 5)
161#define RXCR2_SRDBL_8B (0x1 << 5)
162#define RXCR2_SRDBL_16B (0x2 << 5)
163#define RXCR2_SRDBL_32B (0x3 << 5)
164/* #define RXCR2_SRDBL_FRAME (0x4 << 5) */
165#define RXCR2_IUFFP (1 << 4)
166#define RXCR2_RXIUFCEZ (1 << 3)
167#define RXCR2_UDPLFE (1 << 2)
168#define RXCR2_RXICMPFCC (1 << 1)
169#define RXCR2_RXSAF (1 << 0)
170
171#define KS_TXMIR 0x78
172
173#define KS_RXFHSR 0x7C
174#define RXFSHR_RXFV (1 << 15)
175#define RXFSHR_RXICMPFCS (1 << 13)
176#define RXFSHR_RXIPFCS (1 << 12)
177#define RXFSHR_RXTCPFCS (1 << 11)
178#define RXFSHR_RXUDPFCS (1 << 10)
179#define RXFSHR_RXBF (1 << 7)
180#define RXFSHR_RXMF (1 << 6)
181#define RXFSHR_RXUF (1 << 5)
182#define RXFSHR_RXMR (1 << 4)
183#define RXFSHR_RXFT (1 << 3)
184#define RXFSHR_RXFTL (1 << 2)
185#define RXFSHR_RXRF (1 << 1)
186#define RXFSHR_RXCE (1 << 0)
187#define RXFSHR_ERR (RXFSHR_RXCE | RXFSHR_RXRF |\
188 RXFSHR_RXFTL | RXFSHR_RXMR |\
189 RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
190 RXFSHR_RXTCPFCS)
191#define KS_RXFHBCR 0x7E
192#define RXFHBCR_CNT_MASK 0x0FFF
193
194#define KS_TXQCR 0x80
195#define TXQCR_AETFE (1 << 2)
196#define TXQCR_TXQMAM (1 << 1)
197#define TXQCR_METFE (1 << 0)
198
199#define KS_RXQCR 0x82
200#define RXQCR_RXDTTS (1 << 12)
201#define RXQCR_RXDBCTS (1 << 11)
202#define RXQCR_RXFCTS (1 << 10)
203#define RXQCR_RXIPHTOE (1 << 9)
204#define RXQCR_RXDTTE (1 << 7)
205#define RXQCR_RXDBCTE (1 << 6)
206#define RXQCR_RXFCTE (1 << 5)
207#define RXQCR_ADRFE (1 << 4)
208#define RXQCR_SDA (1 << 3)
209#define RXQCR_RRXEF (1 << 0)
210#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE) 55#define RXQCR_CMD_CNTL (RXQCR_RXFCTE|RXQCR_ADRFE)
211 56
212#define KS_TXFDPR 0x84
213#define TXFDPR_TXFPAI (1 << 14)
214#define TXFDPR_TXFP_MASK (0x7ff << 0)
215#define TXFDPR_TXFP_SHIFT (0)
216
217#define KS_RXFDPR 0x86
218#define RXFDPR_RXFPAI (1 << 14)
219
220#define KS_RXDTTR 0x8C
221#define KS_RXDBCTR 0x8E
222
223#define KS_IER 0x90
224#define KS_ISR 0x92
225#define IRQ_LCI (1 << 15)
226#define IRQ_TXI (1 << 14)
227#define IRQ_RXI (1 << 13)
228#define IRQ_RXOI (1 << 11)
229#define IRQ_TXPSI (1 << 9)
230#define IRQ_RXPSI (1 << 8)
231#define IRQ_TXSAI (1 << 6)
232#define IRQ_RXWFDI (1 << 5)
233#define IRQ_RXMPDI (1 << 4)
234#define IRQ_LDI (1 << 3)
235#define IRQ_EDI (1 << 2)
236#define IRQ_SPIBEI (1 << 1)
237#define IRQ_DEDI (1 << 0)
238
239#define KS_RXFCTR 0x9C
240#define RXFCTR_THRESHOLD_MASK 0x00FF
241
242#define KS_RXFC 0x9D
243#define RXFCTR_RXFC_MASK (0xff << 8)
244#define RXFCTR_RXFC_SHIFT (8)
245#define RXFCTR_RXFC_GET(_v) (((_v) >> 8) & 0xff)
246#define RXFCTR_RXFCT_MASK (0xff << 0)
247#define RXFCTR_RXFCT_SHIFT (0)
248
249#define KS_TXNTFSR 0x9E
250
251#define KS_MAHTR0 0xA0
252#define KS_MAHTR1 0xA2
253#define KS_MAHTR2 0xA4
254#define KS_MAHTR3 0xA6
255
256#define KS_FCLWR 0xB0
257#define KS_FCHWR 0xB2
258#define KS_FCOWR 0xB4
259
260#define KS_CIDER 0xC0
261#define CIDER_ID 0x8870
262#define CIDER_REV_MASK (0x7 << 1)
263#define CIDER_REV_SHIFT (1)
264#define CIDER_REV_GET(_v) (((_v) >> 1) & 0x7)
265
266#define KS_CGCR 0xC6
267#define KS_IACR 0xC8
268#define IACR_RDEN (1 << 12)
269#define IACR_TSEL_MASK (0x3 << 10)
270#define IACR_TSEL_SHIFT (10)
271#define IACR_TSEL_MIB (0x3 << 10)
272#define IACR_ADDR_MASK (0x1f << 0)
273#define IACR_ADDR_SHIFT (0)
274
275#define KS_IADLR 0xD0
276#define KS_IAHDR 0xD2
277
278#define KS_PMECR 0xD4
279#define PMECR_PME_DELAY (1 << 14)
280#define PMECR_PME_POL (1 << 12)
281#define PMECR_WOL_WAKEUP (1 << 11)
282#define PMECR_WOL_MAGICPKT (1 << 10)
283#define PMECR_WOL_LINKUP (1 << 9)
284#define PMECR_WOL_ENERGY (1 << 8)
285#define PMECR_AUTO_WAKE_EN (1 << 7)
286#define PMECR_WAKEUP_NORMAL (1 << 6)
287#define PMECR_WKEVT_MASK (0xf << 2)
288#define PMECR_WKEVT_SHIFT (2)
289#define PMECR_WKEVT_GET(_v) (((_v) >> 2) & 0xf)
290#define PMECR_WKEVT_ENERGY (0x1 << 2)
291#define PMECR_WKEVT_LINK (0x2 << 2)
292#define PMECR_WKEVT_MAGICPKT (0x4 << 2)
293#define PMECR_WKEVT_FRAME (0x8 << 2)
294#define PMECR_PM_MASK (0x3 << 0)
295#define PMECR_PM_SHIFT (0)
296#define PMECR_PM_NORMAL (0x0 << 0)
297#define PMECR_PM_ENERGY (0x1 << 0)
298#define PMECR_PM_SOFTDOWN (0x2 << 0)
299#define PMECR_PM_POWERSAVE (0x3 << 0)
300
301/* Standard MII PHY data */
302#define KS_P1MBCR 0xE4
303#define P1MBCR_FORCE_FDX (1 << 8)
304
305#define KS_P1MBSR 0xE6
306#define P1MBSR_AN_COMPLETE (1 << 5)
307#define P1MBSR_AN_CAPABLE (1 << 3)
308#define P1MBSR_LINK_UP (1 << 2)
309
310#define KS_PHY1ILR 0xE8
311#define KS_PHY1IHR 0xEA
312#define KS_P1ANAR 0xEC
313#define KS_P1ANLPR 0xEE
314
315#define KS_P1SCLMD 0xF4
316#define P1SCLMD_LEDOFF (1 << 15)
317#define P1SCLMD_TXIDS (1 << 14)
318#define P1SCLMD_RESTARTAN (1 << 13)
319#define P1SCLMD_DISAUTOMDIX (1 << 10)
320#define P1SCLMD_FORCEMDIX (1 << 9)
321#define P1SCLMD_AUTONEGEN (1 << 7)
322#define P1SCLMD_FORCE100 (1 << 6)
323#define P1SCLMD_FORCEFDX (1 << 5)
324#define P1SCLMD_ADV_FLOW (1 << 4)
325#define P1SCLMD_ADV_100BT_FDX (1 << 3)
326#define P1SCLMD_ADV_100BT_HDX (1 << 2)
327#define P1SCLMD_ADV_10BT_FDX (1 << 1)
328#define P1SCLMD_ADV_10BT_HDX (1 << 0)
329
330#define KS_P1CR 0xF6
331#define P1CR_HP_MDIX (1 << 15)
332#define P1CR_REV_POL (1 << 13)
333#define P1CR_OP_100M (1 << 10)
334#define P1CR_OP_FDX (1 << 9)
335#define P1CR_OP_MDI (1 << 7)
336#define P1CR_AN_DONE (1 << 6)
337#define P1CR_LINK_GOOD (1 << 5)
338#define P1CR_PNTR_FLOW (1 << 4)
339#define P1CR_PNTR_100BT_FDX (1 << 3)
340#define P1CR_PNTR_100BT_HDX (1 << 2)
341#define P1CR_PNTR_10BT_FDX (1 << 1)
342#define P1CR_PNTR_10BT_HDX (1 << 0)
343
344/* TX Frame control */
345
346#define TXFR_TXIC (1 << 15)
347#define TXFR_TXFID_MASK (0x3f << 0)
348#define TXFR_TXFID_SHIFT (0)
349
350#define KS_P1SR 0xF8
351#define P1SR_HP_MDIX (1 << 15)
352#define P1SR_REV_POL (1 << 13)
353#define P1SR_OP_100M (1 << 10)
354#define P1SR_OP_FDX (1 << 9)
355#define P1SR_OP_MDI (1 << 7)
356#define P1SR_AN_DONE (1 << 6)
357#define P1SR_LINK_GOOD (1 << 5)
358#define P1SR_PNTR_FLOW (1 << 4)
359#define P1SR_PNTR_100BT_FDX (1 << 3)
360#define P1SR_PNTR_100BT_HDX (1 << 2)
361#define P1SR_PNTR_10BT_FDX (1 << 1)
362#define P1SR_PNTR_10BT_HDX (1 << 0)
363
364#define ENUM_BUS_NONE 0 57#define ENUM_BUS_NONE 0
365#define ENUM_BUS_8BIT 1 58#define ENUM_BUS_8BIT 1
366#define ENUM_BUS_16BIT 2 59#define ENUM_BUS_16BIT 2
@@ -1475,7 +1168,7 @@ static void ks_setup(struct ks_net *ks)
1475 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI); 1168 ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
1476 1169
1477 /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */ 1170 /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
1478 ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK); 1171 ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);
1479 1172
1480 /* Setup RxQ Command Control (RXQCR) */ 1173 /* Setup RxQ Command Control (RXQCR) */
1481 ks->rc_rxqcr = RXQCR_CMD_CNTL; 1174 ks->rc_rxqcr = RXQCR_CMD_CNTL;
@@ -1488,7 +1181,7 @@ static void ks_setup(struct ks_net *ks)
1488 */ 1181 */
1489 1182
1490 w = ks_rdreg16(ks, KS_P1MBCR); 1183 w = ks_rdreg16(ks, KS_P1MBCR);
1491 w &= ~P1MBCR_FORCE_FDX; 1184 w &= ~BMCR_FULLDPLX;
1492 ks_wrreg16(ks, KS_P1MBCR, w); 1185 ks_wrreg16(ks, KS_P1MBCR, w);
1493 1186
1494 w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP; 1187 w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
@@ -1629,7 +1322,7 @@ static int ks8851_probe(struct platform_device *pdev)
1629 ks_setup_int(ks); 1322 ks_setup_int(ks);
1630 1323
1631 data = ks_rdreg16(ks, KS_OBCR); 1324 data = ks_rdreg16(ks, KS_OBCR);
1632 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA); 1325 ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);
1633 1326
1634 /* overwriting the default MAC address */ 1327 /* overwriting the default MAC address */
1635 if (pdev->dev.of_node) { 1328 if (pdev->dev.of_node) {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index eeda4ed98333..e336f6ee94f5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
48 48
49 tmp_push_vlan_tci = 49 tmp_push_vlan_tci =
50 FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) | 50 FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
51 FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) | 51 FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
52 NFP_FL_PUSH_VLAN_CFI;
53 push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); 52 push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
54} 53}
55 54
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 4fcaf11ed56e..0ed51e79db00 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -26,7 +26,7 @@
26#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) 26#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
27 27
28#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) 28#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
29#define NFP_FLOWER_MASK_VLAN_CFI BIT(12) 29#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
30#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) 30#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
31 31
32#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) 32#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
@@ -82,7 +82,6 @@
82#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) 82#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0)
83 83
84#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13) 84#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
85#define NFP_FL_PUSH_VLAN_CFI BIT(12)
86#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) 85#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
87 86
88#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) 87#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e03c8ef2c28c..9b8b843d0340 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
30 30
31 flow_rule_match_vlan(rule, &match); 31 flow_rule_match_vlan(rule, &match);
32 /* Populate the tci field. */ 32 /* Populate the tci field. */
33 if (match.key->vlan_id || match.key->vlan_priority) { 33 tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
34 tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, 34 tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
35 match.key->vlan_priority) | 35 match.key->vlan_priority) |
36 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, 36 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
37 match.key->vlan_id) | 37 match.key->vlan_id);
38 NFP_FLOWER_MASK_VLAN_CFI; 38 ext->tci = cpu_to_be16(tmp_tci);
39 ext->tci = cpu_to_be16(tmp_tci); 39
40 tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, 40 tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
41 match.mask->vlan_priority) | 41 tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
42 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, 42 match.mask->vlan_priority) |
43 match.mask->vlan_id) | 43 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
44 NFP_FLOWER_MASK_VLAN_CFI; 44 match.mask->vlan_id);
45 msk->tci = cpu_to_be16(tmp_tci); 45 msk->tci = cpu_to_be16(tmp_tci);
46 }
47 } 46 }
48} 47}
49 48
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index d2c803bb4e56..94d228c04496 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
195 ret = dev_queue_xmit(skb); 195 ret = dev_queue_xmit(skb);
196 nfp_repr_inc_tx_stats(netdev, len, ret); 196 nfp_repr_inc_tx_stats(netdev, len, ret);
197 197
198 return ret; 198 return NETDEV_TX_OK;
199} 199}
200 200
201static int nfp_repr_stop(struct net_device *netdev) 201static int nfp_repr_stop(struct net_device *netdev)
@@ -383,7 +383,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
383 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 383 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
384 netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; 384 netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
385 385
386 netdev->priv_flags |= IFF_NO_QUEUE; 386 netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
387 netdev->features |= NETIF_F_LLTX; 387 netdev->features |= NETIF_F_LLTX;
388 388
389 if (nfp_app_has_tc(app)) { 389 if (nfp_app_has_tc(app)) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 3b0adda7cc9c..a4cd6f2cfb86 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1048,6 +1048,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
1048 1048
1049 for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) { 1049 for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
1050 skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE); 1050 skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
1051 if (!skb)
1052 break;
1051 qlcnic_create_loopback_buff(skb->data, adapter->mac_addr); 1053 qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
1052 skb_put(skb, QLCNIC_ILB_PKT_SIZE); 1054 skb_put(skb, QLCNIC_ILB_PKT_SIZE);
1053 adapter->ahw->diag_cnt = 0; 1055 adapter->ahw->diag_cnt = 0;
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index cfb67b746595..58e0ca9093d3 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -482,7 +482,7 @@ static void hardware_init(struct net_device *dev)
482 write_reg_high(ioaddr, IMR, ISRh_RxErr); 482 write_reg_high(ioaddr, IMR, ISRh_RxErr);
483 483
484 lp->tx_unit_busy = 0; 484 lp->tx_unit_busy = 0;
485 lp->pac_cnt_in_tx_buf = 0; 485 lp->pac_cnt_in_tx_buf = 0;
486 lp->saved_tx_size = 0; 486 lp->saved_tx_size = 0;
487} 487}
488 488
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c29dde064078..19efa88f3f02 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -678,6 +678,7 @@ struct rtl8169_private {
678 struct work_struct work; 678 struct work_struct work;
679 } wk; 679 } wk;
680 680
681 unsigned irq_enabled:1;
681 unsigned supports_gmii:1; 682 unsigned supports_gmii:1;
682 dma_addr_t counters_phys_addr; 683 dma_addr_t counters_phys_addr;
683 struct rtl8169_counters *counters; 684 struct rtl8169_counters *counters;
@@ -1293,6 +1294,7 @@ static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1293static void rtl_irq_disable(struct rtl8169_private *tp) 1294static void rtl_irq_disable(struct rtl8169_private *tp)
1294{ 1295{
1295 RTL_W16(tp, IntrMask, 0); 1296 RTL_W16(tp, IntrMask, 0);
1297 tp->irq_enabled = 0;
1296} 1298}
1297 1299
1298#define RTL_EVENT_NAPI_RX (RxOK | RxErr) 1300#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
@@ -1301,6 +1303,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
1301 1303
1302static void rtl_irq_enable(struct rtl8169_private *tp) 1304static void rtl_irq_enable(struct rtl8169_private *tp)
1303{ 1305{
1306 tp->irq_enabled = 1;
1304 RTL_W16(tp, IntrMask, tp->irq_mask); 1307 RTL_W16(tp, IntrMask, tp->irq_mask);
1305} 1308}
1306 1309
@@ -5457,7 +5460,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
5457 tp->cp_cmd |= PktCntrDisable | INTT_1; 5460 tp->cp_cmd |= PktCntrDisable | INTT_1;
5458 RTL_W16(tp, CPlusCmd, tp->cp_cmd); 5461 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
5459 5462
5460 RTL_W16(tp, IntrMitigate, 0x5151); 5463 RTL_W16(tp, IntrMitigate, 0x5100);
5461 5464
5462 /* Work around for RxFIFO overflow. */ 5465 /* Work around for RxFIFO overflow. */
5463 if (tp->mac_version == RTL_GIGA_MAC_VER_11) { 5466 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
@@ -6520,9 +6523,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6520{ 6523{
6521 struct rtl8169_private *tp = dev_instance; 6524 struct rtl8169_private *tp = dev_instance;
6522 u16 status = RTL_R16(tp, IntrStatus); 6525 u16 status = RTL_R16(tp, IntrStatus);
6523 u16 irq_mask = RTL_R16(tp, IntrMask);
6524 6526
6525 if (status == 0xffff || !(status & irq_mask)) 6527 if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask))
6526 return IRQ_NONE; 6528 return IRQ_NONE;
6527 6529
6528 if (unlikely(status & SYSErr)) { 6530 if (unlikely(status & SYSErr)) {
@@ -6540,7 +6542,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6540 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); 6542 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
6541 } 6543 }
6542 6544
6543 if (status & RTL_EVENT_NAPI) { 6545 if (status & (RTL_EVENT_NAPI | LinkChg)) {
6544 rtl_irq_disable(tp); 6546 rtl_irq_disable(tp);
6545 napi_schedule_irqoff(&tp->napi); 6547 napi_schedule_irqoff(&tp->napi);
6546 } 6548 }
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 6073387511f8..67f9bb6e941b 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -730,10 +730,10 @@ static u16 sis900_default_phy(struct net_device * net_dev)
730 status = mdio_read(net_dev, phy->phy_addr, MII_STATUS); 730 status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
731 731
732 /* Link ON & Not select default PHY & not ghost PHY */ 732 /* Link ON & Not select default PHY & not ghost PHY */
733 if ((status & MII_STAT_LINK) && !default_phy && 733 if ((status & MII_STAT_LINK) && !default_phy &&
734 (phy->phy_types != UNKNOWN)) 734 (phy->phy_types != UNKNOWN)) {
735 default_phy = phy; 735 default_phy = phy;
736 else { 736 } else {
737 status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL); 737 status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
738 mdio_write(net_dev, phy->phy_addr, MII_CONTROL, 738 mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
739 status | MII_CNTL_AUTO | MII_CNTL_ISOLATE); 739 status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
@@ -741,7 +741,7 @@ static u16 sis900_default_phy(struct net_device * net_dev)
741 phy_home = phy; 741 phy_home = phy;
742 else if(phy->phy_types == LAN) 742 else if(phy->phy_types == LAN)
743 phy_lan = phy; 743 phy_lan = phy;
744 } 744 }
745 } 745 }
746 746
747 if (!default_phy && phy_home) 747 if (!default_phy && phy_home)
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 40d6356a7e73..3dfb07a78952 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -29,11 +29,13 @@
29/* Specific functions used for Ring mode */ 29/* Specific functions used for Ring mode */
30 30
31/* Enhanced descriptors */ 31/* Enhanced descriptors */
32static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) 32static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
33 int bfsize)
33{ 34{
34 p->des1 |= cpu_to_le32((BUF_SIZE_8KiB 35 if (bfsize == BUF_SIZE_16KiB)
35 << ERDES1_BUFFER2_SIZE_SHIFT) 36 p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
36 & ERDES1_BUFFER2_SIZE_MASK); 37 << ERDES1_BUFFER2_SIZE_SHIFT)
38 & ERDES1_BUFFER2_SIZE_MASK);
37 39
38 if (end) 40 if (end)
39 p->des1 |= cpu_to_le32(ERDES1_END_RING); 41 p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
59} 61}
60 62
61/* Normal descriptors */ 63/* Normal descriptors */
62static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) 64static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
63{ 65{
64 p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1) 66 if (bfsize >= BUF_SIZE_2KiB) {
65 << RDES1_BUFFER2_SIZE_SHIFT) 67 int bfsize2;
66 & RDES1_BUFFER2_SIZE_MASK); 68
69 bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
70 p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
71 & RDES1_BUFFER2_SIZE_MASK);
72 }
67 73
68 if (end) 74 if (end)
69 p->des1 |= cpu_to_le32(RDES1_END_RING); 75 p->des1 |= cpu_to_le32(RDES1_END_RING);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 7fbb6a4dbf51..e061e9f5fad7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -296,7 +296,7 @@ exit:
296} 296}
297 297
298static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, 298static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
299 int mode, int end) 299 int mode, int end, int bfsize)
300{ 300{
301 dwmac4_set_rx_owner(p, disable_rx_ic); 301 dwmac4_set_rx_owner(p, disable_rx_ic);
302} 302}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 1d858fdec997..98fa471da7c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
123} 123}
124 124
125static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, 125static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
126 int mode, int end) 126 int mode, int end, int bfsize)
127{ 127{
128 dwxgmac2_set_rx_owner(p, disable_rx_ic); 128 dwxgmac2_set_rx_owner(p, disable_rx_ic);
129} 129}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 5ef91a790f9d..5202d6ad7919 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
201 if (unlikely(rdes0 & RDES0_OWN)) 201 if (unlikely(rdes0 & RDES0_OWN))
202 return dma_own; 202 return dma_own;
203 203
204 if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
205 stats->rx_length_errors++;
206 return discard_frame;
207 }
208
204 if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { 209 if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
205 if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { 210 if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
206 x->rx_desc++; 211 x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
231 * It doesn't match with the information reported into the databook. 236 * It doesn't match with the information reported into the databook.
232 * At any rate, we need to understand if the CSUM hw computation is ok 237 * At any rate, we need to understand if the CSUM hw computation is ok
233 * and report this info to the upper layers. */ 238 * and report this info to the upper layers. */
234 ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), 239 if (likely(ret == good_frame))
235 !!(rdes0 & RDES0_FRAME_TYPE), 240 ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
236 !!(rdes0 & ERDES0_RX_MAC_ADDR)); 241 !!(rdes0 & RDES0_FRAME_TYPE),
242 !!(rdes0 & ERDES0_RX_MAC_ADDR));
237 243
238 if (unlikely(rdes0 & RDES0_DRIBBLING)) 244 if (unlikely(rdes0 & RDES0_DRIBBLING))
239 x->dribbling_bit++; 245 x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
259} 265}
260 266
261static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, 267static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
262 int mode, int end) 268 int mode, int end, int bfsize)
263{ 269{
270 int bfsize1;
271
264 p->des0 |= cpu_to_le32(RDES0_OWN); 272 p->des0 |= cpu_to_le32(RDES0_OWN);
265 p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK); 273
274 bfsize1 = min(bfsize, BUF_SIZE_8KiB);
275 p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
266 276
267 if (mode == STMMAC_CHAIN_MODE) 277 if (mode == STMMAC_CHAIN_MODE)
268 ehn_desc_rx_set_on_chain(p); 278 ehn_desc_rx_set_on_chain(p);
269 else 279 else
270 ehn_desc_rx_set_on_ring(p, end); 280 ehn_desc_rx_set_on_ring(p, end, bfsize);
271 281
272 if (disable_rx_ic) 282 if (disable_rx_ic)
273 p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); 283 p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 92b8944f26e3..5bb00234d961 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -33,7 +33,7 @@ struct dma_extended_desc;
33struct stmmac_desc_ops { 33struct stmmac_desc_ops {
34 /* DMA RX descriptor ring initialization */ 34 /* DMA RX descriptor ring initialization */
35 void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, 35 void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
36 int end); 36 int end, int bfsize);
37 /* DMA TX descriptor ring initialization */ 37 /* DMA TX descriptor ring initialization */
38 void (*init_tx_desc)(struct dma_desc *p, int mode, int end); 38 void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
39 /* Invoked by the xmit function to prepare the tx descriptor */ 39 /* Invoked by the xmit function to prepare the tx descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index de65bb29feba..b7dd4e3c760d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
91 return dma_own; 91 return dma_own;
92 92
93 if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { 93 if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
94 pr_warn("%s: Oversized frame spanned multiple buffers\n",
95 __func__);
96 stats->rx_length_errors++; 94 stats->rx_length_errors++;
97 return discard_frame; 95 return discard_frame;
98 } 96 }
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
135} 133}
136 134
137static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, 135static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
138 int end) 136 int end, int bfsize)
139{ 137{
138 int bfsize1;
139
140 p->des0 |= cpu_to_le32(RDES0_OWN); 140 p->des0 |= cpu_to_le32(RDES0_OWN);
141 p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK); 141
142 bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
143 p->des1 |= cpu_to_le32(bfsize & RDES1_BUFFER1_SIZE_MASK);
142 144
143 if (mode == STMMAC_CHAIN_MODE) 145 if (mode == STMMAC_CHAIN_MODE)
144 ndesc_rx_set_on_chain(p, end); 146 ndesc_rx_set_on_chain(p, end);
145 else 147 else
146 ndesc_rx_set_on_ring(p, end); 148 ndesc_rx_set_on_ring(p, end, bfsize);
147 149
148 if (disable_rx_ic) 150 if (disable_rx_ic)
149 p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); 151 p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index d8c5bc412219..4d9bcb4d0378 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
59 59
60 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); 60 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
61 stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, 61 stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
62 STMMAC_RING_MODE, 1, false, skb->len); 62 STMMAC_RING_MODE, 0, false, skb->len);
63 tx_q->tx_skbuff[entry] = NULL; 63 tx_q->tx_skbuff[entry] = NULL;
64 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE); 64 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
65 65
@@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
79 79
80 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); 80 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
81 stmmac_prepare_tx_desc(priv, desc, 0, len, csum, 81 stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
82 STMMAC_RING_MODE, 1, true, skb->len); 82 STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
83 skb->len);
83 } else { 84 } else {
84 des2 = dma_map_single(priv->device, skb->data, 85 des2 = dma_map_single(priv->device, skb->data,
85 nopaged_len, DMA_TO_DEVICE); 86 nopaged_len, DMA_TO_DEVICE);
@@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
91 tx_q->tx_skbuff_dma[entry].is_jumbo = true; 92 tx_q->tx_skbuff_dma[entry].is_jumbo = true;
92 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB); 93 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
93 stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum, 94 stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
94 STMMAC_RING_MODE, 1, true, skb->len); 95 STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
96 skb->len);
95 } 97 }
96 98
97 tx_q->cur_tx = entry; 99 tx_q->cur_tx = entry;
@@ -111,10 +113,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
111 113
112static void refill_desc3(void *priv_ptr, struct dma_desc *p) 114static void refill_desc3(void *priv_ptr, struct dma_desc *p)
113{ 115{
114 struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; 116 struct stmmac_rx_queue *rx_q = priv_ptr;
117 struct stmmac_priv *priv = rx_q->priv_data;
115 118
116 /* Fill DES3 in case of RING mode */ 119 /* Fill DES3 in case of RING mode */
117 if (priv->dma_buf_sz >= BUF_SIZE_8KiB) 120 if (priv->dma_buf_sz == BUF_SIZE_16KiB)
118 p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB); 121 p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
119} 122}
120 123
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 97c5e1aad88f..a26e36dbb5df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1136 if (priv->extend_desc) 1136 if (priv->extend_desc)
1137 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, 1137 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1138 priv->use_riwt, priv->mode, 1138 priv->use_riwt, priv->mode,
1139 (i == DMA_RX_SIZE - 1)); 1139 (i == DMA_RX_SIZE - 1),
1140 priv->dma_buf_sz);
1140 else 1141 else
1141 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], 1142 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1142 priv->use_riwt, priv->mode, 1143 priv->use_riwt, priv->mode,
1143 (i == DMA_RX_SIZE - 1)); 1144 (i == DMA_RX_SIZE - 1),
1145 priv->dma_buf_sz);
1144} 1146}
1145 1147
1146/** 1148/**
@@ -3216,14 +3218,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3216 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len, 3218 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
3217 csum_insertion, priv->mode, 1, last_segment, 3219 csum_insertion, priv->mode, 1, last_segment,
3218 skb->len); 3220 skb->len);
3219 3221 } else {
3220 /* The own bit must be the latest setting done when prepare the 3222 stmmac_set_tx_owner(priv, first);
3221 * descriptor and then barrier is needed to make sure that
3222 * all is coherent before granting the DMA engine.
3223 */
3224 wmb();
3225 } 3223 }
3226 3224
3225 /* The own bit must be the latest setting done when prepare the
3226 * descriptor and then barrier is needed to make sure that
3227 * all is coherent before granting the DMA engine.
3228 */
3229 wmb();
3230
3227 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); 3231 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
3228 3232
3229 stmmac_enable_dma_transmission(priv, priv->ioaddr); 3233 stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -3350,9 +3354,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3350{ 3354{
3351 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3355 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3352 struct stmmac_channel *ch = &priv->channel[queue]; 3356 struct stmmac_channel *ch = &priv->channel[queue];
3353 unsigned int entry = rx_q->cur_rx; 3357 unsigned int next_entry = rx_q->cur_rx;
3354 int coe = priv->hw->rx_csum; 3358 int coe = priv->hw->rx_csum;
3355 unsigned int next_entry;
3356 unsigned int count = 0; 3359 unsigned int count = 0;
3357 bool xmac; 3360 bool xmac;
3358 3361
@@ -3370,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3370 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); 3373 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3371 } 3374 }
3372 while (count < limit) { 3375 while (count < limit) {
3373 int status; 3376 int entry, status;
3374 struct dma_desc *p; 3377 struct dma_desc *p;
3375 struct dma_desc *np; 3378 struct dma_desc *np;
3376 3379
3380 entry = next_entry;
3381
3377 if (priv->extend_desc) 3382 if (priv->extend_desc)
3378 p = (struct dma_desc *)(rx_q->dma_erx + entry); 3383 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3379 else 3384 else
@@ -3429,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3429 * ignored 3434 * ignored
3430 */ 3435 */
3431 if (frame_len > priv->dma_buf_sz) { 3436 if (frame_len > priv->dma_buf_sz) {
3432 netdev_err(priv->dev, 3437 if (net_ratelimit())
3433 "len %d larger than size (%d)\n", 3438 netdev_err(priv->dev,
3434 frame_len, priv->dma_buf_sz); 3439 "len %d larger than size (%d)\n",
3440 frame_len, priv->dma_buf_sz);
3435 priv->dev->stats.rx_length_errors++; 3441 priv->dev->stats.rx_length_errors++;
3436 break; 3442 continue;
3437 } 3443 }
3438 3444
3439 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 3445 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3468,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3468 dev_warn(priv->device, 3474 dev_warn(priv->device,
3469 "packet dropped\n"); 3475 "packet dropped\n");
3470 priv->dev->stats.rx_dropped++; 3476 priv->dev->stats.rx_dropped++;
3471 break; 3477 continue;
3472 } 3478 }
3473 3479
3474 dma_sync_single_for_cpu(priv->device, 3480 dma_sync_single_for_cpu(priv->device,
@@ -3488,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3488 } else { 3494 } else {
3489 skb = rx_q->rx_skbuff[entry]; 3495 skb = rx_q->rx_skbuff[entry];
3490 if (unlikely(!skb)) { 3496 if (unlikely(!skb)) {
3491 netdev_err(priv->dev, 3497 if (net_ratelimit())
3492 "%s: Inconsistent Rx chain\n", 3498 netdev_err(priv->dev,
3493 priv->dev->name); 3499 "%s: Inconsistent Rx chain\n",
3500 priv->dev->name);
3494 priv->dev->stats.rx_dropped++; 3501 priv->dev->stats.rx_dropped++;
3495 break; 3502 continue;
3496 } 3503 }
3497 prefetch(skb->data - NET_IP_ALIGN); 3504 prefetch(skb->data - NET_IP_ALIGN);
3498 rx_q->rx_skbuff[entry] = NULL; 3505 rx_q->rx_skbuff[entry] = NULL;
@@ -3527,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3527 priv->dev->stats.rx_packets++; 3534 priv->dev->stats.rx_packets++;
3528 priv->dev->stats.rx_bytes += frame_len; 3535 priv->dev->stats.rx_bytes += frame_len;
3529 } 3536 }
3530 entry = next_entry;
3531 } 3537 }
3532 3538
3533 stmmac_rx_refill(priv, queue); 3539 stmmac_rx_refill(priv, queue);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 5174d318901e..0a920c5936b2 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -3657,12 +3657,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
3657 3657
3658 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, 3658 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
3659 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); 3659 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
3660 if (ret) 3660 if (ret) {
3661 of_node_put(interfaces);
3661 return ret; 3662 return ret;
3663 }
3662 3664
3663 ret = netcp_txpipe_open(&gbe_dev->tx_pipe); 3665 ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
3664 if (ret) 3666 if (ret) {
3667 of_node_put(interfaces);
3665 return ret; 3668 return ret;
3669 }
3666 3670
3667 /* Create network interfaces */ 3671 /* Create network interfaces */
3668 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); 3672 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index ec7e7ec24ff9..4041c75997ba 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev)
1575 ret = of_address_to_resource(np, 0, &dmares); 1575 ret = of_address_to_resource(np, 0, &dmares);
1576 if (ret) { 1576 if (ret) {
1577 dev_err(&pdev->dev, "unable to get DMA resource\n"); 1577 dev_err(&pdev->dev, "unable to get DMA resource\n");
1578 of_node_put(np);
1578 goto free_netdev; 1579 goto free_netdev;
1579 } 1580 }
1580 lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares); 1581 lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
1581 if (IS_ERR(lp->dma_regs)) { 1582 if (IS_ERR(lp->dma_regs)) {
1582 dev_err(&pdev->dev, "could not map DMA regs\n"); 1583 dev_err(&pdev->dev, "could not map DMA regs\n");
1583 ret = PTR_ERR(lp->dma_regs); 1584 ret = PTR_ERR(lp->dma_regs);
1585 of_node_put(np);
1584 goto free_netdev; 1586 goto free_netdev;
1585 } 1587 }
1586 lp->rx_irq = irq_of_parse_and_map(np, 1); 1588 lp->rx_irq = irq_of_parse_and_map(np, 1);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e859ae2e42d5..49f41b64077b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -987,6 +987,7 @@ struct netvsc_device {
987 987
988 wait_queue_head_t wait_drain; 988 wait_queue_head_t wait_drain;
989 bool destroy; 989 bool destroy;
990 bool tx_disable; /* if true, do not wake up queue again */
990 991
991 /* Receive buffer allocated by us but manages by NetVSP */ 992 /* Receive buffer allocated by us but manages by NetVSP */
992 void *recv_buf; 993 void *recv_buf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 813d195bbd57..e0dce373cdd9 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
110 110
111 init_waitqueue_head(&net_device->wait_drain); 111 init_waitqueue_head(&net_device->wait_drain);
112 net_device->destroy = false; 112 net_device->destroy = false;
113 net_device->tx_disable = false;
113 114
114 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; 115 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
115 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; 116 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
719 } else { 720 } else {
720 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); 721 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
721 722
722 if (netif_tx_queue_stopped(txq) && 723 if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
723 (hv_get_avail_to_write_percent(&channel->outbound) > 724 (hv_get_avail_to_write_percent(&channel->outbound) >
724 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { 725 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
725 netif_tx_wake_queue(txq); 726 netif_tx_wake_queue(txq);
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
874 } else if (ret == -EAGAIN) { 875 } else if (ret == -EAGAIN) {
875 netif_tx_stop_queue(txq); 876 netif_tx_stop_queue(txq);
876 ndev_ctx->eth_stats.stop_queue++; 877 ndev_ctx->eth_stats.stop_queue++;
877 if (atomic_read(&nvchan->queue_sends) < 1) { 878 if (atomic_read(&nvchan->queue_sends) < 1 &&
879 !net_device->tx_disable) {
878 netif_tx_wake_queue(txq); 880 netif_tx_wake_queue(txq);
879 ndev_ctx->eth_stats.wake_queue++; 881 ndev_ctx->eth_stats.wake_queue++;
880 ret = -ENOSPC; 882 ret = -ENOSPC;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cf4897043e83..b20fb0fb595b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
109 rcu_read_unlock(); 109 rcu_read_unlock();
110} 110}
111 111
112static void netvsc_tx_enable(struct netvsc_device *nvscdev,
113 struct net_device *ndev)
114{
115 nvscdev->tx_disable = false;
116 virt_wmb(); /* ensure queue wake up mechanism is on */
117
118 netif_tx_wake_all_queues(ndev);
119}
120
112static int netvsc_open(struct net_device *net) 121static int netvsc_open(struct net_device *net)
113{ 122{
114 struct net_device_context *ndev_ctx = netdev_priv(net); 123 struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
129 rdev = nvdev->extension; 138 rdev = nvdev->extension;
130 if (!rdev->link_state) { 139 if (!rdev->link_state) {
131 netif_carrier_on(net); 140 netif_carrier_on(net);
132 netif_tx_wake_all_queues(net); 141 netvsc_tx_enable(nvdev, net);
133 } 142 }
134 143
135 if (vf_netdev) { 144 if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
184 } 193 }
185} 194}
186 195
196static void netvsc_tx_disable(struct netvsc_device *nvscdev,
197 struct net_device *ndev)
198{
199 if (nvscdev) {
200 nvscdev->tx_disable = true;
201 virt_wmb(); /* ensure txq will not wake up after stop */
202 }
203
204 netif_tx_disable(ndev);
205}
206
187static int netvsc_close(struct net_device *net) 207static int netvsc_close(struct net_device *net)
188{ 208{
189 struct net_device_context *net_device_ctx = netdev_priv(net); 209 struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
192 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 212 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
193 int ret; 213 int ret;
194 214
195 netif_tx_disable(net); 215 netvsc_tx_disable(nvdev, net);
196 216
197 /* No need to close rndis filter if it is removed already */ 217 /* No need to close rndis filter if it is removed already */
198 if (!nvdev) 218 if (!nvdev)
@@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev,
920 940
921 /* If device was up (receiving) then shutdown */ 941 /* If device was up (receiving) then shutdown */
922 if (netif_running(ndev)) { 942 if (netif_running(ndev)) {
923 netif_tx_disable(ndev); 943 netvsc_tx_disable(nvdev, ndev);
924 944
925 ret = rndis_filter_close(nvdev); 945 ret = rndis_filter_close(nvdev);
926 if (ret) { 946 if (ret) {
@@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w)
1908 if (rdev->link_state) { 1928 if (rdev->link_state) {
1909 rdev->link_state = false; 1929 rdev->link_state = false;
1910 netif_carrier_on(net); 1930 netif_carrier_on(net);
1911 netif_tx_wake_all_queues(net); 1931 netvsc_tx_enable(net_device, net);
1912 } else { 1932 } else {
1913 notify = true; 1933 notify = true;
1914 } 1934 }
@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
1918 if (!rdev->link_state) { 1938 if (!rdev->link_state) {
1919 rdev->link_state = true; 1939 rdev->link_state = true;
1920 netif_carrier_off(net); 1940 netif_carrier_off(net);
1921 netif_tx_stop_all_queues(net); 1941 netvsc_tx_disable(net_device, net);
1922 } 1942 }
1923 kfree(event); 1943 kfree(event);
1924 break; 1944 break;
@@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w)
1927 if (!rdev->link_state) { 1947 if (!rdev->link_state) {
1928 rdev->link_state = true; 1948 rdev->link_state = true;
1929 netif_carrier_off(net); 1949 netif_carrier_off(net);
1930 netif_tx_stop_all_queues(net); 1950 netvsc_tx_disable(net_device, net);
1931 event->event = RNDIS_STATUS_MEDIA_CONNECT; 1951 event->event = RNDIS_STATUS_MEDIA_CONNECT;
1932 spin_lock_irqsave(&ndev_ctx->lock, flags); 1952 spin_lock_irqsave(&ndev_ctx->lock, flags);
1933 list_add(&event->list, &ndev_ctx->reconfig_events); 1953 list_add(&event->list, &ndev_ctx->reconfig_events);
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index cd1d8faccca5..cd6b95e673a5 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi)
1268 INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work); 1268 INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
1269 lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev), 1269 lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
1270 WQ_MEM_RECLAIM); 1270 WQ_MEM_RECLAIM);
1271 if (unlikely(!lp->wqueue)) {
1272 ret = -ENOMEM;
1273 goto err_hw_init;
1274 }
1271 1275
1272 ret = adf7242_hw_init(lp); 1276 ret = adf7242_hw_init(lp);
1273 if (ret) 1277 if (ret)
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index b6743f03dce0..3b88846de31b 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -324,7 +324,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
324 goto out_err; 324 goto out_err;
325 } 325 }
326 326
327 genlmsg_reply(skb, info); 327 res = genlmsg_reply(skb, info);
328 break; 328 break;
329 } 329 }
330 330
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 071869db44cf..520657945b82 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -7,6 +7,8 @@ menuconfig MDIO_DEVICE
7 help 7 help
8 MDIO devices and driver infrastructure code. 8 MDIO devices and driver infrastructure code.
9 9
10if MDIO_DEVICE
11
10config MDIO_BUS 12config MDIO_BUS
11 tristate 13 tristate
12 default m if PHYLIB=m 14 default m if PHYLIB=m
@@ -179,6 +181,7 @@ config MDIO_XGENE
179 APM X-Gene SoC's. 181 APM X-Gene SoC's.
180 182
181endif 183endif
184endif
182 185
183config PHYLINK 186config PHYLINK
184 tristate 187 tristate
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 9605d4fe540b..cb86a3e90c7d 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -323,6 +323,19 @@ static int bcm54xx_config_init(struct phy_device *phydev)
323 323
324 bcm54xx_phydsp_config(phydev); 324 bcm54xx_phydsp_config(phydev);
325 325
326 /* Encode link speed into LED1 and LED3 pair (green/amber).
327 * Also flash these two LEDs on activity. This means configuring
328 * them for MULTICOLOR and encoding link/activity into them.
329 */
330 val = BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_MULTICOLOR1) |
331 BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_MULTICOLOR1);
332 bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1, val);
333
334 val = BCM_LED_MULTICOLOR_IN_PHASE |
335 BCM5482_SHD_LEDS1_LED1(BCM_LED_MULTICOLOR_LINK_ACT) |
336 BCM5482_SHD_LEDS1_LED3(BCM_LED_MULTICOLOR_LINK_ACT);
337 bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
338
326 return 0; 339 return 0;
327} 340}
328 341
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index bbd8c22067f3..97d45bd5b38e 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -15,6 +15,8 @@
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16 16
17#define DP83822_PHY_ID 0x2000a240 17#define DP83822_PHY_ID 0x2000a240
18#define DP83825I_PHY_ID 0x2000a150
19
18#define DP83822_DEVADDR 0x1f 20#define DP83822_DEVADDR 0x1f
19 21
20#define MII_DP83822_PHYSCR 0x11 22#define MII_DP83822_PHYSCR 0x11
@@ -304,26 +306,30 @@ static int dp83822_resume(struct phy_device *phydev)
304 return 0; 306 return 0;
305} 307}
306 308
309#define DP83822_PHY_DRIVER(_id, _name) \
310 { \
311 PHY_ID_MATCH_MODEL(_id), \
312 .name = (_name), \
313 .features = PHY_BASIC_FEATURES, \
314 .soft_reset = dp83822_phy_reset, \
315 .config_init = dp83822_config_init, \
316 .get_wol = dp83822_get_wol, \
317 .set_wol = dp83822_set_wol, \
318 .ack_interrupt = dp83822_ack_interrupt, \
319 .config_intr = dp83822_config_intr, \
320 .suspend = dp83822_suspend, \
321 .resume = dp83822_resume, \
322 }
323
307static struct phy_driver dp83822_driver[] = { 324static struct phy_driver dp83822_driver[] = {
308 { 325 DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
309 .phy_id = DP83822_PHY_ID, 326 DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
310 .phy_id_mask = 0xfffffff0,
311 .name = "TI DP83822",
312 .features = PHY_BASIC_FEATURES,
313 .config_init = dp83822_config_init,
314 .soft_reset = dp83822_phy_reset,
315 .get_wol = dp83822_get_wol,
316 .set_wol = dp83822_set_wol,
317 .ack_interrupt = dp83822_ack_interrupt,
318 .config_intr = dp83822_config_intr,
319 .suspend = dp83822_suspend,
320 .resume = dp83822_resume,
321 },
322}; 327};
323module_phy_driver(dp83822_driver); 328module_phy_driver(dp83822_driver);
324 329
325static struct mdio_device_id __maybe_unused dp83822_tbl[] = { 330static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
326 { DP83822_PHY_ID, 0xfffffff0 }, 331 { DP83822_PHY_ID, 0xfffffff0 },
332 { DP83825I_PHY_ID, 0xfffffff0 },
327 { }, 333 { },
328}; 334};
329MODULE_DEVICE_TABLE(mdio, dp83822_tbl); 335MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
index a238388eb1a5..0eec2913c289 100644
--- a/drivers/net/phy/meson-gxl.c
+++ b/drivers/net/phy/meson-gxl.c
@@ -201,6 +201,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
201static int meson_gxl_config_intr(struct phy_device *phydev) 201static int meson_gxl_config_intr(struct phy_device *phydev)
202{ 202{
203 u16 val; 203 u16 val;
204 int ret;
204 205
205 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { 206 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
206 val = INTSRC_ANEG_PR 207 val = INTSRC_ANEG_PR
@@ -213,6 +214,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
213 val = 0; 214 val = 0;
214 } 215 }
215 216
217 /* Ack any pending IRQ */
218 ret = meson_gxl_ack_interrupt(phydev);
219 if (ret)
220 return ret;
221
216 return phy_write(phydev, INTSRC_MASK, val); 222 return phy_write(phydev, INTSRC_MASK, val);
217} 223}
218 224
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 49fdd1ee798e..77068c545de0 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1831,7 +1831,7 @@ int genphy_soft_reset(struct phy_device *phydev)
1831{ 1831{
1832 int ret; 1832 int ret;
1833 1833
1834 ret = phy_write(phydev, MII_BMCR, BMCR_RESET); 1834 ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
1835 if (ret < 0) 1835 if (ret < 0)
1836 return ret; 1836 return ret;
1837 1837
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 1d68921723dc..e9ca1c088d0b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1763,9 +1763,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1763 int skb_xdp = 1; 1763 int skb_xdp = 1;
1764 bool frags = tun_napi_frags_enabled(tfile); 1764 bool frags = tun_napi_frags_enabled(tfile);
1765 1765
1766 if (!(tun->dev->flags & IFF_UP))
1767 return -EIO;
1768
1769 if (!(tun->flags & IFF_NO_PI)) { 1766 if (!(tun->flags & IFF_NO_PI)) {
1770 if (len < sizeof(pi)) 1767 if (len < sizeof(pi))
1771 return -EINVAL; 1768 return -EINVAL;
@@ -1867,6 +1864,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1867 err = skb_copy_datagram_from_iter(skb, 0, from, len); 1864 err = skb_copy_datagram_from_iter(skb, 0, from, len);
1868 1865
1869 if (err) { 1866 if (err) {
1867 err = -EFAULT;
1868drop:
1870 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1869 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1871 kfree_skb(skb); 1870 kfree_skb(skb);
1872 if (frags) { 1871 if (frags) {
@@ -1874,7 +1873,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1874 mutex_unlock(&tfile->napi_mutex); 1873 mutex_unlock(&tfile->napi_mutex);
1875 } 1874 }
1876 1875
1877 return -EFAULT; 1876 return err;
1878 } 1877 }
1879 } 1878 }
1880 1879
@@ -1958,6 +1957,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1958 !tfile->detached) 1957 !tfile->detached)
1959 rxhash = __skb_get_hash_symmetric(skb); 1958 rxhash = __skb_get_hash_symmetric(skb);
1960 1959
1960 rcu_read_lock();
1961 if (unlikely(!(tun->dev->flags & IFF_UP))) {
1962 err = -EIO;
1963 rcu_read_unlock();
1964 goto drop;
1965 }
1966
1961 if (frags) { 1967 if (frags) {
1962 /* Exercise flow dissector code path. */ 1968 /* Exercise flow dissector code path. */
1963 u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb)); 1969 u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
@@ -1965,6 +1971,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1965 if (unlikely(headlen > skb_headlen(skb))) { 1971 if (unlikely(headlen > skb_headlen(skb))) {
1966 this_cpu_inc(tun->pcpu_stats->rx_dropped); 1972 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1967 napi_free_frags(&tfile->napi); 1973 napi_free_frags(&tfile->napi);
1974 rcu_read_unlock();
1968 mutex_unlock(&tfile->napi_mutex); 1975 mutex_unlock(&tfile->napi_mutex);
1969 WARN_ON(1); 1976 WARN_ON(1);
1970 return -ENOMEM; 1977 return -ENOMEM;
@@ -1992,6 +1999,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1992 } else { 1999 } else {
1993 netif_rx_ni(skb); 2000 netif_rx_ni(skb);
1994 } 2001 }
2002 rcu_read_unlock();
1995 2003
1996 stats = get_cpu_ptr(tun->pcpu_stats); 2004 stats = get_cpu_ptr(tun->pcpu_stats);
1997 u64_stats_update_begin(&stats->syncp); 2005 u64_stats_update_begin(&stats->syncp);
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 820a2fe7d027..aff995be2a31 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -1301,6 +1301,20 @@ static const struct driver_info trendnet_info = {
1301 .tx_fixup = aqc111_tx_fixup, 1301 .tx_fixup = aqc111_tx_fixup,
1302}; 1302};
1303 1303
1304static const struct driver_info qnap_info = {
1305 .description = "QNAP QNA-UC5G1T USB to 5GbE Adapter",
1306 .bind = aqc111_bind,
1307 .unbind = aqc111_unbind,
1308 .status = aqc111_status,
1309 .link_reset = aqc111_link_reset,
1310 .reset = aqc111_reset,
1311 .stop = aqc111_stop,
1312 .flags = FLAG_ETHER | FLAG_FRAMING_AX |
1313 FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
1314 .rx_fixup = aqc111_rx_fixup,
1315 .tx_fixup = aqc111_tx_fixup,
1316};
1317
1304static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) 1318static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
1305{ 1319{
1306 struct usbnet *dev = usb_get_intfdata(intf); 1320 struct usbnet *dev = usb_get_intfdata(intf);
@@ -1455,6 +1469,7 @@ static const struct usb_device_id products[] = {
1455 {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, 1469 {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
1456 {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, 1470 {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
1457 {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)}, 1471 {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
1472 {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)},
1458 { },/* END */ 1473 { },/* END */
1459}; 1474};
1460MODULE_DEVICE_TABLE(usb, products); 1475MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 5512a1038721..3e9b2c319e45 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -851,6 +851,14 @@ static const struct usb_device_id products[] = {
851 .driver_info = 0, 851 .driver_info = 0,
852}, 852},
853 853
854/* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */
855{
856 USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM,
857 USB_CDC_SUBCLASS_ETHERNET,
858 USB_CDC_PROTO_NONE),
859 .driver_info = 0,
860},
861
854/* WHITELIST!!! 862/* WHITELIST!!!
855 * 863 *
856 * CDC Ether uses two interfaces, not necessarily consecutive. 864 * CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 74bebbdb4b15..9195f3476b1d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
1203 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 1203 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
1204 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ 1204 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
1205 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ 1205 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1206 {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
1206 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ 1207 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
1207 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 1208 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
1208 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 1209 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7c1430ed0244..6d1a1abbed27 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,6 +1273,7 @@ static void vrf_setup(struct net_device *dev)
1273 1273
1274 /* default to no qdisc; user can add if desired */ 1274 /* default to no qdisc; user can add if desired */
1275 dev->priv_flags |= IFF_NO_QUEUE; 1275 dev->priv_flags |= IFF_NO_QUEUE;
1276 dev->priv_flags |= IFF_NO_RX_HANDLER;
1276 1277
1277 dev->min_mtu = 0; 1278 dev->min_mtu = 0;
1278 dev->max_mtu = 0; 1279 dev->max_mtu = 0;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 077f1b9f2761..d76dfed8d9bb 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -4335,10 +4335,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
4335 /* If vxlan->dev is in the same netns, it has already been added 4335 /* If vxlan->dev is in the same netns, it has already been added
4336 * to the list by the previous loop. 4336 * to the list by the previous loop.
4337 */ 4337 */
4338 if (!net_eq(dev_net(vxlan->dev), net)) { 4338 if (!net_eq(dev_net(vxlan->dev), net))
4339 gro_cells_destroy(&vxlan->gro_cells);
4340 unregister_netdevice_queue(vxlan->dev, head); 4339 unregister_netdevice_queue(vxlan->dev, head);
4341 }
4342 } 4340 }
4343 4341
4344 for (h = 0; h < PORT_HASH_SIZE; ++h) 4342 for (h = 0; h < PORT_HASH_SIZE; ++h)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index e9822a3ec373..94132cfd1f56 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -460,9 +460,7 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
460static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index, 460static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
461 struct cfg80211_pmsr_result *res) 461 struct cfg80211_pmsr_result *res)
462{ 462{
463 s64 rtt_avg = res->ftm.rtt_avg * 100; 463 s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
464
465 do_div(rtt_avg, 6666);
466 464
467 IWL_DEBUG_INFO(mvm, "entry %d\n", index); 465 IWL_DEBUG_INFO(mvm, "entry %d\n", index);
468 IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status); 466 IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 6eedc0ec7661..76629b98c78d 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -130,6 +130,8 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
130static void 130static void
131mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q) 131mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
132{ 132{
133 iowrite32(q->desc_dma, &q->regs->desc_base);
134 iowrite32(q->ndesc, &q->regs->ring_size);
133 q->head = ioread32(&q->regs->dma_idx); 135 q->head = ioread32(&q->regs->dma_idx);
134 q->tail = q->head; 136 q->tail = q->head;
135 iowrite32(q->head, &q->regs->cpu_idx); 137 iowrite32(q->head, &q->regs->cpu_idx);
@@ -180,7 +182,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
180 else 182 else
181 mt76_dma_sync_idx(dev, q); 183 mt76_dma_sync_idx(dev, q);
182 184
183 wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; 185 wake = wake && q->stopped &&
186 qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
187 if (wake)
188 q->stopped = false;
184 189
185 if (!q->queued) 190 if (!q->queued)
186 wake_up(&dev->tx_wait); 191 wake_up(&dev->tx_wait);
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index a033745adb2f..316167404729 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -679,19 +679,15 @@ out:
679 return ret; 679 return ret;
680} 680}
681 681
682static void 682void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
683mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, 683 struct ieee80211_sta *sta)
684 struct ieee80211_sta *sta)
685{ 684{
686 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; 685 struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
687 int idx = wcid->idx; 686 int i, idx = wcid->idx;
688 int i;
689 687
690 rcu_assign_pointer(dev->wcid[idx], NULL); 688 rcu_assign_pointer(dev->wcid[idx], NULL);
691 synchronize_rcu(); 689 synchronize_rcu();
692 690
693 mutex_lock(&dev->mutex);
694
695 if (dev->drv->sta_remove) 691 if (dev->drv->sta_remove)
696 dev->drv->sta_remove(dev, vif, sta); 692 dev->drv->sta_remove(dev, vif, sta);
697 693
@@ -699,7 +695,15 @@ mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
699 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 695 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
700 mt76_txq_remove(dev, sta->txq[i]); 696 mt76_txq_remove(dev, sta->txq[i]);
701 mt76_wcid_free(dev->wcid_mask, idx); 697 mt76_wcid_free(dev->wcid_mask, idx);
698}
699EXPORT_SYMBOL_GPL(__mt76_sta_remove);
702 700
701static void
702mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
703 struct ieee80211_sta *sta)
704{
705 mutex_lock(&dev->mutex);
706 __mt76_sta_remove(dev, vif, sta);
703 mutex_unlock(&dev->mutex); 707 mutex_unlock(&dev->mutex);
704} 708}
705 709
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 5dfb0601f101..bcbfd3c4a44b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -126,6 +126,7 @@ struct mt76_queue {
126 int ndesc; 126 int ndesc;
127 int queued; 127 int queued;
128 int buf_size; 128 int buf_size;
129 bool stopped;
129 130
130 u8 buf_offset; 131 u8 buf_offset;
131 u8 hw_idx; 132 u8 hw_idx;
@@ -143,6 +144,7 @@ struct mt76_mcu_ops {
143 const struct mt76_reg_pair *rp, int len); 144 const struct mt76_reg_pair *rp, int len);
144 int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base, 145 int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
145 struct mt76_reg_pair *rp, int len); 146 struct mt76_reg_pair *rp, int len);
147 int (*mcu_restart)(struct mt76_dev *dev);
146}; 148};
147 149
148struct mt76_queue_ops { 150struct mt76_queue_ops {
@@ -693,6 +695,8 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
693 struct ieee80211_sta *sta, 695 struct ieee80211_sta *sta,
694 enum ieee80211_sta_state old_state, 696 enum ieee80211_sta_state old_state,
695 enum ieee80211_sta_state new_state); 697 enum ieee80211_sta_state new_state);
698void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
699 struct ieee80211_sta *sta);
696 700
697struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb); 701struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
698 702
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
index afcd86f735b4..4dcb465095d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
@@ -135,8 +135,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
135 135
136out: 136out:
137 mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false); 137 mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
138 if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > 138 if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
139 __sw_hweight8(dev->beacon_mask))
140 dev->beacon_check++; 139 dev->beacon_check++;
141} 140}
142 141
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index d69e82c66ab2..b3ae0aaea62a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -27,12 +27,16 @@ static void
27mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb) 27mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
28{ 28{
29 __le32 *txd = (__le32 *)skb->data; 29 __le32 *txd = (__le32 *)skb->data;
30 struct ieee80211_hdr *hdr;
31 struct ieee80211_sta *sta;
30 struct mt7603_sta *msta; 32 struct mt7603_sta *msta;
31 struct mt76_wcid *wcid; 33 struct mt76_wcid *wcid;
34 void *priv;
32 int idx; 35 int idx;
33 u32 val; 36 u32 val;
37 u8 tid;
34 38
35 if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr)) 39 if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
36 goto free; 40 goto free;
37 41
38 val = le32_to_cpu(txd[1]); 42 val = le32_to_cpu(txd[1]);
@@ -46,10 +50,19 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
46 if (!wcid) 50 if (!wcid)
47 goto free; 51 goto free;
48 52
49 msta = container_of(wcid, struct mt7603_sta, wcid); 53 priv = msta = container_of(wcid, struct mt7603_sta, wcid);
50 val = le32_to_cpu(txd[0]); 54 val = le32_to_cpu(txd[0]);
51 skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val)); 55 skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
52 56
57 val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
58 val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
59 txd[0] = cpu_to_le32(val);
60
61 sta = container_of(priv, struct ieee80211_sta, drv_priv);
62 hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
63 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
64 ieee80211_sta_set_buffered(sta, tid, true);
65
53 spin_lock_bh(&dev->ps_lock); 66 spin_lock_bh(&dev->ps_lock);
54 __skb_queue_tail(&msta->psq, skb); 67 __skb_queue_tail(&msta->psq, skb);
55 if (skb_queue_len(&msta->psq) >= 64) { 68 if (skb_queue_len(&msta->psq) >= 64) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index 15cc8f33b34d..d54dda67d036 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -112,7 +112,7 @@ static void
112mt7603_phy_init(struct mt7603_dev *dev) 112mt7603_phy_init(struct mt7603_dev *dev)
113{ 113{
114 int rx_chains = dev->mt76.antenna_mask; 114 int rx_chains = dev->mt76.antenna_mask;
115 int tx_chains = __sw_hweight8(rx_chains) - 1; 115 int tx_chains = hweight8(rx_chains) - 1;
116 116
117 mt76_rmw(dev, MT_WF_RMAC_RMCR, 117 mt76_rmw(dev, MT_WF_RMAC_RMCR,
118 (MT_WF_RMAC_RMCR_SMPS_MODE | 118 (MT_WF_RMAC_RMCR_SMPS_MODE |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 0a0115861b51..5e31d7da96fc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1072,7 +1072,7 @@ out:
1072 case MT_PHY_TYPE_HT: 1072 case MT_PHY_TYPE_HT:
1073 final_rate_flags |= IEEE80211_TX_RC_MCS; 1073 final_rate_flags |= IEEE80211_TX_RC_MCS;
1074 final_rate &= GENMASK(5, 0); 1074 final_rate &= GENMASK(5, 0);
1075 if (i > 15) 1075 if (final_rate > 15)
1076 return false; 1076 return false;
1077 break; 1077 break;
1078 default: 1078 default:
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index b10775ed92e6..cc0fe0933b2d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -5,6 +5,7 @@
5#include <linux/pci.h> 5#include <linux/pci.h>
6#include <linux/module.h> 6#include <linux/module.h>
7#include "mt7603.h" 7#include "mt7603.h"
8#include "mac.h"
8#include "eeprom.h" 9#include "eeprom.h"
9 10
10static int 11static int
@@ -386,6 +387,15 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
386} 387}
387 388
388static void 389static void
390mt7603_ps_set_more_data(struct sk_buff *skb)
391{
392 struct ieee80211_hdr *hdr;
393
394 hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
395 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
396}
397
398static void
389mt7603_release_buffered_frames(struct ieee80211_hw *hw, 399mt7603_release_buffered_frames(struct ieee80211_hw *hw,
390 struct ieee80211_sta *sta, 400 struct ieee80211_sta *sta,
391 u16 tids, int nframes, 401 u16 tids, int nframes,
@@ -399,6 +409,8 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
399 409
400 __skb_queue_head_init(&list); 410 __skb_queue_head_init(&list);
401 411
412 mt7603_wtbl_set_ps(dev, msta, false);
413
402 spin_lock_bh(&dev->ps_lock); 414 spin_lock_bh(&dev->ps_lock);
403 skb_queue_walk_safe(&msta->psq, skb, tmp) { 415 skb_queue_walk_safe(&msta->psq, skb, tmp) {
404 if (!nframes) 416 if (!nframes)
@@ -409,11 +421,15 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
409 421
410 skb_set_queue_mapping(skb, MT_TXQ_PSD); 422 skb_set_queue_mapping(skb, MT_TXQ_PSD);
411 __skb_unlink(skb, &msta->psq); 423 __skb_unlink(skb, &msta->psq);
424 mt7603_ps_set_more_data(skb);
412 __skb_queue_tail(&list, skb); 425 __skb_queue_tail(&list, skb);
413 nframes--; 426 nframes--;
414 } 427 }
415 spin_unlock_bh(&dev->ps_lock); 428 spin_unlock_bh(&dev->ps_lock);
416 429
430 if (!skb_queue_empty(&list))
431 ieee80211_sta_eosp(sta);
432
417 mt7603_ps_tx_list(dev, &list); 433 mt7603_ps_tx_list(dev, &list);
418 434
419 if (nframes) 435 if (nframes)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
index 4b0713f1fd5e..d06905ea8cc6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
@@ -433,7 +433,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
433{ 433{
434 struct cfg80211_chan_def *chandef = &dev->mt76.chandef; 434 struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
435 struct ieee80211_hw *hw = mt76_hw(dev); 435 struct ieee80211_hw *hw = mt76_hw(dev);
436 int n_chains = __sw_hweight8(dev->mt76.antenna_mask); 436 int n_chains = hweight8(dev->mt76.antenna_mask);
437 struct { 437 struct {
438 u8 control_chan; 438 u8 control_chan;
439 u8 center_chan; 439 u8 center_chan;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
index e13fea80d970..b920be1f5718 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/soc.c
@@ -23,9 +23,9 @@ mt76_wmac_probe(struct platform_device *pdev)
23 } 23 }
24 24
25 mem_base = devm_ioremap_resource(&pdev->dev, res); 25 mem_base = devm_ioremap_resource(&pdev->dev, res);
26 if (!mem_base) { 26 if (IS_ERR(mem_base)) {
27 dev_err(&pdev->dev, "Failed to get memory resource\n"); 27 dev_err(&pdev->dev, "Failed to get memory resource\n");
28 return -EINVAL; 28 return PTR_ERR(mem_base);
29 } 29 }
30 30
31 mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops, 31 mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
index 0290ba5869a5..736f81752b5b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
@@ -46,7 +46,7 @@ static const struct mt76_reg_pair common_mac_reg_table[] = {
46 { MT_MM20_PROT_CFG, 0x01742004 }, 46 { MT_MM20_PROT_CFG, 0x01742004 },
47 { MT_MM40_PROT_CFG, 0x03f42084 }, 47 { MT_MM40_PROT_CFG, 0x03f42084 },
48 { MT_TXOP_CTRL_CFG, 0x0000583f }, 48 { MT_TXOP_CTRL_CFG, 0x0000583f },
49 { MT_TX_RTS_CFG, 0x00092b20 }, 49 { MT_TX_RTS_CFG, 0x00ffff20 },
50 { MT_EXP_ACK_TIME, 0x002400ca }, 50 { MT_EXP_ACK_TIME, 0x002400ca },
51 { MT_TXOP_HLDR_ET, 0x00000002 }, 51 { MT_TXOP_HLDR_ET, 0x00000002 },
52 { MT_XIFS_TIME_CFG, 0x33a41010 }, 52 { MT_XIFS_TIME_CFG, 0x33a41010 },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index 91718647da02..e5a06f74a6f7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -229,7 +229,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
229 struct usb_device *usb_dev = interface_to_usbdev(usb_intf); 229 struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
230 struct mt76x02_dev *dev; 230 struct mt76x02_dev *dev;
231 struct mt76_dev *mdev; 231 struct mt76_dev *mdev;
232 u32 asic_rev, mac_rev; 232 u32 mac_rev;
233 int ret; 233 int ret;
234 234
235 mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops, 235 mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
@@ -262,10 +262,14 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
262 goto err; 262 goto err;
263 } 263 }
264 264
265 asic_rev = mt76_rr(dev, MT_ASIC_VERSION); 265 mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
266 mac_rev = mt76_rr(dev, MT_MAC_CSR0); 266 mac_rev = mt76_rr(dev, MT_MAC_CSR0);
267 dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n", 267 dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
268 asic_rev, mac_rev); 268 mdev->rev, mac_rev);
269 if (!is_mt76x0(dev)) {
270 ret = -ENODEV;
271 goto err;
272 }
269 273
270 /* Note: vendor driver skips this check for MT76X0U */ 274 /* Note: vendor driver skips this check for MT76X0U */
271 if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) 275 if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
index 6915cce5def9..07061eb4d1e1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
@@ -51,6 +51,7 @@ struct mt76x02_calibration {
51 u16 false_cca; 51 u16 false_cca;
52 s8 avg_rssi_all; 52 s8 avg_rssi_all;
53 s8 agc_gain_adjust; 53 s8 agc_gain_adjust;
54 s8 agc_lowest_gain;
54 s8 low_gain; 55 s8 low_gain;
55 56
56 s8 temp_vco; 57 s8 temp_vco;
@@ -114,8 +115,11 @@ struct mt76x02_dev {
114 struct mt76x02_dfs_pattern_detector dfs_pd; 115 struct mt76x02_dfs_pattern_detector dfs_pd;
115 116
116 /* edcca monitor */ 117 /* edcca monitor */
118 unsigned long ed_trigger_timeout;
117 bool ed_tx_blocked; 119 bool ed_tx_blocked;
118 bool ed_monitor; 120 bool ed_monitor;
121 u8 ed_monitor_enabled;
122 u8 ed_monitor_learning;
119 u8 ed_trigger; 123 u8 ed_trigger;
120 u8 ed_silent; 124 u8 ed_silent;
121 ktime_t ed_time; 125 ktime_t ed_time;
@@ -188,6 +192,13 @@ void mt76x02_mac_start(struct mt76x02_dev *dev);
188 192
189void mt76x02_init_debugfs(struct mt76x02_dev *dev); 193void mt76x02_init_debugfs(struct mt76x02_dev *dev);
190 194
195static inline bool is_mt76x0(struct mt76x02_dev *dev)
196{
197 return mt76_chip(&dev->mt76) == 0x7610 ||
198 mt76_chip(&dev->mt76) == 0x7630 ||
199 mt76_chip(&dev->mt76) == 0x7650;
200}
201
191static inline bool is_mt76x2(struct mt76x02_dev *dev) 202static inline bool is_mt76x2(struct mt76x02_dev *dev)
192{ 203{
193 return mt76_chip(&dev->mt76) == 0x7612 || 204 return mt76_chip(&dev->mt76) == 0x7612 ||
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
index 7580c5c986ff..b1d6fd4861e3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
@@ -116,6 +116,32 @@ static int read_agc(struct seq_file *file, void *data)
116 return 0; 116 return 0;
117} 117}
118 118
119static int
120mt76_edcca_set(void *data, u64 val)
121{
122 struct mt76x02_dev *dev = data;
123 enum nl80211_dfs_regions region = dev->dfs_pd.region;
124
125 dev->ed_monitor_enabled = !!val;
126 dev->ed_monitor = dev->ed_monitor_enabled &&
127 region == NL80211_DFS_ETSI;
128 mt76x02_edcca_init(dev, true);
129
130 return 0;
131}
132
133static int
134mt76_edcca_get(void *data, u64 *val)
135{
136 struct mt76x02_dev *dev = data;
137
138 *val = dev->ed_monitor_enabled;
139 return 0;
140}
141
142DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set,
143 "%lld\n");
144
119void mt76x02_init_debugfs(struct mt76x02_dev *dev) 145void mt76x02_init_debugfs(struct mt76x02_dev *dev)
120{ 146{
121 struct dentry *dir; 147 struct dentry *dir;
@@ -127,6 +153,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
127 debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp); 153 debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
128 debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc); 154 debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
129 155
156 debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca);
130 debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat); 157 debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
131 debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat); 158 debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
132 debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir, 159 debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
index e4649103efd4..17d12d212d1b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
@@ -885,7 +885,8 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
885 if (dfs_pd->region != region) { 885 if (dfs_pd->region != region) {
886 tasklet_disable(&dfs_pd->dfs_tasklet); 886 tasklet_disable(&dfs_pd->dfs_tasklet);
887 887
888 dev->ed_monitor = region == NL80211_DFS_ETSI; 888 dev->ed_monitor = dev->ed_monitor_enabled &&
889 region == NL80211_DFS_ETSI;
889 mt76x02_edcca_init(dev, true); 890 mt76x02_edcca_init(dev, true);
890 891
891 dfs_pd->region = region; 892 dfs_pd->region = region;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 91ff6598eccf..9ed231abe916 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -67,12 +67,39 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
67} 67}
68EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup); 68EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
69 69
70void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
71 struct ieee80211_key_conf *key)
72{
73 enum mt76x02_cipher_type cipher;
74 u8 key_data[32];
75 u32 iv, eiv;
76 u64 pn;
77
78 cipher = mt76x02_mac_get_key_info(key, key_data);
79 iv = mt76_rr(dev, MT_WCID_IV(idx));
80 eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
81
82 pn = (u64)eiv << 16;
83 if (cipher == MT_CIPHER_TKIP) {
84 pn |= (iv >> 16) & 0xff;
85 pn |= (iv & 0xff) << 8;
86 } else if (cipher >= MT_CIPHER_AES_CCMP) {
87 pn |= iv & 0xffff;
88 } else {
89 return;
90 }
91
92 atomic64_set(&key->tx_pn, pn);
93}
94
95
70int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, 96int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
71 struct ieee80211_key_conf *key) 97 struct ieee80211_key_conf *key)
72{ 98{
73 enum mt76x02_cipher_type cipher; 99 enum mt76x02_cipher_type cipher;
74 u8 key_data[32]; 100 u8 key_data[32];
75 u8 iv_data[8]; 101 u8 iv_data[8];
102 u64 pn;
76 103
77 cipher = mt76x02_mac_get_key_info(key, key_data); 104 cipher = mt76x02_mac_get_key_info(key, key_data);
78 if (cipher == MT_CIPHER_NONE && key) 105 if (cipher == MT_CIPHER_NONE && key)
@@ -85,9 +112,22 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
85 if (key) { 112 if (key) {
86 mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE, 113 mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
87 !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); 114 !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
115
116 pn = atomic64_read(&key->tx_pn);
117
88 iv_data[3] = key->keyidx << 6; 118 iv_data[3] = key->keyidx << 6;
89 if (cipher >= MT_CIPHER_TKIP) 119 if (cipher >= MT_CIPHER_TKIP) {
90 iv_data[3] |= 0x20; 120 iv_data[3] |= 0x20;
121 put_unaligned_le32(pn >> 16, &iv_data[4]);
122 }
123
124 if (cipher == MT_CIPHER_TKIP) {
125 iv_data[0] = (pn >> 8) & 0xff;
126 iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
127 iv_data[2] = pn & 0xff;
128 } else if (cipher >= MT_CIPHER_AES_CCMP) {
129 put_unaligned_le16((pn & 0xffff), &iv_data[0]);
130 }
91 } 131 }
92 132
93 mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); 133 mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
@@ -920,6 +960,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
920 } 960 }
921 } 961 }
922 mt76x02_edcca_tx_enable(dev, true); 962 mt76x02_edcca_tx_enable(dev, true);
963 dev->ed_monitor_learning = true;
923 964
924 /* clear previous CCA timer value */ 965 /* clear previous CCA timer value */
925 mt76_rr(dev, MT_ED_CCA_TIMER); 966 mt76_rr(dev, MT_ED_CCA_TIMER);
@@ -929,6 +970,10 @@ EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
929 970
930#define MT_EDCCA_TH 92 971#define MT_EDCCA_TH 92
931#define MT_EDCCA_BLOCK_TH 2 972#define MT_EDCCA_BLOCK_TH 2
973#define MT_EDCCA_LEARN_TH 50
974#define MT_EDCCA_LEARN_CCA 180
975#define MT_EDCCA_LEARN_TIMEOUT (20 * HZ)
976
932static void mt76x02_edcca_check(struct mt76x02_dev *dev) 977static void mt76x02_edcca_check(struct mt76x02_dev *dev)
933{ 978{
934 ktime_t cur_time; 979 ktime_t cur_time;
@@ -951,11 +996,23 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev)
951 dev->ed_trigger = 0; 996 dev->ed_trigger = 0;
952 } 997 }
953 998
954 if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && 999 if (dev->cal.agc_lowest_gain &&
955 !dev->ed_tx_blocked) 1000 dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
1001 dev->ed_trigger > MT_EDCCA_LEARN_TH) {
1002 dev->ed_monitor_learning = false;
1003 dev->ed_trigger_timeout = jiffies + 20 * HZ;
1004 } else if (!dev->ed_monitor_learning &&
1005 time_is_after_jiffies(dev->ed_trigger_timeout)) {
1006 dev->ed_monitor_learning = true;
1007 mt76x02_edcca_tx_enable(dev, true);
1008 }
1009
1010 if (dev->ed_monitor_learning)
1011 return;
1012
1013 if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
956 mt76x02_edcca_tx_enable(dev, false); 1014 mt76x02_edcca_tx_enable(dev, false);
957 else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && 1015 else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
958 dev->ed_tx_blocked)
959 mt76x02_edcca_tx_enable(dev, true); 1016 mt76x02_edcca_tx_enable(dev, true);
960} 1017}
961 1018
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
index 6b1f25d2f64c..caeeef96c42f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
@@ -177,6 +177,8 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
177 u8 key_idx, struct ieee80211_key_conf *key); 177 u8 key_idx, struct ieee80211_key_conf *key);
178int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, 178int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
179 struct ieee80211_key_conf *key); 179 struct ieee80211_key_conf *key);
180void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
181 struct ieee80211_key_conf *key);
180void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx, 182void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
181 u8 *mac); 183 u8 *mac);
182void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop); 184void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index 1229f19f2b02..daaed1220147 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -19,6 +19,7 @@
19#include <linux/irq.h> 19#include <linux/irq.h>
20 20
21#include "mt76x02.h" 21#include "mt76x02.h"
22#include "mt76x02_mcu.h"
22#include "mt76x02_trace.h" 23#include "mt76x02_trace.h"
23 24
24struct beacon_bc_data { 25struct beacon_bc_data {
@@ -418,9 +419,66 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
418 return i < 4; 419 return i < 4;
419} 420}
420 421
422static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
423 struct ieee80211_sta *sta,
424 struct ieee80211_key_conf *key, void *data)
425{
426 struct mt76x02_dev *dev = hw->priv;
427 struct mt76_wcid *wcid;
428
429 if (!sta)
430 return;
431
432 wcid = (struct mt76_wcid *) sta->drv_priv;
433
434 if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
435 return;
436
437 mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
438}
439
440static void mt76x02_reset_state(struct mt76x02_dev *dev)
441{
442 int i;
443
444 lockdep_assert_held(&dev->mt76.mutex);
445
446 clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
447
448 rcu_read_lock();
449 ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
450 rcu_read_unlock();
451
452 for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
453 struct ieee80211_sta *sta;
454 struct ieee80211_vif *vif;
455 struct mt76x02_sta *msta;
456 struct mt76_wcid *wcid;
457 void *priv;
458
459 wcid = rcu_dereference_protected(dev->mt76.wcid[i],
460 lockdep_is_held(&dev->mt76.mutex));
461 if (!wcid)
462 continue;
463
464 priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
465 sta = container_of(priv, struct ieee80211_sta, drv_priv);
466
467 priv = msta->vif;
468 vif = container_of(priv, struct ieee80211_vif, drv_priv);
469
470 __mt76_sta_remove(&dev->mt76, vif, sta);
471 memset(msta, 0, sizeof(*msta));
472 }
473
474 dev->vif_mask = 0;
475 dev->beacon_mask = 0;
476}
477
421static void mt76x02_watchdog_reset(struct mt76x02_dev *dev) 478static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
422{ 479{
423 u32 mask = dev->mt76.mmio.irqmask; 480 u32 mask = dev->mt76.mmio.irqmask;
481 bool restart = dev->mt76.mcu_ops->mcu_restart;
424 int i; 482 int i;
425 483
426 ieee80211_stop_queues(dev->mt76.hw); 484 ieee80211_stop_queues(dev->mt76.hw);
@@ -434,6 +492,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
434 492
435 mutex_lock(&dev->mt76.mutex); 493 mutex_lock(&dev->mt76.mutex);
436 494
495 if (restart)
496 mt76x02_reset_state(dev);
497
437 if (dev->beacon_mask) 498 if (dev->beacon_mask)
438 mt76_clear(dev, MT_BEACON_TIME_CFG, 499 mt76_clear(dev, MT_BEACON_TIME_CFG,
439 MT_BEACON_TIME_CFG_BEACON_TX | 500 MT_BEACON_TIME_CFG_BEACON_TX |
@@ -452,20 +513,21 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
452 /* let fw reset DMA */ 513 /* let fw reset DMA */
453 mt76_set(dev, 0x734, 0x3); 514 mt76_set(dev, 0x734, 0x3);
454 515
516 if (restart)
517 dev->mt76.mcu_ops->mcu_restart(&dev->mt76);
518
455 for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++) 519 for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
456 mt76_queue_tx_cleanup(dev, i, true); 520 mt76_queue_tx_cleanup(dev, i, true);
457 521
458 for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++) 522 for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
459 mt76_queue_rx_reset(dev, i); 523 mt76_queue_rx_reset(dev, i);
460 524
461 mt76_wr(dev, MT_MAC_SYS_CTRL, 525 mt76x02_mac_start(dev);
462 MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); 526
463 mt76_set(dev, MT_WPDMA_GLO_CFG,
464 MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
465 if (dev->ed_monitor) 527 if (dev->ed_monitor)
466 mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN); 528 mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
467 529
468 if (dev->beacon_mask) 530 if (dev->beacon_mask && !restart)
469 mt76_set(dev, MT_BEACON_TIME_CFG, 531 mt76_set(dev, MT_BEACON_TIME_CFG,
470 MT_BEACON_TIME_CFG_BEACON_TX | 532 MT_BEACON_TIME_CFG_BEACON_TX |
471 MT_BEACON_TIME_CFG_TBTT_EN); 533 MT_BEACON_TIME_CFG_TBTT_EN);
@@ -486,9 +548,13 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
486 napi_schedule(&dev->mt76.napi[i]); 548 napi_schedule(&dev->mt76.napi[i]);
487 } 549 }
488 550
489 ieee80211_wake_queues(dev->mt76.hw); 551 if (restart) {
490 552 mt76x02_mcu_function_select(dev, Q_SELECT, 1);
491 mt76_txq_schedule_all(&dev->mt76); 553 ieee80211_restart_hw(dev->mt76.hw);
554 } else {
555 ieee80211_wake_queues(dev->mt76.hw);
556 mt76_txq_schedule_all(&dev->mt76);
557 }
492} 558}
493 559
494static void mt76x02_check_tx_hang(struct mt76x02_dev *dev) 560static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
index a020c757ba5c..a54b63a96eae 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
@@ -194,6 +194,8 @@ bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
194 ret = true; 194 ret = true;
195 } 195 }
196 196
197 dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit;
198
197 return ret; 199 return ret;
198} 200}
199EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain); 201EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
index 43f07461c8d3..6fb52b596d42 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
@@ -85,8 +85,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
85 85
86 mt76x02_insert_hdr_pad(skb); 86 mt76x02_insert_hdr_pad(skb);
87 87
88 txwi = skb_push(skb, sizeof(struct mt76x02_txwi)); 88 txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
89 mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len); 89 mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
90 skb_push(skb, sizeof(struct mt76x02_txwi));
90 91
91 pid = mt76_tx_status_skb_add(mdev, wcid, skb); 92 pid = mt76_tx_status_skb_add(mdev, wcid, skb);
92 txwi->pktid = pid; 93 txwi->pktid = pid;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index a48c261b0c63..cd072ac614f7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -237,6 +237,8 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
237 struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; 237 struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
238 int idx = 0; 238 int idx = 0;
239 239
240 memset(msta, 0, sizeof(*msta));
241
240 idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid)); 242 idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
241 if (idx < 0) 243 if (idx < 0)
242 return -ENOSPC; 244 return -ENOSPC;
@@ -274,6 +276,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
274 struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; 276 struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
275 struct mt76_txq *mtxq; 277 struct mt76_txq *mtxq;
276 278
279 memset(mvif, 0, sizeof(*mvif));
280
277 mvif->idx = idx; 281 mvif->idx = idx;
278 mvif->group_wcid.idx = MT_VIF_WCID(idx); 282 mvif->group_wcid.idx = MT_VIF_WCID(idx);
279 mvif->group_wcid.hw_key_idx = -1; 283 mvif->group_wcid.hw_key_idx = -1;
@@ -289,6 +293,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
289 struct mt76x02_dev *dev = hw->priv; 293 struct mt76x02_dev *dev = hw->priv;
290 unsigned int idx = 0; 294 unsigned int idx = 0;
291 295
296 /* Allow to change address in HW if we create first interface. */
297 if (!dev->vif_mask &&
298 (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
299 memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
300 mt76x02_mac_setaddr(dev, vif->addr);
301
292 if (vif->addr[0] & BIT(1)) 302 if (vif->addr[0] & BIT(1))
293 idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7); 303 idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
294 304
@@ -311,10 +321,6 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
311 if (dev->vif_mask & BIT(idx)) 321 if (dev->vif_mask & BIT(idx))
312 return -EBUSY; 322 return -EBUSY;
313 323
314 /* Allow to change address in HW if we create first interface. */
315 if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr))
316 mt76x02_mac_setaddr(dev, vif->addr);
317
318 dev->vif_mask |= BIT(idx); 324 dev->vif_mask |= BIT(idx);
319 325
320 mt76x02_vif_init(dev, vif, idx); 326 mt76x02_vif_init(dev, vif, idx);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
index f8534362e2c8..a30ef2c5a9db 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c
@@ -106,7 +106,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
106 { MT_TX_SW_CFG1, 0x00010000 }, 106 { MT_TX_SW_CFG1, 0x00010000 },
107 { MT_TX_SW_CFG2, 0x00000000 }, 107 { MT_TX_SW_CFG2, 0x00000000 },
108 { MT_TXOP_CTRL_CFG, 0x0400583f }, 108 { MT_TXOP_CTRL_CFG, 0x0400583f },
109 { MT_TX_RTS_CFG, 0x00100020 }, 109 { MT_TX_RTS_CFG, 0x00ffff20 },
110 { MT_TX_TIMEOUT_CFG, 0x000a2290 }, 110 { MT_TX_TIMEOUT_CFG, 0x000a2290 },
111 { MT_TX_RETRY_CFG, 0x47f01f0f }, 111 { MT_TX_RETRY_CFG, 0x47f01f0f },
112 { MT_EXP_ACK_TIME, 0x002c00dc }, 112 { MT_EXP_ACK_TIME, 0x002c00dc },
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
index 6c619f1c65c9..d7abe3d73bad 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
@@ -71,6 +71,7 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
71 71
72void mt76x2_cleanup(struct mt76x02_dev *dev); 72void mt76x2_cleanup(struct mt76x02_dev *dev);
73 73
74int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard);
74void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable); 75void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
75void mt76x2_init_txpower(struct mt76x02_dev *dev, 76void mt76x2_init_txpower(struct mt76x02_dev *dev,
76 struct ieee80211_supported_band *sband); 77 struct ieee80211_supported_band *sband);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
index 984d9c4c2e1a..d3927a13e92e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
@@ -77,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
77 } 77 }
78} 78}
79 79
80static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) 80int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
81{ 81{
82 const u8 *macaddr = dev->mt76.macaddr; 82 const u8 *macaddr = dev->mt76.macaddr;
83 u32 val; 83 u32 val;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
index 03e24ae7f66c..605dc66ae83b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
@@ -165,9 +165,30 @@ error:
165 return -ENOENT; 165 return -ENOENT;
166} 166}
167 167
168static int
169mt76pci_mcu_restart(struct mt76_dev *mdev)
170{
171 struct mt76x02_dev *dev;
172 int ret;
173
174 dev = container_of(mdev, struct mt76x02_dev, mt76);
175
176 mt76x02_mcu_cleanup(dev);
177 mt76x2_mac_reset(dev, true);
178
179 ret = mt76pci_load_firmware(dev);
180 if (ret)
181 return ret;
182
183 mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
184
185 return 0;
186}
187
168int mt76x2_mcu_init(struct mt76x02_dev *dev) 188int mt76x2_mcu_init(struct mt76x02_dev *dev)
169{ 189{
170 static const struct mt76_mcu_ops mt76x2_mcu_ops = { 190 static const struct mt76_mcu_ops mt76x2_mcu_ops = {
191 .mcu_restart = mt76pci_mcu_restart,
171 .mcu_send_msg = mt76x02_mcu_msg_send, 192 .mcu_send_msg = mt76x02_mcu_msg_send,
172 }; 193 };
173 int ret; 194 int ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
index 1848e8ab2e21..769a9b972044 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
@@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
260 gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; 260 gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
261 gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust; 261 gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
262 262
263 if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40) 263 val = 0x1836 << 16;
264 if (!mt76x2_has_ext_lna(dev) &&
265 dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
264 val = 0x1e42 << 16; 266 val = 0x1e42 << 16;
265 else 267
266 val = 0x1836 << 16; 268 if (mt76x2_has_ext_lna(dev) &&
269 dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
270 dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
271 val = 0x0f36 << 16;
267 272
268 val |= 0xf8; 273 val |= 0xf8;
269 274
@@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
280{ 285{
281 u8 *gain = dev->cal.agc_gain_init; 286 u8 *gain = dev->cal.agc_gain_init;
282 u8 low_gain_delta, gain_delta; 287 u8 low_gain_delta, gain_delta;
288 u32 agc_35, agc_37;
283 bool gain_change; 289 bool gain_change;
284 int low_gain; 290 int low_gain;
285 u32 val; 291 u32 val;
@@ -318,6 +324,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
318 else 324 else
319 low_gain_delta = 14; 325 low_gain_delta = 14;
320 326
327 agc_37 = 0x2121262c;
328 if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
329 agc_35 = 0x11111516;
330 else if (low_gain == 2)
331 agc_35 = agc_37 = 0x08080808;
332 else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
333 agc_35 = 0x10101014;
334 else
335 agc_35 = 0x11111116;
336
321 if (low_gain == 2) { 337 if (low_gain == 2) {
322 mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); 338 mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
323 mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); 339 mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
@@ -326,15 +342,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
326 dev->cal.agc_gain_adjust = 0; 342 dev->cal.agc_gain_adjust = 0;
327 } else { 343 } else {
328 mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); 344 mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
329 if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
330 mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
331 else
332 mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
333 mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
334 gain_delta = 0; 345 gain_delta = 0;
335 dev->cal.agc_gain_adjust = low_gain_delta; 346 dev->cal.agc_gain_adjust = low_gain_delta;
336 } 347 }
337 348
349 mt76_wr(dev, MT_BBP(AGC, 35), agc_35);
350 mt76_wr(dev, MT_BBP(AGC, 37), agc_37);
351
338 dev->cal.agc_gain_cur[0] = gain[0] - gain_delta; 352 dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
339 dev->cal.agc_gain_cur[1] = gain[1] - gain_delta; 353 dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
340 mt76x2_phy_set_gain_val(dev); 354 mt76x2_phy_set_gain_val(dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
index ddb6b2c48e01..ac0f13d46299 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
@@ -21,11 +21,10 @@
21#include "mt76x2u.h" 21#include "mt76x2u.h"
22 22
23static const struct usb_device_id mt76x2u_device_table[] = { 23static const struct usb_device_id mt76x2u_device_table[] = {
24 { USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */
25 { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */ 24 { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
26 { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */ 25 { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
27 { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */ 26 { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
28 { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */ 27 { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
29 { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */ 28 { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
30 { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */ 29 { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
31 { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */ 30 { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
@@ -66,6 +65,10 @@ static int mt76x2u_probe(struct usb_interface *intf,
66 65
67 mdev->rev = mt76_rr(dev, MT_ASIC_VERSION); 66 mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
68 dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev); 67 dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
68 if (!is_mt76x2(dev)) {
69 err = -ENODEV;
70 goto err;
71 }
69 72
70 err = mt76x2u_register_device(dev); 73 err = mt76x2u_register_device(dev);
71 if (err < 0) 74 if (err < 0)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
index 5e84b4535cb1..3b82345756ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
@@ -93,7 +93,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev)
93 mt76_wr(dev, MT_TX_LINK_CFG, 0x1020); 93 mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
94 mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13); 94 mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
95 mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00); 95 mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
96 mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
97 96
98 mt76_wr(dev, MT_WMM_AIFSN, 0x2273); 97 mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
99 mt76_wr(dev, MT_WMM_CWMIN, 0x2344); 98 mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 5a349fe3e576..2585df512335 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -289,8 +289,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
289 dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta); 289 dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
290 dev->queue_ops->kick(dev, q); 290 dev->queue_ops->kick(dev, q);
291 291
292 if (q->queued > q->ndesc - 8) 292 if (q->queued > q->ndesc - 8 && !q->stopped) {
293 ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); 293 ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
294 q->stopped = true;
295 }
296
294 spin_unlock_bh(&q->lock); 297 spin_unlock_bh(&q->lock);
295} 298}
296EXPORT_SYMBOL_GPL(mt76_tx); 299EXPORT_SYMBOL_GPL(mt76_tx);
@@ -374,7 +377,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
374 if (last_skb) { 377 if (last_skb) {
375 mt76_queue_ps_skb(dev, sta, last_skb, true); 378 mt76_queue_ps_skb(dev, sta, last_skb, true);
376 dev->queue_ops->kick(dev, hwq); 379 dev->queue_ops->kick(dev, hwq);
380 } else {
381 ieee80211_sta_eosp(sta);
377 } 382 }
383
378 spin_unlock_bh(&hwq->lock); 384 spin_unlock_bh(&hwq->lock);
379} 385}
380EXPORT_SYMBOL_GPL(mt76_release_buffered_frames); 386EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
@@ -577,6 +583,9 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
577 struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv; 583 struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
578 struct mt76_queue *hwq = mtxq->hwq; 584 struct mt76_queue *hwq = mtxq->hwq;
579 585
586 if (!test_bit(MT76_STATE_RUNNING, &dev->state))
587 return;
588
580 spin_lock_bh(&hwq->lock); 589 spin_lock_bh(&hwq->lock);
581 if (list_empty(&mtxq->list)) 590 if (list_empty(&mtxq->list))
582 list_add_tail(&mtxq->list, &hwq->swq); 591 list_add_tail(&mtxq->list, &hwq->swq);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index ae6ada370597..4c1abd492405 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -655,7 +655,11 @@ static void mt76u_tx_tasklet(unsigned long data)
655 spin_lock_bh(&q->lock); 655 spin_lock_bh(&q->lock);
656 } 656 }
657 mt76_txq_schedule(dev, q); 657 mt76_txq_schedule(dev, q);
658 wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8; 658
659 wake = q->stopped && q->queued < q->ndesc - 8;
660 if (wake)
661 q->stopped = false;
662
659 if (!q->queued) 663 if (!q->queued)
660 wake_up(&dev->tx_wait); 664 wake_up(&dev->tx_wait);
661 665
diff --git a/drivers/net/wireless/mediatek/mt7601u/usb.c b/drivers/net/wireless/mediatek/mt7601u/usb.c
index d8b7863f7926..6ae7f14dc9bf 100644
--- a/drivers/net/wireless/mediatek/mt7601u/usb.c
+++ b/drivers/net/wireless/mediatek/mt7601u/usb.c
@@ -303,6 +303,10 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
303 mac_rev = mt7601u_rr(dev, MT_MAC_CSR0); 303 mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
304 dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n", 304 dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
305 asic_rev, mac_rev); 305 asic_rev, mac_rev);
306 if ((asic_rev >> 16) != 0x7601) {
307 ret = -ENODEV;
308 goto err;
309 }
306 310
307 /* Note: vendor driver skips this check for MT7601U */ 311 /* Note: vendor driver skips this check for MT7601U */
308 if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) 312 if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c
index 2ad263f708da..bb57ec239029 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c
@@ -180,7 +180,7 @@ int ndev_mw_to_bar(struct intel_ntb_dev *ndev, int idx)
180 return ndev->reg->mw_bar[idx]; 180 return ndev->reg->mw_bar[idx];
181} 181}
182 182
183static inline int ndev_db_addr(struct intel_ntb_dev *ndev, 183void ndev_db_addr(struct intel_ntb_dev *ndev,
184 phys_addr_t *db_addr, resource_size_t *db_size, 184 phys_addr_t *db_addr, resource_size_t *db_size,
185 phys_addr_t reg_addr, unsigned long reg) 185 phys_addr_t reg_addr, unsigned long reg)
186{ 186{
@@ -196,8 +196,6 @@ static inline int ndev_db_addr(struct intel_ntb_dev *ndev,
196 *db_size = ndev->reg->db_size; 196 *db_size = ndev->reg->db_size;
197 dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size); 197 dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size);
198 } 198 }
199
200 return 0;
201} 199}
202 200
203u64 ndev_db_read(struct intel_ntb_dev *ndev, 201u64 ndev_db_read(struct intel_ntb_dev *ndev,
@@ -1111,13 +1109,28 @@ int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
1111 ndev->self_reg->db_mask); 1109 ndev->self_reg->db_mask);
1112} 1110}
1113 1111
1114int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr, 1112static int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
1115 resource_size_t *db_size) 1113 resource_size_t *db_size, u64 *db_data, int db_bit)
1116{ 1114{
1115 u64 db_bits;
1117 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 1116 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
1118 1117
1119 return ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr, 1118 if (unlikely(db_bit >= BITS_PER_LONG_LONG))
1119 return -EINVAL;
1120
1121 db_bits = BIT_ULL(db_bit);
1122
1123 if (unlikely(db_bits & ~ntb_ndev(ntb)->db_valid_mask))
1124 return -EINVAL;
1125
1126 ndev_db_addr(ndev, db_addr, db_size, ndev->peer_addr,
1120 ndev->peer_reg->db_bell); 1127 ndev->peer_reg->db_bell);
1128
1129 if (db_data)
1130 *db_data = db_bits;
1131
1132
1133 return 0;
1121} 1134}
1122 1135
1123static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) 1136static int intel_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.h b/drivers/ntb/hw/intel/ntb_hw_gen1.h
index ad8ec1444436..544cf5c06f4d 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.h
@@ -147,6 +147,9 @@ extern struct intel_b2b_addr xeon_b2b_dsd_addr;
147int ndev_init_isr(struct intel_ntb_dev *ndev, int msix_min, int msix_max, 147int ndev_init_isr(struct intel_ntb_dev *ndev, int msix_min, int msix_max,
148 int msix_shift, int total_shift); 148 int msix_shift, int total_shift);
149enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd); 149enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd);
150void ndev_db_addr(struct intel_ntb_dev *ndev,
151 phys_addr_t *db_addr, resource_size_t *db_size,
152 phys_addr_t reg_addr, unsigned long reg);
150u64 ndev_db_read(struct intel_ntb_dev *ndev, void __iomem *mmio); 153u64 ndev_db_read(struct intel_ntb_dev *ndev, void __iomem *mmio);
151int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits, 154int ndev_db_write(struct intel_ntb_dev *ndev, u64 db_bits,
152 void __iomem *mmio); 155 void __iomem *mmio);
@@ -166,8 +169,6 @@ int intel_ntb_db_vector_count(struct ntb_dev *ntb);
166u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector); 169u64 intel_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector);
167int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits); 170int intel_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits);
168int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits); 171int intel_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits);
169int intel_ntb_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
170 resource_size_t *db_size);
171int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb); 172int intel_ntb_spad_is_unsafe(struct ntb_dev *ntb);
172int intel_ntb_spad_count(struct ntb_dev *ntb); 173int intel_ntb_spad_count(struct ntb_dev *ntb);
173u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx); 174u32 intel_ntb_spad_read(struct ntb_dev *ntb, int idx);
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.c b/drivers/ntb/hw/intel/ntb_hw_gen3.c
index b3fa24778f94..f475b56a3f49 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen3.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen3.c
@@ -532,6 +532,37 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx,
532 return 0; 532 return 0;
533} 533}
534 534
535int intel_ntb3_peer_db_addr(struct ntb_dev *ntb, phys_addr_t *db_addr,
536 resource_size_t *db_size,
537 u64 *db_data, int db_bit)
538{
539 phys_addr_t db_addr_base;
540 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
541
542 if (unlikely(db_bit >= BITS_PER_LONG_LONG))
543 return -EINVAL;
544
545 if (unlikely(BIT_ULL(db_bit) & ~ntb_ndev(ntb)->db_valid_mask))
546 return -EINVAL;
547
548 ndev_db_addr(ndev, &db_addr_base, db_size, ndev->peer_addr,
549 ndev->peer_reg->db_bell);
550
551 if (db_addr) {
552 *db_addr = db_addr_base + (db_bit * 4);
553 dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx db bit %d\n",
554 *db_addr, db_bit);
555 }
556
557 if (db_data) {
558 *db_data = 1;
559 dev_dbg(&ndev->ntb.pdev->dev, "Peer db data %llx db bit %d\n",
560 *db_data, db_bit);
561 }
562
563 return 0;
564}
565
535static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits) 566static int intel_ntb3_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
536{ 567{
537 struct intel_ntb_dev *ndev = ntb_ndev(ntb); 568 struct intel_ntb_dev *ndev = ntb_ndev(ntb);
@@ -584,7 +615,7 @@ const struct ntb_dev_ops intel_ntb3_ops = {
584 .db_clear = intel_ntb3_db_clear, 615 .db_clear = intel_ntb3_db_clear,
585 .db_set_mask = intel_ntb_db_set_mask, 616 .db_set_mask = intel_ntb_db_set_mask,
586 .db_clear_mask = intel_ntb_db_clear_mask, 617 .db_clear_mask = intel_ntb_db_clear_mask,
587 .peer_db_addr = intel_ntb_peer_db_addr, 618 .peer_db_addr = intel_ntb3_peer_db_addr,
588 .peer_db_set = intel_ntb3_peer_db_set, 619 .peer_db_set = intel_ntb3_peer_db_set,
589 .spad_is_unsafe = intel_ntb_spad_is_unsafe, 620 .spad_is_unsafe = intel_ntb_spad_is_unsafe,
590 .spad_count = intel_ntb_spad_count, 621 .spad_count = intel_ntb_spad_count,
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index f2df2d39c65b..d905d368d28c 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -236,6 +236,7 @@ static void switchtec_ntb_mw_clr_direct(struct switchtec_ntb *sndev, int idx)
236 ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN; 236 ctl_val &= ~NTB_CTRL_BAR_DIR_WIN_EN;
237 iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); 237 iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
238 iowrite32(0, &ctl->bar_entry[bar].win_size); 238 iowrite32(0, &ctl->bar_entry[bar].win_size);
239 iowrite32(0, &ctl->bar_ext_entry[bar].win_size);
239 iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr); 240 iowrite64(sndev->self_partition, &ctl->bar_entry[bar].xlate_addr);
240} 241}
241 242
@@ -258,7 +259,9 @@ static void switchtec_ntb_mw_set_direct(struct switchtec_ntb *sndev, int idx,
258 ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN; 259 ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
259 260
260 iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); 261 iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
261 iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size); 262 iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
263 &ctl->bar_entry[bar].win_size);
264 iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
262 iowrite64(sndev->self_partition | addr, 265 iowrite64(sndev->self_partition | addr,
263 &ctl->bar_entry[bar].xlate_addr); 266 &ctl->bar_entry[bar].xlate_addr);
264} 267}
@@ -679,11 +682,16 @@ static u64 switchtec_ntb_db_read_mask(struct ntb_dev *ntb)
679 682
680static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb, 683static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
681 phys_addr_t *db_addr, 684 phys_addr_t *db_addr,
682 resource_size_t *db_size) 685 resource_size_t *db_size,
686 u64 *db_data,
687 int db_bit)
683{ 688{
684 struct switchtec_ntb *sndev = ntb_sndev(ntb); 689 struct switchtec_ntb *sndev = ntb_sndev(ntb);
685 unsigned long offset; 690 unsigned long offset;
686 691
692 if (unlikely(db_bit >= BITS_PER_LONG_LONG))
693 return -EINVAL;
694
687 offset = (unsigned long)sndev->mmio_peer_dbmsg->odb - 695 offset = (unsigned long)sndev->mmio_peer_dbmsg->odb -
688 (unsigned long)sndev->stdev->mmio; 696 (unsigned long)sndev->stdev->mmio;
689 697
@@ -693,6 +701,8 @@ static int switchtec_ntb_peer_db_addr(struct ntb_dev *ntb,
693 *db_addr = pci_resource_start(ntb->pdev, 0) + offset; 701 *db_addr = pci_resource_start(ntb->pdev, 0) + offset;
694 if (db_size) 702 if (db_size)
695 *db_size = sizeof(u32); 703 *db_size = sizeof(u32);
704 if (db_data)
705 *db_data = BIT_ULL(db_bit) << sndev->db_peer_shift;
696 706
697 return 0; 707 return 0;
698} 708}
@@ -1025,7 +1035,9 @@ static int crosslink_setup_mws(struct switchtec_ntb *sndev, int ntb_lut_idx,
1025 ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN; 1035 ctl_val |= NTB_CTRL_BAR_DIR_WIN_EN;
1026 1036
1027 iowrite32(ctl_val, &ctl->bar_entry[bar].ctl); 1037 iowrite32(ctl_val, &ctl->bar_entry[bar].ctl);
1028 iowrite32(xlate_pos | size, &ctl->bar_entry[bar].win_size); 1038 iowrite32(xlate_pos | (lower_32_bits(size) & 0xFFFFF000),
1039 &ctl->bar_entry[bar].win_size);
1040 iowrite32(upper_32_bits(size), &ctl->bar_ext_entry[bar].win_size);
1029 iowrite64(sndev->peer_partition | addr, 1041 iowrite64(sndev->peer_partition | addr,
1030 &ctl->bar_entry[bar].xlate_addr); 1042 &ctl->bar_entry[bar].xlate_addr);
1031 } 1043 }
@@ -1092,7 +1104,7 @@ static int crosslink_enum_partition(struct switchtec_ntb *sndev,
1092 1104
1093 dev_dbg(&sndev->stdev->dev, 1105 dev_dbg(&sndev->stdev->dev,
1094 "Crosslink BAR%d addr: %llx\n", 1106 "Crosslink BAR%d addr: %llx\n",
1095 i, bar_addr); 1107 i*2, bar_addr);
1096 1108
1097 if (bar_addr != bar_space * i) 1109 if (bar_addr != bar_space * i)
1098 continue; 1110 continue;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index 3bfdb4562408..d4f39ba1d976 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -144,7 +144,9 @@ struct ntb_transport_qp {
144 struct list_head tx_free_q; 144 struct list_head tx_free_q;
145 spinlock_t ntb_tx_free_q_lock; 145 spinlock_t ntb_tx_free_q_lock;
146 void __iomem *tx_mw; 146 void __iomem *tx_mw;
147 dma_addr_t tx_mw_phys; 147 phys_addr_t tx_mw_phys;
148 size_t tx_mw_size;
149 dma_addr_t tx_mw_dma_addr;
148 unsigned int tx_index; 150 unsigned int tx_index;
149 unsigned int tx_max_entry; 151 unsigned int tx_max_entry;
150 unsigned int tx_max_frame; 152 unsigned int tx_max_frame;
@@ -862,6 +864,9 @@ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
862 if (!nt->link_is_up) 864 if (!nt->link_is_up)
863 cancel_delayed_work_sync(&nt->link_work); 865 cancel_delayed_work_sync(&nt->link_work);
864 866
867 for (i = 0; i < nt->mw_count; i++)
868 ntb_free_mw(nt, i);
869
865 /* The scratchpad registers keep the values if the remote side 870 /* The scratchpad registers keep the values if the remote side
866 * goes down, blast them now to give them a sane value the next 871 * goes down, blast them now to give them a sane value the next
867 * time they are accessed 872 * time they are accessed
@@ -1049,6 +1054,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
1049 tx_size = (unsigned int)mw_size / num_qps_mw; 1054 tx_size = (unsigned int)mw_size / num_qps_mw;
1050 qp_offset = tx_size * (qp_num / mw_count); 1055 qp_offset = tx_size * (qp_num / mw_count);
1051 1056
1057 qp->tx_mw_size = tx_size;
1052 qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset; 1058 qp->tx_mw = nt->mw_vec[mw_num].vbase + qp_offset;
1053 if (!qp->tx_mw) 1059 if (!qp->tx_mw)
1054 return -EINVAL; 1060 return -EINVAL;
@@ -1644,7 +1650,7 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
1644 dma_cookie_t cookie; 1650 dma_cookie_t cookie;
1645 1651
1646 device = chan->device; 1652 device = chan->device;
1647 dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index; 1653 dest = qp->tx_mw_dma_addr + qp->tx_max_frame * entry->tx_index;
1648 buff_off = (size_t)buf & ~PAGE_MASK; 1654 buff_off = (size_t)buf & ~PAGE_MASK;
1649 dest_off = (size_t)dest & ~PAGE_MASK; 1655 dest_off = (size_t)dest & ~PAGE_MASK;
1650 1656
@@ -1863,6 +1869,18 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
1863 qp->rx_dma_chan = NULL; 1869 qp->rx_dma_chan = NULL;
1864 } 1870 }
1865 1871
1872 if (qp->tx_dma_chan) {
1873 qp->tx_mw_dma_addr =
1874 dma_map_resource(qp->tx_dma_chan->device->dev,
1875 qp->tx_mw_phys, qp->tx_mw_size,
1876 DMA_FROM_DEVICE, 0);
1877 if (dma_mapping_error(qp->tx_dma_chan->device->dev,
1878 qp->tx_mw_dma_addr)) {
1879 qp->tx_mw_dma_addr = 0;
1880 goto err1;
1881 }
1882 }
1883
1866 dev_dbg(&pdev->dev, "Using %s memcpy for TX\n", 1884 dev_dbg(&pdev->dev, "Using %s memcpy for TX\n",
1867 qp->tx_dma_chan ? "DMA" : "CPU"); 1885 qp->tx_dma_chan ? "DMA" : "CPU");
1868 1886
@@ -1904,6 +1922,10 @@ err1:
1904 qp->rx_alloc_entry = 0; 1922 qp->rx_alloc_entry = 0;
1905 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) 1923 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
1906 kfree(entry); 1924 kfree(entry);
1925 if (qp->tx_mw_dma_addr)
1926 dma_unmap_resource(qp->tx_dma_chan->device->dev,
1927 qp->tx_mw_dma_addr, qp->tx_mw_size,
1928 DMA_FROM_DEVICE, 0);
1907 if (qp->tx_dma_chan) 1929 if (qp->tx_dma_chan)
1908 dma_release_channel(qp->tx_dma_chan); 1930 dma_release_channel(qp->tx_dma_chan);
1909 if (qp->rx_dma_chan) 1931 if (qp->rx_dma_chan)
@@ -1945,6 +1967,11 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1945 */ 1967 */
1946 dma_sync_wait(chan, qp->last_cookie); 1968 dma_sync_wait(chan, qp->last_cookie);
1947 dmaengine_terminate_all(chan); 1969 dmaengine_terminate_all(chan);
1970
1971 dma_unmap_resource(chan->device->dev,
1972 qp->tx_mw_dma_addr, qp->tx_mw_size,
1973 DMA_FROM_DEVICE, 0);
1974
1948 dma_release_channel(chan); 1975 dma_release_channel(chan);
1949 } 1976 }
1950 1977
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index 521eaf53a52a..36be9b619187 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -47,6 +47,7 @@ static int e820_register_one(struct resource *res, void *data)
47 ndr_desc.res = res; 47 ndr_desc.res = res;
48 ndr_desc.attr_groups = e820_pmem_region_attribute_groups; 48 ndr_desc.attr_groups = e820_pmem_region_attribute_groups;
49 ndr_desc.numa_node = e820_range_to_nid(res->start); 49 ndr_desc.numa_node = e820_range_to_nid(res->start);
50 ndr_desc.target_node = ndr_desc.numa_node;
50 set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); 51 set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
51 if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc)) 52 if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
52 return -ENXIO; 53 return -ENXIO;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 379bf4305e61..a5ac3b240293 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -153,7 +153,7 @@ struct nd_region {
153 u16 ndr_mappings; 153 u16 ndr_mappings;
154 u64 ndr_size; 154 u64 ndr_size;
155 u64 ndr_start; 155 u64 ndr_start;
156 int id, num_lanes, ro, numa_node; 156 int id, num_lanes, ro, numa_node, target_node;
157 void *provider_data; 157 void *provider_data;
158 struct kernfs_node *bb_state; 158 struct kernfs_node *bb_state;
159 struct badblocks bb; 159 struct badblocks bb;
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 11b9821eba85..a0c8dcfa0bf9 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -68,6 +68,7 @@ static int of_pmem_region_probe(struct platform_device *pdev)
68 memset(&ndr_desc, 0, sizeof(ndr_desc)); 68 memset(&ndr_desc, 0, sizeof(ndr_desc));
69 ndr_desc.attr_groups = region_attr_groups; 69 ndr_desc.attr_groups = region_attr_groups;
70 ndr_desc.numa_node = dev_to_node(&pdev->dev); 70 ndr_desc.numa_node = dev_to_node(&pdev->dev);
71 ndr_desc.target_node = ndr_desc.numa_node;
71 ndr_desc.res = &pdev->resource[i]; 72 ndr_desc.res = &pdev->resource[i];
72 ndr_desc.of_node = np; 73 ndr_desc.of_node = np;
73 set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); 74 set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 3b58baa44b5c..b4ef7d9ff22e 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1072,6 +1072,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
1072 nd_region->flags = ndr_desc->flags; 1072 nd_region->flags = ndr_desc->flags;
1073 nd_region->ro = ro; 1073 nd_region->ro = ro;
1074 nd_region->numa_node = ndr_desc->numa_node; 1074 nd_region->numa_node = ndr_desc->numa_node;
1075 nd_region->target_node = ndr_desc->target_node;
1075 ida_init(&nd_region->ns_ida); 1076 ida_init(&nd_region->ns_ida);
1076 ida_init(&nd_region->btt_ida); 1077 ida_init(&nd_region->btt_ida);
1077 ida_init(&nd_region->pfn_ida); 1078 ida_init(&nd_region->pfn_ida);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 07bf2bff3a76..470601980794 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -179,8 +179,8 @@ static int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl)
179 int ret = 0; 179 int ret = 0;
180 180
181 /* 181 /*
182 * Keep a reference until the work is flushed since ->delete_ctrl 182 * Keep a reference until nvme_do_delete_ctrl() complete,
183 * can free the controller. 183 * since ->delete_ctrl can free the controller.
184 */ 184 */
185 nvme_get_ctrl(ctrl); 185 nvme_get_ctrl(ctrl);
186 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING)) 186 if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_DELETING))
@@ -1250,7 +1250,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
1250 if (ns) { 1250 if (ns) {
1251 if (ctrl->effects) 1251 if (ctrl->effects)
1252 effects = le32_to_cpu(ctrl->effects->iocs[opcode]); 1252 effects = le32_to_cpu(ctrl->effects->iocs[opcode]);
1253 if (effects & ~NVME_CMD_EFFECTS_CSUPP) 1253 if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
1254 dev_warn(ctrl->device, 1254 dev_warn(ctrl->device,
1255 "IO command:%02x has unhandled effects:%08x\n", 1255 "IO command:%02x has unhandled effects:%08x\n",
1256 opcode, effects); 1256 opcode, effects);
@@ -1495,10 +1495,10 @@ static void nvme_set_chunk_size(struct nvme_ns *ns)
1495 blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size)); 1495 blk_queue_chunk_sectors(ns->queue, rounddown_pow_of_two(chunk_size));
1496} 1496}
1497 1497
1498static void nvme_config_discard(struct nvme_ns *ns) 1498static void nvme_config_discard(struct gendisk *disk, struct nvme_ns *ns)
1499{ 1499{
1500 struct nvme_ctrl *ctrl = ns->ctrl; 1500 struct nvme_ctrl *ctrl = ns->ctrl;
1501 struct request_queue *queue = ns->queue; 1501 struct request_queue *queue = disk->queue;
1502 u32 size = queue_logical_block_size(queue); 1502 u32 size = queue_logical_block_size(queue);
1503 1503
1504 if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) { 1504 if (!(ctrl->oncs & NVME_CTRL_ONCS_DSM)) {
@@ -1526,12 +1526,13 @@ static void nvme_config_discard(struct nvme_ns *ns)
1526 blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); 1526 blk_queue_max_write_zeroes_sectors(queue, UINT_MAX);
1527} 1527}
1528 1528
1529static inline void nvme_config_write_zeroes(struct nvme_ns *ns) 1529static void nvme_config_write_zeroes(struct gendisk *disk, struct nvme_ns *ns)
1530{ 1530{
1531 u32 max_sectors; 1531 u32 max_sectors;
1532 unsigned short bs = 1 << ns->lba_shift; 1532 unsigned short bs = 1 << ns->lba_shift;
1533 1533
1534 if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES)) 1534 if (!(ns->ctrl->oncs & NVME_CTRL_ONCS_WRITE_ZEROES) ||
1535 (ns->ctrl->quirks & NVME_QUIRK_DISABLE_WRITE_ZEROES))
1535 return; 1536 return;
1536 /* 1537 /*
1537 * Even though NVMe spec explicitly states that MDTS is not 1538 * Even though NVMe spec explicitly states that MDTS is not
@@ -1548,13 +1549,7 @@ static inline void nvme_config_write_zeroes(struct nvme_ns *ns)
1548 else 1549 else
1549 max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9; 1550 max_sectors = ((u32)(ns->ctrl->max_hw_sectors + 1) * bs) >> 9;
1550 1551
1551 blk_queue_max_write_zeroes_sectors(ns->queue, max_sectors); 1552 blk_queue_max_write_zeroes_sectors(disk->queue, max_sectors);
1552}
1553
1554static inline void nvme_ns_config_oncs(struct nvme_ns *ns)
1555{
1556 nvme_config_discard(ns);
1557 nvme_config_write_zeroes(ns);
1558} 1553}
1559 1554
1560static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid, 1555static void nvme_report_ns_ids(struct nvme_ctrl *ctrl, unsigned int nsid,
@@ -1610,7 +1605,9 @@ static void nvme_update_disk_info(struct gendisk *disk,
1610 capacity = 0; 1605 capacity = 0;
1611 1606
1612 set_capacity(disk, capacity); 1607 set_capacity(disk, capacity);
1613 nvme_ns_config_oncs(ns); 1608
1609 nvme_config_discard(disk, ns);
1610 nvme_config_write_zeroes(disk, ns);
1614 1611
1615 if (id->nsattr & (1 << 0)) 1612 if (id->nsattr & (1 << 0))
1616 set_disk_ro(disk, true); 1613 set_disk_ro(disk, true);
@@ -3304,6 +3301,7 @@ static int nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
3304 mutex_lock(&ctrl->subsys->lock); 3301 mutex_lock(&ctrl->subsys->lock);
3305 list_del_rcu(&ns->siblings); 3302 list_del_rcu(&ns->siblings);
3306 mutex_unlock(&ctrl->subsys->lock); 3303 mutex_unlock(&ctrl->subsys->lock);
3304 nvme_put_ns_head(ns->head);
3307 out_free_id: 3305 out_free_id:
3308 kfree(id); 3306 kfree(id);
3309 out_free_queue: 3307 out_free_queue:
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b29b12498a1a..f3b9d91ba0df 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2107,7 +2107,7 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
2107 2107
2108 freq->sg_cnt = 0; 2108 freq->sg_cnt = 0;
2109 2109
2110 if (!blk_rq_payload_bytes(rq)) 2110 if (!blk_rq_nr_phys_segments(rq))
2111 return 0; 2111 return 0;
2112 2112
2113 freq->sg_table.sgl = freq->first_sgl; 2113 freq->sg_table.sgl = freq->first_sgl;
@@ -2304,12 +2304,23 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2304 if (ret) 2304 if (ret)
2305 return ret; 2305 return ret;
2306 2306
2307 data_len = blk_rq_payload_bytes(rq); 2307 /*
2308 if (data_len) 2308 * nvme core doesn't quite treat the rq opaquely. Commands such
2309 * as WRITE ZEROES will return a non-zero rq payload_bytes yet
2310 * there is no actual payload to be transferred.
2311 * To get it right, key data transmission on there being 1 or
2312 * more physical segments in the sg list. If there is no
2313 * physical segments, there is no payload.
2314 */
2315 if (blk_rq_nr_phys_segments(rq)) {
2316 data_len = blk_rq_payload_bytes(rq);
2309 io_dir = ((rq_data_dir(rq) == WRITE) ? 2317 io_dir = ((rq_data_dir(rq) == WRITE) ?
2310 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ); 2318 NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
2311 else 2319 } else {
2320 data_len = 0;
2312 io_dir = NVMEFC_FCP_NODATA; 2321 io_dir = NVMEFC_FCP_NODATA;
2322 }
2323
2313 2324
2314 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); 2325 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
2315} 2326}
@@ -2464,6 +2475,7 @@ static int
2464nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) 2475nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2465{ 2476{
2466 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; 2477 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
2478 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;
2467 unsigned int nr_io_queues; 2479 unsigned int nr_io_queues;
2468 int ret; 2480 int ret;
2469 2481
@@ -2476,6 +2488,13 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2476 return ret; 2488 return ret;
2477 } 2489 }
2478 2490
2491 if (!nr_io_queues && prior_ioq_cnt) {
2492 dev_info(ctrl->ctrl.device,
2493 "Fail Reconnect: At least 1 io queue "
2494 "required (was %d)\n", prior_ioq_cnt);
2495 return -ENOSPC;
2496 }
2497
2479 ctrl->ctrl.queue_count = nr_io_queues + 1; 2498 ctrl->ctrl.queue_count = nr_io_queues + 1;
2480 /* check for io queues existing */ 2499 /* check for io queues existing */
2481 if (ctrl->ctrl.queue_count == 1) 2500 if (ctrl->ctrl.queue_count == 1)
@@ -2489,6 +2508,10 @@ nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
2489 if (ret) 2508 if (ret)
2490 goto out_delete_hw_queues; 2509 goto out_delete_hw_queues;
2491 2510
2511 if (prior_ioq_cnt != nr_io_queues)
2512 dev_info(ctrl->ctrl.device,
2513 "reconnect: revising io queue count from %d to %d\n",
2514 prior_ioq_cnt, nr_io_queues);
2492 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); 2515 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
2493 2516
2494 return 0; 2517 return 0;
@@ -3006,7 +3029,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
3006 3029
3007 ctrl->ctrl.opts = opts; 3030 ctrl->ctrl.opts = opts;
3008 ctrl->ctrl.nr_reconnects = 0; 3031 ctrl->ctrl.nr_reconnects = 0;
3009 ctrl->ctrl.numa_node = dev_to_node(lport->dev); 3032 if (lport->dev)
3033 ctrl->ctrl.numa_node = dev_to_node(lport->dev);
3034 else
3035 ctrl->ctrl.numa_node = NUMA_NO_NODE;
3010 INIT_LIST_HEAD(&ctrl->ctrl_list); 3036 INIT_LIST_HEAD(&ctrl->ctrl_list);
3011 ctrl->lport = lport; 3037 ctrl->lport = lport;
3012 ctrl->rport = rport; 3038 ctrl->rport = rport;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 2839bb70badf..f0716f6ce41f 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -404,15 +404,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
404static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc, 404static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
405 struct nvme_ns *ns) 405 struct nvme_ns *ns)
406{ 406{
407 enum nvme_ana_state old;
408
409 mutex_lock(&ns->head->lock); 407 mutex_lock(&ns->head->lock);
410 old = ns->ana_state;
411 ns->ana_grpid = le32_to_cpu(desc->grpid); 408 ns->ana_grpid = le32_to_cpu(desc->grpid);
412 ns->ana_state = desc->state; 409 ns->ana_state = desc->state;
413 clear_bit(NVME_NS_ANA_PENDING, &ns->flags); 410 clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
414 411
415 if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old)) 412 if (nvme_state_is_live(ns->ana_state))
416 nvme_mpath_set_live(ns); 413 nvme_mpath_set_live(ns);
417 mutex_unlock(&ns->head->lock); 414 mutex_unlock(&ns->head->lock);
418} 415}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index b91f1838bbd5..527d64545023 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -87,6 +87,11 @@ enum nvme_quirks {
87 * Ignore device provided subnqn. 87 * Ignore device provided subnqn.
88 */ 88 */
89 NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8), 89 NVME_QUIRK_IGNORE_DEV_SUBNQN = (1 << 8),
90
91 /*
92 * Broken Write Zeroes.
93 */
94 NVME_QUIRK_DISABLE_WRITE_ZEROES = (1 << 9),
90}; 95};
91 96
92/* 97/*
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 92bad1c810ac..a90cf5d63aac 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2937,7 +2937,8 @@ static const struct pci_device_id nvme_id_table[] = {
2937 { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ 2937 { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */
2938 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, 2938 .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
2939 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ 2939 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
2940 .driver_data = NVME_QUIRK_IDENTIFY_CNS, }, 2940 .driver_data = NVME_QUIRK_IDENTIFY_CNS |
2941 NVME_QUIRK_DISABLE_WRITE_ZEROES, },
2941 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ 2942 { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */
2942 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, 2943 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
2943 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */ 2944 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 208ee518af65..68c49dd67210 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -463,6 +463,15 @@ static int nvme_tcp_handle_c2h_data(struct nvme_tcp_queue *queue,
463 463
464 queue->data_remaining = le32_to_cpu(pdu->data_length); 464 queue->data_remaining = le32_to_cpu(pdu->data_length);
465 465
466 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS &&
467 unlikely(!(pdu->hdr.flags & NVME_TCP_F_DATA_LAST))) {
468 dev_err(queue->ctrl->ctrl.device,
469 "queue %d tag %#x SUCCESS set but not last PDU\n",
470 nvme_tcp_queue_id(queue), rq->tag);
471 nvme_tcp_error_recovery(&queue->ctrl->ctrl);
472 return -EPROTO;
473 }
474
466 return 0; 475 return 0;
467 476
468} 477}
@@ -618,6 +627,14 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
618 return ret; 627 return ret;
619} 628}
620 629
630static inline void nvme_tcp_end_request(struct request *rq, u16 status)
631{
632 union nvme_result res = {};
633
634 nvme_end_request(rq, cpu_to_le16(status << 1), res);
635}
636
637
621static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb, 638static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
622 unsigned int *offset, size_t *len) 639 unsigned int *offset, size_t *len)
623{ 640{
@@ -685,6 +702,8 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
685 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst); 702 nvme_tcp_ddgst_final(queue->rcv_hash, &queue->exp_ddgst);
686 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH; 703 queue->ddgst_remaining = NVME_TCP_DIGEST_LENGTH;
687 } else { 704 } else {
705 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS)
706 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
688 nvme_tcp_init_recv_ctx(queue); 707 nvme_tcp_init_recv_ctx(queue);
689 } 708 }
690 } 709 }
@@ -695,6 +714,7 @@ static int nvme_tcp_recv_data(struct nvme_tcp_queue *queue, struct sk_buff *skb,
695static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue, 714static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
696 struct sk_buff *skb, unsigned int *offset, size_t *len) 715 struct sk_buff *skb, unsigned int *offset, size_t *len)
697{ 716{
717 struct nvme_tcp_data_pdu *pdu = (void *)queue->pdu;
698 char *ddgst = (char *)&queue->recv_ddgst; 718 char *ddgst = (char *)&queue->recv_ddgst;
699 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining); 719 size_t recv_len = min_t(size_t, *len, queue->ddgst_remaining);
700 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining; 720 off_t off = NVME_TCP_DIGEST_LENGTH - queue->ddgst_remaining;
@@ -718,6 +738,13 @@ static int nvme_tcp_recv_ddgst(struct nvme_tcp_queue *queue,
718 return -EIO; 738 return -EIO;
719 } 739 }
720 740
741 if (pdu->hdr.flags & NVME_TCP_F_DATA_SUCCESS) {
742 struct request *rq = blk_mq_tag_to_rq(nvme_tcp_tagset(queue),
743 pdu->command_id);
744
745 nvme_tcp_end_request(rq, NVME_SC_SUCCESS);
746 }
747
721 nvme_tcp_init_recv_ctx(queue); 748 nvme_tcp_init_recv_ctx(queue);
722 return 0; 749 return 0;
723} 750}
@@ -815,10 +842,7 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
815 842
816static void nvme_tcp_fail_request(struct nvme_tcp_request *req) 843static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
817{ 844{
818 union nvme_result res = {}; 845 nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_DATA_XFER_ERROR);
819
820 nvme_end_request(blk_mq_rq_from_pdu(req),
821 cpu_to_le16(NVME_SC_DATA_XFER_ERROR), res);
822} 846}
823 847
824static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) 848static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 58456de78bb2..5f24ea7a28eb 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -50,7 +50,19 @@ static const char *nvme_trace_admin_identify(struct trace_seq *p, u8 *cdw10)
50 return ret; 50 return ret;
51} 51}
52 52
53static const char *nvme_trace_admin_get_features(struct trace_seq *p,
54 u8 *cdw10)
55{
56 const char *ret = trace_seq_buffer_ptr(p);
57 u8 fid = cdw10[0];
58 u8 sel = cdw10[1] & 0x7;
59 u32 cdw11 = get_unaligned_le32(cdw10 + 4);
60
61 trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11);
62 trace_seq_putc(p, 0);
53 63
64 return ret;
65}
54 66
55static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10) 67static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
56{ 68{
@@ -101,6 +113,8 @@ const char *nvme_trace_parse_admin_cmd(struct trace_seq *p,
101 return nvme_trace_create_cq(p, cdw10); 113 return nvme_trace_create_cq(p, cdw10);
102 case nvme_admin_identify: 114 case nvme_admin_identify:
103 return nvme_trace_admin_identify(p, cdw10); 115 return nvme_trace_admin_identify(p, cdw10);
116 case nvme_admin_get_features:
117 return nvme_trace_admin_get_features(p, cdw10);
104 default: 118 default:
105 return nvme_trace_common(p, cdw10); 119 return nvme_trace_common(p, cdw10);
106 } 120 }
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 244d7c177e5a..97d3c77365b8 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -108,7 +108,7 @@ TRACE_EVENT(nvme_setup_cmd,
108 __entry->metadata = le64_to_cpu(cmd->common.metadata); 108 __entry->metadata = le64_to_cpu(cmd->common.metadata);
109 __assign_disk_name(__entry->disk, req->rq_disk); 109 __assign_disk_name(__entry->disk, req->rq_disk);
110 memcpy(__entry->cdw10, &cmd->common.cdw10, 110 memcpy(__entry->cdw10, &cmd->common.cdw10,
111 6 * sizeof(__entry->cdw10)); 111 sizeof(__entry->cdw10));
112 ), 112 ),
113 TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)", 113 TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
114 __entry->ctrl_id, __print_disk_name(__entry->disk), 114 __entry->ctrl_id, __print_disk_name(__entry->disk),
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index d44ede147263..b3e765a95af8 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -509,7 +509,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
509 509
510 ret = nvmet_p2pmem_ns_enable(ns); 510 ret = nvmet_p2pmem_ns_enable(ns);
511 if (ret) 511 if (ret)
512 goto out_unlock; 512 goto out_dev_disable;
513 513
514 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) 514 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
515 nvmet_p2pmem_ns_add_p2p(ctrl, ns); 515 nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -550,7 +550,7 @@ out_unlock:
550out_dev_put: 550out_dev_put:
551 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) 551 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
552 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid)); 552 pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
553 553out_dev_disable:
554 nvmet_ns_dev_disable(ns); 554 nvmet_ns_dev_disable(ns);
555 goto out_unlock; 555 goto out_unlock;
556} 556}
@@ -1163,6 +1163,15 @@ static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
1163 put_device(ctrl->p2p_client); 1163 put_device(ctrl->p2p_client);
1164} 1164}
1165 1165
1166static void nvmet_fatal_error_handler(struct work_struct *work)
1167{
1168 struct nvmet_ctrl *ctrl =
1169 container_of(work, struct nvmet_ctrl, fatal_err_work);
1170
1171 pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1172 ctrl->ops->delete_ctrl(ctrl);
1173}
1174
1166u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, 1175u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1167 struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp) 1176 struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
1168{ 1177{
@@ -1205,6 +1214,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1205 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work); 1214 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1206 INIT_LIST_HEAD(&ctrl->async_events); 1215 INIT_LIST_HEAD(&ctrl->async_events);
1207 INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL); 1216 INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1217 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1208 1218
1209 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); 1219 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
1210 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); 1220 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
@@ -1308,21 +1318,11 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
1308 kref_put(&ctrl->ref, nvmet_ctrl_free); 1318 kref_put(&ctrl->ref, nvmet_ctrl_free);
1309} 1319}
1310 1320
1311static void nvmet_fatal_error_handler(struct work_struct *work)
1312{
1313 struct nvmet_ctrl *ctrl =
1314 container_of(work, struct nvmet_ctrl, fatal_err_work);
1315
1316 pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1317 ctrl->ops->delete_ctrl(ctrl);
1318}
1319
1320void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) 1321void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
1321{ 1322{
1322 mutex_lock(&ctrl->lock); 1323 mutex_lock(&ctrl->lock);
1323 if (!(ctrl->csts & NVME_CSTS_CFS)) { 1324 if (!(ctrl->csts & NVME_CSTS_CFS)) {
1324 ctrl->csts |= NVME_CSTS_CFS; 1325 ctrl->csts |= NVME_CSTS_CFS;
1325 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1326 schedule_work(&ctrl->fatal_err_work); 1326 schedule_work(&ctrl->fatal_err_work);
1327 } 1327 }
1328 mutex_unlock(&ctrl->lock); 1328 mutex_unlock(&ctrl->lock);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 1e9654f04c60..98b7b1f4ee96 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1143,10 +1143,8 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1143 &tgtport->assoc_list, a_list) { 1143 &tgtport->assoc_list, a_list) {
1144 if (!nvmet_fc_tgt_a_get(assoc)) 1144 if (!nvmet_fc_tgt_a_get(assoc))
1145 continue; 1145 continue;
1146 spin_unlock_irqrestore(&tgtport->lock, flags); 1146 if (!schedule_work(&assoc->del_work))
1147 nvmet_fc_delete_target_assoc(assoc); 1147 nvmet_fc_tgt_a_put(assoc);
1148 nvmet_fc_tgt_a_put(assoc);
1149 spin_lock_irqsave(&tgtport->lock, flags);
1150 } 1148 }
1151 spin_unlock_irqrestore(&tgtport->lock, flags); 1149 spin_unlock_irqrestore(&tgtport->lock, flags);
1152} 1150}
@@ -1185,7 +1183,8 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1185 nvmet_fc_tgtport_put(tgtport); 1183 nvmet_fc_tgtport_put(tgtport);
1186 1184
1187 if (found_ctrl) { 1185 if (found_ctrl) {
1188 schedule_work(&assoc->del_work); 1186 if (!schedule_work(&assoc->del_work))
1187 nvmet_fc_tgt_a_put(assoc);
1189 return; 1188 return;
1190 } 1189 }
1191 1190
@@ -1503,10 +1502,8 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1503 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf; 1502 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1504 struct fcnvme_ls_disconnect_acc *acc = 1503 struct fcnvme_ls_disconnect_acc *acc =
1505 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf; 1504 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1506 struct nvmet_fc_tgt_queue *queue = NULL;
1507 struct nvmet_fc_tgt_assoc *assoc; 1505 struct nvmet_fc_tgt_assoc *assoc;
1508 int ret = 0; 1506 int ret = 0;
1509 bool del_assoc = false;
1510 1507
1511 memset(acc, 0, sizeof(*acc)); 1508 memset(acc, 0, sizeof(*acc));
1512 1509
@@ -1537,18 +1534,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1537 assoc = nvmet_fc_find_target_assoc(tgtport, 1534 assoc = nvmet_fc_find_target_assoc(tgtport,
1538 be64_to_cpu(rqst->associd.association_id)); 1535 be64_to_cpu(rqst->associd.association_id));
1539 iod->assoc = assoc; 1536 iod->assoc = assoc;
1540 if (assoc) { 1537 if (!assoc)
1541 if (rqst->discon_cmd.scope ==
1542 FCNVME_DISCONN_CONNECTION) {
1543 queue = nvmet_fc_find_target_queue(tgtport,
1544 be64_to_cpu(
1545 rqst->discon_cmd.id));
1546 if (!queue) {
1547 nvmet_fc_tgt_a_put(assoc);
1548 ret = VERR_NO_CONN;
1549 }
1550 }
1551 } else
1552 ret = VERR_NO_ASSOC; 1538 ret = VERR_NO_ASSOC;
1553 } 1539 }
1554 1540
@@ -1576,26 +1562,10 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1576 sizeof(struct fcnvme_ls_disconnect_acc)), 1562 sizeof(struct fcnvme_ls_disconnect_acc)),
1577 FCNVME_LS_DISCONNECT); 1563 FCNVME_LS_DISCONNECT);
1578 1564
1579
1580 /* are we to delete a Connection ID (queue) */
1581 if (queue) {
1582 int qid = queue->qid;
1583
1584 nvmet_fc_delete_target_queue(queue);
1585
1586 /* release the get taken by find_target_queue */
1587 nvmet_fc_tgt_q_put(queue);
1588
1589 /* tear association down if io queue terminated */
1590 if (!qid)
1591 del_assoc = true;
1592 }
1593
1594 /* release get taken in nvmet_fc_find_target_assoc */ 1565 /* release get taken in nvmet_fc_find_target_assoc */
1595 nvmet_fc_tgt_a_put(iod->assoc); 1566 nvmet_fc_tgt_a_put(iod->assoc);
1596 1567
1597 if (del_assoc) 1568 nvmet_fc_delete_target_assoc(iod->assoc);
1598 nvmet_fc_delete_target_assoc(iod->assoc);
1599} 1569}
1600 1570
1601 1571
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 71dfedbadc26..a065dbfc43b1 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -194,11 +194,11 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
194 le64_to_cpu(range->slba) << (ns->blksize_shift - 9), 194 le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
195 le32_to_cpu(range->nlb) << (ns->blksize_shift - 9), 195 le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
196 GFP_KERNEL, 0, bio); 196 GFP_KERNEL, 0, bio);
197 197 if (ret && ret != -EOPNOTSUPP) {
198 if (ret)
199 req->error_slba = le64_to_cpu(range->slba); 198 req->error_slba = le64_to_cpu(range->slba);
200 199 return blk_to_nvme_status(req, errno_to_blk_status(ret));
201 return blk_to_nvme_status(req, errno_to_blk_status(ret)); 200 }
201 return NVME_SC_SUCCESS;
202} 202}
203 203
204static void nvmet_bdev_execute_discard(struct nvmet_req *req) 204static void nvmet_bdev_execute_discard(struct nvmet_req *req)
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 517522305e5c..bc6ebb51b0bf 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -75,11 +75,11 @@ err:
75 return ret; 75 return ret;
76} 76}
77 77
78static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter) 78static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
79{ 79{
80 bv->bv_page = sg_page_iter_page(iter); 80 bv->bv_page = sg_page(sg);
81 bv->bv_offset = iter->sg->offset; 81 bv->bv_offset = sg->offset;
82 bv->bv_len = PAGE_SIZE - iter->sg->offset; 82 bv->bv_len = sg->length;
83} 83}
84 84
85static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos, 85static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
@@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
128 128
129static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags) 129static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
130{ 130{
131 ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE); 131 ssize_t nr_bvec = req->sg_cnt;
132 struct sg_page_iter sg_pg_iter;
133 unsigned long bv_cnt = 0; 132 unsigned long bv_cnt = 0;
134 bool is_sync = false; 133 bool is_sync = false;
135 size_t len = 0, total_len = 0; 134 size_t len = 0, total_len = 0;
136 ssize_t ret = 0; 135 ssize_t ret = 0;
137 loff_t pos; 136 loff_t pos;
138 137 int i;
138 struct scatterlist *sg;
139 139
140 if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC) 140 if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
141 is_sync = true; 141 is_sync = true;
@@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
147 } 147 }
148 148
149 memset(&req->f.iocb, 0, sizeof(struct kiocb)); 149 memset(&req->f.iocb, 0, sizeof(struct kiocb));
150 for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) { 150 for_each_sg(req->sg, sg, req->sg_cnt, i) {
151 nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter); 151 nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
152 len += req->f.bvec[bv_cnt].bv_len; 152 len += req->f.bvec[bv_cnt].bv_len;
153 total_len += req->f.bvec[bv_cnt].bv_len; 153 total_len += req->f.bvec[bv_cnt].bv_len;
154 bv_cnt++; 154 bv_cnt++;
@@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
225 225
226static void nvmet_file_execute_rw(struct nvmet_req *req) 226static void nvmet_file_execute_rw(struct nvmet_req *req)
227{ 227{
228 ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE); 228 ssize_t nr_bvec = req->sg_cnt;
229 229
230 if (!req->sg_cnt || !nr_bvec) { 230 if (!req->sg_cnt || !nr_bvec) {
231 nvmet_req_complete(req, 0); 231 nvmet_req_complete(req, 0);
@@ -297,7 +297,7 @@ static void nvmet_file_execute_discard(struct nvmet_req *req)
297 } 297 }
298 298
299 ret = vfs_fallocate(req->ns->file, mode, offset, len); 299 ret = vfs_fallocate(req->ns->file, mode, offset, len);
300 if (ret) { 300 if (ret && ret != -EOPNOTSUPP) {
301 req->error_slba = le64_to_cpu(range.slba); 301 req->error_slba = le64_to_cpu(range.slba);
302 status = errno_to_nvme_status(req, ret); 302 status = errno_to_nvme_status(req, ret);
303 break; 303 break;
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 1be571c20062..6bad04cbb1d3 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -157,8 +157,12 @@
157#define DBG_IRT(x...) 157#define DBG_IRT(x...)
158#endif 158#endif
159 159
160#ifdef CONFIG_64BIT
161#define COMPARE_IRTE_ADDR(irte, hpa) ((irte)->dest_iosapic_addr == (hpa))
162#else
160#define COMPARE_IRTE_ADDR(irte, hpa) \ 163#define COMPARE_IRTE_ADDR(irte, hpa) \
161 ((irte)->dest_iosapic_addr == F_EXTEND(hpa)) 164 ((irte)->dest_iosapic_addr == ((hpa) | 0xffffffff00000000ULL))
165#endif
162 166
163#define IOSAPIC_REG_SELECT 0x00 167#define IOSAPIC_REG_SELECT 0x00
164#define IOSAPIC_REG_WINDOW 0x10 168#define IOSAPIC_REG_WINDOW 0x10
diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c
index 56dd83a45e55..5484a46dafda 100644
--- a/drivers/parport/daisy.c
+++ b/drivers/parport/daisy.c
@@ -213,12 +213,10 @@ void parport_daisy_fini(struct parport *port)
213struct pardevice *parport_open(int devnum, const char *name) 213struct pardevice *parport_open(int devnum, const char *name)
214{ 214{
215 struct daisydev *p = topology; 215 struct daisydev *p = topology;
216 struct pardev_cb par_cb;
217 struct parport *port; 216 struct parport *port;
218 struct pardevice *dev; 217 struct pardevice *dev;
219 int daisy; 218 int daisy;
220 219
221 memset(&par_cb, 0, sizeof(par_cb));
222 spin_lock(&topology_lock); 220 spin_lock(&topology_lock);
223 while (p && p->devnum != devnum) 221 while (p && p->devnum != devnum)
224 p = p->next; 222 p = p->next;
@@ -232,7 +230,7 @@ struct pardevice *parport_open(int devnum, const char *name)
232 port = parport_get_port(p->port); 230 port = parport_get_port(p->port);
233 spin_unlock(&topology_lock); 231 spin_unlock(&topology_lock);
234 232
235 dev = parport_register_dev_model(port, name, &par_cb, devnum); 233 dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL);
236 parport_put_port(port); 234 parport_put_port(port);
237 if (!dev) 235 if (!dev)
238 return NULL; 236 return NULL;
@@ -482,31 +480,3 @@ static int assign_addrs(struct parport *port)
482 kfree(deviceid); 480 kfree(deviceid);
483 return detected; 481 return detected;
484} 482}
485
486static int daisy_drv_probe(struct pardevice *par_dev)
487{
488 struct device_driver *drv = par_dev->dev.driver;
489
490 if (strcmp(drv->name, "daisy_drv"))
491 return -ENODEV;
492 if (strcmp(par_dev->name, daisy_dev_name))
493 return -ENODEV;
494
495 return 0;
496}
497
498static struct parport_driver daisy_driver = {
499 .name = "daisy_drv",
500 .probe = daisy_drv_probe,
501 .devmodel = true,
502};
503
504int daisy_drv_init(void)
505{
506 return parport_register_driver(&daisy_driver);
507}
508
509void daisy_drv_exit(void)
510{
511 parport_unregister_driver(&daisy_driver);
512}
diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c
index e5e6a463a941..e035174ba205 100644
--- a/drivers/parport/probe.c
+++ b/drivers/parport/probe.c
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
257ssize_t parport_device_id (int devnum, char *buffer, size_t count) 257ssize_t parport_device_id (int devnum, char *buffer, size_t count)
258{ 258{
259 ssize_t retval = -ENXIO; 259 ssize_t retval = -ENXIO;
260 struct pardevice *dev = parport_open(devnum, daisy_dev_name); 260 struct pardevice *dev = parport_open (devnum, "Device ID probe");
261 if (!dev) 261 if (!dev)
262 return -ENXIO; 262 return -ENXIO;
263 263
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 0171b8dbcdcd..5dc53d420ca8 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -137,19 +137,11 @@ static struct bus_type parport_bus_type = {
137 137
138int parport_bus_init(void) 138int parport_bus_init(void)
139{ 139{
140 int retval; 140 return bus_register(&parport_bus_type);
141
142 retval = bus_register(&parport_bus_type);
143 if (retval)
144 return retval;
145 daisy_drv_init();
146
147 return 0;
148} 141}
149 142
150void parport_bus_exit(void) 143void parport_bus_exit(void)
151{ 144{
152 daisy_drv_exit();
153 bus_unregister(&parport_bus_type); 145 bus_unregister(&parport_bus_type);
154} 146}
155 147
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 224d88634115..d994839a3e24 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -273,6 +273,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
273u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed, 273u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
274 enum pcie_link_width *width); 274 enum pcie_link_width *width);
275void __pcie_print_link_status(struct pci_dev *dev, bool verbose); 275void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
276void pcie_report_downtraining(struct pci_dev *dev);
276 277
277/* Single Root I/O Virtualization */ 278/* Single Root I/O Virtualization */
278struct pci_sriov { 279struct pci_sriov {
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c
index d2eae3b7cc0f..4fa9e3523ee1 100644
--- a/drivers/pci/pcie/bw_notification.c
+++ b/drivers/pci/pcie/bw_notification.c
@@ -30,6 +30,8 @@ static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev)
30{ 30{
31 u16 lnk_ctl; 31 u16 lnk_ctl;
32 32
33 pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
34
33 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl); 35 pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
34 lnk_ctl |= PCI_EXP_LNKCTL_LBMIE; 36 lnk_ctl |= PCI_EXP_LNKCTL_LBMIE;
35 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); 37 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
@@ -44,11 +46,10 @@ static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev)
44 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl); 46 pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
45} 47}
46 48
47static irqreturn_t pcie_bw_notification_handler(int irq, void *context) 49static irqreturn_t pcie_bw_notification_irq(int irq, void *context)
48{ 50{
49 struct pcie_device *srv = context; 51 struct pcie_device *srv = context;
50 struct pci_dev *port = srv->port; 52 struct pci_dev *port = srv->port;
51 struct pci_dev *dev;
52 u16 link_status, events; 53 u16 link_status, events;
53 int ret; 54 int ret;
54 55
@@ -58,17 +59,26 @@ static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
58 if (ret != PCIBIOS_SUCCESSFUL || !events) 59 if (ret != PCIBIOS_SUCCESSFUL || !events)
59 return IRQ_NONE; 60 return IRQ_NONE;
60 61
62 pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
63 pcie_update_link_speed(port->subordinate, link_status);
64 return IRQ_WAKE_THREAD;
65}
66
67static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
68{
69 struct pcie_device *srv = context;
70 struct pci_dev *port = srv->port;
71 struct pci_dev *dev;
72
61 /* 73 /*
62 * Print status from downstream devices, not this root port or 74 * Print status from downstream devices, not this root port or
63 * downstream switch port. 75 * downstream switch port.
64 */ 76 */
65 down_read(&pci_bus_sem); 77 down_read(&pci_bus_sem);
66 list_for_each_entry(dev, &port->subordinate->devices, bus_list) 78 list_for_each_entry(dev, &port->subordinate->devices, bus_list)
67 __pcie_print_link_status(dev, false); 79 pcie_report_downtraining(dev);
68 up_read(&pci_bus_sem); 80 up_read(&pci_bus_sem);
69 81
70 pcie_update_link_speed(port->subordinate, link_status);
71 pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
72 return IRQ_HANDLED; 82 return IRQ_HANDLED;
73} 83}
74 84
@@ -80,7 +90,8 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
80 if (!pcie_link_bandwidth_notification_supported(srv->port)) 90 if (!pcie_link_bandwidth_notification_supported(srv->port))
81 return -ENODEV; 91 return -ENODEV;
82 92
83 ret = request_threaded_irq(srv->irq, NULL, pcie_bw_notification_handler, 93 ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq,
94 pcie_bw_notification_handler,
84 IRQF_SHARED, "PCIe BW notif", srv); 95 IRQF_SHARED, "PCIe BW notif", srv);
85 if (ret) 96 if (ret)
86 return ret; 97 return ret;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2ec0df04e0dc..7e12d0163863 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2388,7 +2388,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2388 return dev; 2388 return dev;
2389} 2389}
2390 2390
2391static void pcie_report_downtraining(struct pci_dev *dev) 2391void pcie_report_downtraining(struct pci_dev *dev)
2392{ 2392{
2393 if (!pci_is_pcie(dev)) 2393 if (!pci_is_pcie(dev))
2394 return; 2394 return;
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 5163097b43df..4bbd9ede38c8 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy,
485 struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy); 485 struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
486 int new_mode; 486 int new_mode;
487 487
488 if (phy->index != 0) 488 if (phy->index != 0) {
489 if (mode == PHY_MODE_USB_HOST)
490 return 0;
489 return -EINVAL; 491 return -EINVAL;
492 }
490 493
491 switch (mode) { 494 switch (mode) {
492 case PHY_MODE_USB_HOST: 495 case PHY_MODE_USB_HOST:
diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c
index 900c7073c46f..71308766e891 100644
--- a/drivers/platform/chrome/cros_ec_debugfs.c
+++ b/drivers/platform/chrome/cros_ec_debugfs.c
@@ -440,7 +440,7 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
440 440
441 ret = cros_ec_create_pdinfo(debug_info); 441 ret = cros_ec_create_pdinfo(debug_info);
442 if (ret) 442 if (ret)
443 goto remove_debugfs; 443 goto remove_log;
444 444
445 ec->debug_info = debug_info; 445 ec->debug_info = debug_info;
446 446
@@ -448,6 +448,8 @@ static int cros_ec_debugfs_probe(struct platform_device *pd)
448 448
449 return 0; 449 return 0;
450 450
451remove_log:
452 cros_ec_cleanup_console_log(debug_info);
451remove_debugfs: 453remove_debugfs:
452 debugfs_remove_recursive(debug_info->dir); 454 debugfs_remove_recursive(debug_info->dir);
453 return ret; 455 return ret;
@@ -467,7 +469,8 @@ static int __maybe_unused cros_ec_debugfs_suspend(struct device *dev)
467{ 469{
468 struct cros_ec_dev *ec = dev_get_drvdata(dev); 470 struct cros_ec_dev *ec = dev_get_drvdata(dev);
469 471
470 cancel_delayed_work_sync(&ec->debug_info->log_poll_work); 472 if (ec->debug_info->log_buffer.buf)
473 cancel_delayed_work_sync(&ec->debug_info->log_poll_work);
471 474
472 return 0; 475 return 0;
473} 476}
@@ -476,7 +479,8 @@ static int __maybe_unused cros_ec_debugfs_resume(struct device *dev)
476{ 479{
477 struct cros_ec_dev *ec = dev_get_drvdata(dev); 480 struct cros_ec_dev *ec = dev_get_drvdata(dev);
478 481
479 schedule_delayed_work(&ec->debug_info->log_poll_work, 0); 482 if (ec->debug_info->log_buffer.buf)
483 schedule_delayed_work(&ec->debug_info->log_poll_work, 0);
480 484
481 return 0; 485 return 0;
482} 486}
diff --git a/drivers/platform/chrome/wilco_ec/mailbox.c b/drivers/platform/chrome/wilco_ec/mailbox.c
index f6ff29a11f1a..14355668ddfa 100644
--- a/drivers/platform/chrome/wilco_ec/mailbox.c
+++ b/drivers/platform/chrome/wilco_ec/mailbox.c
@@ -223,11 +223,11 @@ int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg)
223 msg->command, msg->type, msg->flags, msg->response_size, 223 msg->command, msg->type, msg->flags, msg->response_size,
224 msg->request_size); 224 msg->request_size);
225 225
226 mutex_lock(&ec->mailbox_lock);
226 /* Prepare request packet */ 227 /* Prepare request packet */
227 rq = ec->data_buffer; 228 rq = ec->data_buffer;
228 wilco_ec_prepare(msg, rq); 229 wilco_ec_prepare(msg, rq);
229 230
230 mutex_lock(&ec->mailbox_lock);
231 ret = wilco_ec_transfer(ec, msg, rq); 231 ret = wilco_ec_transfer(ec, msg, rq);
232 mutex_unlock(&ec->mailbox_lock); 232 mutex_unlock(&ec->mailbox_lock);
233 233
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index a71734c41693..f933c06bff4f 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -667,9 +667,9 @@ config RTC_DRV_S5M
667 will be called rtc-s5m. 667 will be called rtc-s5m.
668 668
669config RTC_DRV_SD3078 669config RTC_DRV_SD3078
670 tristate "ZXW Crystal SD3078" 670 tristate "ZXW Shenzhen whwave SD3078"
671 help 671 help
672 If you say yes here you get support for the ZXW Crystal 672 If you say yes here you get support for the ZXW Shenzhen whwave
673 SD3078 RTC chips. 673 SD3078 RTC chips.
674 674
675 This driver can also be built as a module. If so, the module 675 This driver can also be built as a module. If so, the module
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index e5444296075e..4d6bf9304ceb 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -298,7 +298,7 @@ static int cros_ec_rtc_suspend(struct device *dev)
298 struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); 298 struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
299 299
300 if (device_may_wakeup(dev)) 300 if (device_may_wakeup(dev))
301 enable_irq_wake(cros_ec_rtc->cros_ec->irq); 301 return enable_irq_wake(cros_ec_rtc->cros_ec->irq);
302 302
303 return 0; 303 return 0;
304} 304}
@@ -309,7 +309,7 @@ static int cros_ec_rtc_resume(struct device *dev)
309 struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev); 309 struct cros_ec_rtc *cros_ec_rtc = dev_get_drvdata(&pdev->dev);
310 310
311 if (device_may_wakeup(dev)) 311 if (device_may_wakeup(dev))
312 disable_irq_wake(cros_ec_rtc->cros_ec->irq); 312 return disable_irq_wake(cros_ec_rtc->cros_ec->irq);
313 313
314 return 0; 314 return 0;
315} 315}
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index b4e054c64bad..69b54e5556c0 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -480,6 +480,13 @@ static int da9063_rtc_probe(struct platform_device *pdev)
480 da9063_data_to_tm(data, &rtc->alarm_time, rtc); 480 da9063_data_to_tm(data, &rtc->alarm_time, rtc);
481 rtc->rtc_sync = false; 481 rtc->rtc_sync = false;
482 482
483 /*
484 * TODO: some models have alarms on a minute boundary but still support
485 * real hardware interrupts. Add this once the core supports it.
486 */
487 if (config->rtc_data_start != RTC_SEC)
488 rtc->rtc_dev->uie_unsupported = 1;
489
483 irq_alarm = platform_get_irq_byname(pdev, "ALARM"); 490 irq_alarm = platform_get_irq_byname(pdev, "ALARM");
484 ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL, 491 ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
485 da9063_alarm_event, 492 da9063_alarm_event,
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index d417b203cbc5..1d3de2a3d1a4 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -374,7 +374,7 @@ static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
374static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off) 374static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
375{ 375{
376 unsigned int byte; 376 unsigned int byte;
377 int value = 0xff; /* return 0xff for ignored values */ 377 int value = -1; /* return -1 for ignored values */
378 378
379 byte = readb(rtc->regbase + reg_off); 379 byte = readb(rtc->regbase + reg_off);
380 if (byte & AR_ENB) { 380 if (byte & AR_ENB) {
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index a0baee25134c..a835b31aad99 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -24,6 +24,7 @@
24#include <asm/crw.h> 24#include <asm/crw.h>
25#include <asm/isc.h> 25#include <asm/isc.h>
26#include <asm/ebcdic.h> 26#include <asm/ebcdic.h>
27#include <asm/ap.h>
27 28
28#include "css.h" 29#include "css.h"
29#include "cio.h" 30#include "cio.h"
@@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
586 " failed (rc=%d).\n", ret); 587 " failed (rc=%d).\n", ret);
587} 588}
588 589
590static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
591{
592 CIO_CRW_EVENT(3, "chsc: ap config changed\n");
593 if (sei_area->rs != 5)
594 return;
595
596 ap_bus_cfg_chg();
597}
598
589static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) 599static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
590{ 600{
591 switch (sei_area->cc) { 601 switch (sei_area->cc) {
@@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
612 case 2: /* i/o resource accessibility */ 622 case 2: /* i/o resource accessibility */
613 chsc_process_sei_res_acc(sei_area); 623 chsc_process_sei_res_acc(sei_area);
614 break; 624 break;
625 case 3: /* ap config changed */
626 chsc_process_sei_ap_cfg_chg(sei_area);
627 break;
615 case 7: /* channel-path-availability information */ 628 case 7: /* channel-path-availability information */
616 chsc_process_sei_chp_avail(sei_area); 629 chsc_process_sei_chp_avail(sei_area);
617 break; 630 break;
@@ -1382,3 +1395,40 @@ int chsc_pnso_brinfo(struct subchannel_id schid,
1382 return chsc_error_from_response(brinfo_area->response.code); 1395 return chsc_error_from_response(brinfo_area->response.code);
1383} 1396}
1384EXPORT_SYMBOL_GPL(chsc_pnso_brinfo); 1397EXPORT_SYMBOL_GPL(chsc_pnso_brinfo);
1398
1399int chsc_sgib(u32 origin)
1400{
1401 struct {
1402 struct chsc_header request;
1403 u16 op;
1404 u8 reserved01[2];
1405 u8 reserved02:4;
1406 u8 fmt:4;
1407 u8 reserved03[7];
1408 /* operation data area begin */
1409 u8 reserved04[4];
1410 u32 gib_origin;
1411 u8 reserved05[10];
1412 u8 aix;
1413 u8 reserved06[4029];
1414 struct chsc_header response;
1415 u8 reserved07[4];
1416 } *sgib_area;
1417 int ret;
1418
1419 spin_lock_irq(&chsc_page_lock);
1420 memset(chsc_page, 0, PAGE_SIZE);
1421 sgib_area = chsc_page;
1422 sgib_area->request.length = 0x0fe0;
1423 sgib_area->request.code = 0x0021;
1424 sgib_area->op = 0x1;
1425 sgib_area->gib_origin = origin;
1426
1427 ret = chsc(sgib_area);
1428 if (ret == 0)
1429 ret = chsc_error_from_response(sgib_area->response.code);
1430 spin_unlock_irq(&chsc_page_lock);
1431
1432 return ret;
1433}
1434EXPORT_SYMBOL_GPL(chsc_sgib);
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 78aba8d94eec..e57d68e325a3 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -164,6 +164,7 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp);
164int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd); 164int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
165int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc, 165int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
166 u64 summary_indicator_addr, u64 subchannel_indicator_addr); 166 u64 summary_indicator_addr, u64 subchannel_indicator_addr);
167int chsc_sgib(u32 origin);
167int chsc_error_from_response(int response); 168int chsc_error_from_response(int response);
168 169
169int chsc_siosl(struct subchannel_id schid); 170int chsc_siosl(struct subchannel_id schid);
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index a10cec0e86eb..0b3b9de45c60 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
72{ 72{
73 struct vfio_ccw_private *private; 73 struct vfio_ccw_private *private;
74 struct irb *irb; 74 struct irb *irb;
75 bool is_final;
75 76
76 private = container_of(work, struct vfio_ccw_private, io_work); 77 private = container_of(work, struct vfio_ccw_private, io_work);
77 irb = &private->irb; 78 irb = &private->irb;
78 79
80 is_final = !(scsw_actl(&irb->scsw) &
81 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
79 if (scsw_is_solicited(&irb->scsw)) { 82 if (scsw_is_solicited(&irb->scsw)) {
80 cp_update_scsw(&private->cp, &irb->scsw); 83 cp_update_scsw(&private->cp, &irb->scsw);
81 cp_free(&private->cp); 84 if (is_final)
85 cp_free(&private->cp);
82 } 86 }
83 memcpy(private->io_region->irb_area, irb, sizeof(*irb)); 87 memcpy(private->io_region->irb_area, irb, sizeof(*irb));
84 88
85 if (private->io_trigger) 89 if (private->io_trigger)
86 eventfd_signal(private->io_trigger, 1); 90 eventfd_signal(private->io_trigger, 1);
87 91
88 if (private->mdev) 92 if (private->mdev && is_final)
89 private->state = VFIO_CCW_STATE_IDLE; 93 private->state = VFIO_CCW_STATE_IDLE;
90} 94}
91 95
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index e15816ff1265..1546389d71db 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev)
810 struct ap_device *ap_dev = to_ap_dev(dev); 810 struct ap_device *ap_dev = to_ap_dev(dev);
811 struct ap_driver *ap_drv = ap_dev->drv; 811 struct ap_driver *ap_drv = ap_dev->drv;
812 812
813 /* prepare ap queue device removal */
813 if (is_queue_dev(dev)) 814 if (is_queue_dev(dev))
814 ap_queue_remove(to_ap_queue(dev)); 815 ap_queue_prepare_remove(to_ap_queue(dev));
816
817 /* driver's chance to clean up gracefully */
815 if (ap_drv->remove) 818 if (ap_drv->remove)
816 ap_drv->remove(ap_dev); 819 ap_drv->remove(ap_dev);
817 820
821 /* now do the ap queue device remove */
822 if (is_queue_dev(dev))
823 ap_queue_remove(to_ap_queue(dev));
824
818 /* Remove queue/card from list of active queues/cards */ 825 /* Remove queue/card from list of active queues/cards */
819 spin_lock_bh(&ap_list_lock); 826 spin_lock_bh(&ap_list_lock);
820 if (is_card_dev(dev)) 827 if (is_card_dev(dev))
@@ -861,6 +868,16 @@ void ap_bus_force_rescan(void)
861EXPORT_SYMBOL(ap_bus_force_rescan); 868EXPORT_SYMBOL(ap_bus_force_rescan);
862 869
863/* 870/*
871* A config change has happened, force an ap bus rescan.
872*/
873void ap_bus_cfg_chg(void)
874{
875 AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
876
877 ap_bus_force_rescan();
878}
879
880/*
864 * hex2bitmap() - parse hex mask string and set bitmap. 881 * hex2bitmap() - parse hex mask string and set bitmap.
865 * Valid strings are "0x012345678" with at least one valid hex number. 882 * Valid strings are "0x012345678" with at least one valid hex number.
866 * Rest of the bitmap to the right is padded with 0. No spaces allowed 883 * Rest of the bitmap to the right is padded with 0. No spaces allowed
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index d0059eae5d94..15a98a673c5c 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -91,6 +91,7 @@ enum ap_state {
91 AP_STATE_WORKING, 91 AP_STATE_WORKING,
92 AP_STATE_QUEUE_FULL, 92 AP_STATE_QUEUE_FULL,
93 AP_STATE_SUSPEND_WAIT, 93 AP_STATE_SUSPEND_WAIT,
94 AP_STATE_REMOVE, /* about to be removed from driver */
94 AP_STATE_UNBOUND, /* momentary not bound to a driver */ 95 AP_STATE_UNBOUND, /* momentary not bound to a driver */
95 AP_STATE_BORKED, /* broken */ 96 AP_STATE_BORKED, /* broken */
96 NR_AP_STATES 97 NR_AP_STATES
@@ -252,6 +253,7 @@ void ap_bus_force_rescan(void);
252 253
253void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg); 254void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
254struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type); 255struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
256void ap_queue_prepare_remove(struct ap_queue *aq);
255void ap_queue_remove(struct ap_queue *aq); 257void ap_queue_remove(struct ap_queue *aq);
256void ap_queue_suspend(struct ap_device *ap_dev); 258void ap_queue_suspend(struct ap_device *ap_dev);
257void ap_queue_resume(struct ap_device *ap_dev); 259void ap_queue_resume(struct ap_device *ap_dev);
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index ba261210c6da..6a340f2c3556 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
420 [AP_EVENT_POLL] = ap_sm_suspend_read, 420 [AP_EVENT_POLL] = ap_sm_suspend_read,
421 [AP_EVENT_TIMEOUT] = ap_sm_nop, 421 [AP_EVENT_TIMEOUT] = ap_sm_nop,
422 }, 422 },
423 [AP_STATE_REMOVE] = {
424 [AP_EVENT_POLL] = ap_sm_nop,
425 [AP_EVENT_TIMEOUT] = ap_sm_nop,
426 },
423 [AP_STATE_UNBOUND] = { 427 [AP_STATE_UNBOUND] = {
424 [AP_EVENT_POLL] = ap_sm_nop, 428 [AP_EVENT_POLL] = ap_sm_nop,
425 [AP_EVENT_TIMEOUT] = ap_sm_nop, 429 [AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq)
740} 744}
741EXPORT_SYMBOL(ap_flush_queue); 745EXPORT_SYMBOL(ap_flush_queue);
742 746
743void ap_queue_remove(struct ap_queue *aq) 747void ap_queue_prepare_remove(struct ap_queue *aq)
744{ 748{
745 ap_flush_queue(aq); 749 spin_lock_bh(&aq->lock);
750 /* flush queue */
751 __ap_flush_queue(aq);
752 /* set REMOVE state to prevent new messages are queued in */
753 aq->state = AP_STATE_REMOVE;
746 del_timer_sync(&aq->timeout); 754 del_timer_sync(&aq->timeout);
755 spin_unlock_bh(&aq->lock);
756}
747 757
748 /* reset with zero, also clears irq registration */ 758void ap_queue_remove(struct ap_queue *aq)
759{
760 /*
761 * all messages have been flushed and the state is
762 * AP_STATE_REMOVE. Now reset with zero which also
763 * clears the irq registration and move the state
764 * to AP_STATE_UNBOUND to signal that this queue
765 * is not used by any driver currently.
766 */
749 spin_lock_bh(&aq->lock); 767 spin_lock_bh(&aq->lock);
750 ap_zapq(aq->qid); 768 ap_zapq(aq->qid);
751 aq->state = AP_STATE_UNBOUND; 769 aq->state = AP_STATE_UNBOUND;
752 spin_unlock_bh(&aq->lock); 770 spin_unlock_bh(&aq->lock);
753} 771}
754EXPORT_SYMBOL(ap_queue_remove);
755 772
756void ap_queue_reinit_state(struct ap_queue *aq) 773void ap_queue_reinit_state(struct ap_queue *aq)
757{ 774{
@@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq)
760 ap_wait(ap_sm_event(aq, AP_EVENT_POLL)); 777 ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
761 spin_unlock_bh(&aq->lock); 778 spin_unlock_bh(&aq->lock);
762} 779}
763EXPORT_SYMBOL(ap_queue_reinit_state);
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index eb93c2d27d0a..689c2af7026a 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
586 586
587static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc, 587static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
588 struct zcrypt_queue *zq, 588 struct zcrypt_queue *zq,
589 struct module **pmod,
589 unsigned int weight) 590 unsigned int weight)
590{ 591{
591 if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner)) 592 if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
@@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
595 atomic_add(weight, &zc->load); 596 atomic_add(weight, &zc->load);
596 atomic_add(weight, &zq->load); 597 atomic_add(weight, &zq->load);
597 zq->request_count++; 598 zq->request_count++;
599 *pmod = zq->queue->ap_dev.drv->driver.owner;
598 return zq; 600 return zq;
599} 601}
600 602
601static inline void zcrypt_drop_queue(struct zcrypt_card *zc, 603static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
602 struct zcrypt_queue *zq, 604 struct zcrypt_queue *zq,
605 struct module *mod,
603 unsigned int weight) 606 unsigned int weight)
604{ 607{
605 struct module *mod = zq->queue->ap_dev.drv->driver.owner;
606
607 zq->request_count--; 608 zq->request_count--;
608 atomic_sub(weight, &zc->load); 609 atomic_sub(weight, &zc->load);
609 atomic_sub(weight, &zq->load); 610 atomic_sub(weight, &zq->load);
@@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
653 unsigned int weight, pref_weight; 654 unsigned int weight, pref_weight;
654 unsigned int func_code; 655 unsigned int func_code;
655 int qid = 0, rc = -ENODEV; 656 int qid = 0, rc = -ENODEV;
657 struct module *mod;
656 658
657 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO); 659 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
658 660
@@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
706 pref_weight = weight; 708 pref_weight = weight;
707 } 709 }
708 } 710 }
709 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 711 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
710 spin_unlock(&zcrypt_list_lock); 712 spin_unlock(&zcrypt_list_lock);
711 713
712 if (!pref_zq) { 714 if (!pref_zq) {
@@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
718 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex); 720 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
719 721
720 spin_lock(&zcrypt_list_lock); 722 spin_lock(&zcrypt_list_lock);
721 zcrypt_drop_queue(pref_zc, pref_zq, weight); 723 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
722 spin_unlock(&zcrypt_list_lock); 724 spin_unlock(&zcrypt_list_lock);
723 725
724out: 726out:
@@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
735 unsigned int weight, pref_weight; 737 unsigned int weight, pref_weight;
736 unsigned int func_code; 738 unsigned int func_code;
737 int qid = 0, rc = -ENODEV; 739 int qid = 0, rc = -ENODEV;
740 struct module *mod;
738 741
739 trace_s390_zcrypt_req(crt, TP_ICARSACRT); 742 trace_s390_zcrypt_req(crt, TP_ICARSACRT);
740 743
@@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
788 pref_weight = weight; 791 pref_weight = weight;
789 } 792 }
790 } 793 }
791 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 794 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
792 spin_unlock(&zcrypt_list_lock); 795 spin_unlock(&zcrypt_list_lock);
793 796
794 if (!pref_zq) { 797 if (!pref_zq) {
@@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
800 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt); 803 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
801 804
802 spin_lock(&zcrypt_list_lock); 805 spin_lock(&zcrypt_list_lock);
803 zcrypt_drop_queue(pref_zc, pref_zq, weight); 806 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
804 spin_unlock(&zcrypt_list_lock); 807 spin_unlock(&zcrypt_list_lock);
805 808
806out: 809out:
@@ -819,6 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
819 unsigned int func_code; 822 unsigned int func_code;
820 unsigned short *domain; 823 unsigned short *domain;
821 int qid = 0, rc = -ENODEV; 824 int qid = 0, rc = -ENODEV;
825 struct module *mod;
822 826
823 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB); 827 trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
824 828
@@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
865 pref_weight = weight; 869 pref_weight = weight;
866 } 870 }
867 } 871 }
868 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 872 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
869 spin_unlock(&zcrypt_list_lock); 873 spin_unlock(&zcrypt_list_lock);
870 874
871 if (!pref_zq) { 875 if (!pref_zq) {
@@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
881 rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg); 885 rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
882 886
883 spin_lock(&zcrypt_list_lock); 887 spin_lock(&zcrypt_list_lock);
884 zcrypt_drop_queue(pref_zc, pref_zq, weight); 888 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
885 spin_unlock(&zcrypt_list_lock); 889 spin_unlock(&zcrypt_list_lock);
886 890
887out: 891out:
@@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
932 unsigned int func_code; 936 unsigned int func_code;
933 struct ap_message ap_msg; 937 struct ap_message ap_msg;
934 int qid = 0, rc = -ENODEV; 938 int qid = 0, rc = -ENODEV;
939 struct module *mod;
935 940
936 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB); 941 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
937 942
@@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
1000 pref_weight = weight; 1005 pref_weight = weight;
1001 } 1006 }
1002 } 1007 }
1003 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 1008 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
1004 spin_unlock(&zcrypt_list_lock); 1009 spin_unlock(&zcrypt_list_lock);
1005 1010
1006 if (!pref_zq) { 1011 if (!pref_zq) {
@@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
1012 rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg); 1017 rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
1013 1018
1014 spin_lock(&zcrypt_list_lock); 1019 spin_lock(&zcrypt_list_lock);
1015 zcrypt_drop_queue(pref_zc, pref_zq, weight); 1020 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
1016 spin_unlock(&zcrypt_list_lock); 1021 spin_unlock(&zcrypt_list_lock);
1017 1022
1018out_free: 1023out_free:
@@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer)
1033 struct ap_message ap_msg; 1038 struct ap_message ap_msg;
1034 unsigned int domain; 1039 unsigned int domain;
1035 int qid = 0, rc = -ENODEV; 1040 int qid = 0, rc = -ENODEV;
1041 struct module *mod;
1036 1042
1037 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB); 1043 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
1038 1044
@@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer)
1064 pref_weight = weight; 1070 pref_weight = weight;
1065 } 1071 }
1066 } 1072 }
1067 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight); 1073 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
1068 spin_unlock(&zcrypt_list_lock); 1074 spin_unlock(&zcrypt_list_lock);
1069 1075
1070 if (!pref_zq) { 1076 if (!pref_zq) {
@@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer)
1076 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg); 1082 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
1077 1083
1078 spin_lock(&zcrypt_list_lock); 1084 spin_lock(&zcrypt_list_lock);
1079 zcrypt_drop_queue(pref_zc, pref_zq, weight); 1085 zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
1080 spin_unlock(&zcrypt_list_lock); 1086 spin_unlock(&zcrypt_list_lock);
1081 1087
1082out: 1088out:
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 197b0f5b63e7..44bd6f04c145 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1150,13 +1150,16 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
1150 1150
1151static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf) 1151static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
1152{ 1152{
1153 struct sk_buff *skb;
1154
1153 /* release may never happen from within CQ tasklet scope */ 1155 /* release may never happen from within CQ tasklet scope */
1154 WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ); 1156 WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
1155 1157
1156 if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING) 1158 if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
1157 qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR); 1159 qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
1158 1160
1159 __skb_queue_purge(&buf->skb_list); 1161 while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
1162 consume_skb(skb);
1160} 1163}
1161 1164
1162static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 1165static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 8efb2e8ff8f4..c3067fd3bd9e 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -629,8 +629,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
629 } /* else fall through */ 629 } /* else fall through */
630 630
631 QETH_TXQ_STAT_INC(queue, tx_dropped); 631 QETH_TXQ_STAT_INC(queue, tx_dropped);
632 QETH_TXQ_STAT_INC(queue, tx_errors); 632 kfree_skb(skb);
633 dev_kfree_skb_any(skb);
634 netif_wake_queue(dev); 633 netif_wake_queue(dev);
635 return NETDEV_TX_OK; 634 return NETDEV_TX_OK;
636} 635}
@@ -645,6 +644,8 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
645 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 644 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
646 int rc; 645 int rc;
647 646
647 qeth_l2_vnicc_set_defaults(card);
648
648 if (gdev->dev.type == &qeth_generic_devtype) { 649 if (gdev->dev.type == &qeth_generic_devtype) {
649 rc = qeth_l2_create_device_attributes(&gdev->dev); 650 rc = qeth_l2_create_device_attributes(&gdev->dev);
650 if (rc) 651 if (rc)
@@ -652,8 +653,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
652 } 653 }
653 654
654 hash_init(card->mac_htable); 655 hash_init(card->mac_htable);
655 card->info.hwtrap = 0;
656 qeth_l2_vnicc_set_defaults(card);
657 return 0; 656 return 0;
658} 657}
659 658
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 7e68d9d16859..53712cf26406 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2096,8 +2096,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
2096 2096
2097tx_drop: 2097tx_drop:
2098 QETH_TXQ_STAT_INC(queue, tx_dropped); 2098 QETH_TXQ_STAT_INC(queue, tx_dropped);
2099 QETH_TXQ_STAT_INC(queue, tx_errors); 2099 kfree_skb(skb);
2100 dev_kfree_skb_any(skb);
2101 netif_wake_queue(dev); 2100 netif_wake_queue(dev);
2102 return NETDEV_TX_OK; 2101 return NETDEV_TX_OK;
2103} 2102}
@@ -2253,14 +2252,15 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
2253 struct qeth_card *card = dev_get_drvdata(&gdev->dev); 2252 struct qeth_card *card = dev_get_drvdata(&gdev->dev);
2254 int rc; 2253 int rc;
2255 2254
2255 hash_init(card->ip_htable);
2256
2256 if (gdev->dev.type == &qeth_generic_devtype) { 2257 if (gdev->dev.type == &qeth_generic_devtype) {
2257 rc = qeth_l3_create_device_attributes(&gdev->dev); 2258 rc = qeth_l3_create_device_attributes(&gdev->dev);
2258 if (rc) 2259 if (rc)
2259 return rc; 2260 return rc;
2260 } 2261 }
2261 hash_init(card->ip_htable); 2262
2262 hash_init(card->ip_mc_htable); 2263 hash_init(card->ip_mc_htable);
2263 card->info.hwtrap = 0;
2264 return 0; 2264 return 0;
2265} 2265}
2266 2266
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 744a64680d5b..e8fc28dba8df 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
624 add_timer(&erp_action->timer); 624 add_timer(&erp_action->timer);
625} 625}
626 626
627void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
628 int clear, char *dbftag)
629{
630 unsigned long flags;
631 struct zfcp_port *port;
632
633 write_lock_irqsave(&adapter->erp_lock, flags);
634 read_lock(&adapter->port_list_lock);
635 list_for_each_entry(port, &adapter->port_list, list)
636 _zfcp_erp_port_forced_reopen(port, clear, dbftag);
637 read_unlock(&adapter->port_list_lock);
638 write_unlock_irqrestore(&adapter->erp_lock, flags);
639}
640
627static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter, 641static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
628 int clear, char *dbftag) 642 int clear, char *dbftag)
629{ 643{
@@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
1341 struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); 1355 struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
1342 int lun_status; 1356 int lun_status;
1343 1357
1358 if (sdev->sdev_state == SDEV_DEL ||
1359 sdev->sdev_state == SDEV_CANCEL)
1360 continue;
1344 if (zsdev->port != port) 1361 if (zsdev->port != port)
1345 continue; 1362 continue;
1346 /* LUN under port of interest */ 1363 /* LUN under port of interest */
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 3fce47b0b21b..c6acca521ffe 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
70 char *dbftag); 70 char *dbftag);
71extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *); 71extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
72extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *); 72extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
73extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
74 int clear, char *dbftag);
73extern void zfcp_erp_set_lun_status(struct scsi_device *, u32); 75extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
74extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32); 76extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
75extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *); 77extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index db00b5e3abbe..33eddb02ee30 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
239 list_for_each_entry(port, &adapter->port_list, list) { 239 list_for_each_entry(port, &adapter->port_list, list) {
240 if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range)) 240 if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
241 zfcp_fc_test_link(port); 241 zfcp_fc_test_link(port);
242 if (!port->d_id)
243 zfcp_erp_port_reopen(port,
244 ZFCP_STATUS_COMMON_ERP_FAILED,
245 "fcrscn1");
246 } 242 }
247 read_unlock_irqrestore(&adapter->port_list_lock, flags); 243 read_unlock_irqrestore(&adapter->port_list_lock, flags);
248} 244}
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
250static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req) 246static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
251{ 247{
252 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data; 248 struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
249 struct zfcp_adapter *adapter = fsf_req->adapter;
253 struct fc_els_rscn *head; 250 struct fc_els_rscn *head;
254 struct fc_els_rscn_page *page; 251 struct fc_els_rscn_page *page;
255 u16 i; 252 u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
263 no_entries = be16_to_cpu(head->rscn_plen) / 260 no_entries = be16_to_cpu(head->rscn_plen) /
264 sizeof(struct fc_els_rscn_page); 261 sizeof(struct fc_els_rscn_page);
265 262
263 if (no_entries > 1) {
264 /* handle failed ports */
265 unsigned long flags;
266 struct zfcp_port *port;
267
268 read_lock_irqsave(&adapter->port_list_lock, flags);
269 list_for_each_entry(port, &adapter->port_list, list) {
270 if (port->d_id)
271 continue;
272 zfcp_erp_port_reopen(port,
273 ZFCP_STATUS_COMMON_ERP_FAILED,
274 "fcrscn1");
275 }
276 read_unlock_irqrestore(&adapter->port_list_lock, flags);
277 }
278
266 for (i = 1; i < no_entries; i++) { 279 for (i = 1; i < no_entries; i++) {
267 /* skip head and start with 1st element */ 280 /* skip head and start with 1st element */
268 page++; 281 page++;
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index f4f6a07c5222..221d0dfb8493 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
368 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; 368 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
369 int ret = SUCCESS, fc_ret; 369 int ret = SUCCESS, fc_ret;
370 370
371 if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
372 zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
373 zfcp_erp_wait(adapter);
374 }
371 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1"); 375 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
372 zfcp_erp_wait(adapter); 376 zfcp_erp_wait(adapter);
373 fc_ret = fc_block_scsi_eh(scpnt); 377 fc_ret = fc_block_scsi_eh(scpnt);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 1df5171594b8..11fb68d7e60d 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
2640 return capacity; 2640 return capacity;
2641} 2641}
2642 2642
2643static inline int aac_pci_offline(struct aac_dev *dev)
2644{
2645 return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
2646}
2647
2643static inline int aac_adapter_check_health(struct aac_dev *dev) 2648static inline int aac_adapter_check_health(struct aac_dev *dev)
2644{ 2649{
2645 if (unlikely(pci_channel_offline(dev->pdev))) 2650 if (unlikely(aac_pci_offline(dev)))
2646 return -1; 2651 return -1;
2647 2652
2648 return (dev)->a_ops.adapter_check_health(dev); 2653 return (dev)->a_ops.adapter_check_health(dev);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index e67e032936ef..78430a7b294c 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
672 return -ETIMEDOUT; 672 return -ETIMEDOUT;
673 } 673 }
674 674
675 if (unlikely(pci_channel_offline(dev->pdev))) 675 if (unlikely(aac_pci_offline(dev)))
676 return -EFAULT; 676 return -EFAULT;
677 677
678 if ((blink = aac_adapter_check_health(dev)) > 0) { 678 if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
772 772
773 spin_unlock_irqrestore(&fibptr->event_lock, flags); 773 spin_unlock_irqrestore(&fibptr->event_lock, flags);
774 774
775 if (unlikely(pci_channel_offline(dev->pdev))) 775 if (unlikely(aac_pci_offline(dev)))
776 return -EFAULT; 776 return -EFAULT;
777 777
778 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT; 778 fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index a45f81ec80ce..8e28a505f7e8 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -413,13 +413,16 @@ static int aac_slave_configure(struct scsi_device *sdev)
413 if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) { 413 if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
414 devtype = aac->hba_map[chn][tid].devtype; 414 devtype = aac->hba_map[chn][tid].devtype;
415 415
416 if (devtype == AAC_DEVTYPE_NATIVE_RAW) 416 if (devtype == AAC_DEVTYPE_NATIVE_RAW) {
417 depth = aac->hba_map[chn][tid].qd_limit; 417 depth = aac->hba_map[chn][tid].qd_limit;
418 else if (devtype == AAC_DEVTYPE_ARC_RAW) 418 set_timeout = 1;
419 goto common_config;
420 }
421 if (devtype == AAC_DEVTYPE_ARC_RAW) {
419 set_qd_dev_type = true; 422 set_qd_dev_type = true;
420 423 set_timeout = 1;
421 set_timeout = 1; 424 goto common_config;
422 goto common_config; 425 }
423 } 426 }
424 427
425 if (aac->jbod && (sdev->type == TYPE_DISK)) 428 if (aac->jbod && (sdev->type == TYPE_DISK))
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 6c87bd34509a..9bfa9f12d81e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -18,6 +18,7 @@
18#include <linux/dmapool.h> 18#include <linux/dmapool.h>
19#include <linux/iopoll.h> 19#include <linux/iopoll.h>
20#include <linux/lcm.h> 20#include <linux/lcm.h>
21#include <linux/libata.h>
21#include <linux/mfd/syscon.h> 22#include <linux/mfd/syscon.h>
22#include <linux/module.h> 23#include <linux/module.h>
23#include <linux/of_address.h> 24#include <linux/of_address.h>
@@ -94,6 +95,11 @@ enum {
94 PORT_TYPE_SATA = (1U << 0), 95 PORT_TYPE_SATA = (1U << 0),
95}; 96};
96 97
98enum dev_status {
99 HISI_SAS_DEV_INIT,
100 HISI_SAS_DEV_NORMAL,
101};
102
97enum { 103enum {
98 HISI_SAS_INT_ABT_CMD = 0, 104 HISI_SAS_INT_ABT_CMD = 0,
99 HISI_SAS_INT_ABT_DEV = 1, 105 HISI_SAS_INT_ABT_DEV = 1,
@@ -161,6 +167,7 @@ struct hisi_sas_phy {
161 u8 in_reset; 167 u8 in_reset;
162 u8 reserved[2]; 168 u8 reserved[2];
163 u32 phy_type; 169 u32 phy_type;
170 u32 code_violation_err_count;
164 enum sas_linkrate minimum_linkrate; 171 enum sas_linkrate minimum_linkrate;
165 enum sas_linkrate maximum_linkrate; 172 enum sas_linkrate maximum_linkrate;
166}; 173};
@@ -194,6 +201,7 @@ struct hisi_sas_device {
194 struct hisi_sas_dq *dq; 201 struct hisi_sas_dq *dq;
195 struct list_head list; 202 struct list_head list;
196 enum sas_device_type dev_type; 203 enum sas_device_type dev_type;
204 enum dev_status dev_status;
197 int device_id; 205 int device_id;
198 int sata_idx; 206 int sata_idx;
199 spinlock_t lock; /* For protecting slots */ 207 spinlock_t lock; /* For protecting slots */
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 13ca5a0bdf6b..14bac4966c87 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include "hisi_sas.h" 12#include "hisi_sas.h"
13#include "../libsas/sas_internal.h"
13#define DRV_NAME "hisi_sas" 14#define DRV_NAME "hisi_sas"
14 15
15#define DEV_IS_GONE(dev) \ 16#define DEV_IS_GONE(dev) \
@@ -707,6 +708,7 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
707 708
708 hisi_hba->devices[i].device_id = i; 709 hisi_hba->devices[i].device_id = i;
709 sas_dev = &hisi_hba->devices[i]; 710 sas_dev = &hisi_hba->devices[i];
711 sas_dev->dev_status = HISI_SAS_DEV_INIT;
710 sas_dev->dev_type = device->dev_type; 712 sas_dev->dev_type = device->dev_type;
711 sas_dev->hisi_hba = hisi_hba; 713 sas_dev->hisi_hba = hisi_hba;
712 sas_dev->sas_device = device; 714 sas_dev->sas_device = device;
@@ -731,6 +733,8 @@ static int hisi_sas_init_device(struct domain_device *device)
731 struct hisi_sas_tmf_task tmf_task; 733 struct hisi_sas_tmf_task tmf_task;
732 int retry = HISI_SAS_SRST_ATA_DISK_CNT; 734 int retry = HISI_SAS_SRST_ATA_DISK_CNT;
733 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 735 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
736 struct device *dev = hisi_hba->dev;
737 struct sas_phy *local_phy;
734 738
735 switch (device->dev_type) { 739 switch (device->dev_type) {
736 case SAS_END_DEVICE: 740 case SAS_END_DEVICE:
@@ -746,6 +750,31 @@ static int hisi_sas_init_device(struct domain_device *device)
746 case SAS_SATA_PM: 750 case SAS_SATA_PM:
747 case SAS_SATA_PM_PORT: 751 case SAS_SATA_PM_PORT:
748 case SAS_SATA_PENDING: 752 case SAS_SATA_PENDING:
753 /*
754 * send HARD RESET to clear previous affiliation of
755 * STP target port
756 */
757 local_phy = sas_get_local_phy(device);
758 if (!scsi_is_sas_phy_local(local_phy)) {
759 unsigned long deadline = ata_deadline(jiffies, 20000);
760 struct sata_device *sata_dev = &device->sata_dev;
761 struct ata_host *ata_host = sata_dev->ata_host;
762 struct ata_port_operations *ops = ata_host->ops;
763 struct ata_port *ap = sata_dev->ap;
764 struct ata_link *link;
765 unsigned int classes;
766
767 ata_for_each_link(link, ap, EDGE)
768 rc = ops->hardreset(link, &classes,
769 deadline);
770 }
771 sas_put_local_phy(local_phy);
772 if (rc) {
773 dev_warn(dev, "SATA disk hardreset fail: 0x%x\n",
774 rc);
775 return rc;
776 }
777
749 while (retry-- > 0) { 778 while (retry-- > 0) {
750 rc = hisi_sas_softreset_ata_disk(device); 779 rc = hisi_sas_softreset_ata_disk(device);
751 if (!rc) 780 if (!rc)
@@ -808,6 +837,7 @@ static int hisi_sas_dev_found(struct domain_device *device)
808 rc = hisi_sas_init_device(device); 837 rc = hisi_sas_init_device(device);
809 if (rc) 838 if (rc)
810 goto err_out; 839 goto err_out;
840 sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
811 return 0; 841 return 0;
812 842
813err_out: 843err_out:
@@ -980,7 +1010,8 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task
980 spin_lock_irqsave(&task->task_state_lock, flags); 1010 spin_lock_irqsave(&task->task_state_lock, flags);
981 task->task_state_flags &= 1011 task->task_state_flags &=
982 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR); 1012 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
983 task->task_state_flags |= SAS_TASK_STATE_DONE; 1013 if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
1014 task->task_state_flags |= SAS_TASK_STATE_DONE;
984 spin_unlock_irqrestore(&task->task_state_lock, flags); 1015 spin_unlock_irqrestore(&task->task_state_lock, flags);
985 } 1016 }
986 1017
@@ -1713,20 +1744,23 @@ static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
1713static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) 1744static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1714{ 1745{
1715 struct sas_phy *local_phy = sas_get_local_phy(device); 1746 struct sas_phy *local_phy = sas_get_local_phy(device);
1716 int rc, reset_type = (device->dev_type == SAS_SATA_DEV || 1747 struct hisi_sas_device *sas_dev = device->lldd_dev;
1717 (device->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
1718 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); 1748 struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
1719 struct sas_ha_struct *sas_ha = &hisi_hba->sha; 1749 struct sas_ha_struct *sas_ha = &hisi_hba->sha;
1720 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number]; 1750 struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number];
1721 struct hisi_sas_phy *phy = container_of(sas_phy, 1751 struct hisi_sas_phy *phy = container_of(sas_phy,
1722 struct hisi_sas_phy, sas_phy); 1752 struct hisi_sas_phy, sas_phy);
1723 DECLARE_COMPLETION_ONSTACK(phyreset); 1753 DECLARE_COMPLETION_ONSTACK(phyreset);
1754 int rc, reset_type;
1724 1755
1725 if (scsi_is_sas_phy_local(local_phy)) { 1756 if (scsi_is_sas_phy_local(local_phy)) {
1726 phy->in_reset = 1; 1757 phy->in_reset = 1;
1727 phy->reset_completion = &phyreset; 1758 phy->reset_completion = &phyreset;
1728 } 1759 }
1729 1760
1761 reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
1762 !dev_is_sata(device)) ? 1 : 0;
1763
1730 rc = sas_phy_reset(local_phy, reset_type); 1764 rc = sas_phy_reset(local_phy, reset_type);
1731 sas_put_local_phy(local_phy); 1765 sas_put_local_phy(local_phy);
1732 1766
@@ -1742,8 +1776,13 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
1742 /* report PHY down if timed out */ 1776 /* report PHY down if timed out */
1743 if (!ret) 1777 if (!ret)
1744 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0); 1778 hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
1745 } else 1779 } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
1780 /*
1781 * If in init state, we rely on caller to wait for link to be
1782 * ready; otherwise, delay.
1783 */
1746 msleep(2000); 1784 msleep(2000);
1785 }
1747 1786
1748 return rc; 1787 return rc;
1749} 1788}
@@ -1762,6 +1801,12 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
1762 } 1801 }
1763 hisi_sas_dereg_device(hisi_hba, device); 1802 hisi_sas_dereg_device(hisi_hba, device);
1764 1803
1804 if (dev_is_sata(device)) {
1805 rc = hisi_sas_softreset_ata_disk(device);
1806 if (rc)
1807 return TMF_RESP_FUNC_FAILED;
1808 }
1809
1765 rc = hisi_sas_debug_I_T_nexus_reset(device); 1810 rc = hisi_sas_debug_I_T_nexus_reset(device);
1766 1811
1767 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV)) 1812 if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
@@ -2125,9 +2170,18 @@ static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
2125 2170
2126static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy) 2171static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
2127{ 2172{
2173 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2174 struct sas_phy *sphy = sas_phy->phy;
2175 struct sas_phy_data *d = sphy->hostdata;
2176
2128 phy->phy_attached = 0; 2177 phy->phy_attached = 0;
2129 phy->phy_type = 0; 2178 phy->phy_type = 0;
2130 phy->port = NULL; 2179 phy->port = NULL;
2180
2181 if (d->enable)
2182 sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
2183 else
2184 sphy->negotiated_linkrate = SAS_PHY_DISABLED;
2131} 2185}
2132 2186
2133void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) 2187void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
@@ -2253,6 +2307,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
2253 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) { 2307 for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
2254 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED; 2308 hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
2255 hisi_hba->devices[i].device_id = i; 2309 hisi_hba->devices[i].device_id = i;
2310 hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
2256 } 2311 }
2257 2312
2258 for (i = 0; i < hisi_hba->queue_count; i++) { 2313 for (i = 0; i < hisi_hba->queue_count; i++) {
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index e40cc6b3b67b..89160ab3efb0 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -868,6 +868,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
868 868
869 hisi_hba->devices[i].device_id = i; 869 hisi_hba->devices[i].device_id = i;
870 sas_dev = &hisi_hba->devices[i]; 870 sas_dev = &hisi_hba->devices[i];
871 sas_dev->dev_status = HISI_SAS_DEV_INIT;
871 sas_dev->dev_type = device->dev_type; 872 sas_dev->dev_type = device->dev_type;
872 sas_dev->hisi_hba = hisi_hba; 873 sas_dev->hisi_hba = hisi_hba;
873 sas_dev->sas_device = device; 874 sas_dev->sas_device = device;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 9ec8848ec541..086695a4099f 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -129,6 +129,7 @@
129#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) 129#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
130#define CMD_HDR_PIR_OFF 8 130#define CMD_HDR_PIR_OFF 8
131#define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF) 131#define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF)
132#define SERDES_CFG (PORT_BASE + 0x1c)
132#define SL_CFG (PORT_BASE + 0x84) 133#define SL_CFG (PORT_BASE + 0x84)
133#define AIP_LIMIT (PORT_BASE + 0x90) 134#define AIP_LIMIT (PORT_BASE + 0x90)
134#define SL_CONTROL (PORT_BASE + 0x94) 135#define SL_CONTROL (PORT_BASE + 0x94)
@@ -181,6 +182,8 @@
181#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22 182#define CHL_INT1_DMAC_RX_AXI_RD_ERR_OFF 22
182#define CHL_INT2 (PORT_BASE + 0x1bc) 183#define CHL_INT2 (PORT_BASE + 0x1bc)
183#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0 184#define CHL_INT2_SL_IDAF_TOUT_CONF_OFF 0
185#define CHL_INT2_RX_DISP_ERR_OFF 28
186#define CHL_INT2_RX_CODE_ERR_OFF 29
184#define CHL_INT2_RX_INVLD_DW_OFF 30 187#define CHL_INT2_RX_INVLD_DW_OFF 30
185#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31 188#define CHL_INT2_STP_LINK_TIMEOUT_OFF 31
186#define CHL_INT0_MSK (PORT_BASE + 0x1c0) 189#define CHL_INT0_MSK (PORT_BASE + 0x1c0)
@@ -523,6 +526,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
523 } 526 }
524 hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE, 527 hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
525 prog_phy_link_rate); 528 prog_phy_link_rate);
529 hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
526 hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80); 530 hisi_sas_phy_write32(hisi_hba, i, SAS_RX_TRAIN_TIMER, 0x13e80);
527 hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff); 531 hisi_sas_phy_write32(hisi_hba, i, CHL_INT0, 0xffffffff);
528 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff); 532 hisi_sas_phy_write32(hisi_hba, i, CHL_INT1, 0xffffffff);
@@ -544,6 +548,8 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
544 hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120); 548 hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
545 hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01); 549 hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
546 hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32); 550 hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
551 hisi_sas_phy_write32(hisi_hba, i, SAS_EC_INT_COAL_TIME,
552 0x30f4240);
547 /* used for 12G negotiate */ 553 /* used for 12G negotiate */
548 hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e); 554 hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
549 hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff); 555 hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
@@ -1344,7 +1350,8 @@ static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
1344 1350
1345static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba) 1351static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
1346{ 1352{
1347 int i, res; 1353 int i;
1354 irqreturn_t res;
1348 u32 context, port_id, link_rate; 1355 u32 context, port_id, link_rate;
1349 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1356 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1350 struct asd_sas_phy *sas_phy = &phy->sas_phy; 1357 struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -1575,6 +1582,39 @@ static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1575 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value); 1582 hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value);
1576} 1583}
1577 1584
1585static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1586{
1587 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1588 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1589 struct sas_phy *sphy = sas_phy->phy;
1590 unsigned long flags;
1591 u32 reg_value;
1592
1593 spin_lock_irqsave(&phy->lock, flags);
1594
1595 /* loss dword sync */
1596 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
1597 sphy->loss_of_dword_sync_count += reg_value;
1598
1599 /* phy reset problem */
1600 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
1601 sphy->phy_reset_problem_count += reg_value;
1602
1603 /* invalid dword */
1604 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
1605 sphy->invalid_dword_count += reg_value;
1606
1607 /* disparity err */
1608 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
1609 sphy->running_disparity_error_count += reg_value;
1610
1611 /* code violation error */
1612 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_CODE_ERR);
1613 phy->code_violation_err_count += reg_value;
1614
1615 spin_unlock_irqrestore(&phy->lock, flags);
1616}
1617
1578static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no) 1618static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1579{ 1619{
1580 u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK); 1620 u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
@@ -1582,6 +1622,9 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1582 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; 1622 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
1583 struct pci_dev *pci_dev = hisi_hba->pci_dev; 1623 struct pci_dev *pci_dev = hisi_hba->pci_dev;
1584 struct device *dev = hisi_hba->dev; 1624 struct device *dev = hisi_hba->dev;
1625 static const u32 msk = BIT(CHL_INT2_RX_DISP_ERR_OFF) |
1626 BIT(CHL_INT2_RX_CODE_ERR_OFF) |
1627 BIT(CHL_INT2_RX_INVLD_DW_OFF);
1585 1628
1586 irq_value &= ~irq_msk; 1629 irq_value &= ~irq_msk;
1587 if (!irq_value) 1630 if (!irq_value)
@@ -1602,6 +1645,25 @@ static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
1602 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET); 1645 hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
1603 } 1646 }
1604 1647
1648 if (pci_dev->revision > 0x20 && (irq_value & msk)) {
1649 struct asd_sas_phy *sas_phy = &phy->sas_phy;
1650 struct sas_phy *sphy = sas_phy->phy;
1651
1652 phy_get_events_v3_hw(hisi_hba, phy_no);
1653
1654 if (irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF))
1655 dev_info(dev, "phy%d invalid dword cnt: %u\n", phy_no,
1656 sphy->invalid_dword_count);
1657
1658 if (irq_value & BIT(CHL_INT2_RX_CODE_ERR_OFF))
1659 dev_info(dev, "phy%d code violation cnt: %u\n", phy_no,
1660 phy->code_violation_err_count);
1661
1662 if (irq_value & BIT(CHL_INT2_RX_DISP_ERR_OFF))
1663 dev_info(dev, "phy%d disparity error cnt: %u\n", phy_no,
1664 sphy->running_disparity_error_count);
1665 }
1666
1605 if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) && 1667 if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
1606 (pci_dev->revision == 0x20)) { 1668 (pci_dev->revision == 0x20)) {
1607 u32 reg_value; 1669 u32 reg_value;
@@ -2230,31 +2292,6 @@ static u32 get_phys_state_v3_hw(struct hisi_hba *hisi_hba)
2230 return hisi_sas_read32(hisi_hba, PHY_STATE); 2292 return hisi_sas_read32(hisi_hba, PHY_STATE);
2231} 2293}
2232 2294
2233static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
2234{
2235 struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
2236 struct asd_sas_phy *sas_phy = &phy->sas_phy;
2237 struct sas_phy *sphy = sas_phy->phy;
2238 u32 reg_value;
2239
2240 /* loss dword sync */
2241 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DWS_LOST);
2242 sphy->loss_of_dword_sync_count += reg_value;
2243
2244 /* phy reset problem */
2245 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_RESET_PROB);
2246 sphy->phy_reset_problem_count += reg_value;
2247
2248 /* invalid dword */
2249 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_INVLD_DW);
2250 sphy->invalid_dword_count += reg_value;
2251
2252 /* disparity err */
2253 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no, ERR_CNT_DISP_ERR);
2254 sphy->running_disparity_error_count += reg_value;
2255
2256}
2257
2258static int disable_host_v3_hw(struct hisi_hba *hisi_hba) 2295static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
2259{ 2296{
2260 struct device *dev = hisi_hba->dev; 2297 struct device *dev = hisi_hba->dev;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index dbaa4f131433..3ad997ac3510 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -139,6 +139,7 @@ static const struct {
139 { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" }, 139 { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
140 140
141 { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" }, 141 { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
142 { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
142}; 143};
143 144
144static void ibmvfc_npiv_login(struct ibmvfc_host *); 145static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
1494 if (rsp->flags & FCP_RSP_LEN_VALID) 1495 if (rsp->flags & FCP_RSP_LEN_VALID)
1495 rsp_code = rsp->data.info.rsp_code; 1496 rsp_code = rsp->data.info.rsp_code;
1496 1497
1497 scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) " 1498 scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
1498 "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n", 1499 "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
1499 cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error, 1500 cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
1500 rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status); 1501 rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
1501} 1502}
1502 1503
@@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
2022 sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) " 2023 sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
2023 "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc, 2024 "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
2024 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), 2025 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2025 rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, 2026 be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2026 fc_rsp->scsi_status); 2027 fc_rsp->scsi_status);
2027 rsp_rc = -EIO; 2028 rsp_rc = -EIO;
2028 } else 2029 } else
@@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
2381 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) " 2382 sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
2382 "flags: %x fcp_rsp: %x, scsi_status: %x\n", 2383 "flags: %x fcp_rsp: %x, scsi_status: %x\n",
2383 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)), 2384 ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
2384 rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code, 2385 be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
2385 fc_rsp->scsi_status); 2386 fc_rsp->scsi_status);
2386 rsp_rc = -EIO; 2387 rsp_rc = -EIO;
2387 } else 2388 } else
@@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
2755 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE); 2756 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
2756 if (crq->format == IBMVFC_PARTITION_MIGRATED) { 2757 if (crq->format == IBMVFC_PARTITION_MIGRATED) {
2757 /* We need to re-setup the interpartition connection */ 2758 /* We need to re-setup the interpartition connection */
2758 dev_info(vhost->dev, "Re-enabling adapter\n"); 2759 dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
2759 vhost->client_migrated = 1; 2760 vhost->client_migrated = 1;
2760 ibmvfc_purge_requests(vhost, DID_REQUEUE); 2761 ibmvfc_purge_requests(vhost, DID_REQUEUE);
2761 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); 2762 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2762 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE); 2763 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
2763 } else { 2764 } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
2764 dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format); 2765 dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
2765 ibmvfc_purge_requests(vhost, DID_ERROR); 2766 ibmvfc_purge_requests(vhost, DID_ERROR);
2766 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); 2767 ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
2767 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET); 2768 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
2769 } else {
2770 dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
2768 } 2771 }
2769 return; 2772 return;
2770 case IBMVFC_CRQ_CMD_RSP: 2773 case IBMVFC_CRQ_CMD_RSP:
@@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
3348 3351
3349 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n", 3352 tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
3350 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), 3353 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3351 rsp->status, rsp->error, status); 3354 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
3352 break; 3355 break;
3353 } 3356 }
3354 3357
@@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
3446 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT); 3449 ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
3447 3450
3448 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3451 tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3449 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error, 3452 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3450 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type, 3453 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
3451 ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status); 3454 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
3455 ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
3452 break; 3456 break;
3453 } 3457 }
3454 3458
@@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
3619 fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8; 3623 fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
3620 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3624 tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3621 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)), 3625 ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
3622 mad->iu.status, mad->iu.error, 3626 be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
3623 ibmvfc_get_fc_type(fc_reason), fc_reason, 3627 ibmvfc_get_fc_type(fc_reason), fc_reason,
3624 ibmvfc_get_ls_explain(fc_explain), fc_explain, status); 3628 ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
3625 break; 3629 break;
@@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
3831 3835
3832 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n", 3836 tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
3833 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), 3837 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3834 rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), 3838 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
3835 rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), 3839 ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
3836 rsp->fc_explain, status); 3840 ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
3841 status);
3837 break; 3842 break;
3838 } 3843 }
3839 3844
@@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
3959 level += ibmvfc_retry_host_init(vhost); 3964 level += ibmvfc_retry_host_init(vhost);
3960 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n", 3965 ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
3961 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), 3966 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
3962 rsp->status, rsp->error); 3967 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
3963 break; 3968 break;
3964 case IBMVFC_MAD_DRIVER_FAILED: 3969 case IBMVFC_MAD_DRIVER_FAILED:
3965 break; 3970 break;
@@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
4024 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD); 4029 ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
4025 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n", 4030 ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
4026 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), 4031 ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
4027 rsp->status, rsp->error); 4032 be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
4028 ibmvfc_free_event(evt); 4033 ibmvfc_free_event(evt);
4029 return; 4034 return;
4030 case IBMVFC_MAD_CRQ_ERROR: 4035 case IBMVFC_MAD_CRQ_ERROR:
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index b81a53c4a9a8..459cc288ba1d 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -78,9 +78,14 @@ enum ibmvfc_crq_valid {
78 IBMVFC_CRQ_XPORT_EVENT = 0xFF, 78 IBMVFC_CRQ_XPORT_EVENT = 0xFF,
79}; 79};
80 80
81enum ibmvfc_crq_format { 81enum ibmvfc_crq_init_msg {
82 IBMVFC_CRQ_INIT = 0x01, 82 IBMVFC_CRQ_INIT = 0x01,
83 IBMVFC_CRQ_INIT_COMPLETE = 0x02, 83 IBMVFC_CRQ_INIT_COMPLETE = 0x02,
84};
85
86enum ibmvfc_crq_xport_evts {
87 IBMVFC_PARTNER_FAILED = 0x01,
88 IBMVFC_PARTNER_DEREGISTER = 0x02,
84 IBMVFC_PARTITION_MIGRATED = 0x06, 89 IBMVFC_PARTITION_MIGRATED = 0x06,
85}; 90};
86 91
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 1135e74646e2..8cec5230fe31 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -96,6 +96,7 @@ static int client_reserve = 1;
96static char partition_name[96] = "UNKNOWN"; 96static char partition_name[96] = "UNKNOWN";
97static unsigned int partition_number = -1; 97static unsigned int partition_number = -1;
98static LIST_HEAD(ibmvscsi_head); 98static LIST_HEAD(ibmvscsi_head);
99static DEFINE_SPINLOCK(ibmvscsi_driver_lock);
99 100
100static struct scsi_transport_template *ibmvscsi_transport_template; 101static struct scsi_transport_template *ibmvscsi_transport_template;
101 102
@@ -2270,7 +2271,9 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
2270 } 2271 }
2271 2272
2272 dev_set_drvdata(&vdev->dev, hostdata); 2273 dev_set_drvdata(&vdev->dev, hostdata);
2274 spin_lock(&ibmvscsi_driver_lock);
2273 list_add_tail(&hostdata->host_list, &ibmvscsi_head); 2275 list_add_tail(&hostdata->host_list, &ibmvscsi_head);
2276 spin_unlock(&ibmvscsi_driver_lock);
2274 return 0; 2277 return 0;
2275 2278
2276 add_srp_port_failed: 2279 add_srp_port_failed:
@@ -2292,15 +2295,27 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
2292static int ibmvscsi_remove(struct vio_dev *vdev) 2295static int ibmvscsi_remove(struct vio_dev *vdev)
2293{ 2296{
2294 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev); 2297 struct ibmvscsi_host_data *hostdata = dev_get_drvdata(&vdev->dev);
2295 list_del(&hostdata->host_list); 2298 unsigned long flags;
2296 unmap_persist_bufs(hostdata); 2299
2300 srp_remove_host(hostdata->host);
2301 scsi_remove_host(hostdata->host);
2302
2303 purge_requests(hostdata, DID_ERROR);
2304
2305 spin_lock_irqsave(hostdata->host->host_lock, flags);
2297 release_event_pool(&hostdata->pool, hostdata); 2306 release_event_pool(&hostdata->pool, hostdata);
2307 spin_unlock_irqrestore(hostdata->host->host_lock, flags);
2308
2298 ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, 2309 ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
2299 max_events); 2310 max_events);
2300 2311
2301 kthread_stop(hostdata->work_thread); 2312 kthread_stop(hostdata->work_thread);
2302 srp_remove_host(hostdata->host); 2313 unmap_persist_bufs(hostdata);
2303 scsi_remove_host(hostdata->host); 2314
2315 spin_lock(&ibmvscsi_driver_lock);
2316 list_del(&hostdata->host_list);
2317 spin_unlock(&ibmvscsi_driver_lock);
2318
2304 scsi_host_put(hostdata->host); 2319 scsi_host_put(hostdata->host);
2305 2320
2306 return 0; 2321 return 0;
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 21309d5b456d..e893949a3d11 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -798,7 +798,7 @@ EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
798 * @datalen: len of buffer 798 * @datalen: len of buffer
799 * 799 *
800 * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and 800 * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
801 * then completes the command and task. 801 * then completes the command and task. called under back_lock
802 **/ 802 **/
803static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 803static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
804 struct iscsi_task *task, char *data, 804 struct iscsi_task *task, char *data,
@@ -894,6 +894,9 @@ out:
894 * @conn: iscsi connection 894 * @conn: iscsi connection
895 * @hdr: iscsi pdu 895 * @hdr: iscsi pdu
896 * @task: scsi command task 896 * @task: scsi command task
897 *
898 * iscsi_data_in_rsp sets up the scsi_cmnd fields based on the data received
899 * then completes the command and task. called under back_lock
897 **/ 900 **/
898static void 901static void
899iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr, 902iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
@@ -978,6 +981,16 @@ static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
978 return 0; 981 return 0;
979} 982}
980 983
984/**
985 * iscsi_nop_out_rsp - SCSI NOP Response processing
986 * @task: scsi command task
987 * @nop: the nop structure
988 * @data: where to put the data
989 * @datalen: length of data
990 *
991 * iscsi_nop_out_rsp handles nop response from use or
992 * from user space. called under back_lock
993 **/
981static int iscsi_nop_out_rsp(struct iscsi_task *task, 994static int iscsi_nop_out_rsp(struct iscsi_task *task,
982 struct iscsi_nopin *nop, char *data, int datalen) 995 struct iscsi_nopin *nop, char *data, int datalen)
983{ 996{
@@ -1750,7 +1763,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
1750 return 0; 1763 return 0;
1751 1764
1752prepd_reject: 1765prepd_reject:
1766 spin_lock_bh(&session->back_lock);
1753 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); 1767 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1768 spin_unlock_bh(&session->back_lock);
1754reject: 1769reject:
1755 spin_unlock_bh(&session->frwd_lock); 1770 spin_unlock_bh(&session->frwd_lock);
1756 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n", 1771 ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
@@ -1758,7 +1773,9 @@ reject:
1758 return SCSI_MLQUEUE_TARGET_BUSY; 1773 return SCSI_MLQUEUE_TARGET_BUSY;
1759 1774
1760prepd_fault: 1775prepd_fault:
1776 spin_lock_bh(&session->back_lock);
1761 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ); 1777 iscsi_complete_task(task, ISCSI_TASK_REQUEUE_SCSIQ);
1778 spin_unlock_bh(&session->back_lock);
1762fault: 1779fault:
1763 spin_unlock_bh(&session->frwd_lock); 1780 spin_unlock_bh(&session->frwd_lock);
1764 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n", 1781 ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
@@ -3075,8 +3092,9 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
3075 state = ISCSI_TASK_ABRT_SESS_RECOV; 3092 state = ISCSI_TASK_ABRT_SESS_RECOV;
3076 if (task->state == ISCSI_TASK_PENDING) 3093 if (task->state == ISCSI_TASK_PENDING)
3077 state = ISCSI_TASK_COMPLETED; 3094 state = ISCSI_TASK_COMPLETED;
3095 spin_lock_bh(&session->back_lock);
3078 iscsi_complete_task(task, state); 3096 iscsi_complete_task(task, state);
3079 3097 spin_unlock_bh(&session->back_lock);
3080 } 3098 }
3081} 3099}
3082 3100
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 9923e9e3b884..c3fe3f3a78f5 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -129,12 +129,17 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
129 BUG_ON(sg->length == 0); 129 BUG_ON(sg->length == 0);
130 130
131 /* 131 /*
132 * We always map for the recv path.
133 *
132 * If the page count is greater than one it is ok to send 134 * If the page count is greater than one it is ok to send
133 * to the network layer's zero copy send path. If not we 135 * to the network layer's zero copy send path. If not we
134 * have to go the slow sendmsg path. We always map for the 136 * have to go the slow sendmsg path.
135 * recv path. 137 *
138 * Same goes for slab pages: skb_can_coalesce() allows
139 * coalescing neighboring slab objects into a single frag which
140 * triggers one of hardened usercopy checks.
136 */ 141 */
137 if (page_count(sg_page(sg)) >= 1 && !recv) 142 if (!recv && page_count(sg_page(sg)) >= 1 && !PageSlab(sg_page(sg)))
138 return; 143 return;
139 144
140 if (recv) { 145 if (recv) {
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 3b5873f6751e..7fcdaed3fa94 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4090,7 +4090,7 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
4090 /* Sanity check to ensure our sizing is right for both SCSI and NVME */ 4090 /* Sanity check to ensure our sizing is right for both SCSI and NVME */
4091 if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) { 4091 if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) {
4092 lpfc_printf_log(phba, KERN_ERR, LOG_FCP, 4092 lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
4093 "6426 Common buffer size %ld exceeds %d\n", 4093 "6426 Common buffer size %zd exceeds %d\n",
4094 sizeof(struct lpfc_io_buf), 4094 sizeof(struct lpfc_io_buf),
4095 LPFC_COMMON_IO_BUF_SZ); 4095 LPFC_COMMON_IO_BUF_SZ);
4096 return 0; 4096 return 0;
@@ -10052,7 +10052,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10052{ 10052{
10053 struct pci_dev *pdev = phba->pcidev; 10053 struct pci_dev *pdev = phba->pcidev;
10054 unsigned long bar0map_len, bar1map_len, bar2map_len; 10054 unsigned long bar0map_len, bar1map_len, bar2map_len;
10055 int error = -ENODEV; 10055 int error;
10056 uint32_t if_type; 10056 uint32_t if_type;
10057 10057
10058 if (!pdev) 10058 if (!pdev)
@@ -10071,7 +10071,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10071 */ 10071 */
10072 if (pci_read_config_dword(pdev, LPFC_SLI_INTF, 10072 if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
10073 &phba->sli4_hba.sli_intf.word0)) { 10073 &phba->sli4_hba.sli_intf.word0)) {
10074 return error; 10074 return -ENODEV;
10075 } 10075 }
10076 10076
10077 /* There is no SLI3 failback for SLI4 devices. */ 10077 /* There is no SLI3 failback for SLI4 devices. */
@@ -10081,7 +10081,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10081 "2894 SLI_INTF reg contents invalid " 10081 "2894 SLI_INTF reg contents invalid "
10082 "sli_intf reg 0x%x\n", 10082 "sli_intf reg 0x%x\n",
10083 phba->sli4_hba.sli_intf.word0); 10083 phba->sli4_hba.sli_intf.word0);
10084 return error; 10084 return -ENODEV;
10085 } 10085 }
10086 10086
10087 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf); 10087 if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
@@ -10105,7 +10105,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10105 dev_printk(KERN_ERR, &pdev->dev, 10105 dev_printk(KERN_ERR, &pdev->dev,
10106 "ioremap failed for SLI4 PCI config " 10106 "ioremap failed for SLI4 PCI config "
10107 "registers.\n"); 10107 "registers.\n");
10108 goto out; 10108 return -ENODEV;
10109 } 10109 }
10110 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; 10110 phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
10111 /* Set up BAR0 PCI config space register memory map */ 10111 /* Set up BAR0 PCI config space register memory map */
@@ -10116,7 +10116,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10116 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) { 10116 if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
10117 dev_printk(KERN_ERR, &pdev->dev, 10117 dev_printk(KERN_ERR, &pdev->dev,
10118 "FATAL - No BAR0 mapping for SLI4, if_type 2\n"); 10118 "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
10119 goto out; 10119 return -ENODEV;
10120 } 10120 }
10121 phba->sli4_hba.conf_regs_memmap_p = 10121 phba->sli4_hba.conf_regs_memmap_p =
10122 ioremap(phba->pci_bar0_map, bar0map_len); 10122 ioremap(phba->pci_bar0_map, bar0map_len);
@@ -10124,7 +10124,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10124 dev_printk(KERN_ERR, &pdev->dev, 10124 dev_printk(KERN_ERR, &pdev->dev,
10125 "ioremap failed for SLI4 PCI config " 10125 "ioremap failed for SLI4 PCI config "
10126 "registers.\n"); 10126 "registers.\n");
10127 goto out; 10127 return -ENODEV;
10128 } 10128 }
10129 lpfc_sli4_bar0_register_memmap(phba, if_type); 10129 lpfc_sli4_bar0_register_memmap(phba, if_type);
10130 } 10130 }
@@ -10170,6 +10170,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10170 if (!phba->sli4_hba.drbl_regs_memmap_p) { 10170 if (!phba->sli4_hba.drbl_regs_memmap_p) {
10171 dev_err(&pdev->dev, 10171 dev_err(&pdev->dev,
10172 "ioremap failed for SLI4 HBA doorbell registers.\n"); 10172 "ioremap failed for SLI4 HBA doorbell registers.\n");
10173 error = -ENOMEM;
10173 goto out_iounmap_conf; 10174 goto out_iounmap_conf;
10174 } 10175 }
10175 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; 10176 phba->pci_bar2_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
@@ -10219,6 +10220,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
10219 if (!phba->sli4_hba.dpp_regs_memmap_p) { 10220 if (!phba->sli4_hba.dpp_regs_memmap_p) {
10220 dev_err(&pdev->dev, 10221 dev_err(&pdev->dev,
10221 "ioremap failed for SLI4 HBA dpp registers.\n"); 10222 "ioremap failed for SLI4 HBA dpp registers.\n");
10223 error = -ENOMEM;
10222 goto out_iounmap_ctrl; 10224 goto out_iounmap_ctrl;
10223 } 10225 }
10224 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p; 10226 phba->pci_bar4_memmap_p = phba->sli4_hba.dpp_regs_memmap_p;
@@ -10249,7 +10251,7 @@ out_iounmap_ctrl:
10249 iounmap(phba->sli4_hba.ctrl_regs_memmap_p); 10251 iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
10250out_iounmap_conf: 10252out_iounmap_conf:
10251 iounmap(phba->sli4_hba.conf_regs_memmap_p); 10253 iounmap(phba->sli4_hba.conf_regs_memmap_p);
10252out: 10254
10253 return error; 10255 return error;
10254} 10256}
10255 10257
@@ -11137,7 +11139,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
11137 lpfc_sli4_ras_dma_free(phba); 11139 lpfc_sli4_ras_dma_free(phba);
11138 11140
11139 /* Stop the SLI4 device port */ 11141 /* Stop the SLI4 device port */
11140 phba->pport->work_port_events = 0; 11142 if (phba->pport)
11143 phba->pport->work_port_events = 0;
11141} 11144}
11142 11145
11143 /** 11146 /**
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 55ab9d3ee4ba..1aa00d2c3f74 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -965,7 +965,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
965 struct lpfc_nodelist *ndlp; 965 struct lpfc_nodelist *ndlp;
966 struct lpfc_nvme_fcpreq_priv *freqpriv; 966 struct lpfc_nvme_fcpreq_priv *freqpriv;
967 struct lpfc_nvme_lport *lport; 967 struct lpfc_nvme_lport *lport;
968 uint32_t code, status, idx, cpu; 968 uint32_t code, status, idx;
969 uint16_t cid, sqhd, data; 969 uint16_t cid, sqhd, data;
970 uint32_t *ptr; 970 uint32_t *ptr;
971 971
@@ -1138,6 +1138,7 @@ out_err:
1138 lpfc_nvme_ktime(phba, lpfc_ncmd); 1138 lpfc_nvme_ktime(phba, lpfc_ncmd);
1139 } 1139 }
1140 if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { 1140 if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
1141 uint32_t cpu;
1141 idx = lpfc_ncmd->cur_iocbq.hba_wqidx; 1142 idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
1142 cpu = smp_processor_id(); 1143 cpu = smp_processor_id();
1143 if (cpu < LPFC_CHECK_CPU_CNT) { 1144 if (cpu < LPFC_CHECK_CPU_CNT) {
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index c98f264f1d83..a497b2c0cb79 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3878,10 +3878,9 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
3878 * wake up the thread. 3878 * wake up the thread.
3879 */ 3879 */
3880 spin_lock(&lpfc_cmd->buf_lock); 3880 spin_lock(&lpfc_cmd->buf_lock);
3881 if (unlikely(lpfc_cmd->cur_iocbq.iocb_flag & LPFC_DRIVER_ABORTED)) { 3881 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED;
3882 lpfc_cmd->cur_iocbq.iocb_flag &= ~LPFC_DRIVER_ABORTED; 3882 if (lpfc_cmd->waitq) {
3883 if (lpfc_cmd->waitq) 3883 wake_up(lpfc_cmd->waitq);
3884 wake_up(lpfc_cmd->waitq);
3885 lpfc_cmd->waitq = NULL; 3884 lpfc_cmd->waitq = NULL;
3886 } 3885 }
3887 spin_unlock(&lpfc_cmd->buf_lock); 3886 spin_unlock(&lpfc_cmd->buf_lock);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d0817facdae3..57b4a463b589 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -9881,7 +9881,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
9881 * The WQE can be either 64 or 128 bytes, 9881 * The WQE can be either 64 or 128 bytes,
9882 */ 9882 */
9883 9883
9884 lockdep_assert_held(&phba->hbalock); 9884 lockdep_assert_held(&pring->ring_lock);
9885 9885
9886 if (piocb->sli4_xritag == NO_XRI) { 9886 if (piocb->sli4_xritag == NO_XRI) {
9887 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN || 9887 if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index dace907744a5..293f5cf524d7 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3924,12 +3924,12 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
3924 /* 3924 /*
3925 * The cur_state should not last for more than max_wait secs 3925 * The cur_state should not last for more than max_wait secs
3926 */ 3926 */
3927 for (i = 0; i < max_wait; i++) { 3927 for (i = 0; i < max_wait * 50; i++) {
3928 curr_abs_state = instance->instancet-> 3928 curr_abs_state = instance->instancet->
3929 read_fw_status_reg(instance); 3929 read_fw_status_reg(instance);
3930 3930
3931 if (abs_state == curr_abs_state) { 3931 if (abs_state == curr_abs_state) {
3932 msleep(1000); 3932 msleep(20);
3933 } else 3933 } else
3934 break; 3934 break;
3935 } 3935 }
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index e57774472e75..1d8c584ec1e9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3281 3281
3282 if (smid < ioc->hi_priority_smid) { 3282 if (smid < ioc->hi_priority_smid) {
3283 struct scsiio_tracker *st; 3283 struct scsiio_tracker *st;
3284 void *request;
3284 3285
3285 st = _get_st_from_smid(ioc, smid); 3286 st = _get_st_from_smid(ioc, smid);
3286 if (!st) { 3287 if (!st) {
3287 _base_recovery_check(ioc); 3288 _base_recovery_check(ioc);
3288 return; 3289 return;
3289 } 3290 }
3291
3292 /* Clear MPI request frame */
3293 request = mpt3sas_base_get_msg_frame(ioc, smid);
3294 memset(request, 0, ioc->request_sz);
3295
3290 mpt3sas_base_clear_st(ioc, st); 3296 mpt3sas_base_clear_st(ioc, st);
3291 _base_recovery_check(ioc); 3297 _base_recovery_check(ioc);
3292 return; 3298 return;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8bb5b8f9f4d2..1ccfbc7eebe0 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1462{ 1462{
1463 struct scsi_cmnd *scmd = NULL; 1463 struct scsi_cmnd *scmd = NULL;
1464 struct scsiio_tracker *st; 1464 struct scsiio_tracker *st;
1465 Mpi25SCSIIORequest_t *mpi_request;
1465 1466
1466 if (smid > 0 && 1467 if (smid > 0 &&
1467 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) { 1468 smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
1468 u32 unique_tag = smid - 1; 1469 u32 unique_tag = smid - 1;
1469 1470
1471 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
1472
1473 /*
1474 * If SCSI IO request is outstanding at driver level then
1475 * DevHandle filed must be non-zero. If DevHandle is zero
1476 * then it means that this smid is free at driver level,
1477 * so return NULL.
1478 */
1479 if (!mpi_request->DevHandle)
1480 return scmd;
1481
1470 scmd = scsi_host_find_tag(ioc->shost, unique_tag); 1482 scmd = scsi_host_find_tag(ioc->shost, unique_tag);
1471 if (scmd) { 1483 if (scmd) {
1472 st = scsi_cmd_priv(scmd); 1484 st = scsi_cmd_priv(scmd);
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e74a62448ba4..e5db9a9954dc 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1392,10 +1392,8 @@ static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1392 1392
1393static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) 1393static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi)
1394{ 1394{
1395 struct qedi_nvm_iscsi_image nvm_image;
1396
1397 qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev, 1395 qedi->iscsi_image = dma_alloc_coherent(&qedi->pdev->dev,
1398 sizeof(nvm_image), 1396 sizeof(struct qedi_nvm_iscsi_image),
1399 &qedi->nvm_buf_dma, GFP_KERNEL); 1397 &qedi->nvm_buf_dma, GFP_KERNEL);
1400 if (!qedi->iscsi_image) { 1398 if (!qedi->iscsi_image) {
1401 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); 1399 QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n");
@@ -2236,14 +2234,13 @@ static void qedi_boot_release(void *data)
2236static int qedi_get_boot_info(struct qedi_ctx *qedi) 2234static int qedi_get_boot_info(struct qedi_ctx *qedi)
2237{ 2235{
2238 int ret = 1; 2236 int ret = 1;
2239 struct qedi_nvm_iscsi_image nvm_image;
2240 2237
2241 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, 2238 QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
2242 "Get NVM iSCSI CFG image\n"); 2239 "Get NVM iSCSI CFG image\n");
2243 ret = qedi_ops->common->nvm_get_image(qedi->cdev, 2240 ret = qedi_ops->common->nvm_get_image(qedi->cdev,
2244 QED_NVM_IMAGE_ISCSI_CFG, 2241 QED_NVM_IMAGE_ISCSI_CFG,
2245 (char *)qedi->iscsi_image, 2242 (char *)qedi->iscsi_image,
2246 sizeof(nvm_image)); 2243 sizeof(struct qedi_nvm_iscsi_image));
2247 if (ret) 2244 if (ret)
2248 QEDI_ERR(&qedi->dbg_ctx, 2245 QEDI_ERR(&qedi->dbg_ctx,
2249 "Could not get NVM image. ret = %d\n", ret); 2246 "Could not get NVM image. ret = %d\n", ret);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 2eb1ae721a7d..f928c4d3a1ef 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1652,6 +1652,8 @@ qla2x00_port_speed_store(struct device *dev, struct device_attribute *attr,
1652 } 1652 }
1653 1653
1654 rval = kstrtol(buf, 10, &type); 1654 rval = kstrtol(buf, 10, &type);
1655 if (rval)
1656 return rval;
1655 speed = type; 1657 speed = type;
1656 if (type == 40 || type == 80 || type == 160 || 1658 if (type == 40 || type == 80 || type == 160 ||
1657 type == 320) { 1659 type == 320) {
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index ead17288e2a7..5819a45ac5ef 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -193,6 +193,8 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
193 193
194 for (i = 0; i < vha->hw->max_qpairs; i++) { 194 for (i = 0; i < vha->hw->max_qpairs; i++) {
195 qpair = vha->hw->queue_pair_map[i]; 195 qpair = vha->hw->queue_pair_map[i];
196 if (!qpair)
197 continue;
196 qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd; 198 qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
197 core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf; 199 core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
198 qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio; 200 qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 420045155ba0..0c700b140ce7 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4991,6 +4991,13 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
4991 if ((domain & 0xf0) == 0xf0) 4991 if ((domain & 0xf0) == 0xf0)
4992 continue; 4992 continue;
4993 4993
4994 /* Bypass if not same domain and area of adapter. */
4995 if (area && domain && ((area != vha->d_id.b.area) ||
4996 (domain != vha->d_id.b.domain)) &&
4997 (ha->current_topology == ISP_CFG_NL))
4998 continue;
4999
5000
4994 /* Bypass invalid local loop ID. */ 5001 /* Bypass invalid local loop ID. */
4995 if (loop_id > LAST_LOCAL_LOOP_ID) 5002 if (loop_id > LAST_LOCAL_LOOP_ID)
4996 continue; 5003 continue;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 63f8e3c19841..456a41d2e2c6 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1132,7 +1132,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1132 /* if initiator doing write or target doing read */ 1132 /* if initiator doing write or target doing read */
1133 if (direction_to_device) { 1133 if (direction_to_device) {
1134 for_each_sg(sgl, sg, tot_dsds, i) { 1134 for_each_sg(sgl, sg, tot_dsds, i) {
1135 dma_addr_t sle_phys = sg_phys(sg); 1135 u64 sle_phys = sg_phys(sg);
1136 1136
1137 /* If SGE addr + len flips bits in upper 32-bits */ 1137 /* If SGE addr + len flips bits in upper 32-bits */
1138 if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) { 1138 if (MSD(sle_phys + sg->length) ^ MSD(sle_phys)) {
@@ -1178,7 +1178,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
1178 1178
1179 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023, 1179 ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe023,
1180 "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n", 1180 "%s: sg[%x] (phys=%llx sglen=%x) ldma_sg_len: %x dif_bundl_len: %x ldma_needed: %x\n",
1181 __func__, i, sg_phys(sg), sglen, ldma_sg_len, 1181 __func__, i, (u64)sg_phys(sg), sglen, ldma_sg_len,
1182 difctx->dif_bundl_len, ldma_needed); 1182 difctx->dif_bundl_len, ldma_needed);
1183 1183
1184 while (sglen) { 1184 while (sglen) {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 677f82fdf56f..91f576d743fe 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1517,7 +1517,7 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
1517 goto eh_reset_failed; 1517 goto eh_reset_failed;
1518 } 1518 }
1519 err = 2; 1519 err = 2;
1520 if (do_reset(fcport, cmd->device->lun, blk_mq_rq_cpu(cmd->request) + 1) 1520 if (do_reset(fcport, cmd->device->lun, 1)
1521 != QLA_SUCCESS) { 1521 != QLA_SUCCESS) {
1522 ql_log(ql_log_warn, vha, 0x800c, 1522 ql_log(ql_log_warn, vha, 0x800c,
1523 "do_reset failed for cmd=%p.\n", cmd); 1523 "do_reset failed for cmd=%p.\n", cmd);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 16a18d5d856f..6e4f4931ae17 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
3203 if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) 3203 if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
3204 return -EINVAL; 3204 return -EINVAL;
3205 ep = iscsi_lookup_endpoint(transport_fd); 3205 ep = iscsi_lookup_endpoint(transport_fd);
3206 if (!ep)
3207 return -EINVAL;
3206 conn = cls_conn->dd_data; 3208 conn = cls_conn->dd_data;
3207 qla_conn = conn->dd_data; 3209 qla_conn = conn->dd_data;
3208 qla_conn->qla_ep = ep->dd_data; 3210 qla_conn->qla_ep = ep->dd_data;
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c4cbfd07b916..a08ff3bd6310 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -238,6 +238,7 @@ static struct {
238 {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 238 {"NETAPP", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
239 {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 239 {"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
240 {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH}, 240 {"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
241 {"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
241 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36}, 242 {"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
242 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN}, 243 {"SONY", "CD-ROM CDU-8001", NULL, BLIST_BORKEN},
243 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */ 244 {"SONY", "TSL", NULL, BLIST_FORCELUN}, /* DDS3 & DDS4 autoloaders */
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 5a58cbf3a75d..c14006ac98f9 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -75,6 +75,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
75 {"NETAPP", "INF-01-00", "rdac", }, 75 {"NETAPP", "INF-01-00", "rdac", },
76 {"LSI", "INF-01-00", "rdac", }, 76 {"LSI", "INF-01-00", "rdac", },
77 {"ENGENIO", "INF-01-00", "rdac", }, 77 {"ENGENIO", "INF-01-00", "rdac", },
78 {"LENOVO", "DE_Series", "rdac", },
78 {NULL, NULL, NULL }, 79 {NULL, NULL, NULL },
79}; 80};
80 81
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 20189675677a..601b9f1de267 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -585,10 +585,17 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
585 if (!blk_rq_is_scsi(req)) { 585 if (!blk_rq_is_scsi(req)) {
586 WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED)); 586 WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
587 cmd->flags &= ~SCMD_INITIALIZED; 587 cmd->flags &= ~SCMD_INITIALIZED;
588 destroy_rcu_head(&cmd->rcu);
589 } 588 }
590 589
591 /* 590 /*
591 * Calling rcu_barrier() is not necessary here because the
592 * SCSI error handler guarantees that the function called by
593 * call_rcu() has been called before scsi_end_request() is
594 * called.
595 */
596 destroy_rcu_head(&cmd->rcu);
597
598 /*
592 * In the MQ case the command gets freed by __blk_mq_end_request, 599 * In the MQ case the command gets freed by __blk_mq_end_request,
593 * so we have to do all cleanup that depends on it earlier. 600 * so we have to do all cleanup that depends on it earlier.
594 * 601 *
@@ -2541,8 +2548,10 @@ void scsi_device_resume(struct scsi_device *sdev)
2541 * device deleted during suspend) 2548 * device deleted during suspend)
2542 */ 2549 */
2543 mutex_lock(&sdev->state_mutex); 2550 mutex_lock(&sdev->state_mutex);
2544 sdev->quiesced_by = NULL; 2551 if (sdev->quiesced_by) {
2545 blk_clear_pm_only(sdev->request_queue); 2552 sdev->quiesced_by = NULL;
2553 blk_clear_pm_only(sdev->request_queue);
2554 }
2546 if (sdev->sdev_state == SDEV_QUIESCE) 2555 if (sdev->sdev_state == SDEV_QUIESCE)
2547 scsi_device_set_state(sdev, SDEV_RUNNING); 2556 scsi_device_set_state(sdev, SDEV_RUNNING);
2548 mutex_unlock(&sdev->state_mutex); 2557 mutex_unlock(&sdev->state_mutex);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 6a9040faed00..3b119ca0cc0c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr,
771 771
772 mutex_lock(&sdev->state_mutex); 772 mutex_lock(&sdev->state_mutex);
773 ret = scsi_device_set_state(sdev, state); 773 ret = scsi_device_set_state(sdev, state);
774 /*
775 * If the device state changes to SDEV_RUNNING, we need to run
776 * the queue to avoid I/O hang.
777 */
778 if (ret == 0 && state == SDEV_RUNNING)
779 blk_mq_run_hw_queues(sdev->request_queue, true);
774 mutex_unlock(&sdev->state_mutex); 780 mutex_unlock(&sdev->state_mutex);
775 781
776 return ret == 0 ? count : -EINVAL; 782 return ret == 0 ? count : -EINVAL;
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0508831d6fb9..0a82e93566dc 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2200,6 +2200,8 @@ void iscsi_remove_session(struct iscsi_cls_session *session)
2200 scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE); 2200 scsi_target_unblock(&session->dev, SDEV_TRANSPORT_OFFLINE);
2201 /* flush running scans then delete devices */ 2201 /* flush running scans then delete devices */
2202 flush_work(&session->scan_work); 2202 flush_work(&session->scan_work);
2203 /* flush running unbind operations */
2204 flush_work(&session->unbind_work);
2203 __iscsi_unbind_session(&session->unbind_work); 2205 __iscsi_unbind_session(&session->unbind_work);
2204 2206
2205 /* hw iscsi may not have removed all connections from session */ 2207 /* hw iscsi may not have removed all connections from session */
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 251db30d0882..2b2bc4b49d78 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1415,11 +1415,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
1415 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); 1415 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
1416 } 1416 }
1417 1417
1418 /*
1419 * XXX and what if there are packets in flight and this close()
1420 * XXX is followed by a "rmmod sd_mod"?
1421 */
1422
1423 scsi_disk_put(sdkp); 1418 scsi_disk_put(sdkp);
1424} 1419}
1425 1420
@@ -3076,6 +3071,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
3076 unsigned int opt_xfer_bytes = 3071 unsigned int opt_xfer_bytes =
3077 logical_to_bytes(sdp, sdkp->opt_xfer_blocks); 3072 logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3078 3073
3074 if (sdkp->opt_xfer_blocks == 0)
3075 return false;
3076
3079 if (sdkp->opt_xfer_blocks > dev_max) { 3077 if (sdkp->opt_xfer_blocks > dev_max) {
3080 sd_first_printk(KERN_WARNING, sdkp, 3078 sd_first_printk(KERN_WARNING, sdkp,
3081 "Optimal transfer size %u logical blocks " \ 3079 "Optimal transfer size %u logical blocks " \
@@ -3505,9 +3503,21 @@ static void scsi_disk_release(struct device *dev)
3505{ 3503{
3506 struct scsi_disk *sdkp = to_scsi_disk(dev); 3504 struct scsi_disk *sdkp = to_scsi_disk(dev);
3507 struct gendisk *disk = sdkp->disk; 3505 struct gendisk *disk = sdkp->disk;
3508 3506 struct request_queue *q = disk->queue;
3507
3509 ida_free(&sd_index_ida, sdkp->index); 3508 ida_free(&sd_index_ida, sdkp->index);
3510 3509
3510 /*
3511 * Wait until all requests that are in progress have completed.
3512 * This is necessary to avoid that e.g. scsi_end_request() crashes
3513 * due to clearing the disk->private_data pointer. Wait from inside
3514 * scsi_disk_release() instead of from sd_release() to avoid that
3515 * freezing and unfreezing the request queue affects user space I/O
3516 * in case multiple processes open a /dev/sd... node concurrently.
3517 */
3518 blk_mq_freeze_queue(q);
3519 blk_mq_unfreeze_queue(q);
3520
3511 disk->private_data = NULL; 3521 disk->private_data = NULL;
3512 put_disk(disk); 3522 put_disk(disk);
3513 put_device(&sdkp->device->sdev_gendev); 3523 put_device(&sdkp->device->sdev_gendev);
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 5d9ccbab7581..75ec43aa8df3 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -2764,6 +2764,12 @@ static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2764 sshdr.sense_key == HARDWARE_ERROR && 2764 sshdr.sense_key == HARDWARE_ERROR &&
2765 sshdr.asc == 0x3e && 2765 sshdr.asc == 0x3e &&
2766 sshdr.ascq == 0x1) { 2766 sshdr.ascq == 0x1) {
2767 struct pqi_ctrl_info *ctrl_info = shost_to_hba(scmd->device->host);
2768 struct pqi_scsi_dev *device = scmd->device->hostdata;
2769
2770 if (printk_ratelimit())
2771 scmd_printk(KERN_ERR, scmd, "received 'logical unit failure' from controller for scsi %d:%d:%d:%d\n",
2772 ctrl_info->scsi_host->host_no, device->bus, device->target, device->lun);
2767 pqi_take_device_offline(scmd->device, "RAID"); 2773 pqi_take_device_offline(scmd->device, "RAID");
2768 host_byte = DID_NO_CONNECT; 2774 host_byte = DID_NO_CONNECT;
2769 } 2775 }
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 84380bae20f1..8472de1007ff 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -385,7 +385,7 @@ enum storvsc_request_type {
385 * This is the end of Protocol specific defines. 385 * This is the end of Protocol specific defines.
386 */ 386 */
387 387
388static int storvsc_ringbuffer_size = (256 * PAGE_SIZE); 388static int storvsc_ringbuffer_size = (128 * 1024);
389static u32 max_outstanding_req_per_channel; 389static u32 max_outstanding_req_per_channel;
390 390
391static int storvsc_vcpus_per_sub_channel = 4; 391static int storvsc_vcpus_per_sub_channel = 4;
@@ -668,13 +668,22 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
668{ 668{
669 struct device *dev = &device->device; 669 struct device *dev = &device->device;
670 struct storvsc_device *stor_device; 670 struct storvsc_device *stor_device;
671 int num_cpus = num_online_cpus();
672 int num_sc; 671 int num_sc;
673 struct storvsc_cmd_request *request; 672 struct storvsc_cmd_request *request;
674 struct vstor_packet *vstor_packet; 673 struct vstor_packet *vstor_packet;
675 int ret, t; 674 int ret, t;
676 675
677 num_sc = ((max_chns > num_cpus) ? num_cpus : max_chns); 676 /*
677 * If the number of CPUs is artificially restricted, such as
678 * with maxcpus=1 on the kernel boot line, Hyper-V could offer
679 * sub-channels >= the number of CPUs. These sub-channels
680 * should not be created. The primary channel is already created
681 * and assigned to one CPU, so check against # CPUs - 1.
682 */
683 num_sc = min((int)(num_online_cpus() - 1), max_chns);
684 if (!num_sc)
685 return;
686
678 stor_device = get_out_stor_device(device); 687 stor_device = get_out_stor_device(device);
679 if (!stor_device) 688 if (!stor_device)
680 return; 689 return;
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index f2d3df357a97..0e855b5afe82 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -640,7 +640,7 @@ static int ufs_hi3670_init(struct ufs_hba *hba)
640 return 0; 640 return 0;
641} 641}
642 642
643static struct ufs_hba_variant_ops ufs_hba_hi3660_vops = { 643static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
644 .name = "hi3660", 644 .name = "hi3660",
645 .init = ufs_hi3660_init, 645 .init = ufs_hi3660_init,
646 .link_startup_notify = ufs_hisi_link_startup_notify, 646 .link_startup_notify = ufs_hisi_link_startup_notify,
@@ -649,7 +649,7 @@ static struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
649 .resume = ufs_hisi_resume, 649 .resume = ufs_hisi_resume,
650}; 650};
651 651
652static struct ufs_hba_variant_ops ufs_hba_hi3670_vops = { 652static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
653 .name = "hi3670", 653 .name = "hi3670",
654 .init = ufs_hi3670_init, 654 .init = ufs_hi3670_init,
655 .link_startup_notify = ufs_hisi_link_startup_notify, 655 .link_startup_notify = ufs_hisi_link_startup_notify,
@@ -669,13 +669,10 @@ MODULE_DEVICE_TABLE(of, ufs_hisi_of_match);
669static int ufs_hisi_probe(struct platform_device *pdev) 669static int ufs_hisi_probe(struct platform_device *pdev)
670{ 670{
671 const struct of_device_id *of_id; 671 const struct of_device_id *of_id;
672 struct ufs_hba_variant_ops *vops;
673 struct device *dev = &pdev->dev;
674 672
675 of_id = of_match_node(ufs_hisi_of_match, dev->of_node); 673 of_id = of_match_node(ufs_hisi_of_match, pdev->dev.of_node);
676 vops = (struct ufs_hba_variant_ops *)of_id->data;
677 674
678 return ufshcd_pltfrm_init(pdev, vops); 675 return ufshcd_pltfrm_init(pdev, of_id->data);
679} 676}
680 677
681static int ufs_hisi_remove(struct platform_device *pdev) 678static int ufs_hisi_remove(struct platform_device *pdev)
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 895a9b5ac989..27213676329c 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -297,7 +297,7 @@ static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
297 * Returns 0 on success, non-zero value on failure 297 * Returns 0 on success, non-zero value on failure
298 */ 298 */
299int ufshcd_pltfrm_init(struct platform_device *pdev, 299int ufshcd_pltfrm_init(struct platform_device *pdev,
300 struct ufs_hba_variant_ops *vops) 300 const struct ufs_hba_variant_ops *vops)
301{ 301{
302 struct ufs_hba *hba; 302 struct ufs_hba *hba;
303 void __iomem *mmio_base; 303 void __iomem *mmio_base;
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h
index df64c4180340..1f29e1fd6d52 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.h
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.h
@@ -17,7 +17,7 @@
17#include "ufshcd.h" 17#include "ufshcd.h"
18 18
19int ufshcd_pltfrm_init(struct platform_device *pdev, 19int ufshcd_pltfrm_init(struct platform_device *pdev,
20 struct ufs_hba_variant_ops *vops); 20 const struct ufs_hba_variant_ops *vops);
21void ufshcd_pltfrm_shutdown(struct platform_device *pdev); 21void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
22 22
23#ifdef CONFIG_PM 23#ifdef CONFIG_PM
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 69ba7445d2b3..ecfa898b9ccc 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -546,7 +546,7 @@ struct ufs_hba {
546 int nutrs; 546 int nutrs;
547 int nutmrs; 547 int nutmrs;
548 u32 ufs_version; 548 u32 ufs_version;
549 struct ufs_hba_variant_ops *vops; 549 const struct ufs_hba_variant_ops *vops;
550 void *priv; 550 void *priv;
551 unsigned int irq; 551 unsigned int irq;
552 bool is_irq_enabled; 552 bool is_irq_enabled;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 1a6f150cd2d8..8af01777d09c 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -586,7 +586,6 @@ static int virtscsi_device_reset(struct scsi_cmnd *sc)
586 return FAILED; 586 return FAILED;
587 587
588 memset(cmd, 0, sizeof(*cmd)); 588 memset(cmd, 0, sizeof(*cmd));
589 cmd->sc = sc;
590 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ 589 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
591 .type = VIRTIO_SCSI_T_TMF, 590 .type = VIRTIO_SCSI_T_TMF,
592 .subtype = cpu_to_virtio32(vscsi->vdev, 591 .subtype = cpu_to_virtio32(vscsi->vdev,
@@ -645,7 +644,6 @@ static int virtscsi_abort(struct scsi_cmnd *sc)
645 return FAILED; 644 return FAILED;
646 645
647 memset(cmd, 0, sizeof(*cmd)); 646 memset(cmd, 0, sizeof(*cmd));
648 cmd->sc = sc;
649 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ 647 cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
650 .type = VIRTIO_SCSI_T_TMF, 648 .type = VIRTIO_SCSI_T_TMF,
651 .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK, 649 .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
diff --git a/drivers/soc/bcm/bcm2835-power.c b/drivers/soc/bcm/bcm2835-power.c
index 9351349cf0a9..1e0041ec8132 100644
--- a/drivers/soc/bcm/bcm2835-power.c
+++ b/drivers/soc/bcm/bcm2835-power.c
@@ -150,7 +150,12 @@ struct bcm2835_power {
150 150
151static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg) 151static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
152{ 152{
153 u64 start = ktime_get_ns(); 153 u64 start;
154
155 if (!reg)
156 return 0;
157
158 start = ktime_get_ns();
154 159
155 /* Enable the module's async AXI bridges. */ 160 /* Enable the module's async AXI bridges. */
156 ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP); 161 ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
@@ -165,7 +170,12 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
165 170
166static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg) 171static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
167{ 172{
168 u64 start = ktime_get_ns(); 173 u64 start;
174
175 if (!reg)
176 return 0;
177
178 start = ktime_get_ns();
169 179
170 /* Enable the module's async AXI bridges. */ 180 /* Enable the module's async AXI bridges. */
171 ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP); 181 ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
@@ -475,7 +485,7 @@ static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
475 } 485 }
476} 486}
477 487
478static void 488static int
479bcm2835_init_power_domain(struct bcm2835_power *power, 489bcm2835_init_power_domain(struct bcm2835_power *power,
480 int pd_xlate_index, const char *name) 490 int pd_xlate_index, const char *name)
481{ 491{
@@ -483,6 +493,17 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
483 struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index]; 493 struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
484 494
485 dom->clk = devm_clk_get(dev->parent, name); 495 dom->clk = devm_clk_get(dev->parent, name);
496 if (IS_ERR(dom->clk)) {
497 int ret = PTR_ERR(dom->clk);
498
499 if (ret == -EPROBE_DEFER)
500 return ret;
501
502 /* Some domains don't have a clk, so make sure that we
503 * don't deref an error pointer later.
504 */
505 dom->clk = NULL;
506 }
486 507
487 dom->base.name = name; 508 dom->base.name = name;
488 dom->base.power_on = bcm2835_power_pd_power_on; 509 dom->base.power_on = bcm2835_power_pd_power_on;
@@ -495,6 +516,8 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
495 pm_genpd_init(&dom->base, NULL, true); 516 pm_genpd_init(&dom->base, NULL, true);
496 517
497 power->pd_xlate.domains[pd_xlate_index] = &dom->base; 518 power->pd_xlate.domains[pd_xlate_index] = &dom->base;
519
520 return 0;
498} 521}
499 522
500/** bcm2835_reset_reset - Resets a block that has a reset line in the 523/** bcm2835_reset_reset - Resets a block that has a reset line in the
@@ -592,7 +615,7 @@ static int bcm2835_power_probe(struct platform_device *pdev)
592 { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 }, 615 { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
593 { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 }, 616 { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
594 }; 617 };
595 int ret, i; 618 int ret = 0, i;
596 u32 id; 619 u32 id;
597 620
598 power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL); 621 power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
@@ -619,8 +642,11 @@ static int bcm2835_power_probe(struct platform_device *pdev)
619 642
620 power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names); 643 power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
621 644
622 for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) 645 for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
623 bcm2835_init_power_domain(power, i, power_domain_names[i]); 646 ret = bcm2835_init_power_domain(power, i, power_domain_names[i]);
647 if (ret)
648 goto fail;
649 }
624 650
625 for (i = 0; i < ARRAY_SIZE(domain_deps); i++) { 651 for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
626 pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base, 652 pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
@@ -634,12 +660,21 @@ static int bcm2835_power_probe(struct platform_device *pdev)
634 660
635 ret = devm_reset_controller_register(dev, &power->reset); 661 ret = devm_reset_controller_register(dev, &power->reset);
636 if (ret) 662 if (ret)
637 return ret; 663 goto fail;
638 664
639 of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate); 665 of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
640 666
641 dev_info(dev, "Broadcom BCM2835 power domains driver"); 667 dev_info(dev, "Broadcom BCM2835 power domains driver");
642 return 0; 668 return 0;
669
670fail:
671 for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
672 struct generic_pm_domain *dom = &power->domains[i].base;
673
674 if (dom->name)
675 pm_genpd_remove(dom);
676 }
677 return ret;
643} 678}
644 679
645static int bcm2835_power_remove(struct platform_device *pdev) 680static int bcm2835_power_remove(struct platform_device *pdev)
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index c0901b96cfe4..62951e836cbc 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -114,8 +114,6 @@ source "drivers/staging/ralink-gdma/Kconfig"
114 114
115source "drivers/staging/mt7621-mmc/Kconfig" 115source "drivers/staging/mt7621-mmc/Kconfig"
116 116
117source "drivers/staging/mt7621-eth/Kconfig"
118
119source "drivers/staging/mt7621-dts/Kconfig" 117source "drivers/staging/mt7621-dts/Kconfig"
120 118
121source "drivers/staging/gasket/Kconfig" 119source "drivers/staging/gasket/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 57c6bce13ff4..d1b17ddcd354 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_SPI_MT7621) += mt7621-spi/
47obj-$(CONFIG_SOC_MT7621) += mt7621-dma/ 47obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
48obj-$(CONFIG_DMA_RALINK) += ralink-gdma/ 48obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
49obj-$(CONFIG_MTK_MMC) += mt7621-mmc/ 49obj-$(CONFIG_MTK_MMC) += mt7621-mmc/
50obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mt7621-eth/
51obj-$(CONFIG_SOC_MT7621) += mt7621-dts/ 50obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
52obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/ 51obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
53obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/ 52obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
diff --git a/drivers/staging/axis-fifo/Kconfig b/drivers/staging/axis-fifo/Kconfig
index 687537203d9c..d9725888af6f 100644
--- a/drivers/staging/axis-fifo/Kconfig
+++ b/drivers/staging/axis-fifo/Kconfig
@@ -3,6 +3,7 @@
3# 3#
4config XIL_AXIS_FIFO 4config XIL_AXIS_FIFO
5 tristate "Xilinx AXI-Stream FIFO IP core driver" 5 tristate "Xilinx AXI-Stream FIFO IP core driver"
6 depends on OF
6 default n 7 default n
7 help 8 help
8 This adds support for the Xilinx AXI-Stream 9 This adds support for the Xilinx AXI-Stream
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index a7d569cfca5d..0dff1ac057cd 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
1001 unsigned int mask); 1001 unsigned int mask);
1002unsigned int comedi_dio_update_state(struct comedi_subdevice *s, 1002unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
1003 unsigned int *data); 1003 unsigned int *data);
1004unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
1005 struct comedi_cmd *cmd);
1004unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s); 1006unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
1005unsigned int comedi_nscans_left(struct comedi_subdevice *s, 1007unsigned int comedi_nscans_left(struct comedi_subdevice *s,
1006 unsigned int nscans); 1008 unsigned int nscans);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index eefa62f42c0f..5a32b8fc000e 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
394EXPORT_SYMBOL_GPL(comedi_dio_update_state); 394EXPORT_SYMBOL_GPL(comedi_dio_update_state);
395 395
396/** 396/**
397 * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes 397 * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
398 * bytes
398 * @s: COMEDI subdevice. 399 * @s: COMEDI subdevice.
400 * @cmd: COMEDI command.
399 * 401 *
400 * Determines the overall scan length according to the subdevice type and the 402 * Determines the overall scan length according to the subdevice type and the
401 * number of channels in the scan. 403 * number of channels in the scan for the specified command.
402 * 404 *
403 * For digital input, output or input/output subdevices, samples for 405 * For digital input, output or input/output subdevices, samples for
404 * multiple channels are assumed to be packed into one or more unsigned 406 * multiple channels are assumed to be packed into one or more unsigned
@@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
408 * 410 *
409 * Returns the overall scan length in bytes. 411 * Returns the overall scan length in bytes.
410 */ 412 */
411unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s) 413unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
414 struct comedi_cmd *cmd)
412{ 415{
413 struct comedi_cmd *cmd = &s->async->cmd;
414 unsigned int num_samples; 416 unsigned int num_samples;
415 unsigned int bits_per_sample; 417 unsigned int bits_per_sample;
416 418
@@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
427 } 429 }
428 return comedi_samples_to_bytes(s, num_samples); 430 return comedi_samples_to_bytes(s, num_samples);
429} 431}
432EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
433
434/**
435 * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
436 * @s: COMEDI subdevice.
437 *
438 * Determines the overall scan length according to the subdevice type and the
439 * number of channels in the scan for the current command.
440 *
441 * For digital input, output or input/output subdevices, samples for
442 * multiple channels are assumed to be packed into one or more unsigned
443 * short or unsigned int values according to the subdevice's %SDF_LSAMPL
444 * flag. For other types of subdevice, samples are assumed to occupy a
445 * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
446 *
447 * Returns the overall scan length in bytes.
448 */
449unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
450{
451 struct comedi_cmd *cmd = &s->async->cmd;
452
453 return comedi_bytes_per_scan_cmd(s, cmd);
454}
430EXPORT_SYMBOL_GPL(comedi_bytes_per_scan); 455EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
431 456
432static unsigned int __comedi_nscans_left(struct comedi_subdevice *s, 457static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 5edf59ac6706..b04dad8c7092 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -3545,6 +3545,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
3545 struct comedi_subdevice *s, struct comedi_cmd *cmd) 3545 struct comedi_subdevice *s, struct comedi_cmd *cmd)
3546{ 3546{
3547 struct ni_private *devpriv = dev->private; 3547 struct ni_private *devpriv = dev->private;
3548 unsigned int bytes_per_scan;
3548 int err = 0; 3549 int err = 0;
3549 3550
3550 /* Step 1 : check if triggers are trivially valid */ 3551 /* Step 1 : check if triggers are trivially valid */
@@ -3579,9 +3580,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
3579 err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0); 3580 err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
3580 err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg, 3581 err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
3581 cmd->chanlist_len); 3582 cmd->chanlist_len);
3582 err |= comedi_check_trigger_arg_max(&cmd->stop_arg, 3583 bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
3583 s->async->prealloc_bufsz / 3584 if (bytes_per_scan) {
3584 comedi_bytes_per_scan(s)); 3585 err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
3586 s->async->prealloc_bufsz /
3587 bytes_per_scan);
3588 }
3585 3589
3586 if (err) 3590 if (err)
3587 return 3; 3591 return 3;
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index 829f7b12e0dc..9bbc68729c11 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
23 [EROFS_FT_SYMLINK] = DT_LNK, 23 [EROFS_FT_SYMLINK] = DT_LNK,
24}; 24};
25 25
26static void debug_one_dentry(unsigned char d_type, const char *de_name,
27 unsigned int de_namelen)
28{
29#ifdef CONFIG_EROFS_FS_DEBUG
30 /* since the on-disk name could not have the trailing '\0' */
31 unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
32
33 memcpy(dbg_namebuf, de_name, de_namelen);
34 dbg_namebuf[de_namelen] = '\0';
35
36 debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
37 de_namelen, d_type);
38#endif
39}
40
26static int erofs_fill_dentries(struct dir_context *ctx, 41static int erofs_fill_dentries(struct dir_context *ctx,
27 void *dentry_blk, unsigned int *ofs, 42 void *dentry_blk, unsigned int *ofs,
28 unsigned int nameoff, unsigned int maxsize) 43 unsigned int nameoff, unsigned int maxsize)
@@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
33 de = dentry_blk + *ofs; 48 de = dentry_blk + *ofs;
34 while (de < end) { 49 while (de < end) {
35 const char *de_name; 50 const char *de_name;
36 int de_namelen; 51 unsigned int de_namelen;
37 unsigned char d_type; 52 unsigned char d_type;
38#ifdef CONFIG_EROFS_FS_DEBUG
39 unsigned int dbg_namelen;
40 unsigned char dbg_namebuf[EROFS_NAME_LEN];
41#endif
42 53
43 if (unlikely(de->file_type < EROFS_FT_MAX)) 54 if (de->file_type < EROFS_FT_MAX)
44 d_type = erofs_filetype_table[de->file_type]; 55 d_type = erofs_filetype_table[de->file_type];
45 else 56 else
46 d_type = DT_UNKNOWN; 57 d_type = DT_UNKNOWN;
@@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
48 nameoff = le16_to_cpu(de->nameoff); 59 nameoff = le16_to_cpu(de->nameoff);
49 de_name = (char *)dentry_blk + nameoff; 60 de_name = (char *)dentry_blk + nameoff;
50 61
51 de_namelen = unlikely(de + 1 >= end) ? 62 /* the last dirent in the block? */
52 /* last directory entry */ 63 if (de + 1 >= end)
53 strnlen(de_name, maxsize - nameoff) : 64 de_namelen = strnlen(de_name, maxsize - nameoff);
54 le16_to_cpu(de[1].nameoff) - nameoff; 65 else
66 de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
55 67
56 /* a corrupted entry is found */ 68 /* a corrupted entry is found */
57 if (unlikely(de_namelen < 0)) { 69 if (unlikely(nameoff + de_namelen > maxsize ||
70 de_namelen > EROFS_NAME_LEN)) {
58 DBG_BUGON(1); 71 DBG_BUGON(1);
59 return -EIO; 72 return -EIO;
60 } 73 }
61 74
62#ifdef CONFIG_EROFS_FS_DEBUG 75 debug_one_dentry(d_type, de_name, de_namelen);
63 dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
64 memcpy(dbg_namebuf, de_name, dbg_namelen);
65 dbg_namebuf[dbg_namelen] = '\0';
66
67 debugln("%s, found de_name %s de_len %d d_type %d", __func__,
68 dbg_namebuf, de_namelen, d_type);
69#endif
70
71 if (!dir_emit(ctx, de_name, de_namelen, 76 if (!dir_emit(ctx, de_name, de_namelen,
72 le64_to_cpu(de->nid), d_type)) 77 le64_to_cpu(de->nid), d_type))
73 /* stopped by some reason */ 78 /* stopped by some reason */
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 8715bc50e09c..31eef8395774 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -972,6 +972,7 @@ repeat:
972 overlapped = false; 972 overlapped = false;
973 compressed_pages = grp->compressed_pages; 973 compressed_pages = grp->compressed_pages;
974 974
975 err = 0;
975 for (i = 0; i < clusterpages; ++i) { 976 for (i = 0; i < clusterpages; ++i) {
976 unsigned int pagenr; 977 unsigned int pagenr;
977 978
@@ -981,26 +982,39 @@ repeat:
981 DBG_BUGON(!page); 982 DBG_BUGON(!page);
982 DBG_BUGON(!page->mapping); 983 DBG_BUGON(!page->mapping);
983 984
984 if (z_erofs_is_stagingpage(page)) 985 if (!z_erofs_is_stagingpage(page)) {
985 continue;
986#ifdef EROFS_FS_HAS_MANAGED_CACHE 986#ifdef EROFS_FS_HAS_MANAGED_CACHE
987 if (page->mapping == MNGD_MAPPING(sbi)) { 987 if (page->mapping == MNGD_MAPPING(sbi)) {
988 DBG_BUGON(!PageUptodate(page)); 988 if (unlikely(!PageUptodate(page)))
989 continue; 989 err = -EIO;
990 } 990 continue;
991 }
991#endif 992#endif
992 993
993 /* only non-head page could be reused as a compressed page */ 994 /*
994 pagenr = z_erofs_onlinepage_index(page); 995 * only if non-head page can be selected
996 * for inplace decompression
997 */
998 pagenr = z_erofs_onlinepage_index(page);
995 999
996 DBG_BUGON(pagenr >= nr_pages); 1000 DBG_BUGON(pagenr >= nr_pages);
997 DBG_BUGON(pages[pagenr]); 1001 DBG_BUGON(pages[pagenr]);
998 ++sparsemem_pages; 1002 ++sparsemem_pages;
999 pages[pagenr] = page; 1003 pages[pagenr] = page;
1000 1004
1001 overlapped = true; 1005 overlapped = true;
1006 }
1007
1008 /* PG_error needs checking for inplaced and staging pages */
1009 if (unlikely(PageError(page))) {
1010 DBG_BUGON(PageUptodate(page));
1011 err = -EIO;
1012 }
1002 } 1013 }
1003 1014
1015 if (unlikely(err))
1016 goto out;
1017
1004 llen = (nr_pages << PAGE_SHIFT) - work->pageofs; 1018 llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
1005 1019
1006 if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) { 1020 if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
@@ -1029,6 +1043,10 @@ repeat:
1029 1043
1030skip_allocpage: 1044skip_allocpage:
1031 vout = erofs_vmap(pages, nr_pages); 1045 vout = erofs_vmap(pages, nr_pages);
1046 if (!vout) {
1047 err = -ENOMEM;
1048 goto out;
1049 }
1032 1050
1033 err = z_erofs_vle_unzip_vmap(compressed_pages, 1051 err = z_erofs_vle_unzip_vmap(compressed_pages,
1034 clusterpages, vout, llen, work->pageofs, overlapped); 1052 clusterpages, vout, llen, work->pageofs, overlapped);
@@ -1194,6 +1212,7 @@ repeat:
1194 if (page->mapping == mc) { 1212 if (page->mapping == mc) {
1195 WRITE_ONCE(grp->compressed_pages[nr], page); 1213 WRITE_ONCE(grp->compressed_pages[nr], page);
1196 1214
1215 ClearPageError(page);
1197 if (!PagePrivate(page)) { 1216 if (!PagePrivate(page)) {
1198 /* 1217 /*
1199 * impossible to be !PagePrivate(page) for 1218 * impossible to be !PagePrivate(page) for
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index 48b263a2731a..0daac9b984a8 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -136,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
136 136
137 nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE); 137 nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
138 138
139 if (clusterpages == 1) 139 if (clusterpages == 1) {
140 vin = kmap_atomic(compressed_pages[0]); 140 vin = kmap_atomic(compressed_pages[0]);
141 else 141 } else {
142 vin = erofs_vmap(compressed_pages, clusterpages); 142 vin = erofs_vmap(compressed_pages, clusterpages);
143 if (!vin)
144 return -ENOMEM;
145 }
143 146
144 preempt_disable(); 147 preempt_disable();
145 vout = erofs_pcpubuf[smp_processor_id()].data; 148 vout = erofs_pcpubuf[smp_processor_id()].data;
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index b73385540216..250c15ace2a7 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -117,22 +117,6 @@
117 status = "okay"; 117 status = "okay";
118}; 118};
119 119
120&ethernet {
121 //mtd-mac-address = <&factory 0xe000>;
122 gmac1: mac@0 {
123 compatible = "mediatek,eth-mac";
124 reg = <0>;
125 phy-handle = <&phy1>;
126 };
127
128 mdio-bus {
129 phy1: ethernet-phy@1 {
130 reg = <1>;
131 phy-mode = "rgmii";
132 };
133 };
134};
135
136&pinctrl { 120&pinctrl {
137 state_default: pinctrl0 { 121 state_default: pinctrl0 {
138 gpio { 122 gpio {
@@ -141,3 +125,16 @@
141 }; 125 };
142 }; 126 };
143}; 127};
128
129&switch0 {
130 ports {
131 port@0 {
132 label = "ethblack";
133 status = "ok";
134 };
135 port@4 {
136 label = "ethblue";
137 status = "ok";
138 };
139 };
140};
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 6aff3680ce4b..17020e24abd2 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -372,16 +372,83 @@
372 372
373 mediatek,ethsys = <&ethsys>; 373 mediatek,ethsys = <&ethsys>;
374 374
375 mediatek,switch = <&gsw>;
376 375
376 gmac0: mac@0 {
377 compatible = "mediatek,eth-mac";
378 reg = <0>;
379 phy-mode = "rgmii";
380 fixed-link {
381 speed = <1000>;
382 full-duplex;
383 pause;
384 };
385 };
386 gmac1: mac@1 {
387 compatible = "mediatek,eth-mac";
388 reg = <1>;
389 status = "off";
390 phy-mode = "rgmii";
391 phy-handle = <&phy5>;
392 };
377 mdio-bus { 393 mdio-bus {
378 #address-cells = <1>; 394 #address-cells = <1>;
379 #size-cells = <0>; 395 #size-cells = <0>;
380 396
381 phy1f: ethernet-phy@1f { 397 phy5: ethernet-phy@5 {
382 reg = <0x1f>; 398 reg = <5>;
383 phy-mode = "rgmii"; 399 phy-mode = "rgmii";
384 }; 400 };
401
402 switch0: switch0@0 {
403 compatible = "mediatek,mt7621";
404 #address-cells = <1>;
405 #size-cells = <0>;
406 reg = <0>;
407 mediatek,mcm;
408 resets = <&rstctrl 2>;
409 reset-names = "mcm";
410
411 ports {
412 #address-cells = <1>;
413 #size-cells = <0>;
414 reg = <0>;
415 port@0 {
416 status = "off";
417 reg = <0>;
418 label = "lan0";
419 };
420 port@1 {
421 status = "off";
422 reg = <1>;
423 label = "lan1";
424 };
425 port@2 {
426 status = "off";
427 reg = <2>;
428 label = "lan2";
429 };
430 port@3 {
431 status = "off";
432 reg = <3>;
433 label = "lan3";
434 };
435 port@4 {
436 status = "off";
437 reg = <4>;
438 label = "lan4";
439 };
440 port@6 {
441 reg = <6>;
442 label = "cpu";
443 ethernet = <&gmac0>;
444 phy-mode = "trgmii";
445 fixed-link {
446 speed = <1000>;
447 full-duplex;
448 };
449 };
450 };
451 };
385 }; 452 };
386 }; 453 };
387 454
diff --git a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt b/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
deleted file mode 100644
index 596b38552697..000000000000
--- a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
+++ /dev/null
@@ -1,48 +0,0 @@
1Mediatek Gigabit Switch
2=======================
3
4The mediatek gigabit switch can be found on Mediatek SoCs.
5
6Required properties:
7- compatible: Should be "mediatek,mt7620-gsw", "mediatek,mt7621-gsw",
8 "mediatek,mt7623-gsw"
9- reg: Address and length of the register set for the device
10- interrupts: Should contain the gigabit switches interrupt
11
12
13Additional required properties for ARM based SoCs:
14- mediatek,reset-pin: phandle describing the reset GPIO
15- clocks: the clocks used by the switch
16- clock-names: the names of the clocks listed in the clocks property
17 these should be "trgpll", "esw", "gp2", "gp1"
18- mt7530-supply: the phandle of the regulator used to power the switch
19- mediatek,pctl-regmap: phandle to the port control regmap. this is used to
20 setup the drive current
21
22
23Optional properties:
24- interrupt-parent: Should be the phandle for the interrupt controller
25 that services interrupts for this device
26
27Example:
28
29gsw: switch@1b100000 {
30 compatible = "mediatek,mt7623-gsw";
31 reg = <0 0x1b110000 0 0x300000>;
32
33 interrupt-parent = <&pio>;
34 interrupts = <168 IRQ_TYPE_EDGE_RISING>;
35
36 clocks = <&apmixedsys CLK_APMIXED_TRGPLL>,
37 <&ethsys CLK_ETHSYS_ESW>,
38 <&ethsys CLK_ETHSYS_GP2>,
39 <&ethsys CLK_ETHSYS_GP1>;
40 clock-names = "trgpll", "esw", "gp2", "gp1";
41
42 mt7530-supply = <&mt6323_vpa_reg>;
43
44 mediatek,pctl-regmap = <&syscfg_pctl_a>;
45 mediatek,reset-pin = <&pio 15 0>;
46
47 status = "okay";
48};
diff --git a/drivers/staging/mt7621-eth/Kconfig b/drivers/staging/mt7621-eth/Kconfig
deleted file mode 100644
index 44ea86c7a96c..000000000000
--- a/drivers/staging/mt7621-eth/Kconfig
+++ /dev/null
@@ -1,39 +0,0 @@
1config NET_VENDOR_MEDIATEK_STAGING
2 bool "MediaTek ethernet driver - staging version"
3 depends on RALINK
4 ---help---
5 If you have an MT7621 Mediatek SoC with ethernet, say Y.
6
7if NET_VENDOR_MEDIATEK_STAGING
8choice
9 prompt "MAC type"
10
11config NET_MEDIATEK_MT7621
12 bool "MT7621"
13 depends on MIPS && SOC_MT7621
14
15endchoice
16
17config NET_MEDIATEK_SOC_STAGING
18 tristate "MediaTek SoC Gigabit Ethernet support"
19 depends on NET_VENDOR_MEDIATEK_STAGING
20 select PHYLIB
21 ---help---
22 This driver supports the gigabit ethernet MACs in the
23 MediaTek SoC family.
24
25config NET_MEDIATEK_MDIO
26 def_bool NET_MEDIATEK_SOC_STAGING
27 depends on NET_MEDIATEK_MT7621
28 select PHYLIB
29
30config NET_MEDIATEK_MDIO_MT7620
31 def_bool NET_MEDIATEK_SOC_STAGING
32 depends on NET_MEDIATEK_MT7621
33 select NET_MEDIATEK_MDIO
34
35config NET_MEDIATEK_GSW_MT7621
36 def_tristate NET_MEDIATEK_SOC_STAGING
37 depends on NET_MEDIATEK_MT7621
38
39endif #NET_VENDOR_MEDIATEK_STAGING
diff --git a/drivers/staging/mt7621-eth/Makefile b/drivers/staging/mt7621-eth/Makefile
deleted file mode 100644
index 018bcc3596b3..000000000000
--- a/drivers/staging/mt7621-eth/Makefile
+++ /dev/null
@@ -1,14 +0,0 @@
1#
2# Makefile for the Ralink SoCs built-in ethernet macs
3#
4
5mtk-eth-soc-y += mtk_eth_soc.o ethtool.o
6
7mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO) += mdio.o
8mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_MT7620) += mdio_mt7620.o
9
10mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7621) += soc_mt7621.o
11
12obj-$(CONFIG_NET_MEDIATEK_GSW_MT7621) += gsw_mt7621.o
13
14obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mtk-eth-soc.o
diff --git a/drivers/staging/mt7621-eth/TODO b/drivers/staging/mt7621-eth/TODO
deleted file mode 100644
index f9e47d4b4cd4..000000000000
--- a/drivers/staging/mt7621-eth/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
1
2- verify devicetree documentation is consistent with code
3- fix ethtool - currently doesn't return valid data.
4- general code review and clean up
5- add support for second MAC on mt7621
6- convert gsw code to use switchdev interfaces
7- md7620_mmi_write etc should probably be wrapped
8 in a regmap abstraction.
9- Get soc_mt7621 to work with QDMA TX if possible.
10- Ensure phys are correctly configured when a cable
11 is plugged in.
12
13Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c
deleted file mode 100644
index 8c4228e2c987..000000000000
--- a/drivers/staging/mt7621-eth/ethtool.c
+++ /dev/null
@@ -1,250 +0,0 @@
1// SPDX-License-Identifier: GPL-2.0
2/* This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; version 2 of the License
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
12 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
13 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
14 */
15
16#include "mtk_eth_soc.h"
17#include "ethtool.h"
18
19struct mtk_stat {
20 char name[ETH_GSTRING_LEN];
21 unsigned int idx;
22};
23
24#define MTK_HW_STAT(stat) { \
25 .name = #stat, \
26 .idx = offsetof(struct mtk_hw_stats, stat) / sizeof(u64) \
27}
28
29static const struct mtk_stat mtk_ethtool_hw_stats[] = {
30 MTK_HW_STAT(tx_bytes),
31 MTK_HW_STAT(tx_packets),
32 MTK_HW_STAT(tx_skip),
33 MTK_HW_STAT(tx_collisions),
34 MTK_HW_STAT(rx_bytes),
35 MTK_HW_STAT(rx_packets),
36 MTK_HW_STAT(rx_overflow),
37 MTK_HW_STAT(rx_fcs_errors),
38 MTK_HW_STAT(rx_short_errors),
39 MTK_HW_STAT(rx_long_errors),
40 MTK_HW_STAT(rx_checksum_errors),
41 MTK_HW_STAT(rx_flow_control_packets),
42};
43
44#define MTK_HW_STATS_LEN ARRAY_SIZE(mtk_ethtool_hw_stats)
45
46static int mtk_get_link_ksettings(struct net_device *dev,
47 struct ethtool_link_ksettings *cmd)
48{
49 struct mtk_mac *mac = netdev_priv(dev);
50 int err;
51
52 if (!mac->phy_dev)
53 return -ENODEV;
54
55 if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
56 err = phy_read_status(mac->phy_dev);
57 if (err)
58 return -ENODEV;
59 }
60
61 phy_ethtool_ksettings_get(mac->phy_dev, cmd);
62 return 0;
63}
64
65static int mtk_set_link_ksettings(struct net_device *dev,
66 const struct ethtool_link_ksettings *cmd)
67{
68 struct mtk_mac *mac = netdev_priv(dev);
69
70 if (!mac->phy_dev)
71 return -ENODEV;
72
73 if (cmd->base.phy_address != mac->phy_dev->mdio.addr) {
74 if (mac->hw->phy->phy_node[cmd->base.phy_address]) {
75 mac->phy_dev = mac->hw->phy->phy[cmd->base.phy_address];
76 mac->phy_flags = MTK_PHY_FLAG_PORT;
77 } else if (mac->hw->mii_bus) {
78 mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
79 cmd->base.phy_address);
80 if (!mac->phy_dev)
81 return -ENODEV;
82 mac->phy_flags = MTK_PHY_FLAG_ATTACH;
83 } else {
84 return -ENODEV;
85 }
86 }
87
88 return phy_ethtool_ksettings_set(mac->phy_dev, cmd);
89}
90
91static void mtk_get_drvinfo(struct net_device *dev,
92 struct ethtool_drvinfo *info)
93{
94 struct mtk_mac *mac = netdev_priv(dev);
95 struct mtk_soc_data *soc = mac->hw->soc;
96
97 strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
98 strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
99
100 if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE])
101 info->n_stats = MTK_HW_STATS_LEN;
102}
103
104static u32 mtk_get_msglevel(struct net_device *dev)
105{
106 struct mtk_mac *mac = netdev_priv(dev);
107
108 return mac->hw->msg_enable;
109}
110
111static void mtk_set_msglevel(struct net_device *dev, u32 value)
112{
113 struct mtk_mac *mac = netdev_priv(dev);
114
115 mac->hw->msg_enable = value;
116}
117
118static int mtk_nway_reset(struct net_device *dev)
119{
120 struct mtk_mac *mac = netdev_priv(dev);
121
122 if (!mac->phy_dev)
123 return -EOPNOTSUPP;
124
125 return genphy_restart_aneg(mac->phy_dev);
126}
127
128static u32 mtk_get_link(struct net_device *dev)
129{
130 struct mtk_mac *mac = netdev_priv(dev);
131 int err;
132
133 if (!mac->phy_dev)
134 goto out_get_link;
135
136 if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
137 err = genphy_update_link(mac->phy_dev);
138 if (err)
139 goto out_get_link;
140 }
141
142 return mac->phy_dev->link;
143
144out_get_link:
145 return ethtool_op_get_link(dev);
146}
147
148static int mtk_set_ringparam(struct net_device *dev,
149 struct ethtool_ringparam *ring)
150{
151 struct mtk_mac *mac = netdev_priv(dev);
152
153 if ((ring->tx_pending < 2) ||
154 (ring->rx_pending < 2) ||
155 (ring->rx_pending > mac->hw->soc->dma_ring_size) ||
156 (ring->tx_pending > mac->hw->soc->dma_ring_size))
157 return -EINVAL;
158
159 dev->netdev_ops->ndo_stop(dev);
160
161 mac->hw->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1);
162 mac->hw->rx_ring[0].rx_ring_size = BIT(fls(ring->rx_pending) - 1);
163
164 return dev->netdev_ops->ndo_open(dev);
165}
166
167static void mtk_get_ringparam(struct net_device *dev,
168 struct ethtool_ringparam *ring)
169{
170 struct mtk_mac *mac = netdev_priv(dev);
171
172 ring->rx_max_pending = mac->hw->soc->dma_ring_size;
173 ring->tx_max_pending = mac->hw->soc->dma_ring_size;
174 ring->rx_pending = mac->hw->rx_ring[0].rx_ring_size;
175 ring->tx_pending = mac->hw->tx_ring.tx_ring_size;
176}
177
178static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
179{
180 int i;
181
182 switch (stringset) {
183 case ETH_SS_STATS:
184 for (i = 0; i < MTK_HW_STATS_LEN; i++) {
185 memcpy(data, mtk_ethtool_hw_stats[i].name,
186 ETH_GSTRING_LEN);
187 data += ETH_GSTRING_LEN;
188 }
189 break;
190 }
191}
192
193static int mtk_get_sset_count(struct net_device *dev, int sset)
194{
195 switch (sset) {
196 case ETH_SS_STATS:
197 return MTK_HW_STATS_LEN;
198 default:
199 return -EOPNOTSUPP;
200 }
201}
202
203static void mtk_get_ethtool_stats(struct net_device *dev,
204 struct ethtool_stats *stats, u64 *data)
205{
206 struct mtk_mac *mac = netdev_priv(dev);
207 struct mtk_hw_stats *hwstats = mac->hw_stats;
208 unsigned int start;
209 int i;
210
211 if (netif_running(dev) && netif_device_present(dev)) {
212 if (spin_trylock(&hwstats->stats_lock)) {
213 mtk_stats_update_mac(mac);
214 spin_unlock(&hwstats->stats_lock);
215 }
216 }
217
218 do {
219 start = u64_stats_fetch_begin_irq(&hwstats->syncp);
220 for (i = 0; i < MTK_HW_STATS_LEN; i++)
221 data[i] = ((u64 *)hwstats)[mtk_ethtool_hw_stats[i].idx];
222
223 } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
224}
225
226static struct ethtool_ops mtk_ethtool_ops = {
227 .get_link_ksettings = mtk_get_link_ksettings,
228 .set_link_ksettings = mtk_set_link_ksettings,
229 .get_drvinfo = mtk_get_drvinfo,
230 .get_msglevel = mtk_get_msglevel,
231 .set_msglevel = mtk_set_msglevel,
232 .nway_reset = mtk_nway_reset,
233 .get_link = mtk_get_link,
234 .set_ringparam = mtk_set_ringparam,
235 .get_ringparam = mtk_get_ringparam,
236};
237
238void mtk_set_ethtool_ops(struct net_device *netdev)
239{
240 struct mtk_mac *mac = netdev_priv(netdev);
241 struct mtk_soc_data *soc = mac->hw->soc;
242
243 if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) {
244 mtk_ethtool_ops.get_strings = mtk_get_strings;
245 mtk_ethtool_ops.get_sset_count = mtk_get_sset_count;
246 mtk_ethtool_ops.get_ethtool_stats = mtk_get_ethtool_stats;
247 }
248
249 netdev->ethtool_ops = &mtk_ethtool_ops;
250}
diff --git a/drivers/staging/mt7621-eth/ethtool.h b/drivers/staging/mt7621-eth/ethtool.h
deleted file mode 100644
index 0071469aea6c..000000000000
--- a/drivers/staging/mt7621-eth/ethtool.h
+++ /dev/null
@@ -1,15 +0,0 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
4 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
5 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
6 */
7
8#ifndef MTK_ETHTOOL_H
9#define MTK_ETHTOOL_H
10
11#include <linux/ethtool.h>
12
13void mtk_set_ethtool_ops(struct net_device *netdev);
14
15#endif /* MTK_ETHTOOL_H */
diff --git a/drivers/staging/mt7621-eth/gsw_mt7620.h b/drivers/staging/mt7621-eth/gsw_mt7620.h
deleted file mode 100644
index 70f7e5481952..000000000000
--- a/drivers/staging/mt7621-eth/gsw_mt7620.h
+++ /dev/null
@@ -1,277 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#ifndef _RALINK_GSW_MT7620_H__
16#define _RALINK_GSW_MT7620_H__
17
18#define GSW_REG_PHY_TIMEOUT (5 * HZ)
19
20#define MT7620_GSW_REG_PIAC 0x0004
21
22#define GSW_NUM_VLANS 16
23#define GSW_NUM_VIDS 4096
24#define GSW_NUM_PORTS 7
25#define GSW_PORT6 6
26
27#define GSW_MDIO_ACCESS BIT(31)
28#define GSW_MDIO_READ BIT(19)
29#define GSW_MDIO_WRITE BIT(18)
30#define GSW_MDIO_START BIT(16)
31#define GSW_MDIO_ADDR_SHIFT 20
32#define GSW_MDIO_REG_SHIFT 25
33
34#define GSW_REG_PORT_PMCR(x) (0x3000 + (x * 0x100))
35#define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100))
36#define GSW_REG_SMACCR0 0x3fE4
37#define GSW_REG_SMACCR1 0x3fE8
38#define GSW_REG_CKGCR 0x3ff0
39
40#define GSW_REG_IMR 0x7008
41#define GSW_REG_ISR 0x700c
42#define GSW_REG_GPC1 0x7014
43
44#define SYSC_REG_CHIP_REV_ID 0x0c
45#define SYSC_REG_CFG 0x10
46#define SYSC_REG_CFG1 0x14
47#define RST_CTRL_MCM BIT(2)
48#define SYSC_PAD_RGMII2_MDIO 0x58
49#define SYSC_GPIO_MODE 0x60
50
51#define PORT_IRQ_ST_CHG 0x7f
52
53#define MT7621_ESW_PHY_POLLING 0x0000
54#define MT7620_ESW_PHY_POLLING 0x7000
55
56#define PMCR_IPG BIT(18)
57#define PMCR_MAC_MODE BIT(16)
58#define PMCR_FORCE BIT(15)
59#define PMCR_TX_EN BIT(14)
60#define PMCR_RX_EN BIT(13)
61#define PMCR_BACKOFF BIT(9)
62#define PMCR_BACKPRES BIT(8)
63#define PMCR_RX_FC BIT(5)
64#define PMCR_TX_FC BIT(4)
65#define PMCR_SPEED(_x) (_x << 2)
66#define PMCR_DUPLEX BIT(1)
67#define PMCR_LINK BIT(0)
68
69#define PHY_AN_EN BIT(31)
70#define PHY_PRE_EN BIT(30)
71#define PMY_MDC_CONF(_x) ((_x & 0x3f) << 24)
72
73/* ethernet subsystem config register */
74#define ETHSYS_SYSCFG0 0x14
75/* ethernet subsystem clock register */
76#define ETHSYS_CLKCFG0 0x2c
77#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
78
79/* p5 RGMII wrapper TX clock control register */
80#define MT7530_P5RGMIITXCR 0x7b04
81/* p5 RGMII wrapper RX clock control register */
82#define MT7530_P5RGMIIRXCR 0x7b00
83/* TRGMII TDX ODT registers */
84#define MT7530_TRGMII_TD0_ODT 0x7a54
85#define MT7530_TRGMII_TD1_ODT 0x7a5c
86#define MT7530_TRGMII_TD2_ODT 0x7a64
87#define MT7530_TRGMII_TD3_ODT 0x7a6c
88#define MT7530_TRGMII_TD4_ODT 0x7a74
89#define MT7530_TRGMII_TD5_ODT 0x7a7c
90/* TRGMII TCK ctrl register */
91#define MT7530_TRGMII_TCK_CTRL 0x7a78
92/* TRGMII Tx ctrl register */
93#define MT7530_TRGMII_TXCTRL 0x7a40
94/* port 6 extended control register */
95#define MT7530_P6ECR 0x7830
96/* IO driver control register */
97#define MT7530_IO_DRV_CR 0x7810
98/* top signal control register */
99#define MT7530_TOP_SIG_CTRL 0x7808
100/* modified hwtrap register */
101#define MT7530_MHWTRAP 0x7804
102/* hwtrap status register */
103#define MT7530_HWTRAP 0x7800
104/* status interrupt register */
105#define MT7530_SYS_INT_STS 0x700c
106/* system nterrupt register */
107#define MT7530_SYS_INT_EN 0x7008
108/* system control register */
109#define MT7530_SYS_CTRL 0x7000
110/* port MAC status register */
111#define MT7530_PMSR_P(x) (0x3008 + (x * 0x100))
112/* port MAC control register */
113#define MT7530_PMCR_P(x) (0x3000 + (x * 0x100))
114
115#define MT7621_XTAL_SHIFT 6
116#define MT7621_XTAL_MASK 0x7
117#define MT7621_XTAL_25 6
118#define MT7621_XTAL_40 3
119#define MT7621_MDIO_DRV_MASK (3 << 4)
120#define MT7621_GE1_MODE_MASK (3 << 12)
121
122#define TRGMII_TXCTRL_TXC_INV BIT(30)
123#define P6ECR_INTF_MODE_RGMII BIT(1)
124#define P5RGMIIRXCR_C_ALIGN BIT(8)
125#define P5RGMIIRXCR_DELAY_2 BIT(1)
126#define P5RGMIITXCR_DELAY_2 (BIT(8) | BIT(2))
127
128/* TOP_SIG_CTRL bits */
129#define TOP_SIG_CTRL_NORMAL (BIT(17) | BIT(16))
130
131/* MHWTRAP bits */
132#define MHWTRAP_MANUAL BIT(16)
133#define MHWTRAP_P5_MAC_SEL BIT(13)
134#define MHWTRAP_P6_DIS BIT(8)
135#define MHWTRAP_P5_RGMII_MODE BIT(7)
136#define MHWTRAP_P5_DIS BIT(6)
137#define MHWTRAP_PHY_ACCESS BIT(5)
138
139/* HWTRAP bits */
140#define HWTRAP_XTAL_SHIFT 9
141#define HWTRAP_XTAL_MASK 0x3
142
143/* SYS_CTRL bits */
144#define SYS_CTRL_SW_RST BIT(1)
145#define SYS_CTRL_REG_RST BIT(0)
146
147/* PMCR bits */
148#define PMCR_IFG_XMIT_96 BIT(18)
149#define PMCR_MAC_MODE BIT(16)
150#define PMCR_FORCE_MODE BIT(15)
151#define PMCR_TX_EN BIT(14)
152#define PMCR_RX_EN BIT(13)
153#define PMCR_BACK_PRES_EN BIT(9)
154#define PMCR_BACKOFF_EN BIT(8)
155#define PMCR_TX_FC_EN BIT(5)
156#define PMCR_RX_FC_EN BIT(4)
157#define PMCR_FORCE_SPEED_1000 BIT(3)
158#define PMCR_FORCE_FDX BIT(1)
159#define PMCR_FORCE_LNK BIT(0)
160#define PMCR_FIXED_LINK (PMCR_IFG_XMIT_96 | PMCR_MAC_MODE | \
161 PMCR_FORCE_MODE | PMCR_TX_EN | PMCR_RX_EN | \
162 PMCR_BACK_PRES_EN | PMCR_BACKOFF_EN | \
163 PMCR_FORCE_SPEED_1000 | PMCR_FORCE_FDX | \
164 PMCR_FORCE_LNK)
165
166#define PMCR_FIXED_LINK_FC (PMCR_FIXED_LINK | \
167 PMCR_TX_FC_EN | PMCR_RX_FC_EN)
168
169/* TRGMII control registers */
170#define GSW_INTF_MODE 0x390
171#define GSW_TRGMII_TD0_ODT 0x354
172#define GSW_TRGMII_TD1_ODT 0x35c
173#define GSW_TRGMII_TD2_ODT 0x364
174#define GSW_TRGMII_TD3_ODT 0x36c
175#define GSW_TRGMII_TXCTL_ODT 0x374
176#define GSW_TRGMII_TCK_ODT 0x37c
177#define GSW_TRGMII_RCK_CTRL 0x300
178
179#define INTF_MODE_TRGMII BIT(1)
180#define TRGMII_RCK_CTRL_RX_RST BIT(31)
181
182/* Mac control registers */
183#define MTK_MAC_P2_MCR 0x200
184#define MTK_MAC_P1_MCR 0x100
185
186#define MAC_MCR_MAX_RX_2K BIT(29)
187#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
188#define MAC_MCR_FORCE_MODE BIT(15)
189#define MAC_MCR_TX_EN BIT(14)
190#define MAC_MCR_RX_EN BIT(13)
191#define MAC_MCR_BACKOFF_EN BIT(9)
192#define MAC_MCR_BACKPR_EN BIT(8)
193#define MAC_MCR_FORCE_RX_FC BIT(5)
194#define MAC_MCR_FORCE_TX_FC BIT(4)
195#define MAC_MCR_SPEED_1000 BIT(3)
196#define MAC_MCR_FORCE_DPX BIT(1)
197#define MAC_MCR_FORCE_LINK BIT(0)
198#define MAC_MCR_FIXED_LINK (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
199 MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
200 MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
201 MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
202 MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
203 MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
204#define MAC_MCR_FIXED_LINK_FC (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
205 MAC_MCR_FIXED_LINK)
206
207/* possible XTAL speed */
208#define MT7623_XTAL_40 0
209#define MT7623_XTAL_20 1
210#define MT7623_XTAL_25 3
211
212/* GPIO port control registers */
213#define GPIO_OD33_CTRL8 0x4c0
214#define GPIO_BIAS_CTRL 0xed0
215#define GPIO_DRV_SEL10 0xf00
216
217/* on MT7620 the functio of port 4 can be software configured */
218enum {
219 PORT4_EPHY = 0,
220 PORT4_EXT,
221};
222
223/* struct mt7620_gsw - the structure that holds the SoC specific data
224 * @dev: The Device struct
225 * @base: The base address
226 * @piac_offset: The PIAC base may change depending on SoC
227 * @irq: The IRQ we are using
228 * @port4: The port4 mode on MT7620
229 * @autopoll: Is MDIO autopolling enabled
230 * @ethsys: The ethsys register map
231 * @pctl: The pin control register map
232 * @clk_gsw: The switch clock
233 * @clk_gp1: The gmac1 clock
234 * @clk_gp2: The gmac2 clock
235 * @clk_trgpll: The trgmii pll clock
236 */
237struct mt7620_gsw {
238 struct device *dev;
239 void __iomem *base;
240 u32 piac_offset;
241 int irq;
242 int port4;
243 unsigned long int autopoll;
244
245 struct regmap *ethsys;
246 struct regmap *pctl;
247
248 struct clk *clk_gsw;
249 struct clk *clk_gp1;
250 struct clk *clk_gp2;
251 struct clk *clk_trgpll;
252};
253
254/* switch register I/O wrappers */
255void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg);
256u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg);
257
258/* the callback used by the driver core to bringup the switch */
259int mtk_gsw_init(struct mtk_eth *eth);
260
261/* MDIO access wrappers */
262int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val);
263int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg);
264void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port);
265int mt7620_has_carrier(struct mtk_eth *eth);
266void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
267 int speed, int duplex);
268void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val);
269u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg);
270void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg);
271
272u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
273 u32 phy_register, u32 write_data);
274u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg);
275void mt7620_handle_carrier(struct mtk_eth *eth);
276
277#endif
diff --git a/drivers/staging/mt7621-eth/gsw_mt7621.c b/drivers/staging/mt7621-eth/gsw_mt7621.c
deleted file mode 100644
index 53767b17bad9..000000000000
--- a/drivers/staging/mt7621-eth/gsw_mt7621.c
+++ /dev/null
@@ -1,297 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/platform_device.h>
19#include <linux/of_device.h>
20#include <linux/of_irq.h>
21
22#include <ralink_regs.h>
23
24#include "mtk_eth_soc.h"
25#include "gsw_mt7620.h"
26
27void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg)
28{
29 iowrite32(val, gsw->base + reg);
30}
31EXPORT_SYMBOL_GPL(mtk_switch_w32);
32
33u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg)
34{
35 return ioread32(gsw->base + reg);
36}
37EXPORT_SYMBOL_GPL(mtk_switch_r32);
38
39static irqreturn_t gsw_interrupt_mt7621(int irq, void *_eth)
40{
41 struct mtk_eth *eth = (struct mtk_eth *)_eth;
42 struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
43 u32 reg, i;
44
45 reg = mt7530_mdio_r32(gsw, MT7530_SYS_INT_STS);
46
47 for (i = 0; i < 5; i++) {
48 unsigned int link;
49
50 if ((reg & BIT(i)) == 0)
51 continue;
52
53 link = mt7530_mdio_r32(gsw, MT7530_PMSR_P(i)) & 0x1;
54
55 if (link == eth->link[i])
56 continue;
57
58 eth->link[i] = link;
59 if (link)
60 netdev_info(*eth->netdev,
61 "port %d link up\n", i);
62 else
63 netdev_info(*eth->netdev,
64 "port %d link down\n", i);
65 }
66
67 mt7530_mdio_w32(gsw, MT7530_SYS_INT_STS, 0x1f);
68
69 return IRQ_HANDLED;
70}
71
/*
 * Bring the MT7530 switch on MT7621 out of reset and program its MACs,
 * clocks and port configuration.  The sequence below is strictly
 * order-dependent (reset -> pad/mux setup -> PHY power-down -> switch
 * reset -> MAC/clock config -> PHY power-up) — do not reorder.
 */
static void mt7621_hw_init(struct mtk_eth *eth, struct mt7620_gsw *gsw,
			   struct device_node *np)
{
	u32 i;
	u32 val;

	/* hardware reset the switch */
	mtk_reset(eth, RST_CTRL_MCM);
	mdelay(10);

	/* reduce RGMII2 PAD driving strength */
	rt_sysc_m32(MT7621_MDIO_DRV_MASK, 0, SYSC_PAD_RGMII2_MDIO);

	/* gpio mux - RGMII1=Normal mode */
	rt_sysc_m32(BIT(14), 0, SYSC_GPIO_MODE);

	/* set GMAC1 RGMII mode */
	rt_sysc_m32(MT7621_GE1_MODE_MASK, 0, SYSC_REG_CFG1);

	/* enable MDIO to control MT7530 */
	rt_sysc_m32(3 << 12, 0, SYSC_GPIO_MODE);

	/* turn off all PHYs (BMCR power-down bit) while reconfiguring */
	for (i = 0; i <= 4; i++) {
		val = _mt7620_mii_read(gsw, i, 0x0);
		val |= BIT(11);
		_mt7620_mii_write(gsw, i, 0x0, val);
	}

	/* reset the switch */
	mt7530_mdio_w32(gsw, MT7530_SYS_CTRL,
			SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
	usleep_range(10, 20);

	/* chip revision 0x0101 uses MAC port 2 without flow control;
	 * later revisions use port 1 with flow control
	 */
	if ((rt_sysc_r32(SYSC_REG_CHIP_REV_ID) & 0xFFFF) == 0x0101) {
		/* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
		mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK, MTK_MAC_P2_MCR);
		mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK);
	} else {
		/* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
		mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK_FC, MTK_MAC_P1_MCR);
		mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK_FC);
	}

	/* GE2, Link down */
	mtk_switch_w32(gsw, MAC_MCR_FORCE_MODE, MTK_MAC_P2_MCR);

	/* Enable Port 6, P5 as GMAC5, P5 disable */
	val = mt7530_mdio_r32(gsw, MT7530_MHWTRAP);
	/* Enable Port 6 */
	val &= ~MHWTRAP_P6_DIS;
	/* Disable Port 5 */
	val |= MHWTRAP_P5_DIS;
	/* manual override of HW-Trap */
	val |= MHWTRAP_MANUAL;
	mt7530_mdio_w32(gsw, MT7530_MHWTRAP, val);

	val = rt_sysc_r32(SYSC_REG_CFG);
	val = (val >> MT7621_XTAL_SHIFT) & MT7621_XTAL_MASK;
	/* NOTE(review): this relies on the xtal field encoding satisfying
	 * MT7621_XTAL_40 <= val < MT7621_XTAL_25 for a 40MHz crystal —
	 * confirm against the constants in the header.
	 */
	if (val < MT7621_XTAL_25 && val >= MT7621_XTAL_40) {
		/* 40Mhz */

		/* The reg 13/14 writes below are an indirect register access
		 * window (reg 13 = address/control, reg 14 = data) into the
		 * MT7530 core-clock/PLL registers — presumably MMD-style
		 * access at devad 0x1f; confirm with the MT7530 datasheet.
		 */

		/* disable MT7530 core clock */
		_mt7620_mii_write(gsw, 0, 13, 0x1f);
		_mt7620_mii_write(gsw, 0, 14, 0x410);
		_mt7620_mii_write(gsw, 0, 13, 0x401f);
		_mt7620_mii_write(gsw, 0, 14, 0x0);

		/* disable MT7530 PLL */
		_mt7620_mii_write(gsw, 0, 13, 0x1f);
		_mt7620_mii_write(gsw, 0, 14, 0x40d);
		_mt7620_mii_write(gsw, 0, 13, 0x401f);
		_mt7620_mii_write(gsw, 0, 14, 0x2020);

		/* for MT7530 core clock = 500Mhz */
		_mt7620_mii_write(gsw, 0, 13, 0x1f);
		_mt7620_mii_write(gsw, 0, 14, 0x40e);
		_mt7620_mii_write(gsw, 0, 13, 0x401f);
		_mt7620_mii_write(gsw, 0, 14, 0x119);

		/* enable MT7530 PLL */
		_mt7620_mii_write(gsw, 0, 13, 0x1f);
		_mt7620_mii_write(gsw, 0, 14, 0x40d);
		_mt7620_mii_write(gsw, 0, 13, 0x401f);
		_mt7620_mii_write(gsw, 0, 14, 0x2820);

		usleep_range(20, 40);

		/* enable MT7530 core clock */
		_mt7620_mii_write(gsw, 0, 13, 0x1f);
		_mt7620_mii_write(gsw, 0, 14, 0x410);
		_mt7620_mii_write(gsw, 0, 13, 0x401f);
	}

	/* RGMII */
	_mt7620_mii_write(gsw, 0, 14, 0x1);

	/* set MT7530 central align */
	mt7530_mdio_m32(gsw, BIT(0), P6ECR_INTF_MODE_RGMII, MT7530_P6ECR);
	mt7530_mdio_m32(gsw, TRGMII_TXCTRL_TXC_INV, 0,
			MT7530_TRGMII_TXCTRL);
	mt7530_mdio_w32(gsw, MT7530_TRGMII_TCK_CTRL, 0x855);

	/* delay setting for 10/1000M */
	mt7530_mdio_w32(gsw, MT7530_P5RGMIIRXCR,
			P5RGMIIRXCR_C_ALIGN | P5RGMIIRXCR_DELAY_2);
	mt7530_mdio_w32(gsw, MT7530_P5RGMIITXCR, 0x14);

	/* lower Tx Driving */
	mt7530_mdio_w32(gsw, MT7530_TRGMII_TD0_ODT, 0x44);
	mt7530_mdio_w32(gsw, MT7530_TRGMII_TD1_ODT, 0x44);
	mt7530_mdio_w32(gsw, MT7530_TRGMII_TD2_ODT, 0x44);
	mt7530_mdio_w32(gsw, MT7530_TRGMII_TD3_ODT, 0x44);
	mt7530_mdio_w32(gsw, MT7530_TRGMII_TD4_ODT, 0x44);
	mt7530_mdio_w32(gsw, MT7530_TRGMII_TD5_ODT, 0x44);

	/* turn on all PHYs (clear BMCR power-down bit) */
	for (i = 0; i <= 4; i++) {
		val = _mt7620_mii_read(gsw, i, 0);
		val &= ~BIT(11);
		_mt7620_mii_write(gsw, i, 0, val);
	}

#define MT7530_NUM_PORTS	8
#define REG_ESW_PORT_PCR(x)	(0x2004 | ((x) << 8))
#define REG_ESW_PORT_PVC(x)	(0x2010 | ((x) << 8))
#define REG_ESW_PORT_PPBV1(x)	(0x2014 | ((x) << 8))
#define MT7530_CPU_PORT		6

	/* This is copied from mt7530_apply_config in libreCMC driver */
	{
		/* local i deliberately shadows the outer u32 loop counter */
		int i;

		for (i = 0; i < MT7530_NUM_PORTS; i++)
			mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(i), 0x00400000);

		mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT),
				0x00ff0000);

		for (i = 0; i < MT7530_NUM_PORTS; i++)
			mt7530_mdio_w32(gsw, REG_ESW_PORT_PVC(i), 0x810000c0);
	}

	/* enable irq */
	mt7530_mdio_m32(gsw, 0, 3 << 16, MT7530_TOP_SIG_CTRL);
	mt7530_mdio_w32(gsw, MT7530_SYS_INT_EN, 0x1f);
}
219
/* OF match table: binds this driver to "mediatek,mt7621-gsw" DT nodes */
static const struct of_device_id mediatek_gsw_match[] = {
	{ .compatible = "mediatek,mt7621-gsw" },
	{},
};
MODULE_DEVICE_TABLE(of, mediatek_gsw_match);
225
226int mtk_gsw_init(struct mtk_eth *eth)
227{
228 struct device_node *np = eth->switch_np;
229 struct platform_device *pdev = of_find_device_by_node(np);
230 struct mt7620_gsw *gsw;
231
232 if (!pdev)
233 return -ENODEV;
234
235 if (!of_device_is_compatible(np, mediatek_gsw_match->compatible))
236 return -EINVAL;
237
238 gsw = platform_get_drvdata(pdev);
239 eth->sw_priv = gsw;
240
241 if (!gsw->irq)
242 return -EINVAL;
243
244 request_irq(gsw->irq, gsw_interrupt_mt7621, 0,
245 "gsw", eth);
246 disable_irq(gsw->irq);
247
248 mt7621_hw_init(eth, gsw, np);
249
250 enable_irq(gsw->irq);
251
252 return 0;
253}
254EXPORT_SYMBOL_GPL(mtk_gsw_init);
255
256static int mt7621_gsw_probe(struct platform_device *pdev)
257{
258 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
259 struct mt7620_gsw *gsw;
260
261 gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL);
262 if (!gsw)
263 return -ENOMEM;
264
265 gsw->base = devm_ioremap_resource(&pdev->dev, res);
266 if (IS_ERR(gsw->base))
267 return PTR_ERR(gsw->base);
268
269 gsw->dev = &pdev->dev;
270 gsw->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
271
272 platform_set_drvdata(pdev, gsw);
273
274 return 0;
275}
276
/* Remove: clear drvdata; all resources are devm-managed and need no
 * explicit release here.
 */
static int mt7621_gsw_remove(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, NULL);

	return 0;
}
283
/* platform driver glue for the "mediatek,mt7621-gsw" switch node */
static struct platform_driver gsw_driver = {
	.probe = mt7621_gsw_probe,
	.remove = mt7621_gsw_remove,
	.driver = {
		.name = "mt7621-gsw",
		.of_match_table = mediatek_gsw_match,
	},
};

module_platform_driver(gsw_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7621 SoC");
diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c
deleted file mode 100644
index 5fea6a447eed..000000000000
--- a/drivers/staging/mt7621-eth/mdio.c
+++ /dev/null
@@ -1,275 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
6 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
7 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/phy.h>
13#include <linux/of_net.h>
14#include <linux/of_mdio.h>
15
16#include "mtk_eth_soc.h"
17#include "mdio.h"
18
/* mii_bus ->reset hook; currently a stub that reports success. */
static int mtk_mdio_reset(struct mii_bus *bus)
{
	/* TODO */
	return 0;
}
24
25static void mtk_phy_link_adjust(struct net_device *dev)
26{
27 struct mtk_eth *eth = netdev_priv(dev);
28 unsigned long flags;
29 int i;
30
31 spin_lock_irqsave(&eth->phy->lock, flags);
32 for (i = 0; i < 8; i++) {
33 if (eth->phy->phy_node[i]) {
34 struct phy_device *phydev = eth->phy->phy[i];
35 int status_change = 0;
36
37 if (phydev->link)
38 if (eth->phy->duplex[i] != phydev->duplex ||
39 eth->phy->speed[i] != phydev->speed)
40 status_change = 1;
41
42 if (phydev->link != eth->link[i])
43 status_change = 1;
44
45 switch (phydev->speed) {
46 case SPEED_1000:
47 case SPEED_100:
48 case SPEED_10:
49 eth->link[i] = phydev->link;
50 eth->phy->duplex[i] = phydev->duplex;
51 eth->phy->speed[i] = phydev->speed;
52
53 if (status_change &&
54 eth->soc->mdio_adjust_link)
55 eth->soc->mdio_adjust_link(eth, i);
56 break;
57 }
58 }
59 }
60 spin_unlock_irqrestore(&eth->phy->lock, flags);
61}
62
63int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
64 struct device_node *phy_node)
65{
66 const __be32 *_port = NULL;
67 struct phy_device *phydev;
68 int phy_mode, port;
69
70 _port = of_get_property(phy_node, "reg", NULL);
71
72 if (!_port || (be32_to_cpu(*_port) >= 0x20)) {
73 pr_err("%pOFn: invalid port id\n", phy_node);
74 return -EINVAL;
75 }
76 port = be32_to_cpu(*_port);
77 phy_mode = of_get_phy_mode(phy_node);
78 if (phy_mode < 0) {
79 dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
80 eth->phy->phy_node[port] = NULL;
81 return -EINVAL;
82 }
83
84 phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
85 mtk_phy_link_adjust, 0, phy_mode);
86 if (!phydev) {
87 dev_err(eth->dev, "could not connect to PHY\n");
88 eth->phy->phy_node[port] = NULL;
89 return -ENODEV;
90 }
91
92 phydev->supported &= PHY_1000BT_FEATURES;
93 phydev->advertising = phydev->supported;
94
95 dev_info(eth->dev,
96 "connected port %d to PHY at %s [uid=%08x, driver=%s]\n",
97 port, phydev_name(phydev), phydev->phy_id,
98 phydev->drv->name);
99
100 eth->phy->phy[port] = phydev;
101 eth->link[port] = 0;
102
103 return 0;
104}
105
/* Attach @phy to the MAC's netdev in MII mode and kick off 100M
 * autonegotiation.
 * NOTE(review): the result of phy_attach() is ignored — a failed attach
 * would leave the following writes operating on an unattached PHY;
 * confirm whether callers guarantee it cannot fail here.
 */
static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac,
		     struct phy_device *phy)
{
	phy_attach(eth->netdev[mac->id], phydev_name(phy),
		   PHY_INTERFACE_MODE_MII);

	phy->autoneg = AUTONEG_ENABLE;
	phy->speed = 0;
	phy->duplex = 0;
	phy_set_max_speed(phy, SPEED_100);
	phy->advertising = phy->supported | ADVERTISED_Autoneg;

	phy_start_aneg(phy);
}
120
121static int mtk_phy_connect(struct mtk_mac *mac)
122{
123 struct mtk_eth *eth = mac->hw;
124 int i;
125
126 for (i = 0; i < 8; i++) {
127 if (eth->phy->phy_node[i]) {
128 if (!mac->phy_dev) {
129 mac->phy_dev = eth->phy->phy[i];
130 mac->phy_flags = MTK_PHY_FLAG_PORT;
131 }
132 } else if (eth->mii_bus) {
133 struct phy_device *phy;
134
135 phy = mdiobus_get_phy(eth->mii_bus, i);
136 if (phy) {
137 phy_init(eth, mac, phy);
138 if (!mac->phy_dev) {
139 mac->phy_dev = phy;
140 mac->phy_flags = MTK_PHY_FLAG_ATTACH;
141 }
142 }
143 }
144 }
145
146 return 0;
147}
148
149static void mtk_phy_disconnect(struct mtk_mac *mac)
150{
151 struct mtk_eth *eth = mac->hw;
152 unsigned long flags;
153 int i;
154
155 for (i = 0; i < 8; i++)
156 if (eth->phy->phy_fixed[i]) {
157 spin_lock_irqsave(&eth->phy->lock, flags);
158 eth->link[i] = 0;
159 if (eth->soc->mdio_adjust_link)
160 eth->soc->mdio_adjust_link(eth, i);
161 spin_unlock_irqrestore(&eth->phy->lock, flags);
162 } else if (eth->phy->phy[i]) {
163 phy_disconnect(eth->phy->phy[i]);
164 } else if (eth->mii_bus) {
165 struct phy_device *phy =
166 mdiobus_get_phy(eth->mii_bus, i);
167
168 if (phy)
169 phy_detach(phy);
170 }
171}
172
173static void mtk_phy_start(struct mtk_mac *mac)
174{
175 struct mtk_eth *eth = mac->hw;
176 unsigned long flags;
177 int i;
178
179 for (i = 0; i < 8; i++) {
180 if (eth->phy->phy_fixed[i]) {
181 spin_lock_irqsave(&eth->phy->lock, flags);
182 eth->link[i] = 1;
183 if (eth->soc->mdio_adjust_link)
184 eth->soc->mdio_adjust_link(eth, i);
185 spin_unlock_irqrestore(&eth->phy->lock, flags);
186 } else if (eth->phy->phy[i]) {
187 phy_start(eth->phy->phy[i]);
188 }
189 }
190}
191
192static void mtk_phy_stop(struct mtk_mac *mac)
193{
194 struct mtk_eth *eth = mac->hw;
195 unsigned long flags;
196 int i;
197
198 for (i = 0; i < 8; i++)
199 if (eth->phy->phy_fixed[i]) {
200 spin_lock_irqsave(&eth->phy->lock, flags);
201 eth->link[i] = 0;
202 if (eth->soc->mdio_adjust_link)
203 eth->soc->mdio_adjust_link(eth, i);
204 spin_unlock_irqrestore(&eth->phy->lock, flags);
205 } else if (eth->phy->phy[i]) {
206 phy_stop(eth->phy->phy[i]);
207 }
208}
209
/* PHY management callbacks installed as eth->phy by mtk_mdio_init() */
static struct mtk_phy phy_ralink = {
	.connect = mtk_phy_connect,
	.disconnect = mtk_phy_disconnect,
	.start = mtk_phy_start,
	.stop = mtk_phy_stop,
};
216
217int mtk_mdio_init(struct mtk_eth *eth)
218{
219 struct device_node *mii_np;
220 int err;
221
222 if (!eth->soc->mdio_read || !eth->soc->mdio_write)
223 return 0;
224
225 spin_lock_init(&phy_ralink.lock);
226 eth->phy = &phy_ralink;
227
228 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
229 if (!mii_np) {
230 dev_err(eth->dev, "no %s child node found", "mdio-bus");
231 return -ENODEV;
232 }
233
234 if (!of_device_is_available(mii_np)) {
235 err = 0;
236 goto err_put_node;
237 }
238
239 eth->mii_bus = mdiobus_alloc();
240 if (!eth->mii_bus) {
241 err = -ENOMEM;
242 goto err_put_node;
243 }
244
245 eth->mii_bus->name = "mdio";
246 eth->mii_bus->read = eth->soc->mdio_read;
247 eth->mii_bus->write = eth->soc->mdio_write;
248 eth->mii_bus->reset = mtk_mdio_reset;
249 eth->mii_bus->priv = eth;
250 eth->mii_bus->parent = eth->dev;
251
252 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
253 err = of_mdiobus_register(eth->mii_bus, mii_np);
254 if (err)
255 goto err_free_bus;
256
257 return 0;
258
259err_free_bus:
260 kfree(eth->mii_bus);
261err_put_node:
262 of_node_put(mii_np);
263 eth->mii_bus = NULL;
264 return err;
265}
266
267void mtk_mdio_cleanup(struct mtk_eth *eth)
268{
269 if (!eth->mii_bus)
270 return;
271
272 mdiobus_unregister(eth->mii_bus);
273 of_node_put(eth->mii_bus->dev.of_node);
274 kfree(eth->mii_bus);
275}
diff --git a/drivers/staging/mt7621-eth/mdio.h b/drivers/staging/mt7621-eth/mdio.h
deleted file mode 100644
index b14e23842a01..000000000000
--- a/drivers/staging/mt7621-eth/mdio.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#ifndef _RALINK_MDIO_H__
16#define _RALINK_MDIO_H__
17
18#ifdef CONFIG_NET_MEDIATEK_MDIO
19int mtk_mdio_init(struct mtk_eth *eth);
20void mtk_mdio_cleanup(struct mtk_eth *eth);
21int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
22 struct device_node *phy_node);
23#else
24static inline int mtk_mdio_init(struct mtk_eth *eth) { return 0; }
25static inline void mtk_mdio_cleanup(struct mtk_eth *eth) {}
26#endif
27#endif
diff --git a/drivers/staging/mt7621-eth/mdio_mt7620.c b/drivers/staging/mt7621-eth/mdio_mt7620.c
deleted file mode 100644
index ced605c2914e..000000000000
--- a/drivers/staging/mt7621-eth/mdio_mt7620.c
+++ /dev/null
@@ -1,173 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/types.h>
18
19#include "mtk_eth_soc.h"
20#include "gsw_mt7620.h"
21#include "mdio.h"
22
23static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw)
24{
25 unsigned long t_start = jiffies;
26
27 while (1) {
28 if (!(mtk_switch_r32(gsw,
29 gsw->piac_offset + MT7620_GSW_REG_PIAC) &
30 GSW_MDIO_ACCESS))
31 return 0;
32 if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT))
33 break;
34 }
35
36 dev_err(gsw->dev, "mdio: MDIO timeout\n");
37 return -1;
38}
39
40u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
41 u32 phy_register, u32 write_data)
42{
43 if (mt7620_mii_busy_wait(gsw))
44 return -1;
45
46 write_data &= 0xffff;
47
48 mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE |
49 (phy_register << GSW_MDIO_REG_SHIFT) |
50 (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data,
51 MT7620_GSW_REG_PIAC);
52
53 if (mt7620_mii_busy_wait(gsw))
54 return -1;
55
56 return 0;
57}
58EXPORT_SYMBOL_GPL(_mt7620_mii_write);
59
60u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg)
61{
62 u32 d;
63
64 if (mt7620_mii_busy_wait(gsw))
65 return 0xffff;
66
67 mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ |
68 (phy_reg << GSW_MDIO_REG_SHIFT) |
69 (phy_addr << GSW_MDIO_ADDR_SHIFT),
70 MT7620_GSW_REG_PIAC);
71
72 if (mt7620_mii_busy_wait(gsw))
73 return 0xffff;
74
75 d = mtk_switch_r32(gsw, MT7620_GSW_REG_PIAC) & 0xffff;
76
77 return d;
78}
79EXPORT_SYMBOL_GPL(_mt7620_mii_read);
80
81int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
82{
83 struct mtk_eth *eth = bus->priv;
84 struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
85
86 return _mt7620_mii_write(gsw, phy_addr, phy_reg, val);
87}
88
89int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
90{
91 struct mtk_eth *eth = bus->priv;
92 struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
93
94 return _mt7620_mii_read(gsw, phy_addr, phy_reg);
95}
96
/* Write the 32-bit MT7530 switch register @reg through the MDIO
 * indirect access scheme: all accesses go to PHY address 0x1f.
 * The three writes below are order-dependent.
 */
void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val)
{
	/* select the register page: bits [15:6] of the switch address */
	_mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
	/* low 16 bits go to the in-page register (address bits [5:2]) */
	_mt7620_mii_write(gsw, 0x1f, (reg >> 2) & 0xf, val & 0xffff);
	/* high 16 bits go to register 0x10 */
	_mt7620_mii_write(gsw, 0x1f, 0x10, val >> 16);
}
EXPORT_SYMBOL_GPL(mt7530_mdio_w32);
104
/* Read the 32-bit MT7530 switch register @reg through the MDIO indirect
 * access scheme (mirror of mt7530_mdio_w32): select page, read the low
 * half from the in-page register, then the high half from register 0x10.
 */
u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg)
{
	u16 high, low;

	_mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
	low = _mt7620_mii_read(gsw, 0x1f, (reg >> 2) & 0xf);
	high = _mt7620_mii_read(gsw, 0x1f, 0x10);

	return (high << 16) | (low & 0xffff);
}
EXPORT_SYMBOL_GPL(mt7530_mdio_r32);
116
117void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg)
118{
119 u32 val = mt7530_mdio_r32(gsw, reg);
120
121 val &= ~mask;
122 val |= set;
123 mt7530_mdio_w32(gsw, reg, val);
124}
125EXPORT_SYMBOL_GPL(mt7530_mdio_m32);
126
127static unsigned char *mtk_speed_str(int speed)
128{
129 switch (speed) {
130 case 2:
131 case SPEED_1000:
132 return "1000";
133 case 1:
134 case SPEED_100:
135 return "100";
136 case 0:
137 case SPEED_10:
138 return "10";
139 }
140
141 return "? ";
142}
143
144int mt7620_has_carrier(struct mtk_eth *eth)
145{
146 struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
147 int i;
148
149 for (i = 0; i < GSW_PORT6; i++)
150 if (mt7530_mdio_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1)
151 return 1;
152 return 0;
153}
154
155void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
156 int speed, int duplex)
157{
158 struct mt7620_gsw *gsw = eth->sw_priv;
159
160 if (link)
161 dev_info(gsw->dev, "port %d link up (%sMbps/%s duplex)\n",
162 port, mtk_speed_str(speed),
163 (duplex) ? "Full" : "Half");
164 else
165 dev_info(gsw->dev, "port %d link down\n", port);
166}
167
/* mdio_adjust_link hook: report the cached state of @port. */
void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port)
{
	mt7620_print_link_state(eth, port, eth->link[port],
				eth->phy->speed[port],
				(eth->phy->duplex[port] == DUPLEX_FULL));
}
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
deleted file mode 100644
index 6027b19f7bc2..000000000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.c
+++ /dev/null
@@ -1,2176 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/dma-mapping.h>
19#include <linux/init.h>
20#include <linux/skbuff.h>
21#include <linux/etherdevice.h>
22#include <linux/ethtool.h>
23#include <linux/platform_device.h>
24#include <linux/of_device.h>
25#include <linux/mfd/syscon.h>
26#include <linux/clk.h>
27#include <linux/of_net.h>
28#include <linux/of_mdio.h>
29#include <linux/if_vlan.h>
30#include <linux/reset.h>
31#include <linux/tcp.h>
32#include <linux/io.h>
33#include <linux/bug.h>
34#include <linux/regmap.h>
35
36#include "mtk_eth_soc.h"
37#include "mdio.h"
38#include "ethtool.h"
39
40#define MAX_RX_LENGTH 1536
41#define MTK_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
42#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
43#define DMA_DUMMY_DESC 0xffffffff
44#define MTK_DEFAULT_MSG_ENABLE \
45 (NETIF_MSG_DRV | \
46 NETIF_MSG_PROBE | \
47 NETIF_MSG_LINK | \
48 NETIF_MSG_TIMER | \
49 NETIF_MSG_IFDOWN | \
50 NETIF_MSG_IFUP | \
51 NETIF_MSG_RX_ERR | \
52 NETIF_MSG_TX_ERR)
53
54#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
55#define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1))
56#define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1))
57
58#define SYSC_REG_RSTCTRL 0x34
59
60static int mtk_msg_level = -1;
61module_param_named(msg_level, mtk_msg_level, int, 0);
62MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
63
/* default mapping from abstract register ids to hardware offsets */
static const u16 mtk_reg_table_default[MTK_REG_COUNT] = {
	[MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG,
	[MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG,
	[MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG,
	[MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0,
	[MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0,
	[MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0,
	[MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0,
	[MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0,
	[MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0,
	[MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0,
	[MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0,
	[MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE,
	[MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS,
	[MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0,
	[MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT,
	[MTK_REG_MTK_RST_GL] = MTK_RST_GL,
};

/* table used by the mtk_reg_* accessors below */
static const u16 *mtk_reg_table = mtk_reg_table_default;
84
85void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
86{
87 __raw_writel(val, eth->base + reg);
88}
89
90u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
91{
92 return __raw_readl(eth->base + reg);
93}
94
95static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
96{
97 mtk_w32(eth, val, mtk_reg_table[reg]);
98}
99
100static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
101{
102 return mtk_r32(eth, mtk_reg_table[reg]);
103}
104
/* these bits are also exposed via the reset-controller API. However, the
 * switch and FE need to be brought out of reset at exactly the same moment,
 * and the reset-controller API does not provide this feature yet. Do the
 * reset manually until the reset-controller API is able to do this.
 */
void mtk_reset(struct mtk_eth *eth, u32 reset_bits)
{
	u32 val;

	/* assert the reset bits */
	regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val);
	val |= reset_bits;
	regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
	usleep_range(10, 20);
	/* deassert them again */
	val &= ~reset_bits;
	regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
	usleep_range(10, 20);
}
EXPORT_SYMBOL(mtk_reset);
123
/* Ack (clear) the interrupt bits in @mask on whichever DMA block(s)
 * this SoC uses (PDMA, QDMA, or both).
 */
static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask)
{
	if (eth->soc->dma_type & MTK_PDMA)
		mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS);
	if (eth->soc->dma_type & MTK_QDMA)
		mtk_w32(eth, mask, MTK_QMTK_INT_STATUS);
}
131
/* Return the union of pending interrupt bits across the active DMA
 * block(s).
 */
static inline u32 mtk_irq_pending(struct mtk_eth *eth)
{
	u32 status = 0;

	if (eth->soc->dma_type & MTK_PDMA)
		status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS);
	if (eth->soc->dma_type & MTK_QDMA)
		status |= mtk_r32(eth, MTK_QMTK_INT_STATUS);

	return status;
}
143
/* Ack status bits, using the secondary status register when the SoC's
 * register table provides one (non-zero MTK_REG_MTK_INT_STATUS2 entry).
 */
static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask)
{
	u32 status_reg = MTK_REG_MTK_INT_STATUS;

	if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
		status_reg = MTK_REG_MTK_INT_STATUS2;

	mtk_reg_w32(eth, mask, status_reg);
}
153
/* Read status bits, preferring the secondary status register when the
 * SoC's register table provides one.
 */
static u32 mtk_irq_pending_status(struct mtk_eth *eth)
{
	u32 status_reg = MTK_REG_MTK_INT_STATUS;

	if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
		status_reg = MTK_REG_MTK_INT_STATUS2;

	return mtk_reg_r32(eth, status_reg);
}
163
/* Clear the enable bits in @mask on the active DMA block(s).  Each write
 * is followed by a read-back to flush the posted write — keep that
 * ordering intact.
 */
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
{
	u32 val;

	if (eth->soc->dma_type & MTK_PDMA) {
		val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
		mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE);
		/* flush write */
		mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
	}
	if (eth->soc->dma_type & MTK_QDMA) {
		val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
		mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE);
		/* flush write */
		mtk_r32(eth, MTK_QMTK_INT_ENABLE);
	}
}
181
/* Set the enable bits in @mask on the active DMA block(s).  Each write
 * is followed by a read-back to flush the posted write — keep that
 * ordering intact.
 */
static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
{
	u32 val;

	if (eth->soc->dma_type & MTK_PDMA) {
		val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
		mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE);
		/* flush write */
		mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
	}
	if (eth->soc->dma_type & MTK_QDMA) {
		val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
		mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE);
		/* flush write */
		mtk_r32(eth, MTK_QMTK_INT_ENABLE);
	}
}
199
/* Return the union of currently-enabled interrupt bits across the
 * active DMA block(s).
 */
static inline u32 mtk_irq_enabled(struct mtk_eth *eth)
{
	u32 enabled = 0;

	if (eth->soc->dma_type & MTK_PDMA)
		enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
	if (eth->soc->dma_type & MTK_QDMA)
		enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE);

	return enabled;
}
211
/* Program the 6-byte @macaddr into the GDMA1 MAC address registers
 * under the page lock: bytes 0-1 go to the high register, bytes 2-5
 * to the low register.
 */
static inline void mtk_hw_set_macaddr(struct mtk_mac *mac,
				      unsigned char *macaddr)
{
	unsigned long flags;

	spin_lock_irqsave(&mac->hw->page_lock, flags);
	mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH);
	mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
		(macaddr[4] << 8) | macaddr[5],
		MTK_GDMA1_MAC_ADRL);
	spin_unlock_irqrestore(&mac->hw->page_lock, flags);
}
224
225static int mtk_set_mac_address(struct net_device *dev, void *p)
226{
227 int ret = eth_mac_addr(dev, p);
228 struct mtk_mac *mac = netdev_priv(dev);
229 struct mtk_eth *eth = mac->hw;
230
231 if (ret)
232 return ret;
233
234 if (eth->soc->set_mac)
235 eth->soc->set_mac(mac, dev->dev_addr);
236 else
237 mtk_hw_set_macaddr(mac, p);
238
239 return 0;
240}
241
242static inline int mtk_max_frag_size(int mtu)
243{
244 /* make sure buf_size will be at least MAX_RX_LENGTH */
245 if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH)
246 mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
247
248 return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
249 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
250}
251
252static inline int mtk_max_buf_size(int frag_size)
253{
254 int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
255 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
256
257 WARN_ON(buf_size < MAX_RX_LENGTH);
258
259 return buf_size;
260}
261
/* Snapshot a hardware RX descriptor into @rxd, using READ_ONCE so the
 * compiler cannot tear or re-read the DMA-visible fields.
 */
static inline void mtk_get_rxd(struct mtk_rx_dma *rxd,
			       struct mtk_rx_dma *dma_rxd)
{
	rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
	rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
	rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
	rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
}
270
/* Publish @txd into the hardware TX descriptor @dma_txd.  txd2 carries
 * the DMA done flag and must be written last so the hardware never sees
 * a half-initialized descriptor — do not reorder these stores.
 */
static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd,
				    struct mtk_tx_dma *dma_txd)
{
	WRITE_ONCE(dma_txd->txd1, txd->txd1);
	WRITE_ONCE(dma_txd->txd3, txd->txd3);
	WRITE_ONCE(dma_txd->txd4, txd->txd4);
	/* clean dma done flag last */
	WRITE_ONCE(dma_txd->txd2, txd->txd2);
}
280
281static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
282{
283 int i;
284
285 if (ring->rx_data && ring->rx_dma) {
286 for (i = 0; i < ring->rx_ring_size; i++) {
287 if (!ring->rx_data[i])
288 continue;
289 if (!ring->rx_dma[i].rxd1)
290 continue;
291 dma_unmap_single(eth->dev,
292 ring->rx_dma[i].rxd1,
293 ring->rx_buf_size,
294 DMA_FROM_DEVICE);
295 skb_free_frag(ring->rx_data[i]);
296 }
297 kfree(ring->rx_data);
298 ring->rx_data = NULL;
299 }
300
301 if (ring->rx_dma) {
302 dma_free_coherent(eth->dev,
303 ring->rx_ring_size * sizeof(*ring->rx_dma),
304 ring->rx_dma,
305 ring->rx_phys);
306 ring->rx_dma = NULL;
307 }
308}
309
/* Allocate and populate the RX ring: one page fragment per slot, the
 * coherent descriptor array, and a streaming DMA mapping for each slot.
 * Returns 0 on success or -ENOMEM.
 * NOTE(review): on failure, buffers and mappings created so far are NOT
 * released here — presumably the caller unwinds via mtk_clean_rx();
 * TODO confirm.
 */
static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
{
	int i, pad = 0;

	ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
	ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
	ring->rx_ring_size = eth->soc->dma_ring_size;
	ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
				GFP_KERNEL);
	if (!ring->rx_data)
		goto no_rx_mem;

	for (i = 0; i < ring->rx_ring_size; i++) {
		ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
		if (!ring->rx_data[i])
			goto no_rx_mem;
	}

	ring->rx_dma =
		dma_alloc_coherent(eth->dev,
				   ring->rx_ring_size * sizeof(*ring->rx_dma),
				   &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
	if (!ring->rx_dma)
		goto no_rx_mem;

	/* SoCs without 2-byte RX offset need software IP alignment */
	if (!eth->soc->rx_2b_offset)
		pad = NET_IP_ALIGN;

	for (i = 0; i < ring->rx_ring_size; i++) {
		dma_addr_t dma_addr = dma_map_single(eth->dev,
				ring->rx_data[i] + NET_SKB_PAD + pad,
				ring->rx_buf_size,
				DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
			goto no_rx_mem;
		ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;

		if (eth->soc->rx_sg_dma)
			ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
		else
			ring->rx_dma[i].rxd2 = RX_DMA_LSO;
	}
	ring->rx_calc_idx = ring->rx_ring_size - 1;
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	return 0;

no_rx_mem:
	return -ENOMEM;
}
363
/* Undo the DMA mappings recorded in one TX buffer slot and release its
 * skb.
 *
 * Buffer slot 0 was mapped either with dma_map_single() (head data,
 * MTK_TX_FLAGS_SINGLE0) or as a page fragment (MTK_TX_FLAGS_PAGE0);
 * slot 1 (MTK_TX_FLAGS_PAGE1) is always a fragment. A DMA_DUMMY_DESC
 * skb is a placeholder owned by another slot and must not be freed.
 */
static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
{
	if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
		dma_unmap_single(dev,
				 dma_unmap_addr(tx_buf, dma_addr0),
				 dma_unmap_len(tx_buf, dma_len0),
				 DMA_TO_DEVICE);
	} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
		dma_unmap_page(dev,
			       dma_unmap_addr(tx_buf, dma_addr0),
			       dma_unmap_len(tx_buf, dma_len0),
			       DMA_TO_DEVICE);
	}
	if (tx_buf->flags & MTK_TX_FLAGS_PAGE1)
		dma_unmap_page(dev,
			       dma_unmap_addr(tx_buf, dma_addr1),
			       dma_unmap_len(tx_buf, dma_len1),
			       DMA_TO_DEVICE);

	tx_buf->flags = 0;
	if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
		dev_kfree_skb_any(tx_buf->skb);
	tx_buf->skb = NULL;
}
388
389static void mtk_pdma_tx_clean(struct mtk_eth *eth)
390{
391 struct mtk_tx_ring *ring = &eth->tx_ring;
392 int i;
393
394 if (ring->tx_buf) {
395 for (i = 0; i < ring->tx_ring_size; i++)
396 mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
397 kfree(ring->tx_buf);
398 ring->tx_buf = NULL;
399 }
400
401 if (ring->tx_dma) {
402 dma_free_coherent(eth->dev,
403 ring->tx_ring_size * sizeof(*ring->tx_dma),
404 ring->tx_dma,
405 ring->tx_phys);
406 ring->tx_dma = NULL;
407 }
408}
409
410static void mtk_qdma_tx_clean(struct mtk_eth *eth)
411{
412 struct mtk_tx_ring *ring = &eth->tx_ring;
413 int i;
414
415 if (ring->tx_buf) {
416 for (i = 0; i < ring->tx_ring_size; i++)
417 mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
418 kfree(ring->tx_buf);
419 ring->tx_buf = NULL;
420 }
421
422 if (ring->tx_dma) {
423 dma_free_coherent(eth->dev,
424 ring->tx_ring_size * sizeof(*ring->tx_dma),
425 ring->tx_dma,
426 ring->tx_phys);
427 ring->tx_dma = NULL;
428 }
429}
430
/* Fold this MAC's hardware MIB counters into its software stats.
 *
 * Counters live at fixed offsets from the SoC counter base; the layout
 * differs between the "new_stats" register map and the legacy one, and
 * byte counters are 64-bit, split across two registers. Callers
 * serialize via hw_stats->stats_lock; readers use the u64_stats
 * seqcount bracketed below.
 */
void mtk_stats_update_mac(struct mtk_mac *mac)
{
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
	u64 stats;

	base += hw_stats->reg_offset;

	u64_stats_update_begin(&hw_stats->syncp);

	if (mac->hw->soc->new_stats) {
		hw_stats->rx_bytes += mtk_r32(mac->hw, base);
		/* High 32 bits of the RX byte counter. */
		stats = mtk_r32(mac->hw, base + 0x04);
		if (stats)
			hw_stats->rx_bytes += (stats << 32);
		hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
		hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
		hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
		hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
		hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
		hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
		hw_stats->rx_flow_control_packets +=
			mtk_r32(mac->hw, base + 0x24);
		hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
		hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
		hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
		/* High 32 bits of the TX byte counter. */
		stats = mtk_r32(mac->hw, base + 0x34);
		if (stats)
			hw_stats->tx_bytes += (stats << 32);
		hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
	} else {
		hw_stats->tx_bytes += mtk_r32(mac->hw, base);
		hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04);
		hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08);
		hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c);
		hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20);
		hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24);
		hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28);
		hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c);
		hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30);
		hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34);
		hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38);
		hw_stats->rx_flow_control_packets +=
			mtk_r32(mac->hw, base + 0x3c);
	}

	u64_stats_update_end(&hw_stats->syncp);
}
479
/* ndo_get_stats64: report device statistics into @storage.
 *
 * Without a hardware counter base, plain netdev counters are used.
 * Otherwise the MIB counters are refreshed opportunistically (trylock,
 * so a concurrent updater is never blocked on) and the soft stats are
 * copied under the u64_stats seqcount retry loop.
 *
 * NOTE(review): rx_errors reflects only checksum errors here; length/
 * crc/overflow errors appear solely in their dedicated fields - confirm
 * this is intentional.
 */
static void mtk_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *storage)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_hw_stats *hw_stats = mac->hw_stats;
	unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
	unsigned int start;

	if (!base) {
		netdev_stats_to_stats64(storage, &dev->stats);
		return;
	}

	if (netif_running(dev) && netif_device_present(dev)) {
		if (spin_trylock(&hw_stats->stats_lock)) {
			mtk_stats_update_mac(mac);
			spin_unlock(&hw_stats->stats_lock);
		}
	}

	do {
		start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
		storage->rx_packets = hw_stats->rx_packets;
		storage->tx_packets = hw_stats->tx_packets;
		storage->rx_bytes = hw_stats->rx_bytes;
		storage->tx_bytes = hw_stats->tx_bytes;
		storage->collisions = hw_stats->tx_collisions;
		storage->rx_length_errors = hw_stats->rx_short_errors +
					    hw_stats->rx_long_errors;
		storage->rx_over_errors = hw_stats->rx_overflow;
		storage->rx_crc_errors = hw_stats->rx_fcs_errors;
		storage->rx_errors = hw_stats->rx_checksum_errors;
		storage->tx_aborted_errors = hw_stats->tx_skip;
	} while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));

	storage->tx_errors = dev->stats.tx_errors;
	storage->rx_dropped = dev->stats.rx_dropped;
	storage->tx_dropped = dev->stats.tx_dropped;
}
519
/* ndo_vlan_rx_add_vid: program hardware TX VLAN insertion for @vid.
 *
 * The VLAN table has 16 slots indexed by the low 4 bits of the vid,
 * packed two 16-bit entries per 32-bit register. When the slot is
 * already claimed by another vid the hardware cannot represent both,
 * so TX VLAN offload is disabled for the device instead.
 */
static int mtk_vlan_rx_add_vid(struct net_device *dev,
			       __be16 proto, u16 vid)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 idx = (vid & 0xf);
	u32 vlan_cfg;

	/* Nothing to do without VLAN ID registers or with offload off. */
	if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
	      (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
		return 0;

	if (test_bit(idx, &eth->vlan_map)) {
		netdev_warn(dev, "disable tx vlan offload\n");
		dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
		netdev_update_features(dev);
	} else {
		vlan_cfg = mtk_r32(eth,
				   mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
				   ((idx >> 1) << 2));
		/* Odd slots occupy the high halfword, even the low. */
		if (idx & 0x1) {
			vlan_cfg &= 0xffff;
			vlan_cfg |= (vid << 16);
		} else {
			vlan_cfg &= 0xffff0000;
			vlan_cfg |= vid;
		}
		mtk_w32(eth,
			vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
			((idx >> 1) << 2));
		set_bit(idx, &eth->vlan_map);
	}

	return 0;
}
555
556static int mtk_vlan_rx_kill_vid(struct net_device *dev,
557 __be16 proto, u16 vid)
558{
559 struct mtk_mac *mac = netdev_priv(dev);
560 struct mtk_eth *eth = mac->hw;
561 u32 idx = (vid & 0xf);
562
563 if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
564 (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
565 return 0;
566
567 clear_bit(idx, &eth->vlan_map);
568
569 return 0;
570}
571
572static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring)
573{
574 barrier();
575 return (u32)(ring->tx_ring_size -
576 ((ring->tx_next_idx - ring->tx_free_idx) &
577 (ring->tx_ring_size - 1)));
578}
579
/* Pad undersized frames before handing them to the DMA engine.
 *
 * Frames of at least VLAN_ETH_ZLEN never need padding. SoCs whose
 * 64-byte hardware padding works (padding_64b set, padding_bug clear)
 * pad themselves. Otherwise the target length depends on the VLAN
 * situation: a tag still to be inserted by hardware only needs
 * ETH_ZLEN (presumably because insertion grows the frame later -
 * confirm), while an in-band 802.1Q header needs VLAN_ETH_ZLEN.
 * Returns 0 on success or skb_pad()'s negative error.
 */
static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth)
{
	unsigned int len;
	int ret;

	if (unlikely(skb->len >= VLAN_ETH_ZLEN))
		return 0;

	if (eth->soc->padding_64b && !eth->soc->padding_bug)
		return 0;

	if (skb_vlan_tag_present(skb))
		len = ETH_ZLEN;
	else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
		len = VLAN_ETH_ZLEN;
	else if (!eth->soc->padding_64b)
		len = ETH_ZLEN;
	else
		return 0;

	if (skb->len >= len)
		return 0;

	ret = skb_pad(skb, len - skb->len);
	if (ret < 0)
		return ret;
	/* skb_pad() only extends the buffer; account for the padding in
	 * the skb length and tail ourselves.
	 */
	skb->len = len;
	skb_set_tail_pointer(skb, len);

	return ret;
}
611
/* Map an skb onto the PDMA TX ring.
 *
 * PDMA descriptors carry two buffer slots each (PLEN0/PLEN1): the head
 * segment fills slot 0 of the descriptor at tx_next_idx, and fragment
 * chunks are packed pairwise into following descriptors. Returns 0 on
 * success, -1 on failure (caller drops the skb).
 */
static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
			   int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct skb_frag_struct *frag;
	struct mtk_tx_dma txd, *ptxd;
	struct mtk_tx_buf *tx_buf;
	int i, j, k, frag_size, frag_map_size, offset;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	u32 def_txd4;

	if (mtk_skb_padto(skb, eth)) {
		netif_warn(eth, tx_err, dev, "tx padding failed!\n");
		return -1;
	}

	tx_buf = &ring->tx_buf[ring->tx_next_idx];
	memset(tx_buf, 0, sizeof(*tx_buf));
	memset(&txd, 0, sizeof(txd));
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* init tx descriptor */
	def_txd4 = eth->soc->txd4;
	txd.txd4 = def_txd4;

	/* Forward-port field: MAC ids are 1-based in the descriptor. */
	if (eth->soc->mac_count > 1)
		txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;

	if (gso)
		txd.txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd.txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb)) {
		u16 tag = skb_vlan_tag_get(skb);

		txd.txd4 |= TX_DMA_INS_VLAN |
			    ((tag >> VLAN_PRIO_SHIFT) << 4) |
			    (tag & 0xF);
	}

	mapped_addr = dma_map_single(&dev->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
		return -1;

	txd.txd1 = mapped_addr;
	txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));

	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	j = ring->tx_next_idx;
	k = 0;
	for (i = 0; i < nr_frags; i++) {
		offset = 0;
		frag = &skb_shinfo(skb)->frags[i];
		frag_size = skb_frag_size(frag);

		/* Fragments larger than TX_DMA_BUF_LEN span slots. */
		while (frag_size > 0) {
			frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
				goto err_dma;

			if (k & 0x1) {
				/* Odd chunk: advance to the next
				 * descriptor, fill its first buffer.
				 */
				j = NEXT_TX_DESP_IDX(j);
				txd.txd1 = mapped_addr;
				txd.txd2 = TX_DMA_PLEN0(frag_map_size);
				txd.txd4 = def_txd4;

				tx_buf = &ring->tx_buf[j];
				memset(tx_buf, 0, sizeof(*tx_buf));

				tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
				dma_unmap_addr_set(tx_buf, dma_addr0,
						   mapped_addr);
				dma_unmap_len_set(tx_buf, dma_len0,
						  frag_map_size);
			} else {
				/* Even chunk: second buffer of the
				 * current descriptor.
				 */
				txd.txd3 = mapped_addr;
				txd.txd2 |= TX_DMA_PLEN1(frag_map_size);

				tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
				tx_buf->flags |= MTK_TX_FLAGS_PAGE1;
				dma_unmap_addr_set(tx_buf, dma_addr1,
						   mapped_addr);
				dma_unmap_len_set(tx_buf, dma_len1,
						  frag_map_size);

				/* Flush the filled descriptor unless it
				 * is the final chunk (written below with
				 * the last-segment bit set).
				 */
				if (!((i == (nr_frags - 1)) &&
				      (frag_map_size == frag_size))) {
					mtk_set_txd_pdma(&txd,
							 &ring->tx_dma[j]);
					memset(&txd, 0, sizeof(txd));
				}
			}
			frag_size -= frag_map_size;
			offset += frag_map_size;
			k++;
		}
	}

	/* set last segment */
	if (k & 0x1)
		txd.txd2 |= TX_DMA_LS1;
	else
		txd.txd2 |= TX_DMA_LS0;
	mtk_set_txd_pdma(&txd, &ring->tx_dma[j]);

	/* store skb to cleanup */
	tx_buf->skb = skb;

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();
	atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));

	/* Kick the hardware unless more skbs are about to be queued. */
	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);

	return 0;

err_dma:
	/* Unwind: walk tx_num slots from tx_next_idx, unmapping each.
	 * NOTE(review): tx_num is the caller's estimate, not the count
	 * of slots actually touched - confirm they always match.
	 */
	j = ring->tx_next_idx;
	for (i = 0; i < tx_num; i++) {
		ptxd = &ring->tx_dma[j];
		tx_buf = &ring->tx_buf[j];

		/* unmap dma */
		mtk_txd_unmap(&dev->dev, tx_buf);

		ptxd->txd2 = TX_DMA_DESP2_DEF;
		j = NEXT_TX_DESP_IDX(j);
	}
	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();
	return -1;
}
767
768/* the qdma core needs scratch memory to be setup */
769static int mtk_init_fq_dma(struct mtk_eth *eth)
770{
771 dma_addr_t dma_addr, phy_ring_head, phy_ring_tail;
772 int cnt = eth->soc->dma_ring_size;
773 int i;
774
775 eth->scratch_ring = dma_alloc_coherent(eth->dev,
776 cnt * sizeof(struct mtk_tx_dma),
777 &phy_ring_head,
778 GFP_ATOMIC | __GFP_ZERO);
779 if (unlikely(!eth->scratch_ring))
780 return -ENOMEM;
781
782 eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE,
783 GFP_KERNEL);
784 dma_addr = dma_map_single(eth->dev,
785 eth->scratch_head, cnt * QDMA_PAGE_SIZE,
786 DMA_FROM_DEVICE);
787 if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
788 return -ENOMEM;
789
790 memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
791 phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1));
792
793 for (i = 0; i < cnt; i++) {
794 eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE));
795 if (i < cnt - 1)
796 eth->scratch_ring[i].txd2 = (phy_ring_head +
797 ((i + 1) * sizeof(struct mtk_tx_dma)));
798 eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE);
799 }
800
801 mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
802 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
803 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
804 mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
805
806 return 0;
807}
808
809static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
810{
811 void *ret = ring->tx_dma;
812
813 return ret + (desc - ring->tx_phys);
814}
815
816static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring,
817 struct mtk_tx_dma *txd)
818{
819 return mtk_qdma_phys_to_virt(ring, txd->txd2);
820}
821
822static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
823 struct mtk_tx_dma *txd)
824{
825 int idx = txd - ring->tx_dma;
826
827 return &ring->tx_buf[idx];
828}
829
/* Map an skb onto the QDMA TX ring.
 *
 * QDMA descriptors are chained through txd2 (bus address of the next
 * descriptor). The head segment fills the first free descriptor and
 * each fragment chunk takes another from the chain. The head
 * descriptor's txd3/txd4 are published last so the hardware never sees
 * a half-built chain. Returns 0 on success or -ENOMEM.
 */
static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
			   int tx_num, struct mtk_tx_ring *ring, bool gso)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_dma *itxd, *txd;
	struct mtk_tx_buf *tx_buf;
	dma_addr_t mapped_addr;
	unsigned int nr_frags;
	int i, n_desc = 1;
	u32 txd4 = eth->soc->txd4;

	/* Ring full when next-free catches up with last-free. */
	itxd = ring->tx_next_free;
	if (itxd == ring->tx_last_free)
		return -ENOMEM;

	/* Forward-port field: MAC ids are 1-based in the descriptor. */
	if (eth->soc->mac_count > 1)
		txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;

	tx_buf = mtk_desc_to_tx_buf(ring, itxd);
	memset(tx_buf, 0, sizeof(*tx_buf));

	if (gso)
		txd4 |= TX_DMA_TSO;

	/* TX Checksum offload */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd4 |= TX_DMA_CHKSUM;

	/* VLAN header offload */
	if (skb_vlan_tag_present(skb))
		txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb);

	mapped_addr = dma_map_single(&dev->dev, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
		return -ENOMEM;

	WRITE_ONCE(itxd->txd1, mapped_addr);
	tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
	dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
	dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));

	/* TX SG offload */
	txd = itxd;
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		unsigned int offset = 0;
		int frag_size = skb_frag_size(frag);

		/* Fragments larger than TX_DMA_BUF_LEN span descriptors. */
		while (frag_size) {
			bool last_frag = false;
			unsigned int frag_map_size;

			txd = mtk_tx_next_qdma(ring, txd);
			if (txd == ring->tx_last_free)
				goto err_dma;

			n_desc++;
			frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
			mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
						       frag_map_size,
						       DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
				goto err_dma;

			if (i == nr_frags - 1 &&
			    (frag_size - frag_map_size) == 0)
				last_frag = true;

			WRITE_ONCE(txd->txd1, mapped_addr);
			WRITE_ONCE(txd->txd3, (QDMA_TX_SWC |
					       TX_DMA_PLEN0(frag_map_size) |
					       last_frag * TX_DMA_LS0) |
					       mac->id);
			WRITE_ONCE(txd->txd4, 0);

			/* Previous slot holds a placeholder; the real skb
			 * is stored on the final descriptor below.
			 */
			tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
			tx_buf = mtk_desc_to_tx_buf(ring, txd);
			memset(tx_buf, 0, sizeof(*tx_buf));

			tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
			dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
			dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
			frag_size -= frag_map_size;
			offset += frag_map_size;
		}
	}

	/* store skb to cleanup */
	tx_buf->skb = skb;

	/* Publish the head descriptor last; LS0 set when unfragmented. */
	WRITE_ONCE(itxd->txd4, txd4);
	WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));

	netdev_sent_queue(dev, skb->len);
	skb_tx_timestamp(skb);

	ring->tx_next_free = mtk_tx_next_qdma(ring, txd);
	atomic_sub(n_desc, &ring->tx_free_count);

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	/* Kick the hardware unless more skbs are about to be queued. */
	if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
		mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);

	return 0;

err_dma:
	/* Unwind from itxd up to (not including) the failing txd.
	 * NOTE(review): tx_buf is derived from txd (constant) each
	 * iteration while itxd walks the chain - looks like it should
	 * use itxd; confirm before relying on this path.
	 */
	do {
		tx_buf = mtk_desc_to_tx_buf(ring, txd);

		/* unmap dma */
		mtk_txd_unmap(&dev->dev, tx_buf);

		itxd->txd3 = TX_DMA_DESP2_DEF;
		itxd = mtk_tx_next_qdma(ring, itxd);
	} while (itxd != txd);

	return -ENOMEM;
}
956
957static inline int mtk_cal_txd_req(struct sk_buff *skb)
958{
959 int i, nfrags;
960 struct skb_frag_struct *frag;
961
962 nfrags = 1;
963 if (skb_is_gso(skb)) {
964 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
965 frag = &skb_shinfo(skb)->frags[i];
966 nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
967 }
968 } else {
969 nfrags += skb_shinfo(skb)->nr_frags;
970 }
971
972 return DIV_ROUND_UP(nfrags, 2);
973}
974
/* ndo_start_xmit: queue one skb onto the TX DMA ring.
 *
 * Stops the queue and returns NETDEV_TX_BUSY when too few descriptors
 * remain; otherwise maps the skb via the ring-specific tx_map hook
 * (PDMA or QDMA) and accounts it. For TSO packets the MSS is stashed
 * in the TCP checksum field (the convention noted inline below).
 */
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device_stats *stats = &dev->stats;
	int tx_num;
	int len = skb->len;
	bool gso = false;

	tx_num = mtk_cal_txd_req(skb);
	if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) {
		netif_stop_queue(dev);
		netif_err(eth, tx_queued, dev,
			  "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	/* TSO: fill MSS info in tcp checksum field */
	if (skb_is_gso(skb)) {
		if (skb_cow_head(skb, 0)) {
			netif_warn(eth, tx_err, dev,
				   "GSO expand head fail.\n");
			goto drop;
		}

		if (skb_shinfo(skb)->gso_type &
		    (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
			gso = true;
			tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
		}
	}

	if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0)
		goto drop;

	stats->tx_packets++;
	stats->tx_bytes += len;

	/* Stop when low on descriptors; the barrier pairs with the
	 * completion path so a wakeup cannot be lost, and the re-check
	 * wakes us if completions freed space meanwhile.
	 */
	if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) {
		netif_stop_queue(dev);
		smp_mb();
		if (unlikely(atomic_read(&ring->tx_free_count) >
			     ring->tx_thresh))
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;

drop:
	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
1029
1030static int mtk_poll_rx(struct napi_struct *napi, int budget,
1031 struct mtk_eth *eth, u32 rx_intr)
1032{
1033 struct mtk_soc_data *soc = eth->soc;
1034 struct mtk_rx_ring *ring = &eth->rx_ring[0];
1035 int idx = ring->rx_calc_idx;
1036 u32 checksum_bit;
1037 struct sk_buff *skb;
1038 u8 *data, *new_data;
1039 struct mtk_rx_dma *rxd, trxd;
1040 int done = 0, pad;
1041
1042 if (eth->soc->hw_features & NETIF_F_RXCSUM)
1043 checksum_bit = soc->checksum_bit;
1044 else
1045 checksum_bit = 0;
1046
1047 if (eth->soc->rx_2b_offset)
1048 pad = 0;
1049 else
1050 pad = NET_IP_ALIGN;
1051
1052 while (done < budget) {
1053 struct net_device *netdev;
1054 unsigned int pktlen;
1055 dma_addr_t dma_addr;
1056 int mac = 0;
1057
1058 idx = NEXT_RX_DESP_IDX(idx);
1059 rxd = &ring->rx_dma[idx];
1060 data = ring->rx_data[idx];
1061
1062 mtk_get_rxd(&trxd, rxd);
1063 if (!(trxd.rxd2 & RX_DMA_DONE))
1064 break;
1065
1066 /* find out which mac the packet come from. values start at 1 */
1067 if (eth->soc->mac_count > 1) {
1068 mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
1069 RX_DMA_FPORT_MASK;
1070 mac--;
1071 if (mac < 0 || mac >= eth->soc->mac_count)
1072 goto release_desc;
1073 }
1074
1075 netdev = eth->netdev[mac];
1076
1077 /* alloc new buffer */
1078 new_data = napi_alloc_frag(ring->frag_size);
1079 if (unlikely(!new_data || !netdev)) {
1080 netdev->stats.rx_dropped++;
1081 goto release_desc;
1082 }
1083 dma_addr = dma_map_single(&netdev->dev,
1084 new_data + NET_SKB_PAD + pad,
1085 ring->rx_buf_size,
1086 DMA_FROM_DEVICE);
1087 if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
1088 skb_free_frag(new_data);
1089 goto release_desc;
1090 }
1091
1092 /* receive data */
1093 skb = build_skb(data, ring->frag_size);
1094 if (unlikely(!skb)) {
1095 put_page(virt_to_head_page(new_data));
1096 goto release_desc;
1097 }
1098 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1099
1100 dma_unmap_single(&netdev->dev, trxd.rxd1,
1101 ring->rx_buf_size, DMA_FROM_DEVICE);
1102 pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
1103 skb->dev = netdev;
1104 skb_put(skb, pktlen);
1105 if (trxd.rxd4 & checksum_bit)
1106 skb->ip_summed = CHECKSUM_UNNECESSARY;
1107 else
1108 skb_checksum_none_assert(skb);
1109 skb->protocol = eth_type_trans(skb, netdev);
1110
1111 netdev->stats.rx_packets++;
1112 netdev->stats.rx_bytes += pktlen;
1113
1114 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
1115 RX_DMA_VID(trxd.rxd3))
1116 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1117 RX_DMA_VID(trxd.rxd3));
1118 napi_gro_receive(napi, skb);
1119
1120 ring->rx_data[idx] = new_data;
1121 rxd->rxd1 = (unsigned int)dma_addr;
1122
1123release_desc:
1124 if (eth->soc->rx_sg_dma)
1125 rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
1126 else
1127 rxd->rxd2 = RX_DMA_LSO;
1128
1129 ring->rx_calc_idx = idx;
1130 /* make sure that all changes to the dma ring are flushed before
1131 * we continue
1132 */
1133 wmb();
1134 if (eth->soc->dma_type == MTK_QDMA)
1135 mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0);
1136 else
1137 mtk_reg_w32(eth, ring->rx_calc_idx,
1138 MTK_REG_RX_CALC_IDX0);
1139 done++;
1140 }
1141
1142 if (done < budget)
1143 mtk_irq_ack(eth, rx_intr);
1144
1145 return done;
1146}
1147
/* Reclaim completed PDMA TX descriptors up to the hardware DTX index.
 *
 * Unmaps and frees each finished slot, refreshes the cached
 * free-descriptor count, and requests another poll round (*tx_again)
 * when the hardware index moved during reclaim. Returns the number of
 * real skbs completed; DMA_DUMMY_DESC placeholder slots are unmapped
 * but not counted.
 */
static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
{
	struct sk_buff *skb;
	struct mtk_tx_buf *tx_buf;
	int done = 0;
	u32 idx, hwidx;
	struct mtk_tx_ring *ring = &eth->tx_ring;
	unsigned int bytes = 0;

	idx = ring->tx_free_idx;
	hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0);

	while ((idx != hwidx) && budget) {
		tx_buf = &ring->tx_buf[idx];
		skb = tx_buf->skb;

		if (!skb)
			break;

		if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
			bytes += skb->len;
			done++;
			budget--;
		}
		mtk_txd_unmap(eth->dev, tx_buf);
		idx = NEXT_TX_DESP_IDX(idx);
	}
	ring->tx_free_idx = idx;
	atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));

	/* read hw index again make sure no new tx packet */
	if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0))
		*tx_again = 1;

	if (done)
		/* PDMA serves a single netdev: *eth->netdev == netdev[0] */
		netdev_completed_queue(*eth->netdev, done, bytes);

	return done;
}
1187
1188static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
1189{
1190 struct mtk_tx_ring *ring = &eth->tx_ring;
1191 struct mtk_tx_dma *desc;
1192 struct sk_buff *skb;
1193 struct mtk_tx_buf *tx_buf;
1194 int total = 0, done[MTK_MAX_DEVS];
1195 unsigned int bytes[MTK_MAX_DEVS];
1196 u32 cpu, dma;
1197 int i;
1198
1199 memset(done, 0, sizeof(done));
1200 memset(bytes, 0, sizeof(bytes));
1201
1202 cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
1203 dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
1204
1205 desc = mtk_qdma_phys_to_virt(ring, cpu);
1206
1207 while ((cpu != dma) && budget) {
1208 u32 next_cpu = desc->txd2;
1209 int mac;
1210
1211 desc = mtk_tx_next_qdma(ring, desc);
1212 if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0)
1213 break;
1214
1215 mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
1216 TX_DMA_FPORT_MASK;
1217 mac--;
1218
1219 tx_buf = mtk_desc_to_tx_buf(ring, desc);
1220 skb = tx_buf->skb;
1221 if (!skb)
1222 break;
1223
1224 if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
1225 bytes[mac] += skb->len;
1226 done[mac]++;
1227 budget--;
1228 }
1229 mtk_txd_unmap(eth->dev, tx_buf);
1230
1231 ring->tx_last_free->txd2 = next_cpu;
1232 ring->tx_last_free = desc;
1233 atomic_inc(&ring->tx_free_count);
1234
1235 cpu = next_cpu;
1236 }
1237
1238 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
1239
1240 /* read hw index again make sure no new tx packet */
1241 if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
1242 *tx_again = true;
1243
1244 for (i = 0; i < eth->soc->mac_count; i++) {
1245 if (!done[i])
1246 continue;
1247 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
1248 total += done[i];
1249 }
1250
1251 return total;
1252}
1253
/* Run the ring's TX reclaim and manage interrupt ack / queue wakeup.
 *
 * The TX interrupt is acked only when no further reclaim round is
 * pending; the queue is woken once enough descriptors were freed
 * (smp_mb() pairs with the stop in mtk_start_xmit()).
 */
static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr,
		       bool *tx_again)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	struct net_device *netdev = eth->netdev[0];
	int done;

	done = eth->tx_ring.tx_poll(eth, budget, tx_again);
	if (!*tx_again)
		mtk_irq_ack(eth, tx_intr);

	if (!done)
		return 0;

	smp_mb();
	if (unlikely(!netif_queue_stopped(netdev)))
		return done;

	if (atomic_read(&ring->tx_free_count) > ring->tx_thresh)
		netif_wake_queue(netdev);

	return done;
}
1277
1278static void mtk_stats_update(struct mtk_eth *eth)
1279{
1280 int i;
1281
1282 for (i = 0; i < eth->soc->mac_count; i++) {
1283 if (!eth->mac[i] || !eth->mac[i]->hw_stats)
1284 continue;
1285 if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
1286 mtk_stats_update_mac(eth->mac[i]);
1287 spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
1288 }
1289 }
1290}
1291
/* NAPI poll handler: services TX completions, RX packets and counter
 * overflow events, then re-enables interrupts once all work is done.
 */
static int mtk_poll(struct napi_struct *napi, int budget)
{
	struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
	u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr;
	int tx_done, rx_done;
	bool tx_again = false;

	status = mtk_irq_pending(eth);
	mtk_status = mtk_irq_pending_status(eth);
	tx_intr = eth->soc->tx_int;
	rx_intr = eth->soc->rx_int;
	status_intr = eth->soc->status_int;
	tx_done = 0;
	rx_done = 0;
	tx_again = 0;

	if (status & tx_intr)
		tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again);

	if (status & rx_intr)
		rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);

	/* MIB counters overflowed: fold them into the soft stats. */
	if (unlikely(mtk_status & status_intr)) {
		mtk_stats_update(eth);
		mtk_irq_ack_status(eth, status_intr);
	}

	if (unlikely(netif_msg_intr(eth))) {
		mask = mtk_irq_enabled(eth);
		netdev_info(eth->netdev[0],
			    "done tx %d, rx %d, intr 0x%08x/0x%x\n",
			    tx_done, rx_done, status, mask);
	}

	/* Stay scheduled while work remains or budget was exhausted. */
	if (tx_again || rx_done == budget)
		return budget;

	/* Re-check for interrupts raised while we were polling so none
	 * are lost across napi_complete().
	 */
	status = mtk_irq_pending(eth);
	if (status & (tx_intr | rx_intr))
		return budget;

	napi_complete(napi);
	mtk_irq_enable(eth, tx_intr | rx_intr);

	return rx_done;
}
1338
/* Allocate and program the PDMA TX ring.
 *
 * tx_thresh is the free-descriptor watermark for stopping/waking the
 * queue; it is at least MAX_SKB_FRAGS so a maximally fragmented skb
 * always fits once the queue is woken.
 *
 * NOTE(review): the no_tx_mem path leaves a successful tx_buf
 * allocation in place and ring->tx_clean unset - confirm the caller's
 * error handling copes with a partially initialized ring.
 */
static int mtk_pdma_tx_alloc(struct mtk_eth *eth)
{
	int i;
	struct mtk_tx_ring *ring = &eth->tx_ring;

	ring->tx_ring_size = eth->soc->dma_ring_size;
	ring->tx_free_idx = 0;
	ring->tx_next_idx = 0;
	ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
			      MAX_SKB_FRAGS);

	ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
			       GFP_KERNEL);
	if (!ring->tx_buf)
		goto no_tx_mem;

	ring->tx_dma =
		dma_alloc_coherent(eth->dev,
				   ring->tx_ring_size * sizeof(*ring->tx_dma),
				   &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO);
	if (!ring->tx_dma)
		goto no_tx_mem;

	/* Set every descriptor to its default (empty) state. */
	for (i = 0; i < ring->tx_ring_size; i++) {
		ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
		ring->tx_dma[i].txd4 = eth->soc->txd4;
	}

	atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
	ring->tx_map = mtk_pdma_tx_map;
	ring->tx_poll = mtk_pdma_tx_poll;
	ring->tx_clean = mtk_pdma_tx_clean;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0);
	mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0);
	mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0);
	mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG);

	return 0;

no_tx_mem:
	return -ENOMEM;
}
1387
/* Allocate and program the QDMA TX descriptor chain.
 *
 * Descriptors are linked in a circle through txd2 (bus address of the
 * next descriptor). Two descriptors are held in reserve: free_count is
 * size - 2 and tx_last_free points at the second-to-last entry, which
 * matches the ring-full test in mtk_qdma_tx_map().
 *
 * NOTE(review): on dma_alloc_coherent() failure tx_buf is not freed
 * and ring->tx_clean is not yet set - confirm the caller's error path.
 */
static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
{
	struct mtk_tx_ring *ring = &eth->tx_ring;
	int i, sz = sizeof(*ring->tx_dma);

	ring->tx_ring_size = eth->soc->dma_ring_size;
	ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
			       GFP_KERNEL);
	if (!ring->tx_buf)
		goto no_tx_mem;

	ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
					  &ring->tx_phys,
					  GFP_ATOMIC | __GFP_ZERO);
	if (!ring->tx_dma)
		goto no_tx_mem;

	/* Chain every descriptor to its successor, circularly. */
	for (i = 0; i < ring->tx_ring_size; i++) {
		int next = (i + 1) % ring->tx_ring_size;
		u32 next_ptr = ring->tx_phys + next * sz;

		ring->tx_dma[i].txd2 = next_ptr;
		ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF;
	}

	atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2);
	ring->tx_next_free = &ring->tx_dma[0];
	ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2];
	ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
			      MAX_SKB_FRAGS);

	ring->tx_map = mtk_qdma_tx_map;
	ring->tx_poll = mtk_qdma_tx_poll;
	ring->tx_clean = mtk_qdma_tx_clean;

	/* make sure that all changes to the dma ring are flushed before we
	 * continue
	 */
	wmb();

	/* Program the engine's context and release pointers. */
	mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR);
	mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR);
	mtk_w32(eth,
		ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
		MTK_QTX_CRX_PTR);
	mtk_w32(eth,
		ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
		MTK_QTX_DRX_PTR);

	return 0;

no_tx_mem:
	return -ENOMEM;
}
1442
/* Bring up the QDMA engine: scratch free-queue memory, the TX chain
 * and RX ring @ring, then program the QDMA RX registers and
 * flow-control thresholds.
 *
 * NOTE(review): stages completed before a failing one are not torn
 * down here; presumably the caller cleans up - confirm.
 */
static int mtk_qdma_init(struct mtk_eth *eth, int ring)
{
	int err;

	err = mtk_init_fq_dma(eth);
	if (err)
		return err;

	err = mtk_qdma_tx_alloc_tx(eth);
	if (err)
		return err;

	err = mtk_dma_rx_alloc(eth, &eth->rx_ring[ring]);
	if (err)
		return err;

	mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0);
	mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0);
	mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0);
	mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
	mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));

	/* Enable random early drop and set drop threshold automatically */
	mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES);
	mtk_w32(eth, 0x0, MTK_QDMA_HRED2);

	return 0;
}
1471
1472static int mtk_pdma_qdma_init(struct mtk_eth *eth)
1473{
1474 int err = mtk_qdma_init(eth, 1);
1475
1476 if (err)
1477 return err;
1478
1479 err = mtk_dma_rx_alloc(eth, &eth->rx_ring[0]);
1480 if (err)
1481 return err;
1482
1483 mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0);
1484 mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0);
1485 mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0);
1486 mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
1487
1488 return 0;
1489}
1490
1491static int mtk_pdma_init(struct mtk_eth *eth)
1492{
1493 struct mtk_rx_ring *ring = &eth->rx_ring[0];
1494 int err;
1495
1496 err = mtk_pdma_tx_alloc(eth);
1497 if (err)
1498 return err;
1499
1500 err = mtk_dma_rx_alloc(eth, ring);
1501 if (err)
1502 return err;
1503
1504 mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0);
1505 mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0);
1506 mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0);
1507 mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
1508
1509 return 0;
1510}
1511
1512static void mtk_dma_free(struct mtk_eth *eth)
1513{
1514 int i;
1515
1516 for (i = 0; i < eth->soc->mac_count; i++)
1517 if (eth->netdev[i])
1518 netdev_reset_queue(eth->netdev[i]);
1519 eth->tx_ring.tx_clean(eth);
1520 mtk_clean_rx(eth, &eth->rx_ring[0]);
1521 mtk_clean_rx(eth, &eth->rx_ring[1]);
1522 kfree(eth->scratch_head);
1523}
1524
/* ndo_tx_timeout callback: the stack detected a stuck TX queue.
 * Bumps the error counter, dumps PDMA/QDMA ring state for debugging and
 * schedules the per-MAC pending_work, which restarts the device
 * (see mtk_pending_work).
 */
static void mtk_tx_timeout(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct mtk_tx_ring *ring = &eth->tx_ring;

	eth->netdev[mac->id]->stats.tx_errors++;
	netif_err(eth, tx_err, dev,
		  "transmit timed out\n");
	if (eth->soc->dma_type & MTK_PDMA) {
		/* PDMA TX ring registers vs. the driver's own indices */
		netif_info(eth, drv, dev, "pdma_cfg:%08x\n",
			   mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG));
		netif_info(eth, drv, dev,
			   "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
			   0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0),
			   mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0),
			   mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0),
			   mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0),
			   ring->tx_free_idx,
			   ring->tx_next_idx);
	}
	if (eth->soc->dma_type & MTK_QDMA) {
		/* QDMA descriptor pointers plus remaining free descriptors */
		netif_info(eth, drv, dev, "qdma_cfg:%08x\n",
			   mtk_r32(eth, MTK_QDMA_GLO_CFG));
		netif_info(eth, drv, dev,
			   "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
			   0, mtk_r32(eth, MTK_QTX_CTX_PTR),
			   mtk_r32(eth, MTK_QTX_DTX_PTR),
			   mtk_r32(eth, MTK_QTX_CRX_PTR),
			   mtk_r32(eth, MTK_QTX_DRX_PTR),
			   atomic_read(&ring->tx_free_count));
	}
	netif_info(eth, drv, dev,
		   "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
		   0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0),
		   mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0),
		   mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0),
		   mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0));

	schedule_work(&mac->pending_work);
}
1566
/* Top-half interrupt handler shared by PDMA and QDMA; @_eth is the
 * struct mtk_eth cookie passed to devm_request_irq().  TX/RX interrupts
 * are masked here and NAPI is scheduled; other sources are just acked.
 * The mask is presumably re-enabled from the NAPI poll path — not
 * visible in this chunk.
 */
static irqreturn_t mtk_handle_irq(int irq, void *_eth)
{
	struct mtk_eth *eth = _eth;
	u32 status, int_mask;

	status = mtk_irq_pending(eth);
	if (unlikely(!status))
		return IRQ_NONE;	/* spurious / not ours */

	int_mask = (eth->soc->rx_int | eth->soc->tx_int);
	if (likely(status & int_mask)) {
		if (likely(napi_schedule_prep(&eth->rx_napi)))
			__napi_schedule(&eth->rx_napi);
	} else {
		mtk_irq_ack(eth, status);
	}
	mtk_irq_disable(eth, int_mask);

	return IRQ_HANDLED;
}
1587
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler synchronously with the TX/RX
 * interrupts masked.
 */
static void mtk_poll_controller(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	u32 int_mask = eth->soc->tx_int | eth->soc->rx_int;

	mtk_irq_disable(eth, int_mask);
	/* mtk_handle_irq() casts its cookie to struct mtk_eth *; it must be
	 * given eth (as registered with devm_request_irq()), not the
	 * net_device pointer.
	 */
	mtk_handle_irq(dev->irq, eth);
	mtk_irq_enable(eth, int_mask);
}
#endif
1600
1601int mtk_set_clock_cycle(struct mtk_eth *eth)
1602{
1603 unsigned long sysclk = eth->sysclk;
1604
1605 sysclk /= MTK_US_CYC_CNT_DIVISOR;
1606 sysclk <<= MTK_US_CYC_CNT_SHIFT;
1607
1608 mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) &
1609 ~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) |
1610 sysclk,
1611 MTK_GLO_CFG);
1612 return 0;
1613}
1614
1615void mtk_fwd_config(struct mtk_eth *eth)
1616{
1617 u32 fwd_cfg;
1618
1619 fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
1620
1621 /* disable jumbo frame */
1622 if (eth->soc->jumbo_frame)
1623 fwd_cfg &= ~MTK_GDM1_JMB_EN;
1624
1625 /* set unicast/multicast/broadcast frame to cpu */
1626 fwd_cfg &= ~0xffff;
1627
1628 mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
1629}
1630
1631void mtk_csum_config(struct mtk_eth *eth)
1632{
1633 if (eth->soc->hw_features & NETIF_F_RXCSUM)
1634 mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) |
1635 (MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
1636 MTK_GDMA1_FWD_CFG);
1637 else
1638 mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) &
1639 ~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
1640 MTK_GDMA1_FWD_CFG);
1641 if (eth->soc->hw_features & NETIF_F_IP_CSUM)
1642 mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) |
1643 (MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
1644 MTK_CDMA_CSG_CFG);
1645 else
1646 mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) &
1647 ~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
1648 MTK_CDMA_CSG_CFG);
1649}
1650
/* Allocate the DMA rings for the SoC's DMA flavour (PDMA, QDMA or the
 * mixed PDMA-RX/QDMA-TX mode) and start the DMA engines.  On allocation
 * failure any partially set up rings are torn down before returning.
 */
static int mtk_start_dma(struct mtk_eth *eth)
{
	unsigned long flags;
	u32 val;
	int err;

	if (eth->soc->dma_type == MTK_PDMA)
		err = mtk_pdma_init(eth);
	else if (eth->soc->dma_type == MTK_QDMA)
		err = mtk_qdma_init(eth, 0);
	else
		err = mtk_pdma_qdma_init(eth);
	if (err) {
		mtk_dma_free(eth);
		return err;
	}

	spin_lock_irqsave(&eth->page_lock, flags);

	/* TX write-back, both DMA directions, plus per-SoC extra bits */
	val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN;
	if (eth->soc->rx_2b_offset)
		val |= MTK_RX_2B_OFFSET;
	val |= eth->soc->pdma_glo_cfg;

	if (eth->soc->dma_type & MTK_PDMA)
		mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG);

	if (eth->soc->dma_type & MTK_QDMA)
		mtk_w32(eth, val, MTK_QDMA_GLO_CFG);

	spin_unlock_irqrestore(&eth->page_lock, flags);

	return 0;
}
1685
/* ndo_open callback.  The DMA engines and the NAPI context are shared
 * by all MACs on the frame engine, so they are only started when the
 * first netdev comes up; dma_refcnt counts the open netdevs.
 */
static int mtk_open(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));

	/* first opener brings up the shared DMA/NAPI machinery */
	if (!atomic_read(&eth->dma_refcnt)) {
		int err = mtk_start_dma(eth);

		if (err)
			return err;

		napi_enable(&eth->rx_napi);
		mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int);
	}
	atomic_inc(&eth->dma_refcnt);

	if (eth->phy)
		eth->phy->start(mac);

	if (eth->soc->has_carrier && eth->soc->has_carrier(eth))
		netif_carrier_on(dev);

	netif_start_queue(dev);
	eth->soc->fwd_config(eth);

	return 0;
}
1715
/* Clear the DMA-enable bits in the global config register @glo_cfg and
 * poll (up to 10 x 20ms) until neither TX nor RX DMA reports busy.
 */
static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
{
	unsigned long flags;
	u32 val;
	int i;

	/* stop the dma engine */
	spin_lock_irqsave(&eth->page_lock, flags);
	val = mtk_r32(eth, glo_cfg);
	mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
		glo_cfg);
	spin_unlock_irqrestore(&eth->page_lock, flags);

	/* wait for dma stop; gives up silently after ~200ms */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
			msleep(20);
			continue;
		}
		break;
	}
}
1739
/* ndo_stop callback.  The shared DMA engines, IRQs and NAPI context are
 * only torn down when the last open netdev on the frame engine stops
 * (dma_refcnt drops to zero).
 */
static int mtk_stop(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;

	netif_tx_disable(dev);
	if (eth->phy)
		eth->phy->stop(mac);

	/* other netdevs on this frame engine are still up */
	if (!atomic_dec_and_test(&eth->dma_refcnt))
		return 0;

	mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
	napi_disable(&eth->rx_napi);

	if (eth->soc->dma_type & MTK_PDMA)
		mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]);

	if (eth->soc->dma_type & MTK_QDMA)
		mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);

	mtk_dma_free(eth);

	return 0;
}
1765
/* One-time hardware bring-up at probe: reset the frame engine,
 * initialize the switch core (on SoCs that have one), request the IRQ,
 * set up the MDIO bus, silence all interrupts and program the default
 * VLAN and forwarding configuration.  Returns 0 or a negative errno.
 */
static int __init mtk_init_hw(struct mtk_eth *eth)
{
	int i, err;

	eth->soc->reset_fe(eth);

	if (eth->soc->switch_init)
		if (eth->soc->switch_init(eth)) {
			dev_err(eth->dev, "failed to initialize switch core\n");
			return -ENODEV;
		}

	/* eth is the dev_id cookie; the IRQ is devm-managed, so it is
	 * released automatically on driver detach
	 */
	err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
			       dev_name(eth->dev), eth);
	if (err)
		return err;

	err = mtk_mdio_init(eth);
	if (err)
		return err;

	/* disable delay and normal interrupt */
	mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG);
	if (eth->soc->dma_type & MTK_QDMA)
		mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
	mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);

	/* frame engine will push VLAN tag regarding to VIDX field in Tx desc */
	if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
		for (i = 0; i < 16; i += 2)
			mtk_w32(eth, ((i + 1) << 16) + i,
				mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
				(i * 2));

	if (eth->soc->fwd_config(eth))
		dev_err(eth->dev, "unable to get clock\n");

	/* pulse the global reset register where the SoC has one */
	if (mtk_reg_table[MTK_REG_MTK_RST_GL]) {
		mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL);
		mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL);
	}

	return 0;
}
1810
/* ndo_init callback: set the MAC address from the device tree (falling
 * back to a random address), run the optional per-port init for
 * "mediatek,eth-port" DT children and connect the PHY when present.
 */
static int __init mtk_init(struct net_device *dev)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	struct device_node *port;
	const char *mac_addr;
	int err;

	mac_addr = of_get_mac_address(mac->of_node);
	if (mac_addr)
		ether_addr_copy(dev->dev_addr, mac_addr);

	/* If the mac address is invalid, use random mac address */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		eth_hw_addr_random(dev);
		dev_err(eth->dev, "generated random MAC address %pM\n",
			dev->dev_addr);
	}
	mac->hw->soc->set_mac(mac, dev->dev_addr);

	if (eth->soc->port_init)
		for_each_child_of_node(mac->of_node, port)
			if (of_device_is_compatible(port,
						    "mediatek,eth-port") &&
			    of_device_is_available(port))
				eth->soc->port_init(eth, mac, port);

	if (eth->phy) {
		err = eth->phy->connect(mac);
		if (err)
			return err;
	}

	return 0;
}
1846
1847static void mtk_uninit(struct net_device *dev)
1848{
1849 struct mtk_mac *mac = netdev_priv(dev);
1850 struct mtk_eth *eth = mac->hw;
1851
1852 if (eth->phy)
1853 eth->phy->disconnect(mac);
1854 mtk_mdio_cleanup(eth);
1855
1856 mtk_irq_disable(eth, ~0);
1857 free_irq(dev->irq, dev);
1858}
1859
1860static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1861{
1862 struct mtk_mac *mac = netdev_priv(dev);
1863
1864 if (!mac->phy_dev)
1865 return -ENODEV;
1866
1867 switch (cmd) {
1868 case SIOCGMIIPHY:
1869 case SIOCGMIIREG:
1870 case SIOCSMIIREG:
1871 return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
1872 default:
1873 break;
1874 }
1875
1876 return -EOPNOTSUPP;
1877}
1878
/* ndo_change_mtu callback.  SoCs without jumbo-frame support defer to
 * the generic eth_change_mtu().  Otherwise the RX buffer sizing is
 * recomputed and, if the interface is running, the device is restarted
 * with the jumbo length field of GDMA1_FWD_CFG reprogrammed.
 */
static int mtk_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	int frag_size, old_mtu;
	u32 fwd_cfg;

	if (!eth->soc->jumbo_frame)
		return eth_change_mtu(dev, new_mtu);

	frag_size = mtk_max_frag_size(new_mtu);
	/* a single page must hold one full RX fragment */
	if (new_mtu < 68 || frag_size > PAGE_SIZE)
		return -EINVAL;

	old_mtu = dev->mtu;
	dev->mtu = new_mtu;

	/* return early if the buffer sizes will not change */
	if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
		return 0;
	if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
		return 0;

	/* crossing the standard/jumbo boundary: resize the RX buffers */
	if (new_mtu <= ETH_DATA_LEN)
		eth->rx_ring[0].frag_size = mtk_max_frag_size(ETH_DATA_LEN);
	else
		eth->rx_ring[0].frag_size = PAGE_SIZE;
	eth->rx_ring[0].rx_buf_size =
		mtk_max_buf_size(eth->rx_ring[0].frag_size);

	if (!netif_running(dev))
		return 0;

	/* restart the device so the rings are reallocated with new sizes */
	mtk_stop(dev);
	fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
	if (new_mtu <= ETH_DATA_LEN) {
		fwd_cfg &= ~MTK_GDM1_JMB_EN;
	} else {
		/* jumbo length is programmed in 1KB units */
		fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
		fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
			    MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN;
	}
	mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);

	return mtk_open(dev);
}
1925
/* Worker scheduled from mtk_tx_timeout(): performs a full down/up cycle
 * of the netdev under RTNL to recover from a stuck DMA engine.  If the
 * re-open fails the device is closed for good.
 */
static void mtk_pending_work(struct work_struct *work)
{
	struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
	struct mtk_eth *eth = mac->hw;
	struct net_device *dev = eth->netdev[mac->id];
	int err;

	rtnl_lock();
	mtk_stop(dev);

	err = mtk_open(dev);
	if (err) {
		netif_alert(eth, ifup, dev,
			    "Driver up/down cycle failed, closing device.\n");
		dev_close(dev);
	}
	rtnl_unlock();
}
1944
1945static int mtk_cleanup(struct mtk_eth *eth)
1946{
1947 int i;
1948
1949 for (i = 0; i < eth->soc->mac_count; i++) {
1950 struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
1951
1952 if (!eth->netdev[i])
1953 continue;
1954
1955 unregister_netdev(eth->netdev[i]);
1956 free_netdev(eth->netdev[i]);
1957 cancel_work_sync(&mac->pending_work);
1958 }
1959
1960 return 0;
1961}
1962
/* net_device callbacks for every MAC on the MediaTek frame engine */
static const struct net_device_ops mtk_netdev_ops = {
	.ndo_init		= mtk_init,
	.ndo_uninit		= mtk_uninit,
	.ndo_open		= mtk_open,
	.ndo_stop		= mtk_stop,
	.ndo_start_xmit		= mtk_start_xmit,
	.ndo_set_mac_address	= mtk_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= mtk_do_ioctl,
	.ndo_change_mtu		= mtk_change_mtu,
	.ndo_tx_timeout		= mtk_tx_timeout,
	.ndo_get_stats64	= mtk_get_stats64,
	.ndo_vlan_rx_add_vid	= mtk_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mtk_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mtk_poll_controller,
#endif
};
1981
1982static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
1983{
1984 struct mtk_mac *mac;
1985 const __be32 *_id = of_get_property(np, "reg", NULL);
1986 int id, err;
1987
1988 if (!_id) {
1989 dev_err(eth->dev, "missing mac id\n");
1990 return -EINVAL;
1991 }
1992 id = be32_to_cpup(_id);
1993 if (id >= eth->soc->mac_count || eth->netdev[id]) {
1994 dev_err(eth->dev, "%d is not a valid mac id\n", id);
1995 return -EINVAL;
1996 }
1997
1998 eth->netdev[id] = alloc_etherdev(sizeof(*mac));
1999 if (!eth->netdev[id]) {
2000 dev_err(eth->dev, "alloc_etherdev failed\n");
2001 return -ENOMEM;
2002 }
2003 mac = netdev_priv(eth->netdev[id]);
2004 eth->mac[id] = mac;
2005 mac->id = id;
2006 mac->hw = eth;
2007 mac->of_node = np;
2008 INIT_WORK(&mac->pending_work, mtk_pending_work);
2009
2010 if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) {
2011 mac->hw_stats = devm_kzalloc(eth->dev,
2012 sizeof(*mac->hw_stats),
2013 GFP_KERNEL);
2014 if (!mac->hw_stats) {
2015 err = -ENOMEM;
2016 goto free_netdev;
2017 }
2018 spin_lock_init(&mac->hw_stats->stats_lock);
2019 mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
2020 }
2021
2022 SET_NETDEV_DEV(eth->netdev[id], eth->dev);
2023 eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
2024 eth->netdev[id]->base_addr = (unsigned long)eth->base;
2025
2026 if (eth->soc->init_data)
2027 eth->soc->init_data(eth->soc, eth->netdev[id]);
2028
2029 eth->netdev[id]->vlan_features = eth->soc->hw_features &
2030 ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
2031 eth->netdev[id]->features |= eth->soc->hw_features;
2032
2033 if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
2034 eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2035
2036 mtk_set_ethtool_ops(eth->netdev[id]);
2037
2038 err = register_netdev(eth->netdev[id]);
2039 if (err) {
2040 dev_err(eth->dev, "error bringing up device\n");
2041 err = -ENOMEM;
2042 goto free_netdev;
2043 }
2044 eth->netdev[id]->irq = eth->irq;
2045 netif_info(eth, probe, eth->netdev[id],
2046 "mediatek frame engine at 0x%08lx, irq %d\n",
2047 eth->netdev[id]->base_addr, eth->netdev[id]->irq);
2048
2049 return 0;
2050
2051free_netdev:
2052 free_netdev(eth->netdev[id]);
2053 return err;
2054}
2055
2056static int mtk_probe(struct platform_device *pdev)
2057{
2058 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2059 const struct of_device_id *match;
2060 struct device_node *mac_np;
2061 struct mtk_soc_data *soc;
2062 struct mtk_eth *eth;
2063 struct clk *sysclk;
2064 int err;
2065
2066 device_reset(&pdev->dev);
2067
2068 match = of_match_device(of_mtk_match, &pdev->dev);
2069 soc = (struct mtk_soc_data *)match->data;
2070
2071 if (soc->reg_table)
2072 mtk_reg_table = soc->reg_table;
2073
2074 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
2075 if (!eth)
2076 return -ENOMEM;
2077
2078 eth->base = devm_ioremap_resource(&pdev->dev, res);
2079 if (IS_ERR(eth->base))
2080 return PTR_ERR(eth->base);
2081
2082 spin_lock_init(&eth->page_lock);
2083
2084 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
2085 "mediatek,ethsys");
2086 if (IS_ERR(eth->ethsys))
2087 return PTR_ERR(eth->ethsys);
2088
2089 eth->irq = platform_get_irq(pdev, 0);
2090 if (eth->irq < 0) {
2091 dev_err(&pdev->dev, "no IRQ resource found\n");
2092 return -ENXIO;
2093 }
2094
2095 sysclk = devm_clk_get(&pdev->dev, NULL);
2096 if (IS_ERR(sysclk)) {
2097 dev_err(&pdev->dev,
2098 "the clock is not defined in the devicetree\n");
2099 return -ENXIO;
2100 }
2101 eth->sysclk = clk_get_rate(sysclk);
2102
2103 eth->switch_np = of_parse_phandle(pdev->dev.of_node,
2104 "mediatek,switch", 0);
2105 if (soc->has_switch && !eth->switch_np) {
2106 dev_err(&pdev->dev, "failed to read switch phandle\n");
2107 return -ENODEV;
2108 }
2109
2110 eth->dev = &pdev->dev;
2111 eth->soc = soc;
2112 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
2113
2114 err = mtk_init_hw(eth);
2115 if (err)
2116 return err;
2117
2118 if (eth->soc->mac_count > 1) {
2119 for_each_child_of_node(pdev->dev.of_node, mac_np) {
2120 if (!of_device_is_compatible(mac_np,
2121 "mediatek,eth-mac"))
2122 continue;
2123
2124 if (!of_device_is_available(mac_np))
2125 continue;
2126
2127 err = mtk_add_mac(eth, mac_np);
2128 if (err)
2129 goto err_free_dev;
2130 }
2131
2132 init_dummy_netdev(&eth->dummy_dev);
2133 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
2134 soc->napi_weight);
2135 } else {
2136 err = mtk_add_mac(eth, pdev->dev.of_node);
2137 if (err)
2138 goto err_free_dev;
2139 netif_napi_add(eth->netdev[0], &eth->rx_napi, mtk_poll,
2140 soc->napi_weight);
2141 }
2142
2143 platform_set_drvdata(pdev, eth);
2144
2145 return 0;
2146
2147err_free_dev:
2148 mtk_cleanup(eth);
2149 return err;
2150}
2151
/* Platform remove: delete the shared NAPI context and unregister/free
 * all netdevs.  devm-managed resources (memory, IRQ, clocks, ioremap)
 * are released automatically afterwards.
 */
static int mtk_remove(struct platform_device *pdev)
{
	struct mtk_eth *eth = platform_get_drvdata(pdev);

	netif_napi_del(&eth->rx_napi);
	mtk_cleanup(eth);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
2162
/* platform driver glue; of_mtk_match is defined elsewhere in this file */
static struct platform_driver mtk_driver = {
	.probe = mtk_probe,
	.remove = mtk_remove,
	.driver = {
		.name = "mtk_soc_eth",
		.of_match_table = of_mtk_match,
	},
};

module_platform_driver(mtk_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.h b/drivers/staging/mt7621-eth/mtk_eth_soc.h
deleted file mode 100644
index e6ed80433f49..000000000000
--- a/drivers/staging/mt7621-eth/mtk_eth_soc.h
+++ /dev/null
@@ -1,716 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#ifndef MTK_ETH_H
16#define MTK_ETH_H
17
18#include <linux/mii.h>
19#include <linux/interrupt.h>
20#include <linux/netdevice.h>
21#include <linux/dma-mapping.h>
22#include <linux/phy.h>
23#include <linux/ethtool.h>
24#include <linux/version.h>
25#include <linux/atomic.h>
26
/* these registers have different offsets depending on the SoC. we use a lookup
 * table for these
 */
/* Indices into the per-SoC register lookup table (mtk_reg_table);
 * a zero entry means the SoC does not implement the register.
 */
enum mtk_reg {
	MTK_REG_PDMA_GLO_CFG = 0,
	MTK_REG_PDMA_RST_CFG,
	MTK_REG_DLY_INT_CFG,
	MTK_REG_TX_BASE_PTR0,
	MTK_REG_TX_MAX_CNT0,
	MTK_REG_TX_CTX_IDX0,
	MTK_REG_TX_DTX_IDX0,
	MTK_REG_RX_BASE_PTR0,
	MTK_REG_RX_MAX_CNT0,
	MTK_REG_RX_CALC_IDX0,
	MTK_REG_RX_DRX_IDX0,
	MTK_REG_MTK_INT_ENABLE,
	MTK_REG_MTK_INT_STATUS,
	MTK_REG_MTK_DMA_VID_BASE,
	MTK_REG_MTK_COUNTER_BASE,
	MTK_REG_MTK_RST_GL,
	MTK_REG_MTK_INT_STATUS2,
	MTK_REG_COUNT
};
50
51/* delayed interrupt bits */
52#define MTK_DELAY_EN_INT 0x80
53#define MTK_DELAY_MAX_INT 0x04
54#define MTK_DELAY_MAX_TOUT 0x04
55#define MTK_DELAY_TIME 20
56#define MTK_DELAY_CHAN (((MTK_DELAY_EN_INT | MTK_DELAY_MAX_INT) << 8) \
57 | MTK_DELAY_MAX_TOUT)
58#define MTK_DELAY_INIT ((MTK_DELAY_CHAN << 16) | MTK_DELAY_CHAN)
59#define MTK_PSE_FQFC_CFG_INIT 0x80504000
60#define MTK_PSE_FQFC_CFG_256Q 0xff908000
61
62/* interrupt bits */
63#define MTK_CNT_PPE_AF BIT(31)
64#define MTK_CNT_GDM_AF BIT(29)
65#define MTK_PSE_P2_FC BIT(26)
66#define MTK_PSE_BUF_DROP BIT(24)
67#define MTK_GDM_OTHER_DROP BIT(23)
68#define MTK_PSE_P1_FC BIT(22)
69#define MTK_PSE_P0_FC BIT(21)
70#define MTK_PSE_FQ_EMPTY BIT(20)
71#define MTK_GE1_STA_CHG BIT(18)
72#define MTK_TX_COHERENT BIT(17)
73#define MTK_RX_COHERENT BIT(16)
74#define MTK_TX_DONE_INT3 BIT(11)
75#define MTK_TX_DONE_INT2 BIT(10)
76#define MTK_TX_DONE_INT1 BIT(9)
77#define MTK_TX_DONE_INT0 BIT(8)
78#define MTK_RX_DONE_INT0 BIT(2)
79#define MTK_TX_DLY_INT BIT(1)
80#define MTK_RX_DLY_INT BIT(0)
81
82#define MTK_RX_DONE_INT MTK_RX_DONE_INT0
83#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
84 MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
85
86#define RT5350_RX_DLY_INT BIT(30)
87#define RT5350_TX_DLY_INT BIT(28)
88#define RT5350_RX_DONE_INT1 BIT(17)
89#define RT5350_RX_DONE_INT0 BIT(16)
90#define RT5350_TX_DONE_INT3 BIT(3)
91#define RT5350_TX_DONE_INT2 BIT(2)
92#define RT5350_TX_DONE_INT1 BIT(1)
93#define RT5350_TX_DONE_INT0 BIT(0)
94
95#define RT5350_RX_DONE_INT (RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1)
96#define RT5350_TX_DONE_INT (RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \
97 RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3)
98
99/* registers */
100#define MTK_GDMA_OFFSET 0x0020
101#define MTK_PSE_OFFSET 0x0040
102#define MTK_GDMA2_OFFSET 0x0060
103#define MTK_CDMA_OFFSET 0x0080
104#define MTK_DMA_VID0 0x00a8
105#define MTK_PDMA_OFFSET 0x0100
106#define MTK_PPE_OFFSET 0x0200
107#define MTK_CMTABLE_OFFSET 0x0400
108#define MTK_POLICYTABLE_OFFSET 0x1000
109
110#define MT7621_GDMA_OFFSET 0x0500
111#define MT7620_GDMA_OFFSET 0x0600
112
113#define RT5350_PDMA_OFFSET 0x0800
114#define RT5350_SDM_OFFSET 0x0c00
115
116#define MTK_MDIO_ACCESS 0x00
117#define MTK_MDIO_CFG 0x04
118#define MTK_GLO_CFG 0x08
119#define MTK_RST_GL 0x0C
120#define MTK_INT_STATUS 0x10
121#define MTK_INT_ENABLE 0x14
122#define MTK_MDIO_CFG2 0x18
123#define MTK_FOC_TS_T 0x1C
124
125#define MTK_GDMA1_FWD_CFG (MTK_GDMA_OFFSET + 0x00)
126#define MTK_GDMA1_SCH_CFG (MTK_GDMA_OFFSET + 0x04)
127#define MTK_GDMA1_SHPR_CFG (MTK_GDMA_OFFSET + 0x08)
128#define MTK_GDMA1_MAC_ADRL (MTK_GDMA_OFFSET + 0x0C)
129#define MTK_GDMA1_MAC_ADRH (MTK_GDMA_OFFSET + 0x10)
130
131#define MTK_GDMA2_FWD_CFG (MTK_GDMA2_OFFSET + 0x00)
132#define MTK_GDMA2_SCH_CFG (MTK_GDMA2_OFFSET + 0x04)
133#define MTK_GDMA2_SHPR_CFG (MTK_GDMA2_OFFSET + 0x08)
134#define MTK_GDMA2_MAC_ADRL (MTK_GDMA2_OFFSET + 0x0C)
135#define MTK_GDMA2_MAC_ADRH (MTK_GDMA2_OFFSET + 0x10)
136
137#define MTK_PSE_FQ_CFG (MTK_PSE_OFFSET + 0x00)
138#define MTK_CDMA_FC_CFG (MTK_PSE_OFFSET + 0x04)
139#define MTK_GDMA1_FC_CFG (MTK_PSE_OFFSET + 0x08)
140#define MTK_GDMA2_FC_CFG (MTK_PSE_OFFSET + 0x0C)
141
142#define MTK_CDMA_CSG_CFG (MTK_CDMA_OFFSET + 0x00)
143#define MTK_CDMA_SCH_CFG (MTK_CDMA_OFFSET + 0x04)
144
145#define MT7621_GDMA_FWD_CFG(x) (MT7621_GDMA_OFFSET + (x * 0x1000))
146
147/* FIXME this might be different for different SOCs */
148#define MT7620_GDMA1_FWD_CFG (MT7621_GDMA_OFFSET + 0x00)
149
150#define RT5350_TX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x00)
151#define RT5350_TX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x04)
152#define RT5350_TX_CTX_IDX0 (RT5350_PDMA_OFFSET + 0x08)
153#define RT5350_TX_DTX_IDX0 (RT5350_PDMA_OFFSET + 0x0C)
154#define RT5350_TX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x10)
155#define RT5350_TX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x14)
156#define RT5350_TX_CTX_IDX1 (RT5350_PDMA_OFFSET + 0x18)
157#define RT5350_TX_DTX_IDX1 (RT5350_PDMA_OFFSET + 0x1C)
158#define RT5350_TX_BASE_PTR2 (RT5350_PDMA_OFFSET + 0x20)
159#define RT5350_TX_MAX_CNT2 (RT5350_PDMA_OFFSET + 0x24)
160#define RT5350_TX_CTX_IDX2 (RT5350_PDMA_OFFSET + 0x28)
161#define RT5350_TX_DTX_IDX2 (RT5350_PDMA_OFFSET + 0x2C)
162#define RT5350_TX_BASE_PTR3 (RT5350_PDMA_OFFSET + 0x30)
163#define RT5350_TX_MAX_CNT3 (RT5350_PDMA_OFFSET + 0x34)
164#define RT5350_TX_CTX_IDX3 (RT5350_PDMA_OFFSET + 0x38)
165#define RT5350_TX_DTX_IDX3 (RT5350_PDMA_OFFSET + 0x3C)
166#define RT5350_RX_BASE_PTR0 (RT5350_PDMA_OFFSET + 0x100)
167#define RT5350_RX_MAX_CNT0 (RT5350_PDMA_OFFSET + 0x104)
168#define RT5350_RX_CALC_IDX0 (RT5350_PDMA_OFFSET + 0x108)
169#define RT5350_RX_DRX_IDX0 (RT5350_PDMA_OFFSET + 0x10C)
170#define RT5350_RX_BASE_PTR1 (RT5350_PDMA_OFFSET + 0x110)
171#define RT5350_RX_MAX_CNT1 (RT5350_PDMA_OFFSET + 0x114)
172#define RT5350_RX_CALC_IDX1 (RT5350_PDMA_OFFSET + 0x118)
173#define RT5350_RX_DRX_IDX1 (RT5350_PDMA_OFFSET + 0x11C)
174#define RT5350_PDMA_GLO_CFG (RT5350_PDMA_OFFSET + 0x204)
175#define RT5350_PDMA_RST_CFG (RT5350_PDMA_OFFSET + 0x208)
176#define RT5350_DLY_INT_CFG (RT5350_PDMA_OFFSET + 0x20c)
177#define RT5350_MTK_INT_STATUS (RT5350_PDMA_OFFSET + 0x220)
178#define RT5350_MTK_INT_ENABLE (RT5350_PDMA_OFFSET + 0x228)
179#define RT5350_PDMA_SCH_CFG (RT5350_PDMA_OFFSET + 0x280)
180
181#define MTK_PDMA_GLO_CFG (MTK_PDMA_OFFSET + 0x00)
182#define MTK_PDMA_RST_CFG (MTK_PDMA_OFFSET + 0x04)
183#define MTK_PDMA_SCH_CFG (MTK_PDMA_OFFSET + 0x08)
184#define MTK_DLY_INT_CFG (MTK_PDMA_OFFSET + 0x0C)
185#define MTK_TX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x10)
186#define MTK_TX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x14)
187#define MTK_TX_CTX_IDX0 (MTK_PDMA_OFFSET + 0x18)
188#define MTK_TX_DTX_IDX0 (MTK_PDMA_OFFSET + 0x1C)
189#define MTK_TX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x20)
190#define MTK_TX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x24)
191#define MTK_TX_CTX_IDX1 (MTK_PDMA_OFFSET + 0x28)
192#define MTK_TX_DTX_IDX1 (MTK_PDMA_OFFSET + 0x2C)
193#define MTK_RX_BASE_PTR0 (MTK_PDMA_OFFSET + 0x30)
194#define MTK_RX_MAX_CNT0 (MTK_PDMA_OFFSET + 0x34)
195#define MTK_RX_CALC_IDX0 (MTK_PDMA_OFFSET + 0x38)
196#define MTK_RX_DRX_IDX0 (MTK_PDMA_OFFSET + 0x3C)
197#define MTK_TX_BASE_PTR2 (MTK_PDMA_OFFSET + 0x40)
198#define MTK_TX_MAX_CNT2 (MTK_PDMA_OFFSET + 0x44)
199#define MTK_TX_CTX_IDX2 (MTK_PDMA_OFFSET + 0x48)
200#define MTK_TX_DTX_IDX2 (MTK_PDMA_OFFSET + 0x4C)
201#define MTK_TX_BASE_PTR3 (MTK_PDMA_OFFSET + 0x50)
202#define MTK_TX_MAX_CNT3 (MTK_PDMA_OFFSET + 0x54)
203#define MTK_TX_CTX_IDX3 (MTK_PDMA_OFFSET + 0x58)
204#define MTK_TX_DTX_IDX3 (MTK_PDMA_OFFSET + 0x5C)
205#define MTK_RX_BASE_PTR1 (MTK_PDMA_OFFSET + 0x60)
206#define MTK_RX_MAX_CNT1 (MTK_PDMA_OFFSET + 0x64)
207#define MTK_RX_CALC_IDX1 (MTK_PDMA_OFFSET + 0x68)
208#define MTK_RX_DRX_IDX1 (MTK_PDMA_OFFSET + 0x6C)
209
210/* Switch DMA configuration */
211#define RT5350_SDM_CFG (RT5350_SDM_OFFSET + 0x00)
212#define RT5350_SDM_RRING (RT5350_SDM_OFFSET + 0x04)
213#define RT5350_SDM_TRING (RT5350_SDM_OFFSET + 0x08)
214#define RT5350_SDM_MAC_ADRL (RT5350_SDM_OFFSET + 0x0C)
215#define RT5350_SDM_MAC_ADRH (RT5350_SDM_OFFSET + 0x10)
216#define RT5350_SDM_TPCNT (RT5350_SDM_OFFSET + 0x100)
217#define RT5350_SDM_TBCNT (RT5350_SDM_OFFSET + 0x104)
218#define RT5350_SDM_RPCNT (RT5350_SDM_OFFSET + 0x108)
219#define RT5350_SDM_RBCNT (RT5350_SDM_OFFSET + 0x10C)
220#define RT5350_SDM_CS_ERR (RT5350_SDM_OFFSET + 0x110)
221
222#define RT5350_SDM_ICS_EN BIT(16)
223#define RT5350_SDM_TCS_EN BIT(17)
224#define RT5350_SDM_UCS_EN BIT(18)
225
226/* QDMA registers */
227#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
228#define MTK_QTX_SCH(x) (0x1804 + (x * 0x10))
229#define MTK_QRX_BASE_PTR0 0x1900
230#define MTK_QRX_MAX_CNT0 0x1904
231#define MTK_QRX_CRX_IDX0 0x1908
232#define MTK_QRX_DRX_IDX0 0x190C
233#define MTK_QDMA_GLO_CFG 0x1A04
234#define MTK_QDMA_RST_IDX 0x1A08
235#define MTK_QDMA_DELAY_INT 0x1A0C
236#define MTK_QDMA_FC_THRES 0x1A10
237#define MTK_QMTK_INT_STATUS 0x1A18
238#define MTK_QMTK_INT_ENABLE 0x1A1C
239#define MTK_QDMA_HRED2 0x1A44
240
241#define MTK_QTX_CTX_PTR 0x1B00
242#define MTK_QTX_DTX_PTR 0x1B04
243
244#define MTK_QTX_CRX_PTR 0x1B10
245#define MTK_QTX_DRX_PTR 0x1B14
246
247#define MTK_QDMA_FQ_HEAD 0x1B20
248#define MTK_QDMA_FQ_TAIL 0x1B24
249#define MTK_QDMA_FQ_CNT 0x1B28
250#define MTK_QDMA_FQ_BLEN 0x1B2C
251
252#define QDMA_PAGE_SIZE 2048
253#define QDMA_TX_OWNER_CPU BIT(31)
254#define QDMA_TX_SWC BIT(14)
255#define TX_QDMA_SDL(_x) (((_x) & 0x3fff) << 16)
256#define QDMA_RES_THRES 4
257
258/* MDIO_CFG register bits */
259#define MTK_MDIO_CFG_AUTO_POLL_EN BIT(29)
260#define MTK_MDIO_CFG_GP1_BP_EN BIT(16)
261#define MTK_MDIO_CFG_GP1_FRC_EN BIT(15)
262#define MTK_MDIO_CFG_GP1_SPEED_10 (0 << 13)
263#define MTK_MDIO_CFG_GP1_SPEED_100 (1 << 13)
264#define MTK_MDIO_CFG_GP1_SPEED_1000 (2 << 13)
265#define MTK_MDIO_CFG_GP1_DUPLEX BIT(12)
266#define MTK_MDIO_CFG_GP1_FC_TX BIT(11)
267#define MTK_MDIO_CFG_GP1_FC_RX BIT(10)
268#define MTK_MDIO_CFG_GP1_LNK_DWN BIT(9)
269#define MTK_MDIO_CFG_GP1_AN_FAIL BIT(8)
270#define MTK_MDIO_CFG_MDC_CLK_DIV_1 (0 << 6)
271#define MTK_MDIO_CFG_MDC_CLK_DIV_2 (1 << 6)
272#define MTK_MDIO_CFG_MDC_CLK_DIV_4 (2 << 6)
273#define MTK_MDIO_CFG_MDC_CLK_DIV_8 (3 << 6)
274#define MTK_MDIO_CFG_TURBO_MII_FREQ BIT(5)
275#define MTK_MDIO_CFG_TURBO_MII_MODE BIT(4)
276#define MTK_MDIO_CFG_RX_CLK_SKEW_0 (0 << 2)
277#define MTK_MDIO_CFG_RX_CLK_SKEW_200 (1 << 2)
278#define MTK_MDIO_CFG_RX_CLK_SKEW_400 (2 << 2)
279#define MTK_MDIO_CFG_RX_CLK_SKEW_INV (3 << 2)
280#define MTK_MDIO_CFG_TX_CLK_SKEW_0 0
281#define MTK_MDIO_CFG_TX_CLK_SKEW_200 1
282#define MTK_MDIO_CFG_TX_CLK_SKEW_400 2
283#define MTK_MDIO_CFG_TX_CLK_SKEW_INV 3
284
285/* uni-cast port */
286#define MTK_GDM1_JMB_LEN_MASK 0xf
287#define MTK_GDM1_JMB_LEN_SHIFT 28
288#define MTK_GDM1_ICS_EN BIT(22)
289#define MTK_GDM1_TCS_EN BIT(21)
290#define MTK_GDM1_UCS_EN BIT(20)
291#define MTK_GDM1_JMB_EN BIT(19)
292#define MTK_GDM1_STRPCRC BIT(16)
293#define MTK_GDM1_UFRC_P_CPU (0 << 12)
294#define MTK_GDM1_UFRC_P_GDMA1 (1 << 12)
295#define MTK_GDM1_UFRC_P_PPE (6 << 12)
296
297/* checksums */
298#define MTK_ICS_GEN_EN BIT(2)
299#define MTK_UCS_GEN_EN BIT(1)
300#define MTK_TCS_GEN_EN BIT(0)
301
302/* dma mode */
303#define MTK_PDMA BIT(0)
304#define MTK_QDMA BIT(1)
305#define MTK_PDMA_RX_QDMA_TX (MTK_PDMA | MTK_QDMA)
306
307/* dma ring */
308#define MTK_PST_DRX_IDX0 BIT(16)
309#define MTK_PST_DTX_IDX3 BIT(3)
310#define MTK_PST_DTX_IDX2 BIT(2)
311#define MTK_PST_DTX_IDX1 BIT(1)
312#define MTK_PST_DTX_IDX0 BIT(0)
313
314#define MTK_RX_2B_OFFSET BIT(31)
315#define MTK_TX_WB_DDONE BIT(6)
316#define MTK_RX_DMA_BUSY BIT(3)
317#define MTK_TX_DMA_BUSY BIT(1)
318#define MTK_RX_DMA_EN BIT(2)
319#define MTK_TX_DMA_EN BIT(0)
320
321#define MTK_PDMA_SIZE_4DWORDS (0 << 4)
322#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
323#define MTK_PDMA_SIZE_16DWORDS (2 << 4)
324
325#define MTK_US_CYC_CNT_MASK 0xff
326#define MTK_US_CYC_CNT_SHIFT 0x8
327#define MTK_US_CYC_CNT_DIVISOR 1000000
328
329/* PDMA descriptor rxd2 */
330#define RX_DMA_DONE BIT(31)
331#define RX_DMA_LSO BIT(30)
332#define RX_DMA_PLEN0(_x) (((_x) & 0x3fff) << 16)
333#define RX_DMA_GET_PLEN0(_x) (((_x) >> 16) & 0x3fff)
334#define RX_DMA_TAG BIT(15)
335
336/* PDMA descriptor rxd3 */
337#define RX_DMA_TPID(_x) (((_x) >> 16) & 0xffff)
338#define RX_DMA_VID(_x) ((_x) & 0xfff)
339
340/* PDMA descriptor rxd4 */
341#define RX_DMA_L4VALID BIT(30)
342#define RX_DMA_FPORT_SHIFT 19
343#define RX_DMA_FPORT_MASK 0x7
344
345struct mtk_rx_dma {
346 unsigned int rxd1;
347 unsigned int rxd2;
348 unsigned int rxd3;
349 unsigned int rxd4;
350} __packed __aligned(4);
351
352/* PDMA tx descriptor bits */
353#define TX_DMA_BUF_LEN 0x3fff
354#define TX_DMA_PLEN0_MASK (TX_DMA_BUF_LEN << 16)
355#define TX_DMA_PLEN0(_x) (((_x) & TX_DMA_BUF_LEN) << 16)
356#define TX_DMA_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
357#define TX_DMA_GET_PLEN0(_x) (((_x) >> 16) & TX_DMA_BUF_LEN)
358#define TX_DMA_GET_PLEN1(_x) ((_x) & TX_DMA_BUF_LEN)
359#define TX_DMA_LS1 BIT(14)
360#define TX_DMA_LS0 BIT(30)
361#define TX_DMA_DONE BIT(31)
362#define TX_DMA_FPORT_SHIFT 25
363#define TX_DMA_FPORT_MASK 0x7
364#define TX_DMA_INS_VLAN_MT7621 BIT(16)
365#define TX_DMA_INS_VLAN BIT(7)
366#define TX_DMA_INS_PPPOE BIT(12)
367#define TX_DMA_TAG BIT(15)
368#define TX_DMA_TAG_MASK BIT(15)
369#define TX_DMA_QN(_x) ((_x) << 16)
370#define TX_DMA_PN(_x) ((_x) << 24)
371#define TX_DMA_QN_MASK TX_DMA_QN(0x7)
372#define TX_DMA_PN_MASK TX_DMA_PN(0x7)
373#define TX_DMA_UDF BIT(20)
374#define TX_DMA_CHKSUM (0x7 << 29)
375#define TX_DMA_TSO BIT(28)
376#define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1))
377
378/* frame engine counters */
379#define MTK_PPE_AC_BCNT0 (MTK_CMTABLE_OFFSET + 0x00)
380#define MTK_GDMA1_TX_GBCNT (MTK_CMTABLE_OFFSET + 0x300)
381#define MTK_GDMA2_TX_GBCNT (MTK_GDMA1_TX_GBCNT + 0x40)
382
383/* phy device flags */
384#define MTK_PHY_FLAG_PORT BIT(0)
385#define MTK_PHY_FLAG_ATTACH BIT(1)
386
387struct mtk_tx_dma {
388 unsigned int txd1;
389 unsigned int txd2;
390 unsigned int txd3;
391 unsigned int txd4;
392} __packed __aligned(4);
393
394struct mtk_eth;
395struct mtk_mac;
396
397/* manage the attached phys */
398struct mtk_phy {
399 spinlock_t lock;
400
401 struct phy_device *phy[8];
402 struct device_node *phy_node[8];
403 const __be32 *phy_fixed[8];
404 int duplex[8];
405 int speed[8];
406 int tx_fc[8];
407 int rx_fc[8];
408 int (*connect)(struct mtk_mac *mac);
409 void (*disconnect)(struct mtk_mac *mac);
410 void (*start)(struct mtk_mac *mac);
411 void (*stop)(struct mtk_mac *mac);
412};
413
414/* struct mtk_soc_data - the structure that holds the SoC specific data
415 * @reg_table: Some of the legacy registers changed their location
416 * over time. Their offsets are stored in this table
417 *
418 * @init_data: Some features depend on the silicon revision. This
419 * callback allows runtime modification of the content of
420 * this struct
421 * @reset_fe: This callback is used to trigger the reset of the frame
422 * engine
423 * @set_mac: This callback is used to set the unicast mac address
424 * filter
425 * @fwd_config: This callback is used to setup the forward config
426 * register of the MAC
427 * @switch_init: This callback is used to bring up the switch core
428 * @port_init: Some SoCs have ports that can be router to a switch port
429 * or an external PHY. This callback is used to setup these
430 * ports.
431 * @has_carrier: This callback allows driver to check if there is a cable
432 * attached.
433 * @mdio_init: This callbck is used to setup the MDIO bus if one is
434 * present
435 * @mdio_cleanup: This callback is used to cleanup the MDIO state.
436 * @mdio_write: This callback is used to write data to the MDIO bus.
437 * @mdio_read: This callback is used to write data to the MDIO bus.
438 * @mdio_adjust_link: This callback is used to apply the PHY settings.
439 * @piac_offset: the PIAC register has a different different base offset
440 * @hw_features: feature set depends on the SoC type
441 * @dma_ring_size: allow GBit SoCs to set bigger rings than FE SoCs
442 * @napi_weight: allow GBit SoCs to set bigger napi weight than FE SoCs
443 * @dma_type: SoCs is PDMA, QDMA or a mix of the 2
444 * @pdma_glo_cfg: the default DMA configuration
445 * @rx_int: the TX interrupt bits used by the SoC
446 * @tx_int: the TX interrupt bits used by the SoC
447 * @status_int: the Status interrupt bits used by the SoC
448 * @checksum_bit: the bits used to turn on HW checksumming
449 * @txd4: default value of the TXD4 descriptor
450 * @mac_count: the number of MACs that the SoC has
451 * @new_stats: there is a old and new way to read hardware stats
452 * registers
453 * @jumbo_frame: does the SoC support jumbo frames ?
454 * @rx_2b_offset: tell the rx dma to offset the data by 2 bytes
455 * @rx_sg_dma: scatter gather support
456 * @padding_64b enable 64 bit padding
457 * @padding_bug: rt2880 has a padding bug
458 * @has_switch: does the SoC have a built-in switch
459 *
460 * Although all of the supported SoCs share the same basic functionality, there
461 * are several SoC specific functions and features that we need to support. This
462 * struct holds the SoC specific data so that the common core can figure out
463 * how to setup and use these differences.
464 */
465struct mtk_soc_data {
466 const u16 *reg_table;
467
468 void (*init_data)(struct mtk_soc_data *data, struct net_device *netdev);
469 void (*reset_fe)(struct mtk_eth *eth);
470 void (*set_mac)(struct mtk_mac *mac, unsigned char *macaddr);
471 int (*fwd_config)(struct mtk_eth *eth);
472 int (*switch_init)(struct mtk_eth *eth);
473 void (*port_init)(struct mtk_eth *eth, struct mtk_mac *mac,
474 struct device_node *port);
475 int (*has_carrier)(struct mtk_eth *eth);
476 int (*mdio_init)(struct mtk_eth *eth);
477 void (*mdio_cleanup)(struct mtk_eth *eth);
478 int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg,
479 u16 val);
480 int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg);
481 void (*mdio_adjust_link)(struct mtk_eth *eth, int port);
482 u32 piac_offset;
483 netdev_features_t hw_features;
484 u32 dma_ring_size;
485 u32 napi_weight;
486 u32 dma_type;
487 u32 pdma_glo_cfg;
488 u32 rx_int;
489 u32 tx_int;
490 u32 status_int;
491 u32 checksum_bit;
492 u32 txd4;
493 u32 mac_count;
494
495 u32 new_stats:1;
496 u32 jumbo_frame:1;
497 u32 rx_2b_offset:1;
498 u32 rx_sg_dma:1;
499 u32 padding_64b:1;
500 u32 padding_bug:1;
501 u32 has_switch:1;
502};
503
504#define MTK_STAT_OFFSET 0x40
505
506/* struct mtk_hw_stats - the structure that holds the traffic statistics.
507 * @stats_lock: make sure that stats operations are atomic
508 * @reg_offset: the status register offset of the SoC
509 * @syncp: the refcount
510 *
511 * All of the supported SoCs have hardware counters for traffic statstics.
512 * Whenever the status IRQ triggers we can read the latest stats from these
513 * counters and store them in this struct.
514 */
515struct mtk_hw_stats {
516 spinlock_t stats_lock;
517 u32 reg_offset;
518 struct u64_stats_sync syncp;
519
520 u64 tx_bytes;
521 u64 tx_packets;
522 u64 tx_skip;
523 u64 tx_collisions;
524 u64 rx_bytes;
525 u64 rx_packets;
526 u64 rx_overflow;
527 u64 rx_fcs_errors;
528 u64 rx_short_errors;
529 u64 rx_long_errors;
530 u64 rx_checksum_errors;
531 u64 rx_flow_control_packets;
532};
533
534/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
535 * memory was allocated so that it can be freed properly
536 */
537enum mtk_tx_flags {
538 MTK_TX_FLAGS_SINGLE0 = 0x01,
539 MTK_TX_FLAGS_PAGE0 = 0x02,
540 MTK_TX_FLAGS_PAGE1 = 0x04,
541};
542
543/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
544 * by the TX descriptor s
545 * @skb: The SKB pointer of the packet being sent
546 * @dma_addr0: The base addr of the first segment
547 * @dma_len0: The length of the first segment
548 * @dma_addr1: The base addr of the second segment
549 * @dma_len1: The length of the second segment
550 */
551struct mtk_tx_buf {
552 struct sk_buff *skb;
553 u32 flags;
554 DEFINE_DMA_UNMAP_ADDR(dma_addr0);
555 DEFINE_DMA_UNMAP_LEN(dma_len0);
556 DEFINE_DMA_UNMAP_ADDR(dma_addr1);
557 DEFINE_DMA_UNMAP_LEN(dma_len1);
558};
559
560/* struct mtk_tx_ring - This struct holds info describing a TX ring
561 * @tx_dma: The descriptor ring
562 * @tx_buf: The memory pointed at by the ring
563 * @tx_phys: The physical addr of tx_buf
564 * @tx_next_free: Pointer to the next free descriptor
565 * @tx_last_free: Pointer to the last free descriptor
566 * @tx_thresh: The threshold of minimum amount of free descriptors
567 * @tx_map: Callback to map a new packet into the ring
568 * @tx_poll: Callback for the housekeeping function
569 * @tx_clean: Callback for the cleanup function
570 * @tx_ring_size: How many descriptors are in the ring
571 * @tx_free_idx: The index of th next free descriptor
572 * @tx_next_idx: QDMA uses a linked list. This element points to the next
573 * free descriptor in the list
574 * @tx_free_count: QDMA uses a linked list. Track how many free descriptors
575 * are present
576 */
577struct mtk_tx_ring {
578 struct mtk_tx_dma *tx_dma;
579 struct mtk_tx_buf *tx_buf;
580 dma_addr_t tx_phys;
581 struct mtk_tx_dma *tx_next_free;
582 struct mtk_tx_dma *tx_last_free;
583 u16 tx_thresh;
584 int (*tx_map)(struct sk_buff *skb, struct net_device *dev, int tx_num,
585 struct mtk_tx_ring *ring, bool gso);
586 int (*tx_poll)(struct mtk_eth *eth, int budget, bool *tx_again);
587 void (*tx_clean)(struct mtk_eth *eth);
588
589 /* PDMA only */
590 u16 tx_ring_size;
591 u16 tx_free_idx;
592
593 /* QDMA only */
594 u16 tx_next_idx;
595 atomic_t tx_free_count;
596};
597
598/* struct mtk_rx_ring - This struct holds info describing a RX ring
599 * @rx_dma: The descriptor ring
600 * @rx_data: The memory pointed at by the ring
601 * @trx_phys: The physical addr of rx_buf
602 * @rx_ring_size: How many descriptors are in the ring
603 * @rx_buf_size: The size of each packet buffer
604 * @rx_calc_idx: The current head of ring
605 */
606struct mtk_rx_ring {
607 struct mtk_rx_dma *rx_dma;
608 u8 **rx_data;
609 dma_addr_t rx_phys;
610 u16 rx_ring_size;
611 u16 frag_size;
612 u16 rx_buf_size;
613 u16 rx_calc_idx;
614};
615
616/* currently no SoC has more than 2 macs */
617#define MTK_MAX_DEVS 2
618
619/* struct mtk_eth - This is the main datasructure for holding the state
620 * of the driver
621 * @dev: The device pointer
622 * @base: The mapped register i/o base
623 * @page_lock: Make sure that register operations are atomic
624 * @soc: pointer to our SoC specific data
625 * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
626 * dummy for NAPI to work
627 * @netdev: The netdev instances
628 * @mac: Each netdev is linked to a physical MAC
629 * @switch_np: The phandle for the switch
630 * @irq: The IRQ that we are using
631 * @msg_enable: Ethtool msg level
632 * @ysclk: The sysclk rate - neeed for calibration
633 * @ethsys: The register map pointing at the range used to setup
634 * MII modes
635 * @dma_refcnt: track how many netdevs are using the DMA engine
636 * @tx_ring: Pointer to the memore holding info about the TX ring
637 * @rx_ring: Pointer to the memore holding info about the RX ring
638 * @rx_napi: The NAPI struct
639 * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
640 * @scratch_head: The scratch memory that scratch_ring points to.
641 * @phy: Info about the attached PHYs
642 * @mii_bus: If there is a bus we need to create an instance for it
643 * @link: Track if the ports have a physical link
644 * @sw_priv: Pointer to the switches private data
645 * @vlan_map: RX VID tracking
646 */
647
648struct mtk_eth {
649 struct device *dev;
650 void __iomem *base;
651 spinlock_t page_lock;
652 struct mtk_soc_data *soc;
653 struct net_device dummy_dev;
654 struct net_device *netdev[MTK_MAX_DEVS];
655 struct mtk_mac *mac[MTK_MAX_DEVS];
656 struct device_node *switch_np;
657 int irq;
658 u32 msg_enable;
659 unsigned long sysclk;
660 struct regmap *ethsys;
661 atomic_t dma_refcnt;
662 struct mtk_tx_ring tx_ring;
663 struct mtk_rx_ring rx_ring[2];
664 struct napi_struct rx_napi;
665 struct mtk_tx_dma *scratch_ring;
666 void *scratch_head;
667 struct mtk_phy *phy;
668 struct mii_bus *mii_bus;
669 int link[8];
670 void *sw_priv;
671 unsigned long vlan_map;
672};
673
674/* struct mtk_mac - the structure that holds the info about the MACs of the
675 * SoC
676 * @id: The number of the MAC
677 * @of_node: Our devicetree node
678 * @hw: Backpointer to our main datastruture
679 * @hw_stats: Packet statistics counter
680 * @phy_dev: The attached PHY if available
681 * @phy_flags: The PHYs flags
682 * @pending_work: The workqueue used to reset the dma ring
683 */
684struct mtk_mac {
685 int id;
686 struct device_node *of_node;
687 struct mtk_eth *hw;
688 struct mtk_hw_stats *hw_stats;
689 struct phy_device *phy_dev;
690 u32 phy_flags;
691 struct work_struct pending_work;
692};
693
694/* the struct describing the SoC. these are declared in the soc_xyz.c files */
695extern const struct of_device_id of_mtk_match[];
696
697/* read the hardware status register */
698void mtk_stats_update_mac(struct mtk_mac *mac);
699
700/* default checksum setup handler */
701void mtk_reset(struct mtk_eth *eth, u32 reset_bits);
702
703/* register i/o wrappers */
704void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg);
705u32 mtk_r32(struct mtk_eth *eth, unsigned int reg);
706
707/* default clock calibration handler */
708int mtk_set_clock_cycle(struct mtk_eth *eth);
709
710/* default checksum setup handler */
711void mtk_csum_config(struct mtk_eth *eth);
712
713/* default forward config handler */
714void mtk_fwd_config(struct mtk_eth *eth);
715
716#endif /* MTK_ETH_H */
diff --git a/drivers/staging/mt7621-eth/soc_mt7621.c b/drivers/staging/mt7621-eth/soc_mt7621.c
deleted file mode 100644
index 5d63b5d96f6b..000000000000
--- a/drivers/staging/mt7621-eth/soc_mt7621.c
+++ /dev/null
@@ -1,161 +0,0 @@
1/* This program is free software; you can redistribute it and/or modify
2 * it under the terms of the GNU General Public License as published by
3 * the Free Software Foundation; version 2 of the License
4 *
5 * This program is distributed in the hope that it will be useful,
6 * but WITHOUT ANY WARRANTY; without even the implied warranty of
7 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
8 * GNU General Public License for more details.
9 *
10 * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
11 * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
12 * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
13 */
14
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/if_vlan.h>
18#include <linux/of_net.h>
19
20#include <asm/mach-ralink/ralink_regs.h>
21
22#include "mtk_eth_soc.h"
23#include "gsw_mt7620.h"
24#include "mdio.h"
25
26#define MT7620_CDMA_CSG_CFG 0x400
27#define MT7621_CDMP_IG_CTRL (MT7620_CDMA_CSG_CFG + 0x00)
28#define MT7621_CDMP_EG_CTRL (MT7620_CDMA_CSG_CFG + 0x04)
29#define MT7621_RESET_FE BIT(6)
30#define MT7621_L4_VALID BIT(24)
31
32#define MT7621_TX_DMA_UDF BIT(19)
33
34#define CDMA_ICS_EN BIT(2)
35#define CDMA_UCS_EN BIT(1)
36#define CDMA_TCS_EN BIT(0)
37
38#define GDMA_ICS_EN BIT(22)
39#define GDMA_TCS_EN BIT(21)
40#define GDMA_UCS_EN BIT(20)
41
42/* frame engine counters */
43#define MT7621_REG_MIB_OFFSET 0x2000
44#define MT7621_PPE_AC_BCNT0 (MT7621_REG_MIB_OFFSET + 0x00)
45#define MT7621_GDM1_TX_GBCNT (MT7621_REG_MIB_OFFSET + 0x400)
46#define MT7621_GDM2_TX_GBCNT (MT7621_GDM1_TX_GBCNT + 0x40)
47
48#define GSW_REG_GDMA1_MAC_ADRL 0x508
49#define GSW_REG_GDMA1_MAC_ADRH 0x50C
50#define GSW_REG_GDMA2_MAC_ADRL 0x1508
51#define GSW_REG_GDMA2_MAC_ADRH 0x150C
52
53#define MT7621_MTK_RST_GL 0x04
54#define MT7620_MTK_INT_STATUS2 0x08
55
56/* MTK_INT_STATUS reg on mt7620 define CNT_GDM1_AF at BIT(29)
57 * but after test it should be BIT(13).
58 */
59#define MT7621_MTK_GDM1_AF BIT(28)
60#define MT7621_MTK_GDM2_AF BIT(29)
61
62static const u16 mt7621_reg_table[MTK_REG_COUNT] = {
63 [MTK_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG,
64 [MTK_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG,
65 [MTK_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG,
66 [MTK_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0,
67 [MTK_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0,
68 [MTK_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0,
69 [MTK_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0,
70 [MTK_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0,
71 [MTK_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0,
72 [MTK_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0,
73 [MTK_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0,
74 [MTK_REG_MTK_INT_ENABLE] = RT5350_MTK_INT_ENABLE,
75 [MTK_REG_MTK_INT_STATUS] = RT5350_MTK_INT_STATUS,
76 [MTK_REG_MTK_DMA_VID_BASE] = 0,
77 [MTK_REG_MTK_COUNTER_BASE] = MT7621_GDM1_TX_GBCNT,
78 [MTK_REG_MTK_RST_GL] = MT7621_MTK_RST_GL,
79 [MTK_REG_MTK_INT_STATUS2] = MT7620_MTK_INT_STATUS2,
80};
81
82static void mt7621_mtk_reset(struct mtk_eth *eth)
83{
84 mtk_reset(eth, MT7621_RESET_FE);
85}
86
87static int mt7621_fwd_config(struct mtk_eth *eth)
88{
89 /* Setup GMAC1 only, there is no support for GMAC2 yet */
90 mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) & ~0xffff,
91 MT7620_GDMA1_FWD_CFG);
92
93 /* Enable RX checksum */
94 mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) | (GDMA_ICS_EN |
95 GDMA_TCS_EN | GDMA_UCS_EN),
96 MT7620_GDMA1_FWD_CFG);
97
98 /* Enable RX VLan Offloading */
99 mtk_w32(eth, 0, MT7621_CDMP_EG_CTRL);
100
101 return 0;
102}
103
104static void mt7621_set_mac(struct mtk_mac *mac, unsigned char *hwaddr)
105{
106 unsigned long flags;
107
108 spin_lock_irqsave(&mac->hw->page_lock, flags);
109 if (mac->id == 0) {
110 mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
111 GSW_REG_GDMA1_MAC_ADRH);
112 mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
113 (hwaddr[4] << 8) | hwaddr[5],
114 GSW_REG_GDMA1_MAC_ADRL);
115 }
116 if (mac->id == 1) {
117 mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
118 GSW_REG_GDMA2_MAC_ADRH);
119 mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
120 (hwaddr[4] << 8) | hwaddr[5],
121 GSW_REG_GDMA2_MAC_ADRL);
122 }
123 spin_unlock_irqrestore(&mac->hw->page_lock, flags);
124}
125
126static struct mtk_soc_data mt7621_data = {
127 .hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
128 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
129 NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
130 NETIF_F_IPV6_CSUM,
131 .dma_type = MTK_PDMA,
132 .dma_ring_size = 256,
133 .napi_weight = 64,
134 .new_stats = 1,
135 .padding_64b = 1,
136 .rx_2b_offset = 1,
137 .rx_sg_dma = 1,
138 .has_switch = 1,
139 .mac_count = 2,
140 .reset_fe = mt7621_mtk_reset,
141 .set_mac = mt7621_set_mac,
142 .fwd_config = mt7621_fwd_config,
143 .switch_init = mtk_gsw_init,
144 .reg_table = mt7621_reg_table,
145 .pdma_glo_cfg = MTK_PDMA_SIZE_16DWORDS,
146 .rx_int = RT5350_RX_DONE_INT,
147 .tx_int = RT5350_TX_DONE_INT,
148 .status_int = MT7621_MTK_GDM1_AF | MT7621_MTK_GDM2_AF,
149 .checksum_bit = MT7621_L4_VALID,
150 .has_carrier = mt7620_has_carrier,
151 .mdio_read = mt7620_mdio_read,
152 .mdio_write = mt7620_mdio_write,
153 .mdio_adjust_link = mt7620_mdio_link_adjust,
154};
155
156const struct of_device_id of_mtk_match[] = {
157 { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
158 {},
159};
160
161MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/staging/mt7621-pci/Kconfig b/drivers/staging/mt7621-pci/Kconfig
index d33533872a16..c8fa17cfa807 100644
--- a/drivers/staging/mt7621-pci/Kconfig
+++ b/drivers/staging/mt7621-pci/Kconfig
@@ -1,6 +1,7 @@
1config PCI_MT7621 1config PCI_MT7621
2 tristate "MediaTek MT7621 PCI Controller" 2 tristate "MediaTek MT7621 PCI Controller"
3 depends on RALINK 3 depends on RALINK
4 depends on PCI
4 select PCI_DRIVERS_GENERIC 5 select PCI_DRIVERS_GENERIC
5 help 6 help
6 This selects a driver for the MediaTek MT7621 PCI Controller. 7 This selects a driver for the MediaTek MT7621 PCI Controller.
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index d6248eecf123..2aee64fdaec5 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
163 goto no_phy; 163 goto no_phy;
164 164
165 phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0, 165 phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
166 PHY_INTERFACE_MODE_GMII); 166 priv->phy_mode);
167 of_node_put(phy_node); 167 of_node_put(phy_node);
168 168
169 if (!phydev) 169 if (!phydev)
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index ce61c5670ef6..986db76705cc 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -653,14 +653,37 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
653 return np; 653 return np;
654} 654}
655 655
656static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port) 656static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
657 int port)
657{ 658{
659 struct device_node *np = priv->of_node;
658 u32 delay_value; 660 u32 delay_value;
661 bool rx_delay;
662 bool tx_delay;
659 663
660 if (!of_property_read_u32(np, "rx-delay", &delay_value)) 664 /* By default, both RX/TX delay is enabled in
665 * __cvmx_helper_rgmii_enable().
666 */
667 rx_delay = true;
668 tx_delay = true;
669
670 if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
661 cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value); 671 cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
662 if (!of_property_read_u32(np, "tx-delay", &delay_value)) 672 rx_delay = delay_value > 0;
673 }
674 if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
663 cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value); 675 cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
676 tx_delay = delay_value > 0;
677 }
678
679 if (!rx_delay && !tx_delay)
680 priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
681 else if (!rx_delay)
682 priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
683 else if (!tx_delay)
684 priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
685 else
686 priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
664} 687}
665 688
666static int cvm_oct_probe(struct platform_device *pdev) 689static int cvm_oct_probe(struct platform_device *pdev)
@@ -825,6 +848,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
825 priv->port = port; 848 priv->port = port;
826 priv->queue = cvmx_pko_get_base_queue(priv->port); 849 priv->queue = cvmx_pko_get_base_queue(priv->port);
827 priv->fau = fau - cvmx_pko_get_num_queues(port) * 4; 850 priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
851 priv->phy_mode = PHY_INTERFACE_MODE_NA;
828 for (qos = 0; qos < 16; qos++) 852 for (qos = 0; qos < 16; qos++)
829 skb_queue_head_init(&priv->tx_free_list[qos]); 853 skb_queue_head_init(&priv->tx_free_list[qos]);
830 for (qos = 0; qos < cvmx_pko_get_num_queues(port); 854 for (qos = 0; qos < cvmx_pko_get_num_queues(port);
@@ -856,6 +880,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
856 break; 880 break;
857 881
858 case CVMX_HELPER_INTERFACE_MODE_SGMII: 882 case CVMX_HELPER_INTERFACE_MODE_SGMII:
883 priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
859 dev->netdev_ops = &cvm_oct_sgmii_netdev_ops; 884 dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
860 strcpy(dev->name, "eth%d"); 885 strcpy(dev->name, "eth%d");
861 break; 886 break;
@@ -865,11 +890,16 @@ static int cvm_oct_probe(struct platform_device *pdev)
865 strcpy(dev->name, "spi%d"); 890 strcpy(dev->name, "spi%d");
866 break; 891 break;
867 892
868 case CVMX_HELPER_INTERFACE_MODE_RGMII:
869 case CVMX_HELPER_INTERFACE_MODE_GMII: 893 case CVMX_HELPER_INTERFACE_MODE_GMII:
894 priv->phy_mode = PHY_INTERFACE_MODE_GMII;
895 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
896 strcpy(dev->name, "eth%d");
897 break;
898
899 case CVMX_HELPER_INTERFACE_MODE_RGMII:
870 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops; 900 dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
871 strcpy(dev->name, "eth%d"); 901 strcpy(dev->name, "eth%d");
872 cvm_set_rgmii_delay(priv->of_node, interface, 902 cvm_set_rgmii_delay(priv, interface,
873 port_index); 903 port_index);
874 break; 904 break;
875 } 905 }
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index 4a07e7f43d12..be570d33685a 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -12,7 +12,7 @@
12#define OCTEON_ETHERNET_H 12#define OCTEON_ETHERNET_H
13 13
14#include <linux/of.h> 14#include <linux/of.h>
15 15#include <linux/phy.h>
16#include <asm/octeon/cvmx-helper-board.h> 16#include <asm/octeon/cvmx-helper-board.h>
17 17
18/** 18/**
@@ -33,6 +33,8 @@ struct octeon_ethernet {
33 * cvmx_helper_interface_mode_t 33 * cvmx_helper_interface_mode_t
34 */ 34 */
35 int imode; 35 int imode;
36 /* PHY mode */
37 phy_interface_t phy_mode;
36 /* List of outstanding tx buffers per queue */ 38 /* List of outstanding tx buffers per queue */
37 struct sk_buff_head tx_free_list[16]; 39 struct sk_buff_head tx_free_list[16];
38 unsigned int last_speed; 40 unsigned int last_speed;
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 80b8d4153414..a54286498a47 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
45{ 45{
46 unsigned char lob; 46 unsigned char lob;
47 int ret, i; 47 int ret, i;
48 struct dcon_gpio *pin = &gpios_asis[0]; 48 const struct dcon_gpio *pin = &gpios_asis[0];
49 49
50 for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) { 50 for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) {
51 gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name, 51 gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name,
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index 1723a47a96b4..952f2ab51347 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
174 174
175 pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf; 175 pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
176 176
177 rtw_alloc_hwxmits(padapter); 177 res = rtw_alloc_hwxmits(padapter);
178 if (res == _FAIL)
179 goto exit;
178 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); 180 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
179 181
180 for (i = 0; i < 4; i++) 182 for (i = 0; i < 4; i++)
@@ -1503,7 +1505,7 @@ exit:
1503 return res; 1505 return res;
1504} 1506}
1505 1507
1506void rtw_alloc_hwxmits(struct adapter *padapter) 1508s32 rtw_alloc_hwxmits(struct adapter *padapter)
1507{ 1509{
1508 struct hw_xmit *hwxmits; 1510 struct hw_xmit *hwxmits;
1509 struct xmit_priv *pxmitpriv = &padapter->xmitpriv; 1511 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
1512 1514
1513 pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry, 1515 pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
1514 sizeof(struct hw_xmit), GFP_KERNEL); 1516 sizeof(struct hw_xmit), GFP_KERNEL);
1517 if (!pxmitpriv->hwxmits)
1518 return _FAIL;
1515 1519
1516 hwxmits = pxmitpriv->hwxmits; 1520 hwxmits = pxmitpriv->hwxmits;
1517 1521
@@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
1519 hwxmits[1] .sta_queue = &pxmitpriv->vi_pending; 1523 hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
1520 hwxmits[2] .sta_queue = &pxmitpriv->be_pending; 1524 hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
1521 hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; 1525 hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
1526 return _SUCCESS;
1522} 1527}
1523 1528
1524void rtw_free_hwxmits(struct adapter *padapter) 1529void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index 788f59c74ea1..ba7e15fbde72 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
336void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry); 336void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
337s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter); 337s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
338void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv); 338void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
339void rtw_alloc_hwxmits(struct adapter *padapter); 339s32 rtw_alloc_hwxmits(struct adapter *padapter);
340void rtw_free_hwxmits(struct adapter *padapter); 340void rtw_free_hwxmits(struct adapter *padapter);
341s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt); 341s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
342 342
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 1920d02f7c9f..8c36acedf507 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
147 147
148static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf) 148static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
149{ 149{
150 u32 val;
151 void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj *pcmd);
152 struct cmd_obj *pcmd = (struct cmd_obj *)pbuf; 150 struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
153 151
154 if (pcmd->rsp && pcmd->rspsz > 0) 152 r8712_free_cmd_obj(pcmd);
155 memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
156 pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
157 if (!pcmd_callback)
158 r8712_free_cmd_obj(pcmd);
159 else
160 pcmd_callback(padapter, pcmd);
161 return H2C_SUCCESS; 153 return H2C_SUCCESS;
162} 154}
163 155
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.h b/drivers/staging/rtl8712/rtl8712_cmd.h
index 92fb77666d44..1ef86b8c592f 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.h
+++ b/drivers/staging/rtl8712/rtl8712_cmd.h
@@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd {
140static struct _cmd_callback cmd_callback[] = { 140static struct _cmd_callback cmd_callback[] = {
141 {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/ 141 {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
142 {GEN_CMD_CODE(_Write_MACREG), NULL}, 142 {GEN_CMD_CODE(_Write_MACREG), NULL},
143 {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback}, 143 {GEN_CMD_CODE(_Read_BBREG), NULL},
144 {GEN_CMD_CODE(_Write_BBREG), NULL}, 144 {GEN_CMD_CODE(_Write_BBREG), NULL},
145 {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback}, 145 {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
146 {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/ 146 {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 094d61bcb469..b87f13a0b563 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
260 } 260 }
261 } 261 }
262 262
263 rtw_alloc_hwxmits(padapter); 263 res = rtw_alloc_hwxmits(padapter);
264 if (res == _FAIL)
265 goto exit;
264 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); 266 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
265 267
266 for (i = 0; i < 4; i++) { 268 for (i = 0; i < 4; i++) {
@@ -2144,7 +2146,7 @@ exit:
2144 return res; 2146 return res;
2145} 2147}
2146 2148
2147void rtw_alloc_hwxmits(struct adapter *padapter) 2149s32 rtw_alloc_hwxmits(struct adapter *padapter)
2148{ 2150{
2149 struct hw_xmit *hwxmits; 2151 struct hw_xmit *hwxmits;
2150 struct xmit_priv *pxmitpriv = &padapter->xmitpriv; 2152 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
2155 2157
2156 pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry); 2158 pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
2157 2159
2158 if (pxmitpriv->hwxmits == NULL) { 2160 if (!pxmitpriv->hwxmits)
2159 DBG_871X("alloc hwxmits fail!...\n"); 2161 return _FAIL;
2160 return;
2161 }
2162 2162
2163 hwxmits = pxmitpriv->hwxmits; 2163 hwxmits = pxmitpriv->hwxmits;
2164 2164
@@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
2204 2204
2205 } 2205 }
2206 2206
2207 2207 return _SUCCESS;
2208} 2208}
2209 2209
2210void rtw_free_hwxmits(struct adapter *padapter) 2210void rtw_free_hwxmits(struct adapter *padapter)
diff --git a/drivers/staging/rtl8723bs/include/rtw_xmit.h b/drivers/staging/rtl8723bs/include/rtw_xmit.h
index 1b38b9182b31..37f42b2f22f1 100644
--- a/drivers/staging/rtl8723bs/include/rtw_xmit.h
+++ b/drivers/staging/rtl8723bs/include/rtw_xmit.h
@@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
487void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv); 487void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
488 488
489 489
490void rtw_alloc_hwxmits(struct adapter *padapter); 490s32 rtw_alloc_hwxmits(struct adapter *padapter);
491void rtw_free_hwxmits(struct adapter *padapter); 491void rtw_free_hwxmits(struct adapter *padapter);
492 492
493 493
diff --git a/drivers/staging/rtlwifi/phydm/rtl_phydm.c b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
index 9930ed954abb..4cc77b2016e1 100644
--- a/drivers/staging/rtlwifi/phydm/rtl_phydm.c
+++ b/drivers/staging/rtlwifi/phydm/rtl_phydm.c
@@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv,
180 180
181 rtlpriv->phydm.internal = 181 rtlpriv->phydm.internal =
182 kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL); 182 kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL);
183 if (!rtlpriv->phydm.internal)
184 return 0;
183 185
184 _rtl_phydm_init_com_info(rtlpriv, ic, params); 186 _rtl_phydm_init_com_info(rtlpriv, ic, params);
185 187
diff --git a/drivers/staging/rtlwifi/rtl8822be/fw.c b/drivers/staging/rtlwifi/rtl8822be/fw.c
index f061dd1382aa..cf6b7a80b753 100644
--- a/drivers/staging/rtlwifi/rtl8822be/fw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/fw.c
@@ -743,6 +743,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
743 u1_rsvd_page_loc, 3); 743 u1_rsvd_page_loc, 3);
744 744
745 skb = dev_alloc_skb(totalpacketlen); 745 skb = dev_alloc_skb(totalpacketlen);
746 if (!skb)
747 return;
746 memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet, 748 memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet,
747 totalpacketlen); 749 totalpacketlen);
748 750
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index edff6ce85655..9d85a3a1af4c 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -210,12 +210,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
210 return -EINVAL; 210 return -EINVAL;
211 211
212 spin_lock_irqsave(&speakup_info.spinlock, flags); 212 spin_lock_irqsave(&speakup_info.spinlock, flags);
213 synth_soft.alive = 1;
213 while (1) { 214 while (1) {
214 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); 215 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
215 if (!unicode) 216 if (synth_current() == &synth_soft) {
216 synth_buffer_skip_nonlatin1(); 217 if (!unicode)
217 if (!synth_buffer_empty() || speakup_info.flushing) 218 synth_buffer_skip_nonlatin1();
218 break; 219 if (!synth_buffer_empty() || speakup_info.flushing)
220 break;
221 }
219 spin_unlock_irqrestore(&speakup_info.spinlock, flags); 222 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
220 if (fp->f_flags & O_NONBLOCK) { 223 if (fp->f_flags & O_NONBLOCK) {
221 finish_wait(&speakup_event, &wait); 224 finish_wait(&speakup_event, &wait);
@@ -235,6 +238,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
235 238
236 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ 239 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
237 while (chars_sent <= count - bytes_per_ch) { 240 while (chars_sent <= count - bytes_per_ch) {
241 if (synth_current() != &synth_soft)
242 break;
238 if (speakup_info.flushing) { 243 if (speakup_info.flushing) {
239 speakup_info.flushing = 0; 244 speakup_info.flushing = 0;
240 ch = '\x18'; 245 ch = '\x18';
@@ -331,7 +336,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
331 poll_wait(fp, &speakup_event, wait); 336 poll_wait(fp, &speakup_event, wait);
332 337
333 spin_lock_irqsave(&speakup_info.spinlock, flags); 338 spin_lock_irqsave(&speakup_info.spinlock, flags);
334 if (!synth_buffer_empty() || speakup_info.flushing) 339 if (synth_current() == &synth_soft &&
340 (!synth_buffer_empty() || speakup_info.flushing))
335 ret = EPOLLIN | EPOLLRDNORM; 341 ret = EPOLLIN | EPOLLRDNORM;
336 spin_unlock_irqrestore(&speakup_info.spinlock, flags); 342 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
337 return ret; 343 return ret;
diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
index c8e688878fc7..ac6a74883af4 100644
--- a/drivers/staging/speakup/spk_priv.h
+++ b/drivers/staging/speakup/spk_priv.h
@@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n);
74int synth_release_region(unsigned long start, unsigned long n); 74int synth_release_region(unsigned long start, unsigned long n);
75int synth_add(struct spk_synth *in_synth); 75int synth_add(struct spk_synth *in_synth);
76void synth_remove(struct spk_synth *in_synth); 76void synth_remove(struct spk_synth *in_synth);
77struct spk_synth *synth_current(void);
77 78
78extern struct speakup_info_t speakup_info; 79extern struct speakup_info_t speakup_info;
79 80
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index 25f259ee4ffc..3568bfb89912 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
481} 481}
482EXPORT_SYMBOL_GPL(synth_remove); 482EXPORT_SYMBOL_GPL(synth_remove);
483 483
484struct spk_synth *synth_current(void)
485{
486 return synth;
487}
488EXPORT_SYMBOL_GPL(synth_current);
489
484short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM }; 490short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 804daf83be35..064d0db4c51e 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev)
3513 struct device_node *fw_node; 3513 struct device_node *fw_node;
3514 const struct of_device_id *of_id; 3514 const struct of_device_id *of_id;
3515 struct vchiq_drvdata *drvdata; 3515 struct vchiq_drvdata *drvdata;
3516 struct device *vchiq_dev;
3516 int err; 3517 int err;
3517 3518
3518 of_id = of_match_node(vchiq_of_match, pdev->dev.of_node); 3519 of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
@@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev)
3547 goto failed_platform_init; 3548 goto failed_platform_init;
3548 } 3549 }
3549 3550
3550 if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid, 3551 vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
3551 NULL, "vchiq"))) 3552 "vchiq");
3553 if (IS_ERR(vchiq_dev)) {
3554 err = PTR_ERR(vchiq_dev);
3552 goto failed_device_create; 3555 goto failed_device_create;
3556 }
3553 3557
3554 vchiq_debugfs_init(); 3558 vchiq_debugfs_init();
3555 3559
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b370985b58a1..c6bb4aaf9bd0 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
1033 return; 1033 return;
1034 } 1034 }
1035 1035
1036 MACvIntDisable(priv->PortOffset);
1037
1038 spin_lock_irqsave(&priv->lock, flags); 1036 spin_lock_irqsave(&priv->lock, flags);
1039 1037
1040 /* Read low level stats */ 1038 /* Read low level stats */
@@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
1122 } 1120 }
1123 1121
1124 spin_unlock_irqrestore(&priv->lock, flags); 1122 spin_unlock_irqrestore(&priv->lock, flags);
1125
1126 MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
1127} 1123}
1128 1124
1129static void vnt_interrupt_work(struct work_struct *work) 1125static void vnt_interrupt_work(struct work_struct *work)
@@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work)
1133 1129
1134 if (priv->vif) 1130 if (priv->vif)
1135 vnt_interrupt_process(priv); 1131 vnt_interrupt_process(priv);
1132
1133 MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
1136} 1134}
1137 1135
1138static irqreturn_t vnt_interrupt(int irq, void *arg) 1136static irqreturn_t vnt_interrupt(int irq, void *arg)
1139{ 1137{
1140 struct vnt_private *priv = arg; 1138 struct vnt_private *priv = arg;
1141 1139
1142 if (priv->vif) 1140 schedule_work(&priv->interrupt_work);
1143 schedule_work(&priv->interrupt_work); 1141
1142 MACvIntDisable(priv->PortOffset);
1144 1143
1145 return IRQ_HANDLED; 1144 return IRQ_HANDLED;
1146} 1145}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 5831e0eecea1..9704b135a7bc 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1663,7 +1663,7 @@ static void tcmu_dev_kref_release(struct kref *kref)
1663 WARN_ON(!all_expired); 1663 WARN_ON(!all_expired);
1664 1664
1665 tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1); 1665 tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
1666 kfree(udev->data_bitmap); 1666 bitmap_free(udev->data_bitmap);
1667 mutex_unlock(&udev->cmdr_lock); 1667 mutex_unlock(&udev->cmdr_lock);
1668 1668
1669 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); 1669 call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
@@ -1794,11 +1794,12 @@ static int tcmu_netlink_event_send(struct tcmu_dev *udev,
1794 1794
1795 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, 1795 ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
1796 TCMU_MCGRP_CONFIG, GFP_KERNEL); 1796 TCMU_MCGRP_CONFIG, GFP_KERNEL);
1797 /* We don't care if no one is listening */ 1797
1798 if (ret == -ESRCH) 1798 /* Wait during an add as the listener may not be up yet */
1799 ret = 0; 1799 if (ret == 0 ||
1800 if (!ret) 1800 (ret == -ESRCH && cmd == TCMU_CMD_ADDED_DEVICE))
1801 ret = tcmu_wait_genl_cmd_reply(udev); 1801 return tcmu_wait_genl_cmd_reply(udev);
1802
1802 return ret; 1803 return ret;
1803} 1804}
1804 1805
@@ -1870,9 +1871,7 @@ static int tcmu_configure_device(struct se_device *dev)
1870 info = &udev->uio_info; 1871 info = &udev->uio_info;
1871 1872
1872 mutex_lock(&udev->cmdr_lock); 1873 mutex_lock(&udev->cmdr_lock);
1873 udev->data_bitmap = kcalloc(BITS_TO_LONGS(udev->max_blocks), 1874 udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL);
1874 sizeof(unsigned long),
1875 GFP_KERNEL);
1876 mutex_unlock(&udev->cmdr_lock); 1875 mutex_unlock(&udev->cmdr_lock);
1877 if (!udev->data_bitmap) { 1876 if (!udev->data_bitmap) {
1878 ret = -ENOMEM; 1877 ret = -ENOMEM;
@@ -1959,7 +1958,7 @@ err_register:
1959 vfree(udev->mb_addr); 1958 vfree(udev->mb_addr);
1960 udev->mb_addr = NULL; 1959 udev->mb_addr = NULL;
1961err_vzalloc: 1960err_vzalloc:
1962 kfree(udev->data_bitmap); 1961 bitmap_free(udev->data_bitmap);
1963 udev->data_bitmap = NULL; 1962 udev->data_bitmap = NULL;
1964err_bitmap_alloc: 1963err_bitmap_alloc:
1965 kfree(info->name); 1964 kfree(info->name);
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 720760cd493f..ba39647a690c 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -119,8 +119,7 @@ static const struct debugfs_reg32 bcm2835_thermal_regs[] = {
119 119
120static void bcm2835_thermal_debugfs(struct platform_device *pdev) 120static void bcm2835_thermal_debugfs(struct platform_device *pdev)
121{ 121{
122 struct thermal_zone_device *tz = platform_get_drvdata(pdev); 122 struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
123 struct bcm2835_thermal_data *data = tz->devdata;
124 struct debugfs_regset32 *regset; 123 struct debugfs_regset32 *regset;
125 124
126 data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL); 125 data->debugfsdir = debugfs_create_dir("bcm2835_thermal", NULL);
@@ -266,7 +265,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
266 265
267 data->tz = tz; 266 data->tz = tz;
268 267
269 platform_set_drvdata(pdev, tz); 268 platform_set_drvdata(pdev, data);
270 269
271 /* 270 /*
272 * Thermal_zone doesn't enable hwmon as default, 271 * Thermal_zone doesn't enable hwmon as default,
@@ -290,8 +289,8 @@ err_clk:
290 289
291static int bcm2835_thermal_remove(struct platform_device *pdev) 290static int bcm2835_thermal_remove(struct platform_device *pdev)
292{ 291{
293 struct thermal_zone_device *tz = platform_get_drvdata(pdev); 292 struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
294 struct bcm2835_thermal_data *data = tz->devdata; 293 struct thermal_zone_device *tz = data->tz;
295 294
296 debugfs_remove_recursive(data->debugfsdir); 295 debugfs_remove_recursive(data->debugfsdir);
297 thermal_zone_of_sensor_unregister(&pdev->dev, tz); 296 thermal_zone_of_sensor_unregister(&pdev->dev, tz);
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 6fff16113628..f7c1f49ec87f 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -536,12 +536,11 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev,
536 struct thermal_zone_device *tz, u32 power, 536 struct thermal_zone_device *tz, u32 power,
537 unsigned long *state) 537 unsigned long *state)
538{ 538{
539 unsigned int cur_freq, target_freq; 539 unsigned int target_freq;
540 u32 last_load, normalised_power; 540 u32 last_load, normalised_power;
541 struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; 541 struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
542 struct cpufreq_policy *policy = cpufreq_cdev->policy; 542 struct cpufreq_policy *policy = cpufreq_cdev->policy;
543 543
544 cur_freq = cpufreq_quick_get(policy->cpu);
545 power = power > 0 ? power : 0; 544 power = power > 0 ? power : 0;
546 last_load = cpufreq_cdev->last_load ?: 1; 545 last_load = cpufreq_cdev->last_load ?: 1;
547 normalised_power = (power * 100) / last_load; 546 normalised_power = (power * 100) / last_load;
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 61ca7ce3624e..5f3ed24e26ec 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -22,6 +22,13 @@ enum int3400_thermal_uuid {
22 INT3400_THERMAL_PASSIVE_1, 22 INT3400_THERMAL_PASSIVE_1,
23 INT3400_THERMAL_ACTIVE, 23 INT3400_THERMAL_ACTIVE,
24 INT3400_THERMAL_CRITICAL, 24 INT3400_THERMAL_CRITICAL,
25 INT3400_THERMAL_ADAPTIVE_PERFORMANCE,
26 INT3400_THERMAL_EMERGENCY_CALL_MODE,
27 INT3400_THERMAL_PASSIVE_2,
28 INT3400_THERMAL_POWER_BOSS,
29 INT3400_THERMAL_VIRTUAL_SENSOR,
30 INT3400_THERMAL_COOLING_MODE,
31 INT3400_THERMAL_HARDWARE_DUTY_CYCLING,
25 INT3400_THERMAL_MAXIMUM_UUID, 32 INT3400_THERMAL_MAXIMUM_UUID,
26}; 33};
27 34
@@ -29,6 +36,13 @@ static char *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
29 "42A441D6-AE6A-462b-A84B-4A8CE79027D3", 36 "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
30 "3A95C389-E4B8-4629-A526-C52C88626BAE", 37 "3A95C389-E4B8-4629-A526-C52C88626BAE",
31 "97C68AE7-15FA-499c-B8C9-5DA81D606E0A", 38 "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
39 "63BE270F-1C11-48FD-A6F7-3AF253FF3E2D",
40 "5349962F-71E6-431D-9AE8-0A635B710AEE",
41 "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
42 "F5A35014-C209-46A4-993A-EB56DE7530A1",
43 "6ED722A7-9240-48A5-B479-31EEF723D7CF",
44 "16CAF1B7-DD38-40ED-B1C1-1B8A1913D531",
45 "BE84BABF-C4D4-403D-B495-3128FD44dAC1",
32}; 46};
33 47
34struct int3400_thermal_priv { 48struct int3400_thermal_priv {
@@ -299,10 +313,9 @@ static int int3400_thermal_probe(struct platform_device *pdev)
299 313
300 platform_set_drvdata(pdev, priv); 314 platform_set_drvdata(pdev, priv);
301 315
302 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) { 316 int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
303 int3400_thermal_ops.get_mode = int3400_thermal_get_mode; 317 int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
304 int3400_thermal_ops.set_mode = int3400_thermal_set_mode; 318
305 }
306 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0, 319 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
307 priv, &int3400_thermal_ops, 320 priv, &int3400_thermal_ops,
308 &int3400_thermal_params, 0, 0); 321 &int3400_thermal_params, 0, 0);
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index 7571f7c2e7c9..ac7256b5f020 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -101,7 +101,7 @@ struct powerclamp_worker_data {
101 bool clamping; 101 bool clamping;
102}; 102};
103 103
104static struct powerclamp_worker_data * __percpu worker_data; 104static struct powerclamp_worker_data __percpu *worker_data;
105static struct thermal_cooling_device *cooling_dev; 105static struct thermal_cooling_device *cooling_dev;
106static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu 106static unsigned long *cpu_clamping_mask; /* bit map for tracking per cpu
107 * clamping kthread worker 107 * clamping kthread worker
@@ -494,7 +494,7 @@ static void start_power_clamp_worker(unsigned long cpu)
494 struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu); 494 struct powerclamp_worker_data *w_data = per_cpu_ptr(worker_data, cpu);
495 struct kthread_worker *worker; 495 struct kthread_worker *worker;
496 496
497 worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inject/%ld", cpu); 497 worker = kthread_create_worker_on_cpu(cpu, 0, "kidle_inj/%ld", cpu);
498 if (IS_ERR(worker)) 498 if (IS_ERR(worker))
499 return; 499 return;
500 500
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 5c07a61447d3..e4ea7f6aef20 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -199,6 +199,9 @@ enum {
199#define MT7622_TS1 0 199#define MT7622_TS1 0
200#define MT7622_NUM_CONTROLLER 1 200#define MT7622_NUM_CONTROLLER 1
201 201
202/* The maximum number of banks */
203#define MAX_NUM_ZONES 8
204
202/* The calibration coefficient of sensor */ 205/* The calibration coefficient of sensor */
203#define MT7622_CALIBRATION 165 206#define MT7622_CALIBRATION 165
204 207
@@ -249,7 +252,7 @@ struct mtk_thermal_data {
249 const int num_controller; 252 const int num_controller;
250 const int *controller_offset; 253 const int *controller_offset;
251 bool need_switch_bank; 254 bool need_switch_bank;
252 struct thermal_bank_cfg bank_data[]; 255 struct thermal_bank_cfg bank_data[MAX_NUM_ZONES];
253}; 256};
254 257
255struct mtk_thermal { 258struct mtk_thermal {
@@ -268,7 +271,7 @@ struct mtk_thermal {
268 s32 vts[MAX_NUM_VTS]; 271 s32 vts[MAX_NUM_VTS];
269 272
270 const struct mtk_thermal_data *conf; 273 const struct mtk_thermal_data *conf;
271 struct mtk_thermal_bank banks[]; 274 struct mtk_thermal_bank banks[MAX_NUM_ZONES];
272}; 275};
273 276
274/* MT8183 thermal sensor data */ 277/* MT8183 thermal sensor data */
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 48eef552cba4..fc9399d9c082 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -666,7 +666,7 @@ static int exynos_get_temp(void *p, int *temp)
666 struct exynos_tmu_data *data = p; 666 struct exynos_tmu_data *data = p;
667 int value, ret = 0; 667 int value, ret = 0;
668 668
669 if (!data || !data->tmu_read || !data->enabled) 669 if (!data || !data->tmu_read)
670 return -EINVAL; 670 return -EINVAL;
671 else if (!data->enabled) 671 else if (!data->enabled)
672 /* 672 /*
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index db5df3d54818..3bdd56a1021b 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -49,11 +49,6 @@ struct ar933x_uart_port {
49 struct clk *clk; 49 struct clk *clk;
50}; 50};
51 51
52static inline bool ar933x_uart_console_enabled(void)
53{
54 return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
55}
56
57static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up, 52static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
58 int offset) 53 int offset)
59{ 54{
@@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = {
508 .verify_port = ar933x_uart_verify_port, 503 .verify_port = ar933x_uart_verify_port,
509}; 504};
510 505
506#ifdef CONFIG_SERIAL_AR933X_CONSOLE
511static struct ar933x_uart_port * 507static struct ar933x_uart_port *
512ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS]; 508ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
513 509
@@ -604,14 +600,7 @@ static struct console ar933x_uart_console = {
604 .index = -1, 600 .index = -1,
605 .data = &ar933x_uart_driver, 601 .data = &ar933x_uart_driver,
606}; 602};
607 603#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
608static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
609{
610 if (!ar933x_uart_console_enabled())
611 return;
612
613 ar933x_console_ports[up->port.line] = up;
614}
615 604
616static struct uart_driver ar933x_uart_driver = { 605static struct uart_driver ar933x_uart_driver = {
617 .owner = THIS_MODULE, 606 .owner = THIS_MODULE,
@@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
700 baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP); 689 baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
701 up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD); 690 up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
702 691
703 ar933x_uart_add_console_port(up); 692#ifdef CONFIG_SERIAL_AR933X_CONSOLE
693 ar933x_console_ports[up->port.line] = up;
694#endif
704 695
705 ret = uart_add_one_port(&ar933x_uart_driver, &up->port); 696 ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
706 if (ret) 697 if (ret)
@@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void)
749{ 740{
750 int ret; 741 int ret;
751 742
752 if (ar933x_uart_console_enabled()) 743#ifdef CONFIG_SERIAL_AR933X_CONSOLE
753 ar933x_uart_driver.cons = &ar933x_uart_console; 744 ar933x_uart_driver.cons = &ar933x_uart_console;
745#endif
754 746
755 ret = uart_register_driver(&ar933x_uart_driver); 747 ret = uart_register_driver(&ar933x_uart_driver);
756 if (ret) 748 if (ret)
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 05147fe24343..0b4f36905321 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -166,6 +166,8 @@ struct atmel_uart_port {
166 unsigned int pending_status; 166 unsigned int pending_status;
167 spinlock_t lock_suspended; 167 spinlock_t lock_suspended;
168 168
169 bool hd_start_rx; /* can start RX during half-duplex operation */
170
169 /* ISO7816 */ 171 /* ISO7816 */
170 unsigned int fidi_min; 172 unsigned int fidi_min;
171 unsigned int fidi_max; 173 unsigned int fidi_max;
@@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
231 __raw_writeb(value, port->membase + ATMEL_US_THR); 233 __raw_writeb(value, port->membase + ATMEL_US_THR);
232} 234}
233 235
236static inline int atmel_uart_is_half_duplex(struct uart_port *port)
237{
238 return ((port->rs485.flags & SER_RS485_ENABLED) &&
239 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
240 (port->iso7816.flags & SER_ISO7816_ENABLED);
241}
242
234#ifdef CONFIG_SERIAL_ATMEL_PDC 243#ifdef CONFIG_SERIAL_ATMEL_PDC
235static bool atmel_use_pdc_rx(struct uart_port *port) 244static bool atmel_use_pdc_rx(struct uart_port *port)
236{ 245{
@@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port)
608 /* Disable interrupts */ 617 /* Disable interrupts */
609 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask); 618 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
610 619
611 if (((port->rs485.flags & SER_RS485_ENABLED) && 620 if (atmel_uart_is_half_duplex(port))
612 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
613 port->iso7816.flags & SER_ISO7816_ENABLED)
614 atmel_start_rx(port); 621 atmel_start_rx(port);
622
615} 623}
616 624
617/* 625/*
@@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port)
628 return; 636 return;
629 637
630 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) 638 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
631 if (((port->rs485.flags & SER_RS485_ENABLED) && 639 if (atmel_uart_is_half_duplex(port))
632 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
633 port->iso7816.flags & SER_ISO7816_ENABLED)
634 atmel_stop_rx(port); 640 atmel_stop_rx(port);
635 641
636 if (atmel_use_pdc_tx(port)) 642 if (atmel_use_pdc_tx(port))
@@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg)
928 */ 934 */
929 if (!uart_circ_empty(xmit)) 935 if (!uart_circ_empty(xmit))
930 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 936 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
931 else if (((port->rs485.flags & SER_RS485_ENABLED) && 937 else if (atmel_uart_is_half_duplex(port)) {
932 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) || 938 /*
933 port->iso7816.flags & SER_ISO7816_ENABLED) { 939 * DMA done, re-enable TXEMPTY and signal that we can stop
934 /* DMA done, stop TX, start RX for RS485 */ 940 * TX and start RX for RS485
935 atmel_start_rx(port); 941 */
942 atmel_port->hd_start_rx = true;
943 atmel_uart_writel(port, ATMEL_US_IER,
944 atmel_port->tx_done_mask);
936 } 945 }
937 946
938 spin_unlock_irqrestore(&port->lock, flags); 947 spin_unlock_irqrestore(&port->lock, flags);
@@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
1288 sg_dma_len(&atmel_port->sg_rx)/2, 1297 sg_dma_len(&atmel_port->sg_rx)/2,
1289 DMA_DEV_TO_MEM, 1298 DMA_DEV_TO_MEM,
1290 DMA_PREP_INTERRUPT); 1299 DMA_PREP_INTERRUPT);
1300 if (!desc) {
1301 dev_err(port->dev, "Preparing DMA cyclic failed\n");
1302 goto chan_err;
1303 }
1291 desc->callback = atmel_complete_rx_dma; 1304 desc->callback = atmel_complete_rx_dma;
1292 desc->callback_param = port; 1305 desc->callback_param = port;
1293 atmel_port->desc_rx = desc; 1306 atmel_port->desc_rx = desc;
@@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
1376 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); 1389 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
1377 1390
1378 if (pending & atmel_port->tx_done_mask) { 1391 if (pending & atmel_port->tx_done_mask) {
1379 /* Either PDC or interrupt transmission */
1380 atmel_uart_writel(port, ATMEL_US_IDR, 1392 atmel_uart_writel(port, ATMEL_US_IDR,
1381 atmel_port->tx_done_mask); 1393 atmel_port->tx_done_mask);
1394
1395 /* Start RX if flag was set and FIFO is empty */
1396 if (atmel_port->hd_start_rx) {
1397 if (!(atmel_uart_readl(port, ATMEL_US_CSR)
1398 & ATMEL_US_TXEMPTY))
1399 dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
1400
1401 atmel_port->hd_start_rx = false;
1402 atmel_start_rx(port);
1403 return;
1404 }
1405
1382 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); 1406 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
1383 } 1407 }
1384} 1408}
@@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port)
1508 atmel_uart_writel(port, ATMEL_US_IER, 1532 atmel_uart_writel(port, ATMEL_US_IER,
1509 atmel_port->tx_done_mask); 1533 atmel_port->tx_done_mask);
1510 } else { 1534 } else {
1511 if (((port->rs485.flags & SER_RS485_ENABLED) && 1535 if (atmel_uart_is_half_duplex(port)) {
1512 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
1513 port->iso7816.flags & SER_ISO7816_ENABLED) {
1514 /* DMA done, stop TX, start RX for RS485 */ 1536 /* DMA done, stop TX, start RX for RS485 */
1515 atmel_start_rx(port); 1537 atmel_start_rx(port);
1516 } 1538 }
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 6fb312e7af71..bfe5e9e034ec 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -148,8 +148,10 @@ static int configure_kgdboc(void)
148 char *cptr = config; 148 char *cptr = config;
149 struct console *cons; 149 struct console *cons;
150 150
151 if (!strlen(config) || isspace(config[0])) 151 if (!strlen(config) || isspace(config[0])) {
152 err = 0;
152 goto noconfig; 153 goto noconfig;
154 }
153 155
154 kgdboc_io_ops.is_console = 0; 156 kgdboc_io_ops.is_console = 0;
155 kgdb_tty_driver = NULL; 157 kgdb_tty_driver = NULL;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index f5bdde405627..450ba6d7996c 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1415,6 +1415,8 @@ static int max310x_spi_probe(struct spi_device *spi)
1415 if (spi->dev.of_node) { 1415 if (spi->dev.of_node) {
1416 const struct of_device_id *of_id = 1416 const struct of_device_id *of_id =
1417 of_match_device(max310x_dt_ids, &spi->dev); 1417 of_match_device(max310x_dt_ids, &spi->dev);
1418 if (!of_id)
1419 return -ENODEV;
1418 1420
1419 devtype = (struct max310x_devtype *)of_id->data; 1421 devtype = (struct max310x_devtype *)of_id->data;
1420 } else { 1422 } else {
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 231f751d1ef4..7e7b1559fa36 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
810 return -EINVAL; 810 return -EINVAL;
811 } 811 }
812 812
813 if (!match)
814 return -ENODEV;
815
813 /* Assume that all UART ports have a DT alias or none has */ 816 /* Assume that all UART ports have a DT alias or none has */
814 id = of_alias_get_id(pdev->dev.of_node, "serial"); 817 id = of_alias_get_id(pdev->dev.of_node, "serial");
815 if (!pdev->dev.of_node || id < 0) 818 if (!pdev->dev.of_node || id < 0)
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 27235a526cce..4c188f4079b3 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
1686 1686
1687 s->port.mapbase = r->start; 1687 s->port.mapbase = r->start;
1688 s->port.membase = ioremap(r->start, resource_size(r)); 1688 s->port.membase = ioremap(r->start, resource_size(r));
1689 if (!s->port.membase) {
1690 ret = -ENOMEM;
1691 goto out_disable_clks;
1692 }
1689 s->port.ops = &mxs_auart_ops; 1693 s->port.ops = &mxs_auart_ops;
1690 s->port.iotype = UPIO_MEM; 1694 s->port.iotype = UPIO_MEM;
1691 s->port.fifosize = MXS_AUART_FIFO_SIZE; 1695 s->port.fifosize = MXS_AUART_FIFO_SIZE;
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 3bcec1c20219..35e5f9c5d5be 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -1050,7 +1050,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
1050{ 1050{
1051 struct uart_port *uport; 1051 struct uart_port *uport;
1052 struct qcom_geni_serial_port *port; 1052 struct qcom_geni_serial_port *port;
1053 int baud; 1053 int baud = 9600;
1054 int bits = 8; 1054 int bits = 8;
1055 int parity = 'n'; 1055 int parity = 'n';
1056 int flow = 'n'; 1056 int flow = 'n';
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 635178cf3eed..09a183dfc526 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1507,7 +1507,7 @@ static int __init sc16is7xx_init(void)
1507 ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver); 1507 ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
1508 if (ret < 0) { 1508 if (ret < 0) {
1509 pr_err("failed to init sc16is7xx i2c --> %d\n", ret); 1509 pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
1510 return ret; 1510 goto err_i2c;
1511 } 1511 }
1512#endif 1512#endif
1513 1513
@@ -1515,10 +1515,18 @@ static int __init sc16is7xx_init(void)
1515 ret = spi_register_driver(&sc16is7xx_spi_uart_driver); 1515 ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
1516 if (ret < 0) { 1516 if (ret < 0) {
1517 pr_err("failed to init sc16is7xx spi --> %d\n", ret); 1517 pr_err("failed to init sc16is7xx spi --> %d\n", ret);
1518 return ret; 1518 goto err_spi;
1519 } 1519 }
1520#endif 1520#endif
1521 return ret; 1521 return ret;
1522
1523err_spi:
1524#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
1525 i2c_del_driver(&sc16is7xx_i2c_uart_driver);
1526#endif
1527err_i2c:
1528 uart_unregister_driver(&sc16is7xx_uart);
1529 return ret;
1522} 1530}
1523module_init(sc16is7xx_init); 1531module_init(sc16is7xx_init);
1524 1532
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 060fcd42b6d5..2d1c626312cd 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
838 838
839 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 839 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
840 uart_write_wakeup(port); 840 uart_write_wakeup(port);
841 if (uart_circ_empty(xmit)) { 841 if (uart_circ_empty(xmit))
842 sci_stop_tx(port); 842 sci_stop_tx(port);
843 } else {
844 ctrl = serial_port_in(port, SCSCR);
845
846 if (port->type != PORT_SCI) {
847 serial_port_in(port, SCxSR); /* Dummy read */
848 sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
849 }
850 843
851 ctrl |= SCSCR_TIE;
852 serial_port_out(port, SCSCR, ctrl);
853 }
854} 844}
855 845
856/* On SH3, SCIF may read end-of-break as a space->mark char */ 846/* On SH3, SCIF may read end-of-break as a space->mark char */
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 044c3cbdcfa4..a9e12b3bc31d 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -325,7 +325,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
325 if (tty && C_HUPCL(tty)) 325 if (tty && C_HUPCL(tty))
326 tty_port_lower_dtr_rts(port); 326 tty_port_lower_dtr_rts(port);
327 327
328 if (port->ops->shutdown) 328 if (port->ops && port->ops->shutdown)
329 port->ops->shutdown(port); 329 port->ops->shutdown(port);
330 } 330 }
331out: 331out:
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
398 */ 398 */
399int tty_port_carrier_raised(struct tty_port *port) 399int tty_port_carrier_raised(struct tty_port *port)
400{ 400{
401 if (port->ops->carrier_raised == NULL) 401 if (!port->ops || !port->ops->carrier_raised)
402 return 1; 402 return 1;
403 return port->ops->carrier_raised(port); 403 return port->ops->carrier_raised(port);
404} 404}
@@ -414,7 +414,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
414 */ 414 */
415void tty_port_raise_dtr_rts(struct tty_port *port) 415void tty_port_raise_dtr_rts(struct tty_port *port)
416{ 416{
417 if (port->ops->dtr_rts) 417 if (port->ops && port->ops->dtr_rts)
418 port->ops->dtr_rts(port, 1); 418 port->ops->dtr_rts(port, 1);
419} 419}
420EXPORT_SYMBOL(tty_port_raise_dtr_rts); 420EXPORT_SYMBOL(tty_port_raise_dtr_rts);
@@ -429,7 +429,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
429 */ 429 */
430void tty_port_lower_dtr_rts(struct tty_port *port) 430void tty_port_lower_dtr_rts(struct tty_port *port)
431{ 431{
432 if (port->ops->dtr_rts) 432 if (port->ops && port->ops->dtr_rts)
433 port->ops->dtr_rts(port, 0); 433 port->ops->dtr_rts(port, 0);
434} 434}
435EXPORT_SYMBOL(tty_port_lower_dtr_rts); 435EXPORT_SYMBOL(tty_port_lower_dtr_rts);
@@ -684,7 +684,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
684 684
685 if (!tty_port_initialized(port)) { 685 if (!tty_port_initialized(port)) {
686 clear_bit(TTY_IO_ERROR, &tty->flags); 686 clear_bit(TTY_IO_ERROR, &tty->flags);
687 if (port->ops->activate) { 687 if (port->ops && port->ops->activate) {
688 int retval = port->ops->activate(port, tty); 688 int retval = port->ops->activate(port, tty);
689 if (retval) { 689 if (retval) {
690 mutex_unlock(&port->mutex); 690 mutex_unlock(&port->mutex);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 739f8960811a..ec666eb4b7b4 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
558 clear_bit(EVENT_RX_STALL, &acm->flags); 558 clear_bit(EVENT_RX_STALL, &acm->flags);
559 } 559 }
560 560
561 if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) { 561 if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
562 tty_port_tty_wakeup(&acm->port); 562 tty_port_tty_wakeup(&acm->port);
563 clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
564 }
565} 563}
566 564
567/* 565/*
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 48277bbc15e4..73c8e6591746 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
145 145
146 do { 146 do {
147 controller = of_find_node_with_property(controller, "phys"); 147 controller = of_find_node_with_property(controller, "phys");
148 if (!of_device_is_available(controller))
149 continue;
148 index = 0; 150 index = 0;
149 do { 151 do {
150 if (arg0 == -1) { 152 if (arg0 == -1) {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 3189181bb628..975d7c1288e3 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2742,6 +2742,9 @@ int usb_add_hcd(struct usb_hcd *hcd,
2742 retval = usb_phy_roothub_set_mode(hcd->phy_roothub, 2742 retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
2743 PHY_MODE_USB_HOST_SS); 2743 PHY_MODE_USB_HOST_SS);
2744 if (retval) 2744 if (retval)
2745 retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
2746 PHY_MODE_USB_HOST);
2747 if (retval)
2745 goto err_usb_phy_roothub_power_on; 2748 goto err_usb_phy_roothub_power_on;
2746 2749
2747 retval = usb_phy_roothub_power_on(hcd->phy_roothub); 2750 retval = usb_phy_roothub_power_on(hcd->phy_roothub);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index fdc6e4e403e8..8cced3609e24 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -29,6 +29,7 @@
29#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa 29#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa
30#define PCI_DEVICE_ID_INTEL_APL 0x5aaa 30#define PCI_DEVICE_ID_INTEL_APL 0x5aaa
31#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 31#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0
32#define PCI_DEVICE_ID_INTEL_CMLH 0x02ee
32#define PCI_DEVICE_ID_INTEL_GLK 0x31aa 33#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
33#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee 34#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
34#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e 35#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
@@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
305 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD), 306 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
306 (kernel_ulong_t) &dwc3_pci_mrfld_properties, }, 307 (kernel_ulong_t) &dwc3_pci_mrfld_properties, },
307 308
309 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
310 (kernel_ulong_t) &dwc3_pci_intel_properties, },
311
308 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP), 312 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
309 (kernel_ulong_t) &dwc3_pci_intel_properties, }, 313 (kernel_ulong_t) &dwc3_pci_intel_properties, },
310 314
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 75b113a5b25c..f3816a5c861e 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -391,20 +391,20 @@ try_again:
391 req->complete = f_hidg_req_complete; 391 req->complete = f_hidg_req_complete;
392 req->context = hidg; 392 req->context = hidg;
393 393
394 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
395
394 status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC); 396 status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
395 if (status < 0) { 397 if (status < 0) {
396 ERROR(hidg->func.config->cdev, 398 ERROR(hidg->func.config->cdev,
397 "usb_ep_queue error on int endpoint %zd\n", status); 399 "usb_ep_queue error on int endpoint %zd\n", status);
398 goto release_write_pending_unlocked; 400 goto release_write_pending;
399 } else { 401 } else {
400 status = count; 402 status = count;
401 } 403 }
402 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
403 404
404 return status; 405 return status;
405release_write_pending: 406release_write_pending:
406 spin_lock_irqsave(&hidg->write_spinlock, flags); 407 spin_lock_irqsave(&hidg->write_spinlock, flags);
407release_write_pending_unlocked:
408 hidg->write_pending = 0; 408 hidg->write_pending = 0;
409 spin_unlock_irqrestore(&hidg->write_spinlock, flags); 409 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
410 410
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index b77f3126580e..c2011cd7df8c 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
945 break; 945 break;
946 } 946 }
947 if (&req->req != _req) { 947 if (&req->req != _req) {
948 ep->stopped = stopped;
948 spin_unlock_irqrestore(&ep->dev->lock, flags); 949 spin_unlock_irqrestore(&ep->dev->lock, flags);
949 return -EINVAL; 950 return -EINVAL;
950 } 951 }
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index f63f82450bf4..898339e5df10 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
866 (void) readl(&ep->dev->pci->pcimstctl); 866 (void) readl(&ep->dev->pci->pcimstctl);
867 867
868 writel(BIT(DMA_START), &dma->dmastat); 868 writel(BIT(DMA_START), &dma->dmastat);
869
870 if (!ep->is_in)
871 stop_out_naking(ep);
872} 869}
873 870
874static void start_dma(struct net2280_ep *ep, struct net2280_request *req) 871static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
@@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
907 writel(BIT(DMA_START), &dma->dmastat); 904 writel(BIT(DMA_START), &dma->dmastat);
908 return; 905 return;
909 } 906 }
907 stop_out_naking(ep);
910 } 908 }
911 909
912 tmp = dmactl_default; 910 tmp = dmactl_default;
@@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1275 break; 1273 break;
1276 } 1274 }
1277 if (&req->req != _req) { 1275 if (&req->req != _req) {
1276 ep->stopped = stopped;
1278 spin_unlock_irqrestore(&ep->dev->lock, flags); 1277 spin_unlock_irqrestore(&ep->dev->lock, flags);
1279 dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n", 1278 ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
1280 __func__);
1281 return -EINVAL; 1279 return -EINVAL;
1282 } 1280 }
1283 1281
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 934584f0a20a..6343fbacd244 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3204,6 +3204,9 @@ static int __init u132_hcd_init(void)
3204 printk(KERN_INFO "driver %s\n", hcd_name); 3204 printk(KERN_INFO "driver %s\n", hcd_name);
3205 workqueue = create_singlethread_workqueue("u132"); 3205 workqueue = create_singlethread_workqueue("u132");
3206 retval = platform_driver_register(&u132_platform_driver); 3206 retval = platform_driver_register(&u132_platform_driver);
3207 if (retval)
3208 destroy_workqueue(workqueue);
3209
3207 return retval; 3210 return retval;
3208} 3211}
3209 3212
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index c78be578abb0..d932cc31711e 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
516 return -1; 516 return -1;
517 517
518 writel(0, &dbc->regs->control); 518 writel(0, &dbc->regs->control);
519 xhci_dbc_mem_cleanup(xhci);
520 dbc->state = DS_DISABLED; 519 dbc->state = DS_DISABLED;
521 520
522 return 0; 521 return 0;
@@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
562 ret = xhci_do_dbc_stop(xhci); 561 ret = xhci_do_dbc_stop(xhci);
563 spin_unlock_irqrestore(&dbc->lock, flags); 562 spin_unlock_irqrestore(&dbc->lock, flags);
564 563
565 if (!ret) 564 if (!ret) {
565 xhci_dbc_mem_cleanup(xhci);
566 pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller); 566 pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
567 }
567} 568}
568 569
569static void 570static void
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e2eece693655..96a740543183 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1545 port_index = max_ports; 1545 port_index = max_ports;
1546 while (port_index--) { 1546 while (port_index--) {
1547 u32 t1, t2; 1547 u32 t1, t2;
1548 1548 int retries = 10;
1549retry:
1549 t1 = readl(ports[port_index]->addr); 1550 t1 = readl(ports[port_index]->addr);
1550 t2 = xhci_port_state_to_neutral(t1); 1551 t2 = xhci_port_state_to_neutral(t1);
1551 portsc_buf[port_index] = 0; 1552 portsc_buf[port_index] = 0;
1552 1553
1553 /* Bail out if a USB3 port has a new device in link training */ 1554 /*
1554 if ((hcd->speed >= HCD_USB3) && 1555 * Give a USB3 port in link training time to finish, but don't
1556 * prevent suspend as port might be stuck
1557 */
1558 if ((hcd->speed >= HCD_USB3) && retries-- &&
1555 (t1 & PORT_PLS_MASK) == XDEV_POLLING) { 1559 (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
1556 bus_state->bus_suspended = 0;
1557 spin_unlock_irqrestore(&xhci->lock, flags); 1560 spin_unlock_irqrestore(&xhci->lock, flags);
1558 xhci_dbg(xhci, "Bus suspend bailout, port in polling\n"); 1561 msleep(XHCI_PORT_POLLING_LFPS_TIME);
1559 return -EBUSY; 1562 spin_lock_irqsave(&xhci->lock, flags);
1563 xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
1564 port_index);
1565 goto retry;
1560 } 1566 }
1561
1562 /* suspend ports in U0, or bail out for new connect changes */ 1567 /* suspend ports in U0, or bail out for new connect changes */
1563 if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { 1568 if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
1564 if ((t1 & PORT_CSC) && wake_enabled) { 1569 if ((t1 & PORT_CSC) && wake_enabled) {
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index a6e463715779..671bce18782c 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
246 if (!xhci_rcar_wait_for_pll_active(hcd)) 246 if (!xhci_rcar_wait_for_pll_active(hcd))
247 return -ETIMEDOUT; 247 return -ETIMEDOUT;
248 248
249 xhci->quirks |= XHCI_TRUST_TX_LENGTH;
249 return xhci_rcar_download_firmware(hcd); 250 return xhci_rcar_download_firmware(hcd);
250} 251}
251 252
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 40fa25c4d041..9215a28dad40 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
1647 } 1647 }
1648 } 1648 }
1649 1649
1650 if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 && 1650 if ((portsc & PORT_PLC) &&
1651 DEV_SUPERSPEED_ANY(portsc)) { 1651 DEV_SUPERSPEED_ANY(portsc) &&
1652 ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
1653 (portsc & PORT_PLS_MASK) == XDEV_U1 ||
1654 (portsc & PORT_PLS_MASK) == XDEV_U2)) {
1652 xhci_dbg(xhci, "resume SS port %d finished\n", port_id); 1655 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
1653 /* We've just brought the device into U0 through either the 1656 /* We've just brought the device into U0/1/2 through either the
1654 * Resume state after a device remote wakeup, or through the 1657 * Resume state after a device remote wakeup, or through the
1655 * U3Exit state after a host-initiated resume. If it's a device 1658 * U3Exit state after a host-initiated resume. If it's a device
1656 * initiated remote wake, don't pass up the link state change, 1659 * initiated remote wake, don't pass up the link state change,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 652dc36e3012..9334cdee382a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -452,6 +452,14 @@ struct xhci_op_regs {
452 */ 452 */
453#define XHCI_DEFAULT_BESL 4 453#define XHCI_DEFAULT_BESL 4
454 454
455/*
 456 * USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports
 457 * to complete link training. Usually link training completes much faster
458 * so check status 10 times with 36ms sleep in places we need to wait for
459 * polling to complete.
460 */
461#define XHCI_PORT_POLLING_LFPS_TIME 36
462
455/** 463/**
456 * struct xhci_intr_reg - Interrupt Register Set 464 * struct xhci_intr_reg - Interrupt Register Set
457 * @irq_pending: IMAN - Interrupt Management Register. Used to enable 465 * @irq_pending: IMAN - Interrupt Management Register. Used to enable
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 4d72b7d1d383..04684849d683 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -547,7 +547,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
547 */ 547 */
548 hub->port_swap = USB251XB_DEF_PORT_SWAP; 548 hub->port_swap = USB251XB_DEF_PORT_SWAP;
549 of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) { 549 of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) {
550 if ((port >= 0) && (port <= data->port_cnt)) 550 if (port <= data->port_cnt)
551 hub->port_swap |= BIT(port); 551 hub->port_swap |= BIT(port);
552 } 552 }
553 553
@@ -612,7 +612,7 @@ static int usb251xb_probe(struct usb251xb *hub)
612 dev); 612 dev);
613 int err; 613 int err;
614 614
615 if (np) { 615 if (np && of_id) {
616 err = usb251xb_get_ofdata(hub, 616 err = usb251xb_get_ofdata(hub,
617 (struct usb251xb_data *)of_id->data); 617 (struct usb251xb_data *)of_id->data);
618 if (err) { 618 if (err) {
diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
index bcc23486c4ed..928c2cd6fc00 100644
--- a/drivers/usb/mtu3/Kconfig
+++ b/drivers/usb/mtu3/Kconfig
@@ -6,6 +6,7 @@ config USB_MTU3
6 tristate "MediaTek USB3 Dual Role controller" 6 tristate "MediaTek USB3 Dual Role controller"
7 depends on USB || USB_GADGET 7 depends on USB || USB_GADGET
8 depends on ARCH_MEDIATEK || COMPILE_TEST 8 depends on ARCH_MEDIATEK || COMPILE_TEST
9 depends on EXTCON || !EXTCON
9 select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD 10 select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
10 help 11 help
11 Say Y or M here if your system runs on MediaTek SoCs with 12 Say Y or M here if your system runs on MediaTek SoCs with
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fffe23ab0189..979bef9bfb6b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = {
80 { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */ 80 { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
81 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ 81 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
82 { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */ 82 { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
83 { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
83 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ 84 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
84 { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ 85 { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
85 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ 86 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8f5b17471759..1d8461ae2c34 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = {
609 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 609 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
610 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID), 610 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
611 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, 611 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
612 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
613 { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
612 { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) }, 614 { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
613 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) }, 615 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
614 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) }, 616 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index b863bedb55a1..5755f0df0025 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -567,7 +567,9 @@
567/* 567/*
568 * NovaTech product ids (FTDI_VID) 568 * NovaTech product ids (FTDI_VID)
569 */ 569 */
570#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */ 570#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
571#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
572#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
571 573
572/* 574/*
573 * Synapse Wireless product ids (FTDI_VID) 575 * Synapse Wireless product ids (FTDI_VID)
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index fc52ac75fbf6..18110225d506 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
366 if (!urbtrack) 366 if (!urbtrack)
367 return -ENOMEM; 367 return -ENOMEM;
368 368
369 kref_get(&mos_parport->ref_count);
370 urbtrack->mos_parport = mos_parport;
371 urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC); 369 urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
372 if (!urbtrack->urb) { 370 if (!urbtrack->urb) {
373 kfree(urbtrack); 371 kfree(urbtrack);
@@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
388 usb_sndctrlpipe(usbdev, 0), 386 usb_sndctrlpipe(usbdev, 0),
389 (unsigned char *)urbtrack->setup, 387 (unsigned char *)urbtrack->setup,
390 NULL, 0, async_complete, urbtrack); 388 NULL, 0, async_complete, urbtrack);
389 kref_get(&mos_parport->ref_count);
390 urbtrack->mos_parport = mos_parport;
391 kref_init(&urbtrack->ref_count); 391 kref_init(&urbtrack->ref_count);
392 INIT_LIST_HEAD(&urbtrack->urblist_entry); 392 INIT_LIST_HEAD(&urbtrack->urblist_entry);
393 393
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 11b21d9410f3..83869065b802 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
246#define QUECTEL_PRODUCT_EC25 0x0125 246#define QUECTEL_PRODUCT_EC25 0x0125
247#define QUECTEL_PRODUCT_BG96 0x0296 247#define QUECTEL_PRODUCT_BG96 0x0296
248#define QUECTEL_PRODUCT_EP06 0x0306 248#define QUECTEL_PRODUCT_EP06 0x0306
249#define QUECTEL_PRODUCT_EM12 0x0512
249 250
250#define CMOTECH_VENDOR_ID 0x16d8 251#define CMOTECH_VENDOR_ID 0x16d8
251#define CMOTECH_PRODUCT_6001 0x6001 252#define CMOTECH_PRODUCT_6001 0x6001
@@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
1066 .driver_info = RSVD(3) }, 1067 .driver_info = RSVD(3) },
1067 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ 1068 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
1068 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ 1069 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
1069 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1070 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
1071 .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
1070 /* Quectel products using Qualcomm vendor ID */ 1072 /* Quectel products using Qualcomm vendor ID */
1071 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)}, 1073 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
1072 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20), 1074 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
1087 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), 1089 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
1088 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, 1090 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
1089 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, 1091 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
1092 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
1093 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
1094 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
1090 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1095 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1091 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1096 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1092 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1097 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
1940 .driver_info = RSVD(4) }, 1945 .driver_info = RSVD(4) },
1941 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */ 1946 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
1942 .driver_info = RSVD(4) }, 1947 .driver_info = RSVD(4) },
1943 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ 1948 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
1944 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ 1949 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
1945 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */ 1950 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
1946 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */ 1951 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
1952 .driver_info = RSVD(4) },
1953 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
1947 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) }, 1954 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
1948 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) }, 1955 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
1949 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) }, 1956 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 0f62db091d8d..a2233d72ae7c 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -37,6 +37,7 @@
37 S(SRC_ATTACHED), \ 37 S(SRC_ATTACHED), \
38 S(SRC_STARTUP), \ 38 S(SRC_STARTUP), \
39 S(SRC_SEND_CAPABILITIES), \ 39 S(SRC_SEND_CAPABILITIES), \
40 S(SRC_SEND_CAPABILITIES_TIMEOUT), \
40 S(SRC_NEGOTIATE_CAPABILITIES), \ 41 S(SRC_NEGOTIATE_CAPABILITIES), \
41 S(SRC_TRANSITION_SUPPLY), \ 42 S(SRC_TRANSITION_SUPPLY), \
42 S(SRC_READY), \ 43 S(SRC_READY), \
@@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port)
2966 /* port->hard_reset_count = 0; */ 2967 /* port->hard_reset_count = 0; */
2967 port->caps_count = 0; 2968 port->caps_count = 0;
2968 port->pd_capable = true; 2969 port->pd_capable = true;
2969 tcpm_set_state_cond(port, hard_reset_state(port), 2970 tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
2970 PD_T_SEND_SOURCE_CAP); 2971 PD_T_SEND_SOURCE_CAP);
2971 } 2972 }
2972 break; 2973 break;
2974 case SRC_SEND_CAPABILITIES_TIMEOUT:
2975 /*
2976 * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
2977 *
2978 * PD 2.0 sinks are supposed to accept src-capabilities with a
2979 * 3.0 header and simply ignore any src PDOs which the sink does
2980 * not understand such as PPS but some 2.0 sinks instead ignore
2981 * the entire PD_DATA_SOURCE_CAP message, causing contract
2982 * negotiation to fail.
2983 *
2984 * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
2985 * sending src-capabilities with a lower PD revision to
2986 * make these broken sinks work.
2987 */
2988 if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
2989 tcpm_set_state(port, HARD_RESET_SEND, 0);
2990 } else if (port->negotiated_rev > PD_REV20) {
2991 port->negotiated_rev--;
2992 port->hard_reset_count = 0;
2993 tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2994 } else {
2995 tcpm_set_state(port, hard_reset_state(port), 0);
2996 }
2997 break;
2973 case SRC_NEGOTIATE_CAPABILITIES: 2998 case SRC_NEGOTIATE_CAPABILITIES:
2974 ret = tcpm_pd_check_request(port); 2999 ret = tcpm_pd_check_request(port);
2975 if (ret < 0) { 3000 if (ret < 0) {
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 423208e19383..6770afd40765 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev)
615 wcove->dev = &pdev->dev; 615 wcove->dev = &pdev->dev;
616 wcove->regmap = pmic->regmap; 616 wcove->regmap = pmic->regmap;
617 617
618 irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, 618 irq = platform_get_irq(pdev, 0);
619 platform_get_irq(pdev, 0)); 619 if (irq < 0) {
620 dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
621 return irq;
622 }
623
624 irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
620 if (irq < 0) 625 if (irq < 0)
621 return irq; 626 return irq;
622 627
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index a25659b5a5d1..3fa20e95a6bb 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1661,11 +1661,11 @@ static void __init vfio_pci_fill_ids(void)
1661 rc = pci_add_dynid(&vfio_pci_driver, vendor, device, 1661 rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
1662 subvendor, subdevice, class, class_mask, 0); 1662 subvendor, subdevice, class, class_mask, 0);
1663 if (rc) 1663 if (rc)
1664 pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n", 1664 pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
1665 vendor, device, subvendor, subdevice, 1665 vendor, device, subvendor, subdevice,
1666 class, class_mask, rc); 1666 class, class_mask, rc);
1667 else 1667 else
1668 pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n", 1668 pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
1669 vendor, device, subvendor, subdevice, 1669 vendor, device, subvendor, subdevice,
1670 class, class_mask); 1670 class, class_mask);
1671 } 1671 }
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 8dbb270998f4..6b64e45a5269 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1398,7 +1398,7 @@ unlock_exit:
1398 mutex_unlock(&container->lock); 1398 mutex_unlock(&container->lock);
1399} 1399}
1400 1400
1401const struct vfio_iommu_driver_ops tce_iommu_driver_ops = { 1401static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
1402 .name = "iommu-vfio-powerpc", 1402 .name = "iommu-vfio-powerpc",
1403 .owner = THIS_MODULE, 1403 .owner = THIS_MODULE,
1404 .open = tce_iommu_open, 1404 .open = tce_iommu_open,
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 73652e21efec..d0f731c9920a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
58MODULE_PARM_DESC(disable_hugepages, 58MODULE_PARM_DESC(disable_hugepages,
59 "Disable VFIO IOMMU support for IOMMU hugepages."); 59 "Disable VFIO IOMMU support for IOMMU hugepages.");
60 60
61static unsigned int dma_entry_limit __read_mostly = U16_MAX;
62module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
63MODULE_PARM_DESC(dma_entry_limit,
64 "Maximum number of user DMA mappings per container (65535).");
65
61struct vfio_iommu { 66struct vfio_iommu {
62 struct list_head domain_list; 67 struct list_head domain_list;
63 struct vfio_domain *external_domain; /* domain for external user */ 68 struct vfio_domain *external_domain; /* domain for external user */
64 struct mutex lock; 69 struct mutex lock;
65 struct rb_root dma_list; 70 struct rb_root dma_list;
66 struct blocking_notifier_head notifier; 71 struct blocking_notifier_head notifier;
72 unsigned int dma_avail;
67 bool v2; 73 bool v2;
68 bool nesting; 74 bool nesting;
69}; 75};
@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
836 vfio_unlink_dma(iommu, dma); 842 vfio_unlink_dma(iommu, dma);
837 put_task_struct(dma->task); 843 put_task_struct(dma->task);
838 kfree(dma); 844 kfree(dma);
845 iommu->dma_avail++;
839} 846}
840 847
841static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu) 848static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
1081 goto out_unlock; 1088 goto out_unlock;
1082 } 1089 }
1083 1090
1091 if (!iommu->dma_avail) {
1092 ret = -ENOSPC;
1093 goto out_unlock;
1094 }
1095
1084 dma = kzalloc(sizeof(*dma), GFP_KERNEL); 1096 dma = kzalloc(sizeof(*dma), GFP_KERNEL);
1085 if (!dma) { 1097 if (!dma) {
1086 ret = -ENOMEM; 1098 ret = -ENOMEM;
1087 goto out_unlock; 1099 goto out_unlock;
1088 } 1100 }
1089 1101
1102 iommu->dma_avail--;
1090 dma->iova = iova; 1103 dma->iova = iova;
1091 dma->vaddr = vaddr; 1104 dma->vaddr = vaddr;
1092 dma->prot = prot; 1105 dma->prot = prot;
@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
1583 1596
1584 INIT_LIST_HEAD(&iommu->domain_list); 1597 INIT_LIST_HEAD(&iommu->domain_list);
1585 iommu->dma_list = RB_ROOT; 1598 iommu->dma_list = RB_ROOT;
1599 iommu->dma_avail = dma_entry_limit;
1586 mutex_init(&iommu->lock); 1600 mutex_init(&iommu->lock);
1587 BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier); 1601 BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
1588 1602
diff --git a/drivers/video/fbdev/aty/radeon_pm.c b/drivers/video/fbdev/aty/radeon_pm.c
index e695adb0e573..2dc5703eac51 100644
--- a/drivers/video/fbdev/aty/radeon_pm.c
+++ b/drivers/video/fbdev/aty/radeon_pm.c
@@ -2844,8 +2844,8 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlis
2844 * in some desktop G4s), Via (M9+ chip on iBook G4) and 2844 * in some desktop G4s), Via (M9+ chip on iBook G4) and
2845 * Snowy (M11 chip on iBook G4 manufactured after July 2005) 2845 * Snowy (M11 chip on iBook G4 manufactured after July 2005)
2846 */ 2846 */
2847 if (!strcmp(rinfo->of_node->name, "ATY,JasperParent") || 2847 if (of_node_name_eq(rinfo->of_node, "ATY,JasperParent") ||
2848 !strcmp(rinfo->of_node->name, "ATY,SnowyParent")) { 2848 of_node_name_eq(rinfo->of_node, "ATY,SnowyParent")) {
2849 rinfo->reinit_func = radeon_reinitialize_M10; 2849 rinfo->reinit_func = radeon_reinitialize_M10;
2850 rinfo->pm_mode |= radeon_pm_off; 2850 rinfo->pm_mode |= radeon_pm_off;
2851 } 2851 }
@@ -2855,7 +2855,7 @@ void radeonfb_pm_init(struct radeonfb_info *rinfo, int dynclk, int ignore_devlis
2855 rinfo->pm_mode |= radeon_pm_off; 2855 rinfo->pm_mode |= radeon_pm_off;
2856 } 2856 }
2857#endif 2857#endif
2858 if (!strcmp(rinfo->of_node->name, "ATY,ViaParent")) { 2858 if (of_node_name_eq(rinfo->of_node, "ATY,ViaParent")) {
2859 rinfo->reinit_func = radeon_reinitialize_M9P; 2859 rinfo->reinit_func = radeon_reinitialize_M9P;
2860 rinfo->pm_mode |= radeon_pm_off; 2860 rinfo->pm_mode |= radeon_pm_off;
2861 } 2861 }
diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c
index 9af54c2368fd..a6dce1a78490 100644
--- a/drivers/video/fbdev/cg14.c
+++ b/drivers/video/fbdev/cg14.c
@@ -486,8 +486,8 @@ static int cg14_probe(struct platform_device *op)
486 info->var.xres); 486 info->var.xres);
487 info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres); 487 info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres);
488 488
489 if (!strcmp(dp->parent->name, "sbus") || 489 if (of_node_name_eq(dp->parent, "sbus") ||
490 !strcmp(dp->parent->name, "sbi")) { 490 of_node_name_eq(dp->parent, "sbi")) {
491 info->fix.smem_start = op->resource[0].start; 491 info->fix.smem_start = op->resource[0].start;
492 par->iospace = op->resource[0].flags & IORESOURCE_BITS; 492 par->iospace = op->resource[0].flags & IORESOURCE_BITS;
493 } else { 493 } else {
diff --git a/drivers/video/fbdev/cg3.c b/drivers/video/fbdev/cg3.c
index 1bd95b02f3aa..6d42def8436b 100644
--- a/drivers/video/fbdev/cg3.c
+++ b/drivers/video/fbdev/cg3.c
@@ -369,7 +369,7 @@ static int cg3_probe(struct platform_device *op)
369 info->var.red.length = 8; 369 info->var.red.length = 8;
370 info->var.green.length = 8; 370 info->var.green.length = 8;
371 info->var.blue.length = 8; 371 info->var.blue.length = 8;
372 if (!strcmp(dp->name, "cgRDI")) 372 if (of_node_name_eq(dp, "cgRDI"))
373 par->flags |= CG3_FLAG_RDI; 373 par->flags |= CG3_FLAG_RDI;
374 if (par->flags & CG3_FLAG_RDI) 374 if (par->flags & CG3_FLAG_RDI)
375 cg3_rdi_maybe_fixup_var(&info->var, dp); 375 cg3_rdi_maybe_fixup_var(&info->var, dp);
diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
index 40182ed85648..ca549e1532e6 100644
--- a/drivers/video/fbdev/chipsfb.c
+++ b/drivers/video/fbdev/chipsfb.c
@@ -349,7 +349,7 @@ static void init_chips(struct fb_info *p, unsigned long addr)
349static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) 349static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
350{ 350{
351 struct fb_info *p; 351 struct fb_info *p;
352 unsigned long addr, size; 352 unsigned long addr;
353 unsigned short cmd; 353 unsigned short cmd;
354 int rc = -ENODEV; 354 int rc = -ENODEV;
355 355
@@ -361,7 +361,6 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
361 if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) 361 if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
362 goto err_disable; 362 goto err_disable;
363 addr = pci_resource_start(dp, 0); 363 addr = pci_resource_start(dp, 0);
364 size = pci_resource_len(dp, 0);
365 if (addr == 0) 364 if (addr == 0)
366 goto err_disable; 365 goto err_disable;
367 366
diff --git a/drivers/video/fbdev/core/fb_cmdline.c b/drivers/video/fbdev/core/fb_cmdline.c
index 39509ccd92f1..3b5bd666b952 100644
--- a/drivers/video/fbdev/core/fb_cmdline.c
+++ b/drivers/video/fbdev/core/fb_cmdline.c
@@ -75,36 +75,33 @@ EXPORT_SYMBOL(fb_get_options);
75 * NOTE: This function is a __setup and __init function. 75 * NOTE: This function is a __setup and __init function.
76 * It only stores the options. Drivers have to call 76 * It only stores the options. Drivers have to call
77 * fb_get_options() as necessary. 77 * fb_get_options() as necessary.
78 *
79 * Returns zero.
80 *
81 */ 78 */
82static int __init video_setup(char *options) 79static int __init video_setup(char *options)
83{ 80{
84 int i, global = 0;
85
86 if (!options || !*options) 81 if (!options || !*options)
87 global = 1; 82 goto out;
88 83
89 if (!global && !strncmp(options, "ofonly", 6)) { 84 if (!strncmp(options, "ofonly", 6)) {
90 ofonly = 1; 85 ofonly = 1;
91 global = 1; 86 goto out;
92 } 87 }
93 88
94 if (!global && !strchr(options, ':')) { 89 if (strchr(options, ':')) {
95 fb_mode_option = options; 90 /* named */
96 global = 1; 91 int i;
97 }
98 92
99 if (!global) {
100 for (i = 0; i < FB_MAX; i++) { 93 for (i = 0; i < FB_MAX; i++) {
101 if (video_options[i] == NULL) { 94 if (video_options[i] == NULL) {
102 video_options[i] = options; 95 video_options[i] = options;
103 break; 96 break;
104 } 97 }
105 } 98 }
99 } else {
100 /* global */
101 fb_mode_option = options;
106 } 102 }
107 103
104out:
108 return 1; 105 return 1;
109} 106}
110__setup("video=", video_setup); 107__setup("video=", video_setup);
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index bfa1360ec750..cd059a801662 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -656,11 +656,14 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
656 kfree(save); 656 kfree(save);
657 } 657 }
658 658
659 if (logo_shown == FBCON_LOGO_DONTSHOW)
660 return;
661
659 if (logo_lines > vc->vc_bottom) { 662 if (logo_lines > vc->vc_bottom) {
660 logo_shown = FBCON_LOGO_CANSHOW; 663 logo_shown = FBCON_LOGO_CANSHOW;
661 printk(KERN_INFO 664 printk(KERN_INFO
662 "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n"); 665 "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n");
663 } else if (logo_shown != FBCON_LOGO_DONTSHOW) { 666 } else {
664 logo_shown = FBCON_LOGO_DRAW; 667 logo_shown = FBCON_LOGO_DRAW;
665 vc->vc_top = logo_lines; 668 vc->vc_top = logo_lines;
666 } 669 }
@@ -999,7 +1002,7 @@ static const char *fbcon_startup(void)
999 if (!softback_buf) { 1002 if (!softback_buf) {
1000 softback_buf = 1003 softback_buf =
1001 (unsigned long) 1004 (unsigned long)
1002 kmalloc(fbcon_softback_size, 1005 kvmalloc(fbcon_softback_size,
1003 GFP_KERNEL); 1006 GFP_KERNEL);
1004 if (!softback_buf) { 1007 if (!softback_buf) {
1005 fbcon_softback_size = 0; 1008 fbcon_softback_size = 0;
@@ -1008,7 +1011,7 @@ static const char *fbcon_startup(void)
1008 } 1011 }
1009 } else { 1012 } else {
1010 if (softback_buf) { 1013 if (softback_buf) {
1011 kfree((void *) softback_buf); 1014 kvfree((void *) softback_buf);
1012 softback_buf = 0; 1015 softback_buf = 0;
1013 softback_top = 0; 1016 softback_top = 0;
1014 } 1017 }
@@ -1066,6 +1069,9 @@ static void fbcon_init(struct vc_data *vc, int init)
1066 1069
1067 cap = info->flags; 1070 cap = info->flags;
1068 1071
1072 if (console_loglevel <= CONSOLE_LOGLEVEL_QUIET)
1073 logo_shown = FBCON_LOGO_DONTSHOW;
1074
1069 if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW || 1075 if (vc != svc || logo_shown == FBCON_LOGO_DONTSHOW ||
1070 (info->fix.type == FB_TYPE_TEXT)) 1076 (info->fix.type == FB_TYPE_TEXT))
1071 logo = 0; 1077 logo = 0;
@@ -3672,7 +3678,7 @@ static void fbcon_exit(void)
3672 } 3678 }
3673#endif 3679#endif
3674 3680
3675 kfree((void *)softback_buf); 3681 kvfree((void *)softback_buf);
3676 softback_buf = 0UL; 3682 softback_buf = 0UL;
3677 3683
3678 for_each_registered_fb(i) { 3684 for_each_registered_fb(i) {
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index cb43a2258c51..4721491e6c8c 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -431,6 +431,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
431{ 431{
432 unsigned int x; 432 unsigned int x;
433 433
434 if (image->width > info->var.xres || image->height > info->var.yres)
435 return;
436
434 if (rotate == FB_ROTATE_UR) { 437 if (rotate == FB_ROTATE_UR) {
435 for (x = 0; 438 for (x = 0;
436 x < num && image->dx + image->width <= info->var.xres; 439 x < num && image->dx + image->width <= info->var.xres;
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index dd3128990776..3558a70a6664 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -978,6 +978,8 @@ void fb_edid_to_monspecs(unsigned char *edid, struct fb_monspecs *specs)
978 get_monspecs(edid, specs); 978 get_monspecs(edid, specs);
979 979
980 specs->modedb = fb_create_modedb(edid, &specs->modedb_len, specs); 980 specs->modedb = fb_create_modedb(edid, &specs->modedb_len, specs);
981 if (!specs->modedb)
982 return;
981 983
982 /* 984 /*
983 * Workaround for buggy EDIDs that sets that the first 985 * Workaround for buggy EDIDs that sets that the first
diff --git a/drivers/video/fbdev/ffb.c b/drivers/video/fbdev/ffb.c
index 6b1915872af1..b7aee0c427a8 100644
--- a/drivers/video/fbdev/ffb.c
+++ b/drivers/video/fbdev/ffb.c
@@ -944,7 +944,7 @@ static int ffb_probe(struct platform_device *op)
944 944
945 info->var.accel_flags = FB_ACCELF_TEXT; 945 info->var.accel_flags = FB_ACCELF_TEXT;
946 946
947 if (!strcmp(dp->name, "SUNW,afb")) 947 if (of_node_name_eq(dp, "SUNW,afb"))
948 par->flags |= FFB_FLAG_AFB; 948 par->flags |= FFB_FLAG_AFB;
949 949
950 par->board_type = of_getintprop_default(dp, "board_type", 0); 950 par->board_type = of_getintprop_default(dp, "board_type", 0);
diff --git a/drivers/video/fbdev/geode/gxfb_core.c b/drivers/video/fbdev/geode/gxfb_core.c
index f4f76373b2a8..b1906cf5a8f0 100644
--- a/drivers/video/fbdev/geode/gxfb_core.c
+++ b/drivers/video/fbdev/geode/gxfb_core.c
@@ -33,6 +33,8 @@
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/cs5535.h> 34#include <linux/cs5535.h>
35 35
36#include <asm/olpc.h>
37
36#include "gxfb.h" 38#include "gxfb.h"
37 39
38static char *mode_option; 40static char *mode_option;
@@ -107,9 +109,6 @@ static struct fb_videomode gx_modedb[] = {
107 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA }, 109 FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
108}; 110};
109 111
110#ifdef CONFIG_OLPC
111#include <asm/olpc.h>
112
113static struct fb_videomode gx_dcon_modedb[] = { 112static struct fb_videomode gx_dcon_modedb[] = {
114 /* The only mode the DCON has is 1200x900 */ 113 /* The only mode the DCON has is 1200x900 */
115 { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3, 114 { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3,
@@ -128,14 +127,6 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
128 } 127 }
129} 128}
130 129
131#else
132static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
133{
134 *modedb = (struct fb_videomode *) gx_modedb;
135 *size = ARRAY_SIZE(gx_modedb);
136}
137#endif
138
139static int gxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) 130static int gxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
140{ 131{
141 if (var->xres > 1600 || var->yres > 1200) 132 if (var->xres > 1600 || var->yres > 1200)
diff --git a/drivers/video/fbdev/geode/lxfb_core.c b/drivers/video/fbdev/geode/lxfb_core.c
index 138da6cb6cbc..17ab905811b1 100644
--- a/drivers/video/fbdev/geode/lxfb_core.c
+++ b/drivers/video/fbdev/geode/lxfb_core.c
@@ -23,6 +23,8 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/uaccess.h> 24#include <linux/uaccess.h>
25 25
26#include <asm/olpc.h>
27
26#include "lxfb.h" 28#include "lxfb.h"
27 29
28static char *mode_option; 30static char *mode_option;
@@ -216,9 +218,6 @@ static struct fb_videomode geode_modedb[] = {
216 0, FB_VMODE_NONINTERLACED, 0 }, 218 0, FB_VMODE_NONINTERLACED, 0 },
217}; 219};
218 220
219#ifdef CONFIG_OLPC
220#include <asm/olpc.h>
221
222static struct fb_videomode olpc_dcon_modedb[] = { 221static struct fb_videomode olpc_dcon_modedb[] = {
223 /* The only mode the DCON has is 1200x900 */ 222 /* The only mode the DCON has is 1200x900 */
224 { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3, 223 { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3,
@@ -237,14 +236,6 @@ static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
237 } 236 }
238} 237}
239 238
240#else
241static void get_modedb(struct fb_videomode **modedb, unsigned int *size)
242{
243 *modedb = (struct fb_videomode *) geode_modedb;
244 *size = ARRAY_SIZE(geode_modedb);
245}
246#endif
247
248static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) 239static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
249{ 240{
250 if (var->xres > 1920 || var->yres > 1440) 241 if (var->xres > 1920 || var->yres > 1440)
diff --git a/drivers/video/fbdev/imsttfb.c b/drivers/video/fbdev/imsttfb.c
index 5d9670daf60e..4b9615e4ce74 100644
--- a/drivers/video/fbdev/imsttfb.c
+++ b/drivers/video/fbdev/imsttfb.c
@@ -1497,8 +1497,8 @@ static int imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1497 switch (pdev->device) { 1497 switch (pdev->device) {
1498 case PCI_DEVICE_ID_IMS_TT128: /* IMS,tt128mbA */ 1498 case PCI_DEVICE_ID_IMS_TT128: /* IMS,tt128mbA */
1499 par->ramdac = IBM; 1499 par->ramdac = IBM;
1500 if (dp && ((strcmp(dp->name, "IMS,tt128mb8") == 0) || 1500 if (of_node_name_eq(dp, "IMS,tt128mb8") ||
1501 (strcmp(dp->name, "IMS,tt128mb8A") == 0))) 1501 of_node_name_eq(dp, "IMS,tt128mb8A"))
1502 par->ramdac = TVP; 1502 par->ramdac = TVP;
1503 break; 1503 break;
1504 case PCI_DEVICE_ID_IMS_TT3D: /* IMS,tt3d */ 1504 case PCI_DEVICE_ID_IMS_TT3D: /* IMS,tt3d */
diff --git a/drivers/video/fbdev/mbx/mbxdebugfs.c b/drivers/video/fbdev/mbx/mbxdebugfs.c
index 2bd328883178..09af721638fb 100644
--- a/drivers/video/fbdev/mbx/mbxdebugfs.c
+++ b/drivers/video/fbdev/mbx/mbxdebugfs.c
@@ -211,36 +211,22 @@ static const struct file_operations misc_fops = {
211static void mbxfb_debugfs_init(struct fb_info *fbi) 211static void mbxfb_debugfs_init(struct fb_info *fbi)
212{ 212{
213 struct mbxfb_info *mfbi = fbi->par; 213 struct mbxfb_info *mfbi = fbi->par;
214 struct mbxfb_debugfs_data *dbg; 214 struct dentry *dir;
215 215
216 dbg = kzalloc(sizeof(struct mbxfb_debugfs_data), GFP_KERNEL); 216 dir = debugfs_create_dir("mbxfb", NULL);
217 mfbi->debugfs_data = dbg; 217 mfbi->debugfs_dir = dir;
218 218
219 dbg->dir = debugfs_create_dir("mbxfb", NULL); 219 debugfs_create_file("sysconf", 0444, dir, fbi, &sysconf_fops);
220 dbg->sysconf = debugfs_create_file("sysconf", 0444, dbg->dir, 220 debugfs_create_file("clock", 0444, dir, fbi, &clock_fops);
221 fbi, &sysconf_fops); 221 debugfs_create_file("display", 0444, dir, fbi, &display_fops);
222 dbg->clock = debugfs_create_file("clock", 0444, dbg->dir, 222 debugfs_create_file("gsctl", 0444, dir, fbi, &gsctl_fops);
223 fbi, &clock_fops); 223 debugfs_create_file("sdram", 0444, dir, fbi, &sdram_fops);
224 dbg->display = debugfs_create_file("display", 0444, dbg->dir, 224 debugfs_create_file("misc", 0444, dir, fbi, &misc_fops);
225 fbi, &display_fops);
226 dbg->gsctl = debugfs_create_file("gsctl", 0444, dbg->dir,
227 fbi, &gsctl_fops);
228 dbg->sdram = debugfs_create_file("sdram", 0444, dbg->dir,
229 fbi, &sdram_fops);
230 dbg->misc = debugfs_create_file("misc", 0444, dbg->dir,
231 fbi, &misc_fops);
232} 225}
233 226
234static void mbxfb_debugfs_remove(struct fb_info *fbi) 227static void mbxfb_debugfs_remove(struct fb_info *fbi)
235{ 228{
236 struct mbxfb_info *mfbi = fbi->par; 229 struct mbxfb_info *mfbi = fbi->par;
237 struct mbxfb_debugfs_data *dbg = mfbi->debugfs_data; 230
238 231 debugfs_remove_recursive(mfbi->debugfs_dir);
239 debugfs_remove(dbg->misc);
240 debugfs_remove(dbg->sdram);
241 debugfs_remove(dbg->gsctl);
242 debugfs_remove(dbg->display);
243 debugfs_remove(dbg->clock);
244 debugfs_remove(dbg->sysconf);
245 debugfs_remove(dbg->dir);
246} 232}
diff --git a/drivers/video/fbdev/mbx/mbxfb.c b/drivers/video/fbdev/mbx/mbxfb.c
index 539b85da0897..6ded480a69b4 100644
--- a/drivers/video/fbdev/mbx/mbxfb.c
+++ b/drivers/video/fbdev/mbx/mbxfb.c
@@ -74,7 +74,7 @@ struct mbxfb_info {
74 74
75 u32 pseudo_palette[MAX_PALETTES]; 75 u32 pseudo_palette[MAX_PALETTES];
76#ifdef CONFIG_FB_MBX_DEBUG 76#ifdef CONFIG_FB_MBX_DEBUG
77 void *debugfs_data; 77 struct dentry *debugfs_dir;
78#endif 78#endif
79 79
80}; 80};
diff --git a/drivers/video/fbdev/offb.c b/drivers/video/fbdev/offb.c
index 057d3cdef92e..fbc6eafb63c7 100644
--- a/drivers/video/fbdev/offb.c
+++ b/drivers/video/fbdev/offb.c
@@ -141,6 +141,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
141 /* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */ 141 /* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
142 out_le32(par->cmap_adr + 0x58, 142 out_le32(par->cmap_adr + 0x58,
143 in_le32(par->cmap_adr + 0x58) & ~0x20); 143 in_le32(par->cmap_adr + 0x58) & ~0x20);
144 /* fall through */
144 case cmap_r128: 145 case cmap_r128:
145 /* Set palette index & data */ 146 /* Set palette index & data */
146 out_8(par->cmap_adr + 0xb0, regno); 147 out_8(par->cmap_adr + 0xb0, regno);
@@ -210,6 +211,7 @@ static int offb_blank(int blank, struct fb_info *info)
210 /* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */ 211 /* Clear PALETTE_ACCESS_CNTL in DAC_CNTL */
211 out_le32(par->cmap_adr + 0x58, 212 out_le32(par->cmap_adr + 0x58,
212 in_le32(par->cmap_adr + 0x58) & ~0x20); 213 in_le32(par->cmap_adr + 0x58) & ~0x20);
214 /* fall through */
213 case cmap_r128: 215 case cmap_r128:
214 /* Set palette index & data */ 216 /* Set palette index & data */
215 out_8(par->cmap_adr + 0xb0, i); 217 out_8(par->cmap_adr + 0xb0, i);
@@ -646,7 +648,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
646 } 648 }
647#endif 649#endif
648 /* kludge for valkyrie */ 650 /* kludge for valkyrie */
649 if (strcmp(dp->name, "valkyrie") == 0) 651 if (of_node_name_eq(dp, "valkyrie"))
650 address += 0x1000; 652 address += 0x1000;
651 offb_init_fb(no_real_node ? "bootx" : NULL, 653 offb_init_fb(no_real_node ? "bootx" : NULL,
652 width, height, depth, pitch, address, 654 width, height, depth, pitch, address,
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index b4bcf3a4a647..b5956a1a30d4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -110,19 +110,12 @@ DEFINE_SHOW_ATTRIBUTE(dss);
110 110
111static struct dentry *dss_debugfs_dir; 111static struct dentry *dss_debugfs_dir;
112 112
113static int dss_initialize_debugfs(void) 113static void dss_initialize_debugfs(void)
114{ 114{
115 dss_debugfs_dir = debugfs_create_dir("omapdss", NULL); 115 dss_debugfs_dir = debugfs_create_dir("omapdss", NULL);
116 if (IS_ERR(dss_debugfs_dir)) {
117 int err = PTR_ERR(dss_debugfs_dir);
118 dss_debugfs_dir = NULL;
119 return err;
120 }
121 116
122 debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir, 117 debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
123 &dss_debug_dump_clocks, &dss_fops); 118 &dss_debug_dump_clocks, &dss_fops);
124
125 return 0;
126} 119}
127 120
128static void dss_uninitialize_debugfs(void) 121static void dss_uninitialize_debugfs(void)
@@ -130,26 +123,19 @@ static void dss_uninitialize_debugfs(void)
130 debugfs_remove_recursive(dss_debugfs_dir); 123 debugfs_remove_recursive(dss_debugfs_dir);
131} 124}
132 125
133int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) 126void dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
134{ 127{
135 struct dentry *d; 128 debugfs_create_file(name, S_IRUGO, dss_debugfs_dir, write, &dss_fops);
136
137 d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
138 write, &dss_fops);
139
140 return PTR_ERR_OR_ZERO(d);
141} 129}
142#else /* CONFIG_FB_OMAP2_DSS_DEBUGFS */ 130#else /* CONFIG_FB_OMAP2_DSS_DEBUGFS */
143static inline int dss_initialize_debugfs(void) 131static inline void dss_initialize_debugfs(void)
144{ 132{
145 return 0;
146} 133}
147static inline void dss_uninitialize_debugfs(void) 134static inline void dss_uninitialize_debugfs(void)
148{ 135{
149} 136}
150int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)) 137void dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
151{ 138{
152 return 0;
153} 139}
154#endif /* CONFIG_FB_OMAP2_DSS_DEBUGFS */ 140#endif /* CONFIG_FB_OMAP2_DSS_DEBUGFS */
155 141
@@ -182,15 +168,11 @@ static struct notifier_block omap_dss_pm_notif_block = {
182 168
183static int __init omap_dss_probe(struct platform_device *pdev) 169static int __init omap_dss_probe(struct platform_device *pdev)
184{ 170{
185 int r;
186
187 core.pdev = pdev; 171 core.pdev = pdev;
188 172
189 dss_features_init(omapdss_get_version()); 173 dss_features_init(omapdss_get_version());
190 174
191 r = dss_initialize_debugfs(); 175 dss_initialize_debugfs();
192 if (r)
193 goto err_debugfs;
194 176
195 if (def_disp_name) 177 if (def_disp_name)
196 core.default_display_name = def_disp_name; 178 core.default_display_name = def_disp_name;
@@ -198,10 +180,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
198 register_pm_notifier(&omap_dss_pm_notif_block); 180 register_pm_notifier(&omap_dss_pm_notif_block);
199 181
200 return 0; 182 return 0;
201
202err_debugfs:
203
204 return r;
205} 183}
206 184
207static int omap_dss_remove(struct platform_device *pdev) 185static int omap_dss_remove(struct platform_device *pdev)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index f1eb8b0f8a2a..5ce893c1923d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -60,7 +60,7 @@ omapdss_of_get_next_port(const struct device_node *parent,
60 return NULL; 60 return NULL;
61 } 61 }
62 prev = port; 62 prev = port;
63 } while (of_node_cmp(port->name, "port") != 0); 63 } while (!of_node_name_eq(port, "port"));
64 64
65 of_node_put(ports); 65 of_node_put(ports);
66 } 66 }
@@ -83,7 +83,7 @@ omapdss_of_get_next_endpoint(const struct device_node *parent,
83 if (!ep) 83 if (!ep)
84 return NULL; 84 return NULL;
85 prev = ep; 85 prev = ep;
86 } while (of_node_cmp(ep->name, "endpoint") != 0); 86 } while (!of_node_name_eq(ep, "endpoint"));
87 87
88 return ep; 88 return ep;
89} 89}
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
index a3cc0ca8f9d2..b1a354494144 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
@@ -214,7 +214,7 @@ struct platform_device *dss_get_core_pdev(void);
214int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask); 214int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
215void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask); 215void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
216int dss_set_min_bus_tput(struct device *dev, unsigned long tput); 216int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
217int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *)); 217void dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
218 218
219/* display */ 219/* display */
220int dss_suspend_all_devices(void); 220int dss_suspend_all_devices(void);
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
index fa72e735dad2..d146793dd044 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4_core.c
@@ -712,7 +712,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
712 else 712 else
713 acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT; 713 acore.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
714 /* 714 /*
715 * The I2S input word length is twice the lenght given in the IEC-60958 715 * The I2S input word length is twice the length given in the IEC-60958
716 * status word. If the word size is greater than 716 * status word. If the word size is greater than
717 * 20 bits, increment by one. 717 * 20 bits, increment by one.
718 */ 718 */
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 4061a20cfe24..3b361bc9feb8 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -667,10 +667,10 @@ static int ssd1307fb_probe(struct i2c_client *client,
667 667
668 if (par->reset) { 668 if (par->reset) {
669 /* Reset the screen */ 669 /* Reset the screen */
670 gpiod_set_value_cansleep(par->reset, 0);
671 udelay(4);
672 gpiod_set_value_cansleep(par->reset, 1); 670 gpiod_set_value_cansleep(par->reset, 1);
673 udelay(4); 671 udelay(4);
672 gpiod_set_value_cansleep(par->reset, 0);
673 udelay(4);
674 } 674 }
675 675
676 if (par->vbat_reg) { 676 if (par->vbat_reg) {
diff --git a/drivers/video/fbdev/via/viafbdev.c b/drivers/video/fbdev/via/viafbdev.c
index 7bb7e90b8f00..bdf5a0ea876d 100644
--- a/drivers/video/fbdev/via/viafbdev.c
+++ b/drivers/video/fbdev/via/viafbdev.c
@@ -2110,7 +2110,7 @@ MODULE_PARM_DESC(viafb_lcd_panel_id,
2110 2110
2111module_param(viafb_lcd_dsp_method, int, S_IRUSR); 2111module_param(viafb_lcd_dsp_method, int, S_IRUSR);
2112MODULE_PARM_DESC(viafb_lcd_dsp_method, 2112MODULE_PARM_DESC(viafb_lcd_dsp_method,
2113 "Set Flat Panel display scaling method.(Default=Expandsion)"); 2113 "Set Flat Panel display scaling method.(Default=Expansion)");
2114 2114
2115module_param(viafb_SAMM_ON, int, S_IRUSR); 2115module_param(viafb_SAMM_ON, int, S_IRUSR);
2116MODULE_PARM_DESC(viafb_SAMM_ON, 2116MODULE_PARM_DESC(viafb_SAMM_ON,
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index df7d09409efe..8ca333f21292 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -27,6 +27,10 @@
27 27
28#define GUEST_MAPPINGS_TRIES 5 28#define GUEST_MAPPINGS_TRIES 5
29 29
30#define VBG_KERNEL_REQUEST \
31 (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
32 VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
33
30/** 34/**
31 * Reserves memory in which the VMM can relocate any guest mappings 35 * Reserves memory in which the VMM can relocate any guest mappings
32 * that are floating around. 36 * that are floating around.
@@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
48 int i, rc; 52 int i, rc;
49 53
50 /* Query the required space. */ 54 /* Query the required space. */
51 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO); 55 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
56 VBG_KERNEL_REQUEST);
52 if (!req) 57 if (!req)
53 return; 58 return;
54 59
@@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
135 * Tell the host that we're going to free the memory we reserved for 140 * Tell the host that we're going to free the memory we reserved for
136 * it, the free it up. (Leak the memory if anything goes wrong here.) 141 * it, the free it up. (Leak the memory if anything goes wrong here.)
137 */ 142 */
138 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO); 143 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
144 VBG_KERNEL_REQUEST);
139 if (!req) 145 if (!req)
140 return; 146 return;
141 147
@@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
172 struct vmmdev_guest_info2 *req2 = NULL; 178 struct vmmdev_guest_info2 *req2 = NULL;
173 int rc, ret = -ENOMEM; 179 int rc, ret = -ENOMEM;
174 180
175 req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO); 181 req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
176 req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2); 182 VBG_KERNEL_REQUEST);
183 req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
184 VBG_KERNEL_REQUEST);
177 if (!req1 || !req2) 185 if (!req1 || !req2)
178 goto out_free; 186 goto out_free;
179 187
@@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
187 req2->additions_minor = VBG_VERSION_MINOR; 195 req2->additions_minor = VBG_VERSION_MINOR;
188 req2->additions_build = VBG_VERSION_BUILD; 196 req2->additions_build = VBG_VERSION_BUILD;
189 req2->additions_revision = VBG_SVN_REV; 197 req2->additions_revision = VBG_SVN_REV;
190 /* (no features defined yet) */ 198 req2->additions_features =
191 req2->additions_features = 0; 199 VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
192 strlcpy(req2->name, VBG_VERSION_STRING, 200 strlcpy(req2->name, VBG_VERSION_STRING,
193 sizeof(req2->name)); 201 sizeof(req2->name));
194 202
@@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
230 struct vmmdev_guest_status *req; 238 struct vmmdev_guest_status *req;
231 int rc; 239 int rc;
232 240
233 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS); 241 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
242 VBG_KERNEL_REQUEST);
234 if (!req) 243 if (!req)
235 return -ENOMEM; 244 return -ENOMEM;
236 245
@@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
423 struct vmmdev_heartbeat *req; 432 struct vmmdev_heartbeat *req;
424 int rc; 433 int rc;
425 434
426 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE); 435 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
436 VBG_KERNEL_REQUEST);
427 if (!req) 437 if (!req)
428 return -ENOMEM; 438 return -ENOMEM;
429 439
@@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
457 467
458 gdev->guest_heartbeat_req = vbg_req_alloc( 468 gdev->guest_heartbeat_req = vbg_req_alloc(
459 sizeof(*gdev->guest_heartbeat_req), 469 sizeof(*gdev->guest_heartbeat_req),
460 VMMDEVREQ_GUEST_HEARTBEAT); 470 VMMDEVREQ_GUEST_HEARTBEAT,
471 VBG_KERNEL_REQUEST);
461 if (!gdev->guest_heartbeat_req) 472 if (!gdev->guest_heartbeat_req)
462 return -ENOMEM; 473 return -ENOMEM;
463 474
@@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
528 struct vmmdev_mask *req; 539 struct vmmdev_mask *req;
529 int rc; 540 int rc;
530 541
531 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); 542 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
543 VBG_KERNEL_REQUEST);
532 if (!req) 544 if (!req)
533 return -ENOMEM; 545 return -ENOMEM;
534 546
@@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
567 u32 changed, previous; 579 u32 changed, previous;
568 int rc, ret = 0; 580 int rc, ret = 0;
569 581
570 /* Allocate a request buffer before taking the spinlock */ 582 /*
571 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); 583 * Allocate a request buffer before taking the spinlock, when
584 * the session is being terminated the requestor is the kernel,
585 * as we're cleaning up.
586 */
587 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
588 session_termination ? VBG_KERNEL_REQUEST :
589 session->requestor);
572 if (!req) { 590 if (!req) {
573 if (!session_termination) 591 if (!session_termination)
574 return -ENOMEM; 592 return -ENOMEM;
@@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
627 struct vmmdev_mask *req; 645 struct vmmdev_mask *req;
628 int rc; 646 int rc;
629 647
630 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES); 648 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
649 VBG_KERNEL_REQUEST);
631 if (!req) 650 if (!req)
632 return -ENOMEM; 651 return -ENOMEM;
633 652
@@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
662 u32 changed, previous; 681 u32 changed, previous;
663 int rc, ret = 0; 682 int rc, ret = 0;
664 683
665 /* Allocate a request buffer before taking the spinlock */ 684 /*
666 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES); 685 * Allocate a request buffer before taking the spinlock, when
686 * the session is being terminated the requestor is the kernel,
687 * as we're cleaning up.
688 */
689 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
690 session_termination ? VBG_KERNEL_REQUEST :
691 session->requestor);
667 if (!req) { 692 if (!req) {
668 if (!session_termination) 693 if (!session_termination)
669 return -ENOMEM; 694 return -ENOMEM;
@@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
722 struct vmmdev_host_version *req; 747 struct vmmdev_host_version *req;
723 int rc, ret; 748 int rc, ret;
724 749
725 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION); 750 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
751 VBG_KERNEL_REQUEST);
726 if (!req) 752 if (!req)
727 return -ENOMEM; 753 return -ENOMEM;
728 754
@@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
783 809
784 gdev->mem_balloon.get_req = 810 gdev->mem_balloon.get_req =
785 vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req), 811 vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
786 VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ); 812 VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
813 VBG_KERNEL_REQUEST);
787 gdev->mem_balloon.change_req = 814 gdev->mem_balloon.change_req =
788 vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req), 815 vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
789 VMMDEVREQ_CHANGE_MEMBALLOON); 816 VMMDEVREQ_CHANGE_MEMBALLOON,
817 VBG_KERNEL_REQUEST);
790 gdev->cancel_req = 818 gdev->cancel_req =
791 vbg_req_alloc(sizeof(*(gdev->cancel_req)), 819 vbg_req_alloc(sizeof(*(gdev->cancel_req)),
792 VMMDEVREQ_HGCM_CANCEL2); 820 VMMDEVREQ_HGCM_CANCEL2,
821 VBG_KERNEL_REQUEST);
793 gdev->ack_events_req = 822 gdev->ack_events_req =
794 vbg_req_alloc(sizeof(*gdev->ack_events_req), 823 vbg_req_alloc(sizeof(*gdev->ack_events_req),
795 VMMDEVREQ_ACKNOWLEDGE_EVENTS); 824 VMMDEVREQ_ACKNOWLEDGE_EVENTS,
825 VBG_KERNEL_REQUEST);
796 gdev->mouse_status_req = 826 gdev->mouse_status_req =
797 vbg_req_alloc(sizeof(*gdev->mouse_status_req), 827 vbg_req_alloc(sizeof(*gdev->mouse_status_req),
798 VMMDEVREQ_GET_MOUSE_STATUS); 828 VMMDEVREQ_GET_MOUSE_STATUS,
829 VBG_KERNEL_REQUEST);
799 830
800 if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req || 831 if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
801 !gdev->cancel_req || !gdev->ack_events_req || 832 !gdev->cancel_req || !gdev->ack_events_req ||
@@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev)
892 * vboxguest_linux.c calls this when userspace opens the char-device. 923 * vboxguest_linux.c calls this when userspace opens the char-device.
893 * Return: A pointer to the new session or an ERR_PTR on error. 924 * Return: A pointer to the new session or an ERR_PTR on error.
894 * @gdev: The Guest extension device. 925 * @gdev: The Guest extension device.
895 * @user: Set if this is a session for the vboxuser device. 926 * @requestor: VMMDEV_REQUESTOR_* flags
896 */ 927 */
897struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user) 928struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
898{ 929{
899 struct vbg_session *session; 930 struct vbg_session *session;
900 931
@@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
903 return ERR_PTR(-ENOMEM); 934 return ERR_PTR(-ENOMEM);
904 935
905 session->gdev = gdev; 936 session->gdev = gdev;
906 session->user_session = user; 937 session->requestor = requestor;
907 938
908 return session; 939 return session;
909} 940}
@@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session)
924 if (!session->hgcm_client_ids[i]) 955 if (!session->hgcm_client_ids[i])
925 continue; 956 continue;
926 957
927 vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc); 958 /* requestor is kernel here, as we're cleaning up. */
959 vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
960 session->hgcm_client_ids[i], &rc);
928 } 961 }
929 962
930 kfree(session); 963 kfree(session);
@@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
1152 return -EPERM; 1185 return -EPERM;
1153 } 1186 }
1154 1187
1155 if (trusted_apps_only && session->user_session) { 1188 if (trusted_apps_only &&
1189 (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
1156 vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n", 1190 vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
1157 req->request_type); 1191 req->request_type);
1158 return -EPERM; 1192 return -EPERM;
@@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
1209 if (i >= ARRAY_SIZE(session->hgcm_client_ids)) 1243 if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1210 return -EMFILE; 1244 return -EMFILE;
1211 1245
1212 ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id, 1246 ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
1213 &conn->hdr.rc); 1247 &client_id, &conn->hdr.rc);
1214 1248
1215 mutex_lock(&gdev->session_mutex); 1249 mutex_lock(&gdev->session_mutex);
1216 if (ret == 0 && conn->hdr.rc >= 0) { 1250 if (ret == 0 && conn->hdr.rc >= 0) {
@@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
1251 if (i >= ARRAY_SIZE(session->hgcm_client_ids)) 1285 if (i >= ARRAY_SIZE(session->hgcm_client_ids))
1252 return -EINVAL; 1286 return -EINVAL;
1253 1287
1254 ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc); 1288 ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
1289 &disconn->hdr.rc);
1255 1290
1256 mutex_lock(&gdev->session_mutex); 1291 mutex_lock(&gdev->session_mutex);
1257 if (ret == 0 && disconn->hdr.rc >= 0) 1292 if (ret == 0 && disconn->hdr.rc >= 0)
@@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
1313 } 1348 }
1314 1349
1315 if (IS_ENABLED(CONFIG_COMPAT) && f32bit) 1350 if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
1316 ret = vbg_hgcm_call32(gdev, client_id, 1351 ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
1317 call->function, call->timeout_ms, 1352 call->function, call->timeout_ms,
1318 VBG_IOCTL_HGCM_CALL_PARMS32(call), 1353 VBG_IOCTL_HGCM_CALL_PARMS32(call),
1319 call->parm_count, &call->hdr.rc); 1354 call->parm_count, &call->hdr.rc);
1320 else 1355 else
1321 ret = vbg_hgcm_call(gdev, client_id, 1356 ret = vbg_hgcm_call(gdev, session->requestor, client_id,
1322 call->function, call->timeout_ms, 1357 call->function, call->timeout_ms,
1323 VBG_IOCTL_HGCM_CALL_PARMS(call), 1358 VBG_IOCTL_HGCM_CALL_PARMS(call),
1324 call->parm_count, &call->hdr.rc); 1359 call->parm_count, &call->hdr.rc);
@@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
1408} 1443}
1409 1444
1410static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, 1445static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
1446 struct vbg_session *session,
1411 struct vbg_ioctl_write_coredump *dump) 1447 struct vbg_ioctl_write_coredump *dump)
1412{ 1448{
1413 struct vmmdev_write_core_dump *req; 1449 struct vmmdev_write_core_dump *req;
@@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
1415 if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0)) 1451 if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
1416 return -EINVAL; 1452 return -EINVAL;
1417 1453
1418 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP); 1454 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
1455 session->requestor);
1419 if (!req) 1456 if (!req)
1420 return -ENOMEM; 1457 return -ENOMEM;
1421 1458
@@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
1476 case VBG_IOCTL_CHECK_BALLOON: 1513 case VBG_IOCTL_CHECK_BALLOON:
1477 return vbg_ioctl_check_balloon(gdev, data); 1514 return vbg_ioctl_check_balloon(gdev, data);
1478 case VBG_IOCTL_WRITE_CORE_DUMP: 1515 case VBG_IOCTL_WRITE_CORE_DUMP:
1479 return vbg_ioctl_write_core_dump(gdev, data); 1516 return vbg_ioctl_write_core_dump(gdev, session, data);
1480 } 1517 }
1481 1518
1482 /* Variable sized requests. */ 1519 /* Variable sized requests. */
@@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
1508 struct vmmdev_mouse_status *req; 1545 struct vmmdev_mouse_status *req;
1509 int rc; 1546 int rc;
1510 1547
1511 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS); 1548 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
1549 VBG_KERNEL_REQUEST);
1512 if (!req) 1550 if (!req)
1513 return -ENOMEM; 1551 return -ENOMEM;
1514 1552
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 7ad9ec45bfa9..4188c12b839f 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -154,15 +154,15 @@ struct vbg_session {
154 * host. Protected by vbg_gdev.session_mutex. 154 * host. Protected by vbg_gdev.session_mutex.
155 */ 155 */
156 u32 guest_caps; 156 u32 guest_caps;
157 /** Does this session belong to a root process or a user one? */ 157 /** VMMDEV_REQUESTOR_* flags */
158 bool user_session; 158 u32 requestor;
159 /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */ 159 /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
160 bool cancel_waiters; 160 bool cancel_waiters;
161}; 161};
162 162
163int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events); 163int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
164void vbg_core_exit(struct vbg_dev *gdev); 164void vbg_core_exit(struct vbg_dev *gdev);
165struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user); 165struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor);
166void vbg_core_close_session(struct vbg_session *session); 166void vbg_core_close_session(struct vbg_session *session);
167int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data); 167int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
168int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features); 168int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
@@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
172void vbg_linux_mouse_event(struct vbg_dev *gdev); 172void vbg_linux_mouse_event(struct vbg_dev *gdev);
173 173
174/* Private (non exported) functions form vboxguest_utils.c */ 174/* Private (non exported) functions form vboxguest_utils.c */
175void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type); 175void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
176 u32 requestor);
176void vbg_req_free(void *req, size_t len); 177void vbg_req_free(void *req, size_t len);
177int vbg_req_perform(struct vbg_dev *gdev, void *req); 178int vbg_req_perform(struct vbg_dev *gdev, void *req);
178int vbg_hgcm_call32( 179int vbg_hgcm_call32(
179 struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, 180 struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
180 struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, 181 u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
181 int *vbox_status); 182 u32 parm_count, int *vbox_status);
182 183
183#endif 184#endif
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 6e2a9619192d..6e8c0f1c1056 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -5,6 +5,7 @@
5 * Copyright (C) 2006-2016 Oracle Corporation 5 * Copyright (C) 2006-2016 Oracle Corporation
6 */ 6 */
7 7
8#include <linux/cred.h>
8#include <linux/input.h> 9#include <linux/input.h>
9#include <linux/kernel.h> 10#include <linux/kernel.h>
10#include <linux/miscdevice.h> 11#include <linux/miscdevice.h>
@@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex);
28/** Global vbg_gdev pointer used by vbg_get/put_gdev. */ 29/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
29static struct vbg_dev *vbg_gdev; 30static struct vbg_dev *vbg_gdev;
30 31
32static u32 vbg_misc_device_requestor(struct inode *inode)
33{
34 u32 requestor = VMMDEV_REQUESTOR_USERMODE |
35 VMMDEV_REQUESTOR_CON_DONT_KNOW |
36 VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
37
38 if (from_kuid(current_user_ns(), current->cred->uid) == 0)
39 requestor |= VMMDEV_REQUESTOR_USR_ROOT;
40 else
41 requestor |= VMMDEV_REQUESTOR_USR_USER;
42
43 if (in_egroup_p(inode->i_gid))
44 requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
45
46 return requestor;
47}
48
31static int vbg_misc_device_open(struct inode *inode, struct file *filp) 49static int vbg_misc_device_open(struct inode *inode, struct file *filp)
32{ 50{
33 struct vbg_session *session; 51 struct vbg_session *session;
@@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp)
36 /* misc_open sets filp->private_data to our misc device */ 54 /* misc_open sets filp->private_data to our misc device */
37 gdev = container_of(filp->private_data, struct vbg_dev, misc_device); 55 gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
38 56
39 session = vbg_core_open_session(gdev, false); 57 session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
40 if (IS_ERR(session)) 58 if (IS_ERR(session))
41 return PTR_ERR(session); 59 return PTR_ERR(session);
42 60
@@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
53 gdev = container_of(filp->private_data, struct vbg_dev, 71 gdev = container_of(filp->private_data, struct vbg_dev,
54 misc_device_user); 72 misc_device_user);
55 73
56 session = vbg_core_open_session(gdev, false); 74 session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
75 VMMDEV_REQUESTOR_USER_DEVICE);
57 if (IS_ERR(session)) 76 if (IS_ERR(session))
58 return PTR_ERR(session); 77 return PTR_ERR(session);
59 78
@@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
115 req == VBG_IOCTL_VMMDEV_REQUEST_BIG; 134 req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
116 135
117 if (is_vmmdev_req) 136 if (is_vmmdev_req)
118 buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT); 137 buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
138 session->requestor);
119 else 139 else
120 buf = kmalloc(size, GFP_KERNEL); 140 buf = kmalloc(size, GFP_KERNEL);
121 if (!buf) 141 if (!buf)
diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
index bf4474214b4d..75fd140b02ff 100644
--- a/drivers/virt/vboxguest/vboxguest_utils.c
+++ b/drivers/virt/vboxguest/vboxguest_utils.c
@@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err);
62VBG_LOG(vbg_debug, pr_debug); 62VBG_LOG(vbg_debug, pr_debug);
63#endif 63#endif
64 64
65void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type) 65void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
66 u32 requestor)
66{ 67{
67 struct vmmdev_request_header *req; 68 struct vmmdev_request_header *req;
68 int order = get_order(PAGE_ALIGN(len)); 69 int order = get_order(PAGE_ALIGN(len));
@@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
78 req->request_type = req_type; 79 req->request_type = req_type;
79 req->rc = VERR_GENERAL_FAILURE; 80 req->rc = VERR_GENERAL_FAILURE;
80 req->reserved1 = 0; 81 req->reserved1 = 0;
81 req->reserved2 = 0; 82 req->requestor = requestor;
82 83
83 return req; 84 return req;
84} 85}
@@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev,
119 return done; 120 return done;
120} 121}
121 122
122int vbg_hgcm_connect(struct vbg_dev *gdev, 123int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
123 struct vmmdev_hgcm_service_location *loc, 124 struct vmmdev_hgcm_service_location *loc,
124 u32 *client_id, int *vbox_status) 125 u32 *client_id, int *vbox_status)
125{ 126{
@@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
127 int rc; 128 int rc;
128 129
129 hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect), 130 hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
130 VMMDEVREQ_HGCM_CONNECT); 131 VMMDEVREQ_HGCM_CONNECT, requestor);
131 if (!hgcm_connect) 132 if (!hgcm_connect)
132 return -ENOMEM; 133 return -ENOMEM;
133 134
@@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
153} 154}
154EXPORT_SYMBOL(vbg_hgcm_connect); 155EXPORT_SYMBOL(vbg_hgcm_connect);
155 156
156int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status) 157int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
158 u32 client_id, int *vbox_status)
157{ 159{
158 struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL; 160 struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
159 int rc; 161 int rc;
160 162
161 hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect), 163 hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
162 VMMDEVREQ_HGCM_DISCONNECT); 164 VMMDEVREQ_HGCM_DISCONNECT,
165 requestor);
163 if (!hgcm_disconnect) 166 if (!hgcm_disconnect)
164 return -ENOMEM; 167 return -ENOMEM;
165 168
@@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result(
593 return 0; 596 return 0;
594} 597}
595 598
596int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, 599int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
597 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, 600 u32 function, u32 timeout_ms,
598 u32 parm_count, int *vbox_status) 601 struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
602 int *vbox_status)
599{ 603{
600 struct vmmdev_hgcm_call *call; 604 struct vmmdev_hgcm_call *call;
601 void **bounce_bufs = NULL; 605 void **bounce_bufs = NULL;
@@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
615 goto free_bounce_bufs; 619 goto free_bounce_bufs;
616 } 620 }
617 621
618 call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL); 622 call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
619 if (!call) { 623 if (!call) {
620 ret = -ENOMEM; 624 ret = -ENOMEM;
621 goto free_bounce_bufs; 625 goto free_bounce_bufs;
@@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call);
647 651
648#ifdef CONFIG_COMPAT 652#ifdef CONFIG_COMPAT
649int vbg_hgcm_call32( 653int vbg_hgcm_call32(
650 struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, 654 struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
651 struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, 655 u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
652 int *vbox_status) 656 u32 parm_count, int *vbox_status)
653{ 657{
654 struct vmmdev_hgcm_function_parameter *parm64 = NULL; 658 struct vmmdev_hgcm_function_parameter *parm64 = NULL;
655 u32 i, size; 659 u32 i, size;
@@ -689,7 +693,7 @@ int vbg_hgcm_call32(
689 goto out_free; 693 goto out_free;
690 } 694 }
691 695
692 ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms, 696 ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
693 parm64, parm_count, vbox_status); 697 parm64, parm_count, vbox_status);
694 if (ret < 0) 698 if (ret < 0)
695 goto out_free; 699 goto out_free;
diff --git a/drivers/virt/vboxguest/vboxguest_version.h b/drivers/virt/vboxguest/vboxguest_version.h
index 77f0c8f8a231..84834dad38d5 100644
--- a/drivers/virt/vboxguest/vboxguest_version.h
+++ b/drivers/virt/vboxguest/vboxguest_version.h
@@ -9,11 +9,10 @@
9#ifndef __VBOX_VERSION_H__ 9#ifndef __VBOX_VERSION_H__
10#define __VBOX_VERSION_H__ 10#define __VBOX_VERSION_H__
11 11
12/* Last synced October 4th 2017 */ 12#define VBG_VERSION_MAJOR 6
13#define VBG_VERSION_MAJOR 5 13#define VBG_VERSION_MINOR 0
14#define VBG_VERSION_MINOR 2
15#define VBG_VERSION_BUILD 0 14#define VBG_VERSION_BUILD 0
16#define VBG_SVN_REV 68940 15#define VBG_SVN_REV 127566
17#define VBG_VERSION_STRING "5.2.0" 16#define VBG_VERSION_STRING "6.0.0"
18 17
19#endif 18#endif
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
index 5e2ae978935d..6337b8d75d96 100644
--- a/drivers/virt/vboxguest/vmmdev.h
+++ b/drivers/virt/vboxguest/vmmdev.h
@@ -98,8 +98,8 @@ struct vmmdev_request_header {
98 s32 rc; 98 s32 rc;
99 /** Reserved field no.1. MBZ. */ 99 /** Reserved field no.1. MBZ. */
100 u32 reserved1; 100 u32 reserved1;
101 /** Reserved field no.2. MBZ. */ 101 /** IN: Requestor information (VMMDEV_REQUESTOR_*) */
102 u32 reserved2; 102 u32 requestor;
103}; 103};
104VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24); 104VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
105 105
@@ -247,6 +247,8 @@ struct vmmdev_guest_info {
247}; 247};
248VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8); 248VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
249 249
250#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO BIT(0)
251
250/** struct vmmdev_guestinfo2 - Guest information report, version 2. */ 252/** struct vmmdev_guestinfo2 - Guest information report, version 2. */
251struct vmmdev_guest_info2 { 253struct vmmdev_guest_info2 {
252 /** Header. */ 254 /** Header. */
@@ -259,7 +261,7 @@ struct vmmdev_guest_info2 {
259 u32 additions_build; 261 u32 additions_build;
260 /** SVN revision. */ 262 /** SVN revision. */
261 u32 additions_revision; 263 u32 additions_revision;
262 /** Feature mask, currently unused. */ 264 /** Feature mask. */
263 u32 additions_features; 265 u32 additions_features;
264 /** 266 /**
265 * The intentional meaning of this field was: 267 * The intentional meaning of this field was:
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index c3e201025ef0..0782ff3c2273 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -622,9 +622,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
622 if (xen_store_evtchn == 0) 622 if (xen_store_evtchn == 0)
623 return -ENOENT; 623 return -ENOENT;
624 624
625 nonseekable_open(inode, filp); 625 stream_open(inode, filp);
626
627 filp->f_mode &= ~FMODE_ATOMIC_POS; /* cdev-style semantics */
628 626
629 u = kzalloc(sizeof(*u), GFP_KERNEL); 627 u = kzalloc(sizeof(*u), GFP_KERNEL);
630 if (u == NULL) 628 if (u == NULL)