Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/hwsleep.c | 15
-rw-r--r--  drivers/acpi/acpica/psloop.c | 26
-rw-r--r--  drivers/acpi/acpica/uterror.c | 6
-rw-r--r--  drivers/acpi/battery.c | 9
-rw-r--r--  drivers/acpi/ec.c | 2
-rw-r--r--  drivers/acpi/nfit/core.c | 48
-rw-r--r--  drivers/acpi/nfit/nfit.h | 1
-rw-r--r--  drivers/acpi/pptt.c | 10
-rw-r--r--  drivers/ata/Kconfig | 2
-rw-r--r--  drivers/ata/ahci.c | 60
-rw-r--r--  drivers/ata/ahci_mvebu.c | 2
-rw-r--r--  drivers/ata/libahci.c | 7
-rw-r--r--  drivers/ata/libata-core.c | 3
-rw-r--r--  drivers/ata/libata-eh.c | 41
-rw-r--r--  drivers/ata/libata-scsi.c | 18
-rw-r--r--  drivers/ata/sata_fsl.c | 9
-rw-r--r--  drivers/ata/sata_nv.c | 3
-rw-r--r--  drivers/atm/iphase.c | 2
-rw-r--r--  drivers/atm/zatm.c | 2
-rw-r--r--  drivers/base/dd.c | 8
-rw-r--r--  drivers/base/power/domain.c | 16
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 2
-rw-r--r--  drivers/block/loop.c | 1
-rw-r--r--  drivers/block/nbd.c | 96
-rw-r--r--  drivers/bus/ti-sysc.c | 8
-rw-r--r--  drivers/char/agp/alpha-agp.c | 2
-rw-r--r--  drivers/char/agp/amd64-agp.c | 4
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c | 6
-rw-r--r--  drivers/char/ipmi/kcs_bmc.c | 31
-rw-r--r--  drivers/char/mem.c | 1
-rw-r--r--  drivers/char/random.c | 10
-rw-r--r--  drivers/clk/Makefile | 2
-rw-r--r--  drivers/clk/clk-aspeed.c | 59
-rw-r--r--  drivers/clk/clk.c | 3
-rw-r--r--  drivers/clk/davinci/da8xx-cfgchip.c | 2
-rw-r--r--  drivers/clk/davinci/psc.h | 2
-rw-r--r--  drivers/clk/meson/clk-audio-divider.c | 2
-rw-r--r--  drivers/clk/meson/gxbb.c | 1
-rw-r--r--  drivers/clk/mvebu/armada-37xx-periph.c | 38
-rw-r--r--  drivers/clk/qcom/gcc-msm8996.c | 1
-rw-r--r--  drivers/clk/qcom/mmcc-msm8996.c | 1
-rw-r--r--  drivers/clk/sunxi-ng/Makefile | 39
-rw-r--r--  drivers/clocksource/arm_arch_timer.c | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 17
-rw-r--r--  drivers/cpufreq/pcc-cpufreq.c | 4
-rw-r--r--  drivers/cpufreq/qcom-cpufreq-kryo.c | 1
-rw-r--r--  drivers/dax/device.c | 12
-rw-r--r--  drivers/dma/k3dma.c | 2
-rw-r--r--  drivers/dma/pl330.c | 2
-rw-r--r--  drivers/dma/ti/omap-dma.c | 6
-rw-r--r--  drivers/fpga/altera-cvp.c | 6
-rw-r--r--  drivers/gpio/gpio-uniphier.c | 6
-rw-r--r--  drivers/gpio/gpiolib-of.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 12
-rw-r--r--  drivers/gpu/drm/armada/armada_hw.h | 1
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 30
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 86
-rw-r--r--  drivers/gpu/drm/drm_lease.c | 16
-rw-r--r--  drivers/gpu/drm/drm_property.c | 6
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 24
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 58
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 29
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 2
-rw-r--r--  drivers/gpu/drm/sun4i/Makefile | 5
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 5
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c | 11
-rw-r--r--  drivers/gpu/host1x/dev.c | 3
-rw-r--r--  drivers/gpu/host1x/job.c | 3
-rw-r--r--  drivers/hid/hid-core.c | 5
-rw-r--r--  drivers/hid/hid-debug.c | 8
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c | 2
-rw-r--r--  drivers/hid/usbhid/hiddev.c | 11
-rw-r--r--  drivers/hid/wacom_wac.c | 10
-rw-r--r--  drivers/i2c/busses/i2c-cht-wc.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-davinci.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-imx.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c | 54
-rw-r--r--  drivers/i2c/busses/i2c-stu300.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 17
-rw-r--r--  drivers/i2c/i2c-core-base.c | 11
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c | 28
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/rc.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/uc.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/ud.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs_txreq.c | 4
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs_txreq.h | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c | 18
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c | 2
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h | 7
-rw-r--r--  drivers/iommu/Kconfig | 1
-rw-r--r--  drivers/iommu/intel-iommu.c | 94
-rw-r--r--  drivers/md/dm-writecache.c | 43
-rw-r--r--  drivers/md/md.c | 8
-rw-r--r--  drivers/md/raid10.c | 7
-rw-r--r--  drivers/media/rc/bpf-lirc.c | 14
-rw-r--r--  drivers/misc/cxl/api.c | 8
-rw-r--r--  drivers/misc/ibmasm/ibmasmfs.c | 27
-rw-r--r--  drivers/misc/mei/interrupt.c | 5
-rw-r--r--  drivers/misc/vmw_balloon.c | 4
-rw-r--r--  drivers/mmc/core/slot-gpio.c | 2
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 7
-rw-r--r--  drivers/mmc/host/renesas_sdhi_internal_dmac.c | 15
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 21
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 7
-rw-r--r--  drivers/mtd/spi-nor/cadence-quadspi.c | 6
-rw-r--r--  drivers/net/bonding/bond_options.c | 23
-rw-r--r--  drivers/net/can/m_can/m_can.c | 18
-rw-r--r--  drivers/net/can/mscan/mpc5xxx_can.c | 5
-rw-r--r--  drivers/net/can/peak_canfd/peak_pciefd_main.c | 19
-rw-r--r--  drivers/net/can/xilinx_can.c | 392
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 21
-rw-r--r--  drivers/net/ethernet/3com/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 4
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 11
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 47
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 8
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 24
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 30
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 13
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 2
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 11
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 38
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 5
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 14
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 35
-rw-r--r--  drivers/net/ethernet/cirrus/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 15
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_port.c | 8
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_rx.c | 1
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_tx.c | 1
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 43
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 24
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 12
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 48
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/main.c | 9
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 14
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 11
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_debug.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 52
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 21
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.h | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c | 10
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 21
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 4
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 93
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 94
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 30
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 17
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 1
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 1
-rw-r--r--  drivers/net/geneve.c | 2
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 2
-rw-r--r--  drivers/net/hyperv/netvsc.c | 54
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 17
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 62
-rw-r--r--  drivers/net/ieee802154/adf7242.c | 34
-rw-r--r--  drivers/net/ieee802154/at86rf230.c | 15
-rw-r--r--  drivers/net/ieee802154/fakelb.c | 2
-rw-r--r--  drivers/net/ieee802154/mcr20a.c | 3
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 36
-rw-r--r--  drivers/net/phy/dp83tc811.c | 2
-rw-r--r--  drivers/net/phy/marvell.c | 54
-rw-r--r--  drivers/net/phy/phy.c | 2
-rw-r--r--  drivers/net/phy/phy_device.c | 7
-rw-r--r--  drivers/net/phy/sfp-bus.c | 35
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/usb/asix_devices.c | 4
-rw-r--r--  drivers/net/usb/lan78xx.c | 42
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 3
-rw-r--r--  drivers/net/usb/r8152.c | 3
-rw-r--r--  drivers/net/usb/rtl8150.c | 2
-rw-r--r--  drivers/net/usb/smsc75xx.c | 62
-rw-r--r--  drivers/net/virtio_net.c | 30
-rw-r--r--  drivers/net/vxlan.c | 130
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath10k/wmi.h | 1
-rw-r--r--  drivers/net/wireless/ath/wcn36xx/testmode.c | 2
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | 7
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/usb.c | 7
-rw-r--r--  drivers/net/wireless/mediatek/mt7601u/phy.c | 6
-rw-r--r--  drivers/net/wireless/quantenna/qtnfmac/cfg80211.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/base.c | 17
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/base.h | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/core.c | 3
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/pci.c | 2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/ps.c | 4
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/usb.c | 2
-rw-r--r--  drivers/nvdimm/claim.c | 1
-rw-r--r--  drivers/nvme/host/core.c | 63
-rw-r--r--  drivers/nvme/host/fabrics.c | 10
-rw-r--r--  drivers/nvme/host/fabrics.h | 3
-rw-r--r--  drivers/nvme/host/fc.c | 2
-rw-r--r--  drivers/nvme/host/pci.c | 12
-rw-r--r--  drivers/nvme/host/rdma.c | 2
-rw-r--r--  drivers/nvme/target/configfs.c | 9
-rw-r--r--  drivers/nvme/target/core.c | 2
-rw-r--r--  drivers/nvme/target/fc.c | 44
-rw-r--r--  drivers/nvme/target/loop.c | 2
-rw-r--r--  drivers/nvmem/core.c | 4
-rw-r--r--  drivers/of/base.c | 6
-rw-r--r--  drivers/of/of_private.h | 2
-rw-r--r--  drivers/of/overlay.c | 11
-rw-r--r--  drivers/pci/controller/dwc/Kconfig | 1
-rw-r--r--  drivers/pci/controller/dwc/pcie-designware-host.c | 3
-rw-r--r--  drivers/pci/controller/pci-aardvark.c | 2
-rw-r--r--  drivers/pci/controller/pci-ftpci100.c | 4
-rw-r--r--  drivers/pci/controller/pci-hyperv.c | 8
-rw-r--r--  drivers/pci/controller/pci-v3-semi.c | 2
-rw-r--r--  drivers/pci/controller/pci-versatile.c | 2
-rw-r--r--  drivers/pci/controller/pci-xgene.c | 2
-rw-r--r--  drivers/pci/controller/pcie-mediatek.c | 2
-rw-r--r--  drivers/pci/controller/pcie-rcar.c | 16
-rw-r--r--  drivers/pci/controller/pcie-xilinx-nwl.c | 2
-rw-r--r--  drivers/pci/controller/pcie-xilinx.c | 1
-rw-r--r--  drivers/pci/endpoint/pci-epf-core.c | 62
-rw-r--r--  drivers/pci/iov.c | 16
-rw-r--r--  drivers/pci/of.c | 2
-rw-r--r--  drivers/pci/pci-acpi.c | 12
-rw-r--r--  drivers/pci/pci-driver.c | 1
-rw-r--r--  drivers/pci/pci.c | 38
-rw-r--r--  drivers/pci/pci.h | 4
-rw-r--r--  drivers/pci/pcie/err.c | 2
-rw-r--r--  drivers/phy/broadcom/phy-brcm-usb-init.c | 4
-rw-r--r--  drivers/phy/motorola/phy-mapphone-mdm6600.c | 4
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-nsp-mux.c | 6
-rw-r--r--  drivers/pinctrl/mediatek/pinctrl-mt7622.c | 48
-rw-r--r--  drivers/pinctrl/pinctrl-ingenic.c | 2
-rw-r--r--  drivers/pinctrl/sh-pfc/pfc-r8a77970.c | 14
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 2
-rw-r--r--  drivers/ptp/ptp_chardev.c | 1
-rw-r--r--  drivers/rtc/interface.c | 8
-rw-r--r--  drivers/rtc/rtc-mrst.c | 4
-rw-r--r--  drivers/s390/block/dasd.c | 13
-rw-r--r--  drivers/s390/block/dasd_int.h | 8
-rw-r--r--  drivers/s390/net/qeth_core.h | 13
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 47
-rw-r--r--  drivers/s390/net/qeth_l2_main.c | 24
-rw-r--r--  drivers/s390/net/qeth_l3_main.c | 3
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 15
-rw-r--r--  drivers/scsi/cxlflash/main.h | 4
-rw-r--r--  drivers/scsi/cxlflash/ocxl_hw.c | 5
-rw-r--r--  drivers/scsi/hpsa.c | 25
-rw-r--r--  drivers/scsi/hpsa.h | 1
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 12
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 11
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 40
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 14
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 4
-rw-r--r--  drivers/scsi/scsi_error.c | 14
-rw-r--r--  drivers/scsi/sd_zbc.c | 5
-rw-r--r--  drivers/scsi/sg.c | 42
-rw-r--r--  drivers/soc/imx/gpc.c | 21
-rw-r--r--  drivers/staging/ks7010/ks_hostif.c | 12
-rw-r--r--  drivers/staging/media/omap4iss/iss_video.c | 3
-rw-r--r--  drivers/staging/rtl8188eu/Kconfig | 1
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_recv.c | 161
-rw-r--r--  drivers/staging/rtl8188eu/core/rtw_security.c | 92
-rw-r--r--  drivers/staging/rtl8723bs/core/rtw_ap.c | 2
-rw-r--r--  drivers/staging/rtlwifi/rtl8822be/hw.c | 2
-rw-r--r--  drivers/staging/rtlwifi/wifi.h | 1
-rw-r--r--  drivers/staging/speakup/speakup_soft.c | 6
-rw-r--r--  drivers/target/target_core_pr.c | 15
-rw-r--r--  drivers/thunderbolt/domain.c | 4
-rw-r--r--  drivers/uio/uio.c | 139
-rw-r--r--  drivers/usb/chipidea/Kconfig | 9
-rw-r--r--  drivers/usb/chipidea/Makefile | 3
-rw-r--r--  drivers/usb/chipidea/ci.h | 8
-rw-r--r--  drivers/usb/chipidea/ulpi.c | 3
-rw-r--r--  drivers/usb/class/cdc-acm.c | 3
-rw-r--r--  drivers/usb/core/hub.c | 8
-rw-r--r--  drivers/usb/core/quirks.c | 4
-rw-r--r--  drivers/usb/dwc2/gadget.c | 6
-rw-r--r--  drivers/usb/dwc2/hcd.c | 54
-rw-r--r--  drivers/usb/dwc2/hcd_intr.c | 9
-rw-r--r--  drivers/usb/dwc3/ep0.c | 3
-rw-r--r--  drivers/usb/gadget/composite.c | 1
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_uac2.c | 24
-rw-r--r--  drivers/usb/gadget/function/u_audio.c | 88
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/Kconfig | 1
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/ep0.c | 11
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/epn.c | 14
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/vhub.h | 33
-rw-r--r--  drivers/usb/gadget/udc/r8a66597-udc.c | 6
-rw-r--r--  drivers/usb/host/xhci-dbgcap.c | 12
-rw-r--r--  drivers/usb/host/xhci-mem.c | 2
-rw-r--r--  drivers/usb/host/xhci.c | 1
-rw-r--r--  drivers/usb/misc/yurex.c | 23
-rw-r--r--  drivers/usb/phy/phy-fsl-usb.c | 4
-rw-r--r--  drivers/usb/serial/ch341.c | 2
-rw-r--r--  drivers/usb/serial/cp210x.c | 1
-rw-r--r--  drivers/usb/serial/keyspan_pda.c | 4
-rw-r--r--  drivers/usb/serial/mos7840.c | 3
-rw-r--r--  drivers/usb/typec/tcpm.c | 7
-rw-r--r--  drivers/vfio/pci/Kconfig | 12
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 4
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c | 10
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 16
384 files changed, 3796 insertions(+), 1818 deletions(-)
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index fc0c2e2328cd..fe9d46d81750 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -51,16 +51,23 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
 		return_ACPI_STATUS(status);
 	}
 
-	/*
-	 * 1) Disable all GPEs
-	 * 2) Enable all wakeup GPEs
-	 */
+	/* Disable all GPEs */
 	status = acpi_hw_disable_all_gpes();
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
 	}
+	/*
+	 * If the target sleep state is S5, clear all GPEs and fixed events too
+	 */
+	if (sleep_state == ACPI_STATE_S5) {
+		status = acpi_hw_clear_acpi_status();
+		if (ACPI_FAILURE(status)) {
+			return_ACPI_STATUS(status);
+		}
+	}
 	acpi_gbl_system_awake_and_running = FALSE;
 
+	/* Enable all wakeup GPEs */
 	status = acpi_hw_enable_all_wakeup_gpes();
 	if (ACPI_FAILURE(status)) {
 		return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index bc5f05906bd1..ee840be150b5 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -497,6 +497,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
 			status =
 			    acpi_ps_create_op(walk_state, aml_op_start, &op);
 			if (ACPI_FAILURE(status)) {
+				/*
+				 * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
+				 * executing it as a control method. However, if we encounter
+				 * an error while loading the table, we need to keep trying to
+				 * load the table rather than aborting the table load. Set the
+				 * status to AE_OK to proceed with the table load.
+				 */
+				if ((walk_state->
+				     parse_flags & ACPI_PARSE_MODULE_LEVEL)
+				    && status == AE_ALREADY_EXISTS) {
+					status = AE_OK;
+				}
 				if (status == AE_CTRL_PARSE_CONTINUE) {
 					continue;
 				}
@@ -694,6 +706,20 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
 		acpi_ps_next_parse_state(walk_state, op, status);
 		if (status == AE_CTRL_PENDING) {
 			status = AE_OK;
+		} else
+		    if ((walk_state->
+			 parse_flags & ACPI_PARSE_MODULE_LEVEL)
+			&& ACPI_FAILURE(status)) {
+			/*
+			 * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
+			 * executing it as a control method. However, if we encounter
+			 * an error while loading the table, we need to keep trying to
+			 * load the table rather than aborting the table load. Set the
+			 * status to AE_OK to proceed with the table load. If we get a
+			 * failure at this point, it means that the dispatcher got an
+			 * error while processing Op (most likely an AML operand error.
+			 */
+			status = AE_OK;
 		}
 	}
 
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index 5a64ddaed8a3..e47430272692 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
 	switch (lookup_status) {
 	case AE_ALREADY_EXISTS:
 
-		acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+		acpi_os_printf(ACPI_MSG_BIOS_ERROR);
 		message = "Failure creating";
 		break;
 
 	case AE_NOT_FOUND:
 
-		acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+		acpi_os_printf(ACPI_MSG_BIOS_ERROR);
 		message = "Could not resolve";
 		break;
 
 	default:
 
-		acpi_os_printf("\n" ACPI_MSG_ERROR);
+		acpi_os_printf(ACPI_MSG_ERROR);
 		message = "Failure resolving";
 		break;
 	}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b0113a5802a3..d79ad844c78f 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -717,10 +717,11 @@ void battery_hook_register(struct acpi_battery_hook *hook)
 			 */
 			pr_err("extension failed to load: %s", hook->name);
 			__battery_hook_unregister(hook, 0);
-			return;
+			goto end;
 		}
 	}
 	pr_info("new extension: %s\n", hook->name);
+end:
 	mutex_unlock(&hook_mutex);
 }
 EXPORT_SYMBOL_GPL(battery_hook_register);
@@ -732,7 +733,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register);
 */
 static void battery_hook_add_battery(struct acpi_battery *battery)
 {
-	struct acpi_battery_hook *hook_node;
+	struct acpi_battery_hook *hook_node, *tmp;
 
 	mutex_lock(&hook_mutex);
 	INIT_LIST_HEAD(&battery->list);
@@ -744,15 +745,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
 	 * when a battery gets hotplugged or initialized
 	 * during the battery module initialization.
 	 */
-	list_for_each_entry(hook_node, &battery_hook_list, list) {
+	list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
 		if (hook_node->add_battery(battery->bat)) {
 			/*
 			 * The notification of the extensions has failed, to
 			 * prevent further errors we will unload the extension.
 			 */
-			__battery_hook_unregister(hook_node, 0);
 			pr_err("error in extension, unloading: %s",
 					hook_node->name);
+			__battery_hook_unregister(hook_node, 0);
 		}
 	}
 	mutex_unlock(&hook_mutex);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 442a9e24f439..917f77f4cb55 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2042,7 +2042,7 @@ static const struct dmi_system_id acpi_ec_no_wakeup[] = {
 		.ident = "Thinkpad X1 Carbon 6th",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
-			DMI_MATCH(DMI_PRODUCT_NAME, "20KGS3JF01"),
+			DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
 		},
 	},
 	{ },
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index d15814e1727f..7c479002e798 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 	const guid_t *guid;
 	int rc, i;
 
+	if (cmd_rc)
+		*cmd_rc = -EINVAL;
 	func = cmd;
 	if (cmd == ND_CMD_CALL) {
 		call_pkg = buf;
@@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
 		 * If we return an error (like elsewhere) then caller wouldn't
 		 * be able to rely upon data returned to make calculation.
 		 */
+		if (cmd_rc)
+			*cmd_rc = 0;
 		return 0;
 	}
 
@@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev,
 
 		mutex_lock(&acpi_desc->init_mutex);
 		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
-				work_busy(&acpi_desc->dwork.work)
+				acpi_desc->scrub_busy
 				&& !acpi_desc->cancel ? "+\n" : "\n");
 		mutex_unlock(&acpi_desc->init_mutex);
 	}
@@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
 	return 0;
 }
 
+static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
+{
+	lockdep_assert_held(&acpi_desc->init_mutex);
+
+	acpi_desc->scrub_busy = 1;
+	/* note this should only be set from within the workqueue */
+	if (tmo)
+		acpi_desc->scrub_tmo = tmo;
+	queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
+}
+
+static void sched_ars(struct acpi_nfit_desc *acpi_desc)
+{
+	__sched_ars(acpi_desc, 0);
+}
+
+static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
+{
+	lockdep_assert_held(&acpi_desc->init_mutex);
+
+	acpi_desc->scrub_busy = 0;
+	acpi_desc->scrub_count++;
+	if (acpi_desc->scrub_count_state)
+		sysfs_notify_dirent(acpi_desc->scrub_count_state);
+}
+
 static void acpi_nfit_scrub(struct work_struct *work)
 {
 	struct acpi_nfit_desc *acpi_desc;
@@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work)
 	mutex_lock(&acpi_desc->init_mutex);
 	query_rc = acpi_nfit_query_poison(acpi_desc);
 	tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
-	if (tmo) {
-		queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
-		acpi_desc->scrub_tmo = tmo;
-	} else {
-		acpi_desc->scrub_count++;
-		if (acpi_desc->scrub_count_state)
-			sysfs_notify_dirent(acpi_desc->scrub_count_state);
-	}
+	if (tmo)
+		__sched_ars(acpi_desc, tmo);
+	else
+		notify_ars_done(acpi_desc);
 	memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
 	mutex_unlock(&acpi_desc->init_mutex);
 }
@@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
 			break;
 	}
 
-	queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+	sched_ars(acpi_desc);
 	return 0;
 }
 
@@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
 		}
 	}
 	if (scheduled) {
-		queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+		sched_ars(acpi_desc);
 		dev_dbg(dev, "ars_scan triggered\n");
 	}
 	mutex_unlock(&acpi_desc->init_mutex);
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 7d15856a739f..a97ff42fe311 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -203,6 +203,7 @@ struct acpi_nfit_desc {
 	unsigned int max_ars;
 	unsigned int scrub_count;
 	unsigned int scrub_mode;
+	unsigned int scrub_busy:1;
 	unsigned int cancel:1;
 	unsigned long dimm_cmd_force_en;
 	unsigned long bus_cmd_force_en;
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index e5ea1974d1e3..d1e26cb599bf 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -481,8 +481,14 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
 	if (cpu_node) {
 		cpu_node = acpi_find_processor_package_id(table, cpu_node,
 							  level, flag);
-		/* Only the first level has a guaranteed id */
-		if (level == 0)
+		/*
+		 * As per specification if the processor structure represents
+		 * an actual processor, then ACPI processor ID must be valid.
+		 * For processor containers ACPI_PPTT_ACPI_PROCESSOR_ID_VALID
+		 * should be set if the UID is valid
+		 */
+		if (level == 0 ||
+		    cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
 			return cpu_node->acpi_processor_id;
 		return ACPI_PTR_DIFF(cpu_node, table);
 	}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2b16e7c8fff3..39b181d6bd0d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -398,7 +398,6 @@ config SATA_DWC_VDEBUG
 
 config SATA_HIGHBANK
 	tristate "Calxeda Highbank SATA support"
-	depends on HAS_DMA
 	depends on ARCH_HIGHBANK || COMPILE_TEST
 	help
 	  This option enables support for the Calxeda Highbank SoC's
@@ -408,7 +407,6 @@ config SATA_HIGHBANK
 
 config SATA_MV
 	tristate "Marvell SATA support"
-	depends on HAS_DMA
 	depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
 		   ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
 	select GENERIC_PHY
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 738fb22978dd..b2b9eba1d214 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -400,6 +400,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
 	{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
 	{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
+	{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
 
 	/* JMicron 360/1/3/5/6, match class to avoid IDE function */
 	{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1280,6 +1281,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
 	return strcmp(buf, dmi->driver_data) < 0;
 }
 
+static bool ahci_broken_lpm(struct pci_dev *pdev)
+{
+	static const struct dmi_system_id sysids[] = {
+		/* Various Lenovo 50 series have LPM issues with older BIOSen */
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
+			},
+			.driver_data = "20180406", /* 1.31 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
+			},
+			.driver_data = "20180420", /* 1.28 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
+			},
+			.driver_data = "20180315", /* 1.33 */
+		},
+		{
+			.matches = {
+				DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+				DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
+			},
+			/*
+			 * Note date based on release notes, 2.35 has been
+			 * reported to be good, but I've been unable to get
+			 * a hold of the reporter to get the DMI BIOS date.
+			 * TODO: fix this.
+			 */
+			.driver_data = "20180310", /* 2.35 */
+		},
+		{ }	/* terminate list */
+	};
+	const struct dmi_system_id *dmi = dmi_first_match(sysids);
+	int year, month, date;
+	char buf[9];
+
+	if (!dmi)
+		return false;
+
+	dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
+	return strcmp(buf, dmi->driver_data) < 0;
+}
+
 static bool ahci_broken_online(struct pci_dev *pdev)
 {
 #define ENCODE_BUSDEVFN(bus, slot, func)			\
@@ -1694,6 +1748,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 			"quirky BIOS, skipping spindown on poweroff\n");
 	}
 
+	if (ahci_broken_lpm(pdev)) {
+		pi.flags |= ATA_FLAG_NO_LPM;
+		dev_warn(&pdev->dev,
+			 "BIOS update required for Link Power Management support\n");
+	}
+
 	if (ahci_broken_suspend(pdev)) {
 		hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
 		dev_warn(&pdev->dev,
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 0045dacd814b..72d90b4c3aae 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -82,7 +82,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
  *
  * Return: 0 on success; Error code otherwise.
  */
-int ahci_mvebu_stop_engine(struct ata_port *ap)
+static int ahci_mvebu_stop_engine(struct ata_port *ap)
 {
 	void __iomem *port_mmio = ahci_port_base(ap);
 	u32 tmp, port_fbs;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 965842a08743..09620c2ffa0f 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -35,6 +35,7 @@
 #include <linux/kernel.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
+#include <linux/nospec.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
@@ -1146,10 +1147,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
 
 	/* get the slot number from the message */
 	pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
-	if (pmp < EM_MAX_SLOTS)
+	if (pmp < EM_MAX_SLOTS) {
+		pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
 		emp = &pp->em_priv[pmp];
-	else
+	} else {
 		return -EINVAL;
+	}
 
 	/* mask off the activity bits if we are in sw_activity
 	 * mode, user should turn off sw_activity before setting
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 27d15ed7fa3d..cc71c63df381 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2493,6 +2493,9 @@ int ata_dev_configure(struct ata_device *dev)
 	    (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
 		dev->horkage |= ATA_HORKAGE_NOLPM;
 
+	if (ap->flags & ATA_FLAG_NO_LPM)
+		dev->horkage |= ATA_HORKAGE_NOLPM;
+
 	if (dev->horkage & ATA_HORKAGE_NOLPM) {
 		ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
 		dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index d5412145d76d..01306c018398 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -614,8 +614,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
614 list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) { 614 list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
615 struct ata_queued_cmd *qc; 615 struct ata_queued_cmd *qc;
616 616
617 for (i = 0; i < ATA_MAX_QUEUE; i++) { 617 ata_qc_for_each_raw(ap, qc, i) {
618 qc = __ata_qc_from_tag(ap, i);
619 if (qc->flags & ATA_QCFLAG_ACTIVE && 618 if (qc->flags & ATA_QCFLAG_ACTIVE &&
620 qc->scsicmd == scmd) 619 qc->scsicmd == scmd)
621 break; 620 break;
@@ -818,14 +817,13 @@ EXPORT_SYMBOL_GPL(ata_port_wait_eh);
818 817
819static int ata_eh_nr_in_flight(struct ata_port *ap) 818static int ata_eh_nr_in_flight(struct ata_port *ap)
820{ 819{
820 struct ata_queued_cmd *qc;
821 unsigned int tag; 821 unsigned int tag;
822 int nr = 0; 822 int nr = 0;
823 823
824 /* count only non-internal commands */ 824 /* count only non-internal commands */
825 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 825 ata_qc_for_each(ap, qc, tag) {
826 if (ata_tag_internal(tag)) 826 if (qc)
827 continue;
828 if (ata_qc_from_tag(ap, tag))
829 nr++; 827 nr++;
830 } 828 }
831 829
@@ -847,13 +845,13 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
847 goto out_unlock; 845 goto out_unlock;
848 846
849 if (cnt == ap->fastdrain_cnt) { 847 if (cnt == ap->fastdrain_cnt) {
848 struct ata_queued_cmd *qc;
850 unsigned int tag; 849 unsigned int tag;
851 850
852 /* No progress during the last interval, tag all 851 /* No progress during the last interval, tag all
853 * in-flight qcs as timed out and freeze the port. 852 * in-flight qcs as timed out and freeze the port.
854 */ 853 */
855 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 854 ata_qc_for_each(ap, qc, tag) {
856 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
857 if (qc) 855 if (qc)
858 qc->err_mask |= AC_ERR_TIMEOUT; 856 qc->err_mask |= AC_ERR_TIMEOUT;
859 } 857 }
@@ -999,6 +997,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
999 997
1000static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link) 998static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1001{ 999{
1000 struct ata_queued_cmd *qc;
1002 int tag, nr_aborted = 0; 1001 int tag, nr_aborted = 0;
1003 1002
1004 WARN_ON(!ap->ops->error_handler); 1003 WARN_ON(!ap->ops->error_handler);
@@ -1007,9 +1006,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
1007 ata_eh_set_pending(ap, 0); 1006 ata_eh_set_pending(ap, 0);
1008 1007
1009 /* include internal tag in iteration */ 1008 /* include internal tag in iteration */
1010 for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) { 1009 ata_qc_for_each_with_internal(ap, qc, tag) {
1011 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
1012
1013 if (qc && (!link || qc->dev->link == link)) { 1010 if (qc && (!link || qc->dev->link == link)) {
1014 qc->flags |= ATA_QCFLAG_FAILED; 1011 qc->flags |= ATA_QCFLAG_FAILED;
1015 ata_qc_complete(qc); 1012 ata_qc_complete(qc);
@@ -1712,9 +1709,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
1712 return; 1709 return;
1713 1710
1714 /* has LLDD analyzed already? */ 1711 /* has LLDD analyzed already? */
1715 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1712 ata_qc_for_each_raw(ap, qc, tag) {
1716 qc = __ata_qc_from_tag(ap, tag);
1717
1718 if (!(qc->flags & ATA_QCFLAG_FAILED)) 1713 if (!(qc->flags & ATA_QCFLAG_FAILED))
1719 continue; 1714 continue;
1720 1715
@@ -2136,6 +2131,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
2136{ 2131{
2137 struct ata_port *ap = link->ap; 2132 struct ata_port *ap = link->ap;
2138 struct ata_eh_context *ehc = &link->eh_context; 2133 struct ata_eh_context *ehc = &link->eh_context;
2134 struct ata_queued_cmd *qc;
2139 struct ata_device *dev; 2135 struct ata_device *dev;
2140 unsigned int all_err_mask = 0, eflags = 0; 2136 unsigned int all_err_mask = 0, eflags = 0;
2141 int tag, nr_failed = 0, nr_quiet = 0; 2137 int tag, nr_failed = 0, nr_quiet = 0;
@@ -2168,9 +2164,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
2168 2164
2169 all_err_mask |= ehc->i.err_mask; 2165 all_err_mask |= ehc->i.err_mask;
2170 2166
2171 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2167 ata_qc_for_each_raw(ap, qc, tag) {
2172 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2173
2174 if (!(qc->flags & ATA_QCFLAG_FAILED) || 2168 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2175 ata_dev_phys_link(qc->dev) != link) 2169 ata_dev_phys_link(qc->dev) != link)
2176 continue; 2170 continue;
@@ -2436,6 +2430,7 @@ static void ata_eh_link_report(struct ata_link *link)
2436{ 2430{
2437 struct ata_port *ap = link->ap; 2431 struct ata_port *ap = link->ap;
2438 struct ata_eh_context *ehc = &link->eh_context; 2432 struct ata_eh_context *ehc = &link->eh_context;
2433 struct ata_queued_cmd *qc;
2439 const char *frozen, *desc; 2434 const char *frozen, *desc;
2440 char tries_buf[6] = ""; 2435 char tries_buf[6] = "";
2441 int tag, nr_failed = 0; 2436 int tag, nr_failed = 0;
@@ -2447,9 +2442,7 @@ static void ata_eh_link_report(struct ata_link *link)
2447 if (ehc->i.desc[0] != '\0') 2442 if (ehc->i.desc[0] != '\0')
2448 desc = ehc->i.desc; 2443 desc = ehc->i.desc;
2449 2444
2450 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2445 ata_qc_for_each_raw(ap, qc, tag) {
2451 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2452
2453 if (!(qc->flags & ATA_QCFLAG_FAILED) || 2446 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2454 ata_dev_phys_link(qc->dev) != link || 2447 ata_dev_phys_link(qc->dev) != link ||
2455 ((qc->flags & ATA_QCFLAG_QUIET) && 2448 ((qc->flags & ATA_QCFLAG_QUIET) &&
@@ -2511,8 +2504,7 @@ static void ata_eh_link_report(struct ata_link *link)
2511 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 2504 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
2512#endif 2505#endif
2513 2506
2514 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 2507 ata_qc_for_each_raw(ap, qc, tag) {
2515 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2516 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf; 2508 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2517 char data_buf[20] = ""; 2509 char data_buf[20] = "";
2518 char cdb_buf[70] = ""; 2510 char cdb_buf[70] = "";
@@ -3992,12 +3984,11 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3992 */ 3984 */
3993void ata_eh_finish(struct ata_port *ap) 3985void ata_eh_finish(struct ata_port *ap)
3994{ 3986{
3987 struct ata_queued_cmd *qc;
3995 int tag; 3988 int tag;
3996 3989
3997 /* retry or finish qcs */ 3990 /* retry or finish qcs */
3998 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 3991 ata_qc_for_each_raw(ap, qc, tag) {
3999 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
4000
4001 if (!(qc->flags & ATA_QCFLAG_FAILED)) 3992 if (!(qc->flags & ATA_QCFLAG_FAILED))
4002 continue; 3993 continue;
4003 3994
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 6a91d04351d9..aad1b01447de 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3805,10 +3805,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
 	 */
 		goto invalid_param_len;
 	}
-	if (block > dev->n_sectors)
-		goto out_of_range;
 
 	all = cdb[14] & 0x1;
+	if (all) {
+		/*
+		 * Ignore the block address (zone ID) as defined by ZBC.
+		 */
+		block = 0;
+	} else if (block >= dev->n_sectors) {
+		/*
+		 * Block must be a valid zone ID (a zone start LBA).
+		 */
+		fp = 2;
+		goto invalid_fld;
+	}
 
 	if (ata_ncq_enabled(qc->dev) &&
 	    ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
@@ -3837,10 +3847,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
 invalid_fld:
 	ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
 	return 1;
-out_of_range:
-	/* "Logical Block Address out of range" */
-	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00);
-	return 1;
 invalid_param_len:
 	/* "Parameter list length error" */
 	ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index b8d9cfc60374..4dc528bf8e85 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -395,12 +395,6 @@ static inline unsigned int sata_fsl_tag(unsigned int tag,
 {
 	/* We let libATA core do actual (queue) tag allocation */
 
-	/* all non NCQ/queued commands should have tag#0 */
-	if (ata_tag_internal(tag)) {
-		DPRINTK("mapping internal cmds to tag#0\n");
-		return 0;
-	}
-
 	if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
 		DPRINTK("tag %d invalid : out of range\n", tag);
 		return 0;
@@ -1229,8 +1223,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
 
 	/* Workaround for data length mismatch errata */
 	if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) {
-		for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
-			qc = ata_qc_from_tag(ap, tag);
+		ata_qc_for_each_with_internal(ap, qc, tag) {
 			if (qc && ata_is_atapi(qc->tf.protocol)) {
 				u32 hcontrol;
 				/* Set HControl[27] to clear error registers */
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 10ae11aa1926..72c9b922a77b 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -675,7 +675,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 	struct ata_port *ap = ata_shost_to_port(sdev->host);
 	struct nv_adma_port_priv *pp = ap->private_data;
 	struct nv_adma_port_priv *port0, *port1;
-	struct scsi_device *sdev0, *sdev1;
 	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
 	unsigned long segment_boundary, flags;
 	unsigned short sg_tablesize;
@@ -736,8 +735,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 
 	port0 = ap->host->ports[0]->private_data;
 	port1 = ap->host->ports[1]->private_data;
-	sdev0 = ap->host->ports[0]->link.device[0].sdev;
-	sdev1 = ap->host->ports[1]->link.device[0].sdev;
 	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
 	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
 		/*
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index ff81a576347e..82532c299bb5 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
 	skb_queue_head_init(&iadev->rx_dma_q);
 	iadev->rx_free_desc_qhead = NULL;
 
-	iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
+	iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
 	if (!iadev->rx_open) {
 		printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
 		    dev->number);
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a8d2eb0ceb8d..2c288d1f42bb 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
 				return -EFAULT;
 			if (pool < 0 || pool > ZATM_LAST_POOL)
 				return -EINVAL;
+			pool = array_index_nospec(pool,
+						  ZATM_LAST_POOL + 1);
 			if (copy_from_user(&info,
 			    &((struct zatm_pool_req __user *) arg)->info,
 			    sizeof(info))) return -EFAULT;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 1435d7281c66..6ebcd65d64b6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -434,14 +434,6 @@ re_probe:
 			goto probe_failed;
 	}
 
-	/*
-	 * Ensure devices are listed in devices_kset in correct order
-	 * It's important to move Dev to the end of devices_kset before
-	 * calling .probe, because it could be recursive and parent Dev
-	 * should always go first
-	 */
-	devices_kset_move_last(dev);
-
 	if (dev->bus->probe) {
 		ret = dev->bus->probe(dev);
 		if (ret)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index c298de8a8308..9e8484189034 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2235,7 +2235,7 @@ static void genpd_dev_pm_sync(struct device *dev)
 }
 
 static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
-				 unsigned int index)
+				 unsigned int index, bool power_on)
 {
 	struct of_phandle_args pd_args;
 	struct generic_pm_domain *pd;
@@ -2271,9 +2271,11 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
 	dev->pm_domain->detach = genpd_dev_pm_detach;
 	dev->pm_domain->sync = genpd_dev_pm_sync;
 
-	genpd_lock(pd);
-	ret = genpd_power_on(pd, 0);
-	genpd_unlock(pd);
+	if (power_on) {
+		genpd_lock(pd);
+		ret = genpd_power_on(pd, 0);
+		genpd_unlock(pd);
+	}
 
 	if (ret)
 		genpd_remove_device(pd, dev);
@@ -2307,7 +2309,7 @@ int genpd_dev_pm_attach(struct device *dev)
 				       "#power-domain-cells") != 1)
 		return 0;
 
-	return __genpd_dev_pm_attach(dev, dev->of_node, 0);
+	return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
 }
 EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
 
@@ -2359,14 +2361,14 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
 	}
 
 	/* Try to attach the device to the PM domain at the specified index. */
-	ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index);
+	ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
 	if (ret < 1) {
 		device_unregister(genpd_dev);
 		return ret ? ERR_PTR(ret) : NULL;
 	}
 
-	pm_runtime_set_active(genpd_dev);
 	pm_runtime_enable(genpd_dev);
+	genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
 
 	return genpd_dev;
 }
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 1476cb3439f4..5e793dd7adfb 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio)
 		what = COMPLETED_OK;
 	}
 
-	bio_put(req->private_bio);
 	req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
+	bio_put(bio);
 
 	/* not req_mod(), we need irqsave here! */
 	spin_lock_irqsave(&device->resource->req_lock, flags);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d6b6f434fd4b..4cb1d1be3cfb 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1613,6 +1613,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
 		arg = (unsigned long) compat_ptr(arg);
 	case LOOP_SET_FD:
 	case LOOP_CHANGE_FD:
+	case LOOP_SET_BLOCK_SIZE:
 		err = lo_ioctl(bdev, mode, cmd, arg);
 		break;
 	default:
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 74a05561b620..3fb95c8d9fd8 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -112,12 +112,16 @@ struct nbd_device {
112 struct task_struct *task_setup; 112 struct task_struct *task_setup;
113}; 113};
114 114
115#define NBD_CMD_REQUEUED 1
116
115struct nbd_cmd { 117struct nbd_cmd {
116 struct nbd_device *nbd; 118 struct nbd_device *nbd;
119 struct mutex lock;
117 int index; 120 int index;
118 int cookie; 121 int cookie;
119 struct completion send_complete;
120 blk_status_t status; 122 blk_status_t status;
123 unsigned long flags;
124 u32 cmd_cookie;
121}; 125};
122 126
123#if IS_ENABLED(CONFIG_DEBUG_FS) 127#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -146,6 +150,35 @@ static inline struct device *nbd_to_dev(struct nbd_device *nbd)
146 return disk_to_dev(nbd->disk); 150 return disk_to_dev(nbd->disk);
147} 151}
148 152
153static void nbd_requeue_cmd(struct nbd_cmd *cmd)
154{
155 struct request *req = blk_mq_rq_from_pdu(cmd);
156
157 if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
158 blk_mq_requeue_request(req, true);
159}
160
161#define NBD_COOKIE_BITS 32
162
163static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
164{
165 struct request *req = blk_mq_rq_from_pdu(cmd);
166 u32 tag = blk_mq_unique_tag(req);
167 u64 cookie = cmd->cmd_cookie;
168
169 return (cookie << NBD_COOKIE_BITS) | tag;
170}
171
172static u32 nbd_handle_to_tag(u64 handle)
173{
174 return (u32)handle;
175}
176
177static u32 nbd_handle_to_cookie(u64 handle)
178{
179 return (u32)(handle >> NBD_COOKIE_BITS);
180}
181
149static const char *nbdcmd_to_ascii(int cmd) 182static const char *nbdcmd_to_ascii(int cmd)
150{ 183{
151 switch (cmd) { 184 switch (cmd) {
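
For illustration, here is a minimal standalone sketch of the handle scheme the new nbd helpers implement: the 64-bit wire handle carries the blk-mq unique tag in the low word and a per-command reuse cookie in the high word, so a late reply to a timed-out earlier incarnation of the same tag can be rejected. Names and values below are illustrative, not the driver's.

```c
#include <stdint.h>
#include <stdio.h>

#define COOKIE_BITS 32

/* Pack a reuse-generation cookie and a request tag into one handle. */
static uint64_t make_handle(uint32_t cookie, uint32_t tag)
{
	return ((uint64_t)cookie << COOKIE_BITS) | tag;
}

static uint32_t handle_to_tag(uint64_t handle)    { return (uint32_t)handle; }
static uint32_t handle_to_cookie(uint64_t handle) { return (uint32_t)(handle >> COOKIE_BITS); }

int main(void)
{
	uint32_t cur_cookie = 2;		/* bumped on every fresh send of the tag */
	uint64_t stale = make_handle(1, 7);	/* reply to a previous send */
	uint64_t fresh = make_handle(2, 7);

	/* Same tag, but only the current cookie is accepted. */
	printf("stale: tag=%u ok=%d\n", handle_to_tag(stale),
	       handle_to_cookie(stale) == cur_cookie);
	printf("fresh: tag=%u ok=%d\n", handle_to_tag(fresh),
	       handle_to_cookie(fresh) == cur_cookie);
	return 0;
}
```

This mirrors the check in nbd_read_stat(): a reply whose cookie does not match cmd->cmd_cookie is dropped with -ENOENT instead of completing the wrong incarnation of the request.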
@@ -319,6 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
319 } 352 }
320 config = nbd->config; 353 config = nbd->config;
321 354
355 if (!mutex_trylock(&cmd->lock))
356 return BLK_EH_RESET_TIMER;
357
322 if (config->num_connections > 1) { 358 if (config->num_connections > 1) {
323 dev_err_ratelimited(nbd_to_dev(nbd), 359 dev_err_ratelimited(nbd_to_dev(nbd),
324 "Connection timed out, retrying (%d/%d alive)\n", 360 "Connection timed out, retrying (%d/%d alive)\n",
@@ -343,7 +379,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
343 nbd_mark_nsock_dead(nbd, nsock, 1); 379 nbd_mark_nsock_dead(nbd, nsock, 1);
344 mutex_unlock(&nsock->tx_lock); 380 mutex_unlock(&nsock->tx_lock);
345 } 381 }
346 blk_mq_requeue_request(req, true); 382 mutex_unlock(&cmd->lock);
383 nbd_requeue_cmd(cmd);
347 nbd_config_put(nbd); 384 nbd_config_put(nbd);
348 return BLK_EH_DONE; 385 return BLK_EH_DONE;
349 } 386 }
@@ -353,6 +390,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
353 } 390 }
354 set_bit(NBD_TIMEDOUT, &config->runtime_flags); 391 set_bit(NBD_TIMEDOUT, &config->runtime_flags);
355 cmd->status = BLK_STS_IOERR; 392 cmd->status = BLK_STS_IOERR;
393 mutex_unlock(&cmd->lock);
356 sock_shutdown(nbd); 394 sock_shutdown(nbd);
357 nbd_config_put(nbd); 395 nbd_config_put(nbd);
358done: 396done:
@@ -430,9 +468,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
430 struct iov_iter from; 468 struct iov_iter from;
431 unsigned long size = blk_rq_bytes(req); 469 unsigned long size = blk_rq_bytes(req);
432 struct bio *bio; 470 struct bio *bio;
471 u64 handle;
433 u32 type; 472 u32 type;
434 u32 nbd_cmd_flags = 0; 473 u32 nbd_cmd_flags = 0;
435 u32 tag = blk_mq_unique_tag(req);
436 int sent = nsock->sent, skip = 0; 474 int sent = nsock->sent, skip = 0;
437 475
438 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request)); 476 iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
@@ -474,6 +512,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
474 goto send_pages; 512 goto send_pages;
475 } 513 }
476 iov_iter_advance(&from, sent); 514 iov_iter_advance(&from, sent);
515 } else {
516 cmd->cmd_cookie++;
477 } 517 }
478 cmd->index = index; 518 cmd->index = index;
479 cmd->cookie = nsock->cookie; 519 cmd->cookie = nsock->cookie;
@@ -482,7 +522,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
482 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 522 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
483 request.len = htonl(size); 523 request.len = htonl(size);
484 } 524 }
485 memcpy(request.handle, &tag, sizeof(tag)); 525 handle = nbd_cmd_handle(cmd);
526 memcpy(request.handle, &handle, sizeof(handle));
486 527
487 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", 528 dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
488 req, nbdcmd_to_ascii(type), 529 req, nbdcmd_to_ascii(type),
@@ -500,6 +541,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
500 nsock->pending = req; 541 nsock->pending = req;
501 nsock->sent = sent; 542 nsock->sent = sent;
502 } 543 }
544 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
503 return BLK_STS_RESOURCE; 545 return BLK_STS_RESOURCE;
504 } 546 }
505 dev_err_ratelimited(disk_to_dev(nbd->disk), 547 dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -541,6 +583,7 @@ send_pages:
541 */ 583 */
542 nsock->pending = req; 584 nsock->pending = req;
543 nsock->sent = sent; 585 nsock->sent = sent;
586 set_bit(NBD_CMD_REQUEUED, &cmd->flags);
544 return BLK_STS_RESOURCE; 587 return BLK_STS_RESOURCE;
545 } 588 }
546 dev_err(disk_to_dev(nbd->disk), 589 dev_err(disk_to_dev(nbd->disk),
@@ -573,10 +616,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
573 struct nbd_reply reply; 616 struct nbd_reply reply;
574 struct nbd_cmd *cmd; 617 struct nbd_cmd *cmd;
575 struct request *req = NULL; 618 struct request *req = NULL;
619 u64 handle;
576 u16 hwq; 620 u16 hwq;
577 u32 tag; 621 u32 tag;
578 struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)}; 622 struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
579 struct iov_iter to; 623 struct iov_iter to;
624 int ret = 0;
580 625
581 reply.magic = 0; 626 reply.magic = 0;
582 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply)); 627 iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
@@ -594,8 +639,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
594 return ERR_PTR(-EPROTO); 639 return ERR_PTR(-EPROTO);
595 } 640 }
596 641
597 memcpy(&tag, reply.handle, sizeof(u32)); 642 memcpy(&handle, reply.handle, sizeof(handle));
598 643 tag = nbd_handle_to_tag(handle);
599 hwq = blk_mq_unique_tag_to_hwq(tag); 644 hwq = blk_mq_unique_tag_to_hwq(tag);
600 if (hwq < nbd->tag_set.nr_hw_queues) 645 if (hwq < nbd->tag_set.nr_hw_queues)
601 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], 646 req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
@@ -606,11 +651,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
606 return ERR_PTR(-ENOENT); 651 return ERR_PTR(-ENOENT);
607 } 652 }
608 cmd = blk_mq_rq_to_pdu(req); 653 cmd = blk_mq_rq_to_pdu(req);
654
655 mutex_lock(&cmd->lock);
656 if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
657 dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
658 req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
659 ret = -ENOENT;
660 goto out;
661 }
662 if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
663 dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
664 req);
665 ret = -ENOENT;
666 goto out;
667 }
609 if (ntohl(reply.error)) { 668 if (ntohl(reply.error)) {
610 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", 669 dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
611 ntohl(reply.error)); 670 ntohl(reply.error));
612 cmd->status = BLK_STS_IOERR; 671 cmd->status = BLK_STS_IOERR;
613 return cmd; 672 goto out;
614 } 673 }
615 674
616 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); 675 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
@@ -635,18 +694,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
635 if (nbd_disconnected(config) || 694 if (nbd_disconnected(config) ||
636 config->num_connections <= 1) { 695 config->num_connections <= 1) {
637 cmd->status = BLK_STS_IOERR; 696 cmd->status = BLK_STS_IOERR;
638 return cmd; 697 goto out;
639 } 698 }
640 return ERR_PTR(-EIO); 699 ret = -EIO;
700 goto out;
641 } 701 }
642 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", 702 dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
643 req, bvec.bv_len); 703 req, bvec.bv_len);
644 } 704 }
645 } else {
646 /* See the comment in nbd_queue_rq. */
647 wait_for_completion(&cmd->send_complete);
648 } 705 }
649 return cmd; 706out:
707 mutex_unlock(&cmd->lock);
708 return ret ? ERR_PTR(ret) : cmd;
650} 709}
651 710
652static void recv_work(struct work_struct *work) 711static void recv_work(struct work_struct *work)
@@ -805,7 +864,7 @@ again:
805 */ 864 */
806 blk_mq_start_request(req); 865 blk_mq_start_request(req);
807 if (unlikely(nsock->pending && nsock->pending != req)) { 866 if (unlikely(nsock->pending && nsock->pending != req)) {
808 blk_mq_requeue_request(req, true); 867 nbd_requeue_cmd(cmd);
809 ret = 0; 868 ret = 0;
810 goto out; 869 goto out;
811 } 870 }
@@ -818,7 +877,7 @@ again:
818 dev_err_ratelimited(disk_to_dev(nbd->disk), 877 dev_err_ratelimited(disk_to_dev(nbd->disk),
819 "Request send failed, requeueing\n"); 878 "Request send failed, requeueing\n");
820 nbd_mark_nsock_dead(nbd, nsock, 1); 879 nbd_mark_nsock_dead(nbd, nsock, 1);
821 blk_mq_requeue_request(req, true); 880 nbd_requeue_cmd(cmd);
822 ret = 0; 881 ret = 0;
823 } 882 }
824out: 883out:
@@ -842,7 +901,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
842 * that the server is misbehaving (or there was an error) before we're 901 * that the server is misbehaving (or there was an error) before we're
843 * done sending everything over the wire. 902 * done sending everything over the wire.
844 */ 903 */
845 init_completion(&cmd->send_complete); 904 mutex_lock(&cmd->lock);
905 clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
846 906
847 /* We can be called directly from the user space process, which means we 907 /* We can be called directly from the user space process, which means we
848 * could possibly have signals pending so our sendmsg will fail. In 908 * could possibly have signals pending so our sendmsg will fail. In
@@ -854,7 +914,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
854 ret = BLK_STS_IOERR; 914 ret = BLK_STS_IOERR;
855 else if (!ret) 915 else if (!ret)
856 ret = BLK_STS_OK; 916 ret = BLK_STS_OK;
857 complete(&cmd->send_complete); 917 mutex_unlock(&cmd->lock);
858 918
859 return ret; 919 return ret;
860} 920}
@@ -1460,6 +1520,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1460{ 1520{
1461 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); 1521 struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1462 cmd->nbd = set->driver_data; 1522 cmd->nbd = set->driver_data;
1523 cmd->flags = 0;
1524 mutex_init(&cmd->lock);
1463 return 0; 1525 return 0;
1464} 1526}
1465 1527
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 1cc29629d238..80d60f43db56 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -169,9 +169,9 @@ static int sysc_get_clocks(struct sysc *ddata)
169 const char *name; 169 const char *name;
170 int nr_fck = 0, nr_ick = 0, i, error = 0; 170 int nr_fck = 0, nr_ick = 0, i, error = 0;
171 171
172 ddata->clock_roles = devm_kzalloc(ddata->dev, 172 ddata->clock_roles = devm_kcalloc(ddata->dev,
173 sizeof(*ddata->clock_roles) *
174 SYSC_MAX_CLOCKS, 173 SYSC_MAX_CLOCKS,
174 sizeof(*ddata->clock_roles),
175 GFP_KERNEL); 175 GFP_KERNEL);
176 if (!ddata->clock_roles) 176 if (!ddata->clock_roles)
177 return -ENOMEM; 177 return -ENOMEM;
@@ -200,8 +200,8 @@ static int sysc_get_clocks(struct sysc *ddata)
200 return -EINVAL; 200 return -EINVAL;
201 } 201 }
202 202
203 ddata->clocks = devm_kzalloc(ddata->dev, 203 ddata->clocks = devm_kcalloc(ddata->dev,
204 sizeof(*ddata->clocks) * ddata->nr_clocks, 204 ddata->nr_clocks, sizeof(*ddata->clocks),
205 GFP_KERNEL); 205 GFP_KERNEL);
206 if (!ddata->clocks) 206 if (!ddata->clocks)
207 return -ENOMEM; 207 return -ENOMEM;
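
The two hunks above switch from an open-coded `sizeof(x) * n` multiply to devm_kcalloc(), which, like calloc(), takes count and element size separately so the multiplication can be checked for overflow. A userspace sketch of the failure mode being avoided (glibc behavior assumed for calloc's overflow check and for malloc(0) returning a non-NULL pointer):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = SIZE_MAX / 2 + 1;	/* pathological element count */
	size_t size = 2;		/* n * size wraps to 0 */

	/* Open-coded multiply: wraps to 0, so malloc "succeeds" with a
	 * zero-byte allocation that later element writes would overrun. */
	void *bad = malloc(n * size);

	/* calloc (like kcalloc) checks the multiply and refuses. */
	void *good = calloc(n, size);

	printf("n * size wraps to %zu\n", n * size);
	printf("malloc(n * size) = %p (bogus success)\n", bad);
	printf("calloc(n, size)  = %p (NULL, overflow detected)\n", good);

	free(bad);
	free(good);
	return 0;
}
```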
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index 53fe633df1e8..c9bf2c219841 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -11,7 +11,7 @@
11 11
12#include "agp.h" 12#include "agp.h"
13 13
14static int alpha_core_agp_vm_fault(struct vm_fault *vmf) 14static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)
15{ 15{
16 alpha_agp_info *agp = agp_bridge->dev_private_data; 16 alpha_agp_info *agp = agp_bridge->dev_private_data;
17 dma_addr_t dma_addr; 17 dma_addr_t dma_addr;
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index e50c29c97ca7..c69e39fdd02b 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -156,7 +156,7 @@ static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
156 156
157 /* Address to map to */ 157 /* Address to map to */
158 pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp); 158 pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
159 aperturebase = tmp << 25; 159 aperturebase = (u64)tmp << 25;
160 aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK); 160 aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
161 161
162 enable_gart_translation(hammer, gatt_table); 162 enable_gart_translation(hammer, gatt_table);
@@ -277,7 +277,7 @@ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
277 pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order); 277 pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
278 nb_order = (nb_order >> 1) & 7; 278 nb_order = (nb_order >> 1) & 7;
279 pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base); 279 pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
280 nb_aper = nb_base << 25; 280 nb_aper = (u64)nb_base << 25;
281 281
282 /* Northbridge seems to contain crap. Try the AGP bridge. */ 282 /* Northbridge seems to contain crap. Try the AGP bridge. */
283 283
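
Both hunks widen the 32-bit register value before shifting: `tmp << 25` is evaluated in 32-bit arithmetic, so aperture bases at or above 4 GiB lose their top bits unless the operand is cast to u64 first. A standalone demonstration:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tmp = 0x81;	/* e.g. an aperture base field from config space */

	/* Shift performed in 32 bits: bit 32 of the result wraps away ... */
	uint64_t truncated = tmp << 25;
	/* ... widening first keeps all 37 result bits. */
	uint64_t correct = (uint64_t)tmp << 25;

	printf("32-bit shift: 0x%" PRIx64 "\n", truncated);	/* 0x2000000 */
	printf("64-bit shift: 0x%" PRIx64 "\n", correct);	/* 0x102000000 */
	return 0;
}
```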
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index ad353be871bf..90ec010bffbd 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2088,8 +2088,10 @@ static int try_smi_init(struct smi_info *new_smi)
2088 return 0; 2088 return 0;
2089 2089
2090out_err: 2090out_err:
2091 ipmi_unregister_smi(new_smi->intf); 2091 if (new_smi->intf) {
2092 new_smi->intf = NULL; 2092 ipmi_unregister_smi(new_smi->intf);
2093 new_smi->intf = NULL;
2094 }
2093 2095
2094 kfree(init_name); 2096 kfree(init_name);
2095 2097
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
index fbfc05e3f3d1..bb882ab161fe 100644
--- a/drivers/char/ipmi/kcs_bmc.c
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -210,34 +210,23 @@ static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
210int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc) 210int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
211{ 211{
212 unsigned long flags; 212 unsigned long flags;
213 int ret = 0; 213 int ret = -ENODATA;
214 u8 status; 214 u8 status;
215 215
216 spin_lock_irqsave(&kcs_bmc->lock, flags); 216 spin_lock_irqsave(&kcs_bmc->lock, flags);
217 217
218 if (!kcs_bmc->running) { 218 status = read_status(kcs_bmc);
219 kcs_force_abort(kcs_bmc); 219 if (status & KCS_STATUS_IBF) {
220 ret = -ENODEV; 220 if (!kcs_bmc->running)
221 goto out_unlock; 221 kcs_force_abort(kcs_bmc);
222 } 222 else if (status & KCS_STATUS_CMD_DAT)
223 223 kcs_bmc_handle_cmd(kcs_bmc);
224 status = read_status(kcs_bmc) & (KCS_STATUS_IBF | KCS_STATUS_CMD_DAT); 224 else
225 225 kcs_bmc_handle_data(kcs_bmc);
226 switch (status) {
227 case KCS_STATUS_IBF | KCS_STATUS_CMD_DAT:
228 kcs_bmc_handle_cmd(kcs_bmc);
229 break;
230
231 case KCS_STATUS_IBF:
232 kcs_bmc_handle_data(kcs_bmc);
233 break;
234 226
235 default: 227 ret = 0;
236 ret = -ENODATA;
237 break;
238 } 228 }
239 229
240out_unlock:
241 spin_unlock_irqrestore(&kcs_bmc->lock, flags); 230 spin_unlock_irqrestore(&kcs_bmc->lock, flags);
242 231
243 return ret; 232 return ret;
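
The rewrite reads the status register once and treats the event as genuine only when IBF (input buffer full) is set, returning -ENODATA otherwise so a spurious or shared interrupt can be reported as unhandled. A standalone sketch of that dispatch shape (bit values and the error constant are illustrative, not the driver's):

```c
#include <stdbool.h>
#include <stdio.h>

#define STATUS_IBF     0x02	/* input buffer full */
#define STATUS_CMD_DAT 0x08	/* last host write was a command, not data */
#define ERR_ENODATA    (-61)

static int handle_event(unsigned char status, bool running)
{
	int ret = ERR_ENODATA;	/* nothing for us: spurious/shared IRQ */

	if (status & STATUS_IBF) {
		if (!running)
			printf("force abort\n");
		else if (status & STATUS_CMD_DAT)
			printf("handle command byte\n");
		else
			printf("handle data byte\n");
		ret = 0;
	}
	return ret;
}

int main(void)
{
	printf("ret=%d\n", handle_event(0, true));	/* -ENODATA */
	printf("ret=%d\n", handle_event(STATUS_IBF | STATUS_CMD_DAT, true));
	printf("ret=%d\n", handle_event(STATUS_IBF, false));
	return 0;
}
```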
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index ffeb60d3434c..df66a9dd0aae 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -708,6 +708,7 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
708#endif 708#endif
709 if (vma->vm_flags & VM_SHARED) 709 if (vma->vm_flags & VM_SHARED)
710 return shmem_zero_setup(vma); 710 return shmem_zero_setup(vma);
711 vma_set_anonymous(vma);
711 return 0; 712 return 0;
712} 713}
713 714
diff --git a/drivers/char/random.c b/drivers/char/random.c
index cd888d4ee605..bd449ad52442 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1895,14 +1895,22 @@ static int
1895write_pool(struct entropy_store *r, const char __user *buffer, size_t count) 1895write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
1896{ 1896{
1897 size_t bytes; 1897 size_t bytes;
1898 __u32 buf[16]; 1898 __u32 t, buf[16];
1899 const char __user *p = buffer; 1899 const char __user *p = buffer;
1900 1900
1901 while (count > 0) { 1901 while (count > 0) {
1902 int b, i = 0;
1903
1902 bytes = min(count, sizeof(buf)); 1904 bytes = min(count, sizeof(buf));
1903 if (copy_from_user(&buf, p, bytes)) 1905 if (copy_from_user(&buf, p, bytes))
1904 return -EFAULT; 1906 return -EFAULT;
1905 1907
1908 for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
1909 if (!arch_get_random_int(&t))
1910 break;
1911 buf[i] ^= t;
1912 }
1913
1906 count -= bytes; 1914 count -= bytes;
1907 p += bytes; 1915 p += bytes;
1908 1916
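
write_pool() now folds one word from the architectural RNG into each 32-bit chunk of user-supplied input; XOR with an independent value cannot reduce the input's entropy, and the loop simply stops if arch_get_random_int() reports that no RNG is available. A standalone analog with a deterministic stand-in generator (the xorshift stream below is illustrative only, not a hardware source):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Stand-in for arch_get_random_int(): a fixed xorshift stream keeps the
 * demo deterministic; a real caller would use a hardware source and
 * honor its failure return.
 */
static int fake_arch_rand(uint32_t *out)
{
	static uint32_t s = 0x12345678;

	s ^= s << 13; s ^= s >> 17; s ^= s << 5;
	*out = s;
	return 1;	/* 0 would mean "no architectural RNG" */
}

int main(void)
{
	uint32_t buf[4];
	size_t bytes = sizeof(buf), i = 0;
	uint32_t t;
	long b;

	memset(buf, 0xAA, sizeof(buf));	/* pretend user-supplied input */

	/* Fold one RNG word into each 32-bit chunk, as write_pool() does. */
	for (b = bytes; b > 0; b -= sizeof(uint32_t), i++) {
		if (!fake_arch_rand(&t))
			break;	/* RNG unavailable: keep remaining input as-is */
		buf[i] ^= t;
	}

	for (i = 0; i < 4; i++)
		printf("buf[%zu] = 0x%08x\n", i, buf[i]);
	return 0;
}
```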
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index ae40cbe770f0..0bb25dd009d1 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -96,7 +96,7 @@ obj-$(CONFIG_ARCH_SPRD) += sprd/
96obj-$(CONFIG_ARCH_STI) += st/ 96obj-$(CONFIG_ARCH_STI) += st/
97obj-$(CONFIG_ARCH_STRATIX10) += socfpga/ 97obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
98obj-$(CONFIG_ARCH_SUNXI) += sunxi/ 98obj-$(CONFIG_ARCH_SUNXI) += sunxi/
99obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/ 99obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/
100obj-$(CONFIG_ARCH_TEGRA) += tegra/ 100obj-$(CONFIG_ARCH_TEGRA) += tegra/
101obj-y += ti/ 101obj-y += ti/
102obj-$(CONFIG_CLK_UNIPHIER) += uniphier/ 102obj-$(CONFIG_CLK_UNIPHIER) += uniphier/
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 38b366b00c57..7b70a074095d 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -24,7 +24,7 @@
24#define ASPEED_MPLL_PARAM 0x20 24#define ASPEED_MPLL_PARAM 0x20
25#define ASPEED_HPLL_PARAM 0x24 25#define ASPEED_HPLL_PARAM 0x24
26#define AST2500_HPLL_BYPASS_EN BIT(20) 26#define AST2500_HPLL_BYPASS_EN BIT(20)
27#define AST2400_HPLL_STRAPPED BIT(18) 27#define AST2400_HPLL_PROGRAMMED BIT(18)
28#define AST2400_HPLL_BYPASS_EN BIT(17) 28#define AST2400_HPLL_BYPASS_EN BIT(17)
29#define ASPEED_MISC_CTRL 0x2c 29#define ASPEED_MISC_CTRL 0x2c
30#define UART_DIV13_EN BIT(12) 30#define UART_DIV13_EN BIT(12)
@@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
91 [ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */ 91 [ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
92 [ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */ 92 [ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
93 [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */ 93 [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
94 [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */ 94 [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
95 [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */ 95 [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
96 [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL }, 96 [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
97 [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */ 97 [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
98 [ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */ 98 [ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */
@@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw)
212{ 212{
213 struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw); 213 struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
214 u32 clk = BIT(gate->clock_idx); 214 u32 clk = BIT(gate->clock_idx);
215 u32 rst = BIT(gate->reset_idx);
215 u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk; 216 u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
216 u32 reg; 217 u32 reg;
217 218
219 /*
 220 * If the IP is in reset, treat the clock as not enabled;
221 * this happens with some clocks such as the USB one when
222 * coming from cold reset. Without this, aspeed_clk_enable()
223 * will fail to lift the reset.
224 */
225 if (gate->reset_idx >= 0) {
226 regmap_read(gate->map, ASPEED_RESET_CTRL, &reg);
227 if (reg & rst)
228 return 0;
229 }
230
218 regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg); 231 regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
219 232
220 return ((reg & clk) == enval) ? 1 : 0; 233 return ((reg & clk) == enval) ? 1 : 0;
@@ -565,29 +578,45 @@ builtin_platform_driver(aspeed_clk_driver);
565static void __init aspeed_ast2400_cc(struct regmap *map) 578static void __init aspeed_ast2400_cc(struct regmap *map)
566{ 579{
567 struct clk_hw *hw; 580 struct clk_hw *hw;
568 u32 val, freq, div; 581 u32 val, div, clkin, hpll;
582 const u16 hpll_rates[][4] = {
583 {384, 360, 336, 408},
584 {400, 375, 350, 425},
585 };
586 int rate;
569 587
570 /* 588 /*
571 * CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by 589 * CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
572 * strapping 590 * strapping
573 */ 591 */
574 regmap_read(map, ASPEED_STRAP, &val); 592 regmap_read(map, ASPEED_STRAP, &val);
575 if (val & CLKIN_25MHZ_EN) 593 rate = (val >> 8) & 3;
576 freq = 25000000; 594 if (val & CLKIN_25MHZ_EN) {
577 else if (val & AST2400_CLK_SOURCE_SEL) 595 clkin = 25000000;
578 freq = 48000000; 596 hpll = hpll_rates[1][rate];
579 else 597 } else if (val & AST2400_CLK_SOURCE_SEL) {
580 freq = 24000000; 598 clkin = 48000000;
581 hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq); 599 hpll = hpll_rates[0][rate];
582 pr_debug("clkin @%u MHz\n", freq / 1000000); 600 } else {
601 clkin = 24000000;
602 hpll = hpll_rates[0][rate];
603 }
604 hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
605 pr_debug("clkin @%u MHz\n", clkin / 1000000);
583 606
584 /* 607 /*
 585 * High-speed PLL clock derived from the crystal. This is the CPU clock, 608 * High-speed PLL clock derived from the crystal. This is the CPU clock,
586 * and we assume that it is enabled 609 * and we assume that it is enabled. It can be configured through the
610 * HPLL_PARAM register, or set to a specified frequency by strapping.
587 */ 611 */
588 regmap_read(map, ASPEED_HPLL_PARAM, &val); 612 regmap_read(map, ASPEED_HPLL_PARAM, &val);
589 WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured"); 613 if (val & AST2400_HPLL_PROGRAMMED)
590 aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val); 614 hw = aspeed_ast2400_calc_pll("hpll", val);
615 else
616 hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
617 hpll * 1000000);
618
619 aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
591 620
592 /* 621 /*
593 * Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK) 622 * Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9760b526ca31..e2ed078abd90 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -24,7 +24,6 @@
24#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
25#include <linux/sched.h> 25#include <linux/sched.h>
26#include <linux/clkdev.h> 26#include <linux/clkdev.h>
27#include <linux/stringify.h>
28 27
29#include "clk.h" 28#include "clk.h"
30 29
@@ -2559,7 +2558,7 @@ static const struct {
2559 unsigned long flag; 2558 unsigned long flag;
2560 const char *name; 2559 const char *name;
2561} clk_flags[] = { 2560} clk_flags[] = {
2562#define ENTRY(f) { f, __stringify(f) } 2561#define ENTRY(f) { f, #f }
2563 ENTRY(CLK_SET_RATE_GATE), 2562 ENTRY(CLK_SET_RATE_GATE),
2564 ENTRY(CLK_SET_PARENT_GATE), 2563 ENTRY(CLK_SET_PARENT_GATE),
2565 ENTRY(CLK_SET_RATE_PARENT), 2564 ENTRY(CLK_SET_RATE_PARENT),
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index aae62a5b8734..d1bbee19ed0f 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -672,7 +672,7 @@ static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap)
672 672
673 usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap); 673 usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap);
674 if (IS_ERR(usb1)) { 674 if (IS_ERR(usb1)) {
675 if (PTR_ERR(usb0) == -EPROBE_DEFER) 675 if (PTR_ERR(usb1) == -EPROBE_DEFER)
676 return -EPROBE_DEFER; 676 return -EPROBE_DEFER;
677 677
678 dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n", 678 dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n",
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index 6a42529d31a9..cc5614567a70 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -107,7 +107,7 @@ extern const struct davinci_psc_init_data of_da850_psc1_init_data;
107#ifdef CONFIG_ARCH_DAVINCI_DM355 107#ifdef CONFIG_ARCH_DAVINCI_DM355
108extern const struct davinci_psc_init_data dm355_psc_init_data; 108extern const struct davinci_psc_init_data dm355_psc_init_data;
109#endif 109#endif
110#ifdef CONFIG_ARCH_DAVINCI_DM356 110#ifdef CONFIG_ARCH_DAVINCI_DM365
111extern const struct davinci_psc_init_data dm365_psc_init_data; 111extern const struct davinci_psc_init_data dm365_psc_init_data;
112#endif 112#endif
113#ifdef CONFIG_ARCH_DAVINCI_DM644x 113#ifdef CONFIG_ARCH_DAVINCI_DM644x
diff --git a/drivers/clk/meson/clk-audio-divider.c b/drivers/clk/meson/clk-audio-divider.c
index 58f546e04807..e4cf96ba704e 100644
--- a/drivers/clk/meson/clk-audio-divider.c
+++ b/drivers/clk/meson/clk-audio-divider.c
@@ -51,7 +51,7 @@ static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
51 struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk); 51 struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
52 unsigned long divider; 52 unsigned long divider;
53 53
54 divider = meson_parm_read(clk->map, &adiv->div); 54 divider = meson_parm_read(clk->map, &adiv->div) + 1;
55 55
56 return DIV_ROUND_UP_ULL((u64)parent_rate, divider); 56 return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
57} 57}
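
The one-character fix reflects the common register convention that the stored field is the divider minus one, so a field value of 0 means divide by 1. A minimal check of the corrected arithmetic:

```c
#include <stdio.h>

/* Register field -> divider, using the value-plus-one convention. */
static unsigned long recalc_rate(unsigned long parent, unsigned int field)
{
	unsigned long divider = field + 1;	/* field 0 == divide by 1 */

	return (parent + divider - 1) / divider;	/* DIV_ROUND_UP */
}

int main(void)
{
	/* Without the +1, a field of 0 would divide by zero and every
	 * recalculated rate would be one divider step too low. */
	printf("%lu\n", recalc_rate(48000000, 0));	/* 48000000 */
	printf("%lu\n", recalc_rate(48000000, 1));	/* 24000000 */
	return 0;
}
```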
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 240658404367..177fffb9ebef 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -498,6 +498,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
498 .ops = &clk_regmap_gate_ops, 498 .ops = &clk_regmap_gate_ops,
499 .parent_names = (const char *[]){ "fclk_div2_div" }, 499 .parent_names = (const char *[]){ "fclk_div2_div" },
500 .num_parents = 1, 500 .num_parents = 1,
501 .flags = CLK_IS_CRITICAL,
501 }, 502 },
502}; 503};
503 504
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 6860bd5a37c5..44e4e27eddad 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -35,6 +35,7 @@
35#define CLK_SEL 0x10 35#define CLK_SEL 0x10
36#define CLK_DIS 0x14 36#define CLK_DIS 0x14
37 37
38#define ARMADA_37XX_DVFS_LOAD_1 1
38#define LOAD_LEVEL_NR 4 39#define LOAD_LEVEL_NR 4
39 40
40#define ARMADA_37XX_NB_L0L1 0x18 41#define ARMADA_37XX_NB_L0L1 0x18
@@ -507,6 +508,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
507 return -EINVAL; 508 return -EINVAL;
508} 509}
509 510
 511/*
 512 * Switching the CPU from the L2 or L3 frequencies (300 and 200 MHz
 513 * respectively) to the L0 frequency (1.2 GHz) requires a significant
 514 * amount of time to let VDD stabilize to the appropriate
 515 * voltage. This amount of time is large enough that it cannot be
 516 * covered by the hardware countdown register. Due to this, the CPU
 517 * might start operating at L0 before the voltage is stabilized,
 518 * leading to CPU stalls.
 519 *
 520 * To work around this problem, we prevent switching directly from the
 521 * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
 522 * frequency in between. The sequence therefore becomes:
 523 * 1. First switch from L2/L3 (200/300 MHz) to L1 (600 MHz)
 524 * 2. Sleep 20ms to let the VDD voltage stabilize
 525 * 3. Then switch from L1 (600 MHz) to L0 (1200 MHz).
 526 */
527static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
528{
529 unsigned int cur_level;
530
531 if (rate != 1200 * 1000 * 1000)
532 return;
533
534 regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
535 cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
536 if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
537 return;
538
539 regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
540 ARMADA_37XX_NB_CPU_LOAD_MASK,
541 ARMADA_37XX_DVFS_LOAD_1);
542 msleep(20);
543}
544
510static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate, 545static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
511 unsigned long parent_rate) 546 unsigned long parent_rate)
512{ 547{
@@ -537,6 +572,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
537 */ 572 */
538 reg = ARMADA_37XX_NB_CPU_LOAD; 573 reg = ARMADA_37XX_NB_CPU_LOAD;
539 mask = ARMADA_37XX_NB_CPU_LOAD_MASK; 574 mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
575
576 clk_pm_cpu_set_rate_wa(rate, base);
577
540 regmap_update_bits(base, reg, mask, load_level); 578 regmap_update_bits(base, reg, mask, load_level);
541 579
542 return rate; 580 return rate;
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 9f35b3fe1d97..ff8d66fd94e6 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -2781,6 +2781,7 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
2781 2781
2782static struct clk_branch gcc_ufs_tx_symbol_0_clk = { 2782static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
2783 .halt_reg = 0x75018, 2783 .halt_reg = 0x75018,
2784 .halt_check = BRANCH_HALT_SKIP,
2784 .clkr = { 2785 .clkr = {
2785 .enable_reg = 0x75018, 2786 .enable_reg = 0x75018,
2786 .enable_mask = BIT(0), 2787 .enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 1a25ee4f3658..4b20d1b67a1b 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -2910,6 +2910,7 @@ static struct gdsc mmagic_bimc_gdsc = {
2910 .name = "mmagic_bimc", 2910 .name = "mmagic_bimc",
2911 }, 2911 },
2912 .pwrsts = PWRSTS_OFF_ON, 2912 .pwrsts = PWRSTS_OFF_ON,
2913 .flags = ALWAYS_ON,
2913}; 2914};
2914 2915
2915static struct gdsc mmagic_video_gdsc = { 2916static struct gdsc mmagic_video_gdsc = {
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index acaa14cfa25c..49454700f2e5 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -1,24 +1,24 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2# Common objects 2# Common objects
3lib-$(CONFIG_SUNXI_CCU) += ccu_common.o 3obj-y += ccu_common.o
4lib-$(CONFIG_SUNXI_CCU) += ccu_mmc_timing.o 4obj-y += ccu_mmc_timing.o
5lib-$(CONFIG_SUNXI_CCU) += ccu_reset.o 5obj-y += ccu_reset.o
6 6
7# Base clock types 7# Base clock types
8lib-$(CONFIG_SUNXI_CCU) += ccu_div.o 8obj-y += ccu_div.o
9lib-$(CONFIG_SUNXI_CCU) += ccu_frac.o 9obj-y += ccu_frac.o
10lib-$(CONFIG_SUNXI_CCU) += ccu_gate.o 10obj-y += ccu_gate.o
11lib-$(CONFIG_SUNXI_CCU) += ccu_mux.o 11obj-y += ccu_mux.o
12lib-$(CONFIG_SUNXI_CCU) += ccu_mult.o 12obj-y += ccu_mult.o
13lib-$(CONFIG_SUNXI_CCU) += ccu_phase.o 13obj-y += ccu_phase.o
14lib-$(CONFIG_SUNXI_CCU) += ccu_sdm.o 14obj-y += ccu_sdm.o
15 15
16# Multi-factor clocks 16# Multi-factor clocks
17lib-$(CONFIG_SUNXI_CCU) += ccu_nk.o 17obj-y += ccu_nk.o
18lib-$(CONFIG_SUNXI_CCU) += ccu_nkm.o 18obj-y += ccu_nkm.o
19lib-$(CONFIG_SUNXI_CCU) += ccu_nkmp.o 19obj-y += ccu_nkmp.o
20lib-$(CONFIG_SUNXI_CCU) += ccu_nm.o 20obj-y += ccu_nm.o
21lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o 21obj-y += ccu_mp.o
22 22
23# SoC support 23# SoC support
24obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o 24obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
@@ -38,12 +38,3 @@ obj-$(CONFIG_SUN8I_R40_CCU) += ccu-sun8i-r40.o
38obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o 38obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o
39obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o 39obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o
40obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o 40obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o
41
 42# The lib-y file goal is supposed to work only in arch/*/lib or lib/. In our
43# case, we want to use that goal, but even though lib.a will be properly
44# generated, it will not be linked in, eventually resulting in a linker error
45# for missing symbols.
46#
47# We can work around that by explicitly adding lib.a to the obj-y goal. This is
48# an undocumented behaviour, but works well for now.
49obj-$(CONFIG_SUNXI_CCU) += lib.a
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 57cb2f00fc07..d8c7f5750cdb 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -735,7 +735,7 @@ static void __arch_timer_setup(unsigned type,
735 clk->features |= CLOCK_EVT_FEAT_DYNIRQ; 735 clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
736 clk->name = "arch_mem_timer"; 736 clk->name = "arch_mem_timer";
737 clk->rating = 400; 737 clk->rating = 400;
738 clk->cpumask = cpu_all_mask; 738 clk->cpumask = cpu_possible_mask;
739 if (arch_timer_mem_use_virtual) { 739 if (arch_timer_mem_use_virtual) {
740 clk->set_state_shutdown = arch_timer_shutdown_virt_mem; 740 clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
741 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem; 741 clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index ece120da3353..3c3971256130 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2394,6 +2394,18 @@ static bool __init intel_pstate_no_acpi_pss(void)
2394 return true; 2394 return true;
2395} 2395}
2396 2396
2397static bool __init intel_pstate_no_acpi_pcch(void)
2398{
2399 acpi_status status;
2400 acpi_handle handle;
2401
2402 status = acpi_get_handle(NULL, "\\_SB", &handle);
2403 if (ACPI_FAILURE(status))
2404 return true;
2405
2406 return !acpi_has_method(handle, "PCCH");
2407}
2408
2397static bool __init intel_pstate_has_acpi_ppc(void) 2409static bool __init intel_pstate_has_acpi_ppc(void)
2398{ 2410{
2399 int i; 2411 int i;
@@ -2453,7 +2465,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
2453 2465
2454 switch (plat_info[idx].data) { 2466 switch (plat_info[idx].data) {
2455 case PSS: 2467 case PSS:
2456 return intel_pstate_no_acpi_pss(); 2468 if (!intel_pstate_no_acpi_pss())
2469 return false;
2470
2471 return intel_pstate_no_acpi_pcch();
2457 case PPC: 2472 case PPC:
2458 return intel_pstate_has_acpi_ppc() && !force_load; 2473 return intel_pstate_has_acpi_ppc() && !force_load;
2459 } 2474 }
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 3f0ce2ae35ee..0c56c9759672 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -580,6 +580,10 @@ static int __init pcc_cpufreq_init(void)
580{ 580{
581 int ret; 581 int ret;
582 582
583 /* Skip initialization if another cpufreq driver is there. */
584 if (cpufreq_get_current_driver())
585 return 0;
586
583 if (acpi_disabled) 587 if (acpi_disabled)
584 return 0; 588 return 0;
585 589
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index 29389accf3e9..efc9a7ae4857 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -183,6 +183,7 @@ static struct platform_driver qcom_cpufreq_kryo_driver = {
183static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = { 183static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
184 { .compatible = "qcom,apq8096", }, 184 { .compatible = "qcom,apq8096", },
185 { .compatible = "qcom,msm8996", }, 185 { .compatible = "qcom,msm8996", },
186 {}
186}; 187};
187 188
188/* 189/*
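
The added `{}` is the sentinel that of_device_id match loops stop on; without it the walk runs past the end of the table into whatever memory follows. A standalone analog of sentinel-terminated matching (the struct and names are illustrative, not the OF API):

```c
#include <stdio.h>
#include <string.h>

struct match_id {
	const char *compatible;
};

/* Table must end with an all-zero sentinel, like of_device_id tables. */
static const struct match_id match_list[] = {
	{ .compatible = "qcom,apq8096" },
	{ .compatible = "qcom,msm8996" },
	{ /* sentinel */ NULL }
};

static const struct match_id *find_match(const char *compat)
{
	const struct match_id *id;

	/* The walk stops only at the sentinel; omit it and this loop
	 * reads past the array. */
	for (id = match_list; id->compatible; id++)
		if (!strcmp(id->compatible, compat))
			return id;
	return NULL;
}

int main(void)
{
	printf("%s\n", find_match("qcom,msm8996") ? "matched" : "no match");
	printf("%s\n", find_match("vendor,other") ? "matched" : "no match");
	return 0;
}
```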
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index de2f8297a210..108c37fca782 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -189,14 +189,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
189 189
190 /* prevent private mappings from being established */ 190 /* prevent private mappings from being established */
191 if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) { 191 if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
192 dev_info(dev, "%s: %s: fail, attempted private mapping\n", 192 dev_info_ratelimited(dev,
193 "%s: %s: fail, attempted private mapping\n",
193 current->comm, func); 194 current->comm, func);
194 return -EINVAL; 195 return -EINVAL;
195 } 196 }
196 197
197 mask = dax_region->align - 1; 198 mask = dax_region->align - 1;
198 if (vma->vm_start & mask || vma->vm_end & mask) { 199 if (vma->vm_start & mask || vma->vm_end & mask) {
199 dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n", 200 dev_info_ratelimited(dev,
201 "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
200 current->comm, func, vma->vm_start, vma->vm_end, 202 current->comm, func, vma->vm_start, vma->vm_end,
201 mask); 203 mask);
202 return -EINVAL; 204 return -EINVAL;
@@ -204,13 +206,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
204 206
205 if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV 207 if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
206 && (vma->vm_flags & VM_DONTCOPY) == 0) { 208 && (vma->vm_flags & VM_DONTCOPY) == 0) {
207 dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n", 209 dev_info_ratelimited(dev,
210 "%s: %s: fail, dax range requires MADV_DONTFORK\n",
208 current->comm, func); 211 current->comm, func);
209 return -EINVAL; 212 return -EINVAL;
210 } 213 }
211 214
212 if (!vma_is_dax(vma)) { 215 if (!vma_is_dax(vma)) {
213 dev_info(dev, "%s: %s: fail, vma is not DAX capable\n", 216 dev_info_ratelimited(dev,
217 "%s: %s: fail, vma is not DAX capable\n",
214 current->comm, func); 218 current->comm, func);
215 return -EINVAL; 219 return -EINVAL;
216 } 220 }
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index fa31cccbe04f..6bfa217ed6d0 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -794,7 +794,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
794 struct k3_dma_dev *d = ofdma->of_dma_data; 794 struct k3_dma_dev *d = ofdma->of_dma_data;
795 unsigned int request = dma_spec->args[0]; 795 unsigned int request = dma_spec->args[0];
796 796
797 if (request > d->dma_requests) 797 if (request >= d->dma_requests)
798 return NULL; 798 return NULL;
799 799
800 return dma_get_slave_channel(&(d->chans[request].vc.chan)); 800 return dma_get_slave_channel(&(d->chans[request].vc.chan));
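
With d->dma_requests channels, the valid request indices are 0 through dma_requests - 1, so the guard must reject request == dma_requests; the old `>` test allowed an off-by-one read past the channel array. A tiny demonstration:

```c
#include <stdio.h>

#define NR_CHANS 4

static const char *chan_name[NR_CHANS] = { "ch0", "ch1", "ch2", "ch3" };

static const char *lookup(unsigned int request)
{
	/* '>' here would wrongly accept request == NR_CHANS and index
	 * one element past the end of chan_name[]. */
	if (request >= NR_CHANS)
		return NULL;
	return chan_name[request];
}

int main(void)
{
	printf("3 -> %s\n", lookup(3));			/* last valid index */
	printf("4 -> %s\n", lookup(4) ? lookup(4) : "rejected");
	return 0;
}
```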
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index defcdde4d358..de0957fe9668 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3033,7 +3033,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
3033 pd->src_addr_widths = PL330_DMA_BUSWIDTHS; 3033 pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
3034 pd->dst_addr_widths = PL330_DMA_BUSWIDTHS; 3034 pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
3035 pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 3035 pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
3036 pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; 3036 pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
3037 pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ? 3037 pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
3038 1 : PL330_MAX_BURST); 3038 1 : PL330_MAX_BURST);
3039 3039
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 9b5ca8691f27..a4a931ddf6f6 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1485,7 +1485,11 @@ static int omap_dma_probe(struct platform_device *pdev)
1485 od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS; 1485 od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
1486 od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; 1486 od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
1487 od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 1487 od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1488 od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; 1488 if (__dma_omap15xx(od->plat->dma_attr))
1489 od->ddev.residue_granularity =
1490 DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1491 else
1492 od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1489 od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */ 1493 od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
1490 od->ddev.dev = &pdev->dev; 1494 od->ddev.dev = &pdev->dev;
1491 INIT_LIST_HEAD(&od->ddev.channels); 1495 INIT_LIST_HEAD(&od->ddev.channels);
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index dd4edd8f22ce..7fa793672a7a 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -455,8 +455,10 @@ static int altera_cvp_probe(struct pci_dev *pdev,
455 455
456 mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name, 456 mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name,
457 &altera_cvp_ops, conf); 457 &altera_cvp_ops, conf);
458 if (!mgr) 458 if (!mgr) {
459 return -ENOMEM; 459 ret = -ENOMEM;
460 goto err_unmap;
461 }
460 462
461 pci_set_drvdata(pdev, mgr); 463 pci_set_drvdata(pdev, mgr);
462 464
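
Instead of returning directly on allocation failure, the fix jumps to the existing err_unmap label so the BAR mapping acquired earlier in probe is undone on every error path. A standalone analog of this goto-unwind pattern (the resource stand-ins are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for resources a probe routine acquires. */
static void *map_bar(void)        { return malloc(16); }
static void unmap_bar(void *p)    { free(p); printf("unmapped\n"); }
static void *create_mgr(int fail) { return fail ? NULL : malloc(16); }

static int probe(int fail_mgr)
{
	void *bar, *mgr;
	int ret = 0;

	bar = map_bar();
	if (!bar)
		return -12;	/* -ENOMEM: nothing acquired yet */

	mgr = create_mgr(fail_mgr);
	if (!mgr) {
		ret = -12;
		goto err_unmap;	/* a bare return here would leak the mapping */
	}

	/* Success: a driver would keep both resources past probe; the
	 * demo releases them so the program stays leak-free. */
	free(mgr);
	free(bar);
	return 0;

err_unmap:
	unmap_bar(bar);
	return ret;
}

int main(void)
{
	printf("probe ok:   %d\n", probe(0));
	printf("probe fail: %d\n", probe(1));
	return 0;
}
```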
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index d3cf9502e7e7..58faeb1cef63 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -181,7 +181,11 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
181 fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node); 181 fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
182 fwspec.param_count = 2; 182 fwspec.param_count = 2;
183 fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET; 183 fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
184 fwspec.param[1] = IRQ_TYPE_NONE; 184 /*
185 * IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
186 * temporarily. Anyway, ->irq_set_type() will override it later.
187 */
188 fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
185 189
186 return irq_create_fwspec_mapping(&fwspec); 190 return irq_create_fwspec_mapping(&fwspec);
187} 191}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 28d968088131..53a14ee8ad6d 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -64,7 +64,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
64 * Note that active low is the default. 64 * Note that active low is the default.
65 */ 65 */
66 if (IS_ENABLED(CONFIG_REGULATOR) && 66 if (IS_ENABLED(CONFIG_REGULATOR) &&
67 (of_device_is_compatible(np, "reg-fixed-voltage") || 67 (of_device_is_compatible(np, "regulator-fixed") ||
68 of_device_is_compatible(np, "reg-fixed-voltage") ||
68 of_device_is_compatible(np, "regulator-gpio"))) { 69 of_device_is_compatible(np, "regulator-gpio"))) {
69 /* 70 /*
70 * The regulator GPIO handles are specified such that the 71 * The regulator GPIO handles are specified such that the
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index f4c474a95875..71efcf38f11b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -57,6 +57,10 @@
57#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8 57#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
58#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c 58#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
59#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68 59#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
60#define ACP_BT_PLAY_REGS_START 0x14970
61#define ACP_BT_PLAY_REGS_END 0x14a24
62#define ACP_BT_COMP1_REG_OFFSET 0xac
63#define ACP_BT_COMP2_REG_OFFSET 0xa8
60 64
61#define mmACP_PGFSM_RETAIN_REG 0x51c9 65#define mmACP_PGFSM_RETAIN_REG 0x51c9
62#define mmACP_PGFSM_CONFIG_REG 0x51ca 66#define mmACP_PGFSM_CONFIG_REG 0x51ca
@@ -77,7 +81,7 @@
77#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF 81#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
78 82
79#define ACP_TIMEOUT_LOOP 0x000000FF 83#define ACP_TIMEOUT_LOOP 0x000000FF
80#define ACP_DEVS 3 84#define ACP_DEVS 4
81#define ACP_SRC_ID 162 85#define ACP_SRC_ID 162
82 86
83enum { 87enum {
@@ -316,14 +320,13 @@ static int acp_hw_init(void *handle)
316 if (adev->acp.acp_cell == NULL) 320 if (adev->acp.acp_cell == NULL)
317 return -ENOMEM; 321 return -ENOMEM;
318 322
319 adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL); 323 adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
320
321 if (adev->acp.acp_res == NULL) { 324 if (adev->acp.acp_res == NULL) {
322 kfree(adev->acp.acp_cell); 325 kfree(adev->acp.acp_cell);
323 return -ENOMEM; 326 return -ENOMEM;
324 } 327 }
325 328
326 i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL); 329 i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
327 if (i2s_pdata == NULL) { 330 if (i2s_pdata == NULL) {
328 kfree(adev->acp.acp_res); 331 kfree(adev->acp.acp_res);
329 kfree(adev->acp.acp_cell); 332 kfree(adev->acp.acp_cell);
@@ -358,6 +361,20 @@ static int acp_hw_init(void *handle)
358 i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET; 361 i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
359 i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET; 362 i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
360 363
364 i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
365 switch (adev->asic_type) {
366 case CHIP_STONEY:
367 i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
368 break;
369 default:
370 break;
371 }
372
373 i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
374 i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
375 i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
376 i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
377
361 adev->acp.acp_res[0].name = "acp2x_dma"; 378 adev->acp.acp_res[0].name = "acp2x_dma";
362 adev->acp.acp_res[0].flags = IORESOURCE_MEM; 379 adev->acp.acp_res[0].flags = IORESOURCE_MEM;
363 adev->acp.acp_res[0].start = acp_base; 380 adev->acp.acp_res[0].start = acp_base;
@@ -373,13 +390,18 @@ static int acp_hw_init(void *handle)
373 adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START; 390 adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
374 adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END; 391 adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
375 392
376 adev->acp.acp_res[3].name = "acp2x_dma_irq"; 393 adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
377 adev->acp.acp_res[3].flags = IORESOURCE_IRQ; 394 adev->acp.acp_res[3].flags = IORESOURCE_MEM;
378 adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162); 395 adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
379 adev->acp.acp_res[3].end = adev->acp.acp_res[3].start; 396 adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;
397
398 adev->acp.acp_res[4].name = "acp2x_dma_irq";
399 adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
400 adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
401 adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
380 402
381 adev->acp.acp_cell[0].name = "acp_audio_dma"; 403 adev->acp.acp_cell[0].name = "acp_audio_dma";
382 adev->acp.acp_cell[0].num_resources = 4; 404 adev->acp.acp_cell[0].num_resources = 5;
383 adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0]; 405 adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
384 adev->acp.acp_cell[0].platform_data = &adev->asic_type; 406 adev->acp.acp_cell[0].platform_data = &adev->asic_type;
385 adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type); 407 adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
@@ -396,6 +418,12 @@ static int acp_hw_init(void *handle)
396 adev->acp.acp_cell[2].platform_data = &i2s_pdata[1]; 418 adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
397 adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data); 419 adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
398 420
421 adev->acp.acp_cell[3].name = "designware-i2s";
422 adev->acp.acp_cell[3].num_resources = 1;
423 adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
424 adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
425 adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
426
399 r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 427 r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
400 ACP_DEVS); 428 ACP_DEVS);
401 if (r) 429 if (r)
@@ -451,7 +479,6 @@ static int acp_hw_init(void *handle)
451 val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET); 479 val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
452 val &= ~ACP_SOFT_RESET__SoftResetAud_MASK; 480 val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
453 cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); 481 cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
454
455 return 0; 482 return 0;
456} 483}
457 484
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index b33f1680c9a3..a028661d9e20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -575,6 +575,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
575 { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX }, 575 { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
576 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX }, 576 { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
577 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX }, 577 { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
578 { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
578 { 0, 0, 0, 0, 0 }, 579 { 0, 0, 0, 0, 0 },
579}; 580};
580 581
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 13acef526c5b..e839470880d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2843,6 +2843,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
2843 if (r) 2843 if (r)
2844 return r; 2844 return r;
2845 2845
2846 /* Make sure IB tests flushed */
2847 flush_delayed_work(&adev->late_init_work);
2848
2846 /* blat the mode back in */ 2849 /* blat the mode back in */
2847 if (fbcon) { 2850 if (fbcon) {
2848 if (!amdgpu_device_has_dc_support(adev)) { 2851 if (!amdgpu_device_has_dc_support(adev)) {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 9f0a217603ad..516795342dd2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -74,4 +74,3 @@ bool dm_read_persistent_data(struct dc_context *ctx,
74 74
75/**** power component interfaces ****/ 75/**** power component interfaces ****/
76 76
77
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 9d901ca70588..af9386ee5a93 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1781,12 +1781,10 @@ static void dp_test_send_link_training(struct dc_link *link)
1781 dp_retrain_link_dp_test(link, &link_settings, false); 1781 dp_retrain_link_dp_test(link, &link_settings, false);
1782} 1782}
1783 1783
1784/* TODO hbr2 compliance eye output is unstable 1784/* TODO Raven hbr2 compliance eye output is unstable
1785 * (toggling on and off) with debugger break 1785 * (toggling on and off) with debugger break
 1786 * This causes intermittent PHY automation failure 1786 * This causes intermittent PHY automation failure
1787 * Need to look into the root cause */ 1787 * Need to look into the root cause */
1788static uint8_t force_tps4_for_cp2520 = 1;
1789
1790static void dp_test_send_phy_test_pattern(struct dc_link *link) 1788static void dp_test_send_phy_test_pattern(struct dc_link *link)
1791{ 1789{
1792 union phy_test_pattern dpcd_test_pattern; 1790 union phy_test_pattern dpcd_test_pattern;
@@ -1846,13 +1844,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
1846 break; 1844 break;
1847 case PHY_TEST_PATTERN_CP2520_1: 1845 case PHY_TEST_PATTERN_CP2520_1:
1848 /* CP2520 pattern is unstable, temporarily use TPS4 instead */ 1846 /* CP2520 pattern is unstable, temporarily use TPS4 instead */
1849 test_pattern = (force_tps4_for_cp2520 == 1) ? 1847 test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
1850 DP_TEST_PATTERN_TRAINING_PATTERN4 : 1848 DP_TEST_PATTERN_TRAINING_PATTERN4 :
1851 DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; 1849 DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
1852 break; 1850 break;
1853 case PHY_TEST_PATTERN_CP2520_2: 1851 case PHY_TEST_PATTERN_CP2520_2:
1854 /* CP2520 pattern is unstable, temporarily use TPS4 instead */ 1852 /* CP2520 pattern is unstable, temporarily use TPS4 instead */
1855 test_pattern = (force_tps4_for_cp2520 == 1) ? 1853 test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
1856 DP_TEST_PATTERN_TRAINING_PATTERN4 : 1854 DP_TEST_PATTERN_TRAINING_PATTERN4 :
1857 DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; 1855 DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
1858 break; 1856 break;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 7515c0dcbdd2..b91f14989aef 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -77,6 +77,7 @@ struct dc_caps {
77 bool is_apu; 77 bool is_apu;
78 bool dual_link_dvi; 78 bool dual_link_dvi;
79 bool post_blend_color_processing; 79 bool post_blend_color_processing;
80 bool force_dp_tps4_for_cp2520;
80}; 81};
81 82
82struct dc_dcc_surface_param { 83struct dc_dcc_surface_param {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index cd8c22839227..c39934f8677f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1127,6 +1127,8 @@ static bool construct(
1127 dc->caps.max_slave_planes = 1; 1127 dc->caps.max_slave_planes = 1;
1128 dc->caps.is_apu = true; 1128 dc->caps.is_apu = true;
1129 dc->caps.post_blend_color_processing = false; 1129 dc->caps.post_blend_color_processing = false;
1130 /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
1131 dc->caps.force_dp_tps4_for_cp2520 = true;
1130 1132
1131 if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) 1133 if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
1132 dc->debug = debug_defaults_drv; 1134 dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index f4f366b26fd1..cb3a5b1737c8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -226,6 +226,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
226 226
227 ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address; 227 ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
228 228
229 ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
230
229 return 0; 231 return 0;
230} 232}
231 233
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 03eeee11dd5b..42a40daff132 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -519,8 +519,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
519 u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR); 519 u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
520 520
521 /* 521 /*
522 * This is rediculous - rather than writing bits to clear, we 522 * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR
523 * have to set the actual status register value. This is racy. 523 * is set. Writing has some other effect to acknowledge the IRQ -
524 * without this, we only get a single IRQ.
524 */ 525 */
525 writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); 526 writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
526 527
@@ -1116,16 +1117,22 @@ armada_drm_crtc_set_property(struct drm_crtc *crtc,
1116static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc) 1117static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc)
1117{ 1118{
1118 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); 1119 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
1120 unsigned long flags;
1119 1121
1122 spin_lock_irqsave(&dcrtc->irq_lock, flags);
1120 armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA); 1123 armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA);
1124 spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
1121 return 0; 1125 return 0;
1122} 1126}
1123 1127
1124static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc) 1128static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc)
1125{ 1129{
1126 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc); 1130 struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
1131 unsigned long flags;
1127 1132
1133 spin_lock_irqsave(&dcrtc->irq_lock, flags);
1128 armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA); 1134 armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA);
1135 spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
1129} 1136}
1130 1137
1131static const struct drm_crtc_funcs armada_crtc_funcs = { 1138static const struct drm_crtc_funcs armada_crtc_funcs = {
@@ -1415,6 +1422,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
1415 CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1); 1422 CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
1416 writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1); 1423 writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
1417 writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA); 1424 writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
1425 readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
1418 writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR); 1426 writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
1419 1427
1420 ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc", 1428 ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
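
The vblank hooks above now take dcrtc->irq_lock around the enable/disable helpers because those helpers read-modify-write a shared interrupt-enable mask; two racing callers can otherwise lose a bit. A minimal userspace model of that idea follows (names invented, a pthread mutex standing in for spin_lock_irqsave, so this is a sketch of the pattern rather than the driver's code):

/* Model of why the vblank hooks lock around enable/disable: both are
 * read-modify-write ops on one shared enable mask, so unsynchronized
 * callers can drop bits.  Build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>

static unsigned int irq_ena;                /* models dcrtc->irq_ena */
static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;

static void enable_irq(unsigned int mask)   /* models armada_drm_crtc_enable_irq */
{
	pthread_mutex_lock(&irq_lock);      /* stands in for spin_lock_irqsave */
	irq_ena |= mask;                    /* read, modify, write back */
	pthread_mutex_unlock(&irq_lock);
}

static void disable_irq(unsigned int mask)
{
	pthread_mutex_lock(&irq_lock);
	irq_ena &= ~mask;
	pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
	enable_irq(1 << 3);  /* VSYNC on */
	enable_irq(1 << 5);  /* without the lock this RMW could race the
				first one and drop bit 3 */
	disable_irq(1 << 3);
	printf("irq_ena = %#x\n", irq_ena); /* 0x20 */
	return 0;
}
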
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
index 27319a8335e2..345dc4d0851e 100644
--- a/drivers/gpu/drm/armada/armada_hw.h
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -160,6 +160,7 @@ enum {
 	CFG_ALPHAM_GRA = 0x1 << 16,
 	CFG_ALPHAM_CFG = 0x2 << 16,
 	CFG_ALPHA_MASK = 0xff << 8,
+#define CFG_ALPHA(x)	((x) << 8)
 	CFG_PIXCMD_MASK = 0xff,
 };
 
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index c391955009d6..afa7ded3ae31 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -28,6 +28,7 @@ struct armada_ovl_plane_properties {
 	uint16_t contrast;
 	uint16_t saturation;
 	uint32_t colorkey_mode;
+	uint32_t colorkey_enable;
 };
 
 struct armada_ovl_plane {
@@ -54,11 +55,13 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
 	writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
 
 	spin_lock_irq(&dcrtc->irq_lock);
-	armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
+	armada_updatel(prop->colorkey_mode,
 		       CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
 		       dcrtc->base + LCD_SPU_DMA_CTRL1);
-
-	armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+	if (dcrtc->variant->has_spu_adv_reg)
+		armada_updatel(prop->colorkey_enable,
+			       ADV_GRACOLORKEY | ADV_VIDCOLORKEY,
+			       dcrtc->base + LCD_SPU_ADV_REG);
 	spin_unlock_irq(&dcrtc->irq_lock);
 }
 
@@ -321,8 +324,17 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane,
 		dplane->prop.colorkey_vb |= K2B(val);
 		update_attr = true;
 	} else if (property == priv->colorkey_mode_prop) {
-		dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
-		dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+		if (val == CKMODE_DISABLE) {
+			dplane->prop.colorkey_mode =
+				CFG_CKMODE(CKMODE_DISABLE) |
+				CFG_ALPHAM_CFG | CFG_ALPHA(255);
+			dplane->prop.colorkey_enable = 0;
+		} else {
+			dplane->prop.colorkey_mode =
+				CFG_CKMODE(val) |
+				CFG_ALPHAM_GRA | CFG_ALPHA(0);
+			dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
+		}
 		update_attr = true;
 	} else if (property == priv->brightness_prop) {
 		dplane->prop.brightness = val - 256;
@@ -453,7 +465,9 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
 	dplane->prop.colorkey_yr = 0xfefefe00;
 	dplane->prop.colorkey_ug = 0x01010100;
 	dplane->prop.colorkey_vb = 0x01010100;
-	dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+	dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
+				     CFG_ALPHAM_GRA | CFG_ALPHA(0);
+	dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
 	dplane->prop.brightness = 0;
 	dplane->prop.contrast = 0x4000;
 	dplane->prop.saturation = 0x4000;
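
The overlay hunks above switch the DMA_CTRL1 and ADV_REG updates to masked writes so that colorkey mode, alpha source, and alpha value change together. A small runnable model of the read-modify-write helper this relies on (the armada_updatel() body is assumed from its usage, not copied from the driver):

#include <assert.h>
#include <stdint.h>

#define CFG_ALPHA_MASK	(0xffu << 8)
#define CFG_ALPHA(x)	((uint32_t)(x) << 8)	/* matches the new armada_hw.h macro */

/* Replace only the bits selected by mask, leave the rest of the register alone. */
static uint32_t updatel(uint32_t reg, uint32_t val, uint32_t mask)
{
	return (reg & ~mask) | (val & mask);
}

int main(void)
{
	uint32_t ctrl1 = 0x2032ff81;	/* arbitrary prior register contents */

	/* colorkey disabled: graphics plane fully opaque via CFG_ALPHA(255) */
	ctrl1 = updatel(ctrl1, CFG_ALPHA(255), CFG_ALPHA_MASK);
	assert((ctrl1 & CFG_ALPHA_MASK) == CFG_ALPHA(255));

	/* colorkey enabled: alpha handed back to the colorkey logic */
	ctrl1 = updatel(ctrl1, CFG_ALPHA(0), CFG_ALPHA_MASK);
	assert((ctrl1 & CFG_ALPHA_MASK) == 0);
	return 0;
}
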
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 250effa0e6b8..a6e8f4591e63 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -14,6 +14,7 @@
 #include <drm/bridge/mhl.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
 
 #include <linux/clk.h>
 #include <linux/delay.h>
@@ -72,9 +72,7 @@ struct sii8620 {
 	struct regulator_bulk_data supplies[2];
 	struct mutex lock; /* context lock, protects fields below */
 	int error;
-	int pixel_clock;
 	unsigned int use_packed_pixel:1;
-	int video_code;
 	enum sii8620_mode mode;
 	enum sii8620_sink_type sink_type;
 	u8 cbus_status;
@@ -82,7 +81,6 @@ struct sii8620 {
 	u8 xstat[MHL_XDS_SIZE];
 	u8 devcap[MHL_DCAP_SIZE];
 	u8 xdevcap[MHL_XDC_SIZE];
-	u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
 	bool feature_complete;
 	bool devcap_read;
 	bool sink_detected;
@@ -1017,21 +1015,36 @@ static void sii8620_stop_video(struct sii8620 *ctx)
 
 static void sii8620_set_format(struct sii8620 *ctx)
 {
+	u8 out_fmt;
+
 	if (sii8620_is_mhl3(ctx)) {
 		sii8620_setbits(ctx, REG_M3_P0CTRL,
 				BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
 				ctx->use_packed_pixel ? ~0 : 0);
 	} else {
+		if (ctx->use_packed_pixel) {
+			sii8620_write_seq_static(ctx,
+				REG_VID_MODE, BIT_VID_MODE_M1080P,
+				REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
+				REG_MHLTX_CTL6, 0x60
+			);
+		} else {
 		sii8620_write_seq_static(ctx,
 			REG_VID_MODE, 0,
 			REG_MHL_TOP_CTL, 1,
 			REG_MHLTX_CTL6, 0xa0
 		);
+		}
 	}
 
+	if (ctx->use_packed_pixel)
+		out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL);
+	else
+		out_fmt = VAL_TPI_FORMAT(RGB, FULL);
+
 	sii8620_write_seq(ctx,
 		REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
-		REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
+		REG_TPI_OUTPUT, out_fmt,
 	);
 }
 
@@ -1082,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
 	return frm_len;
 }
 
-static void sii8620_set_infoframes(struct sii8620 *ctx)
+static void sii8620_set_infoframes(struct sii8620 *ctx,
+				   struct drm_display_mode *mode)
 {
 	struct mhl3_infoframe mhl_frm;
 	union hdmi_infoframe frm;
 	u8 buf[31];
 	int ret;
 
+	ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
+						       mode,
+						       true);
+	if (ctx->use_packed_pixel)
+		frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
+
+	if (!ret)
+		ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
+	if (ret > 0)
+		sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
+
 	if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
 		sii8620_write(ctx, REG_TPI_SC,
 			      BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
-		sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3,
-				  ARRAY_SIZE(ctx->avif) - 3);
 		sii8620_write(ctx, REG_PKT_FILTER_0,
 			      BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
 			      BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1102,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
 		return;
 	}
 
-	ret = hdmi_avi_infoframe_init(&frm.avi);
-	frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
-	frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
-	frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
-	frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709;
-	frm.avi.video_code = ctx->video_code;
-	if (!ret)
-		ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
-	if (ret > 0)
-		sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
 	sii8620_write(ctx, REG_PKT_FILTER_0,
 		      BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
 		      BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1131,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
 
 static void sii8620_start_video(struct sii8620 *ctx)
 {
+	struct drm_display_mode *mode =
+	    &ctx->bridge.encoder->crtc->state->adjusted_mode;
+
 	if (!sii8620_is_mhl3(ctx))
 		sii8620_stop_video(ctx);
 
@@ -1149,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx)
 	sii8620_set_format(ctx);
 
 	if (!sii8620_is_mhl3(ctx)) {
-		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-				      MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED);
+		u8 link_mode = MHL_DST_LM_PATH_ENABLED;
+
+		if (ctx->use_packed_pixel)
+			link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+		else
+			link_mode |= MHL_DST_LM_CLK_MODE_NORMAL;
+
+		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode);
 		sii8620_set_auto_zone(ctx);
 	} else {
 		static const struct {
@@ -1167,7 +1189,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
 			  MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
 		};
 		u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
-		int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
+		int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3);
 		int i;
 
 		for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
@@ -1196,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
 				      clk_spec[i].link_rate);
 	}
 
-	sii8620_set_infoframes(ctx);
+	sii8620_set_infoframes(ctx, mode);
 }
 
 static void sii8620_disable_hpd(struct sii8620 *ctx)
@@ -1661,14 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx)
 
 static void sii8620_status_changed_path(struct sii8620 *ctx)
 {
-	if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
-		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-				      MHL_DST_LM_CLK_MODE_NORMAL
-				      | MHL_DST_LM_PATH_ENABLED);
-	} else {
-		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
-				      MHL_DST_LM_CLK_MODE_NORMAL);
-	}
+	u8 link_mode;
+
+	if (ctx->use_packed_pixel)
+		link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+	else
+		link_mode = MHL_DST_LM_CLK_MODE_NORMAL;
+
+	if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+		link_mode |= MHL_DST_LM_PATH_ENABLED;
+
+	sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+			      link_mode);
}
 
 static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
@@ -2242,8 +2268,6 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
 	mutex_lock(&ctx->lock);
 
 	ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
-	ctx->video_code = drm_match_cea_mode(adjusted_mode);
-	ctx->pixel_clock = adjusted_mode->clock;
 
 	mutex_unlock(&ctx->lock);
 
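
For context on the link_mode and clk arithmetic above: MHL packed-pixel mode carries YCbCr 4:2:2 at 16 bits per pixel versus 24 for RGB, which is why the link clock is mode->clock times 2 or 3. A runnable sketch of the clk_spec-style scan that picks the lowest sufficient link rate (table values and field names invented for illustration):

#include <stdio.h>

/* Illustrative link-rate table; the driver scans its clk_spec[] the same
 * way and falls back to the last (fastest) entry.
 */
static const struct { int max_clk; const char *rate; } clk_spec[] = {
	{ 150000, "1.5 Gbps" },
	{ 300000, "3.0 Gbps" },
	{ 600000, "6.0 Gbps" },
};

static const char *pick_link_rate(int pixel_clock_khz, int use_packed_pixel)
{
	/* 16 bpp when packed (2 bytes/pixel), 24 bpp for RGB (3 bytes) */
	int clk = pixel_clock_khz * (use_packed_pixel ? 2 : 3);
	unsigned int i;

	for (i = 0; i < sizeof(clk_spec) / sizeof(clk_spec[0]) - 1; ++i)
		if (clk < clk_spec[i].max_clk)
			break;
	return clk_spec[i].rate;
}

int main(void)
{
	/* 1080p60 has a 148500 kHz pixel clock */
	printf("RGB:    %s\n", pick_link_rate(148500, 0));	/* 6.0 Gbps */
	printf("packed: %s\n", pick_link_rate(148500, 1));	/* 3.0 Gbps */
	return 0;
}
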
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 50c73c0a20b9..d638c0fb3418 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -553,24 +553,13 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 
 	/* Clone the lessor file to create a new file for us */
 	DRM_DEBUG_LEASE("Allocating lease file\n");
-	path_get(&lessor_file->f_path);
-	lessee_file = alloc_file(&lessor_file->f_path,
-				 lessor_file->f_mode,
-				 fops_get(lessor_file->f_inode->i_fop));
-
+	lessee_file = filp_clone_open(lessor_file);
 	if (IS_ERR(lessee_file)) {
 		ret = PTR_ERR(lessee_file);
 		goto out_lessee;
 	}
 
-	/* Initialize the new file for DRM */
-	DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open);
-	ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file);
-	if (ret)
-		goto out_lessee_file;
-
 	lessee_priv = lessee_file->private_data;
-
 	/* Change the file to a master one */
 	drm_master_put(&lessee_priv->master);
 	lessee_priv->master = lessee;
@@ -588,9 +577,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
 	return 0;
 
-out_lessee_file:
-	fput(lessee_file);
-
 out_lessee:
 	drm_master_put(&lessee);
 
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 1f8031e30f53..cdb10f885a4f 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref)
 
 	drm_mode_object_unregister(blob->dev, &blob->base);
 
-	kfree(blob);
+	kvfree(blob);
 }
 
 /**
@@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
 	if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
 		return ERR_PTR(-EINVAL);
 
-	blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+	blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
 	if (!blob)
 		return ERR_PTR(-ENOMEM);
 
@@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
 	ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
 				    true, drm_property_free_blob);
 	if (ret) {
-		kfree(blob);
+		kvfree(blob);
 		return ERR_PTR(-EINVAL);
 	}
 
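
The kzalloc-to-kvzalloc switch above means the blob may be vmalloc-backed when userspace supplies a large property, so every free site must become kvfree(); pairing kfree() with a kvmalloc-family allocation is a bug. A kernel-style sketch of the pairing rule (not standalone-runnable; the struct name is invented):

/* Any buffer that may come from kvmalloc()/kvzalloc() must be released
 * with kvfree(), which dispatches to kfree() or vfree() as appropriate.
 */
#include <linux/mm.h>
#include <linux/slab.h>

struct blob {
	size_t length;
	unsigned char data[];
};

static struct blob *blob_create(size_t length)
{
	struct blob *b = kvzalloc(sizeof(*b) + length, GFP_KERNEL);

	if (!b)
		return NULL;		/* nothing to undo */
	b->length = length;
	return b;
}

static void blob_destroy(struct blob *b)
{
	kvfree(b);	/* correct whether b was kmalloc- or vmalloc-backed */
}
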
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 207532c05eb8..9b2720b41571 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -630,8 +630,11 @@ static struct platform_driver etnaviv_platform_driver = {
 	},
 };
 
+static struct platform_device *etnaviv_drm;
+
 static int __init etnaviv_init(void)
 {
+	struct platform_device *pdev;
 	int ret;
 	struct device_node *np;
 
@@ -643,7 +646,7 @@ static int __init etnaviv_init(void)
 
 	ret = platform_driver_register(&etnaviv_platform_driver);
 	if (ret != 0)
-		platform_driver_unregister(&etnaviv_gpu_driver);
+		goto unregister_gpu_driver;
 
 	/*
 	 * If the DT contains at least one available GPU device, instantiate
@@ -652,20 +655,33 @@ static int __init etnaviv_init(void)
 	for_each_compatible_node(np, NULL, "vivante,gc") {
 		if (!of_device_is_available(np))
 			continue;
-
-		platform_device_register_simple("etnaviv", -1, NULL, 0);
+		pdev = platform_device_register_simple("etnaviv", -1,
+						       NULL, 0);
+		if (IS_ERR(pdev)) {
+			ret = PTR_ERR(pdev);
+			of_node_put(np);
+			goto unregister_platform_driver;
+		}
+		etnaviv_drm = pdev;
 		of_node_put(np);
 		break;
 	}
 
+	return 0;
+
+unregister_platform_driver:
+	platform_driver_unregister(&etnaviv_platform_driver);
+unregister_gpu_driver:
+	platform_driver_unregister(&etnaviv_gpu_driver);
 	return ret;
 }
 module_init(etnaviv_init);
 
 static void __exit etnaviv_exit(void)
 {
-	platform_driver_unregister(&etnaviv_gpu_driver);
+	platform_device_unregister(etnaviv_drm);
 	platform_driver_unregister(&etnaviv_platform_driver);
+	platform_driver_unregister(&etnaviv_gpu_driver);
 }
 module_exit(etnaviv_exit);
 
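
The init path above now unwinds with labeled gotos in reverse registration order, and the exit path tears down in exactly the reverse of init (device before drivers). A runnable userspace model of the idiom (stage names invented):

#include <stdio.h>

/* Goto-unwind pattern: each failure jumps to a label that undoes
 * everything registered so far, in reverse order.
 */
static int register_a(void)	{ puts("register a"); return 0; }
static int register_b(void)	{ puts("register b"); return 0; }
static int register_c(void)	{ puts("register c"); return -1; } /* fails */
static void unregister_a(void)	{ puts("unregister a"); }
static void unregister_b(void)	{ puts("unregister b"); }

static int init(void)
{
	int ret;

	ret = register_a();
	if (ret)
		return ret;
	ret = register_b();
	if (ret)
		goto unregister_a;
	ret = register_c();
	if (ret)
		goto unregister_b;
	return 0;

unregister_b:
	unregister_b();
unregister_a:
	unregister_a();
	return ret;
}

int main(void)
{
	return init() ? 1 : 0;	/* prints the unwind in reverse order */
}
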
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index dd430f0f8ff5..90f17ff7888e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -131,6 +131,9 @@ struct etnaviv_gpu {
 	struct work_struct sync_point_work;
 	int sync_point_event;
 
+	/* hang detection */
+	u32 hangcheck_dma_addr;
+
 	void __iomem *mmio;
 	int irq;
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 590e44b0d963..3949f18afb35 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -10,6 +10,7 @@
 #include "etnaviv_gem.h"
 #include "etnaviv_gpu.h"
 #include "etnaviv_sched.h"
+#include "state.xml.h"
 
 static int etnaviv_job_hang_limit = 0;
 module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
@@ -85,6 +86,29 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 {
 	struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
 	struct etnaviv_gpu *gpu = submit->gpu;
+	u32 dma_addr;
+	int change;
+
+	/*
+	 * If the GPU managed to complete this jobs fence, the timout is
+	 * spurious. Bail out.
+	 */
+	if (fence_completed(gpu, submit->out_fence->seqno))
+		return;
+
+	/*
+	 * If the GPU is still making forward progress on the front-end (which
+	 * should never loop) we shift out the timeout to give it a chance to
+	 * finish the job.
+	 */
+	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+	change = dma_addr - gpu->hangcheck_dma_addr;
+	if (change < 0 || change > 16) {
+		gpu->hangcheck_dma_addr = dma_addr;
+		schedule_delayed_work(&sched_job->work_tdr,
+				      sched_job->sched->timeout);
+		return;
+	}
 
 	/* block scheduler */
 	kthread_park(gpu->sched.thread);
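
The timeout handler above re-arms itself as long as the front-end DMA address keeps moving, and only escalates to a reset when the address stays within a 16-byte window, i.e. the FE is spinning in place. A runnable model of just that decision:

#include <assert.h>
#include <stdint.h>

/* Progress check as in etnaviv_sched_timedout_job(): compare the current
 * FE DMA address with the one captured at the previous timeout.  A small
 * forward delta (0..16 bytes) reads as "stuck on one command"; anything
 * else counts as progress and the timeout is deferred.
 */
static int fe_made_progress(uint32_t dma_addr, uint32_t *hangcheck_dma_addr)
{
	int32_t change = (int32_t)(dma_addr - *hangcheck_dma_addr);

	if (change < 0 || change > 16) {
		*hangcheck_dma_addr = dma_addr;	/* remember for next check */
		return 1;			/* defer: re-arm the timer */
	}
	return 0;				/* hung: go reset the GPU */
}

int main(void)
{
	uint32_t last = 0x1000;

	assert(fe_made_progress(0x2000, &last) == 1);	/* big jump: progress */
	assert(fe_made_progress(0x2008, &last) == 0);	/* +8 bytes: stuck */
	assert(fe_made_progress(0x2008, &last) == 0);	/* still stuck */
	return 0;
}
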
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 0651e63b25fb..45e89b1e0481 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -863,6 +863,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 {
 	struct intel_vgpu *vgpu = s->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
+	u32 ctx_sr_ctl;
 
 	if (offset + 4 > gvt->device_info.mmio_size) {
 		gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -895,6 +896,28 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 		patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
 	}
 
+	/* TODO
+	 * Right now only scan LRI command on KBL and in inhibit context.
+	 * It's good enough to support initializing mmio by lri command in
+	 * vgpu inhibit context on KBL.
+	 */
+	if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+	    intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+	    !strncmp(cmd, "lri", 3)) {
+		intel_gvt_hypervisor_read_gpa(s->vgpu,
+			s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
+		/* check inhibit context */
+		if (ctx_sr_ctl & 1) {
+			u32 data = cmd_val(s, index + 1);
+
+			if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
+				intel_vgpu_mask_mmio_write(vgpu,
+							offset, &data, 4);
+			else
+				vgpu_vreg(vgpu, offset) = data;
+		}
+	}
+
 	/* TODO: Update the global mask if this MMIO is a masked-MMIO */
 	intel_gvt_mmio_set_cmd_accessed(gvt, offset);
 	return 0;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 6ee50cb328f8..3019dbc39aef 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -219,7 +219,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
 		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
 			(PORT_B << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
@@ -239,7 +239,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
 		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
			(PORT_C << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
@@ -259,7 +259,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
 		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
-			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
 			(PORT_D << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 39980dfbbebd..00aad8164dec 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1901,6 +1901,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
 		vgpu_free_mm(mm);
 		return ERR_PTR(-ENOMEM);
 	}
+	mm->ggtt_mm.last_partial_off = -1UL;
 
 	return mm;
 }
@@ -1925,6 +1926,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
 		invalidate_ppgtt_mm(mm);
 	} else {
 		vfree(mm->ggtt_mm.virtual_ggtt);
+		mm->ggtt_mm.last_partial_off = -1UL;
 	}
 
 	vgpu_free_mm(mm);
@@ -2177,6 +2179,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
 			bytes);
 
+	/* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
+	 * write, we assume the two 4 bytes writes are consecutive.
+	 * Otherwise, we abort and report error
+	 */
+	if (bytes < info->gtt_entry_size) {
+		if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
+			/* the first partial part*/
+			ggtt_mm->ggtt_mm.last_partial_off = off;
+			ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+			return 0;
+		} else if ((g_gtt_index ==
+				(ggtt_mm->ggtt_mm.last_partial_off >>
+					info->gtt_entry_size_shift)) &&
+			   (off != ggtt_mm->ggtt_mm.last_partial_off)) {
+			/* the second partial part */
+
+			int last_off = ggtt_mm->ggtt_mm.last_partial_off &
+				(info->gtt_entry_size - 1);
+
+			memcpy((void *)&e.val64 + last_off,
+				(void *)&ggtt_mm->ggtt_mm.last_partial_data +
+					last_off, bytes);
+
+			ggtt_mm->ggtt_mm.last_partial_off = -1UL;
+		} else {
+			int last_offset;
+
+			gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
+					ggtt_mm->ggtt_mm.last_partial_off, off,
+					bytes, info->gtt_entry_size);
+
+			/* set host ggtt entry to scratch page and clear
+			 * virtual ggtt entry as not present for last
+			 * partially write offset
+			 */
+			last_offset = ggtt_mm->ggtt_mm.last_partial_off &
+					(~(info->gtt_entry_size - 1));
+
+			ggtt_get_host_entry(ggtt_mm, &m, last_offset);
+			ggtt_invalidate_pte(vgpu, &m);
+			ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+			ops->clear_present(&m);
+			ggtt_set_host_entry(ggtt_mm, &m, last_offset);
+			ggtt_invalidate(gvt->dev_priv);
+
+			ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
+			ops->clear_present(&e);
+			ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
+
+			ggtt_mm->ggtt_mm.last_partial_off = off;
+			ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+
+			return 0;
+		}
+	}
+
 	if (ops->test_present(&e)) {
 		gfn = ops->get_pfn(&e);
 		m = e;
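
The GGTT path above copes with guests that update one 8-byte PTE as two 4-byte MMIO writes: the first half is parked in last_partial_off/last_partial_data and merged when the other half of the same entry arrives. A runnable model of that bookkeeping (single pending slot as in the patch, little-endian host assumed, and the error-path teardown reduced to a comment):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define ENTRY_SIZE 8			/* models info->gtt_entry_size */

static uint64_t pending_data;		/* models ggtt_mm.last_partial_data */
static unsigned long pending_off = -1UL; /* models ggtt_mm.last_partial_off */

/* Returns 1 once both 4-byte halves of one 8-byte entry have arrived. */
static int ggtt_partial_write(unsigned long off, uint32_t data, uint64_t *entry)
{
	uint64_t e = 0;

	memcpy((char *)&e + (off & (ENTRY_SIZE - 1)), &data, sizeof(data));

	if (pending_off == -1UL) {			/* first half: stash */
		pending_off = off;
		pending_data = e;
		return 0;
	}
	if (off / ENTRY_SIZE == pending_off / ENTRY_SIZE &&
	    off != pending_off) {			/* second half: merge */
		unsigned long last = pending_off & (ENTRY_SIZE - 1);

		memcpy((char *)&e + last, (char *)&pending_data + last, 4);
		pending_off = -1UL;
		*entry = e;
		return 1;
	}
	/* abnormal sequence: the real code also scrubs the stale entry */
	pending_off = off;
	pending_data = e;
	return 0;
}

int main(void)
{
	uint64_t entry;

	assert(!ggtt_partial_write(0x100, 0xdeadbeef, &entry));
	assert(ggtt_partial_write(0x104, 0x00c0ffee, &entry));
	assert(entry == 0x00c0ffeedeadbeefULL);	/* little-endian host */
	return 0;
}
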
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index b7bf68cc8418..7a9b36176efb 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -157,6 +157,8 @@ struct intel_vgpu_mm {
 		} ppgtt_mm;
 		struct {
 			void *virtual_ggtt;
+			unsigned long last_partial_off;
+			u64 last_partial_data;
 		} ggtt_mm;
 	};
 };
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index de2a3a2580be..9a9671522774 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -274,6 +274,8 @@ struct intel_gvt_mmio {
 #define F_CMD_ACCESSED	(1 << 5)
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN	(1 << 6)
+/* This reg is saved/restored in context */
+#define F_IN_CTX	(1 << 7)
 
 	struct gvt_mmio_block *mmio_block;
 	unsigned int num_mmio_block;
@@ -655,6 +657,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
 	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
 }
 
+/**
+ * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if a MMIO has a in-context mask, false if it isn't.
+ *
+ */
+static inline bool intel_gvt_mmio_is_in_ctx(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+}
+
+/**
+ * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_in_ctx(
+			struct intel_gvt *gvt, unsigned int offset)
+{
+	gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+}
+
 int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
 int intel_gvt_debugfs_init(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 6b50f850dc28..7a58ca555197 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -3388,6 +3388,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 }
 
 /**
+ * intel_vgpu_mask_mmio_write - write mask register
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+		void *p_data, unsigned int bytes)
+{
+	u32 mask, old_vreg;
+
+	old_vreg = vgpu_vreg(vgpu, offset);
+	write_vreg(vgpu, offset, p_data, bytes);
+	mask = vgpu_vreg(vgpu, offset) >> 16;
+	vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
+		(vgpu_vreg(vgpu, offset) & mask);
+
+	return 0;
+}
+
+/**
 * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
 * force-nopriv register
 *
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index e474188b46d2..1ffc69eba30e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -99,4 +99,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
 int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 			   void *pdata, unsigned int bytes, bool is_read);
 
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+				  void *p_data, unsigned int bytes);
 #endif
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 20be9a92600f..42e1e6bdcc2c 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -587,7 +587,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 
 	for (mmio = gvt->engine_mmio_list.mmio;
 	     i915_mmio_reg_valid(mmio->reg); mmio++) {
-		if (mmio->in_context)
+		if (mmio->in_context) {
 			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+			intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+		}
 	}
 }
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 501d2d290e9c..70dce544984e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev)
 	nouveau_display(dev)->init = nv04_display_init;
 	nouveau_display(dev)->fini = nv04_display_fini;
 
+	/* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
+	dev->driver->driver_features &= ~DRIVER_ATOMIC;
+
 	nouveau_hw_save_vga_fonts(dev, 1);
 
 	nv04_crtc_create(dev, 0);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 0190377b02a6..8412119bd940 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1610,8 +1610,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 *****************************************************************************/
 
 static void
-nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
+nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
 {
+	struct nouveau_drm *drm = nouveau_drm(state->dev);
 	struct nv50_disp *disp = nv50_disp(drm->dev);
 	struct nv50_core *core = disp->core;
 	struct nv50_mstm *mstm;
@@ -1643,6 +1644,22 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
 }
 
 static void
+nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
+{
+	struct drm_plane_state *new_plane_state;
+	struct drm_plane *plane;
+	int i;
+
+	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+		struct nv50_wndw *wndw = nv50_wndw(plane);
+		if (interlock[wndw->interlock.type] & wndw->interlock.data) {
+			if (wndw->func->update)
+				wndw->func->update(wndw, interlock);
+		}
+	}
+}
+
+static void
 nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 {
 	struct drm_device *dev = state->dev;
@@ -1709,7 +1726,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 			help->disable(encoder);
 			interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
 			if (outp->flush_disable) {
-				nv50_disp_atomic_commit_core(drm, interlock);
+				nv50_disp_atomic_commit_wndw(state, interlock);
+				nv50_disp_atomic_commit_core(state, interlock);
 				memset(interlock, 0x00, sizeof(interlock));
 			}
 		}
@@ -1718,15 +1736,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 	/* Flush disable. */
 	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
 		if (atom->flush_disable) {
-			for_each_new_plane_in_state(state, plane, new_plane_state, i) {
-				struct nv50_wndw *wndw = nv50_wndw(plane);
-				if (interlock[wndw->interlock.type] & wndw->interlock.data) {
-					if (wndw->func->update)
-						wndw->func->update(wndw, interlock);
-				}
-			}
-
-			nv50_disp_atomic_commit_core(drm, interlock);
+			nv50_disp_atomic_commit_wndw(state, interlock);
+			nv50_disp_atomic_commit_core(state, interlock);
 			memset(interlock, 0x00, sizeof(interlock));
 		}
 	}
@@ -1787,18 +1798,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
 	}
 
 	/* Flush update. */
-	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
-		struct nv50_wndw *wndw = nv50_wndw(plane);
-		if (interlock[wndw->interlock.type] & wndw->interlock.data) {
-			if (wndw->func->update)
-				wndw->func->update(wndw, interlock);
-		}
-	}
+	nv50_disp_atomic_commit_wndw(state, interlock);
 
 	if (interlock[NV50_DISP_INTERLOCK_CORE]) {
 		if (interlock[NV50_DISP_INTERLOCK_BASE] ||
+		    interlock[NV50_DISP_INTERLOCK_OVLY] ||
+		    interlock[NV50_DISP_INTERLOCK_WNDW] ||
 		    !atom->state.legacy_cursor_update)
-			nv50_disp_atomic_commit_core(drm, interlock);
+			nv50_disp_atomic_commit_core(state, interlock);
 		else
 			disp->core->func->update(disp->core, interlock, false);
 	}
@@ -1896,7 +1903,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
 	nv50_disp_atomic_commit_tail(state);
 
 	drm_for_each_crtc(crtc, dev) {
-		if (crtc->state->enable) {
+		if (crtc->state->active) {
 			if (!drm->have_disp_power_ref) {
 				drm->have_disp_power_ref = true;
 				return 0;
@@ -2144,10 +2151,6 @@ nv50_display_destroy(struct drm_device *dev)
 	kfree(disp);
 }
 
-MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
-static int nouveau_atomic = 0;
-module_param_named(atomic, nouveau_atomic, int, 0400);
-
 int
 nv50_display_create(struct drm_device *dev)
 {
@@ -2172,8 +2175,6 @@ nv50_display_create(struct drm_device *dev)
 	disp->disp = &nouveau_display(dev)->disp;
 	dev->mode_config.funcs = &nv50_disp_func;
 	dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
-	if (nouveau_atomic)
-		dev->driver->driver_features |= DRIVER_ATOMIC;
 
 	/* small shared memory area we use for notifiers and semaphores */
 	ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index debbbf0fd4bd..408b955e5c39 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -267,6 +267,7 @@ nouveau_backlight_init(struct drm_device *dev)
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nvif_device *device = &drm->client.device;
 	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 
 	INIT_LIST_HEAD(&drm->bl_connectors);
 
@@ -275,7 +276,8 @@ nouveau_backlight_init(struct drm_device *dev)
 		return 0;
 	}
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	drm_for_each_connector_iter(connector, &conn_iter) {
 		if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
 		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
 			continue;
@@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev)
 			break;
 		}
 	}
-
+	drm_connector_list_iter_end(&conn_iter);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 22a15478d23d..51932c72334e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1193,14 +1193,19 @@ nouveau_connector_create(struct drm_device *dev, int index)
 	struct nouveau_display *disp = nouveau_display(dev);
 	struct nouveau_connector *nv_connector = NULL;
 	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 	int type, ret = 0;
 	bool dummy;
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
 		nv_connector = nouveau_connector(connector);
-		if (nv_connector->index == index)
+		if (nv_connector->index == index) {
+			drm_connector_list_iter_end(&conn_iter);
 			return connector;
+		}
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
 	nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
 	if (!nv_connector)
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index a4d1a059bd3d..dc7454e7f19a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -33,6 +33,7 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_dp_helper.h>
 #include "nouveau_crtc.h"
+#include "nouveau_encoder.h"
 
 struct nvkm_i2c_port;
 
@@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector(
 	return container_of(con, struct nouveau_connector, base);
 }
 
+static inline bool
+nouveau_connector_is_mst(struct drm_connector *connector)
+{
+	const struct nouveau_encoder *nv_encoder;
+	const struct drm_encoder *encoder;
+
+	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+		return false;
+
+	nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY);
+	if (!nv_encoder)
+		return false;
+
+	encoder = &nv_encoder->base.base;
+	return encoder->encoder_type == DRM_MODE_ENCODER_DPMST;
+}
+
+#define nouveau_for_each_non_mst_connector_iter(connector, iter) \
+	drm_for_each_connector_iter(connector, iter) \
+		for_each_if(!nouveau_connector_is_mst(connector))
+
 static inline struct nouveau_connector *
 nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
 {
 	struct drm_device *dev = nv_crtc->base.dev;
 	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
+	struct nouveau_connector *nv_connector = NULL;
 	struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
 
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-		if (connector->encoder && connector->encoder->crtc == crtc)
-			return nouveau_connector(connector);
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+		if (connector->encoder && connector->encoder->crtc == crtc) {
+			nv_connector = nouveau_connector(connector);
+			break;
+		}
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
-	return NULL;
+	return nv_connector;
 }
 
 struct drm_connector *
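
nouveau_for_each_non_mst_connector_iter() above composes the DRM connector iterator with for_each_if(), whose "if (!cond) {} else" shape filters loop iterations without swallowing a following else or requiring braces. A runnable model of the macro pattern:

#include <stdio.h>

/* for_each_if() as used by the new nouveau iterator: the dangling-else
 * trick binds the loop body to the filter, so the body only runs for
 * elements passing the predicate.
 */
#define for_each_if(condition) if (!(condition)) {} else

#define for_each_even(i, n) \
	for ((i) = 0; (i) < (n); (i)++) \
		for_each_if((i) % 2 == 0)

int main(void)
{
	int i;

	for_each_even(i, 6)
		printf("%d\n", i);	/* prints 0, 2, 4 */
	return 0;
}
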
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index dfa236370726..139368b31916 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -404,6 +404,7 @@ nouveau_display_init(struct drm_device *dev)
 	struct nouveau_display *disp = nouveau_display(dev);
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 	int ret;
 
 	ret = disp->init(dev);
@@ -411,10 +412,12 @@ nouveau_display_init(struct drm_device *dev)
 		return ret;
 
 	/* enable hotplug interrupts */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
 		nvif_notify_get(&conn->hpd);
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
 	/* enable flip completion events */
 	nvif_notify_get(&drm->flip);
@@ -427,6 +430,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
 	struct nouveau_display *disp = nouveau_display(dev);
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_connector *connector;
+	struct drm_connector_list_iter conn_iter;
 
 	if (!suspend) {
 		if (drm_drv_uses_atomic_modeset(dev))
@@ -439,10 +443,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
 	nvif_notify_put(&drm->flip);
 
 	/* disable hotplug interrupts */
-	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+	drm_connector_list_iter_begin(dev, &conn_iter);
+	nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
 		struct nouveau_connector *conn = nouveau_connector(connector);
 		nvif_notify_put(&conn->hpd);
 	}
+	drm_connector_list_iter_end(&conn_iter);
 
 	drm_kms_helper_poll_disable(dev);
 	disp->fini(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index c779ee3c665b..c7ec86d6c3c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -81,6 +81,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
 int nouveau_modeset = -1;
 module_param_named(modeset, nouveau_modeset, int, 0400);
 
+MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+static int nouveau_atomic = 0;
+module_param_named(atomic, nouveau_atomic, int, 0400);
+
 MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
 static int nouveau_runtime_pm = -1;
 module_param_named(runpm, nouveau_runtime_pm, int, 0400);
@@ -509,6 +513,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
 
 	pci_set_master(pdev);
 
+	if (nouveau_atomic)
+		driver_pci.driver_features |= DRIVER_ATOMIC;
+
 	ret = drm_get_pci_dev(pdev, pent, &driver_pci);
 	if (ret) {
 		nvkm_device_del(&device);
@@ -874,22 +881,11 @@ nouveau_pmops_runtime_resume(struct device *dev)
 static int
 nouveau_pmops_runtime_idle(struct device *dev)
 {
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	struct nouveau_drm *drm = nouveau_drm(drm_dev);
-	struct drm_crtc *crtc;
-
 	if (!nouveau_pmops_runtime()) {
 		pm_runtime_forbid(dev);
 		return -EBUSY;
 	}
 
-	list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
-		if (crtc->enabled) {
-			DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
-			return -EBUSY;
-		}
-	}
 	pm_runtime_mark_last_busy(dev);
 	pm_runtime_autosuspend(dev);
 	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index df73bec354e8..b56524d343c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -616,7 +616,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 		struct nouveau_bo *nvbo;
 		uint32_t data;
 
-		if (unlikely(r->bo_index > req->nr_buffers)) {
+		if (unlikely(r->bo_index >= req->nr_buffers)) {
 			NV_PRINTK(err, cli, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
@@ -626,7 +626,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 		if (b->presumed.valid)
 			continue;
 
-		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+		if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
 			NV_PRINTK(err, cli, "reloc container bo index invalid\n");
 			ret = -EINVAL;
 			break;
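
The two relocation checks above are classic off-by-one fixes: with nr_buffers entries the valid indices are 0..nr_buffers-1, so index == nr_buffers must be rejected, which the old ">" failed to do. A tiny runnable demonstration:

#include <assert.h>

/* For an array of n entries, index n is already out of bounds, so the
 * guard must use ">=", not ">".
 */
static int index_valid(unsigned int idx, unsigned int n)
{
	return !(idx >= n);
}

int main(void)
{
	assert(index_valid(0, 4));
	assert(index_valid(3, 4));
	assert(!index_valid(4, 4));	/* the case the old ">" let through */
	assert(!index_valid(5, 4));
	return 0;
}
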
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 73b5d46104bd..434d2fc5bb1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -140,6 +140,9 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
 	if (fb->func->init)
 		fb->func->init(fb);
 
+	if (fb->func->init_remapper)
+		fb->func->init_remapper(fb);
+
 	if (fb->func->init_page) {
 		ret = fb->func->init_page(fb);
 		if (WARN_ON(ret))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index dffe1f5e1071..8205ce436b3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -37,6 +37,14 @@ gp100_fb_init_unkn(struct nvkm_fb *base)
 }
 
 void
+gp100_fb_init_remapper(struct nvkm_fb *fb)
+{
+	struct nvkm_device *device = fb->subdev.device;
+	/* Disable address remapper. */
+	nvkm_mask(device, 0x100c14, 0x00040000, 0x00000000);
+}
+
+void
 gp100_fb_init(struct nvkm_fb *base)
 {
 	struct gf100_fb *fb = gf100_fb(base);
@@ -56,6 +64,7 @@ gp100_fb = {
 	.dtor = gf100_fb_dtor,
 	.oneinit = gf100_fb_oneinit,
 	.init = gp100_fb_init,
+	.init_remapper = gp100_fb_init_remapper,
 	.init_page = gm200_fb_init_page,
 	.init_unkn = gp100_fb_init_unkn,
 	.ram_new = gp100_ram_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index b84b9861ef26..b4d74e815674 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -31,6 +31,7 @@ gp102_fb = {
 	.dtor = gf100_fb_dtor,
 	.oneinit = gf100_fb_oneinit,
 	.init = gp100_fb_init,
+	.init_remapper = gp100_fb_init_remapper,
 	.init_page = gm200_fb_init_page,
 	.ram_new = gp100_ram_new,
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 2857f31466bf..1e4ad61c19e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -11,6 +11,7 @@ struct nvkm_fb_func {
11 u32 (*tags)(struct nvkm_fb *); 11 u32 (*tags)(struct nvkm_fb *);
12 int (*oneinit)(struct nvkm_fb *); 12 int (*oneinit)(struct nvkm_fb *);
13 void (*init)(struct nvkm_fb *); 13 void (*init)(struct nvkm_fb *);
14 void (*init_remapper)(struct nvkm_fb *);
14 int (*init_page)(struct nvkm_fb *); 15 int (*init_page)(struct nvkm_fb *);
15 void (*init_unkn)(struct nvkm_fb *); 16 void (*init_unkn)(struct nvkm_fb *);
16 void (*intr)(struct nvkm_fb *); 17 void (*intr)(struct nvkm_fb *);
@@ -69,5 +70,6 @@ int gf100_fb_init_page(struct nvkm_fb *);
 
 int gm200_fb_init_page(struct nvkm_fb *);
 
+void gp100_fb_init_remapper(struct nvkm_fb *);
 void gp100_fb_init_unkn(struct nvkm_fb *);
 #endif
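
The series above wires the new hook through nvkm's per-chipset function table, where optional callbacks are simply left NULL on chips that do not need them. A minimal standalone sketch of that optional-hook pattern (names are hypothetical, not taken from nvkm):

    #include <stdio.h>

    struct fb_func {
    	void (*init)(void);
    	void (*init_remapper)(void);	/* optional; may be NULL */
    };

    static void gp100_init(void)     { puts("base init"); }
    static void gp100_remapper(void) { puts("remapper disabled"); }

    static const struct fb_func gp100 = {
    	.init = gp100_init,
    	.init_remapper = gp100_remapper,
    };

    static void fb_init(const struct fb_func *f)
    {
    	if (f->init)
    		f->init();
    	if (f->init_remapper)	/* skipped on chips without the hook */
    		f->init_remapper();
    }

    int main(void)
    {
    	fb_init(&gp100);
    	return 0;
    }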
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index b04ea0f3da75..0eb38ac8e86e 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -32,7 +32,10 @@ obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o
 obj-$(CONFIG_DRM_SUN4I)		+= sun4i_tv.o
 obj-$(CONFIG_DRM_SUN4I)		+= sun6i_drc.o
 
-obj-$(CONFIG_DRM_SUN4I_BACKEND)	+= sun4i-backend.o sun4i-frontend.o
+obj-$(CONFIG_DRM_SUN4I_BACKEND)	+= sun4i-backend.o
+ifdef CONFIG_DRM_SUN4I_BACKEND
+obj-$(CONFIG_DRM_SUN4I)		+= sun4i-frontend.o
+endif
 obj-$(CONFIG_DRM_SUN4I_HDMI)	+= sun4i-drm-hdmi.o
 obj-$(CONFIG_DRM_SUN6I_DSI)	+= sun6i-dsi.o
 obj-$(CONFIG_DRM_SUN8I_DW_HDMI)	+= sun8i-drm-hdmi.o
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 776c1513e582..a2bd5876c633 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -398,7 +398,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
 	 * unaligned offset is malformed and cause commands stream
 	 * corruption on the buffer address relocation.
 	 */
-	if (offset & 3 || offset >= obj->gem.size) {
+	if (offset & 3 || offset > obj->gem.size) {
 		err = -EINVAL;
 		goto fail;
 	}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 2ebdc6d5a76e..d5583190f3e4 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -137,7 +137,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
 
 	if (cmd > (char *) urb->transfer_buffer) {
 		/* Send partial buffer remaining before exiting */
-		int len = cmd - (char *) urb->transfer_buffer;
+		int len;
+		if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
+			*cmd++ = 0xAF;
+		len = cmd - (char *) urb->transfer_buffer;
 		ret = udl_submit_urb(dev, urb, len);
 		bytes_sent += len;
 	} else
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 0c87b1ac6b68..b992644c17e6 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -153,11 +153,11 @@ static void udl_compress_hline16(
 		raw_pixels_count_byte = cmd++; /*  we'll know this later */
 		raw_pixel_start = pixel;
 
-		cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
-			min((int)(pixel_end - pixel) / bpp,
-			    (int)(cmd_buffer_end - cmd) / 2))) * bpp;
+		cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL,
+					(unsigned long)(pixel_end - pixel) / bpp,
+					(unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp;
 
-		prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
+		prefetch_range((void *) pixel, cmd_pixel_end - pixel);
 		pixel_val16 = get_pixel_val16(pixel, bpp);
 
 		while (pixel < cmd_pixel_end) {
@@ -193,6 +193,9 @@ static void udl_compress_hline16(
 		if (pixel > raw_pixel_start) {
 			/* finalize last RAW span */
 			*raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
+		} else {
+			/* undo unused byte */
+			cmd--;
 		}
 
 		*cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
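
The reworked clamp in udl_compress_hline16() reserves one byte of command-buffer headroom, since each emitted pixel costs two bytes and udl_handle_damage() may still need to append a trailing 0xAF flush marker. A standalone sketch of the arithmetic under assumed values:

    #include <stdio.h>

    #define MAX_CMD_PIXELS 255UL	/* 8-bit per-command pixel count */

    static unsigned long min3ul(unsigned long a, unsigned long b, unsigned long c)
    {
    	unsigned long m = a < b ? a : b;
    	return m < c ? m : c;
    }

    int main(void)
    {
    	unsigned long bpp = 2;			/* bytes per pixel */
    	unsigned long pixels_left = 1000;	/* (pixel_end - pixel) / bpp */
    	unsigned long cmd_bytes_left = 18;	/* cmd_buffer_end - cmd */

    	/*
    	 * The old clamp allowed cmd_bytes_left / 2 = 9 pixels, filling the
    	 * buffer completely; reserving one byte first gives (18 - 1) / 2 = 8
    	 * pixels, so the flush marker appended later always fits.
    	 */
    	unsigned long n = min3ul(MAX_CMD_PIXELS + 1, pixels_left,
    				 (cmd_bytes_left - 1) / 2);

    	printf("pixels in this command: %lu (%lu bytes)\n", n, n * bpp);
    	return 0;
    }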
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index f1d5f76e9c33..d88073e7d22d 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -218,6 +218,9 @@ static int host1x_probe(struct platform_device *pdev)
 		return err;
 	}
 
+	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+		goto skip_iommu;
+
 	host->group = iommu_group_get(&pdev->dev);
 	if (host->group) {
 		struct iommu_domain_geometry *geometry;
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index e2f4a4d93d20..527a1cddb14f 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -569,7 +569,8 @@ void host1x_job_unpin(struct host1x_job *job)
 	for (i = 0; i < job->num_unpins; i++) {
 		struct host1x_job_unpin_data *unpin = &job->unpins[i];
 
-		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
+		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
+		    unpin->size && host->domain) {
 			iommu_unmap(host->domain, job->addr_phys[i],
 				    unpin->size);
 			free_iova(&host->iova,
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f858cc72011d..3942ee61bd1c 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1952,6 +1952,8 @@ static int hid_device_probe(struct device *dev)
 	}
 	hdev->io_started = false;
 
+	clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
+
 	if (!hdev->driver) {
 		id = hid_match_device(hdev, hdrv);
 		if (id == NULL) {
@@ -2215,7 +2217,8 @@ static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
 	struct hid_device *hdev = to_hid_device(dev);
 
 	if (hdev->driver == hdrv &&
-	    !hdrv->match(hdev, hid_ignore_special_drivers))
+	    !hdrv->match(hdev, hid_ignore_special_drivers) &&
+	    !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
 		return device_reprobe(dev);
 
 	return 0;
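
The two hid-core hunks cooperate: probe clears the REPROBED status bit, and the reprobe walker only acts when test_and_set_bit() claims it, so each device is reprobed at most once per pass. A standalone sketch of that claim-then-clear idiom using C11 atomics (names hypothetical):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_flag reprobed = ATOMIC_FLAG_INIT;

    /* Returns true only for the first caller; later calls see the flag set. */
    static bool claim_reprobe(void)
    {
    	return !atomic_flag_test_and_set(&reprobed);
    }

    static void probe(void)
    {
    	atomic_flag_clear(&reprobed);	/* a fresh probe may be reprobed again */
    	puts("probed");
    }

    int main(void)
    {
    	probe();
    	printf("first claim: %d\n", claim_reprobe());	/* 1 */
    	printf("second claim: %d\n", claim_reprobe());	/* 0 */
    	return 0;
    }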
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 8469b6964ff6..b48100236df8 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1154,6 +1154,8 @@ copy_rest:
 		goto out;
 	if (list->tail > list->head) {
 		len = list->tail - list->head;
+		if (len > count)
+			len = count;
 
 		if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
 			ret = -EFAULT;
@@ -1163,6 +1165,8 @@ copy_rest:
 		list->head += len;
 	} else {
 		len = HID_DEBUG_BUFSIZE - list->head;
+		if (len > count)
+			len = count;
 
 		if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
 			ret = -EFAULT;
@@ -1170,7 +1174,9 @@ copy_rest:
 		}
 		list->head = 0;
 		ret += len;
-		goto copy_rest;
+		count -= len;
+		if (count > 0)
+			goto copy_rest;
 	}
 
 }
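
Both branches of the hid-debug read path now clamp each chunk to the caller's remaining count before copying, and the wrap-around branch only loops while bytes remain. A simplified standalone sketch of the clamped two-part ring-buffer read (no locking, hypothetical layout):

    #include <stdio.h>
    #include <string.h>

    #define BUFSIZE 8

    /* Copy up to count bytes from a ring buffer region [head, tail). */
    static size_t ring_read(const char *buf, size_t *head, size_t tail,
    			char *out, size_t count)
    {
    	size_t ret = 0, len;

    	if (tail > *head) {			/* contiguous region */
    		len = tail - *head;
    		if (len > count)
    			len = count;
    		memcpy(out, buf + *head, len);
    		*head += len;
    		ret = len;
    	} else if (tail < *head) {		/* wrapped: tail part first */
    		len = BUFSIZE - *head;
    		if (len > count)
    			len = count;
    		memcpy(out, buf + *head, len);
    		*head = (*head + len) % BUFSIZE;
    		ret = len;
    		count -= len;
    		if (count > 0)			/* then the front part */
    			ret += ring_read(buf, head, tail, out + ret, count);
    	}
    	return ret;
    }

    int main(void)
    {
    	char buf[BUFSIZE] = { 'e', 'f', 0, 0, 0, 0, 'c', 'd' };
    	size_t head = 6;
    	char out[4];
    	size_t n = ring_read(buf, &head, 2, out, sizeof(out));

    	printf("%zu bytes: %.4s\n", n, out);	/* 4 bytes: cdef */
    	return 0;
    }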
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index c1652bb7bd15..eae0cb3ddec6 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -484,7 +484,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
 		return;
 	}
 
-	if ((ret_size > size) || (ret_size <= 2)) {
+	if ((ret_size > size) || (ret_size < 2)) {
 		dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
 			__func__, size, ret_size);
 		return;
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index e3ce233f8bdc..23872d08308c 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -36,6 +36,7 @@
 #include <linux/hiddev.h>
 #include <linux/compat.h>
 #include <linux/vmalloc.h>
+#include <linux/nospec.h>
 #include "usbhid.h"
 
 #ifdef CONFIG_USB_DYNAMIC_MINORS
@@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
 
 	if (uref->field_index >= report->maxfield)
 		goto inval;
+	uref->field_index = array_index_nospec(uref->field_index,
+					       report->maxfield);
 
 	field = report->field[uref->field_index];
 	if (uref->usage_index >= field->maxusage)
 		goto inval;
+	uref->usage_index = array_index_nospec(uref->usage_index,
+					       field->maxusage);
 
 	uref->usage_code = field->usage[uref->usage_index].hid;
 
@@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
 
 	if (uref->field_index >= report->maxfield)
 		goto inval;
+	uref->field_index = array_index_nospec(uref->field_index,
+					       report->maxfield);
 
 	field = report->field[uref->field_index];
 
@@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 		if (finfo.field_index >= report->maxfield)
 			break;
+		finfo.field_index = array_index_nospec(finfo.field_index,
+						       report->maxfield);
 
 		field = report->field[finfo.field_index];
 		memset(&finfo, 0, sizeof(finfo));
@@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 		if (cinfo.index >= hid->maxcollection)
 			break;
+		cinfo.index = array_index_nospec(cinfo.index,
+						 hid->maxcollection);
 
 		cinfo.type = hid->collection[cinfo.index].type;
 		cinfo.usage = hid->collection[cinfo.index].usage;
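
Every hiddev hunk applies the same Spectre-v1 hardening idiom: bounds-check the user-supplied index, then clamp it with array_index_nospec() so a mispredicted branch cannot dereference out of range speculatively. A hedged kernel-style sketch of the idiom with made-up types:

    #include <linux/errno.h>
    #include <linux/nospec.h>

    struct entry { int val; };
    struct table {
    	unsigned int nr_entries;
    	struct entry entries[16];
    };

    /* Validate first, then sanitize: after array_index_nospec() the index is
     * forced into [0, nr_entries) even under branch misprediction, so the
     * array load cannot leak data through cache side channels. */
    static int get_entry(struct table *t, unsigned int idx, struct entry **out)
    {
    	if (idx >= t->nr_entries)
    		return -EINVAL;
    	idx = array_index_nospec(idx, t->nr_entries);
    	*out = &t->entries[idx];
    	return 0;
    }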
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 0bb44d0088ed..ad7afa74d365 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -3365,8 +3365,14 @@ void wacom_setup_device_quirks(struct wacom *wacom)
 		if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
 			features->device_type |= WACOM_DEVICETYPE_PAD;
 
-		features->x_max = 4096;
-		features->y_max = 4096;
+		if (features->type == INTUOSHT2) {
+			features->x_max = features->x_max / 10;
+			features->y_max = features->y_max / 10;
+		}
+		else {
+			features->x_max = 4096;
+			features->y_max = 4096;
+		}
 	}
 	else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
 		features->device_type |= WACOM_DEVICETYPE_PAD;
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 44cffad43701..c4d176f5ed79 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -234,7 +234,8 @@ static const struct irq_chip cht_wc_i2c_irq_chip = {
234 .name = "cht_wc_ext_chrg_irq_chip", 234 .name = "cht_wc_ext_chrg_irq_chip",
235}; 235};
236 236
237static const char * const bq24190_suppliers[] = { "fusb302-typec-source" }; 237static const char * const bq24190_suppliers[] = {
238 "tcpm-source-psy-i2c-fusb302" };
238 239
239static const struct property_entry bq24190_props[] = { 240static const struct property_entry bq24190_props[] = {
240 PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers), 241 PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers),
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 75d6ab177055..7379043711df 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -237,12 +237,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
 	/*
 	 * It's not always possible to have 1 to 2 ratio when d=7, so fall back
 	 * to minimal possible clkh in this case.
+	 *
+	 * Note:
+	 * CLKH is not allowed to be 0, in this case I2C clock is not generated
+	 * at all
 	 */
-	if (clk >= clkl + d) {
+	if (clk > clkl + d) {
 		clkh = clk - clkl - d;
 		clkl -= d;
 	} else {
-		clkh = 0;
+		clkh = 1;
 		clkl = clk - (d << 1);
 	}
 
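
The davinci fix matters because a high-phase divider of zero stops SCL outright; the fallback now pins clkh to at least 1. A rough standalone illustration of the split (the initial 1:2 ratio and the sample values are assumptions; only the fallback shape follows the patch):

    #include <stdio.h>

    /* Split a total divider 'clk' into high/low phases, preferring a 1:2
     * high:low ratio but never letting the high phase reach zero. */
    static void split_divider(unsigned int clk, unsigned int d,
    			  unsigned int *clkh, unsigned int *clkl)
    {
    	*clkl = (clk << 1) / 3;		/* aim for 1:2 high:low */
    	if (clk > *clkl + d) {
    		*clkh = clk - *clkl - d;
    		*clkl -= d;
    	} else {
    		*clkh = 1;		/* 0 would disable SCL entirely */
    		*clkl = clk - (d << 1);
    	}
    	printf("clk=%u -> clkh=%u clkl=%u\n", clk, *clkh, *clkl);
    }

    int main(void)
    {
    	unsigned int clkh, clkl;

    	split_divider(20, 7, &clkh, &clkl);	/* fallback path: clkh=1 */
    	split_divider(60, 7, &clkh, &clkl);	/* normal path */
    	return 0;
    }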
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 0207e194f84b..498c5e891649 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -368,6 +368,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
 		goto err_desc;
 	}
 
+	reinit_completion(&dma->cmd_complete);
 	txdesc->callback = i2c_imx_dma_callback;
 	txdesc->callback_param = i2c_imx;
 	if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -622,7 +623,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
 	 * The first byte must be transmitted by the CPU.
 	 */
 	imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
@@ -681,7 +681,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
 	if (result)
 		return result;
 
-	reinit_completion(&i2c_imx->dma->cmd_complete);
 	time_left = wait_for_completion_timeout(
 				&i2c_imx->dma->cmd_complete,
 				msecs_to_jiffies(DMA_TIMEOUT));
@@ -1010,7 +1009,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
 	i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
 			"gpio");
 	rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
-	rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH);
+	rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
 
 	if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
 	    PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {
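
Moving reinit_completion() ahead of descriptor submission closes a race: if the DMA callback fires before the CPU reaches the wait, re-initializing afterwards would erase the completion and the wait would time out. A hedged kernel-style sketch of the safe ordering (a sketch only, not the actual driver code):

    #include <linux/completion.h>
    #include <linux/dmaengine.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    /* Arm the completion before the hardware can possibly complete, so an
     * early callback cannot be wiped out by a later reinit. */
    static int submit_and_wait(struct dma_chan *chan,
    			   struct dma_async_tx_descriptor *txdesc,
    			   struct completion *done, void (*cb)(void *),
    			   void *param, unsigned long timeout_ms)
    {
    	reinit_completion(done);		/* 1: arm first */
    	txdesc->callback = cb;			/* 2: completion callback */
    	txdesc->callback_param = param;
    	if (dma_submit_error(dmaengine_submit(txdesc)))	/* 3: queue */
    		return -EINVAL;
    	dma_async_issue_pending(chan);		/* 4: start transfer */
    	if (!wait_for_completion_timeout(done,
    					 msecs_to_jiffies(timeout_ms)))
    		return -ETIMEDOUT;
    	return 0;
    }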
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 5e310efd9446..3c1c817f6968 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -32,6 +32,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/reset.h>
 #include <linux/slab.h>
 
 /* register offsets */
@@ -111,8 +112,9 @@
 #define ID_ARBLOST	(1 << 3)
 #define ID_NACK		(1 << 4)
 /* persistent flags */
+#define ID_P_NO_RXDMA	(1 << 30) /* HW forbids RXDMA sometimes */
 #define ID_P_PM_BLOCKED	(1 << 31)
-#define ID_P_MASK	ID_P_PM_BLOCKED
+#define ID_P_MASK	(ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
 
 enum rcar_i2c_type {
 	I2C_RCAR_GEN1,
@@ -141,6 +143,8 @@ struct rcar_i2c_priv {
 	struct dma_chan *dma_rx;
 	struct scatterlist sg;
 	enum dma_data_direction dma_direction;
+
+	struct reset_control *rstc;
 };
 
 #define rcar_i2c_priv_to_dev(p)		((p)->adap.dev.parent)
@@ -370,6 +374,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
 	dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
 			 sg_dma_len(&priv->sg), priv->dma_direction);
 
+	/* Gen3 can only do one RXDMA per transfer and we just completed it */
+	if (priv->devtype == I2C_RCAR_GEN3 &&
+	    priv->dma_direction == DMA_FROM_DEVICE)
+		priv->flags |= ID_P_NO_RXDMA;
+
 	priv->dma_direction = DMA_NONE;
 }
 
@@ -407,8 +416,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
 	unsigned char *buf;
 	int len;
 
-	/* Do not use DMA if it's not available or for messages < 8 bytes */
-	if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE))
+	/* Do various checks to see if DMA is feasible at all */
+	if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
+	    (read && priv->flags & ID_P_NO_RXDMA))
 		return;
 
 	if (read) {
@@ -739,6 +749,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
 	}
 }
 
+/* I2C is a special case, we need to poll the status of a reset */
+static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
+{
+	int i, ret;
+
+	ret = reset_control_reset(priv->rstc);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < LOOP_TIMEOUT; i++) {
+		ret = reset_control_status(priv->rstc);
+		if (ret == 0)
+			return 0;
+		udelay(1);
+	}
+
+	return -ETIMEDOUT;
+}
+
 static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 				struct i2c_msg *msgs,
 				int num)
@@ -750,6 +779,16 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
 
 	pm_runtime_get_sync(dev);
 
+	/* Gen3 needs a reset before allowing RXDMA once */
+	if (priv->devtype == I2C_RCAR_GEN3) {
+		priv->flags |= ID_P_NO_RXDMA;
+		if (!IS_ERR(priv->rstc)) {
+			ret = rcar_i2c_do_reset(priv);
+			if (ret == 0)
+				priv->flags &= ~ID_P_NO_RXDMA;
+		}
+	}
+
 	rcar_i2c_init(priv);
 
 	ret = rcar_i2c_bus_barrier(priv);
@@ -920,6 +959,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
 	if (ret < 0)
 		goto out_pm_put;
 
+	if (priv->devtype == I2C_RCAR_GEN3) {
+		priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+		if (!IS_ERR(priv->rstc)) {
+			ret = reset_control_status(priv->rstc);
+			if (ret < 0)
+				priv->rstc = ERR_PTR(-ENOTSUPP);
+		}
+	}
+
 	/* Stay always active when multi-master to keep arbitration working */
 	if (of_property_read_bool(dev->of_node, "multi-master"))
 		priv->flags |= ID_P_PM_BLOCKED;
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index e866c481bfc3..fce52bdab2b7 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -127,7 +127,7 @@ enum stu300_error {
 
 /*
  * The number of address send athemps tried before giving up.
- * If the first one failes it seems like 5 to 8 attempts are required.
+ * If the first one fails it seems like 5 to 8 attempts are required.
  */
 #define NUM_ADDR_RESEND_ATTEMPTS 12
 
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 5fccd1f1bca8..797def5319f1 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -545,6 +545,14 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
 {
 	u32 cnfg;
 
+	/*
+	 * NACK interrupt is generated before the I2C controller generates
+	 * the STOP condition on the bus. So wait for 2 clock periods
+	 * before disabling the controller so that the STOP condition has
+	 * been delivered properly.
+	 */
+	udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
+
 	cnfg = i2c_readl(i2c_dev, I2C_CNFG);
 	if (cnfg & I2C_CNFG_PACKET_MODE_EN)
 		i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG);
@@ -706,15 +714,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
 	if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
 		return 0;
 
-	/*
-	 * NACK interrupt is generated before the I2C controller generates
-	 * the STOP condition on the bus. So wait for 2 clock periods
-	 * before resetting the controller so that the STOP condition has
-	 * been delivered properly.
-	 */
-	if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
-		udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
-
 	tegra_i2c_init(i2c_dev);
 	if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
 		if (msg->flags & I2C_M_IGNORE_NAK)
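
The relocated delay is just two I2C clock periods rounded up to whole microseconds. A standalone check of the arithmetic for common bus rates:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
    	unsigned int rates[] = { 100000, 400000, 1000000 };	/* Hz */

    	for (unsigned int i = 0; i < 3; i++) {
    		/* two clock periods, converted to whole microseconds */
    		unsigned int us = DIV_ROUND_UP(2 * 1000000, rates[i]);
    		printf("%7u Hz -> udelay(%u)\n", rates[i], us);
    	}
    	return 0;	/* 100 kHz -> 20 us, 400 kHz -> 5 us, 1 MHz -> 2 us */
    }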
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 31d16ada6e7d..301285c54603 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -198,7 +198,16 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
 
 		val = !val;
 		bri->set_scl(adap, val);
-		ndelay(RECOVERY_NDELAY);
+
+		/*
+		 * If we can set SDA, we will always create STOP here to ensure
+		 * the additional pulses will do no harm. This is achieved by
+		 * letting SDA follow SCL half a cycle later.
+		 */
+		ndelay(RECOVERY_NDELAY / 2);
+		if (bri->set_sda)
+			bri->set_sda(adap, val);
+		ndelay(RECOVERY_NDELAY / 2);
 	}
 
 	/* check if recovery actually succeeded */
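
With SDA following SCL half a cycle later, every SCL-high phase ends with SDA rising, which the bus reads as a STOP, so the extra recovery pulses cannot confuse a client. A standalone sketch of the edge ordering (the RECOVERY_NDELAY value is an assumption; the GPIO setters are stubbed):

    #include <stdbool.h>
    #include <stdio.h>

    #define RECOVERY_NDELAY 5000	/* assumed ns per SCL half-period */

    static void set_scl(bool v) { printf("SCL=%d\n", v); }
    static void set_sda(bool v) { printf("SDA=%d\n", v); }
    static void ndelay(unsigned int ns) { (void)ns; /* stub */ }

    int main(void)
    {
    	bool val = true;

    	/* A few of the recovery pulses: SDA trails SCL by half a cycle,
    	 * so each SCL-high phase ends with SDA rising -> a STOP. */
    	for (int i = 0; i < 6; i++) {
    		val = !val;
    		set_scl(val);
    		ndelay(RECOVERY_NDELAY / 2);
    		set_sda(val);
    		ndelay(RECOVERY_NDELAY / 2);
    	}
    	return 0;
    }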
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 3e90b6a1d9d2..cc06e8404e9b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3488,8 +3488,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	struct ib_flow_attr		  *flow_attr;
 	struct ib_qp			  *qp;
 	struct ib_uflow_resources	  *uflow_res;
+	struct ib_uverbs_flow_spec_hdr	  *kern_spec;
 	int err = 0;
-	void *kern_spec;
 	void *ib_spec;
 	int i;
 
@@ -3538,8 +3538,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	if (!kern_flow_attr)
 		return -ENOMEM;
 
-	memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
-	err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
+	*kern_flow_attr = cmd.flow_attr;
+	err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore,
 				 cmd.flow_attr.size);
 	if (err)
 		goto err_free_attr;
@@ -3559,6 +3559,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 		goto err_uobj;
 	}
 
+	if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
+		err = -EINVAL;
+		goto err_put;
+	}
+
 	flow_attr = kzalloc(struct_size(flow_attr, flows,
 				cmd.flow_attr.num_of_specs), GFP_KERNEL);
 	if (!flow_attr) {
@@ -3578,21 +3583,22 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
 	flow_attr->flags = kern_flow_attr->flags;
 	flow_attr->size = sizeof(*flow_attr);
 
-	kern_spec = kern_flow_attr + 1;
+	kern_spec = kern_flow_attr->flow_specs;
 	ib_spec = flow_attr + 1;
 	for (i = 0; i < flow_attr->num_of_specs &&
-	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
-	     cmd.flow_attr.size >=
-	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
-		err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec,
-					   uflow_res);
+	     cmd.flow_attr.size >= sizeof(*kern_spec) &&
+	     cmd.flow_attr.size >= kern_spec->size;
+	     i++) {
+		err = kern_spec_to_ib_spec(
+				file->ucontext, (struct ib_uverbs_flow_spec *)kern_spec,
+				ib_spec, uflow_res);
 		if (err)
 			goto err_free;
 
 		flow_attr->size +=
 			((union ib_flow_spec *) ib_spec)->size;
-		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
-		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
+		cmd.flow_attr.size -= kern_spec->size;
+		kern_spec = ((void *)kern_spec) + kern_spec->size;
 		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
 	}
 	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
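
Typing kern_spec as ib_uverbs_flow_spec_hdr turns the loop into the usual walk over back-to-back variable-size records: verify the remaining bytes cover at least a header and the record's self-declared size before consuming it. A standalone sketch of the same walk over a hypothetical record type:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct spec_hdr {
    	uint16_t size;		/* total record size, header included */
    	uint16_t type;
    };

    static int walk_specs(const void *buf, size_t len)
    {
    	const struct spec_hdr *h = buf;

    	while (len >= sizeof(*h) && len >= h->size) {
    		if (h->size < sizeof(*h))
    			return -1;	/* malformed record */
    		printf("type %u, %u bytes\n", h->type, h->size);
    		len -= h->size;
    		h = (const void *)((const char *)h + h->size);
    	}
    	return len ? -1 : 0;	/* leftover bytes mean a truncated spec */
    }

    int main(void)
    {
    	unsigned char buf[12] = { 0 };
    	struct spec_hdr a = { 8, 1 }, b = { 4, 2 };

    	memcpy(buf, &a, sizeof(a));
    	memcpy(buf + 8, &b, sizeof(b));
    	return walk_specs(buf, sizeof(buf));
    }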
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 1445918e3239..7b76e6f81aeb 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
 {
 	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
 
-	if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
+	if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
 		return -ENOMEM;
 
 	mhp->mpl[mhp->mpl_len++] = addr;
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 1a1a47ac53c6..f15c93102081 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 
 	lockdep_assert_held(&qp->s_lock);
 	ps->s_txreq = get_txreq(ps->dev, qp);
-	if (IS_ERR(ps->s_txreq))
+	if (!ps->s_txreq)
 		goto bail_no_tx;
 
 	if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index b7b671017e59..e254dcec6f64 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	int middle = 0;
 
 	ps->s_txreq = get_txreq(ps->dev, qp);
-	if (IS_ERR(ps->s_txreq))
+	if (!ps->s_txreq)
 		goto bail_no_tx;
 
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 1ab332f1866e..70d39fc450a1 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -503,7 +503,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
 	u32 lid;
 
 	ps->s_txreq = get_txreq(ps->dev, qp);
-	if (IS_ERR(ps->s_txreq))
+	if (!ps->s_txreq)
 		goto bail_no_tx;
 
 	if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index 873e48ea923f..c4ab2d5b4502 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 - 2017 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 			       struct rvt_qp *qp)
 	__must_hold(&qp->s_lock)
 {
-	struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+	struct verbs_txreq *tx = NULL;
 
 	write_seqlock(&dev->txwait_lock);
 	if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 729244c3086c..1c19bbc764b2 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
  *
  * This file is provided under a dual BSD/GPLv2 license. When using or
  * redistributing this file, you may do so under either license.
@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
 	if (unlikely(!tx)) {
 		/* call slow path to get the lock */
 		tx = __get_txreq(dev, qp);
-		if (IS_ERR(tx))
+		if (!tx)
 			return tx;
 	}
 	tx->qp = qp;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e3e330f59c2c..b3ba9a222550 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -6113,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
 			     MLX5_CAP_GEN(mdev, num_vhca_ports));
 
-	if (MLX5_VPORT_MANAGER(mdev) &&
+	if (MLX5_ESWITCH_MANAGER(mdev) &&
 	    mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
 		dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
 
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 0af7b7905550..f5de5adc9b1a 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
 	desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
 		    srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
-	if (desc_size == 0 || srq->msrq.max_gs > desc_size)
-		return ERR_PTR(-EINVAL);
+	if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
+		err = -EINVAL;
+		goto err_srq;
+	}
 	desc_size = roundup_pow_of_two(desc_size);
 	desc_size = max_t(size_t, 32, desc_size);
-	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
-		return ERR_PTR(-EINVAL);
+	if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
+		err = -EINVAL;
+		goto err_srq;
+	}
 	srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
 		sizeof(struct mlx5_wqe_data_seg);
 	srq->msrq.wqe_shift = ilog2(desc_size);
 	buf_size = srq->msrq.max * desc_size;
-	if (buf_size < desc_size)
-		return ERR_PTR(-EINVAL);
+	if (buf_size < desc_size) {
+		err = -EINVAL;
+		goto err_srq;
+	}
 	in.type = init_attr->srq_type;
 
 	if (pd->uobject)
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 1f9cd7d8b7ad..f5ae24865355 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1346,6 +1346,8 @@ static const struct acpi_device_id elan_acpi_id[] = {
1346 { "ELAN0611", 0 }, 1346 { "ELAN0611", 0 },
1347 { "ELAN0612", 0 }, 1347 { "ELAN0612", 0 },
1348 { "ELAN0618", 0 }, 1348 { "ELAN0618", 0 },
1349 { "ELAN061D", 0 },
1350 { "ELAN0622", 0 },
1349 { "ELAN1000", 0 }, 1351 { "ELAN1000", 0 },
1350 { } 1352 { }
1351}; 1353};
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index b353d494ad40..136f6e7bf797 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
 		},
 	},
+	{
+		/* Lenovo LaVie Z */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+		},
+	},
 	{ }
 };
 
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e055d228bfb9..689ffe538370 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -142,7 +142,6 @@ config DMAR_TABLE
 config INTEL_IOMMU
 	bool "Support for Intel IOMMU using DMA Remapping Devices"
 	depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
-	select DMA_DIRECT_OPS
 	select IOMMU_API
 	select IOMMU_IOVA
 	select NEED_DMA_MAP_STATE
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 14e4b3722428..115ff26e9ced 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -31,7 +31,6 @@
 #include <linux/pci.h>
 #include <linux/dmar.h>
 #include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
 #include <linux/mempool.h>
 #include <linux/memory.h>
 #include <linux/cpu.h>
@@ -485,14 +484,37 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 static int intel_iommu_ecs = 1;
+static int intel_iommu_pasid28;
 static int iommu_identity_mapping;
 
 #define IDENTMAP_ALL		1
 #define IDENTMAP_GFX		2
 #define IDENTMAP_AZALIA		4
 
-#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap))
-#define pasid_enabled(iommu)	(ecs_enabled(iommu) && ecap_pasid(iommu->ecap))
+/* Broadwell and Skylake have broken ECS support — normal so-called "second
+ * level" translation of DMA requests-without-PASID doesn't actually happen
+ * unless you also set the NESTE bit in an extended context-entry. Which of
+ * course means that SVM doesn't work because it's trying to do nested
+ * translation of the physical addresses it finds in the process page tables,
+ * through the IOVA->phys mapping found in the "second level" page tables.
+ *
+ * The VT-d specification was retroactively changed to change the definition
+ * of the capability bits and pretend that Broadwell/Skylake never happened...
+ * but unfortunately the wrong bit was changed. It's ECS which is broken, but
+ * for some reason it was the PASID capability bit which was redefined (from
+ * bit 28 on BDW/SKL to bit 40 in future).
+ *
+ * So our test for ECS needs to eschew those implementations which set the old
+ * PASID capabiity bit 28, since those are the ones on which ECS is broken.
+ * Unless we are working around the 'pasid28' limitations, that is, by putting
+ * the device into passthrough mode for normal DMA and thus masking the bug.
+ */
+#define ecs_enabled(iommu)	(intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+				 (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
+/* PASID support is thus enabled if ECS is enabled and *either* of the old
+ * or new capability bits are set. */
+#define pasid_enabled(iommu)	(ecs_enabled(iommu) && \
+				 (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
 
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -555,6 +577,11 @@ static int __init intel_iommu_setup(char *str)
 		printk(KERN_INFO
 			"Intel-IOMMU: disable extended context table support\n");
 		intel_iommu_ecs = 0;
+	} else if (!strncmp(str, "pasid28", 7)) {
+		printk(KERN_INFO
+			"Intel-IOMMU: enable pre-production PASID support\n");
+		intel_iommu_pasid28 = 1;
+		iommu_identity_mapping |= IDENTMAP_GFX;
 	} else if (!strncmp(str, "tboot_noforce", 13)) {
 		printk(KERN_INFO
 			"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -3713,30 +3740,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  unsigned long attrs)
 {
-	void *vaddr;
+	struct page *page = NULL;
+	int order;
 
-	vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
-	if (iommu_no_mapping(dev) || !vaddr)
-		return vaddr;
-
-	*dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
-			PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
-			dev->coherent_dma_mask);
-	if (!*dma_handle)
-		goto out_free_pages;
-	return vaddr;
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	if (!iommu_no_mapping(dev))
+		flags &= ~(GFP_DMA | GFP_DMA32);
+	else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+			flags |= GFP_DMA;
+		else
+			flags |= GFP_DMA32;
+	}
+
+	if (gfpflags_allow_blocking(flags)) {
+		unsigned int count = size >> PAGE_SHIFT;
+
+		page = dma_alloc_from_contiguous(dev, count, order, flags);
+		if (page && iommu_no_mapping(dev) &&
+		    page_to_phys(page) + size > dev->coherent_dma_mask) {
+			dma_release_from_contiguous(dev, page, count);
+			page = NULL;
+		}
+	}
+
+	if (!page)
+		page = alloc_pages(flags, order);
+	if (!page)
+		return NULL;
+	memset(page_address(page), 0, size);
+
+	*dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+					 DMA_BIDIRECTIONAL,
+					 dev->coherent_dma_mask);
+	if (*dma_handle)
+		return page_address(page);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 
-out_free_pages:
-	dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
 	return NULL;
 }
 
 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
 				dma_addr_t dma_handle, unsigned long attrs)
 {
-	if (!iommu_no_mapping(dev))
-		intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
-	dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+	int order;
+	struct page *page = virt_to_page(vaddr);
+
+	size = PAGE_ALIGN(size);
+	order = get_order(size);
+
+	intel_unmap(dev, dma_handle, size);
+	if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+		__free_pages(page, order);
 }
 
 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
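
The restored allocator only applies a GFP zone when the device is not translated by the IOMMU and its coherent mask is narrower than the platform needs, then tries CMA before falling back to the buddy allocator. A hedged sketch of the zone-selection step alone (assuming iommu_no_mapping() semantics as used above):

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /* With an IOMMU the IOVA can be placed anywhere, so zone flags are
     * dropped; without one, the physical address must fit the mask. */
    static gfp_t adjust_zone_flags(struct device *dev, gfp_t flags,
    			       bool iommu_translated)
    {
    	if (iommu_translated)
    		return flags & ~(GFP_DMA | GFP_DMA32);
    	if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
    		if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
    			flags |= GFP_DMA;
    		else
    			flags |= GFP_DMA32;
    	}
    	return flags;
    }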
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 07ea6a48aac6..87107c995cb5 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -136,6 +136,7 @@ struct dm_writecache {
 	struct dm_target *ti;
 	struct dm_dev *dev;
 	struct dm_dev *ssd_dev;
+	sector_t start_sector;
 	void *memory_map;
 	uint64_t memory_map_size;
 	size_t metadata_sectors;
@@ -293,6 +294,10 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 	}
 
 	dax_read_unlock(id);
+
+	wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
+	wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;
+
 	return 0;
 err3:
 	kvfree(pages);
@@ -311,7 +316,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
 static void persistent_memory_release(struct dm_writecache *wc)
 {
 	if (wc->memory_vmapped)
-		vunmap(wc->memory_map);
+		vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
 }
 
 static struct page *persistent_memory_page(void *addr)
317static struct page *persistent_memory_page(void *addr) 322static struct page *persistent_memory_page(void *addr)
@@ -359,7 +364,7 @@ static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
 
 static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
 {
-	return wc->metadata_sectors +
+	return wc->start_sector + wc->metadata_sectors +
 	       ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
 }
 
@@ -471,6 +476,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc)
 		if (unlikely(region.sector + region.count > wc->metadata_sectors))
 			region.count = wc->metadata_sectors - region.sector;
 
+		region.sector += wc->start_sector;
 		atomic_inc(&endio.count);
 		req.bi_op = REQ_OP_WRITE;
 		req.bi_op_flags = REQ_SYNC;
@@ -1946,14 +1952,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 	}
 	wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
 
-	if (WC_MODE_PMEM(wc)) {
-		r = persistent_memory_claim(wc);
-		if (r) {
-			ti->error = "Unable to map persistent memory for cache";
-			goto bad;
-		}
-	}
-
 	/*
 	 * Parse the cache block size
 	 */
@@ -1982,7 +1980,16 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
 	while (opt_params) {
 		string = dm_shift_arg(&as), opt_params--;
-		if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
+		if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
+			unsigned long long start_sector;
+			string = dm_shift_arg(&as), opt_params--;
+			if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
+				goto invalid_optional;
+			wc->start_sector = start_sector;
+			if (wc->start_sector != start_sector ||
+			    wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
+				goto invalid_optional;
+		} else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
 			string = dm_shift_arg(&as), opt_params--;
 			if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
 				goto invalid_optional;
@@ -2039,12 +2046,20 @@ invalid_optional:
 		goto bad;
 	}
 
-	if (!WC_MODE_PMEM(wc)) {
+	if (WC_MODE_PMEM(wc)) {
+		r = persistent_memory_claim(wc);
+		if (r) {
+			ti->error = "Unable to map persistent memory for cache";
+			goto bad;
+		}
+	} else {
 		struct dm_io_region region;
 		struct dm_io_request req;
 		size_t n_blocks, n_metadata_blocks;
 		uint64_t n_bitmap_bits;
 
+		wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
+
 		bio_list_init(&wc->flush_list);
 		wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
 		if (IS_ERR(wc->flush_thread)) {
@@ -2097,7 +2112,7 @@ invalid_optional:
 		}
 
 		region.bdev = wc->ssd_dev->bdev;
-		region.sector = 0;
+		region.sector = wc->start_sector;
 		region.count = wc->metadata_sectors;
 		req.bi_op = REQ_OP_READ;
 		req.bi_op_flags = REQ_SYNC;
@@ -2265,7 +2280,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
 
 static struct target_type writecache_target = {
 	.name			= "writecache",
-	.version		= {1, 0, 0},
+	.version		= {1, 1, 0},
 	.module			= THIS_MODULE,
 	.ctr			= writecache_ctr,
 	.dtr			= writecache_dtr,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 29b0cd9ec951..994aed2f9dff 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5547,7 +5547,8 @@ int md_run(struct mddev *mddev)
 		else
 			pr_warn("md: personality for level %s is not loaded!\n",
 				mddev->clevel);
-		return -EINVAL;
+		err = -EINVAL;
+		goto abort;
 	}
 	spin_unlock(&pers_lock);
 	if (mddev->level != pers->level) {
@@ -5560,7 +5561,8 @@ int md_run(struct mddev *mddev)
 	    pers->start_reshape == NULL) {
 		/* This personality cannot handle reshaping... */
 		module_put(pers->owner);
-		return -EINVAL;
+		err = -EINVAL;
+		goto abort;
 	}
 
 	if (pers->sync_request) {
@@ -5629,7 +5631,7 @@ int md_run(struct mddev *mddev)
 		mddev->private = NULL;
 		module_put(pers->owner);
 		bitmap_destroy(mddev);
-		return err;
+		goto abort;
 	}
 	if (mddev->queue) {
 		bool nonrot = true;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 478cf446827f..35bd3a62451b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3893,6 +3893,13 @@ static int raid10_run(struct mddev *mddev)
 			    disk->rdev->saved_raid_disk < 0)
 				conf->fullsync = 1;
 		}
+
+		if (disk->replacement &&
+		    !test_bit(In_sync, &disk->replacement->flags) &&
+		    disk->replacement->saved_raid_disk < 0) {
+			conf->fullsync = 1;
+		}
+
 		disk->recovery_disabled = mddev->recovery_disabled - 1;
 	}
 
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 40826bba06b6..fcfab6635f9c 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -207,29 +207,19 @@ void lirc_bpf_free(struct rc_dev *rcdev)
 	bpf_prog_array_free(rcdev->raw->progs);
 }
 
-int lirc_prog_attach(const union bpf_attr *attr)
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
-	struct bpf_prog *prog;
 	struct rc_dev *rcdev;
 	int ret;
 
 	if (attr->attach_flags)
 		return -EINVAL;
 
-	prog = bpf_prog_get_type(attr->attach_bpf_fd,
-				 BPF_PROG_TYPE_LIRC_MODE2);
-	if (IS_ERR(prog))
-		return PTR_ERR(prog);
-
 	rcdev = rc_dev_get_from_fd(attr->target_fd);
-	if (IS_ERR(rcdev)) {
-		bpf_prog_put(prog);
+	if (IS_ERR(rcdev))
 		return PTR_ERR(rcdev);
-	}
 
 	ret = lirc_bpf_attach(rcdev, prog);
-	if (ret)
-		bpf_prog_put(prog);
 
 	put_device(&rcdev->dev);
 
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 753b1a698fc4..6b16946f9b05 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -103,15 +103,15 @@ static struct file *cxl_getfile(const char *name,
 	d_instantiate(path.dentry, inode);
 
 	file = alloc_file(&path, OPEN_FMODE(flags), fops);
-	if (IS_ERR(file))
-		goto err_dput;
+	if (IS_ERR(file)) {
+		path_put(&path);
+		goto err_fs;
+	}
 	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
 	file->private_data = priv;
 
 	return file;
 
-err_dput:
-	path_put(&path);
 err_inode:
 	iput(inode);
 err_fs:
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e05c3245930a..fa840666bdd1 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file)
 static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
 {
 	void __iomem *address = (void __iomem *)file->private_data;
-	unsigned char *page;
-	int retval;
 	int len = 0;
 	unsigned int value;
-
-	if (*offset < 0)
-		return -EINVAL;
-	if (count == 0 || count > 1024)
-		return 0;
-	if (*offset != 0)
-		return 0;
-
-	page = (unsigned char *)__get_free_page(GFP_KERNEL);
-	if (!page)
-		return -ENOMEM;
+	char lbuf[20];
 
 	value = readl(address);
-	len = sprintf(page, "%d\n", value);
-
-	if (copy_to_user(buf, page, len)) {
-		retval = -EFAULT;
-		goto exit;
-	}
-	*offset += len;
-	retval = len;
+	len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
 
-exit:
-	free_page((unsigned long)page);
-	return retval;
+	return simple_read_from_buffer(buf, count, offset, lbuf, len);
 }
 
 static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
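
simple_read_from_buffer() subsumes the hand-rolled offset checks, page allocation and copy_to_user() of the old read handler; it handles partial reads and advances *ppos itself. A hedged sketch of a minimal read handler in the same shape (the formatted value is a placeholder):

    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/uaccess.h>

    /* Format a value into a small stack buffer and let
     * simple_read_from_buffer() handle offset, count and the user copy. */
    static ssize_t value_read(struct file *file, char __user *buf,
    			  size_t count, loff_t *ppos)
    {
    	char lbuf[20];
    	int len = scnprintf(lbuf, sizeof(lbuf), "%d\n", 42);

    	return simple_read_from_buffer(buf, count, ppos, lbuf, len);
    }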
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index b0b8f18a85e3..6649f0d56d2f 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -310,8 +310,11 @@ int mei_irq_read_handler(struct mei_device *dev,
 	if (&cl->link == &dev->file_list) {
 		/* A message for not connected fixed address clients
 		 * should be silently discarded
+		 * On power down client may be force cleaned,
+		 * silently discard such messages
 		 */
-		if (hdr_is_fixed(mei_hdr)) {
+		if (hdr_is_fixed(mei_hdr) ||
+		    dev->dev_state == MEI_DEV_POWER_DOWN) {
 			mei_irq_discard_msg(dev, mei_hdr);
 			ret = 0;
 			goto reset_slots;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index efd733472a35..56c6f79a5c5a 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b,
467 unsigned int num_pages, bool is_2m_pages, unsigned int *target) 467 unsigned int num_pages, bool is_2m_pages, unsigned int *target)
468{ 468{
469 unsigned long status; 469 unsigned long status;
470 unsigned long pfn = page_to_pfn(b->page); 470 unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
471 471
472 STATS_INC(b->stats.lock[is_2m_pages]); 472 STATS_INC(b->stats.lock[is_2m_pages]);
473 473
@@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b,
515 unsigned int num_pages, bool is_2m_pages, unsigned int *target) 515 unsigned int num_pages, bool is_2m_pages, unsigned int *target)
516{ 516{
517 unsigned long status; 517 unsigned long status;
518 unsigned long pfn = page_to_pfn(b->page); 518 unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
519 519
520 STATS_INC(b->stats.unlock[is_2m_pages]); 520 STATS_INC(b->stats.unlock[is_2m_pages]);
521 521
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index ef05e0039378..2a833686784b 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -27,8 +27,8 @@ struct mmc_gpio {
27 bool override_cd_active_level; 27 bool override_cd_active_level;
28 irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id); 28 irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
29 char *ro_label; 29 char *ro_label;
30 char cd_label[0];
31 u32 cd_debounce_delay_ms; 30 u32 cd_debounce_delay_ms;
31 char cd_label[];
32}; 32};
33 33
34static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) 34static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
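
The slot-gpio change fixes a subtle layout bug: char cd_label[0] was a GNU zero-length array declared in the middle of the struct, so writes to cd_label silently overlapped cd_debounce_delay_ms, which had been added after it. The C99 flexible-array form char cd_label[] must be the last member, which the compiler now enforces. A hedged sketch of how such a struct is sized and allocated (struct labelled is illustrative):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

struct labelled {
        u32 value;
        char label[];                   /* flexible array: must be last */
};

static struct labelled *labelled_alloc(const char *name, gfp_t gfp)
{
        struct labelled *l;

        /* struct_size() adds the trailing array without overflow */
        l = kzalloc(struct_size(l, label, strlen(name) + 1), gfp);
        if (l)
                strcpy(l->label, name); /* space includes the NUL */
        return l;
}
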
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 623f4d27fa01..80dc2fd6576c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1065,8 +1065,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
1065 * It's used when HS400 mode is enabled. 1065 * It's used when HS400 mode is enabled.
1066 */ 1066 */
1067 if (data->flags & MMC_DATA_WRITE && 1067 if (data->flags & MMC_DATA_WRITE &&
1068 !(host->timing != MMC_TIMING_MMC_HS400)) 1068 host->timing != MMC_TIMING_MMC_HS400)
1069 return; 1069 goto disable;
1070 1070
1071 if (data->flags & MMC_DATA_WRITE) 1071 if (data->flags & MMC_DATA_WRITE)
1072 enable = SDMMC_CARD_WR_THR_EN; 1072 enable = SDMMC_CARD_WR_THR_EN;
@@ -1074,7 +1074,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
1074 enable = SDMMC_CARD_RD_THR_EN; 1074 enable = SDMMC_CARD_RD_THR_EN;
1075 1075
1076 if (host->timing != MMC_TIMING_MMC_HS200 && 1076 if (host->timing != MMC_TIMING_MMC_HS200 &&
1077 host->timing != MMC_TIMING_UHS_SDR104) 1077 host->timing != MMC_TIMING_UHS_SDR104 &&
1078 host->timing != MMC_TIMING_MMC_HS400)
1078 goto disable; 1079 goto disable;
1079 1080
1080 blksz_depth = blksz / (1 << host->data_shift); 1081 blksz_depth = blksz / (1 << host->data_shift);
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index f7f9773d161f..d032bd63444d 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -139,8 +139,7 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
139 renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST, 139 renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
140 RST_RESERVED_BITS | val); 140 RST_RESERVED_BITS | val);
141 141
142 if (host->data && host->data->flags & MMC_DATA_READ) 142 clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
143 clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
144 143
145 renesas_sdhi_internal_dmac_enable_dma(host, true); 144 renesas_sdhi_internal_dmac_enable_dma(host, true);
146} 145}
@@ -164,17 +163,14 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
164 goto force_pio; 163 goto force_pio;
165 164
166 /* This DMAC cannot handle buffers that are not 8-byte aligned */ 165 /* This DMAC cannot handle buffers that are not 8-byte aligned */
167 if (!IS_ALIGNED(sg_dma_address(sg), 8)) { 166 if (!IS_ALIGNED(sg_dma_address(sg), 8))
168 dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, 167 goto force_pio_with_unmap;
169 mmc_get_dma_dir(data));
170 goto force_pio;
171 }
172 168
173 if (data->flags & MMC_DATA_READ) { 169 if (data->flags & MMC_DATA_READ) {
174 dtran_mode |= DTRAN_MODE_CH_NUM_CH1; 170 dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
175 if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) && 171 if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) &&
176 test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags)) 172 test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
177 goto force_pio; 173 goto force_pio_with_unmap;
178 } else { 174 } else {
179 dtran_mode |= DTRAN_MODE_CH_NUM_CH0; 175 dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
180 } 176 }
@@ -189,6 +185,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
189 185
190 return; 186 return;
191 187
188force_pio_with_unmap:
189 dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data));
190
192force_pio: 191force_pio:
193 host->force_pio = true; 192 host->force_pio = true;
194 renesas_sdhi_internal_dmac_enable_dma(host, false); 193 renesas_sdhi_internal_dmac_enable_dma(host, false);
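
The renesas_sdhi change is a pure restructuring: the error paths that previously duplicated dma_unmap_sg() now jump to a force_pio_with_unmap label that performs the unmap once and falls through into force_pio. A small stand-alone sketch of the stacked-label fallback (map_buf() and friends are made-up names):

#include <stdbool.h>

struct ctx { bool mapped, pio; };

static bool map_buf(struct ctx *c)    { c->mapped = true; return true; }
static bool aligned_ok(struct ctx *c) { (void)c; return false; }
static bool start_dma(struct ctx *c)  { (void)c; return false; }
static void unmap_buf(struct ctx *c)  { c->mapped = false; }
static void use_pio(struct ctx *c)    { c->pio = true; }

static void start_transfer(struct ctx *c)
{
        if (!map_buf(c))
                goto fallback;          /* nothing mapped yet */
        if (!aligned_ok(c) || !start_dma(c))
                goto fallback_unmap;    /* mapping must be undone */
        return;

fallback_unmap:
        unmap_buf(c);                   /* falls through ... */
fallback:
        use_pio(c);                     /* ... into the common path */
}

Label order matters: the label that undoes more work sits above the one that undoes less, so a single fall-through covers both cases.
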
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d6aef70d34fa..4eb3d29ecde1 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -312,6 +312,15 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
312 312
313 if (imx_data->socdata->flags & ESDHC_FLAG_HS400) 313 if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
314 val |= SDHCI_SUPPORT_HS400; 314 val |= SDHCI_SUPPORT_HS400;
315
316 /*
317 * Do not advertise faster UHS modes if there are no
318 * pinctrl states for 100MHz/200MHz.
319 */
320 if (IS_ERR_OR_NULL(imx_data->pins_100mhz) ||
321 IS_ERR_OR_NULL(imx_data->pins_200mhz))
322 val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50
323 | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400);
315 } 324 }
316 } 325 }
317 326
@@ -1158,18 +1167,6 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
1158 ESDHC_PINCTRL_STATE_100MHZ); 1167 ESDHC_PINCTRL_STATE_100MHZ);
1159 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, 1168 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
1160 ESDHC_PINCTRL_STATE_200MHZ); 1169 ESDHC_PINCTRL_STATE_200MHZ);
1161 if (IS_ERR(imx_data->pins_100mhz) ||
1162 IS_ERR(imx_data->pins_200mhz)) {
1163 dev_warn(mmc_dev(host->mmc),
1164 "could not get ultra high speed state, work on normal mode\n");
1165 /*
1166 * fall back to not supporting uhs by specifying no
1167 * 1.8v quirk
1168 */
1169 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1170 }
1171 } else {
1172 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1173 } 1170 }
1174 1171
1175 /* call to generic mmc_of_parse to support additional capabilities */ 1172 /* call to generic mmc_of_parse to support additional capabilities */
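
Previously a missing 100 MHz/200 MHz pinctrl state set SDHCI_QUIRK2_NO_1_8_V and thereby disabled every 1.8 V mode; the new code instead clears only the capability bits for the modes that actually need the faster pad configuration, directly in the capability register read-back. The pattern in isolation (bit names are illustrative):

#include <stdint.h>

#define CAP_SDR50  (1u << 0)    /* illustrative bit layout only */
#define CAP_DDR50  (1u << 1)
#define CAP_SDR104 (1u << 2)
#define CAP_HS400  (1u << 3)

/* Hide the modes a board without fast pinctrl states cannot run. */
static uint32_t visible_caps(uint32_t hw_caps, int have_fast_pins)
{
        if (!have_fast_pins)
                hw_caps &= ~(CAP_SDR50 | CAP_DDR50 |
                             CAP_SDR104 | CAP_HS400);
        return hw_caps;
}
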
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index e7472590f2ed..8e7f3e35ee3d 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1446,6 +1446,7 @@ static int sunxi_mmc_runtime_resume(struct device *dev)
1446 sunxi_mmc_init_host(host); 1446 sunxi_mmc_init_host(host);
1447 sunxi_mmc_set_bus_width(host, mmc->ios.bus_width); 1447 sunxi_mmc_set_bus_width(host, mmc->ios.bus_width);
1448 sunxi_mmc_set_clk(host, &mmc->ios); 1448 sunxi_mmc_set_clk(host, &mmc->ios);
1449 enable_irq(host->irq);
1449 1450
1450 return 0; 1451 return 0;
1451} 1452}
@@ -1455,6 +1456,12 @@ static int sunxi_mmc_runtime_suspend(struct device *dev)
1455 struct mmc_host *mmc = dev_get_drvdata(dev); 1456 struct mmc_host *mmc = dev_get_drvdata(dev);
1456 struct sunxi_mmc_host *host = mmc_priv(mmc); 1457 struct sunxi_mmc_host *host = mmc_priv(mmc);
1457 1458
1459 /*
1460 * When the clocks are off, spurious interrupts
1461 * may be received, which will stall the system.
1462 * Disabling the irq will prevent this.
1463 */
1464 disable_irq(host->irq);
1458 sunxi_mmc_reset_host(host); 1465 sunxi_mmc_reset_host(host);
1459 sunxi_mmc_disable(host); 1466 sunxi_mmc_disable(host);
1460 1467
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index c3f7aaa5d18f..d7e10b36a0b9 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -926,10 +926,12 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
926 if (ret) 926 if (ret)
927 return ret; 927 return ret;
928 928
929 if (f_pdata->use_direct_mode) 929 if (f_pdata->use_direct_mode) {
930 memcpy_toio(cqspi->ahb_base + to, buf, len); 930 memcpy_toio(cqspi->ahb_base + to, buf, len);
931 else 931 ret = cqspi_wait_idle(cqspi);
932 } else {
932 ret = cqspi_indirect_write_execute(nor, to, buf, len); 933 ret = cqspi_indirect_write_execute(nor, to, buf, len);
934 }
933 if (ret) 935 if (ret)
934 return ret; 936 return ret;
935 937
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 98663c50ded0..4d5d01cb8141 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
743static int bond_option_mode_set(struct bonding *bond, 743static int bond_option_mode_set(struct bonding *bond,
744 const struct bond_opt_value *newval) 744 const struct bond_opt_value *newval)
745{ 745{
746 if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) { 746 if (!bond_mode_uses_arp(newval->value)) {
747 netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n", 747 if (bond->params.arp_interval) {
748 newval->string); 748 netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
749 /* disable arp monitoring */ 749 newval->string);
750 bond->params.arp_interval = 0; 750 /* disable arp monitoring */
751 /* set miimon to default value */ 751 bond->params.arp_interval = 0;
752 bond->params.miimon = BOND_DEFAULT_MIIMON; 752 }
753 netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n", 753
754 bond->params.miimon); 754 if (!bond->params.miimon) {
755 /* set miimon to default value */
756 bond->params.miimon = BOND_DEFAULT_MIIMON;
757 netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
758 bond->params.miimon);
759 }
755 } 760 }
756 761
757 if (newval->value == BOND_MODE_ALB) 762 if (newval->value == BOND_MODE_ALB)
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index b397a33f3d32..9b449400376b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
634 int err; 634 int err;
635 635
636 err = pm_runtime_get_sync(priv->device); 636 err = pm_runtime_get_sync(priv->device);
637 if (err) 637 if (err < 0) {
638 pm_runtime_put_noidle(priv->device); 638 pm_runtime_put_noidle(priv->device);
639 return err;
640 }
639 641
640 return err; 642 return 0;
641} 643}
642 644
643static void m_can_clk_stop(struct m_can_priv *priv) 645static void m_can_clk_stop(struct m_can_priv *priv)
@@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
1109 1111
1110 } else { 1112 } else {
1111 /* Version 3.1.x or 3.2.x */ 1113 /* Version 3.1.x or 3.2.x */
1112 cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE); 1114 cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
1115 CCCR_NISO);
1113 1116
1114 /* Only 3.2.x has NISO Bit implemented */ 1117 /* Only 3.2.x has NISO Bit implemented */
1115 if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) 1118 if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
1642 priv->can.clock.freq = clk_get_rate(cclk); 1645 priv->can.clock.freq = clk_get_rate(cclk);
1643 priv->mram_base = mram_addr; 1646 priv->mram_base = mram_addr;
1644 1647
1645 m_can_of_parse_mram(priv, mram_config_vals);
1646
1647 platform_set_drvdata(pdev, dev); 1648 platform_set_drvdata(pdev, dev);
1648 SET_NETDEV_DEV(dev, &pdev->dev); 1649 SET_NETDEV_DEV(dev, &pdev->dev);
1649 1650
@@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
1666 goto clk_disable; 1667 goto clk_disable;
1667 } 1668 }
1668 1669
1670 m_can_of_parse_mram(priv, mram_config_vals);
1671
1669 devm_can_led_init(dev); 1672 devm_can_led_init(dev);
1670 1673
1671 of_can_transceiver(dev); 1674 of_can_transceiver(dev);
@@ -1687,8 +1690,6 @@ failed_ret:
1687 return ret; 1690 return ret;
1688} 1691}
1689 1692
1690/* TODO: runtime PM with power down or sleep mode */
1691
1692static __maybe_unused int m_can_suspend(struct device *dev) 1693static __maybe_unused int m_can_suspend(struct device *dev)
1693{ 1694{
1694 struct net_device *ndev = dev_get_drvdata(dev); 1695 struct net_device *ndev = dev_get_drvdata(dev);
@@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
1715 1716
1716 pinctrl_pm_select_default_state(dev); 1717 pinctrl_pm_select_default_state(dev);
1717 1718
1718 m_can_init_ram(priv);
1719
1720 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1719 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1721 1720
1722 if (netif_running(ndev)) { 1721 if (netif_running(ndev)) {
@@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
1726 if (ret) 1725 if (ret)
1727 return ret; 1726 return ret;
1728 1727
1728 m_can_init_ram(priv);
1729 m_can_start(ndev); 1729 m_can_start(ndev);
1730 netif_device_attach(ndev); 1730 netif_device_attach(ndev);
1731 netif_start_queue(ndev); 1731 netif_start_queue(ndev);
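
Two patterns in the m_can diff deserve a note. First, pm_runtime_get_sync() bumps the usage count even when it fails and can legitimately return 1 when the device was already active, so the error check must be err < 0 with pm_runtime_put_noidle() to drop the count; the old if (err) also fired on the benign positive return, dropping a usage count it still needed and reporting success as failure. The canonical shape:

#include <linux/pm_runtime.h>

static int my_clk_start(struct device *dev)
{
        /* >= 0 means resumed; 1 just means "was already active" */
        int err = pm_runtime_get_sync(dev);

        if (err < 0) {
                /* the usage count is bumped even on failure: undo it */
                pm_runtime_put_noidle(dev);
                return err;
        }
        return 0;
}

Later kernels wrap exactly this in pm_runtime_resume_and_get(). Second, m_can_of_parse_mram() and m_can_init_ram() move to points where the clocks are guaranteed to be running, presumably because both touch the message RAM, which is only accessible with the clocks on.
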
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c7427bdd3a4b..2949a381a94d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
86 return 0; 86 return 0;
87 } 87 }
88 cdm = of_iomap(np_cdm, 0); 88 cdm = of_iomap(np_cdm, 0);
89 if (!cdm) {
90 of_node_put(np_cdm);
91 dev_err(&ofdev->dev, "can't map clock node!\n");
92 return 0;
93 }
89 94
90 if (in_8(&cdm->ipb_clk_sel) & 0x1) 95 if (in_8(&cdm->ipb_clk_sel) & 0x1)
91 freq *= 2; 96 freq *= 2;
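
of_find_compatible_node() and friends return a device node with its refcount raised, so every exit path, including the new !cdm error branch, must drop it with of_node_put(). The discipline in isolation (a hedged sketch; the compatible string is illustrative):

#include <linux/of.h>
#include <linux/of_address.h>

static void __iomem *map_clock_node(void)
{
        struct device_node *np;
        void __iomem *base;

        np = of_find_compatible_node(NULL, NULL, "fsl,mpc5200-cdm");
        if (!np)
                return NULL;

        base = of_iomap(np, 0); /* NULL on mapping failure */
        of_node_put(np);        /* drop the ref from of_find_... */
        return base;
}
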
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index b9e28578bc7b..455a3797a200 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
58#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */ 58#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */
59#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */ 59#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */
60 60
61#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \
62 ((u32)(y) << 16) | \
63 ((u32)(z) << 8))
64
61/* System Control Registers Bits */ 65/* System Control Registers Bits */
62#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */ 66#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */
63#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */ 67#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */
@@ -782,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
782 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count, 786 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
783 hw_ver_major, hw_ver_minor, hw_ver_sub); 787 hw_ver_major, hw_ver_minor, hw_ver_sub);
784 788
789#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
790 /* FW < v3.3.0 DMA logic doesn't correctly handle the mix of 32-bit and
791 * 64-bit logical addresses: this workaround forces usage of 32-bit
792 * DMA addresses only when such a fw is detected.
793 */
794 if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
795 PCIEFD_FW_VERSION(3, 3, 0)) {
796 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
797 if (err)
798 dev_warn(&pdev->dev,
799 "warning: can't set DMA mask %llxh (err %d)\n",
800 DMA_BIT_MASK(32), err);
801 }
802#endif
803
785 /* stop system clock */ 804 /* stop system clock */
786 pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, 805 pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
787 PCIEFD_REG_SYS_CTL_CLR); 806 PCIEFD_REG_SYS_CTL_CLR);
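
PCIEFD_FW_VERSION packs major/minor/sub into one 32-bit value, the same trick as KERNEL_VERSION(), so firmware revisions can be compared with plain <. Ordering holds as long as each component stays below 256; a runnable illustration:

#include <assert.h>
#include <stdint.h>

/* x.y.z -> 0xXXYYZZ00, mirroring the macro added above */
#define FW_VERSION(x, y, z) \
        (((uint32_t)(x) << 24) | ((uint32_t)(y) << 16) | ((uint32_t)(z) << 8))

int main(void)
{
        /* 3.2.9 sorts below 3.3.0: minor occupies the higher bits */
        assert(FW_VERSION(3, 2, 9) < FW_VERSION(3, 3, 0));
        assert(FW_VERSION(2, 9, 9) < FW_VERSION(3, 0, 0));
        return 0;
}

The guarded dma_set_mask_and_coherent(..., DMA_BIT_MASK(32)) call then restricts DMA to 32-bit addresses only on firmware that mishandles mixed widths.
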
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 89aec07c225f..5a24039733ef 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -2,6 +2,7 @@
2 * 2 *
3 * Copyright (C) 2012 - 2014 Xilinx, Inc. 3 * Copyright (C) 2012 - 2014 Xilinx, Inc.
4 * Copyright (C) 2009 PetaLogix. All rights reserved. 4 * Copyright (C) 2009 PetaLogix. All rights reserved.
5 * Copyright (C) 2017 Sandvik Mining and Construction Oy
5 * 6 *
6 * Description: 7 * Description:
7 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. 8 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
25#include <linux/module.h> 26#include <linux/module.h>
26#include <linux/netdevice.h> 27#include <linux/netdevice.h>
27#include <linux/of.h> 28#include <linux/of.h>
29#include <linux/of_device.h>
28#include <linux/platform_device.h> 30#include <linux/platform_device.h>
29#include <linux/skbuff.h> 31#include <linux/skbuff.h>
32#include <linux/spinlock.h>
30#include <linux/string.h> 33#include <linux/string.h>
31#include <linux/types.h> 34#include <linux/types.h>
32#include <linux/can/dev.h> 35#include <linux/can/dev.h>
@@ -101,7 +104,7 @@ enum xcan_reg {
101#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\ 104#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
102 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \ 105 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
103 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \ 106 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
104 XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK) 107 XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
105 108
106/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */ 109/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
107#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ 110#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
@@ -118,6 +121,7 @@ enum xcan_reg {
118/** 121/**
119 * struct xcan_priv - This struct defines a CAN driver instance 122 * struct xcan_priv - This struct defines a CAN driver instance
120 * @can: CAN private data structure. 123 * @can: CAN private data structure.
124 * @tx_lock: Lock for synchronizing TX interrupt handling
121 * @tx_head: Tx CAN packets ready to send on the queue 125 * @tx_head: Tx CAN packets ready to send on the queue
122 * @tx_tail: Tx CAN packets successfully sent on the queue 126 * @tx_tail: Tx CAN packets successfully sent on the queue
123 * @tx_max: Maximum number of packets the driver can send 127 * @tx_max: Maximum number of packets the driver can send
@@ -132,6 +136,7 @@ enum xcan_reg {
132 */ 136 */
133struct xcan_priv { 137struct xcan_priv {
134 struct can_priv can; 138 struct can_priv can;
139 spinlock_t tx_lock;
135 unsigned int tx_head; 140 unsigned int tx_head;
136 unsigned int tx_tail; 141 unsigned int tx_tail;
137 unsigned int tx_max; 142 unsigned int tx_max;
@@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
159 .brp_inc = 1, 164 .brp_inc = 1,
160}; 165};
161 166
167#define XCAN_CAP_WATERMARK 0x0001
168struct xcan_devtype_data {
169 unsigned int caps;
170};
171
162/** 172/**
163 * xcan_write_reg_le - Write a value to the device register little endian 173 * xcan_write_reg_le - Write a value to the device register little endian
164 * @priv: Driver private data structure 174 * @priv: Driver private data structure
@@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
238 usleep_range(500, 10000); 248 usleep_range(500, 10000);
239 } 249 }
240 250
251 /* reset clears FIFOs */
252 priv->tx_head = 0;
253 priv->tx_tail = 0;
254
241 return 0; 255 return 0;
242} 256}
243 257
@@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
392 struct net_device_stats *stats = &ndev->stats; 406 struct net_device_stats *stats = &ndev->stats;
393 struct can_frame *cf = (struct can_frame *)skb->data; 407 struct can_frame *cf = (struct can_frame *)skb->data;
394 u32 id, dlc, data[2] = {0, 0}; 408 u32 id, dlc, data[2] = {0, 0};
409 unsigned long flags;
395 410
396 if (can_dropped_invalid_skb(ndev, skb)) 411 if (can_dropped_invalid_skb(ndev, skb))
397 return NETDEV_TX_OK; 412 return NETDEV_TX_OK;
@@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
439 data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); 454 data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
440 455
441 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); 456 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
457
458 spin_lock_irqsave(&priv->tx_lock, flags);
459
442 priv->tx_head++; 460 priv->tx_head++;
443 461
444 /* Write the Frame to Xilinx CAN TX FIFO */ 462 /* Write the Frame to Xilinx CAN TX FIFO */
@@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
454 stats->tx_bytes += cf->can_dlc; 472 stats->tx_bytes += cf->can_dlc;
455 } 473 }
456 474
475 /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
476 if (priv->tx_max > 1)
477 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
478
457 /* Check if the TX buffer is full */ 479 /* Check if the TX buffer is full */
458 if ((priv->tx_head - priv->tx_tail) == priv->tx_max) 480 if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
459 netif_stop_queue(ndev); 481 netif_stop_queue(ndev);
460 482
483 spin_unlock_irqrestore(&priv->tx_lock, flags);
484
461 return NETDEV_TX_OK; 485 return NETDEV_TX_OK;
462} 486}
463 487
@@ -530,6 +554,123 @@ static int xcan_rx(struct net_device *ndev)
530} 554}
531 555
532/** 556/**
557 * xcan_current_error_state - Get current error state from HW
558 * @ndev: Pointer to net_device structure
559 *
560 * Checks the current CAN error state from the HW. Note that this
561 * only checks for ERROR_PASSIVE and ERROR_WARNING.
562 *
563 * Return:
564 * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
565 * otherwise.
566 */
567static enum can_state xcan_current_error_state(struct net_device *ndev)
568{
569 struct xcan_priv *priv = netdev_priv(ndev);
570 u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
571
572 if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
573 return CAN_STATE_ERROR_PASSIVE;
574 else if (status & XCAN_SR_ERRWRN_MASK)
575 return CAN_STATE_ERROR_WARNING;
576 else
577 return CAN_STATE_ERROR_ACTIVE;
578}
579
580/**
581 * xcan_set_error_state - Set new CAN error state
582 * @ndev: Pointer to net_device structure
583 * @new_state: The new CAN state to be set
584 * @cf: Error frame to be populated or NULL
585 *
586 * Set new CAN error state for the device, updating statistics and
587 * populating the error frame if given.
588 */
589static void xcan_set_error_state(struct net_device *ndev,
590 enum can_state new_state,
591 struct can_frame *cf)
592{
593 struct xcan_priv *priv = netdev_priv(ndev);
594 u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
595 u32 txerr = ecr & XCAN_ECR_TEC_MASK;
596 u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
597
598 priv->can.state = new_state;
599
600 if (cf) {
601 cf->can_id |= CAN_ERR_CRTL;
602 cf->data[6] = txerr;
603 cf->data[7] = rxerr;
604 }
605
606 switch (new_state) {
607 case CAN_STATE_ERROR_PASSIVE:
608 priv->can.can_stats.error_passive++;
609 if (cf)
610 cf->data[1] = (rxerr > 127) ?
611 CAN_ERR_CRTL_RX_PASSIVE :
612 CAN_ERR_CRTL_TX_PASSIVE;
613 break;
614 case CAN_STATE_ERROR_WARNING:
615 priv->can.can_stats.error_warning++;
616 if (cf)
617 cf->data[1] |= (txerr > rxerr) ?
618 CAN_ERR_CRTL_TX_WARNING :
619 CAN_ERR_CRTL_RX_WARNING;
620 break;
621 case CAN_STATE_ERROR_ACTIVE:
622 if (cf)
623 cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
624 break;
625 default:
626 /* non-ERROR states are handled elsewhere */
627 WARN_ON(1);
628 break;
629 }
630}
631
632/**
633 * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
634 * @ndev: Pointer to net_device structure
635 *
636 * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if
637 * the performed RX/TX has caused it to drop to a lesser state and set
638 * the interface state accordingly.
639 */
640static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
641{
642 struct xcan_priv *priv = netdev_priv(ndev);
643 enum can_state old_state = priv->can.state;
644 enum can_state new_state;
645
646 /* changing error state due to successful frame RX/TX can only
647 * occur from these states
648 */
649 if (old_state != CAN_STATE_ERROR_WARNING &&
650 old_state != CAN_STATE_ERROR_PASSIVE)
651 return;
652
653 new_state = xcan_current_error_state(ndev);
654
655 if (new_state != old_state) {
656 struct sk_buff *skb;
657 struct can_frame *cf;
658
659 skb = alloc_can_err_skb(ndev, &cf);
660
661 xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
662
663 if (skb) {
664 struct net_device_stats *stats = &ndev->stats;
665
666 stats->rx_packets++;
667 stats->rx_bytes += cf->can_dlc;
668 netif_rx(skb);
669 }
670 }
671}
672
673/**
533 * xcan_err_interrupt - error frame Isr 674 * xcan_err_interrupt - error frame Isr
534 * @ndev: net_device pointer 675 * @ndev: net_device pointer
535 * @isr: interrupt status register value 676 * @isr: interrupt status register value
@@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
544 struct net_device_stats *stats = &ndev->stats; 685 struct net_device_stats *stats = &ndev->stats;
545 struct can_frame *cf; 686 struct can_frame *cf;
546 struct sk_buff *skb; 687 struct sk_buff *skb;
547 u32 err_status, status, txerr = 0, rxerr = 0; 688 u32 err_status;
548 689
549 skb = alloc_can_err_skb(ndev, &cf); 690 skb = alloc_can_err_skb(ndev, &cf);
550 691
551 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); 692 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
552 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); 693 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
553 txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
554 rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
555 XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
556 status = priv->read_reg(priv, XCAN_SR_OFFSET);
557 694
558 if (isr & XCAN_IXR_BSOFF_MASK) { 695 if (isr & XCAN_IXR_BSOFF_MASK) {
559 priv->can.state = CAN_STATE_BUS_OFF; 696 priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
563 can_bus_off(ndev); 700 can_bus_off(ndev);
564 if (skb) 701 if (skb)
565 cf->can_id |= CAN_ERR_BUSOFF; 702 cf->can_id |= CAN_ERR_BUSOFF;
566 } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) { 703 } else {
567 priv->can.state = CAN_STATE_ERROR_PASSIVE; 704 enum can_state new_state = xcan_current_error_state(ndev);
568 priv->can.can_stats.error_passive++; 705
569 if (skb) { 706 xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
570 cf->can_id |= CAN_ERR_CRTL;
571 cf->data[1] = (rxerr > 127) ?
572 CAN_ERR_CRTL_RX_PASSIVE :
573 CAN_ERR_CRTL_TX_PASSIVE;
574 cf->data[6] = txerr;
575 cf->data[7] = rxerr;
576 }
577 } else if (status & XCAN_SR_ERRWRN_MASK) {
578 priv->can.state = CAN_STATE_ERROR_WARNING;
579 priv->can.can_stats.error_warning++;
580 if (skb) {
581 cf->can_id |= CAN_ERR_CRTL;
582 cf->data[1] |= (txerr > rxerr) ?
583 CAN_ERR_CRTL_TX_WARNING :
584 CAN_ERR_CRTL_RX_WARNING;
585 cf->data[6] = txerr;
586 cf->data[7] = rxerr;
587 }
588 } 707 }
589 708
590 /* Check for Arbitration lost interrupt */ 709 /* Check for Arbitration lost interrupt */
@@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
600 if (isr & XCAN_IXR_RXOFLW_MASK) { 719 if (isr & XCAN_IXR_RXOFLW_MASK) {
601 stats->rx_over_errors++; 720 stats->rx_over_errors++;
602 stats->rx_errors++; 721 stats->rx_errors++;
603 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
604 if (skb) { 722 if (skb) {
605 cf->can_id |= CAN_ERR_CRTL; 723 cf->can_id |= CAN_ERR_CRTL;
606 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; 724 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
709 827
710 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 828 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
711 while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { 829 while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
712 if (isr & XCAN_IXR_RXOK_MASK) { 830 work_done += xcan_rx(ndev);
713 priv->write_reg(priv, XCAN_ICR_OFFSET,
714 XCAN_IXR_RXOK_MASK);
715 work_done += xcan_rx(ndev);
716 } else {
717 priv->write_reg(priv, XCAN_ICR_OFFSET,
718 XCAN_IXR_RXNEMP_MASK);
719 break;
720 }
721 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); 831 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
722 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 832 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
723 } 833 }
724 834
725 if (work_done) 835 if (work_done) {
726 can_led_event(ndev, CAN_LED_EVENT_RX); 836 can_led_event(ndev, CAN_LED_EVENT_RX);
837 xcan_update_error_state_after_rxtx(ndev);
838 }
727 839
728 if (work_done < quota) { 840 if (work_done < quota) {
729 napi_complete_done(napi, work_done); 841 napi_complete_done(napi, work_done);
730 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 842 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
731 ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); 843 ier |= XCAN_IXR_RXNEMP_MASK;
732 priv->write_reg(priv, XCAN_IER_OFFSET, ier); 844 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
733 } 845 }
734 return work_done; 846 return work_done;
@@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
743{ 855{
744 struct xcan_priv *priv = netdev_priv(ndev); 856 struct xcan_priv *priv = netdev_priv(ndev);
745 struct net_device_stats *stats = &ndev->stats; 857 struct net_device_stats *stats = &ndev->stats;
858 unsigned int frames_in_fifo;
859 int frames_sent = 1; /* TXOK => at least 1 frame was sent */
860 unsigned long flags;
861 int retries = 0;
862
863 /* Synchronize with xmit as we need to know the exact number
864 * of frames in the FIFO to stay in sync due to the TXFEMP
865 * handling.
866 * This also prevents a race between netif_wake_queue() and
867 * netif_stop_queue().
868 */
869 spin_lock_irqsave(&priv->tx_lock, flags);
870
871 frames_in_fifo = priv->tx_head - priv->tx_tail;
872
873 if (WARN_ON_ONCE(frames_in_fifo == 0)) {
874 /* clear TXOK anyway to avoid getting back here */
875 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
876 spin_unlock_irqrestore(&priv->tx_lock, flags);
877 return;
878 }
879
880 /* Check if 2 frames were sent (TXOK only means that at least 1
881 * frame was sent).
882 */
883 if (frames_in_fifo > 1) {
884 WARN_ON(frames_in_fifo > priv->tx_max);
885
886 /* Synchronize TXOK and isr so that after the loop:
887 * (1) isr variable is up-to-date at least up to TXOK clear
888 * time. This avoids us clearing a TXOK of a second frame
889 * but not noticing that the FIFO is now empty and thus
890 * marking only a single frame as sent.
891 * (2) No TXOK is left. Having one could mean leaving a
892 * stray TXOK as we might process the associated frame
893 * via TXFEMP handling as we read TXFEMP *after* TXOK
894 * clear to satisfy (1).
895 */
896 while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
897 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
898 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
899 }
746 900
747 while ((priv->tx_head - priv->tx_tail > 0) && 901 if (isr & XCAN_IXR_TXFEMP_MASK) {
748 (isr & XCAN_IXR_TXOK_MASK)) { 902 /* nothing in FIFO anymore */
903 frames_sent = frames_in_fifo;
904 }
905 } else {
906 /* single frame in fifo, just clear TXOK */
749 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); 907 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
908 }
909
910 while (frames_sent--) {
750 can_get_echo_skb(ndev, priv->tx_tail % 911 can_get_echo_skb(ndev, priv->tx_tail %
751 priv->tx_max); 912 priv->tx_max);
752 priv->tx_tail++; 913 priv->tx_tail++;
753 stats->tx_packets++; 914 stats->tx_packets++;
754 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
755 } 915 }
756 can_led_event(ndev, CAN_LED_EVENT_TX); 916
757 netif_wake_queue(ndev); 917 netif_wake_queue(ndev);
918
919 spin_unlock_irqrestore(&priv->tx_lock, flags);
920
921 can_led_event(ndev, CAN_LED_EVENT_TX);
922 xcan_update_error_state_after_rxtx(ndev);
758} 923}
759 924
760/** 925/**
@@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
773 struct net_device *ndev = (struct net_device *)dev_id; 938 struct net_device *ndev = (struct net_device *)dev_id;
774 struct xcan_priv *priv = netdev_priv(ndev); 939 struct xcan_priv *priv = netdev_priv(ndev);
775 u32 isr, ier; 940 u32 isr, ier;
941 u32 isr_errors;
776 942
777 /* Get the interrupt status from Xilinx CAN */ 943 /* Get the interrupt status from Xilinx CAN */
778 isr = priv->read_reg(priv, XCAN_ISR_OFFSET); 944 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
791 xcan_tx_interrupt(ndev, isr); 957 xcan_tx_interrupt(ndev, isr);
792 958
793 /* Check for the type of error interrupt and Processing it */ 959 /* Check for the type of error interrupt and Processing it */
794 if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | 960 isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
795 XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) { 961 XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
796 priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK | 962 if (isr_errors) {
797 XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK | 963 priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
798 XCAN_IXR_ARBLST_MASK));
799 xcan_err_interrupt(ndev, isr); 964 xcan_err_interrupt(ndev, isr);
800 } 965 }
801 966
802 /* Check for the type of receive interrupt and Processing it */ 967 /* Check for the type of receive interrupt and Processing it */
803 if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) { 968 if (isr & XCAN_IXR_RXNEMP_MASK) {
804 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 969 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
805 ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK); 970 ier &= ~XCAN_IXR_RXNEMP_MASK;
806 priv->write_reg(priv, XCAN_IER_OFFSET, ier); 971 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
807 napi_schedule(&priv->napi); 972 napi_schedule(&priv->napi);
808 } 973 }
@@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
819static void xcan_chip_stop(struct net_device *ndev) 984static void xcan_chip_stop(struct net_device *ndev)
820{ 985{
821 struct xcan_priv *priv = netdev_priv(ndev); 986 struct xcan_priv *priv = netdev_priv(ndev);
822 u32 ier;
823 987
824 /* Disable interrupts and leave the can in configuration mode */ 988 /* Disable interrupts and leave the can in configuration mode */
825 ier = priv->read_reg(priv, XCAN_IER_OFFSET); 989 set_reset_mode(ndev);
826 ier &= ~XCAN_INTR_ALL;
827 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
828 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
829 priv->can.state = CAN_STATE_STOPPED; 990 priv->can.state = CAN_STATE_STOPPED;
830} 991}
831 992
@@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = {
958 */ 1119 */
959static int __maybe_unused xcan_suspend(struct device *dev) 1120static int __maybe_unused xcan_suspend(struct device *dev)
960{ 1121{
961 if (!device_may_wakeup(dev)) 1122 struct net_device *ndev = dev_get_drvdata(dev);
962 return pm_runtime_force_suspend(dev);
963 1123
964 return 0; 1124 if (netif_running(ndev)) {
1125 netif_stop_queue(ndev);
1126 netif_device_detach(ndev);
1127 xcan_chip_stop(ndev);
1128 }
1129
1130 return pm_runtime_force_suspend(dev);
965} 1131}
966 1132
967/** 1133/**
@@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
973 */ 1139 */
974static int __maybe_unused xcan_resume(struct device *dev) 1140static int __maybe_unused xcan_resume(struct device *dev)
975{ 1141{
976 if (!device_may_wakeup(dev)) 1142 struct net_device *ndev = dev_get_drvdata(dev);
977 return pm_runtime_force_resume(dev); 1143 int ret;
978 1144
979 return 0; 1145 ret = pm_runtime_force_resume(dev);
1146 if (ret) {
1147 dev_err(dev, "pm_runtime_force_resume failed on resume\n");
1148 return ret;
1149 }
1150
1151 if (netif_running(ndev)) {
1152 ret = xcan_chip_start(ndev);
1153 if (ret) {
1154 dev_err(dev, "xcan_chip_start failed on resume\n");
1155 return ret;
1156 }
1157
1158 netif_device_attach(ndev);
1159 netif_start_queue(ndev);
1160 }
980 1161
1162 return 0;
981} 1163}
982 1164
983/** 1165/**
@@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
992 struct net_device *ndev = dev_get_drvdata(dev); 1174 struct net_device *ndev = dev_get_drvdata(dev);
993 struct xcan_priv *priv = netdev_priv(ndev); 1175 struct xcan_priv *priv = netdev_priv(ndev);
994 1176
995 if (netif_running(ndev)) {
996 netif_stop_queue(ndev);
997 netif_device_detach(ndev);
998 }
999
1000 priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
1001 priv->can.state = CAN_STATE_SLEEPING;
1002
1003 clk_disable_unprepare(priv->bus_clk); 1177 clk_disable_unprepare(priv->bus_clk);
1004 clk_disable_unprepare(priv->can_clk); 1178 clk_disable_unprepare(priv->can_clk);
1005 1179
@@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
1018 struct net_device *ndev = dev_get_drvdata(dev); 1192 struct net_device *ndev = dev_get_drvdata(dev);
1019 struct xcan_priv *priv = netdev_priv(ndev); 1193 struct xcan_priv *priv = netdev_priv(ndev);
1020 int ret; 1194 int ret;
1021 u32 isr, status;
1022 1195
1023 ret = clk_prepare_enable(priv->bus_clk); 1196 ret = clk_prepare_enable(priv->bus_clk);
1024 if (ret) { 1197 if (ret) {
@@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
1032 return ret; 1205 return ret;
1033 } 1206 }
1034 1207
1035 priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
1036 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1037 status = priv->read_reg(priv, XCAN_SR_OFFSET);
1038
1039 if (netif_running(ndev)) {
1040 if (isr & XCAN_IXR_BSOFF_MASK) {
1041 priv->can.state = CAN_STATE_BUS_OFF;
1042 priv->write_reg(priv, XCAN_SRR_OFFSET,
1043 XCAN_SRR_RESET_MASK);
1044 } else if ((status & XCAN_SR_ESTAT_MASK) ==
1045 XCAN_SR_ESTAT_MASK) {
1046 priv->can.state = CAN_STATE_ERROR_PASSIVE;
1047 } else if (status & XCAN_SR_ERRWRN_MASK) {
1048 priv->can.state = CAN_STATE_ERROR_WARNING;
1049 } else {
1050 priv->can.state = CAN_STATE_ERROR_ACTIVE;
1051 }
1052 netif_device_attach(ndev);
1053 netif_start_queue(ndev);
1054 }
1055
1056 return 0; 1208 return 0;
1057} 1209}
1058 1210
@@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
1061 SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL) 1213 SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
1062}; 1214};
1063 1215
1216static const struct xcan_devtype_data xcan_zynq_data = {
1217 .caps = XCAN_CAP_WATERMARK,
1218};
1219
1220/* Match table for OF platform binding */
1221static const struct of_device_id xcan_of_match[] = {
1222 { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
1223 { .compatible = "xlnx,axi-can-1.00.a", },
1224 { /* end of list */ },
1225};
1226MODULE_DEVICE_TABLE(of, xcan_of_match);
1227
1064/** 1228/**
1065 * xcan_probe - Platform registration call 1229 * xcan_probe - Platform registration call
1066 * @pdev: Handle to the platform device structure 1230 * @pdev: Handle to the platform device structure
@@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev)
1075 struct resource *res; /* IO mem resources */ 1239 struct resource *res; /* IO mem resources */
1076 struct net_device *ndev; 1240 struct net_device *ndev;
1077 struct xcan_priv *priv; 1241 struct xcan_priv *priv;
1242 const struct of_device_id *of_id;
1243 int caps = 0;
1078 void __iomem *addr; 1244 void __iomem *addr;
1079 int ret, rx_max, tx_max; 1245 int ret, rx_max, tx_max, tx_fifo_depth;
1080 1246
1081 /* Get the virtual base address for the device */ 1247 /* Get the virtual base address for the device */
1082 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1248 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev)
1086 goto err; 1252 goto err;
1087 } 1253 }
1088 1254
1089 ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max); 1255 ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1256 &tx_fifo_depth);
1090 if (ret < 0) 1257 if (ret < 0)
1091 goto err; 1258 goto err;
1092 1259
@@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev)
1094 if (ret < 0) 1261 if (ret < 0)
1095 goto err; 1262 goto err;
1096 1263
1264 of_id = of_match_device(xcan_of_match, &pdev->dev);
1265 if (of_id) {
1266 const struct xcan_devtype_data *devtype_data = of_id->data;
1267
1268 if (devtype_data)
1269 caps = devtype_data->caps;
1270 }
1271
1272 /* There is no way to directly figure out how many frames have been
1273 * sent when the TXOK interrupt is processed. If watermark programming
1274 * is supported, we can have 2 frames in the FIFO and use TXFEMP
1275 * to determine if 1 or 2 frames have been sent.
1276 * Theoretically we should be able to use TXFWMEMP to determine up
1277 * to 3 frames, but it seems that after putting a second frame in the
1278 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
1279 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
1280 * sent), which is not a sensible state - possibly TXFWMEMP is not
1281 * completely synchronized with the rest of the bits?
1282 */
1283 if (caps & XCAN_CAP_WATERMARK)
1284 tx_max = min(tx_fifo_depth, 2);
1285 else
1286 tx_max = 1;
1287
1097 /* Create a CAN device instance */ 1288 /* Create a CAN device instance */
1098 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); 1289 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1099 if (!ndev) 1290 if (!ndev)
@@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev)
1108 CAN_CTRLMODE_BERR_REPORTING; 1299 CAN_CTRLMODE_BERR_REPORTING;
1109 priv->reg_base = addr; 1300 priv->reg_base = addr;
1110 priv->tx_max = tx_max; 1301 priv->tx_max = tx_max;
1302 spin_lock_init(&priv->tx_lock);
1111 1303
1112 /* Get IRQ for the device */ 1304 /* Get IRQ for the device */
1113 ndev->irq = platform_get_irq(pdev, 0); 1305 ndev->irq = platform_get_irq(pdev, 0);
@@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev)
1172 1364
1173 pm_runtime_put(&pdev->dev); 1365 pm_runtime_put(&pdev->dev);
1174 1366
1175 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n", 1367 netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
1176 priv->reg_base, ndev->irq, priv->can.clock.freq, 1368 priv->reg_base, ndev->irq, priv->can.clock.freq,
1177 priv->tx_max); 1369 tx_fifo_depth, priv->tx_max);
1178 1370
1179 return 0; 1371 return 0;
1180 1372
@@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev)
1208 return 0; 1400 return 0;
1209} 1401}
1210 1402
1211/* Match table for OF platform binding */
1212static const struct of_device_id xcan_of_match[] = {
1213 { .compatible = "xlnx,zynq-can-1.0", },
1214 { .compatible = "xlnx,axi-can-1.00.a", },
1215 { /* end of list */ },
1216};
1217MODULE_DEVICE_TABLE(of, xcan_of_match);
1218
1219static struct platform_driver xcan_driver = { 1403static struct platform_driver xcan_driver = {
1220 .probe = xcan_probe, 1404 .probe = xcan_probe,
1221 .remove = xcan_remove, 1405 .remove = xcan_remove,
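
The core of the xilinx_can rework is that tx_head is advanced by the xmit path while tx_tail is advanced by the TX interrupt, and both sides now read the occupancy (tx_head - tx_tail) under tx_lock; that is what makes the TXFEMP-based "one or two frames completed" accounting sound, and it also closes the netif_stop_queue()/netif_wake_queue() race. The shared-counter shape in isolation (a hedged sketch, not the driver code):

#include <linux/spinlock.h>
#include <linux/types.h>

struct tx_ring {
        spinlock_t lock;
        unsigned int head;      /* advanced by the xmit path */
        unsigned int tail;      /* advanced by the TX interrupt */
        unsigned int max;       /* FIFO depth in frames */
};

/* xmit side: returns true if the caller must stop the queue */
static bool tx_ring_queue(struct tx_ring *r)
{
        unsigned long flags;
        bool full;

        spin_lock_irqsave(&r->lock, flags);
        r->head++;
        full = (r->head - r->tail) == r->max;
        spin_unlock_irqrestore(&r->lock, flags);
        return full;
}

/* IRQ side: retire n completed frames under the same lock */
static void tx_ring_complete(struct tx_ring *r, unsigned int n)
{
        unsigned long flags;

        spin_lock_irqsave(&r->lock, flags);
        r->tail += n;
        spin_unlock_irqrestore(&r->lock, flags);
}

Unsigned wraparound keeps head - tail correct across overflow, which is why the counters are never reset individually, only together in set_reset_mode().
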
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 437cd6eb4faa..9ef07a06aceb 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -343,6 +343,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
343 .xlate = irq_domain_xlate_twocell, 343 .xlate = irq_domain_xlate_twocell,
344}; 344};
345 345
346/* To be called with reg_lock held */
346static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip) 347static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
347{ 348{
348 int irq, virq; 349 int irq, virq;
@@ -362,9 +363,15 @@ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
362 363
363static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip) 364static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
364{ 365{
365 mv88e6xxx_g1_irq_free_common(chip); 366 /*
366 367 * free_irq must be called without reg_lock taken because the irq
368 * handler takes this lock, too.
369 */
367 free_irq(chip->irq, chip); 370 free_irq(chip->irq, chip);
371
372 mutex_lock(&chip->reg_lock);
373 mv88e6xxx_g1_irq_free_common(chip);
374 mutex_unlock(&chip->reg_lock);
368} 375}
369 376
370static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip) 377static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
@@ -469,10 +476,12 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
469 476
470static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip) 477static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
471{ 478{
472 mv88e6xxx_g1_irq_free_common(chip);
473
474 kthread_cancel_delayed_work_sync(&chip->irq_poll_work); 479 kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
475 kthread_destroy_worker(chip->kworker); 480 kthread_destroy_worker(chip->kworker);
481
482 mutex_lock(&chip->reg_lock);
483 mv88e6xxx_g1_irq_free_common(chip);
484 mutex_unlock(&chip->reg_lock);
476} 485}
477 486
478int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask) 487int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
@@ -4506,12 +4515,10 @@ out_g2_irq:
4506 if (chip->info->g2_irqs > 0) 4515 if (chip->info->g2_irqs > 0)
4507 mv88e6xxx_g2_irq_free(chip); 4516 mv88e6xxx_g2_irq_free(chip);
4508out_g1_irq: 4517out_g1_irq:
4509 mutex_lock(&chip->reg_lock);
4510 if (chip->irq > 0) 4518 if (chip->irq > 0)
4511 mv88e6xxx_g1_irq_free(chip); 4519 mv88e6xxx_g1_irq_free(chip);
4512 else 4520 else
4513 mv88e6xxx_irq_poll_free(chip); 4521 mv88e6xxx_irq_poll_free(chip);
4514 mutex_unlock(&chip->reg_lock);
4515out: 4522out:
4516 if (pdata) 4523 if (pdata)
4517 dev_put(pdata->netdev); 4524 dev_put(pdata->netdev);
@@ -4539,12 +4546,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
4539 if (chip->info->g2_irqs > 0) 4546 if (chip->info->g2_irqs > 0)
4540 mv88e6xxx_g2_irq_free(chip); 4547 mv88e6xxx_g2_irq_free(chip);
4541 4548
4542 mutex_lock(&chip->reg_lock);
4543 if (chip->irq > 0) 4549 if (chip->irq > 0)
4544 mv88e6xxx_g1_irq_free(chip); 4550 mv88e6xxx_g1_irq_free(chip);
4545 else 4551 else
4546 mv88e6xxx_irq_poll_free(chip); 4552 mv88e6xxx_irq_poll_free(chip);
4547 mutex_unlock(&chip->reg_lock);
4548} 4553}
4549 4554
4550static const struct of_device_id mv88e6xxx_of_match[] = { 4555static const struct of_device_id mv88e6xxx_of_match[] = {
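
free_irq() synchronizes with the handler, so calling it with reg_lock held while the handler itself blocks on reg_lock is a classic AB-BA deadlock; the fix frees the IRQ first and only then takes the lock for the domain-mapping cleanup that genuinely needs it. The safe ordering as a sketch (my_chip is illustrative):

#include <linux/interrupt.h>
#include <linux/mutex.h>

struct my_chip {
        struct mutex lock;      /* also taken by the IRQ handler */
        int irq;
};

static void my_irq_teardown(struct my_chip *chip)
{
        /*
         * Must NOT hold chip->lock: free_irq() waits for the handler,
         * and the handler may be blocked waiting for chip->lock.
         */
        free_irq(chip->irq, chip);

        mutex_lock(&chip->lock);
        /* ... dispose of per-bit virq mappings etc. ... */
        mutex_unlock(&chip->lock);
}

The same reordering is applied to the polling variant, where kthread_cancel_delayed_work_sync() plays the role of free_irq().
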
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 5b7658bcf020..5c3ef9fc8207 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -32,7 +32,7 @@ config EL3
32 32
33config 3C515 33config 3C515
34 tristate "3c515 ISA \"Fast EtherLink\"" 34 tristate "3c515 ISA \"Fast EtherLink\""
35 depends on ISA && ISA_DMA_API 35 depends on ISA && ISA_DMA_API && !PPC32
36 ---help--- 36 ---help---
37 If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet 37 If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
38 network card, say Y here. 38 network card, say Y here.
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index f273af136fc7..9e5cf5583c87 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -44,7 +44,7 @@ config AMD8111_ETH
44 44
45config LANCE 45config LANCE
46 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" 46 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
47 depends on ISA && ISA_DMA_API && !ARM 47 depends on ISA && ISA_DMA_API && !ARM && !PPC32
48 ---help--- 48 ---help---
49 If you have a network (Ethernet) card of this type, say Y here. 49 If you have a network (Ethernet) card of this type, say Y here.
50 Some LinkSys cards are of this type. 50 Some LinkSys cards are of this type.
@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
138 138
139config NI65 139config NI65
140 tristate "NI6510 support" 140 tristate "NI6510 support"
141 depends on ISA && ISA_DMA_API && !ARM 141 depends on ISA && ISA_DMA_API && !ARM && !PPC32
142 ---help--- 142 ---help---
143 If you have a network (Ethernet) card of this type, say Y here. 143 If you have a network (Ethernet) card of this type, say Y here.
144 144
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index fc7383106946..91eb8910b1c9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -63,8 +63,6 @@
63 63
64#define AQ_CFG_NAPI_WEIGHT 64U 64#define AQ_CFG_NAPI_WEIGHT 64U
65 65
66#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
67
68/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/ 66/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
69 67
70#define AQ_NIC_FC_OFF 0U 68#define AQ_NIC_FC_OFF 0U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index a2d416b24ffc..2c6ebd91a9f2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -98,6 +98,8 @@ struct aq_stats_s {
98#define AQ_HW_MEDIA_TYPE_TP 1U 98#define AQ_HW_MEDIA_TYPE_TP 1U
99#define AQ_HW_MEDIA_TYPE_FIBRE 2U 99#define AQ_HW_MEDIA_TYPE_FIBRE 2U
100 100
101#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
102
101struct aq_hw_s { 103struct aq_hw_s {
102 atomic_t flags; 104 atomic_t flags;
103 u8 rbl_enabled:1; 105 u8 rbl_enabled:1;
@@ -177,7 +179,7 @@ struct aq_hw_ops {
177 unsigned int packet_filter); 179 unsigned int packet_filter);
178 180
179 int (*hw_multicast_list_set)(struct aq_hw_s *self, 181 int (*hw_multicast_list_set)(struct aq_hw_s *self,
180 u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX] 182 u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
181 [ETH_ALEN], 183 [ETH_ALEN],
182 u32 count); 184 u32 count);
183 185
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index ba5fe8c4125d..e3ae29e523f0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -135,17 +135,10 @@ err_exit:
135static void aq_ndev_set_multicast_settings(struct net_device *ndev) 135static void aq_ndev_set_multicast_settings(struct net_device *ndev)
136{ 136{
137 struct aq_nic_s *aq_nic = netdev_priv(ndev); 137 struct aq_nic_s *aq_nic = netdev_priv(ndev);
138 int err = 0;
139 138
140 err = aq_nic_set_packet_filter(aq_nic, ndev->flags); 139 aq_nic_set_packet_filter(aq_nic, ndev->flags);
141 if (err < 0)
142 return;
143 140
144 if (netdev_mc_count(ndev)) { 141 aq_nic_set_multicast_list(aq_nic, ndev);
145 err = aq_nic_set_multicast_list(aq_nic, ndev);
146 if (err < 0)
147 return;
148 }
149} 142}
150 143
151static const struct net_device_ops aq_ndev_ops = { 144static const struct net_device_ops aq_ndev_ops = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 1a1a6380c128..7a22d0257e04 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -563,34 +563,41 @@ err_exit:
563 563
564int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev) 564int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
565{ 565{
566 unsigned int packet_filter = self->packet_filter;
566 struct netdev_hw_addr *ha = NULL; 567 struct netdev_hw_addr *ha = NULL;
567 unsigned int i = 0U; 568 unsigned int i = 0U;
568 569
569 self->mc_list.count = 0U; 570 self->mc_list.count = 0;
570 571 if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
571 netdev_for_each_mc_addr(ha, ndev) { 572 packet_filter |= IFF_PROMISC;
572 ether_addr_copy(self->mc_list.ar[i++], ha->addr); 573 } else {
573 ++self->mc_list.count; 574 netdev_for_each_uc_addr(ha, ndev) {
575 ether_addr_copy(self->mc_list.ar[i++], ha->addr);
574 576
575 if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) 577 if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
576 break; 578 break;
579 }
577 } 580 }
578 581
579 if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) { 582 if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
580 /* Number of filters is too big: atlantic does not support this. 583 packet_filter |= IFF_ALLMULTI;
581 * Force all multi filter to support this.
582 * With this we disable all UC filters and setup "all pass"
583 * multicast mask
584 */
585 self->packet_filter |= IFF_ALLMULTI;
586 self->aq_nic_cfg.mc_list_count = 0;
587 return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
588 self->packet_filter);
589 } else { 584 } else {
590 return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw, 585 netdev_for_each_mc_addr(ha, ndev) {
591 self->mc_list.ar, 586 ether_addr_copy(self->mc_list.ar[i++], ha->addr);
592 self->mc_list.count); 587
588 if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
589 break;
590 }
591 }
592
593 if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
594 packet_filter |= IFF_MULTICAST;
595 self->mc_list.count = i;
596 self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
597 self->mc_list.ar,
598 self->mc_list.count);
593 } 599 }
600 return aq_nic_set_packet_filter(self, packet_filter);
594} 601}
595 602
596int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu) 603int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
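
The rewritten list handler treats the 32 filter slots (AQ_HW_MULTICAST_ADDRESS_MAX) as a pool shared by unicast and multicast entries and degrades explicitly instead of silently dropping addresses: unicast overflow forces IFF_PROMISC, combined overflow forces IFF_ALLMULTI. The thresholding alone, as a runnable sketch with illustrative flag values:

#include <stdio.h>

#define FILTER_SLOTS 32         /* mirrors AQ_HW_MULTICAST_ADDRESS_MAX */
#define F_PROMISC  0x1
#define F_ALLMULTI 0x2

static unsigned int filter_flags(unsigned int uc, unsigned int mc)
{
        unsigned int flags = 0;

        if (uc > FILTER_SLOTS)
                flags |= F_PROMISC;     /* can't filter unicast at all */
        else if (uc + mc > FILTER_SLOTS)
                flags |= F_ALLMULTI;    /* pass multicast unfiltered */
        return flags;
}

int main(void)
{
        printf("%u %u %u\n",
               filter_flags(4, 8),      /* 0: everything fits */
               filter_flags(40, 0),     /* 1: promiscuous */
               filter_flags(10, 30));   /* 2: all-multicast */
        return 0;
}
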
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index faa533a0ec47..fecfc401f95d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -75,7 +75,7 @@ struct aq_nic_s {
75 struct aq_hw_link_status_s link_status; 75 struct aq_hw_link_status_s link_status;
76 struct { 76 struct {
77 u32 count; 77 u32 count;
78 u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN]; 78 u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
79 } mc_list; 79 } mc_list;
80 80
81 struct pci_dev *pdev; 81 struct pci_dev *pdev;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 67e2f9fb9402..8cc6abadc03b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -765,7 +765,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
765 765
766static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self, 766static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
767 u8 ar_mac 767 u8 ar_mac
768 [AQ_CFG_MULTICAST_ADDRESS_MAX] 768 [AQ_HW_MULTICAST_ADDRESS_MAX]
769 [ETH_ALEN], 769 [ETH_ALEN],
770 u32 count) 770 u32 count)
771{ 771{
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 819f6bcf9b4e..956860a69797 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -784,7 +784,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
784 784
785static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self, 785static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
786 u8 ar_mac 786 u8 ar_mac
787 [AQ_CFG_MULTICAST_ADDRESS_MAX] 787 [AQ_HW_MULTICAST_ADDRESS_MAX]
788 [ETH_ALEN], 788 [ETH_ALEN],
789 u32 count) 789 u32 count)
790{ 790{
@@ -812,7 +812,7 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
812 812
813 hw_atl_rpfl2_uc_flr_en_set(self, 813 hw_atl_rpfl2_uc_flr_en_set(self,
814 (self->aq_nic_cfg->is_mc_list_enabled), 814 (self->aq_nic_cfg->is_mc_list_enabled),
815 HW_ATL_B0_MAC_MIN + i); 815 HW_ATL_B0_MAC_MIN + i);
816 } 816 }
817 817
818 err = aq_hw_err_from_flags(self); 818 err = aq_hw_err_from_flags(self);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 567ee54504bc..5e5022fa1d04 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev)
1897 struct pci_dev *pdev = to_pci_dev(dev); 1897 struct pci_dev *pdev = to_pci_dev(dev);
1898 struct alx_priv *alx = pci_get_drvdata(pdev); 1898 struct alx_priv *alx = pci_get_drvdata(pdev);
1899 struct alx_hw *hw = &alx->hw; 1899 struct alx_hw *hw = &alx->hw;
1900 int err;
1900 1901
1901 alx_reset_phy(hw); 1902 alx_reset_phy(hw);
1902 1903
1903 if (!netif_running(alx->dev)) 1904 if (!netif_running(alx->dev))
1904 return 0; 1905 return 0;
1905 netif_device_attach(alx->dev); 1906 netif_device_attach(alx->dev);
1906 return __alx_open(alx, true); 1907
1908 rtnl_lock();
1909 err = __alx_open(alx, true);
1910 rtnl_unlock();
1911
1912 return err;
1907} 1913}
1908 1914
1909static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume); 1915static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
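
The alx fix works because __alx_open() expects the RTNL to be held, which is true on the normal ndo_open path but not on resume. A userspace model of the same pattern, with a pthread mutex standing in for rtnl_lock() and hypothetical function names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller must hold cfg_lock, as __alx_open() assumes the RTNL. */
static int reopen_locked(void)
{
	printf("reconfiguring under the lock\n");
	return 0;
}

static int resume(void)
{
	int err;

	pthread_mutex_lock(&cfg_lock);	/* this is what the fix adds */
	err = reopen_locked();
	pthread_mutex_unlock(&cfg_lock);
	return err;
}

int main(void)
{
	return resume();
}
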
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 94270f654b3b..7087b88550db 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1686,6 +1686,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
1686 skb = build_skb(page_address(page) + adapter->rx_page_offset, 1686 skb = build_skb(page_address(page) + adapter->rx_page_offset,
1687 adapter->rx_frag_size); 1687 adapter->rx_frag_size);
1688 if (likely(skb)) { 1688 if (likely(skb)) {
1689 skb_reserve(skb, NET_SKB_PAD);
1689 adapter->rx_page_offset += adapter->rx_frag_size; 1690 adapter->rx_page_offset += adapter->rx_frag_size;
1690 if (adapter->rx_page_offset >= PAGE_SIZE) 1691 if (adapter->rx_page_offset >= PAGE_SIZE)
1691 adapter->rx_page = NULL; 1692 adapter->rx_page = NULL;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index d5fca2e5a9bc..a1f60f89e059 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1946,8 +1946,8 @@ static int bcm_sysport_open(struct net_device *dev)
1946 if (!priv->is_lite) 1946 if (!priv->is_lite)
1947 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD); 1947 priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
1948 else 1948 else
1949 priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) & 1949 priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
1950 GIB_FCS_STRIP); 1950 GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
1951 1951
1952 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link, 1952 phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
1953 0, priv->phy_interface); 1953 0, priv->phy_interface);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index d6e5d0cbf3a3..cf440b91fd04 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -278,7 +278,8 @@ struct bcm_rsb {
278#define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT) 278#define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT)
279#define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT) 279#define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT)
280#define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT) 280#define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT)
281#define GIB_FCS_STRIP (1 << 6) 281#define GIB_FCS_STRIP_SHIFT 6
282#define GIB_FCS_STRIP (1 << GIB_FCS_STRIP_SHIFT)
282#define GIB_LCL_LOOP_EN (1 << 7) 283#define GIB_LCL_LOOP_EN (1 << 7)
283#define GIB_LCL_LOOP_TXEN (1 << 8) 284#define GIB_LCL_LOOP_TXEN (1 << 8)
284#define GIB_RMT_LOOP_EN (1 << 9) 285#define GIB_RMT_LOOP_EN (1 << 9)
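
The SYSTEMPORT Lite change inverts the sense of the test: CRC should be forwarded to software exactly when the MAC does not strip it, so crc_fwd must be the negation of the FCS_STRIP bit. A standalone restatement, with the register layout reduced to that one bit:

#include <stdio.h>

#define FCS_STRIP_SHIFT 6
#define FCS_STRIP	(1u << FCS_STRIP_SHIFT)

/* CRC reaches software exactly when the MAC does NOT strip it. */
static int crc_forwarded(unsigned int gib_control)
{
	return !((gib_control & FCS_STRIP) >> FCS_STRIP_SHIFT);
}

int main(void)
{
	printf("strip on:  crc_fwd=%d\n", crc_forwarded(FCS_STRIP));	/* 0 */
	printf("strip off: crc_fwd=%d\n", crc_forwarded(0));		/* 1 */
	return 0;
}
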
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d847e1b9c37b..be1506169076 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1533,6 +1533,7 @@ struct bnx2x {
1533 struct link_vars link_vars; 1533 struct link_vars link_vars;
1534 u32 link_cnt; 1534 u32 link_cnt;
1535 struct bnx2x_link_report_data last_reported_link; 1535 struct bnx2x_link_report_data last_reported_link;
1536 bool force_link_down;
1536 1537
1537 struct mdio_if_info mdio; 1538 struct mdio_if_info mdio;
1538 1539
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8cd73ff5debc..af7b5a4d8ba0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
1261{ 1261{
1262 struct bnx2x_link_report_data cur_data; 1262 struct bnx2x_link_report_data cur_data;
1263 1263
1264 if (bp->force_link_down) {
1265 bp->link_vars.link_up = 0;
1266 return;
1267 }
1268
1264 /* reread mf_cfg */ 1269 /* reread mf_cfg */
1265 if (IS_PF(bp) && !CHIP_IS_E1(bp)) 1270 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1266 bnx2x_read_mf_cfg(bp); 1271 bnx2x_read_mf_cfg(bp);
@@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2817 bp->pending_max = 0; 2822 bp->pending_max = 0;
2818 } 2823 }
2819 2824
2825 bp->force_link_down = false;
2820 if (bp->port.pmf) { 2826 if (bp->port.pmf) {
2821 rc = bnx2x_initial_phy_init(bp, load_mode); 2827 rc = bnx2x_initial_phy_init(bp, load_mode);
2822 if (rc) 2828 if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index da18aa239acb..a4a90b6cdb46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3388,14 +3388,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
3388 DP(BNX2X_MSG_ETHTOOL, 3388 DP(BNX2X_MSG_ETHTOOL,
3389 "rss re-configured, UDP 4-tupple %s\n", 3389 "rss re-configured, UDP 4-tupple %s\n",
3390 udp_rss_requested ? "enabled" : "disabled"); 3390 udp_rss_requested ? "enabled" : "disabled");
3391 return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); 3391 if (bp->state == BNX2X_STATE_OPEN)
3392 return bnx2x_rss(bp, &bp->rss_conf_obj, false,
3393 true);
3392 } else if ((info->flow_type == UDP_V6_FLOW) && 3394 } else if ((info->flow_type == UDP_V6_FLOW) &&
3393 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { 3395 (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
3394 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; 3396 bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
3395 DP(BNX2X_MSG_ETHTOOL, 3397 DP(BNX2X_MSG_ETHTOOL,
3396 "rss re-configured, UDP 4-tupple %s\n", 3398 "rss re-configured, UDP 4-tupple %s\n",
3397 udp_rss_requested ? "enabled" : "disabled"); 3399 udp_rss_requested ? "enabled" : "disabled");
3398 return bnx2x_rss(bp, &bp->rss_conf_obj, false, true); 3400 if (bp->state == BNX2X_STATE_OPEN)
3401 return bnx2x_rss(bp, &bp->rss_conf_obj, false,
3402 true);
3399 } 3403 }
3400 return 0; 3404 return 0;
3401 3405
@@ -3509,7 +3513,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
3509 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; 3513 bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
3510 } 3514 }
3511 3515
3512 return bnx2x_config_rss_eth(bp, false); 3516 if (bp->state == BNX2X_STATE_OPEN)
3517 return bnx2x_config_rss_eth(bp, false);
3518
3519 return 0;
3513} 3520}
3514 3521
3515/** 3522/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5b1ed240bf18..57348f2b49a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
10279 bp->sp_rtnl_state = 0; 10279 bp->sp_rtnl_state = 0;
10280 smp_mb(); 10280 smp_mb();
10281 10281
10282 /* Immediately indicate link as down */
10283 bp->link_vars.link_up = 0;
10284 bp->force_link_down = true;
10285 netif_carrier_off(bp->dev);
10286 BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
10287
10282 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true); 10288 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
10283 /* When ret value shows failure of allocation failure, 10289 /* When ret value shows failure of allocation failure,
10284 * the nic is rebooted again. If open still fails, a error 10290 * the nic is rebooted again. If open still fails, a error
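
The bnx2x hunks cooperate: the Tx-timeout path latches force_link_down, __bnx2x_link_report() short-circuits to "down" while it is set, and bnx2x_nic_load() clears it on the next full reload. A plain-C sketch of that latch; the state layout and function names are stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct nic_state {
	bool force_link_down;	/* latched by the Tx-timeout path */
	bool link_up;
};

static void report_link(struct nic_state *n)
{
	if (n->force_link_down) {
		n->link_up = false;	/* short-circuit: stay "down" */
		return;
	}
	/* normal link-state readout would run here */
	n->link_up = true;
}

static void tx_timeout(struct nic_state *n)
{
	n->link_up = false;
	n->force_link_down = true;	/* cleared again on the next load */
}

int main(void)
{
	struct nic_state n = { false, true };

	tx_timeout(&n);
	report_link(&n);		/* still down despite the readout */
	printf("link_up=%d\n", n.link_up);
	return 0;
}
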
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 176fc9f4d7de..4394c1162be4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5712,7 +5712,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
5712 } 5712 }
5713 vnic->uc_filter_count = 1; 5713 vnic->uc_filter_count = 1;
5714 5714
5715 vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST; 5715 vnic->rx_mask = 0;
5716 if (bp->dev->flags & IFF_BROADCAST)
5717 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
5716 5718
5717 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 5719 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
5718 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 5720 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
@@ -5917,7 +5919,7 @@ unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
5917 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); 5919 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
5918} 5920}
5919 5921
5920void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) 5922static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
5921{ 5923{
5922 bp->hw_resc.max_irqs = max_irqs; 5924 bp->hw_resc.max_irqs = max_irqs;
5923} 5925}
@@ -6888,7 +6890,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6888 rc = bnxt_request_irq(bp); 6890 rc = bnxt_request_irq(bp);
6889 if (rc) { 6891 if (rc) {
6890 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc); 6892 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
6891 goto open_err; 6893 goto open_err_irq;
6892 } 6894 }
6893 } 6895 }
6894 6896
@@ -6928,6 +6930,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
6928open_err: 6930open_err:
6929 bnxt_debug_dev_exit(bp); 6931 bnxt_debug_dev_exit(bp);
6930 bnxt_disable_napi(bp); 6932 bnxt_disable_napi(bp);
6933
6934open_err_irq:
6931 bnxt_del_napi(bp); 6935 bnxt_del_napi(bp);
6932 6936
6933open_err_free_mem: 6937open_err_free_mem:
@@ -7214,13 +7218,16 @@ static void bnxt_set_rx_mode(struct net_device *dev)
7214 7218
7215 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS | 7219 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
7216 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST | 7220 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
7217 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST); 7221 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
7222 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
7218 7223
7219 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp)) 7224 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
7220 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS; 7225 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
7221 7226
7222 uc_update = bnxt_uc_list_updated(bp); 7227 uc_update = bnxt_uc_list_updated(bp);
7223 7228
7229 if (dev->flags & IFF_BROADCAST)
7230 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
7224 if (dev->flags & IFF_ALLMULTI) { 7231 if (dev->flags & IFF_ALLMULTI) {
7225 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST; 7232 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
7226 vnic->mc_list_count = 0; 7233 vnic->mc_list_count = 0;
@@ -8502,11 +8509,11 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
8502 int rx, tx, cp; 8509 int rx, tx, cp;
8503 8510
8504 _bnxt_get_max_rings(bp, &rx, &tx, &cp); 8511 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
8512 *max_rx = rx;
8513 *max_tx = tx;
8505 if (!rx || !tx || !cp) 8514 if (!rx || !tx || !cp)
8506 return -ENOMEM; 8515 return -ENOMEM;
8507 8516
8508 *max_rx = rx;
8509 *max_tx = tx;
8510 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared); 8517 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
8511} 8518}
8512 8519
@@ -8520,8 +8527,11 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
8520 /* Not enough rings, try disabling agg rings. */ 8527 /* Not enough rings, try disabling agg rings. */
8521 bp->flags &= ~BNXT_FLAG_AGG_RINGS; 8528 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
8522 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared); 8529 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
8523 if (rc) 8530 if (rc) {
8531 /* set BNXT_FLAG_AGG_RINGS back for consistency */
8532 bp->flags |= BNXT_FLAG_AGG_RINGS;
8524 return rc; 8533 return rc;
8534 }
8525 bp->flags |= BNXT_FLAG_NO_AGG_RINGS; 8535 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
8526 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 8536 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
8527 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW); 8537 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
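
One detail worth noting in bnxt_get_max_rings(): *max_rx and *max_tx are now written before the -ENOMEM check, so callers see the probed values even on failure. A generic sketch of the publish-before-error idiom; the probe helper is hypothetical:

#include <errno.h>
#include <stdio.h>

/* Pretend probe: plenty of rx/tx rings but no completion rings left. */
static void probe_caps(int *rx, int *tx, int *cp)
{
	*rx = 4;
	*tx = 4;
	*cp = 0;
}

static int get_max_rings(int *max_rx, int *max_tx)
{
	int rx, tx, cp;

	probe_caps(&rx, &tx, &cp);
	*max_rx = rx;		/* publish before the error check */
	*max_tx = tx;
	if (!rx || !tx || !cp)
		return -ENOMEM;
	return 0;
}

int main(void)
{
	int rx, tx;
	int rc = get_max_rings(&rx, &tx);

	printf("rc=%d rx=%d tx=%d\n", rc, rx, tx);
	return 0;
}
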
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 9b14eb610b9f..91575ef97c8c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1470,7 +1470,6 @@ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
1470unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); 1470unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
1471void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max); 1471void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
1472unsigned int bnxt_get_max_func_irqs(struct bnxt *bp); 1472unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
1473void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
1474int bnxt_get_avail_msix(struct bnxt *bp, int num); 1473int bnxt_get_avail_msix(struct bnxt *bp, int num);
1475int bnxt_reserve_rings(struct bnxt *bp); 1474int bnxt_reserve_rings(struct bnxt *bp);
1476void bnxt_tx_disable(struct bnxt *bp); 1475void bnxt_tx_disable(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 795f45024c20..491bd40a254d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -27,6 +27,15 @@
27#define BNXT_FID_INVALID 0xffff 27#define BNXT_FID_INVALID 0xffff
28#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT)) 28#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT))
29 29
30#define is_vlan_pcp_wildcarded(vlan_tci_mask) \
31 ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
32#define is_vlan_pcp_exactmatch(vlan_tci_mask) \
33 ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
34#define is_vlan_pcp_zero(vlan_tci) \
35 ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
36#define is_vid_exactmatch(vlan_tci_mask) \
37 ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
38
30/* Return the dst fid of the func for flow forwarding 39/* Return the dst fid of the func for flow forwarding
31 * For PFs: src_fid is the fid of the PF 40 * For PFs: src_fid is the fid of the PF
32 * For VF-reps: src_fid the fid of the VF 41 * For VF-reps: src_fid the fid of the VF
@@ -389,6 +398,21 @@ static bool is_exactmatch(void *mask, int len)
389 return true; 398 return true;
390} 399}
391 400
401static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
402 __be16 vlan_tci)
403{
404 /* VLAN priority must be either exactly zero or fully wildcarded and
405 * VLAN id must be exact match.
406 */
407 if (is_vid_exactmatch(vlan_tci_mask) &&
408 ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
409 is_vlan_pcp_zero(vlan_tci)) ||
410 is_vlan_pcp_wildcarded(vlan_tci_mask)))
411 return true;
412
413 return false;
414}
415
392static bool bits_set(void *key, int len) 416static bool bits_set(void *key, int len)
393{ 417{
394 const u8 *p = key; 418 const u8 *p = key;
@@ -803,9 +827,9 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
803 /* Currently VLAN fields cannot be partial wildcard */ 827 /* Currently VLAN fields cannot be partial wildcard */
804 if (bits_set(&flow->l2_key.inner_vlan_tci, 828 if (bits_set(&flow->l2_key.inner_vlan_tci,
805 sizeof(flow->l2_key.inner_vlan_tci)) && 829 sizeof(flow->l2_key.inner_vlan_tci)) &&
806 !is_exactmatch(&flow->l2_mask.inner_vlan_tci, 830 !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
807 sizeof(flow->l2_mask.inner_vlan_tci))) { 831 flow->l2_key.inner_vlan_tci)) {
808 netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n"); 832 netdev_info(bp->dev, "Unsupported VLAN TCI\n");
809 return false; 833 return false;
810 } 834 }
811 if (bits_set(&flow->l2_key.inner_vlan_tpid, 835 if (bits_set(&flow->l2_key.inner_vlan_tpid,
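
The new is_vlan_tci_allowed() accepts a TCI match only when the VLAN id is an exact match and the priority bits are either exactly zero or fully wildcarded. The same predicate restated as a standalone function; mask values copied from <linux/if_vlan.h>, with byte order sidestepped by using host-order inputs:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_MASK 0xe000
#define VLAN_VID_MASK  0x0fff

static bool vlan_tci_allowed(uint16_t tci_mask, uint16_t tci)
{
	bool vid_exact = (tci_mask & VLAN_VID_MASK) == VLAN_VID_MASK;
	bool pcp_exact = (tci_mask & VLAN_PRIO_MASK) == VLAN_PRIO_MASK;
	bool pcp_wild  = (tci_mask & VLAN_PRIO_MASK) == 0;
	bool pcp_zero  = (tci & VLAN_PRIO_MASK) == 0;

	return vid_exact && ((pcp_exact && pcp_zero) || pcp_wild);
}

int main(void)
{
	printf("%d\n", vlan_tci_allowed(0xffff, 0x0064));	/* 1: exact, PCP 0 */
	printf("%d\n", vlan_tci_allowed(0x0fff, 0x0064));	/* 1: PCP wildcarded */
	printf("%d\n", vlan_tci_allowed(0xffff, 0x2064));	/* 0: nonzero PCP */
	return 0;
}
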
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 347e4f946eb2..840f6e505f73 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
169 edev->ulp_tbl[ulp_id].msix_requested = avail_msix; 169 edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
170 } 170 }
171 bnxt_fill_msix_vecs(bp, ent); 171 bnxt_fill_msix_vecs(bp, ent);
172 bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
173 bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix); 172 bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
174 edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED; 173 edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
175 return avail_msix; 174 return avail_msix;
@@ -192,7 +191,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
192 msix_requested = edev->ulp_tbl[ulp_id].msix_requested; 191 msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
193 bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested); 192 bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
194 edev->ulp_tbl[ulp_id].msix_requested = 0; 193 edev->ulp_tbl[ulp_id].msix_requested = 0;
195 bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
196 edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED; 194 edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
197 if (netif_running(dev)) { 195 if (netif_running(dev)) {
198 bnxt_close_nic(bp, true, false); 196 bnxt_close_nic(bp, true, false);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 30273a7717e2..4fd829b5e65d 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
660 id_tbl->max = size; 660 id_tbl->max = size;
661 id_tbl->next = next; 661 id_tbl->next = next;
662 spin_lock_init(&id_tbl->lock); 662 spin_lock_init(&id_tbl->lock);
663 id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL); 663 id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
664 if (!id_tbl->table) 664 if (!id_tbl->table)
665 return -ENOMEM; 665 return -ENOMEM;
666 666
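
The cnic change matters because kernel bitmaps are arrays of unsigned long: sizing by 32-bit words can yield an allocation that is not a multiple of sizeof(long), so a long-wide access to the last word runs off the end on 64-bit. A standalone demonstration, with the macros re-defined here to the kernel's semantics:

#include <limits.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define BITS_PER_LONG		(CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(nr)	DIV_ROUND_UP(nr, BITS_PER_LONG)

int main(void)
{
	unsigned int bits = 32;

	/* Old sizing: whole 32-bit words -> 4 bytes here. */
	printf("old: %zu bytes\n", (size_t)DIV_ROUND_UP(bits, 32) * 4);
	/* New sizing: whole longs -> 8 bytes on LP64, so a long-wide
	 * bitmap access to word 0 stays inside the allocation. */
	printf("new: %zu bytes\n", BITS_TO_LONGS(bits) * sizeof(long));
	return 0;
}

The general rule: size a bitmap in units of the word type the accessors use, never in units of the bit-granularity you happen to be counting in.
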
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3be87efdc93d..aa1374d0af93 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6,11 +6,15 @@
6 * Copyright (C) 2004 Sun Microsystems Inc. 6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2016 Broadcom Corporation. 7 * Copyright (C) 2005-2016 Broadcom Corporation.
8 * Copyright (C) 2016-2017 Broadcom Limited. 8 * Copyright (C) 2016-2017 Broadcom Limited.
9 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
10 * refers to Broadcom Inc. and/or its subsidiaries.
9 * 11 *
10 * Firmware is: 12 * Firmware is:
11 * Derived from proprietary unpublished source code, 13 * Derived from proprietary unpublished source code,
12 * Copyright (C) 2000-2016 Broadcom Corporation. 14 * Copyright (C) 2000-2016 Broadcom Corporation.
13 * Copyright (C) 2016-2017 Broadcom Ltd. 15 * Copyright (C) 2016-2017 Broadcom Ltd.
16 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
17 * refers to Broadcom Inc. and/or its subsidiaries.
14 * 18 *
15 * Permission is hereby granted for the distribution of this firmware 19 * Permission is hereby granted for the distribution of this firmware
16 * data in hexadecimal or equivalent format, provided this copyright 20 * data in hexadecimal or equivalent format, provided this copyright
@@ -9290,6 +9294,15 @@ static int tg3_chip_reset(struct tg3 *tp)
9290 9294
9291 tg3_restore_clk(tp); 9295 tg3_restore_clk(tp);
9292 9296
9297 /* Increase the core clock speed to fix tx timeout issue for 5762
9298 * with 100Mbps link speed.
9299 */
9300 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
9301 val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
9302 tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
9303 TG3_CPMU_MAC_ORIDE_ENABLE);
9304 }
9305
9293 /* Reprobe ASF enable state. */ 9306 /* Reprobe ASF enable state. */
9294 tg3_flag_clear(tp, ENABLE_ASF); 9307 tg3_flag_clear(tp, ENABLE_ASF);
9295 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK | 9308 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1d61aa3efda1..a772a33b685c 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -7,6 +7,8 @@
7 * Copyright (C) 2004 Sun Microsystems Inc. 7 * Copyright (C) 2004 Sun Microsystems Inc.
8 * Copyright (C) 2007-2016 Broadcom Corporation. 8 * Copyright (C) 2007-2016 Broadcom Corporation.
9 * Copyright (C) 2016-2017 Broadcom Limited. 9 * Copyright (C) 2016-2017 Broadcom Limited.
10 * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
11 * refers to Broadcom Inc. and/or its subsidiaries.
10 */ 12 */
11 13
12#ifndef _T3_H 14#ifndef _T3_H
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 86659823b259..3d45f4c92cf6 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -166,6 +166,7 @@
166#define GEM_DCFG6 0x0294 /* Design Config 6 */ 166#define GEM_DCFG6 0x0294 /* Design Config 6 */
167#define GEM_DCFG7 0x0298 /* Design Config 7 */ 167#define GEM_DCFG7 0x0298 /* Design Config 7 */
168#define GEM_DCFG8 0x029C /* Design Config 8 */ 168#define GEM_DCFG8 0x029C /* Design Config 8 */
169#define GEM_DCFG10 0x02A4 /* Design Config 10 */
169 170
170#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */ 171#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */
171#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */ 172#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */
@@ -490,6 +491,12 @@
490#define GEM_SCR2CMP_OFFSET 0 491#define GEM_SCR2CMP_OFFSET 0
491#define GEM_SCR2CMP_SIZE 8 492#define GEM_SCR2CMP_SIZE 8
492 493
494/* Bitfields in DCFG10 */
495#define GEM_TXBD_RDBUFF_OFFSET 12
496#define GEM_TXBD_RDBUFF_SIZE 4
497#define GEM_RXBD_RDBUFF_OFFSET 8
498#define GEM_RXBD_RDBUFF_SIZE 4
499
493/* Bitfields in TISUBN */ 500/* Bitfields in TISUBN */
494#define GEM_SUBNSINCR_OFFSET 0 501#define GEM_SUBNSINCR_OFFSET 0
495#define GEM_SUBNSINCR_SIZE 16 502#define GEM_SUBNSINCR_SIZE 16
@@ -635,6 +642,7 @@
635#define MACB_CAPS_USRIO_DISABLED 0x00000010 642#define MACB_CAPS_USRIO_DISABLED 0x00000010
636#define MACB_CAPS_JUMBO 0x00000020 643#define MACB_CAPS_JUMBO 0x00000020
637#define MACB_CAPS_GEM_HAS_PTP 0x00000040 644#define MACB_CAPS_GEM_HAS_PTP 0x00000040
645#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
638#define MACB_CAPS_FIFO_MODE 0x10000000 646#define MACB_CAPS_FIFO_MODE 0x10000000
639#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 647#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
640#define MACB_CAPS_SG_DISABLED 0x40000000 648#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -1203,6 +1211,9 @@ struct macb {
1203 unsigned int max_tuples; 1211 unsigned int max_tuples;
1204 1212
1205 struct tasklet_struct hresp_err_tasklet; 1213 struct tasklet_struct hresp_err_tasklet;
1214
1215 int rx_bd_rd_prefetch;
1216 int tx_bd_rd_prefetch;
1206}; 1217};
1207 1218
1208#ifdef CONFIG_MACB_USE_HWSTAMP 1219#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 3e93df5d4e3b..a6c911bb5ce2 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1811,23 +1811,25 @@ static void macb_free_consistent(struct macb *bp)
1811{ 1811{
1812 struct macb_queue *queue; 1812 struct macb_queue *queue;
1813 unsigned int q; 1813 unsigned int q;
1814 int size;
1814 1815
1815 queue = &bp->queues[0];
1816 bp->macbgem_ops.mog_free_rx_buffers(bp); 1816 bp->macbgem_ops.mog_free_rx_buffers(bp);
1817 if (queue->rx_ring) {
1818 dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
1819 queue->rx_ring, queue->rx_ring_dma);
1820 queue->rx_ring = NULL;
1821 }
1822 1817
1823 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1818 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1824 kfree(queue->tx_skb); 1819 kfree(queue->tx_skb);
1825 queue->tx_skb = NULL; 1820 queue->tx_skb = NULL;
1826 if (queue->tx_ring) { 1821 if (queue->tx_ring) {
1827 dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp), 1822 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
1823 dma_free_coherent(&bp->pdev->dev, size,
1828 queue->tx_ring, queue->tx_ring_dma); 1824 queue->tx_ring, queue->tx_ring_dma);
1829 queue->tx_ring = NULL; 1825 queue->tx_ring = NULL;
1830 } 1826 }
1827 if (queue->rx_ring) {
1828 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
1829 dma_free_coherent(&bp->pdev->dev, size,
1830 queue->rx_ring, queue->rx_ring_dma);
1831 queue->rx_ring = NULL;
1832 }
1831 } 1833 }
1832} 1834}
1833 1835
@@ -1874,7 +1876,7 @@ static int macb_alloc_consistent(struct macb *bp)
1874 int size; 1876 int size;
1875 1877
1876 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 1878 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1877 size = TX_RING_BYTES(bp); 1879 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
1878 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1880 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1879 &queue->tx_ring_dma, 1881 &queue->tx_ring_dma,
1880 GFP_KERNEL); 1882 GFP_KERNEL);
@@ -1890,7 +1892,7 @@ static int macb_alloc_consistent(struct macb *bp)
1890 if (!queue->tx_skb) 1892 if (!queue->tx_skb)
1891 goto out_err; 1893 goto out_err;
1892 1894
1893 size = RX_RING_BYTES(bp); 1895 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
1894 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, 1896 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
1895 &queue->rx_ring_dma, GFP_KERNEL); 1897 &queue->rx_ring_dma, GFP_KERNEL);
1896 if (!queue->rx_ring) 1898 if (!queue->rx_ring)
@@ -3726,6 +3728,8 @@ static int at91ether_init(struct platform_device *pdev)
3726 int err; 3728 int err;
3727 u32 reg; 3729 u32 reg;
3728 3730
3731 bp->queues[0].bp = bp;
3732
3729 dev->netdev_ops = &at91ether_netdev_ops; 3733 dev->netdev_ops = &at91ether_netdev_ops;
3730 dev->ethtool_ops = &macb_ethtool_ops; 3734 dev->ethtool_ops = &macb_ethtool_ops;
3731 3735
@@ -3795,7 +3799,7 @@ static const struct macb_config np4_config = {
3795static const struct macb_config zynqmp_config = { 3799static const struct macb_config zynqmp_config = {
3796 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | 3800 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
3797 MACB_CAPS_JUMBO | 3801 MACB_CAPS_JUMBO |
3798 MACB_CAPS_GEM_HAS_PTP, 3802 MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
3799 .dma_burst_length = 16, 3803 .dma_burst_length = 16,
3800 .clk_init = macb_clk_init, 3804 .clk_init = macb_clk_init,
3801 .init = macb_init, 3805 .init = macb_init,
@@ -3856,7 +3860,7 @@ static int macb_probe(struct platform_device *pdev)
3856 void __iomem *mem; 3860 void __iomem *mem;
3857 const char *mac; 3861 const char *mac;
3858 struct macb *bp; 3862 struct macb *bp;
3859 int err; 3863 int err, val;
3860 3864
3861 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3865 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3862 mem = devm_ioremap_resource(&pdev->dev, regs); 3866 mem = devm_ioremap_resource(&pdev->dev, regs);
@@ -3945,6 +3949,18 @@ static int macb_probe(struct platform_device *pdev)
3945 else 3949 else
3946 dev->max_mtu = ETH_DATA_LEN; 3950 dev->max_mtu = ETH_DATA_LEN;
3947 3951
3952 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
3953 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
3954 if (val)
3955 bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
3956 macb_dma_desc_get_size(bp);
3957
3958 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
3959 if (val)
3960 bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
3961 macb_dma_desc_get_size(bp);
3962 }
3963
3948 mac = of_get_mac_address(np); 3964 mac = of_get_mac_address(np);
3949 if (mac) { 3965 if (mac) {
3950 ether_addr_copy(bp->dev->dev_addr, mac); 3966 ether_addr_copy(bp->dev->dev_addr, mac);
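
In the macb probe hunk, the DCFG10 fields encode how many descriptors the GEM may prefetch past the ring end (2 to the power of the field value), and the ring allocations earlier in the diff are padded by that many descriptor sizes. The decode on its own, with the register field extraction simplified away:

#include <stdio.h>

/* field_val is the 4-bit RXBD/TXBD_RDBUFF value from DCFG10. */
static unsigned int prefetch_pad(unsigned int field_val,
				 unsigned int desc_size)
{
	if (!field_val)
		return 0;
	return (2u << (field_val - 1)) * desc_size;	/* 2^field_val descs */
}

int main(void)
{
	/* e.g. field value 2 -> 4 descriptors of 16 bytes -> 64 byte pad */
	printf("%u\n", prefetch_pad(2, 16));
	return 0;
}
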
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 043e3c11c42b..92d88c5f76fb 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -15,7 +15,7 @@ if NET_VENDOR_CAVIUM
15 15
16config THUNDER_NIC_PF 16config THUNDER_NIC_PF
17 tristate "Thunder Physical function driver" 17 tristate "Thunder Physical function driver"
18 depends on 64BIT 18 depends on 64BIT && PCI
19 select THUNDER_NIC_BGX 19 select THUNDER_NIC_BGX
20 ---help--- 20 ---help---
21 This driver supports Thunder's NIC physical function. 21 This driver supports Thunder's NIC physical function.
@@ -28,13 +28,13 @@ config THUNDER_NIC_PF
28config THUNDER_NIC_VF 28config THUNDER_NIC_VF
29 tristate "Thunder Virtual function driver" 29 tristate "Thunder Virtual function driver"
30 imply CAVIUM_PTP 30 imply CAVIUM_PTP
31 depends on 64BIT 31 depends on 64BIT && PCI
32 ---help--- 32 ---help---
33 This driver supports Thunder's NIC virtual function 33 This driver supports Thunder's NIC virtual function
34 34
35config THUNDER_NIC_BGX 35config THUNDER_NIC_BGX
36 tristate "Thunder MAC interface driver (BGX)" 36 tristate "Thunder MAC interface driver (BGX)"
37 depends on 64BIT 37 depends on 64BIT && PCI
38 select PHYLIB 38 select PHYLIB
39 select MDIO_THUNDER 39 select MDIO_THUNDER
40 select THUNDER_NIC_RGX 40 select THUNDER_NIC_RGX
@@ -44,7 +44,7 @@ config THUNDER_NIC_BGX
44 44
45config THUNDER_NIC_RGX 45config THUNDER_NIC_RGX
46 tristate "Thunder MAC interface driver (RGX)" 46 tristate "Thunder MAC interface driver (RGX)"
47 depends on 64BIT 47 depends on 64BIT && PCI
48 select PHYLIB 48 select PHYLIB
49 select MDIO_THUNDER 49 select MDIO_THUNDER
50 ---help--- 50 ---help---
@@ -53,7 +53,7 @@ config THUNDER_NIC_RGX
53 53
54config CAVIUM_PTP 54config CAVIUM_PTP
55 tristate "Cavium PTP coprocessor as PTP clock" 55 tristate "Cavium PTP coprocessor as PTP clock"
56 depends on 64BIT 56 depends on 64BIT && PCI
57 imply PTP_1588_CLOCK 57 imply PTP_1588_CLOCK
58 default y 58 default y
59 ---help--- 59 ---help---
@@ -65,7 +65,7 @@ config CAVIUM_PTP
65 65
66config LIQUIDIO 66config LIQUIDIO
67 tristate "Cavium LiquidIO support" 67 tristate "Cavium LiquidIO support"
68 depends on 64BIT 68 depends on 64BIT && PCI
69 depends on MAY_USE_DEVLINK 69 depends on MAY_USE_DEVLINK
70 imply PTP_1588_CLOCK 70 imply PTP_1588_CLOCK
71 select FW_LOADER 71 select FW_LOADER
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 8a815bb57177..7e8454d3b1ad 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -91,6 +91,9 @@ static int octeon_console_debug_enabled(u32 console)
91 */ 91 */
92#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000 92#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
93 93
94/* time to wait for possible in-flight requests in milliseconds */
95#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
96
94struct lio_trusted_vf_ctx { 97struct lio_trusted_vf_ctx {
95 struct completion complete; 98 struct completion complete;
96 int status; 99 int status;
@@ -259,7 +262,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
259 force_io_queues_off(oct); 262 force_io_queues_off(oct);
260 263
261 /* To allow for in-flight requests */ 264 /* To allow for in-flight requests */
262 schedule_timeout_uninterruptible(100); 265 schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
263 266
264 if (wait_for_pending_requests(oct)) 267 if (wait_for_pending_requests(oct))
265 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); 268 dev_err(&oct->pci_dev->dev, "There were pending requests\n");
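
The liquidio fix addresses a units bug: schedule_timeout_uninterruptible() counts jiffies, so a bare 100 meant 1 s at HZ=100 but only 100 ms at HZ=1000. A userspace model of the conversion with HZ as an explicit parameter; the round-up approximates what msecs_to_jiffies() does in the common case:

#include <stdio.h>

static unsigned long ms_to_jiffies(unsigned int ms, unsigned int hz)
{
	return ((unsigned long)ms * hz + 999) / 1000;	/* round up */
}

int main(void)
{
	printf("1000 ms at HZ=100:  %lu jiffies\n", ms_to_jiffies(1000, 100));
	printf("1000 ms at HZ=1000: %lu jiffies\n", ms_to_jiffies(1000, 1000));
	/* A bare "100" therefore meant 1 s at HZ=100 but 100 ms at HZ=1000. */
	return 0;
}
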
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 3f6afb54a5eb..bb43ddb7539e 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -643,13 +643,21 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
643static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) 643static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
644{ 644{
645 struct octeon_mgmt *p = netdev_priv(netdev); 645 struct octeon_mgmt *p = netdev_priv(netdev);
646 int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; 646 int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
647 647
648 netdev->mtu = new_mtu; 648 netdev->mtu = new_mtu;
649 649
650 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); 650 /* HW lifts the limit if the frame is VLAN tagged
651 * (+4 bytes per each tag, up to two tags)
652 */
653 cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
654 /* Set the hardware to truncate packets larger than the MTU. The jabber
655 * register must be set to a multiple of 8 bytes, so round up. JABBER is
656 * an unconditional limit, so we need to account for two possible VLAN
657 * tags.
658 */
651 cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, 659 cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
652 (size_without_fcs + 7) & 0xfff8); 660 (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
653 661
654 return 0; 662 return 0;
655} 663}
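
The octeon_mgmt arithmetic, stated on its own: the frame-length limit is the full L2 frame (MTU plus header plus FCS), while the jabber cutoff additionally reserves room for two VLAN tags and must be a multiple of 8, hence the rounding. A worked example:

#include <stdio.h>

#define ETH_HLEN    14
#define ETH_FCS_LEN  4
#define VLAN_HLEN    4

int main(void)
{
	int mtu = 1500;
	int max_packet = mtu + ETH_HLEN + ETH_FCS_LEN;
	/* jabber: room for two VLAN tags, rounded up to a multiple of 8 */
	int jabber = (max_packet + 7 + VLAN_HLEN * 2) & ~7;

	printf("frm_max=%d jabber=%d\n", max_packet, jabber);	/* 1518 1528 */
	return 0;
}
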
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 7b795edd9d3a..a19172dbe6be 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -51,6 +51,7 @@
51#include <linux/sched.h> 51#include <linux/sched.h>
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/uaccess.h> 53#include <linux/uaccess.h>
54#include <linux/nospec.h>
54 55
55#include "common.h" 56#include "common.h"
56#include "cxgb3_ioctl.h" 57#include "cxgb3_ioctl.h"
@@ -2268,6 +2269,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
2268 2269
2269 if (t.qset_idx >= nqsets) 2270 if (t.qset_idx >= nqsets)
2270 return -EINVAL; 2271 return -EINVAL;
2272 t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
2271 2273
2272 q = &adapter->params.sge.qset[q1 + t.qset_idx]; 2274 q = &adapter->params.sge.qset[q1 + t.qset_idx];
2273 t.rspq_size = q->rspq_size; 2275 t.rspq_size = q->rspq_size;
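
The cxgb3 change is standard Spectre-v1 hardening: even after the bounds check, a speculatively executed load can still observe an out-of-range index, so the checked index is additionally clamped without a branch. A simplified stand-in for array_index_nospec(); the real helper uses arch-specific masking:

#include <stddef.h>
#include <stdio.h>

static size_t index_nospec_model(size_t idx, size_t size)
{
	/* all-ones mask when idx < size, zero mask otherwise */
	size_t mask = (size_t)0 - (idx < size);
	return idx & mask;
}

int main(void)
{
	int table[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	size_t idx = 5;

	if (idx >= 8)
		return 1;
	idx = index_nospec_model(idx, 8);	/* harden the checked index */
	printf("%d\n", table[idx]);
	return 0;
}

The point of the masking is that it depends only on data, not on a conditional branch the CPU could mispredict past.
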
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 974a868a4824..3720c3e11ebb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -8702,7 +8702,7 @@ static int t4_get_flash_params(struct adapter *adap)
8702 }; 8702 };
8703 8703
8704 unsigned int part, manufacturer; 8704 unsigned int part, manufacturer;
8705 unsigned int density, size; 8705 unsigned int density, size = 0;
8706 u32 flashid = 0; 8706 u32 flashid = 0;
8707 int ret; 8707 int ret;
8708 8708
@@ -8772,11 +8772,6 @@ static int t4_get_flash_params(struct adapter *adap)
8772 case 0x22: /* 256MB */ 8772 case 0x22: /* 256MB */
8773 size = 1 << 28; 8773 size = 1 << 28;
8774 break; 8774 break;
8775
8776 default:
8777 dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
8778 flashid, density);
8779 return -EINVAL;
8780 } 8775 }
8781 break; 8776 break;
8782 } 8777 }
@@ -8792,10 +8787,6 @@ static int t4_get_flash_params(struct adapter *adap)
8792 case 0x17: /* 64MB */ 8787 case 0x17: /* 64MB */
8793 size = 1 << 26; 8788 size = 1 << 26;
8794 break; 8789 break;
8795 default:
8796 dev_err(adap->pdev_dev, "ISSI Flash Part has bad size, ID = %#x, Density code = %#x\n",
8797 flashid, density);
8798 return -EINVAL;
8799 } 8790 }
8800 break; 8791 break;
8801 } 8792 }
@@ -8811,10 +8802,6 @@ static int t4_get_flash_params(struct adapter *adap)
8811 case 0x18: /* 16MB */ 8802 case 0x18: /* 16MB */
8812 size = 1 << 24; 8803 size = 1 << 24;
8813 break; 8804 break;
8814 default:
8815 dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
8816 flashid, density);
8817 return -EINVAL;
8818 } 8805 }
8819 break; 8806 break;
8820 } 8807 }
@@ -8830,17 +8817,21 @@ static int t4_get_flash_params(struct adapter *adap)
8830 case 0x18: /* 16MB */ 8817 case 0x18: /* 16MB */
8831 size = 1 << 24; 8818 size = 1 << 24;
8832 break; 8819 break;
8833 default:
8834 dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
8835 flashid, density);
8836 return -EINVAL;
8837 } 8820 }
8838 break; 8821 break;
8839 } 8822 }
8840 default: 8823 }
8841 dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n", 8824
8842 flashid); 8825 /* If we didn't recognize the FLASH part, that's no real issue: the
8843 return -EINVAL; 8826 * Hardware/Software contract says that Hardware will _*ALWAYS*_
8827 * use a FLASH part which is at least 4MB in size and has 64KB
8828 * sectors. The unrecognized FLASH part is likely to be much larger
8829 * than 4MB, but that's all we really need.
8830 */
8831 if (size == 0) {
8832 dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
8833 flashid);
8834 size = 1 << 22;
8844 } 8835 }
8845 8836
8846 /* Store decoded Flash size and fall through into vetting code. */ 8837 /* Store decoded Flash size and fall through into vetting code. */
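
The cxgb4 rework replaces the per-vendor -EINVAL defaults with a safe fallback: size stays 0 through the decode, and an unrecognized part is assumed to honor the 4 MB / 64 KB-sector contract. The shape of that decode, reduced to one vendor with density codes taken from the hunk above:

#include <stdio.h>

static unsigned int flash_size(unsigned int density)
{
	unsigned int size = 0;			/* stays 0 if unrecognized */

	switch (density) {
	case 0x17:
		size = 1u << 26;		/* 64MB */
		break;
	case 0x18:
		size = 1u << 24;		/* 16MB */
		break;
	}
	if (!size)
		size = 1u << 22;		/* contractual minimum: 4MB */
	return size;
}

int main(void)
{
	printf("%#x\n", flash_size(0x42));	/* unknown -> 0x400000 */
	return 0;
}
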
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 5ab912937aff..ec0b545197e2 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
19config CS89x0 19config CS89x0
20 tristate "CS89x0 support" 20 tristate "CS89x0 support"
21 depends on ISA || EISA || ARM 21 depends on ISA || EISA || ARM
22 depends on !PPC32
22 ---help--- 23 ---help---
23 Support for CS89x0 chipset based Ethernet cards. If you have a 24 Support for CS89x0 chipset based Ethernet cards. If you have a
24 network (Ethernet) card of this type, say Y and read the file 25 network (Ethernet) card of this type, say Y and read the file
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 5f4e1ffa7b95..ab02057ac730 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
125/* Default alignment for start of data in an Rx FD */ 125/* Default alignment for start of data in an Rx FD */
126#define DPAA_FD_DATA_ALIGNMENT 16 126#define DPAA_FD_DATA_ALIGNMENT 16
127 127
128/* The DPAA requires 256 bytes reserved and mapped for the SGT */
129#define DPAA_SGT_SIZE 256
130
128/* Values for the L3R field of the FM Parse Results 131/* Values for the L3R field of the FM Parse Results
129 */ 132 */
130/* L3 Type field: First IP Present IPv4 */ 133/* L3 Type field: First IP Present IPv4 */
@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
1617 1620
1618 if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { 1621 if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
1619 nr_frags = skb_shinfo(skb)->nr_frags; 1622 nr_frags = skb_shinfo(skb)->nr_frags;
1620 dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + 1623 dma_unmap_single(dev, addr,
1621 sizeof(struct qm_sg_entry) * (1 + nr_frags), 1624 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
1622 dma_dir); 1625 dma_dir);
1623 1626
1624 /* The sgt buffer has been allocated with netdev_alloc_frag(), 1627 /* The sgt buffer has been allocated with netdev_alloc_frag(),
@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
1903 void *sgt_buf; 1906 void *sgt_buf;
1904 1907
1905 /* get a page frag to store the SGTable */ 1908 /* get a page frag to store the SGTable */
1906 sz = SKB_DATA_ALIGN(priv->tx_headroom + 1909 sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
1907 sizeof(struct qm_sg_entry) * (1 + nr_frags));
1908 sgt_buf = netdev_alloc_frag(sz); 1910 sgt_buf = netdev_alloc_frag(sz);
1909 if (unlikely(!sgt_buf)) { 1911 if (unlikely(!sgt_buf)) {
1910 netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", 1912 netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
1972 skbh = (struct sk_buff **)buffer_start; 1974 skbh = (struct sk_buff **)buffer_start;
1973 *skbh = skb; 1975 *skbh = skb;
1974 1976
1975 addr = dma_map_single(dev, buffer_start, priv->tx_headroom + 1977 addr = dma_map_single(dev, buffer_start,
1976 sizeof(struct qm_sg_entry) * (1 + nr_frags), 1978 priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
1977 dma_dir);
1978 if (unlikely(dma_mapping_error(dev, addr))) { 1979 if (unlikely(dma_mapping_error(dev, addr))) {
1979 dev_err(dev, "DMA mapping failed"); 1980 dev_err(dev, "DMA mapping failed");
1980 err = -EINVAL; 1981 err = -EINVAL;
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ce6e24c74978..ecbf6187e13a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
324#define HWP_HXS_PHE_REPORT 0x00000800 324#define HWP_HXS_PHE_REPORT 0x00000800
325#define HWP_HXS_PCAC_PSTAT 0x00000100 325#define HWP_HXS_PCAC_PSTAT 0x00000100
326#define HWP_HXS_PCAC_PSTOP 0x00000001 326#define HWP_HXS_PCAC_PSTOP 0x00000001
327#define HWP_HXS_TCP_OFFSET 0xA
328#define HWP_HXS_UDP_OFFSET 0xB
329#define HWP_HXS_SH_PAD_REM 0x80000000
330
327struct fman_port_hwp_regs { 331struct fman_port_hwp_regs {
328 struct { 332 struct {
329 u32 ssa; /* Soft Sequence Attachment */ 333 u32 ssa; /* Soft Sequence Attachment */
@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
728 iowrite32be(0xffffffff, &regs->pmda[i].lcv); 732 iowrite32be(0xffffffff, &regs->pmda[i].lcv);
729 } 733 }
730 734
735 /* Short packet padding removal from checksum calculation */
736 iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
737 iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
738
731 start_port_hwp(port); 739 start_port_hwp(port);
732} 740}
733 741
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index e2e5cdc7119c..4c0f7eda1166 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
439{ 439{
440 struct hinic_rq *rq = rxq->rq; 440 struct hinic_rq *rq = rxq->rq;
441 441
442 irq_set_affinity_hint(rq->irq, NULL);
442 free_irq(rq->irq, rxq); 443 free_irq(rq->irq, rxq);
443 rx_del_napi(rxq); 444 rx_del_napi(rxq);
444} 445}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 9128858479c4..2353ec829c04 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -229,6 +229,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
229 txq->txq_stats.tx_busy++; 229 txq->txq_stats.tx_busy++;
230 u64_stats_update_end(&txq->txq_stats.syncp); 230 u64_stats_update_end(&txq->txq_stats.syncp);
231 err = NETDEV_TX_BUSY; 231 err = NETDEV_TX_BUSY;
232 wqe_size = 0;
232 goto flush_skbs; 233 goto flush_skbs;
233 } 234 }
234 235
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index d0e196bff081..ffe7acbeaa22 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -329,7 +329,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
329 return; 329 return;
330 330
331failure: 331failure:
332 dev_info(dev, "replenish pools failure\n"); 332 if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
333 dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
333 pool->free_map[pool->next_free] = index; 334 pool->free_map[pool->next_free] = index;
334 pool->rx_buff[index].skb = NULL; 335 pool->rx_buff[index].skb = NULL;
335 336
@@ -1617,7 +1618,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
1617 &tx_crq); 1618 &tx_crq);
1618 } 1619 }
1619 if (lpar_rc != H_SUCCESS) { 1620 if (lpar_rc != H_SUCCESS) {
1620 dev_err(dev, "tx failed with code %ld\n", lpar_rc); 1621 if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
1622 dev_err_ratelimited(dev, "tx: send failed\n");
1621 dev_kfree_skb_any(skb); 1623 dev_kfree_skb_any(skb);
1622 tx_buff->skb = NULL; 1624 tx_buff->skb = NULL;
1623 1625
@@ -1825,8 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
1825 1827
1826 rc = ibmvnic_login(netdev); 1828 rc = ibmvnic_login(netdev);
1827 if (rc) { 1829 if (rc) {
1828 adapter->state = VNIC_PROBED; 1830 adapter->state = reset_state;
1829 return 0; 1831 return rc;
1830 } 1832 }
1831 1833
1832 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM || 1834 if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
@@ -3204,6 +3206,25 @@ static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
3204 return crq; 3206 return crq;
3205} 3207}
3206 3208
3209static void print_subcrq_error(struct device *dev, int rc, const char *func)
3210{
3211 switch (rc) {
3212 case H_PARAMETER:
3213 dev_warn_ratelimited(dev,
3214 "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
3215 func, rc);
3216 break;
3217 case H_CLOSED:
3218 dev_warn_ratelimited(dev,
3219 "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
3220 func, rc);
3221 break;
3222 default:
3223 dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
3224 break;
3225 }
3226}
3227
3207static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle, 3228static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3208 union sub_crq *sub_crq) 3229 union sub_crq *sub_crq)
3209{ 3230{
@@ -3230,11 +3251,8 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
3230 cpu_to_be64(u64_crq[2]), 3251 cpu_to_be64(u64_crq[2]),
3231 cpu_to_be64(u64_crq[3])); 3252 cpu_to_be64(u64_crq[3]));
3232 3253
3233 if (rc) { 3254 if (rc)
3234 if (rc == H_CLOSED) 3255 print_subcrq_error(dev, rc, __func__);
3235 dev_warn(dev, "CRQ Queue closed\n");
3236 dev_err(dev, "Send error (rc=%d)\n", rc);
3237 }
3238 3256
3239 return rc; 3257 return rc;
3240} 3258}
@@ -3252,11 +3270,8 @@ static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
3252 cpu_to_be64(remote_handle), 3270 cpu_to_be64(remote_handle),
3253 ioba, num_entries); 3271 ioba, num_entries);
3254 3272
3255 if (rc) { 3273 if (rc)
3256 if (rc == H_CLOSED) 3274 print_subcrq_error(dev, rc, __func__);
3257 dev_warn(dev, "CRQ Queue closed\n");
3258 dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
3259 }
3260 3275
3261 return rc; 3276 return rc;
3262} 3277}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index ed6dbcfd4e96..b151ae316546 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2199,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
2199 return true; 2199 return true;
2200} 2200}
2201 2201
2202#define I40E_XDP_PASS 0 2202#define I40E_XDP_PASS 0
2203#define I40E_XDP_CONSUMED 1 2203#define I40E_XDP_CONSUMED BIT(0)
2204#define I40E_XDP_TX 2 2204#define I40E_XDP_TX BIT(1)
2205#define I40E_XDP_REDIR BIT(2)
2205 2206
2206static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, 2207static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
2207 struct i40e_ring *xdp_ring); 2208 struct i40e_ring *xdp_ring);
@@ -2248,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
2248 break; 2249 break;
2249 case XDP_REDIRECT: 2250 case XDP_REDIRECT:
2250 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); 2251 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
2251 result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED; 2252 result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
2252 break; 2253 break;
2253 default: 2254 default:
2254 bpf_warn_invalid_xdp_action(act); 2255 bpf_warn_invalid_xdp_action(act);
@@ -2311,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2311 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 2312 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
2312 struct sk_buff *skb = rx_ring->skb; 2313 struct sk_buff *skb = rx_ring->skb;
2313 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); 2314 u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
2314 bool failure = false, xdp_xmit = false; 2315 unsigned int xdp_xmit = 0;
2316 bool failure = false;
2315 struct xdp_buff xdp; 2317 struct xdp_buff xdp;
2316 2318
2317 xdp.rxq = &rx_ring->xdp_rxq; 2319 xdp.rxq = &rx_ring->xdp_rxq;
@@ -2372,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2372 } 2374 }
2373 2375
2374 if (IS_ERR(skb)) { 2376 if (IS_ERR(skb)) {
2375 if (PTR_ERR(skb) == -I40E_XDP_TX) { 2377 unsigned int xdp_res = -PTR_ERR(skb);
2376 xdp_xmit = true; 2378
2379 if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
2380 xdp_xmit |= xdp_res;
2377 i40e_rx_buffer_flip(rx_ring, rx_buffer, size); 2381 i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
2378 } else { 2382 } else {
2379 rx_buffer->pagecnt_bias++; 2383 rx_buffer->pagecnt_bias++;
@@ -2427,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
2427 total_rx_packets++; 2431 total_rx_packets++;
2428 } 2432 }
2429 2433
2430 if (xdp_xmit) { 2434 if (xdp_xmit & I40E_XDP_REDIR)
2435 xdp_do_flush_map();
2436
2437 if (xdp_xmit & I40E_XDP_TX) {
2431 struct i40e_ring *xdp_ring = 2438 struct i40e_ring *xdp_ring =
2432 rx_ring->vsi->xdp_rings[rx_ring->queue_index]; 2439 rx_ring->vsi->xdp_rings[rx_ring->queue_index];
2433 2440
2434 i40e_xdp_ring_update_tail(xdp_ring); 2441 i40e_xdp_ring_update_tail(xdp_ring);
2435 xdp_do_flush_map();
2436 } 2442 }
2437 2443
2438 rx_ring->skb = skb; 2444 rx_ring->skb = skb;
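
Making the i40e XDP verdict a bitmask lets one NAPI pass record TX and REDIRECT work independently and flush each backend exactly once after the loop, with the redirect flush ordered ahead of the TX tail bump. A reduced model of the accumulate-then-flush flow; the flag values mirror the defines in the hunk:

#include <stdio.h>

#define XDP_RES_CONSUMED (1u << 0)
#define XDP_RES_TX       (1u << 1)
#define XDP_RES_REDIR    (1u << 2)

int main(void)
{
	unsigned int xdp_xmit = 0;

	/* per-packet verdicts accumulate during the receive loop... */
	xdp_xmit |= XDP_RES_TX;
	xdp_xmit |= XDP_RES_REDIR;

	/* ...and each backend is flushed once after the loop */
	if (xdp_xmit & XDP_RES_REDIR)
		printf("flush redirect maps\n");
	if (xdp_xmit & XDP_RES_TX)
		printf("bump XDP ring tail\n");
	return 0;
}

With the old boolean, a packet redirected after another was queued for TX could be recorded as the wrong kind of work, so one of the two flushes was skipped.
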
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3f5c350716bb..0bd1294ba517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1871,7 +1871,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
1871 if (enable_addr != 0) 1871 if (enable_addr != 0)
1872 rar_high |= IXGBE_RAH_AV; 1872 rar_high |= IXGBE_RAH_AV;
1873 1873
1874 /* Record lower 32 bits of MAC address and then make
1875 * sure that write is flushed to hardware before writing
1876 * the upper 16 bits and setting the valid bit.
1877 */
1874 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1878 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
1879 IXGBE_WRITE_FLUSH(hw);
1875 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1880 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1876 1881
1877 return 0; 1882 return 0;
@@ -1903,8 +1908,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
1903 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 1908 rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
1904 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); 1909 rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
1905 1910
1906 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); 1911 /* Clear the address valid bit and upper 16 bits of the address
1912 * before clearing the lower bits. This way we aren't updating
1913 * a live filter.
1914 */
1907 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 1915 IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
1916 IXGBE_WRITE_FLUSH(hw);
1917 IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
1908 1918
1909 /* clear VMDq pool/queue selection for this RAR */ 1919 /* clear VMDq pool/queue selection for this RAR */
1910 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); 1920 hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
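
Both ixgbe_common hunks encode one ordering rule: the valid bit in RAH must never be observable while RAL is stale, so the two halves are written with a flush in between, and clearing proceeds in the reverse order of setting. A model with a fake register file; the empty flush() is a placeholder for the posted-write read-back:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2];			/* [0] = RAL, [1] = RAH */

static void wr(int r, uint32_t v) { regs[r] = v; }
static void flush(void) { /* a register read-back would post writes here */ }

static void set_rar(uint32_t lo, uint32_t hi_with_valid)
{
	wr(0, lo);			/* low 32 address bits first */
	flush();			/* ensure they landed... */
	wr(1, hi_with_valid);		/* ...before the valid bit appears */
}

static void clear_rar(void)
{
	wr(1, 0);			/* drop valid bit + upper bits first */
	flush();
	wr(0, 0);			/* then clear the low half */
}

int main(void)
{
	set_rar(0xaabbccdd, 0x80000011);
	clear_rar();
	printf("%#x %#x\n", (unsigned)regs[0], (unsigned)regs[1]);
	return 0;
}
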
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index c116f459945d..da4322e4daed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -839,7 +839,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
839 } 839 }
840 840
841 itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX; 841 itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
842 if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) { 842 if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
843 netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n", 843 netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
844 __func__, itd->sa_idx, xs->xso.offload_handle); 844 __func__, itd->sa_idx, xs->xso.offload_handle);
845 return 0; 845 return 0;
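
The ixgbe_ipsec fix is a textbook off-by-one: an index into a table of N entries is valid for 0..N-1, so the reject test must be >=, not >. Reduced to its essence:

#include <stdio.h>

#define MAX_SA 4

static int sa_idx_valid(unsigned int idx)
{
	return idx < MAX_SA;	/* equivalently: reject idx >= MAX_SA */
}

int main(void)
{
	printf("%d %d\n", sa_idx_valid(3), sa_idx_valid(4));	/* 1 0 */
	return 0;
}
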
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3e87dbbc9024..62e57b05a0ae 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
 	return skb;
 }
 
 #define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED 1
-#define IXGBE_XDP_TX 2
+#define IXGBE_XDP_CONSUMED BIT(0)
+#define IXGBE_XDP_TX BIT(1)
+#define IXGBE_XDP_REDIR BIT(2)
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
 			       struct xdp_frame *xdpf);
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
 		if (!err)
-			result = IXGBE_XDP_TX;
+			result = IXGBE_XDP_REDIR;
 		else
 			result = IXGBE_XDP_CONSUMED;
 		break;
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 	unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-	bool xdp_xmit = false;
+	unsigned int xdp_xmit = 0;
 	struct xdp_buff xdp;
 
 	xdp.rxq = &rx_ring->xdp_rxq;
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		}
 
 		if (IS_ERR(skb)) {
-			if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
-				xdp_xmit = true;
+			unsigned int xdp_res = -PTR_ERR(skb);
+
+			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+				xdp_xmit |= xdp_res;
 				ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
 			} else {
 				rx_buffer->pagecnt_bias++;
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		total_rx_packets++;
 	}
 
-	if (xdp_xmit) {
+	if (xdp_xmit & IXGBE_XDP_REDIR)
+		xdp_do_flush_map();
+
+	if (xdp_xmit & IXGBE_XDP_TX) {
 		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 
 		/* Force memory writes to complete before letting h/w
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		 */
 		wmb();
 		writel(ring->next_to_use, ring->tail);
-
-		xdp_do_flush_map();
 	}
 
 	u64_stats_update_begin(&rx_ring->syncp);
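The hunks above rework the ixgbe XDP verdict handling: the return codes become BIT() flags so one pass over the RX ring can accumulate both TX and REDIRECT verdicts, and xdp_do_flush_map() now runs before the TX tail bump rather than after it. Below is a minimal userspace sketch of that accumulate-then-flush pattern; the flag names and values are illustrative stand-ins, not the driver's definitions.

#include <stdio.h>

/* Illustrative stand-ins for the driver's BIT(n) verdict flags. */
#define XDP_RES_CONSUMED (1U << 0)
#define XDP_RES_TX       (1U << 1)
#define XDP_RES_REDIR    (1U << 2)

int main(void)
{
	unsigned int verdicts[] = { XDP_RES_TX, XDP_RES_REDIR, XDP_RES_CONSUMED };
	unsigned int xdp_xmit = 0;
	int i;

	/* Per-packet loop: OR each packet's verdict into one accumulator. */
	for (i = 0; i < 3; i++)
		xdp_xmit |= verdicts[i];

	/* After the loop: flush each backend at most once per poll. */
	if (xdp_xmit & XDP_RES_REDIR)
		printf("flush redirect maps\n");
	if (xdp_xmit & XDP_RES_TX)
		printf("ring XDP TX doorbell\n");
	return 0;
}

With single-valued codes, a ring that saw both a redirected and a TX'ed packet could record only one outcome and skip the other flush; independent flag bits make both flushes reachable.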
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9f54ccbddea7..3360f7b9ee73 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -474,10 +474,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 {
 	const struct mlx4_en_frag_info *frag_info = priv->frag_info;
 	unsigned int truesize = 0;
+	bool release = true;
 	int nr, frag_size;
 	struct page *page;
 	dma_addr_t dma;
-	bool release;
 
 	/* Collect used fragments while replacing them in the HW descriptors */
 	for (nr = 0;; frags++) {
@@ -500,7 +500,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
 		release = page_count(page) != 1 ||
 			  page_is_pfmemalloc(page) ||
 			  page_to_nid(page) != numa_mem_id();
-	} else {
+	} else if (!priv->rx_headroom) {
+		/* rx_headroom for non XDP setup is always 0.
+		 * When XDP is set, the above condition will
+		 * guarantee page is always released.
+		 */
 		u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
 
 		frags->page_offset += sz_align;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 7b1b5ac986d0..31bd56727022 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2958,7 +2958,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
 	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
 	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
 	struct res_srq *srq;
-	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+	int local_qpn = vhcr->in_modifier & 0xffffff;
 
 	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 323ffe8bf7e4..456f30007ad6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -123,7 +123,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
 	int i;
 
 	buf->size = size;
-	buf->npages = 1 << get_order(size);
+	buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
 	buf->page_shift = PAGE_SHIFT;
 	buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
 			     GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 487388aed98f..384c1fa49081 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work)
 	unsigned long flags;
 	bool poll_cmd = ent->polling;
 	int alloc_ret;
+	int cmd_mode;
 
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
@@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work)
 	set_signature(ent, !cmd->checksum_disabled);
 	dump_command(dev, ent, 1);
 	ent->ts1 = ktime_get_ns();
+	cmd_mode = cmd->mode;
 
 	if (ent->callback)
 		schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work)
 	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
 	mmiowb();
 	/* if not in polling don't use ent after this point */
-	if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
+	if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
 		poll_timeout(ent);
 		/* make sure we read the descriptor after ownership is SW */
 		rmb();
@@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 {
 	struct mlx5_core_dev *dev = filp->private_data;
 	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-	char outlen_str[8];
+	char outlen_str[8] = {0};
 	int outlen;
 	void *ptr;
 	int err;
@@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 	if (copy_from_user(outlen_str, buf, count))
 		return -EFAULT;
 
-	outlen_str[7] = 0;
-
 	err = sscanf(outlen_str, "%d", &outlen);
 	if (err < 0)
 		return err;
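Two independent fixes in this file: cmd_work_handler() snapshots cmd->mode before ringing the command doorbell, since once firmware owns the entry the handler may no longer dereference state reached through it, and outlen_write() zero-initializes its buffer so it is NUL-terminated before sscanf() even when the user write fills every byte. A standalone sketch of the termination detail (plain C, not the driver code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* = {0} zero-fills the array, so the last byte is already a NUL;
	 * this replaces the explicit outlen_str[7] = 0 the patch removes.
	 */
	char outlen_str[8] = {0};
	const char user_buf[] = "1234567";	/* seven digit bytes */
	int outlen;

	/* Stand-in for copy_from_user(): copies bytes, appends no NUL. */
	memcpy(outlen_str, user_buf, 7);

	if (sscanf(outlen_str, "%d", &outlen) == 1)
		printf("outlen = %d\n", outlen);
	return 0;
}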
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 75e4308ba786..d258bb679271 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
 	HLIST_HEAD(del_list);
 	spin_lock_bh(&priv->fs.arfs.arfs_lock);
 	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
-		if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
-			break;
 		if (!work_pending(&arfs_rule->arfs_work) &&
 		    rps_may_expire_flow(priv->netdev,
 					arfs_rule->rxq, arfs_rule->flow_id,
 					arfs_rule->filter_id)) {
 			hlist_del_init(&arfs_rule->hlist);
 			hlist_add_head(&arfs_rule->hlist, &del_list);
+			if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+				break;
 		}
 	}
 	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
@@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	    skb->protocol != htons(ETH_P_IPV6))
 		return -EPROTONOSUPPORT;
 
+	if (skb->encapsulation)
+		return -EPROTONOSUPPORT;
+
 	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
 	if (!arfs_t)
 		return -EPROTONOSUPPORT;
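Moving the quota check into the delete branch changes what the quota counts: rules actually queued for expiry rather than rules merely visited, so a table whose leading entries are all busy can no longer exhaust the quota before a single stale rule is reached. A toy illustration with made-up values:

#include <stdio.h>

#define EXPIRY_QUOTA 3	/* illustrative, not the mlx5e constant's value */

int main(void)
{
	/* Four busy rules sit ahead of four stale ones. */
	int expirable[] = { 0, 0, 0, 0, 1, 1, 1, 1 };
	int quota = 0, deleted = 0, i;

	for (i = 0; i < 8; i++) {
		if (!expirable[i])
			continue;	/* visiting no longer consumes quota */
		deleted++;
		if (quota++ > EXPIRY_QUOTA)
			break;
	}
	printf("expired %d rules\n", deleted);	/* old placement would print 0 */
	return 0;
}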
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 0a52f31fef37..86bc9ac99586 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -275,7 +275,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 }
 
 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
-				    struct ieee_ets *ets)
+				    struct ieee_ets *ets,
+				    bool zero_sum_allowed)
 {
 	bool have_ets_tc = false;
 	int bw_sum = 0;
@@ -300,8 +301,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
 	}
 
 	if (have_ets_tc && bw_sum != 100) {
-		netdev_err(netdev,
-			   "Failed to validate ETS: BW sum is illegal\n");
+		if (bw_sum || (!bw_sum && !zero_sum_allowed))
+			netdev_err(netdev,
+				   "Failed to validate ETS: BW sum is illegal\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -316,7 +318,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
 		return -EOPNOTSUPP;
 
-	err = mlx5e_dbcnl_validate_ets(netdev, ets);
+	err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
 	if (err)
 		return err;
 
@@ -642,12 +644,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
 				  ets.prio_tc[i]);
 	}
 
-	err = mlx5e_dbcnl_validate_ets(netdev, &ets);
-	if (err) {
-		netdev_err(netdev,
-			   "%s, Failed to validate ETS: %d\n", __func__, err);
+	err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
+	if (err)
 		goto out;
-	}
 
 	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
 	if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 56c1b6f5593e..dae4156a710d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 	mlx5e_activate_channels(&priv->channels);
 	netif_tx_start_all_queues(priv->netdev);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_add_sqs_fwd_rules(priv);
 
 	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
 	mlx5e_redirect_rqts_to_drop(priv);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_remove_sqs_fwd_rules(priv);
 
 	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -4597,7 +4597,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	mlx5e_set_netdev_dev_addr(netdev);
 
 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
-	if (MLX5_VPORT_MANAGER(mdev))
+	if (MLX5_ESWITCH_MANAGER(mdev))
 		netdev->switchdev_ops = &mlx5e_switchdev_ops;
 #endif
 
@@ -4753,7 +4753,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
 	mlx5e_enable_async_events(priv);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_register_vport_reps(priv);
 
 	if (netdev->reg_state != NETREG_REGISTERED)
@@ -4788,7 +4788,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 
 	queue_work(priv->wq, &priv->set_rx_mode_work);
 
-	if (MLX5_VPORT_MANAGER(priv->mdev))
+	if (MLX5_ESWITCH_MANAGER(priv->mdev))
 		mlx5e_unregister_vport_reps(priv);
 
 	mlx5e_disable_async_events(priv);
@@ -4972,7 +4972,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
 		return NULL;
 
 #ifdef CONFIG_MLX5_ESWITCH
-	if (MLX5_VPORT_MANAGER(mdev)) {
+	if (MLX5_ESWITCH_MANAGER(mdev)) {
 		rpriv = mlx5e_alloc_nic_rep_priv(mdev);
 		if (!rpriv) {
 			mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 57987f6546e8..2b8040a3cdbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep;
 
-	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
 		return false;
 
 	rep = rpriv->rep;
@@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
 static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
 {
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-	struct mlx5_eswitch_rep *rep = rpriv->rep;
+	struct mlx5_eswitch_rep *rep;
 
+	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+		return false;
+
+	rep = rpriv->rep;
 	if (rep && rep->vport != FDB_UPLINK_VPORT)
 		return true;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 0edf4751a8ba..3a2c4e548226 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1957,6 +1957,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 	else
 		actions = flow->nic_attr->action;
 
+	if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
+	    !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
+		return false;
+
 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		return modify_header_match_supported(&parse_attr->spec, exts);
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f63dfbcd29fe..dd01ad4c0b54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 }
 
 /* Public E-Switch API */
-#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))
+#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
+
 
 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 {
 	int err;
 	int i, enabled_events;
 
-	if (!ESW_ALLOWED(esw))
-		return 0;
-
-	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
+	if (!ESW_ALLOWED(esw) ||
 	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
 		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
 		return -EOPNOTSUPP;
@@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 	u64 node_guid;
 	int err = 0;
 
-	if (!ESW_ALLOWED(esw))
+	if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
 		return -EINVAL;
@@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 {
 	struct mlx5_vport *evport;
 
-	if (!ESW_ALLOWED(esw))
+	if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
 		return -EPERM;
 	if (!LEGAL_VPORT(esw, vport))
 		return -EINVAL;
@@ -2218,6 +2216,6 @@ free_out:
 
 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
 {
-	return esw->mode;
+	return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
 }
 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index cecd201f0b73..91f1209886ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
 	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
 		return -EOPNOTSUPP;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
-		return -EOPNOTSUPP;
+	if(!MLX5_ESWITCH_MANAGER(dev))
+		return -EPERM;
 
 	if (dev->priv.eswitch->mode == SRIOV_NONE)
 		return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 49a75d31185e..6ddb2565884d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -32,6 +32,7 @@
 
 #include <linux/mutex.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
 
 #include "mlx5_core.h"
 #include "fs_core.h"
@@ -1886,7 +1887,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
 		if (!fwd_next_prio_supported(ft))
 			return ERR_PTR(-EOPNOTSUPP);
-		if (dest)
+		if (dest_num)
 			return ERR_PTR(-EINVAL);
 		mutex_lock(&root->chain_lock);
 		next_ft = find_next_chained_ft(prio);
@@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 		goto err;
 	}
 
-	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+	if (MLX5_ESWITCH_MANAGER(dev)) {
 		if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
 			err = init_fdb_root_ns(steering);
 			if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index afd9f4fa22f4..41ad24f0de2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -32,6 +32,7 @@
 
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
+#include <linux/mlx5/eswitch.h>
 #include <linux/module.h>
 #include "mlx5_core.h"
 #include "../../mlxfw/mlxfw.h"
@@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
 	}
 
 	if (MLX5_CAP_GEN(dev, vport_group_manager) &&
-	    MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+	    MLX5_ESWITCH_MANAGER(dev)) {
 		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
 		if (err)
 			return err;
 	}
 
-	if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+	if (MLX5_ESWITCH_MANAGER(dev)) {
 		err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
 		if (err)
 			return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 1e062e6b2587..3f767cde4c1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -488,6 +488,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
 void mlx5_init_clock(struct mlx5_core_dev *mdev)
 {
 	struct mlx5_clock *clock = &mdev->clock;
+	u64 overflow_cycles;
 	u64 ns;
 	u64 frac = 0;
 	u32 dev_freq;
@@ -511,10 +512,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
 
 	/* Calculate period in seconds to call the overflow watchdog - to make
 	 * sure counter is checked at least once every wrap around.
+	 * The period is calculated as the minimum between max HW cycles count
+	 * (The clock source mask) and max amount of cycles that can be
+	 * multiplied by clock multiplier where the result doesn't exceed
+	 * 64bits.
 	 */
-	ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
+	overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
+	overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+
+	ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
 				 frac, &frac);
-	do_div(ns, NSEC_PER_SEC / 2 / HZ);
+	do_div(ns, NSEC_PER_SEC / HZ);
 	clock->overflow_period = ns;
 
 	mdev->clock_info_page = alloc_page(GFP_KERNEL);
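The patched interval math bounds the watchdog span by two limits: the largest cycle count whose product with the clock multiplier still fits in 64 bits inside cyclecounter_cyc2ns(), and half the hardware counter's wrap range; because the halving now lives in overflow_cycles, the divisor drops from NSEC_PER_SEC / 2 / HZ to NSEC_PER_SEC / HZ. A userspace sketch of the same arithmetic with made-up device parameters:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Made-up device values; the driver reads these from the HCA. */
	uint64_t mult = 16777216;		/* cycles -> ns multiplier */
	uint64_t mask = (1ULL << 41) - 1;	/* 41-bit free-running counter */
	unsigned int shift = 24;
	long hz = 250;				/* timer ticks per second */

	/* Largest span whose (cycles * mult) still fits in 63 bits... */
	uint64_t overflow_cycles = (~0ULL >> 1) / mult;
	/* ...further capped at half the counter's wrap range. */
	if (overflow_cycles > (mask >> 1))
		overflow_cycles = mask >> 1;

	/* cyclecounter_cyc2ns() without the fractional carry */
	uint64_t ns = (overflow_cycles * mult) >> shift;

	printf("check the counter every %llu ticks\n",
	       (unsigned long long)(ns / (1000000000ULL / hz)));
	return 0;
}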
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 7cb67122e8b5..98359559c77e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -33,6 +33,7 @@
 #include <linux/etherdevice.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/eswitch.h>
 #include "mlx5_core.h"
 #include "lib/mpfs.h"
 
@@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
 	int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
 	struct mlx5_mpfs *mpfs;
 
-	if (!MLX5_VPORT_MANAGER(dev))
+	if (!MLX5_ESWITCH_MANAGER(dev))
 		return 0;
 
 	mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
@@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
 {
 	struct mlx5_mpfs *mpfs = dev->priv.mpfs;
 
-	if (!MLX5_VPORT_MANAGER(dev))
+	if (!MLX5_ESWITCH_MANAGER(dev))
 		return;
 
 	WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
 	u32 index;
 	int err;
 
-	if (!MLX5_VPORT_MANAGER(dev))
+	if (!MLX5_ESWITCH_MANAGER(dev))
 		return 0;
 
 	mutex_lock(&mpfs->lock);
@@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
 	int err = 0;
 	u32 index;
 
-	if (!MLX5_VPORT_MANAGER(dev))
+	if (!MLX5_ESWITCH_MANAGER(dev))
 		return 0;
 
 	mutex_lock(&mpfs->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index fa9d0760dd36..31a9cbd85689 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
 static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 				   int inlen)
 {
-	u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+	u32 out[MLX5_ST_SZ_DW(qetc_reg)];
 
 	if (!MLX5_CAP_GEN(mdev, ets))
 		return -EOPNOTSUPP;
@@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
 				     int outlen)
 {
-	u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+	u32 in[MLX5_ST_SZ_DW(qetc_reg)];
 
 	if (!MLX5_CAP_GEN(mdev, ets))
 		return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 2a8b529ce6dd..a0674962f02c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
 		return -EBUSY;
 	}
 
+	if (!MLX5_ESWITCH_MANAGER(dev))
+		goto enable_vfs_hca;
+
 	err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
 	if (err) {
 		mlx5_core_warn(dev,
@@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
 		return err;
 	}
 
+enable_vfs_hca:
 	for (vf = 0; vf < num_vfs; vf++) {
 		err = mlx5_core_enable_hca(dev, vf + 1);
 		if (err) {
@@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
 	}
 
 out:
-	mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+	if (MLX5_ESWITCH_MANAGER(dev))
+		mlx5_eswitch_disable_sriov(dev->priv.eswitch);
 
 	if (mlx5_wait_for_vf_pages(dev))
 		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 719cecb182c6..7eecd5b07bb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
 		return -EINVAL;
 	if (!MLX5_CAP_GEN(mdev, vport_group_manager))
 		return -EACCES;
-	if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-		return -EOPNOTSUPP;
 
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index b97bb72b4db4..86478a6b99c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -113,35 +113,45 @@ err_db_free:
 	return err;
 }
 
-static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
+static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
 				  struct mlx5_wq_qp *qp)
 {
+	struct mlx5_frag_buf_ctrl *sq_fbc;
 	struct mlx5_frag_buf *rqb, *sqb;
 
 	rqb = &qp->rq.fbc.frag_buf;
 	*rqb = *buf;
 	rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
-	rqb->npages = 1 << get_order(rqb->size);
+	rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
 
-	sqb = &qp->sq.fbc.frag_buf;
-	*sqb = *buf;
-	sqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
-	sqb->npages = 1 << get_order(sqb->size);
+	sq_fbc = &qp->sq.fbc;
+	sqb = &sq_fbc->frag_buf;
+	*sqb = *buf;
+	sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
+	sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
 	sqb->frags += rqb->npages; /* first part is for the rq */
+	if (sq_fbc->strides_offset)
+		sqb->frags--;
 }
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
 		      struct mlx5_wq_ctrl *wq_ctrl)
 {
+	u32 sq_strides_offset;
 	int err;
 
 	mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
 		      MLX5_GET(qpc, qpc, log_rq_size),
 		      &wq->rq.fbc);
-	mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
-		      MLX5_GET(qpc, qpc, log_sq_size),
-		      &wq->sq.fbc);
+
+	sq_strides_offset =
+		((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+
+	mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
+			     MLX5_GET(qpc, qpc, log_sq_size),
+			     sq_strides_offset,
+			     &wq->sq.fbc);
 
 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
@@ -156,7 +166,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		goto err_db_free;
 	}
 
-	mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
+	mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
 
 	wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
 	wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 6aaaf3d9ba31..77b2adb29341 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4756,6 +4756,12 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
 	kfree(mlxsw_sp_rt6);
 }
 
+static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
+{
+	/* RTF_CACHE routes are ignored */
+	return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
+}
+
 static struct fib6_info *
 mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
 {
@@ -4765,11 +4771,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
 
 static struct mlxsw_sp_fib6_entry *
 mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
-				 const struct fib6_info *nrt, bool append)
+				 const struct fib6_info *nrt, bool replace)
 {
 	struct mlxsw_sp_fib6_entry *fib6_entry;
 
-	if (!append)
+	if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
 		return NULL;
 
 	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
@@ -4784,7 +4790,8 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
 			break;
 		if (rt->fib6_metric < nrt->fib6_metric)
 			continue;
-		if (rt->fib6_metric == nrt->fib6_metric)
+		if (rt->fib6_metric == nrt->fib6_metric &&
+		    mlxsw_sp_fib6_rt_can_mp(rt))
 			return fib6_entry;
 		if (rt->fib6_metric > nrt->fib6_metric)
 			break;
@@ -5163,7 +5170,7 @@ static struct mlxsw_sp_fib6_entry *
 mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
 			      const struct fib6_info *nrt, bool replace)
 {
-	struct mlxsw_sp_fib6_entry *fib6_entry;
+	struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
 
 	list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
 		struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
@@ -5172,13 +5179,18 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
 			continue;
 		if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
 			break;
-		if (replace && rt->fib6_metric == nrt->fib6_metric)
-			return fib6_entry;
+		if (replace && rt->fib6_metric == nrt->fib6_metric) {
+			if (mlxsw_sp_fib6_rt_can_mp(rt) ==
+			    mlxsw_sp_fib6_rt_can_mp(nrt))
+				return fib6_entry;
+			if (mlxsw_sp_fib6_rt_can_mp(nrt))
+				fallback = fallback ?: fib6_entry;
+		}
 		if (rt->fib6_metric > nrt->fib6_metric)
-			return fib6_entry;
+			return fallback ?: fib6_entry;
 	}
 
-	return NULL;
+	return fallback;
 }
 
 static int
@@ -5304,8 +5316,7 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
 }
 
 static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
-				    struct fib6_info *rt, bool replace,
-				    bool append)
+				    struct fib6_info *rt, bool replace)
 {
 	struct mlxsw_sp_fib6_entry *fib6_entry;
 	struct mlxsw_sp_fib_node *fib_node;
@@ -5331,7 +5342,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
 	/* Before creating a new entry, try to append route to an existing
 	 * multipath entry.
 	 */
-	fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append);
+	fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
 	if (fib6_entry) {
 		err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
 		if (err)
@@ -5339,14 +5350,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
 		return 0;
 	}
 
-	/* We received an append event, yet did not find any route to
-	 * append to.
-	 */
-	if (WARN_ON(append)) {
-		err = -EINVAL;
-		goto err_fib6_entry_append;
-	}
-
 	fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
 	if (IS_ERR(fib6_entry)) {
 		err = PTR_ERR(fib6_entry);
@@ -5364,7 +5367,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
 err_fib6_node_entry_link:
 	mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
 err_fib6_entry_create:
-err_fib6_entry_append:
 err_fib6_entry_nexthop_add:
 	mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
 	return err;
@@ -5715,7 +5717,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
 	struct mlxsw_sp_fib_event_work *fib_work =
 		container_of(work, struct mlxsw_sp_fib_event_work, work);
 	struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
-	bool replace, append;
+	bool replace;
 	int err;
 
 	rtnl_lock();
@@ -5726,10 +5728,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
 	case FIB_EVENT_ENTRY_APPEND: /* fall through */
 	case FIB_EVENT_ENTRY_ADD:
 		replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
-		append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
 		err = mlxsw_sp_router_fib6_add(mlxsw_sp,
-					       fib_work->fen6_info.rt, replace,
-					       append);
+					       fib_work->fen6_info.rt, replace);
 		if (err)
			mlxsw_sp_router_fib_abort(mlxsw_sp);
 		mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
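The new mlxsw_sp_fib6_rt_can_mp() predicate splits otherwise equal-metric routes into multipath-capable and non-capable classes, and the lookup now prefers an entry in the same class as the incoming route while remembering the first class-mismatched candidate as a fallback. A compact sketch of that prefer-exact-else-fallback selection (GNU C, since it uses the same x ?: y shorthand as the patch; the struct is invented for the example):

#include <stdio.h>

struct entry { int metric; int can_mp; };

static struct entry *find(struct entry *tbl, int n, int metric, int nrt_can_mp)
{
	struct entry *fallback = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (tbl[i].metric == metric) {
			if (tbl[i].can_mp == nrt_can_mp)
				return &tbl[i];	/* exact class match wins */
			if (nrt_can_mp)
				fallback = fallback ?: &tbl[i];
		}
		if (tbl[i].metric > metric)
			return fallback ?: &tbl[i];
	}
	return fallback;
}

int main(void)
{
	struct entry tbl[] = { { 100, 0 }, { 100, 1 }, { 200, 1 } };
	struct entry *e = find(tbl, 3, 100, 1);

	printf("picked metric=%d can_mp=%d\n", e->metric, e->can_mp);
	return 0;
}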
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index fcdfb8e7fdea..40216d56dddc 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -81,10 +81,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
 
 	ret = nfp_net_bpf_offload(nn, prog, running, extack);
 	/* Stop offload if replace not possible */
-	if (ret && prog)
-		nfp_bpf_xdp_offload(app, nn, NULL, extack);
+	if (ret)
+		return ret;
 
-	nn->dp.bpf_offload_xdp = prog && !ret;
+	nn->dp.bpf_offload_xdp = !!prog;
 	return ret;
 }
 
@@ -202,6 +202,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 		return -EOPNOTSUPP;
 
+	if (tcf_block_shared(f->block))
+		return -EOPNOTSUPP;
+
 	switch (f->command) {
 	case TC_BLOCK_BIND:
 		return tcf_block_cb_register(f->block,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 91935405f586..84f7a5dbea9d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
 				NFP_FLOWER_MASK_MPLS_Q;
 
 		frame->mpls_lse = cpu_to_be32(t_mpls);
+	} else if (dissector_uses_key(flow->dissector,
+				      FLOW_DISSECTOR_KEY_BASIC)) {
+		/* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
+		 * bit, which indicates an mpls ether type but without any
+		 * mpls fields.
+		 */
+		struct flow_dissector_key_basic *key_basic;
+
+		key_basic = skb_flow_dissector_target(flow->dissector,
+						      FLOW_DISSECTOR_KEY_BASIC,
+						      flow->key);
+		if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
+		    key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
+			frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
 	}
 }
 
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index c42e64f32333..525057bee0ed 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 	case cpu_to_be16(ETH_P_ARP):
 		return -EOPNOTSUPP;
 
+	case cpu_to_be16(ETH_P_MPLS_UC):
+	case cpu_to_be16(ETH_P_MPLS_MC):
+		if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+			key_layer |= NFP_FLOWER_LAYER_MAC;
+			key_size += sizeof(struct nfp_flower_mac_mpls);
+		}
+		break;
+
 	/* Will be included in layer 2. */
 	case cpu_to_be16(ETH_P_8021Q):
 		break;
@@ -623,6 +631,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
 	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
 		return -EOPNOTSUPP;
 
+	if (tcf_block_shared(f->block))
+		return -EOPNOTSUPP;
+
 	switch (f->command) {
 	case TC_BLOCK_BIND:
 		return tcf_block_cb_register(f->block,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 78afe75129ab..382bb93cb090 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -317,7 +317,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
 	payload.dst_ipv4 = flow->daddr;
 
 	/* If entry has expired send dst IP with all other fields 0. */
-	if (!(neigh->nud_state & NUD_VALID)) {
+	if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
 		nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
 		/* Trigger ARP to verify invalid neighbour state. */
 		neigh_event_send(neigh, NULL);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 46b76d5a726c..152283d7e59c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -240,7 +240,6 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
 		return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
 
 	pf->limit_vfs = ~0;
-	pci_sriov_set_totalvfs(pf->pdev, 0); /* 0 is unset */
 	/* Allow any setting for backwards compatibility if symbol not found */
 	if (err == -ENOENT)
 		return 0;
@@ -668,7 +667,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 
 	err = nfp_net_pci_probe(pf);
 	if (err)
-		goto err_sriov_unlimit;
+		goto err_fw_unload;
 
 	err = nfp_hwmon_register(pf);
 	if (err) {
@@ -680,8 +679,6 @@ static int nfp_pci_probe(struct pci_dev *pdev,
 
 err_net_remove:
 	nfp_net_pci_remove(pf);
-err_sriov_unlimit:
-	pci_sriov_set_totalvfs(pf->pdev, 0);
 err_fw_unload:
 	kfree(pf->rtbl);
 	nfp_mip_close(pf->mip);
@@ -715,7 +712,6 @@ static void nfp_pci_remove(struct pci_dev *pdev)
 	nfp_hwmon_unregister(pf);
 
 	nfp_pcie_sriov_disable(pdev);
-	pci_sriov_set_totalvfs(pf->pdev, 0);
 
 	nfp_net_pci_remove(pf);
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
index cd34097b79f1..37a6d7822a38 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
@@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
 	err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
 			   nfp_resource_address(state->res),
 			   fwinf, sizeof(*fwinf));
-	if (err < sizeof(*fwinf))
+	if (err < (int)sizeof(*fwinf))
 		goto err_release;
 
 	if (!nffw_res_flg_init_get(fwinf))
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 00db3401b898..1dfaccd151f0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -502,6 +502,7 @@ enum BAR_ID {
 struct qed_nvm_image_info {
 	u32 num_images;
 	struct bist_nvm_image_att *image_att;
+	bool valid;
 };
 
 #define DRV_MODULE_VERSION \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index f0b01385d5cb..e0680ce91328 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -709,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
 	p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
 
 	memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
-	       ARRAY_SIZE(p_local->local_chassis_id));
+	       sizeof(p_local->local_chassis_id));
 	memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
-	       ARRAY_SIZE(p_local->local_port_id));
+	       sizeof(p_local->local_port_id));
 }
 
 static void
@@ -723,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
 	p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
 
 	memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
-	       ARRAY_SIZE(p_remote->peer_chassis_id));
+	       sizeof(p_remote->peer_chassis_id));
 	memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
-	       ARRAY_SIZE(p_remote->peer_port_id));
+	       sizeof(p_remote->peer_port_id));
 }
 
 static int
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index a14e48489029..4340c4c90bcb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -6723,7 +6723,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
 	format_idx = header & MFW_TRACE_EVENTID_MASK;
 
 	/* Skip message if its index doesn't exist in the meta data */
-	if (format_idx > s_mcp_trace_meta.formats_num) {
+	if (format_idx >= s_mcp_trace_meta.formats_num) {
 		u8 format_size =
 			(u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
 			     MFW_TRACE_PRM_SIZE_SHIFT);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 329781cda77f..e5249b4741d0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
 		DP_INFO(p_hwfn, "Failed to update driver state\n");
 
 	rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
-				       QED_OV_ESWITCH_VEB);
+				       QED_OV_ESWITCH_NONE);
 	if (rc)
 		DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
 }
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 99973e10b179..5ede6408649d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -665,7 +665,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
 
 	p_ramrod->common.update_approx_mcast_flg = 1;
 	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
-		u32 *p_bins = (u32 *)p_params->bins;
+		u32 *p_bins = p_params->bins;
 
 		p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
 	}
@@ -1476,8 +1476,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
 			    enum spq_mode comp_mode,
 			    struct qed_spq_comp_cb *p_comp_data)
 {
-	unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
 	struct vport_update_ramrod_data *p_ramrod = NULL;
+	u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
 	struct qed_spq_entry *p_ent = NULL;
 	struct qed_sp_init_data init_data;
 	u8 abs_vport_id = 0;
@@ -1513,26 +1513,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
 	/* explicitly clear out the entire vector */
 	memset(&p_ramrod->approx_mcast.bins, 0,
 	       sizeof(p_ramrod->approx_mcast.bins));
-	memset(bins, 0, sizeof(unsigned long) *
-	       ETH_MULTICAST_MAC_BINS_IN_REGS);
+	memset(bins, 0, sizeof(bins));
 	/* filter ADD op is explicit set op and it removes
 	 * any existing filters for the vport
 	 */
 	if (p_filter_cmd->opcode == QED_FILTER_ADD) {
 		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
-			u32 bit;
+			u32 bit, nbits;
 
 			bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
-			__set_bit(bit, bins);
+			nbits = sizeof(u32) * BITS_PER_BYTE;
+			bins[bit / nbits] |= 1 << (bit % nbits);
 		}
 
 		/* Convert to correct endianity */
 		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
 			struct vport_update_ramrod_mcast *p_ramrod_bins;
-			u32 *p_bins = (u32 *)bins;
 
 			p_ramrod_bins = &p_ramrod->approx_mcast;
-			p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
+			p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
 		}
 	}
 
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 806a8da257e9..8d80f1095d17 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -215,7 +215,7 @@ struct qed_sp_vport_update_params {
 	u8 anti_spoofing_en;
 	u8 update_accept_any_vlan_flg;
 	u8 accept_any_vlan;
-	unsigned long bins[8];
+	u32 bins[8];
 	struct qed_rss_params *rss_params;
 	struct qed_filter_accept_flags accept_flags;
 	struct qed_sge_tpa_params *sge_tpa_params;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 5c10fd7210c3..758a9a5127fa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -371,7 +371,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
 		goto err2;
 	}
 
-	DP_INFO(cdev, "qed_probe completed successffuly\n");
+	DP_INFO(cdev, "qed_probe completed successfully\n");
 
 	return cdev;
 
@@ -789,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
 	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
 	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
 
+	if (is_kdump_kernel()) {
+		DP_INFO(cdev,
+			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+			cdev->int_params.in.min_msix_cnt);
+		cdev->int_params.in.num_vectors =
+			cdev->int_params.in.min_msix_cnt;
+	}
+
 	rc = qed_set_int_mode(cdev, false);
 	if (rc) {
 		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 4e0b443c9519..cdd645024a32 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -592,6 +592,9 @@ int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
 	*o_mcp_resp = mb_params.mcp_resp;
 	*o_mcp_param = mb_params.mcp_param;
 
+	/* nvm_info needs to be updated */
+	p_hwfn->nvm_info.valid = false;
+
 	return 0;
 }
 
@@ -1208,6 +1211,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
 		break;
 	default:
 		p_link->speed = 0;
+		p_link->link_up = 0;
 	}
 
 	if (p_link->link_up && p_link->speed)
@@ -1305,9 +1309,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
 	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
 	phy_cfg.adv_speed = params->speed.advertised_speeds;
 	phy_cfg.loopback_mode = params->loopback_mode;
-	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
-		if (params->eee.enable)
-			phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
+
+	/* There are MFWs that share this capability regardless of whether
+	 * this is feasible or not. And given that at the very least adv_caps
+	 * would be set internally by qed, we want to make sure LFA would
+	 * still work.
+	 */
+	if ((p_hwfn->mcp_info->capabilities &
+	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
+		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
 		if (params->eee.tx_lpi_enable)
 			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
 		if (params->eee.adv_caps & QED_EEE_1G_ADV)
@@ -2555,11 +2565,14 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
 
 int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
 {
-	struct qed_nvm_image_info *nvm_info = &p_hwfn->nvm_info;
+	struct qed_nvm_image_info nvm_info;
 	struct qed_ptt *p_ptt;
 	int rc;
 	u32 i;
 
+	if (p_hwfn->nvm_info.valid)
+		return 0;
+
 	p_ptt = qed_ptt_acquire(p_hwfn);
 	if (!p_ptt) {
 		DP_ERR(p_hwfn, "failed to acquire ptt\n");
@@ -2567,29 +2580,29 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
 	}
 
 	/* Acquire from MFW the amount of available images */
-	nvm_info->num_images = 0;
+	nvm_info.num_images = 0;
 	rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
-					     p_ptt, &nvm_info->num_images);
+					     p_ptt, &nvm_info.num_images);
 	if (rc == -EOPNOTSUPP) {
 		DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
 		goto out;
-	} else if (rc || !nvm_info->num_images) {
+	} else if (rc || !nvm_info.num_images) {
 		DP_ERR(p_hwfn, "Failed getting number of images\n");
 		goto err0;
 	}
 
-	nvm_info->image_att = kmalloc_array(nvm_info->num_images,
-					    sizeof(struct bist_nvm_image_att),
-					    GFP_KERNEL);
-	if (!nvm_info->image_att) {
+	nvm_info.image_att = kmalloc_array(nvm_info.num_images,
+					   sizeof(struct bist_nvm_image_att),
+					   GFP_KERNEL);
+	if (!nvm_info.image_att) {
 		rc = -ENOMEM;
 		goto err0;
 	}
 
 	/* Iterate over images and get their attributes */
-	for (i = 0; i < nvm_info->num_images; i++) {
+	for (i = 0; i < nvm_info.num_images; i++) {
 		rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
2592 &nvm_info->image_att[i], i); 2605 &nvm_info.image_att[i], i);
2593 if (rc) { 2606 if (rc) {
2594 DP_ERR(p_hwfn, 2607 DP_ERR(p_hwfn,
2595 "Failed getting image index %d attributes\n", i); 2608 "Failed getting image index %d attributes\n", i);
@@ -2597,14 +2610,22 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
2597 } 2610 }
2598 2611
2599 DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i, 2612 DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
2600 nvm_info->image_att[i].len); 2613 nvm_info.image_att[i].len);
2601 } 2614 }
2602out: 2615out:
2616 /* Update hwfn's nvm_info */
2617 if (nvm_info.num_images) {
2618 p_hwfn->nvm_info.num_images = nvm_info.num_images;
2619 kfree(p_hwfn->nvm_info.image_att);
2620 p_hwfn->nvm_info.image_att = nvm_info.image_att;
2621 p_hwfn->nvm_info.valid = true;
2622 }
2623
2603 qed_ptt_release(p_hwfn, p_ptt); 2624 qed_ptt_release(p_hwfn, p_ptt);
2604 return 0; 2625 return 0;
2605 2626
2606err1: 2627err1:
2607 kfree(nvm_info->image_att); 2628 kfree(nvm_info.image_att);
2608err0: 2629err0:
2609 qed_ptt_release(p_hwfn, p_ptt); 2630 qed_ptt_release(p_hwfn, p_ptt);
2610 return rc; 2631 return rc;
@@ -2641,6 +2662,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
2641 return -EINVAL; 2662 return -EINVAL;
2642 } 2663 }
2643 2664
2665 qed_mcp_nvm_info_populate(p_hwfn);
2644 for (i = 0; i < p_hwfn->nvm_info.num_images; i++) 2666 for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
2645 if (type == p_hwfn->nvm_info.image_att[i].image_type) 2667 if (type == p_hwfn->nvm_info.image_att[i].image_type)
2646 break; 2668 break;
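
The qed_mcp.c changes above follow a commit-on-success idiom: the image table is rebuilt in a stack-local struct, and only once every query succeeded is it swapped into the long-lived p_hwfn->nvm_info (freeing the previous array) and marked valid; qed_mcp_nvm_wr_cmd() clears the valid flag so the next lookup repopulates. A hedged userspace sketch of the same idiom, with the MFW query mocked:

#include <stdbool.h>
#include <stdlib.h>

struct nvm_info {
	bool valid;
	unsigned int num_images;
	int *image_att;            /* stands in for bist_nvm_image_att[] */
};

static struct nvm_info cached;     /* plays the role of p_hwfn->nvm_info */

/* Mock firmware query; the real driver asks the MFW here. */
static int query_images(unsigned int *num, int **att)
{
	*num = 4;
	*att = calloc(*num, sizeof(**att));
	return *att ? 0 : -1;
}

static int nvm_info_populate(void)
{
	struct nvm_info tmp = { 0 };

	if (cached.valid)
		return 0;               /* still fresh, nothing to do */

	if (query_images(&tmp.num_images, &tmp.image_att))
		return -1;              /* failure leaves the cache untouched */

	/* Commit: swap in the new table, drop the old one. */
	free(cached.image_att);
	cached.image_att = tmp.image_att;
	cached.num_images = tmp.num_images;
	cached.valid = true;
	return 0;
}

/* Any NVM write invalidates the cache so the next user repopulates. */
static void nvm_write_done(void)
{
	cached.valid = false;
}

int main(void)
{
	nvm_info_populate();    /* fills the cache */
	nvm_write_done();       /* flash contents changed */
	nvm_info_populate();    /* transparently refreshes */
	free(cached.image_att);
	return 0;
}
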
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index f01bf52bc381..26e918d7f2f9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -2831,7 +2831,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
2831 2831
2832 p_data->update_approx_mcast_flg = 1; 2832 p_data->update_approx_mcast_flg = 1;
2833 memcpy(p_data->bins, p_mcast_tlv->bins, 2833 memcpy(p_data->bins, p_mcast_tlv->bins,
2834 sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); 2834 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
2835 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST; 2835 *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
2836} 2836}
2837 2837
@@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
4513static int qed_sriov_enable(struct qed_dev *cdev, int num) 4513static int qed_sriov_enable(struct qed_dev *cdev, int num)
4514{ 4514{
4515 struct qed_iov_vf_init_params params; 4515 struct qed_iov_vf_init_params params;
4516 struct qed_hwfn *hwfn;
4517 struct qed_ptt *ptt;
4516 int i, j, rc; 4518 int i, j, rc;
4517 4519
4518 if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) { 4520 if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
4525 4527
4526 /* Initialize HW for VF access */ 4528 /* Initialize HW for VF access */
4527 for_each_hwfn(cdev, j) { 4529 for_each_hwfn(cdev, j) {
4528 struct qed_hwfn *hwfn = &cdev->hwfns[j]; 4530 hwfn = &cdev->hwfns[j];
4529 struct qed_ptt *ptt = qed_ptt_acquire(hwfn); 4531 ptt = qed_ptt_acquire(hwfn);
4530 4532
4531 /* Make sure not to use more than 16 queues per VF */ 4533 /* Make sure not to use more than 16 queues per VF */
4532 params.num_queues = min_t(int, 4534 params.num_queues = min_t(int,
@@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
4562 goto err; 4564 goto err;
4563 } 4565 }
4564 4566
4567 hwfn = QED_LEADING_HWFN(cdev);
4568 ptt = qed_ptt_acquire(hwfn);
4569 if (!ptt) {
4570 DP_ERR(hwfn, "Failed to acquire ptt\n");
4571 rc = -EBUSY;
4572 goto err;
4573 }
4574
4575 rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
4576 if (rc)
4577 DP_INFO(cdev, "Failed to update eswitch mode\n");
4578 qed_ptt_release(hwfn, ptt);
4579
4565 return num; 4580 return num;
4566 4581
4567err: 4582err:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 2d7fcd6a0777..be6ddde1a104 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
1126 resp_size += sizeof(struct pfvf_def_resp_tlv); 1126 resp_size += sizeof(struct pfvf_def_resp_tlv);
1127 1127
1128 memcpy(p_mcast_tlv->bins, p_params->bins, 1128 memcpy(p_mcast_tlv->bins, p_params->bins,
1129 sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS); 1129 sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
1130 } 1130 }
1131 1131
1132 update_rx = p_params->accept_flags.update_rx_mode_config; 1132 update_rx = p_params->accept_flags.update_rx_mode_config;
@@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
1272 u32 bit; 1272 u32 bit;
1273 1273
1274 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]); 1274 bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1275 __set_bit(bit, sp_params.bins); 1275 sp_params.bins[bit / 32] |= 1 << (bit % 32);
1276 } 1276 }
1277 } 1277 }
1278 1278
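
The one-line qed_vf.c change above replaces __set_bit() on an unsigned long array with explicit u32 indexing, so a given bin lands in the same 32-bit word on every host, which matters once the array crosses the PF/VF channel as little-endian u32 words. A small standalone demonstration:

#include <stdint.h>
#include <stdio.h>

#define ETH_MULTICAST_MAC_BINS_IN_REGS 8   /* 8 x 32 = 256 bins */

int main(void)
{
	uint32_t bins[ETH_MULTICAST_MAC_BINS_IN_REGS] = { 0 };
	unsigned int bit = 40;   /* some hash bin computed from a MAC */

	/* Word-stable: bit 40 is always bit 8 of bins[1]. */
	bins[bit / 32] |= 1u << (bit % 32);

	printf("bins[1] = 0x%08x\n", bins[1]);   /* prints 0x00000100 */
	return 0;
}

With __set_bit() on unsigned long[], the (u32 *) view of the array depends on the host's word size and byte order; on a 64-bit big-endian kernel the two 32-bit halves of each long come out swapped. The explicit form avoids reinterpreting memory altogether.
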
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 4f05d5eb3cf5..033409db86ae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv {
392 struct channel_tlv tl; 392 struct channel_tlv tl;
393 u8 padding[4]; 393 u8 padding[4];
394 394
395 u64 bins[8]; 395 /* There are only 256 approx bins, and in HSI they're divided into
396 * 32-bit values. As old VFs used to set-bit to the values on its side,
397 * the upper half of the array is never expected to contain any data.
398 */
399 u64 bins[4];
400 u64 obsolete_bins[4];
396}; 401};
397 402
398struct vfpf_vport_update_accept_param_tlv { 403struct vfpf_vport_update_accept_param_tlv {
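
Note how the TLV above keeps its wire size: 256 bins genuinely fit in four 64-bit words, and the freed half is retained as obsolete_bins[] padding so old VFs and new PFs still agree on the message length. That invariant can be pinned down at compile time; a sketch with illustrative struct names, not the driver's:

#include <assert.h>
#include <stdint.h>

struct mcast_bin_tlv_old {
	uint64_t bins[8];
};

struct mcast_bin_tlv_new {
	uint64_t bins[4];           /* 256 bins fit in 4 x 64 bits */
	uint64_t obsolete_bins[4];  /* padding keeps the old wire size */
};

/* Old and new channel peers must agree on the TLV length. */
static_assert(sizeof(struct mcast_bin_tlv_old) ==
	      sizeof(struct mcast_bin_tlv_new),
	      "mcast bin TLV size must not change");

int main(void) { return 0; }
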
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 02adb513f475..013ff567283c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
337{ 337{
338 struct qede_ptp *ptp = edev->ptp; 338 struct qede_ptp *ptp = edev->ptp;
339 339
340 if (!ptp) 340 if (!ptp) {
341 return -EIO; 341 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
342 SOF_TIMESTAMPING_RX_SOFTWARE |
343 SOF_TIMESTAMPING_SOFTWARE;
344 info->phc_index = -1;
345
346 return 0;
347 }
342 348
343 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 349 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
344 SOF_TIMESTAMPING_RX_SOFTWARE | 350 SOF_TIMESTAMPING_RX_SOFTWARE |
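
Rather than failing the timestamping-info query with -EIO when PTP was never set up, qede now reports plain software timestamping with no PTP hardware clock (phc_index = -1), the conventional answer for a device without one. A hedged sketch of that fallback shape, with simplified stand-ins for the ethtool flags and struct:

#include <stdbool.h>
#include <stdio.h>

#define SOF_TIMESTAMPING_TX_SOFTWARE (1 << 1)
#define SOF_TIMESTAMPING_RX_SOFTWARE (1 << 3)
#define SOF_TIMESTAMPING_SOFTWARE    (1 << 4)

struct ts_info {
	unsigned int so_timestamping;
	int phc_index;
};

static int get_ts_info(bool have_ptp, struct ts_info *info)
{
	/* Always valid: pure software timestamps, no PTP hardware clock. */
	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
				SOF_TIMESTAMPING_RX_SOFTWARE |
				SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;

	if (!have_ptp)
		return 0;       /* graceful fallback instead of an error */

	/* ... a real driver would add HW capabilities and the PHC index ... */
	return 0;
}

int main(void)
{
	struct ts_info info;

	get_ts_info(false, &info);
	printf("phc_index=%d caps=0x%x\n", info.phc_index, info.so_timestamping);
	return 0;
}
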
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 891f03a7a33d..8d7b9bb910f2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1128,6 +1128,8 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
1128 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 1128 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
1129 1129
1130 ret = kstrtoul(buf, 16, &data); 1130 ret = kstrtoul(buf, 16, &data);
1131 if (ret)
1132 return ret;
1131 1133
1132 switch (data) { 1134 switch (data) {
1133 case QLC_83XX_FLASH_SECTOR_ERASE_CMD: 1135 case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5803cd6db406..206f0266463e 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -658,7 +658,7 @@ qcaspi_netdev_open(struct net_device *dev)
658 return ret; 658 return ret;
659 } 659 }
660 660
661 netif_start_queue(qca->net_dev); 661 /* SPI thread takes care of TX queue */
662 662
663 return 0; 663 return 0;
664} 664}
@@ -760,6 +760,9 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
760 qca->net_dev->stats.tx_errors++; 760 qca->net_dev->stats.tx_errors++;
761 /* Trigger tx queue flush and QCA7000 reset */ 761 /* Trigger tx queue flush and QCA7000 reset */
762 qca->sync = QCASPI_SYNC_UNKNOWN; 762 qca->sync = QCASPI_SYNC_UNKNOWN;
763
764 if (qca->spi_thread)
765 wake_up_process(qca->spi_thread);
763} 766}
764 767
765static int 768static int
@@ -878,22 +881,22 @@ qca_spi_probe(struct spi_device *spi)
878 881
879 if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) || 882 if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
880 (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) { 883 (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
881 dev_info(&spi->dev, "Invalid clkspeed: %d\n", 884 dev_err(&spi->dev, "Invalid clkspeed: %d\n",
882 qcaspi_clkspeed); 885 qcaspi_clkspeed);
883 return -EINVAL; 886 return -EINVAL;
884 } 887 }
885 888
886 if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) || 889 if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
887 (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) { 890 (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
888 dev_info(&spi->dev, "Invalid burst len: %d\n", 891 dev_err(&spi->dev, "Invalid burst len: %d\n",
889 qcaspi_burst_len); 892 qcaspi_burst_len);
890 return -EINVAL; 893 return -EINVAL;
891 } 894 }
892 895
893 if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) || 896 if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
894 (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) { 897 (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
895 dev_info(&spi->dev, "Invalid pluggable: %d\n", 898 dev_err(&spi->dev, "Invalid pluggable: %d\n",
896 qcaspi_pluggable); 899 qcaspi_pluggable);
897 return -EINVAL; 900 return -EINVAL;
898 } 901 }
899 902
@@ -955,8 +958,8 @@ qca_spi_probe(struct spi_device *spi)
955 } 958 }
956 959
957 if (register_netdev(qcaspi_devs)) { 960 if (register_netdev(qcaspi_devs)) {
958 dev_info(&spi->dev, "Unable to register net device %s\n", 961 dev_err(&spi->dev, "Unable to register net device %s\n",
959 qcaspi_devs->name); 962 qcaspi_devs->name);
960 free_netdev(qcaspi_devs); 963 free_netdev(qcaspi_devs);
961 return -EFAULT; 964 return -EFAULT;
962 } 965 }
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f4cae2be0fda..eaedc11ed686 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7734,8 +7734,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7734 return rc; 7734 return rc;
7735 } 7735 }
7736 7736
7737 /* override BIOS settings, use userspace tools to enable WOL */ 7737 tp->saved_wolopts = __rtl8169_get_wol(tp);
7738 __rtl8169_set_wol(tp, 0);
7739 7738
7740 if (rtl_tbi_enabled(tp)) { 7739 if (rtl_tbi_enabled(tp)) {
7741 tp->set_speed = rtl8169_set_speed_tbi; 7740 tp->set_speed = rtl8169_set_speed_tbi;
@@ -7789,6 +7788,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7789 NETIF_F_HW_VLAN_CTAG_RX; 7788 NETIF_F_HW_VLAN_CTAG_RX;
7790 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | 7789 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
7791 NETIF_F_HIGHDMA; 7790 NETIF_F_HIGHDMA;
7791 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7792 7792
7793 tp->cp_cmd |= RxChkSum | RxVlan; 7793 tp->cp_cmd |= RxChkSum | RxVlan;
7794 7794
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 68f122140966..0d811c02ff34 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -980,6 +980,13 @@ static void ravb_adjust_link(struct net_device *ndev)
980 struct ravb_private *priv = netdev_priv(ndev); 980 struct ravb_private *priv = netdev_priv(ndev);
981 struct phy_device *phydev = ndev->phydev; 981 struct phy_device *phydev = ndev->phydev;
982 bool new_state = false; 982 bool new_state = false;
983 unsigned long flags;
984
985 spin_lock_irqsave(&priv->lock, flags);
986
987 /* Disable TX and RX right over here, if E-MAC change is ignored */
988 if (priv->no_avb_link)
989 ravb_rcv_snd_disable(ndev);
983 990
984 if (phydev->link) { 991 if (phydev->link) {
985 if (phydev->duplex != priv->duplex) { 992 if (phydev->duplex != priv->duplex) {
@@ -997,18 +1004,21 @@ static void ravb_adjust_link(struct net_device *ndev)
997 ravb_modify(ndev, ECMR, ECMR_TXF, 0); 1004 ravb_modify(ndev, ECMR, ECMR_TXF, 0);
998 new_state = true; 1005 new_state = true;
999 priv->link = phydev->link; 1006 priv->link = phydev->link;
1000 if (priv->no_avb_link)
1001 ravb_rcv_snd_enable(ndev);
1002 } 1007 }
1003 } else if (priv->link) { 1008 } else if (priv->link) {
1004 new_state = true; 1009 new_state = true;
1005 priv->link = 0; 1010 priv->link = 0;
1006 priv->speed = 0; 1011 priv->speed = 0;
1007 priv->duplex = -1; 1012 priv->duplex = -1;
1008 if (priv->no_avb_link)
1009 ravb_rcv_snd_disable(ndev);
1010 } 1013 }
1011 1014
1015 /* Enable TX and RX right over here, if E-MAC change is ignored */
1016 if (priv->no_avb_link && phydev->link)
1017 ravb_rcv_snd_enable(ndev);
1018
1019 mmiowb();
1020 spin_unlock_irqrestore(&priv->lock, flags);
1021
1012 if (new_state && netif_msg_link(priv)) 1022 if (new_state && netif_msg_link(priv))
1013 phy_print_status(phydev); 1023 phy_print_status(phydev);
1014} 1024}
@@ -1096,75 +1106,6 @@ static int ravb_phy_start(struct net_device *ndev)
1096 return 0; 1106 return 0;
1097} 1107}
1098 1108
1099static int ravb_get_link_ksettings(struct net_device *ndev,
1100 struct ethtool_link_ksettings *cmd)
1101{
1102 struct ravb_private *priv = netdev_priv(ndev);
1103 unsigned long flags;
1104
1105 if (!ndev->phydev)
1106 return -ENODEV;
1107
1108 spin_lock_irqsave(&priv->lock, flags);
1109 phy_ethtool_ksettings_get(ndev->phydev, cmd);
1110 spin_unlock_irqrestore(&priv->lock, flags);
1111
1112 return 0;
1113}
1114
1115static int ravb_set_link_ksettings(struct net_device *ndev,
1116 const struct ethtool_link_ksettings *cmd)
1117{
1118 struct ravb_private *priv = netdev_priv(ndev);
1119 unsigned long flags;
1120 int error;
1121
1122 if (!ndev->phydev)
1123 return -ENODEV;
1124
1125 spin_lock_irqsave(&priv->lock, flags);
1126
1127 /* Disable TX and RX */
1128 ravb_rcv_snd_disable(ndev);
1129
1130 error = phy_ethtool_ksettings_set(ndev->phydev, cmd);
1131 if (error)
1132 goto error_exit;
1133
1134 if (cmd->base.duplex == DUPLEX_FULL)
1135 priv->duplex = 1;
1136 else
1137 priv->duplex = 0;
1138
1139 ravb_set_duplex(ndev);
1140
1141error_exit:
1142 mdelay(1);
1143
1144 /* Enable TX and RX */
1145 ravb_rcv_snd_enable(ndev);
1146
1147 mmiowb();
1148 spin_unlock_irqrestore(&priv->lock, flags);
1149
1150 return error;
1151}
1152
1153static int ravb_nway_reset(struct net_device *ndev)
1154{
1155 struct ravb_private *priv = netdev_priv(ndev);
1156 int error = -ENODEV;
1157 unsigned long flags;
1158
1159 if (ndev->phydev) {
1160 spin_lock_irqsave(&priv->lock, flags);
1161 error = phy_start_aneg(ndev->phydev);
1162 spin_unlock_irqrestore(&priv->lock, flags);
1163 }
1164
1165 return error;
1166}
1167
1168static u32 ravb_get_msglevel(struct net_device *ndev) 1109static u32 ravb_get_msglevel(struct net_device *ndev)
1169{ 1110{
1170 struct ravb_private *priv = netdev_priv(ndev); 1111 struct ravb_private *priv = netdev_priv(ndev);
@@ -1377,7 +1318,7 @@ static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
1377} 1318}
1378 1319
1379static const struct ethtool_ops ravb_ethtool_ops = { 1320static const struct ethtool_ops ravb_ethtool_ops = {
1380 .nway_reset = ravb_nway_reset, 1321 .nway_reset = phy_ethtool_nway_reset,
1381 .get_msglevel = ravb_get_msglevel, 1322 .get_msglevel = ravb_get_msglevel,
1382 .set_msglevel = ravb_set_msglevel, 1323 .set_msglevel = ravb_set_msglevel,
1383 .get_link = ethtool_op_get_link, 1324 .get_link = ethtool_op_get_link,
@@ -1387,8 +1328,8 @@ static const struct ethtool_ops ravb_ethtool_ops = {
1387 .get_ringparam = ravb_get_ringparam, 1328 .get_ringparam = ravb_get_ringparam,
1388 .set_ringparam = ravb_set_ringparam, 1329 .set_ringparam = ravb_set_ringparam,
1389 .get_ts_info = ravb_get_ts_info, 1330 .get_ts_info = ravb_get_ts_info,
1390 .get_link_ksettings = ravb_get_link_ksettings, 1331 .get_link_ksettings = phy_ethtool_get_link_ksettings,
1391 .set_link_ksettings = ravb_set_link_ksettings, 1332 .set_link_ksettings = phy_ethtool_set_link_ksettings,
1392 .get_wol = ravb_get_wol, 1333 .get_wol = ravb_get_wol,
1393 .set_wol = ravb_set_wol, 1334 .set_wol = ravb_set_wol,
1394}; 1335};
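
The ravb rework above (and the matching sh_eth change below) closes a race in the link handler: the whole update now runs under the driver's spinlock, RX/TX are quiesced up front whenever the E-MAC link signal is ignored, and they are re-enabled at the end only if the PHY actually reports link-up. The open-coded ksettings and nway handlers become redundant and are dropped in favor of the generic phy_ethtool_* helpers. The locking shape, sketched with a pthread mutex standing in for the irq-safe spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;  /* ~ priv->lock */
static bool no_avb_link = true;   /* E-MAC link signal is being ignored */

static void rcv_snd_disable(void) { puts("RX/TX off"); }
static void rcv_snd_enable(void)  { puts("RX/TX on"); }

static void adjust_link(bool phy_link_up)
{
	pthread_mutex_lock(&lock);

	/* Quiesce first, so reconfiguration never races with traffic. */
	if (no_avb_link)
		rcv_snd_disable();

	/* ... update duplex/speed registers from the PHY state here ... */

	/* Re-enable only when the PHY actually has link. */
	if (no_avb_link && phy_link_up)
		rcv_snd_enable();

	pthread_mutex_unlock(&lock);
}

int main(void)
{
	adjust_link(true);    /* link came up: off, reprogram, on */
	adjust_link(false);   /* link lost: off and stays off */
	return 0;
}
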
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e9007b613f17..5614fd231bbe 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1927,8 +1927,15 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1927{ 1927{
1928 struct sh_eth_private *mdp = netdev_priv(ndev); 1928 struct sh_eth_private *mdp = netdev_priv(ndev);
1929 struct phy_device *phydev = ndev->phydev; 1929 struct phy_device *phydev = ndev->phydev;
1930 unsigned long flags;
1930 int new_state = 0; 1931 int new_state = 0;
1931 1932
1933 spin_lock_irqsave(&mdp->lock, flags);
1934
1935 /* Disable TX and RX right over here, if E-MAC change is ignored */
1936 if (mdp->cd->no_psr || mdp->no_ether_link)
1937 sh_eth_rcv_snd_disable(ndev);
1938
1932 if (phydev->link) { 1939 if (phydev->link) {
1933 if (phydev->duplex != mdp->duplex) { 1940 if (phydev->duplex != mdp->duplex) {
1934 new_state = 1; 1941 new_state = 1;
@@ -1947,18 +1954,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1947 sh_eth_modify(ndev, ECMR, ECMR_TXF, 0); 1954 sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
1948 new_state = 1; 1955 new_state = 1;
1949 mdp->link = phydev->link; 1956 mdp->link = phydev->link;
1950 if (mdp->cd->no_psr || mdp->no_ether_link)
1951 sh_eth_rcv_snd_enable(ndev);
1952 } 1957 }
1953 } else if (mdp->link) { 1958 } else if (mdp->link) {
1954 new_state = 1; 1959 new_state = 1;
1955 mdp->link = 0; 1960 mdp->link = 0;
1956 mdp->speed = 0; 1961 mdp->speed = 0;
1957 mdp->duplex = -1; 1962 mdp->duplex = -1;
1958 if (mdp->cd->no_psr || mdp->no_ether_link)
1959 sh_eth_rcv_snd_disable(ndev);
1960 } 1963 }
1961 1964
1965 /* Enable TX and RX right over here, if E-MAC change is ignored */
1966 if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
1967 sh_eth_rcv_snd_enable(ndev);
1968
1969 mmiowb();
1970 spin_unlock_irqrestore(&mdp->lock, flags);
1971
1962 if (new_state && netif_msg_link(mdp)) 1972 if (new_state && netif_msg_link(mdp))
1963 phy_print_status(phydev); 1973 phy_print_status(phydev);
1964} 1974}
@@ -2030,60 +2040,6 @@ static int sh_eth_phy_start(struct net_device *ndev)
2030 return 0; 2040 return 0;
2031} 2041}
2032 2042
2033static int sh_eth_get_link_ksettings(struct net_device *ndev,
2034 struct ethtool_link_ksettings *cmd)
2035{
2036 struct sh_eth_private *mdp = netdev_priv(ndev);
2037 unsigned long flags;
2038
2039 if (!ndev->phydev)
2040 return -ENODEV;
2041
2042 spin_lock_irqsave(&mdp->lock, flags);
2043 phy_ethtool_ksettings_get(ndev->phydev, cmd);
2044 spin_unlock_irqrestore(&mdp->lock, flags);
2045
2046 return 0;
2047}
2048
2049static int sh_eth_set_link_ksettings(struct net_device *ndev,
2050 const struct ethtool_link_ksettings *cmd)
2051{
2052 struct sh_eth_private *mdp = netdev_priv(ndev);
2053 unsigned long flags;
2054 int ret;
2055
2056 if (!ndev->phydev)
2057 return -ENODEV;
2058
2059 spin_lock_irqsave(&mdp->lock, flags);
2060
2061 /* disable tx and rx */
2062 sh_eth_rcv_snd_disable(ndev);
2063
2064 ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
2065 if (ret)
2066 goto error_exit;
2067
2068 if (cmd->base.duplex == DUPLEX_FULL)
2069 mdp->duplex = 1;
2070 else
2071 mdp->duplex = 0;
2072
2073 if (mdp->cd->set_duplex)
2074 mdp->cd->set_duplex(ndev);
2075
2076error_exit:
2077 mdelay(1);
2078
2079 /* enable tx and rx */
2080 sh_eth_rcv_snd_enable(ndev);
2081
2082 spin_unlock_irqrestore(&mdp->lock, flags);
2083
2084 return ret;
2085}
2086
2087/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the 2043/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
2088 * version must be bumped as well. Just adding registers up to that 2044 * version must be bumped as well. Just adding registers up to that
2089 * limit is fine, as long as the existing register indices don't 2045 * limit is fine, as long as the existing register indices don't
@@ -2263,22 +2219,6 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
2263 pm_runtime_put_sync(&mdp->pdev->dev); 2219 pm_runtime_put_sync(&mdp->pdev->dev);
2264} 2220}
2265 2221
2266static int sh_eth_nway_reset(struct net_device *ndev)
2267{
2268 struct sh_eth_private *mdp = netdev_priv(ndev);
2269 unsigned long flags;
2270 int ret;
2271
2272 if (!ndev->phydev)
2273 return -ENODEV;
2274
2275 spin_lock_irqsave(&mdp->lock, flags);
2276 ret = phy_start_aneg(ndev->phydev);
2277 spin_unlock_irqrestore(&mdp->lock, flags);
2278
2279 return ret;
2280}
2281
2282static u32 sh_eth_get_msglevel(struct net_device *ndev) 2222static u32 sh_eth_get_msglevel(struct net_device *ndev)
2283{ 2223{
2284 struct sh_eth_private *mdp = netdev_priv(ndev); 2224 struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2429,7 +2369,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2429static const struct ethtool_ops sh_eth_ethtool_ops = { 2369static const struct ethtool_ops sh_eth_ethtool_ops = {
2430 .get_regs_len = sh_eth_get_regs_len, 2370 .get_regs_len = sh_eth_get_regs_len,
2431 .get_regs = sh_eth_get_regs, 2371 .get_regs = sh_eth_get_regs,
2432 .nway_reset = sh_eth_nway_reset, 2372 .nway_reset = phy_ethtool_nway_reset,
2433 .get_msglevel = sh_eth_get_msglevel, 2373 .get_msglevel = sh_eth_get_msglevel,
2434 .set_msglevel = sh_eth_set_msglevel, 2374 .set_msglevel = sh_eth_set_msglevel,
2435 .get_link = ethtool_op_get_link, 2375 .get_link = ethtool_op_get_link,
@@ -2438,8 +2378,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
2438 .get_sset_count = sh_eth_get_sset_count, 2378 .get_sset_count = sh_eth_get_sset_count,
2439 .get_ringparam = sh_eth_get_ringparam, 2379 .get_ringparam = sh_eth_get_ringparam,
2440 .set_ringparam = sh_eth_set_ringparam, 2380 .set_ringparam = sh_eth_set_ringparam,
2441 .get_link_ksettings = sh_eth_get_link_ksettings, 2381 .get_link_ksettings = phy_ethtool_get_link_ksettings,
2442 .set_link_ksettings = sh_eth_set_link_ksettings, 2382 .set_link_ksettings = phy_ethtool_set_link_ksettings,
2443 .get_wol = sh_eth_get_wol, 2383 .get_wol = sh_eth_get_wol,
2444 .set_wol = sh_eth_set_wol, 2384 .set_wol = sh_eth_set_wol,
2445}; 2385};
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 23f0785c0573..7eeac3d6cfe8 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4288,9 +4288,9 @@ static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
4288 return -EPROTONOSUPPORT; 4288 return -EPROTONOSUPPORT;
4289} 4289}
4290 4290
4291static s32 efx_ef10_filter_insert(struct efx_nic *efx, 4291static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx,
4292 struct efx_filter_spec *spec, 4292 struct efx_filter_spec *spec,
4293 bool replace_equal) 4293 bool replace_equal)
4294{ 4294{
4295 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); 4295 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
4296 struct efx_ef10_nic_data *nic_data = efx->nic_data; 4296 struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -4307,7 +4307,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4307 bool is_mc_recip; 4307 bool is_mc_recip;
4308 s32 rc; 4308 s32 rc;
4309 4309
4310 down_read(&efx->filter_sem); 4310 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
4311 table = efx->filter_state; 4311 table = efx->filter_state;
4312 down_write(&table->lock); 4312 down_write(&table->lock);
4313 4313
@@ -4498,10 +4498,22 @@ out_unlock:
4498 if (rss_locked) 4498 if (rss_locked)
4499 mutex_unlock(&efx->rss_lock); 4499 mutex_unlock(&efx->rss_lock);
4500 up_write(&table->lock); 4500 up_write(&table->lock);
4501 up_read(&efx->filter_sem);
4502 return rc; 4501 return rc;
4503} 4502}
4504 4503
4504static s32 efx_ef10_filter_insert(struct efx_nic *efx,
4505 struct efx_filter_spec *spec,
4506 bool replace_equal)
4507{
4508 s32 ret;
4509
4510 down_read(&efx->filter_sem);
4511 ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal);
4512 up_read(&efx->filter_sem);
4513
4514 return ret;
4515}
4516
4505static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx) 4517static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
4506{ 4518{
4507 /* no need to do anything here on EF10 */ 4519 /* no need to do anything here on EF10 */
@@ -5285,7 +5297,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
5285 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID); 5297 EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
5286 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5298 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5287 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr); 5299 efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
5288 rc = efx_ef10_filter_insert(efx, &spec, true); 5300 rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5289 if (rc < 0) { 5301 if (rc < 0) {
5290 if (rollback) { 5302 if (rollback) {
5291 netif_info(efx, drv, efx->net_dev, 5303 netif_info(efx, drv, efx->net_dev,
@@ -5314,7 +5326,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
5314 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); 5326 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
5315 eth_broadcast_addr(baddr); 5327 eth_broadcast_addr(baddr);
5316 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 5328 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
5317 rc = efx_ef10_filter_insert(efx, &spec, true); 5329 rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5318 if (rc < 0) { 5330 if (rc < 0) {
5319 netif_warn(efx, drv, efx->net_dev, 5331 netif_warn(efx, drv, efx->net_dev,
5320 "Broadcast filter insert failed rc=%d\n", rc); 5332 "Broadcast filter insert failed rc=%d\n", rc);
@@ -5370,7 +5382,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
5370 if (vlan->vid != EFX_FILTER_VID_UNSPEC) 5382 if (vlan->vid != EFX_FILTER_VID_UNSPEC)
5371 efx_filter_set_eth_local(&spec, vlan->vid, NULL); 5383 efx_filter_set_eth_local(&spec, vlan->vid, NULL);
5372 5384
5373 rc = efx_ef10_filter_insert(efx, &spec, true); 5385 rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5374 if (rc < 0) { 5386 if (rc < 0) {
5375 const char *um = multicast ? "Multicast" : "Unicast"; 5387 const char *um = multicast ? "Multicast" : "Unicast";
5376 const char *encap_name = ""; 5388 const char *encap_name = "";
@@ -5430,7 +5442,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
5430 filter_flags, 0); 5442 filter_flags, 0);
5431 eth_broadcast_addr(baddr); 5443 eth_broadcast_addr(baddr);
5432 efx_filter_set_eth_local(&spec, vlan->vid, baddr); 5444 efx_filter_set_eth_local(&spec, vlan->vid, baddr);
5433 rc = efx_ef10_filter_insert(efx, &spec, true); 5445 rc = efx_ef10_filter_insert_locked(efx, &spec, true);
5434 if (rc < 0) { 5446 if (rc < 0) {
5435 netif_warn(efx, drv, efx->net_dev, 5447 netif_warn(efx, drv, efx->net_dev,
5436 "Broadcast filter insert failed rc=%d\n", 5448 "Broadcast filter insert failed rc=%d\n",
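
The sfc rework above is the classic _locked split: efx_ef10_filter_insert_locked() only checks that filter_sem is held, while a thin efx_ef10_filter_insert() wrapper takes it for external callers; the address-list and default-filter paths, which run with filter_sem already held, call the _locked variant directly and avoid a recursive down_read. The efx.c change below widens the same lock to write mode across table init and restore. A sketch of the pair, using a pthread rwlock as a stand-in for the rw_semaphore:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t filter_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Core worker: caller must already hold filter_sem (read or write);
 * the kernel version enforces this with WARN_ON(!rwsem_is_locked()). */
static int filter_insert_locked(int spec)
{
	printf("inserting filter %d with lock held\n", spec);
	return 0;
}

/* Public entry point: takes the lock, then delegates. */
static int filter_insert(int spec)
{
	int rc;

	pthread_rwlock_rdlock(&filter_sem);
	rc = filter_insert_locked(spec);
	pthread_rwlock_unlock(&filter_sem);
	return rc;
}

/* The restore path already holds the lock for writing... */
static void filter_table_restore(void)
{
	pthread_rwlock_wrlock(&filter_sem);
	filter_insert_locked(42);       /* ...so call the _locked variant */
	pthread_rwlock_unlock(&filter_sem);
}

int main(void)
{
	filter_insert(7);
	filter_table_restore();
	return 0;
}
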
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 570ec72266f3..ce3a177081a8 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1871,12 +1871,6 @@ static void efx_remove_filters(struct efx_nic *efx)
1871 up_write(&efx->filter_sem); 1871 up_write(&efx->filter_sem);
1872} 1872}
1873 1873
1874static void efx_restore_filters(struct efx_nic *efx)
1875{
1876 down_read(&efx->filter_sem);
1877 efx->type->filter_table_restore(efx);
1878 up_read(&efx->filter_sem);
1879}
1880 1874
1881/************************************************************************** 1875/**************************************************************************
1882 * 1876 *
@@ -2688,6 +2682,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
2688 efx_disable_interrupts(efx); 2682 efx_disable_interrupts(efx);
2689 2683
2690 mutex_lock(&efx->mac_lock); 2684 mutex_lock(&efx->mac_lock);
2685 down_write(&efx->filter_sem);
2691 mutex_lock(&efx->rss_lock); 2686 mutex_lock(&efx->rss_lock);
2692 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE && 2687 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
2693 method != RESET_TYPE_DATAPATH) 2688 method != RESET_TYPE_DATAPATH)
@@ -2745,9 +2740,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2745 if (efx->type->rx_restore_rss_contexts) 2740 if (efx->type->rx_restore_rss_contexts)
2746 efx->type->rx_restore_rss_contexts(efx); 2741 efx->type->rx_restore_rss_contexts(efx);
2747 mutex_unlock(&efx->rss_lock); 2742 mutex_unlock(&efx->rss_lock);
2748 down_read(&efx->filter_sem); 2743 efx->type->filter_table_restore(efx);
2749 efx_restore_filters(efx); 2744 up_write(&efx->filter_sem);
2750 up_read(&efx->filter_sem);
2751 if (efx->type->sriov_reset) 2745 if (efx->type->sriov_reset)
2752 efx->type->sriov_reset(efx); 2746 efx->type->sriov_reset(efx);
2753 2747
@@ -2764,6 +2758,7 @@ fail:
2764 efx->port_initialized = false; 2758 efx->port_initialized = false;
2765 2759
2766 mutex_unlock(&efx->rss_lock); 2760 mutex_unlock(&efx->rss_lock);
2761 up_write(&efx->filter_sem);
2767 mutex_unlock(&efx->mac_lock); 2762 mutex_unlock(&efx->mac_lock);
2768 2763
2769 return rc; 2764 return rc;
@@ -3473,7 +3468,9 @@ static int efx_pci_probe_main(struct efx_nic *efx)
3473 3468
3474 efx_init_napi(efx); 3469 efx_init_napi(efx);
3475 3470
3471 down_write(&efx->filter_sem);
3476 rc = efx->type->init(efx); 3472 rc = efx->type->init(efx);
3473 up_write(&efx->filter_sem);
3477 if (rc) { 3474 if (rc) {
3478 netif_err(efx, probe, efx->net_dev, 3475 netif_err(efx, probe, efx->net_dev,
3479 "failed to initialise NIC\n"); 3476 "failed to initialise NIC\n");
@@ -3765,7 +3762,9 @@ static int efx_pm_resume(struct device *dev)
3765 rc = efx->type->reset(efx, RESET_TYPE_ALL); 3762 rc = efx->type->reset(efx, RESET_TYPE_ALL);
3766 if (rc) 3763 if (rc)
3767 return rc; 3764 return rc;
3765 down_write(&efx->filter_sem);
3768 rc = efx->type->init(efx); 3766 rc = efx->type->init(efx);
3767 up_write(&efx->filter_sem);
3769 if (rc) 3768 if (rc)
3770 return rc; 3769 return rc;
3771 rc = efx_pm_thaw(dev); 3770 rc = efx_pm_thaw(dev);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 8edf20967c82..e045a5d6b938 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
2794 if (!state) 2794 if (!state)
2795 return -ENOMEM; 2795 return -ENOMEM;
2796 efx->filter_state = state; 2796 efx->filter_state = state;
2797 init_rwsem(&state->lock);
2797 2798
2798 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; 2799 table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
2799 table->id = EFX_FARCH_FILTER_TABLE_RX_IP; 2800 table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 2e6e2a96b4f2..f9a61f90cfbc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -37,7 +37,7 @@
37 * is done in the "stmmac files" 37 * is done in the "stmmac files"
38 */ 38 */
39 39
40/* struct emac_variant - Descrive dwmac-sun8i hardware variant 40/* struct emac_variant - Describe dwmac-sun8i hardware variant
41 * @default_syscon_value: The default value of the EMAC register in syscon 41 * @default_syscon_value: The default value of the EMAC register in syscon
42 * This value is used for disabling properly EMAC 42 * This value is used for disabling properly EMAC
43 * and used as a good starting value in case of the 43 * and used as a good starting value in case of the
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index d37f17ca62fe..65bc3556bd8f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -407,6 +407,16 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
407 } 407 }
408} 408}
409 409
410static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
411{
412 u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
413
414 value &= ~DMA_RBSZ_MASK;
415 value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
416
417 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
418}
419
410const struct stmmac_dma_ops dwmac4_dma_ops = { 420const struct stmmac_dma_ops dwmac4_dma_ops = {
411 .reset = dwmac4_dma_reset, 421 .reset = dwmac4_dma_reset,
412 .init = dwmac4_dma_init, 422 .init = dwmac4_dma_init,
@@ -431,6 +441,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
431 .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, 441 .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
432 .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, 442 .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
433 .enable_tso = dwmac4_enable_tso, 443 .enable_tso = dwmac4_enable_tso,
444 .set_bfsize = dwmac4_set_bfsize,
434}; 445};
435 446
436const struct stmmac_dma_ops dwmac410_dma_ops = { 447const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -457,4 +468,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
457 .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr, 468 .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
458 .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr, 469 .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
459 .enable_tso = dwmac4_enable_tso, 470 .enable_tso = dwmac4_enable_tso,
471 .set_bfsize = dwmac4_set_bfsize,
460}; 472};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index c63c1fe3f26b..22a4a6dbb1a4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -120,6 +120,8 @@
120 120
121/* DMA Rx Channel X Control register defines */ 121/* DMA Rx Channel X Control register defines */
122#define DMA_CONTROL_SR BIT(0) 122#define DMA_CONTROL_SR BIT(0)
123#define DMA_RBSZ_MASK GENMASK(14, 1)
124#define DMA_RBSZ_SHIFT 1
123 125
124/* Interrupt status per channel */ 126/* Interrupt status per channel */
125#define DMA_CHAN_STATUS_REB GENMASK(21, 19) 127#define DMA_CHAN_STATUS_REB GENMASK(21, 19)
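
The new set_bfsize callback is a plain read-modify-write of bits 14:1 of the per-channel RX control register, using the DMA_RBSZ_MASK/SHIFT defines above; stmmac_main.c further below invokes it once per RX channel with dma_buf_sz. A userspace model of dwmac4_set_bfsize() against an in-memory "register":

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)  (((~0u) << (l)) & (~0u >> (31 - (h))))
#define DMA_RBSZ_MASK  GENMASK(14, 1)
#define DMA_RBSZ_SHIFT 1

static uint32_t rx_control;   /* stands in for DMA_CHAN_RX_CONTROL(chan) */

static void set_bfsize(int bfsize)
{
	uint32_t value = rx_control;            /* readl() */

	value &= ~DMA_RBSZ_MASK;                /* clear the old RBSZ field */
	value |= ((uint32_t)bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
	rx_control = value;                     /* writel() */
}

int main(void)
{
	rx_control = 0x80000001;   /* unrelated bits must be preserved */
	set_bfsize(1536);
	printf("RX control = 0x%08x\n", rx_control);   /* 0x80000c01 */
	return 0;
}
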
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index e44e7b26ce82..fe8b536b13f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -183,6 +183,7 @@ struct stmmac_dma_ops {
183 void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); 183 void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
184 void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan); 184 void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
185 void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan); 185 void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
186 void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
186}; 187};
187 188
188#define stmmac_reset(__priv, __args...) \ 189#define stmmac_reset(__priv, __args...) \
@@ -235,6 +236,8 @@ struct stmmac_dma_ops {
235 stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args) 236 stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
236#define stmmac_enable_tso(__priv, __args...) \ 237#define stmmac_enable_tso(__priv, __args...) \
237 stmmac_do_void_callback(__priv, dma, enable_tso, __args) 238 stmmac_do_void_callback(__priv, dma, enable_tso, __args)
239#define stmmac_set_dma_bfsize(__priv, __args...) \
240 stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
238 241
239struct mac_device_info; 242struct mac_device_info;
240struct net_device; 243struct net_device;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index cba46b62a1cd..60f59abab009 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1804,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
1804 1804
1805 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, 1805 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
1806 rxfifosz, qmode); 1806 rxfifosz, qmode);
1807 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
1808 chan);
1807 } 1809 }
1808 1810
1809 for (chan = 0; chan < tx_channels_count; chan++) { 1811 for (chan = 0; chan < tx_channels_count; chan++) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 6d141f3931eb..72da77b94ecd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -94,7 +94,6 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
94/** 94/**
95 * stmmac_axi_setup - parse DT parameters for programming the AXI register 95 * stmmac_axi_setup - parse DT parameters for programming the AXI register
96 * @pdev: platform device 96 * @pdev: platform device
97 * @priv: driver private struct.
98 * Description: 97 * Description:
99 * if required, from device-tree the AXI internal register can be tuned 98 * if required, from device-tree the AXI internal register can be tuned
100 * by using platform parameters. 99 * by using platform parameters.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 16c3bfbe1992..757a3b37ae8a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -218,6 +218,7 @@ issue:
218 ret = of_mdiobus_register(bus, np1); 218 ret = of_mdiobus_register(bus, np1);
219 if (ret) { 219 if (ret) {
220 mdiobus_free(bus); 220 mdiobus_free(bus);
221 lp->mii_bus = NULL;
221 return ret; 222 return ret;
222 } 223 }
223 return 0; 224 return 0;
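
The one-line xilinx fix above NULLs the cached bus pointer after mdiobus_free(), so later teardown paths that test lp->mii_bus never operate on freed memory. The free-and-NULL idiom in miniature, with illustrative names:

#include <stdlib.h>

struct mii_bus { int id; };
struct local { struct mii_bus *mii_bus; };

static int mdio_setup(struct local *lp, int fail_register)
{
	lp->mii_bus = malloc(sizeof(*lp->mii_bus));
	if (!lp->mii_bus)
		return -1;

	if (fail_register) {
		free(lp->mii_bus);      /* mdiobus_free() */
		lp->mii_bus = NULL;     /* keep no dangling reference */
		return -1;
	}
	return 0;
}

static void mdio_teardown(struct local *lp)
{
	/* Safe even after a failed setup: the pointer is NULL, not stale. */
	free(lp->mii_bus);
	lp->mii_bus = NULL;
}

int main(void)
{
	struct local lp = { 0 };

	mdio_setup(&lp, 1);     /* registration fails */
	mdio_teardown(&lp);     /* no double free thanks to the NULLing */
	return 0;
}
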
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 750eaa53bf0c..ada33c2d9ac2 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -476,7 +476,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
476out_unlock: 476out_unlock:
477 rcu_read_unlock(); 477 rcu_read_unlock();
478out: 478out:
479 NAPI_GRO_CB(skb)->flush |= flush; 479 skb_gro_flush_final(skb, pp, flush);
480 480
481 return pp; 481 return pp;
482} 482}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 1a924b867b07..4b6e308199d2 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net,
210void netvsc_channel_cb(void *context); 210void netvsc_channel_cb(void *context);
211int netvsc_poll(struct napi_struct *napi, int budget); 211int netvsc_poll(struct napi_struct *napi, int budget);
212 212
213void rndis_set_subchannel(struct work_struct *w); 213int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
214int rndis_filter_open(struct netvsc_device *nvdev); 214int rndis_filter_open(struct netvsc_device *nvdev);
215int rndis_filter_close(struct netvsc_device *nvdev); 215int rndis_filter_close(struct netvsc_device *nvdev);
216struct netvsc_device *rndis_filter_device_add(struct hv_device *dev, 216struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5d5bd513847f..31c3d77b4733 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
65 VM_PKT_DATA_INBAND, 0); 65 VM_PKT_DATA_INBAND, 0);
66} 66}
67 67
68/* Worker to setup sub channels on initial setup
69 * Initial hotplug event occurs in softirq context
70 * and can't wait for channels.
71 */
72static void netvsc_subchan_work(struct work_struct *w)
73{
74 struct netvsc_device *nvdev =
75 container_of(w, struct netvsc_device, subchan_work);
76 struct rndis_device *rdev;
77 int i, ret;
78
79 /* Avoid deadlock with device removal already under RTNL */
80 if (!rtnl_trylock()) {
81 schedule_work(w);
82 return;
83 }
84
85 rdev = nvdev->extension;
86 if (rdev) {
87 ret = rndis_set_subchannel(rdev->ndev, nvdev);
88 if (ret == 0) {
89 netif_device_attach(rdev->ndev);
90 } else {
91 /* fallback to only primary channel */
92 for (i = 1; i < nvdev->num_chn; i++)
93 netif_napi_del(&nvdev->chan_table[i].napi);
94
95 nvdev->max_chn = 1;
96 nvdev->num_chn = 1;
97 }
98 }
99
100 rtnl_unlock();
101}
102
68static struct netvsc_device *alloc_net_device(void) 103static struct netvsc_device *alloc_net_device(void)
69{ 104{
70 struct netvsc_device *net_device; 105 struct netvsc_device *net_device;
@@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void)
81 116
82 init_completion(&net_device->channel_init_wait); 117 init_completion(&net_device->channel_init_wait);
83 init_waitqueue_head(&net_device->subchan_open); 118 init_waitqueue_head(&net_device->subchan_open);
84 INIT_WORK(&net_device->subchan_work, rndis_set_subchannel); 119 INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
85 120
86 return net_device; 121 return net_device;
87} 122}
@@ -1239,6 +1274,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)
1239 struct hv_device *device = netvsc_channel_to_device(channel); 1274 struct hv_device *device = netvsc_channel_to_device(channel);
1240 struct net_device *ndev = hv_get_drvdata(device); 1275 struct net_device *ndev = hv_get_drvdata(device);
1241 int work_done = 0; 1276 int work_done = 0;
1277 int ret;
1242 1278
1243 /* If starting a new interval */ 1279 /* If starting a new interval */
1244 if (!nvchan->desc) 1280 if (!nvchan->desc)
@@ -1250,16 +1286,18 @@ int netvsc_poll(struct napi_struct *napi, int budget)
1250 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc); 1286 nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
1251 } 1287 }
1252 1288
1253 /* If send of pending receive completions suceeded 1289 /* Send any pending receive completions */
1254 * and did not exhaust NAPI budget this time 1290 ret = send_recv_completions(ndev, net_device, nvchan);
1255 * and not doing busy poll 1291
1292 /* If it did not exhaust NAPI budget this time
1293 * and not doing busy poll
1256 * then re-enable host interrupts 1294 * then re-enable host interrupts
1257 * and reschedule if ring is not empty. 1295 * and reschedule if ring is not empty
1296 * or sending receive completion failed.
1258 */ 1297 */
1259 if (send_recv_completions(ndev, net_device, nvchan) == 0 && 1298 if (work_done < budget &&
1260 work_done < budget &&
1261 napi_complete_done(napi, work_done) && 1299 napi_complete_done(napi, work_done) &&
1262 hv_end_read(&channel->inbound) && 1300 (ret || hv_end_read(&channel->inbound)) &&
1263 napi_schedule_prep(napi)) { 1301 napi_schedule_prep(napi)) {
1264 hv_begin_read(&channel->inbound); 1302 hv_begin_read(&channel->inbound);
1265 __napi_schedule(napi); 1303 __napi_schedule(napi);
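
netvsc_subchan_work() above uses the standard escape from a deadlock between a work item and device removal that already holds RTNL while flushing the work: try the lock, and if it is contended, requeue yourself and return instead of blocking. (The attach path in netvsc_drv.c below can instead call rndis_set_subchannel() synchronously, since it already runs under RTNL.) A minimal model of the trylock-and-requeue idiom:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static void schedule_work(void (*fn)(void))
{
	/* Real code requeues onto a workqueue; here we just note it. */
	printf("work rescheduled, will retry later\n");
	(void)fn;
}

static void subchan_work(void)
{
	/* Don't block: removal may hold rtnl and be flushing this work. */
	if (pthread_mutex_trylock(&rtnl) != 0) {
		schedule_work(subchan_work);
		return;
	}

	printf("setting up subchannels under rtnl\n");
	/* ... set up subchannels; attach, or fall back to one queue ... */

	pthread_mutex_unlock(&rtnl);
}

int main(void)
{
	subchan_work();                  /* uncontended: runs to completion */

	pthread_mutex_lock(&rtnl);       /* simulate removal holding rtnl */
	subchan_work();                  /* contended: bails and requeues */
	pthread_mutex_unlock(&rtnl);
	return 0;
}
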
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index fe2256bf1d13..dd1d6e115145 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev,
905 if (IS_ERR(nvdev)) 905 if (IS_ERR(nvdev))
906 return PTR_ERR(nvdev); 906 return PTR_ERR(nvdev);
907 907
908 /* Note: enable and attach happen when sub-channels setup */ 908 if (nvdev->num_chn > 1) {
909 ret = rndis_set_subchannel(ndev, nvdev);
910
911 /* if unavailable, just proceed with one queue */
912 if (ret) {
913 nvdev->max_chn = 1;
914 nvdev->num_chn = 1;
915 }
916 }
917
918 /* In any case device is now ready */
919 netif_device_attach(ndev);
909 920
921 /* Note: enable and attach happen when sub-channels setup */
910 netif_carrier_off(ndev); 922 netif_carrier_off(ndev);
911 923
912 if (netif_running(ndev)) { 924 if (netif_running(ndev)) {
@@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev,
2089 2101
2090 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN); 2102 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
2091 2103
2104 if (nvdev->num_chn > 1)
2105 schedule_work(&nvdev->subchan_work);
2106
2092 /* hw_features computed in rndis_netdev_set_hwcaps() */ 2107 /* hw_features computed in rndis_netdev_set_hwcaps() */
2093 net->features = net->hw_features | 2108 net->features = net->hw_features |
2094 NETIF_F_HIGHDMA | NETIF_F_SG | 2109 NETIF_F_HIGHDMA | NETIF_F_SG |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 5428bb261102..408ece27131c 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
1062 * This breaks overlap of processing the host message for the 1062 * This breaks overlap of processing the host message for the
1063 * new primary channel with the initialization of sub-channels. 1063 * new primary channel with the initialization of sub-channels.
1064 */ 1064 */
1065void rndis_set_subchannel(struct work_struct *w) 1065int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
1066{ 1066{
1067 struct netvsc_device *nvdev
1068 = container_of(w, struct netvsc_device, subchan_work);
1069 struct nvsp_message *init_packet = &nvdev->channel_init_pkt; 1067 struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
1070 struct net_device_context *ndev_ctx; 1068 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1071 struct rndis_device *rdev; 1069 struct hv_device *hv_dev = ndev_ctx->device_ctx;
1072 struct net_device *ndev; 1070 struct rndis_device *rdev = nvdev->extension;
1073 struct hv_device *hv_dev;
1074 int i, ret; 1071 int i, ret;
1075 1072
1076 if (!rtnl_trylock()) { 1073 ASSERT_RTNL();
1077 schedule_work(w);
1078 return;
1079 }
1080
1081 rdev = nvdev->extension;
1082 if (!rdev)
1083 goto unlock; /* device was removed */
1084
1085 ndev = rdev->ndev;
1086 ndev_ctx = netdev_priv(ndev);
1087 hv_dev = ndev_ctx->device_ctx;
1088 1074
1089 memset(init_packet, 0, sizeof(struct nvsp_message)); 1075 memset(init_packet, 0, sizeof(struct nvsp_message));
1090 init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL; 1076 init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
@@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w)
1100 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED); 1086 VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
1101 if (ret) { 1087 if (ret) {
1102 netdev_err(ndev, "sub channel allocate send failed: %d\n", ret); 1088 netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
1103 goto failed; 1089 return ret;
1104 } 1090 }
1105 1091
1106 wait_for_completion(&nvdev->channel_init_wait); 1092 wait_for_completion(&nvdev->channel_init_wait);
1107 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) { 1093 if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
1108 netdev_err(ndev, "sub channel request failed\n"); 1094 netdev_err(ndev, "sub channel request failed\n");
1109 goto failed; 1095 return -EIO;
1110 } 1096 }
1111 1097
1112 nvdev->num_chn = 1 + 1098 nvdev->num_chn = 1 +
@@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w)
1125 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++) 1111 for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
1126 ndev_ctx->tx_table[i] = i % nvdev->num_chn; 1112 ndev_ctx->tx_table[i] = i % nvdev->num_chn;
1127 1113
1128 netif_device_attach(ndev); 1114 return 0;
1129 rtnl_unlock();
1130 return;
1131
1132failed:
1133 /* fallback to only primary channel */
1134 for (i = 1; i < nvdev->num_chn; i++)
1135 netif_napi_del(&nvdev->chan_table[i].napi);
1136
1137 nvdev->max_chn = 1;
1138 nvdev->num_chn = 1;
1139
1140 netif_device_attach(ndev);
1141unlock:
1142 rtnl_unlock();
1143} 1115}
1144 1116
1145static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device, 1117static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
@@ -1360,21 +1332,13 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
1360 netif_napi_add(net, &net_device->chan_table[i].napi, 1332 netif_napi_add(net, &net_device->chan_table[i].napi,
1361 netvsc_poll, NAPI_POLL_WEIGHT); 1333 netvsc_poll, NAPI_POLL_WEIGHT);
1362 1334
1363 if (net_device->num_chn > 1) 1335 return net_device;
1364 schedule_work(&net_device->subchan_work);
1365 1336
1366out: 1337out:
1367 /* if unavailable, just proceed with one queue */ 1338 /* setting up multiple channels failed */
1368 if (ret) { 1339 net_device->max_chn = 1;
1369 net_device->max_chn = 1; 1340 net_device->num_chn = 1;
1370 net_device->num_chn = 1; 1341 return 0;
1371 }
1372
1373 /* No sub channels, device is ready */
1374 if (net_device->num_chn == 1)
1375 netif_device_attach(net);
1376
1377 return net_device;
1378 1342
1379err_dev_remv: 1343err_dev_remv:
1380 rndis_filter_device_remove(dev, net_device); 1344 rndis_filter_device_remove(dev, net_device);
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 64f1b1e77bc0..23a52b9293f3 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -275,6 +275,8 @@ struct adf7242_local {
275 struct spi_message stat_msg; 275 struct spi_message stat_msg;
276 struct spi_transfer stat_xfer; 276 struct spi_transfer stat_xfer;
277 struct dentry *debugfs_root; 277 struct dentry *debugfs_root;
278 struct delayed_work work;
279 struct workqueue_struct *wqueue;
278 unsigned long flags; 280 unsigned long flags;
279 int tx_stat; 281 int tx_stat;
280 bool promiscuous; 282 bool promiscuous;
@@ -575,10 +577,26 @@ static int adf7242_cmd_rx(struct adf7242_local *lp)
575 /* Wait until the ACK is sent */ 577 /* Wait until the ACK is sent */
576 adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__); 578 adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
577 adf7242_clear_irqstat(lp); 579 adf7242_clear_irqstat(lp);
580 mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
578 581
579 return adf7242_cmd(lp, CMD_RC_RX); 582 return adf7242_cmd(lp, CMD_RC_RX);
580} 583}
581 584
585static void adf7242_rx_cal_work(struct work_struct *work)
586{
587 struct adf7242_local *lp =
588 container_of(work, struct adf7242_local, work.work);
589
590 /* Reissuing RC_RX every 400ms - to adjust for offset
591 * drift in receiver (datasheet page 61, OCL section)
592 */
593
594 if (!test_bit(FLAG_XMIT, &lp->flags)) {
595 adf7242_cmd(lp, CMD_RC_PHY_RDY);
596 adf7242_cmd_rx(lp);
597 }
598}
599
582static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm) 600static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm)
583{ 601{
584 struct adf7242_local *lp = hw->priv; 602 struct adf7242_local *lp = hw->priv;
@@ -686,7 +704,7 @@ static int adf7242_start(struct ieee802154_hw *hw)
686 enable_irq(lp->spi->irq); 704 enable_irq(lp->spi->irq);
687 set_bit(FLAG_START, &lp->flags); 705 set_bit(FLAG_START, &lp->flags);
688 706
689 return adf7242_cmd(lp, CMD_RC_RX); 707 return adf7242_cmd_rx(lp);
690} 708}
691 709
692static void adf7242_stop(struct ieee802154_hw *hw) 710static void adf7242_stop(struct ieee802154_hw *hw)
@@ -694,6 +712,7 @@ static void adf7242_stop(struct ieee802154_hw *hw)
694 struct adf7242_local *lp = hw->priv; 712 struct adf7242_local *lp = hw->priv;
695 713
696 disable_irq(lp->spi->irq); 714 disable_irq(lp->spi->irq);
715 cancel_delayed_work_sync(&lp->work);
697 adf7242_cmd(lp, CMD_RC_IDLE); 716 adf7242_cmd(lp, CMD_RC_IDLE);
698 clear_bit(FLAG_START, &lp->flags); 717 clear_bit(FLAG_START, &lp->flags);
699 adf7242_clear_irqstat(lp); 718 adf7242_clear_irqstat(lp);
@@ -719,7 +738,10 @@ static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
719 adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8); 738 adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8);
720 adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16); 739 adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16);
721 740
722 return adf7242_cmd(lp, CMD_RC_RX); 741 if (test_bit(FLAG_START, &lp->flags))
742 return adf7242_cmd_rx(lp);
743 else
744 return adf7242_cmd(lp, CMD_RC_PHY_RDY);
723} 745}
724 746
725static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw, 747static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw,
@@ -814,6 +836,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
814 /* ensure existing instances of the IRQ handler have completed */ 836 /* ensure existing instances of the IRQ handler have completed */
815 disable_irq(lp->spi->irq); 837 disable_irq(lp->spi->irq);
816 set_bit(FLAG_XMIT, &lp->flags); 838 set_bit(FLAG_XMIT, &lp->flags);
839 cancel_delayed_work_sync(&lp->work);
817 reinit_completion(&lp->tx_complete); 840 reinit_completion(&lp->tx_complete);
818 adf7242_cmd(lp, CMD_RC_PHY_RDY); 841 adf7242_cmd(lp, CMD_RC_PHY_RDY);
819 adf7242_clear_irqstat(lp); 842 adf7242_clear_irqstat(lp);
@@ -952,6 +975,7 @@ static irqreturn_t adf7242_isr(int irq, void *data)
952 unsigned int xmit; 975 unsigned int xmit;
953 u8 irq1; 976 u8 irq1;
954 977
978 mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
955 adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1); 979 adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1);
956 980
957 if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA))) 981 if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA)))
@@ -1241,6 +1265,9 @@ static int adf7242_probe(struct spi_device *spi)
1241 spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg); 1265 spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg);
1242 1266
1243 spi_set_drvdata(spi, lp); 1267 spi_set_drvdata(spi, lp);
1268 INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
1269 lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
1270 WQ_MEM_RECLAIM);
1244 1271
1245 ret = adf7242_hw_init(lp); 1272 ret = adf7242_hw_init(lp);
1246 if (ret) 1273 if (ret)
@@ -1284,6 +1311,9 @@ static int adf7242_remove(struct spi_device *spi)
1284 if (!IS_ERR_OR_NULL(lp->debugfs_root)) 1311 if (!IS_ERR_OR_NULL(lp->debugfs_root))
1285 debugfs_remove_recursive(lp->debugfs_root); 1312 debugfs_remove_recursive(lp->debugfs_root);
1286 1313
1314 cancel_delayed_work_sync(&lp->work);
1315 destroy_workqueue(lp->wqueue);
1316
1287 ieee802154_unregister_hw(lp->hw); 1317 ieee802154_unregister_hw(lp->hw);
1288 mutex_destroy(&lp->bmux); 1318 mutex_destroy(&lp->bmux);
1289 ieee802154_free_hw(lp->hw); 1319 ieee802154_free_hw(lp->hw);
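
The adf7242 hunks above all implement one lifecycle: probe initializes the delayed work and allocates an ordered workqueue, the IRQ/RX paths re-arm the work with mod_delayed_work(), and the stop/xmit/remove paths quiesce it with cancel_delayed_work_sync() before touching conflicting state. A minimal sketch of that lifecycle, assuming hypothetical my_dev/my_cal_work names (not the driver's types):

	/*
	 * Sketch only (<linux/workqueue.h>); struct my_dev, my_cal_work()
	 * and the names below are illustrative, not part of adf7242.
	 */
	struct my_dev {
		struct workqueue_struct *wq;
		struct delayed_work cal_work;
	};

	static void my_cal_work(struct work_struct *work)
	{
		struct my_dev *dev = container_of(work, struct my_dev,
						  cal_work.work);

		/* recalibrate @dev here; the IRQ/RX paths re-arm with
		 * mod_delayed_work(dev->wq, &dev->cal_work,
		 *		    msecs_to_jiffies(400));
		 */
	}

	static int my_probe(struct my_dev *dev)
	{
		INIT_DELAYED_WORK(&dev->cal_work, my_cal_work);
		dev->wq = alloc_ordered_workqueue("my_dev", WQ_MEM_RECLAIM);
		if (!dev->wq)
			return -ENOMEM;
		return 0;
	}

	static void my_remove(struct my_dev *dev)
	{
		/* cancel before the queue and the device state go away */
		cancel_delayed_work_sync(&dev->cal_work);
		destroy_workqueue(dev->wq);
	}

Note the sketch checks the alloc_ordered_workqueue() return value; the probe hunk above does not, which could oops on the first mod_delayed_work() if the allocation ever failed.
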
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 77abedf0b524..3d9e91579866 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -940,7 +940,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
940static int 940static int
941at86rf230_ed(struct ieee802154_hw *hw, u8 *level) 941at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
942{ 942{
943 BUG_ON(!level); 943 WARN_ON(!level);
944 *level = 0xbe; 944 *level = 0xbe;
945 return 0; 945 return 0;
946} 946}
@@ -1121,8 +1121,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
1121 if (changed & IEEE802154_AFILT_SADDR_CHANGED) { 1121 if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
1122 u16 addr = le16_to_cpu(filt->short_addr); 1122 u16 addr = le16_to_cpu(filt->short_addr);
1123 1123
1124 dev_vdbg(&lp->spi->dev, 1124 dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__);
1125 "at86rf230_set_hw_addr_filt called for saddr\n");
1126 __at86rf230_write(lp, RG_SHORT_ADDR_0, addr); 1125 __at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
1127 __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8); 1126 __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
1128 } 1127 }
@@ -1130,8 +1129,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
1130 if (changed & IEEE802154_AFILT_PANID_CHANGED) { 1129 if (changed & IEEE802154_AFILT_PANID_CHANGED) {
1131 u16 pan = le16_to_cpu(filt->pan_id); 1130 u16 pan = le16_to_cpu(filt->pan_id);
1132 1131
1133 dev_vdbg(&lp->spi->dev, 1132 dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__);
1134 "at86rf230_set_hw_addr_filt called for pan id\n");
1135 __at86rf230_write(lp, RG_PAN_ID_0, pan); 1133 __at86rf230_write(lp, RG_PAN_ID_0, pan);
1136 __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8); 1134 __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
1137 } 1135 }
@@ -1140,15 +1138,13 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
1140 u8 i, addr[8]; 1138 u8 i, addr[8];
1141 1139
1142 memcpy(addr, &filt->ieee_addr, 8); 1140 memcpy(addr, &filt->ieee_addr, 8);
1143 dev_vdbg(&lp->spi->dev, 1141 dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__);
1144 "at86rf230_set_hw_addr_filt called for IEEE addr\n");
1145 for (i = 0; i < 8; i++) 1142 for (i = 0; i < 8; i++)
1146 __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]); 1143 __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
1147 } 1144 }
1148 1145
1149 if (changed & IEEE802154_AFILT_PANC_CHANGED) { 1146 if (changed & IEEE802154_AFILT_PANC_CHANGED) {
1150 dev_vdbg(&lp->spi->dev, 1147 dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__);
1151 "at86rf230_set_hw_addr_filt called for panc change\n");
1152 if (filt->pan_coord) 1148 if (filt->pan_coord)
1153 at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1); 1149 at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
1154 else 1150 else
@@ -1252,7 +1248,6 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
1252 return at86rf230_write_subreg(lp, SR_CCA_MODE, val); 1248 return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
1253} 1249}
1254 1250
1255
1256static int 1251static int
1257at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) 1252at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
1258{ 1253{
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 0d673f7682ee..176395e4b7bb 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -49,7 +49,7 @@ struct fakelb_phy {
49 49
50static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level) 50static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
51{ 51{
52 BUG_ON(!level); 52 WARN_ON(!level);
53 *level = 0xbe; 53 *level = 0xbe;
54 54
55 return 0; 55 return 0;
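
Both ed() callbacks above trade BUG_ON() for WARN_ON(): a NULL level pointer now logs a backtrace and taints the kernel instead of halting it. Since execution continues past WARN_ON(), a fully defensive variant would also skip the dereference; a minimal sketch of that shape (the early return is an assumption for illustration, not what either patch does):

	static int my_ed(struct ieee802154_hw *hw, u8 *level)
	{
		if (WARN_ON(!level))
			return -EINVAL;	/* hypothetical: bail before the write */
		*level = 0xbe;
		return 0;
	}
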
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index de0d7f28a181..e428277781ac 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -15,10 +15,11 @@
15 */ 15 */
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/gpio.h> 18#include <linux/gpio/consumer.h>
19#include <linux/spi/spi.h> 19#include <linux/spi/spi.h>
20#include <linux/workqueue.h> 20#include <linux/workqueue.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/irq.h>
22#include <linux/skbuff.h> 23#include <linux/skbuff.h>
23#include <linux/of_gpio.h> 24#include <linux/of_gpio.h>
24#include <linux/regmap.h> 25#include <linux/regmap.h>
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 23c1d6600241..4a949569ec4c 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
75{ 75{
76 struct ipvl_dev *ipvlan; 76 struct ipvl_dev *ipvlan;
77 struct net_device *mdev = port->dev; 77 struct net_device *mdev = port->dev;
78 int err = 0; 78 unsigned int flags;
79 int err;
79 80
80 ASSERT_RTNL(); 81 ASSERT_RTNL();
81 if (port->mode != nval) { 82 if (port->mode != nval) {
83 list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
84 flags = ipvlan->dev->flags;
85 if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
86 err = dev_change_flags(ipvlan->dev,
87 flags | IFF_NOARP);
88 } else {
89 err = dev_change_flags(ipvlan->dev,
90 flags & ~IFF_NOARP);
91 }
92 if (unlikely(err))
93 goto fail;
94 }
82 if (nval == IPVLAN_MODE_L3S) { 95 if (nval == IPVLAN_MODE_L3S) {
83 /* New mode is L3S */ 96 /* New mode is L3S */
84 err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); 97 err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
@@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
86 mdev->l3mdev_ops = &ipvl_l3mdev_ops; 99 mdev->l3mdev_ops = &ipvl_l3mdev_ops;
87 mdev->priv_flags |= IFF_L3MDEV_MASTER; 100 mdev->priv_flags |= IFF_L3MDEV_MASTER;
88 } else 101 } else
89 return err; 102 goto fail;
90 } else if (port->mode == IPVLAN_MODE_L3S) { 103 } else if (port->mode == IPVLAN_MODE_L3S) {
91 /* Old mode was L3S */ 104 /* Old mode was L3S */
92 mdev->priv_flags &= ~IFF_L3MDEV_MASTER; 105 mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
93 ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); 106 ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
94 mdev->l3mdev_ops = NULL; 107 mdev->l3mdev_ops = NULL;
95 } 108 }
96 list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
97 if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
98 ipvlan->dev->flags |= IFF_NOARP;
99 else
100 ipvlan->dev->flags &= ~IFF_NOARP;
101 }
102 port->mode = nval; 109 port->mode = nval;
103 } 110 }
111 return 0;
112
113fail:
114 /* Undo the flags changes that have been done so far. */
115 list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
116 flags = ipvlan->dev->flags;
117 if (port->mode == IPVLAN_MODE_L3 ||
118 port->mode == IPVLAN_MODE_L3S)
119 dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
120 else
121 dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
122 }
123
104 return err; 124 return err;
105} 125}
106 126
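
The rollback above leans on list_for_each_entry_continue_reverse(), which resumes from the element where the forward walk failed, skips it, and walks back toward the head, so exactly the already-changed devices are reverted. A generic sketch of the idiom, with hypothetical my_item/my_apply/my_revert stand-ins:

	#include <linux/list.h>

	struct my_item {
		struct list_head node;
	};

	static int my_apply(struct my_item *it);	/* illustrative */
	static void my_revert(struct my_item *it);	/* illustrative */

	static int apply_all(struct list_head *head)
	{
		struct my_item *it;
		int err;

		list_for_each_entry(it, head, node) {
			err = my_apply(it);
			if (err)
				goto unwind;
		}
		return 0;

	unwind:
		/* walks backwards from (and excluding) the failing element */
		list_for_each_entry_continue_reverse(it, head, node)
			my_revert(it);
		return err;
	}
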
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 081d99aa3985..49ac678eb2dc 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -222,7 +222,7 @@ static int dp83811_config_intr(struct phy_device *phydev)
222 if (err < 0) 222 if (err < 0)
223 return err; 223 return err;
224 224
225 err = phy_write(phydev, MII_DP83811_INT_STAT1, 0); 225 err = phy_write(phydev, MII_DP83811_INT_STAT2, 0);
226 } 226 }
227 227
228 return err; 228 return err;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b8f57e9b9379..1cd439bdf608 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -130,8 +130,9 @@
130#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) 130#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12)
131#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14) 131#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14)
132 132
133#define MII_88E1121_PHY_LED_CTRL 16 133#define MII_PHY_LED_CTRL 16
134#define MII_88E1121_PHY_LED_DEF 0x0030 134#define MII_88E1121_PHY_LED_DEF 0x0030
135#define MII_88E1510_PHY_LED_DEF 0x1177
135 136
136#define MII_M1011_PHY_STATUS 0x11 137#define MII_M1011_PHY_STATUS 0x11
137#define MII_M1011_PHY_STATUS_1000 0x8000 138#define MII_M1011_PHY_STATUS_1000 0x8000
@@ -632,8 +633,40 @@ error:
632 return err; 633 return err;
633} 634}
634 635
636static void marvell_config_led(struct phy_device *phydev)
637{
638 u16 def_config;
639 int err;
640
641 switch (MARVELL_PHY_FAMILY_ID(phydev->phy_id)) {
642 /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
643 case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1121R):
644 case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1318S):
645 def_config = MII_88E1121_PHY_LED_DEF;
646 break;
647 /* Default PHY LED config:
648 * LED[0] .. 1000Mbps Link
649 * LED[1] .. 100Mbps Link
650 * LED[2] .. Blink, Activity
651 */
652 case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510):
653 def_config = MII_88E1510_PHY_LED_DEF;
654 break;
655 default:
656 return;
657 }
658
659 err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL,
660 def_config);
661 if (err < 0)
 662	pr_warn("Failed to configure marvell phy LED.\n");
663}
664
635static int marvell_config_init(struct phy_device *phydev) 665static int marvell_config_init(struct phy_device *phydev)
636{ 666{
 667	/* Set default LED */
668 marvell_config_led(phydev);
669
637 /* Set registers from marvell,reg-init DT property */ 670 /* Set registers from marvell,reg-init DT property */
638 return marvell_of_reg_init(phydev); 671 return marvell_of_reg_init(phydev);
639} 672}
@@ -813,21 +846,6 @@ static int m88e1111_config_init(struct phy_device *phydev)
813 return genphy_soft_reset(phydev); 846 return genphy_soft_reset(phydev);
814} 847}
815 848
816static int m88e1121_config_init(struct phy_device *phydev)
817{
818 int err;
819
820 /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
821 err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
822 MII_88E1121_PHY_LED_CTRL,
823 MII_88E1121_PHY_LED_DEF);
824 if (err < 0)
825 return err;
826
827 /* Set marvell,reg-init configuration from device tree */
828 return marvell_config_init(phydev);
829}
830
831static int m88e1318_config_init(struct phy_device *phydev) 849static int m88e1318_config_init(struct phy_device *phydev)
832{ 850{
833 if (phy_interrupt_is_valid(phydev)) { 851 if (phy_interrupt_is_valid(phydev)) {
@@ -841,7 +859,7 @@ static int m88e1318_config_init(struct phy_device *phydev)
841 return err; 859 return err;
842 } 860 }
843 861
844 return m88e1121_config_init(phydev); 862 return marvell_config_init(phydev);
845} 863}
846 864
847static int m88e1510_config_init(struct phy_device *phydev) 865static int m88e1510_config_init(struct phy_device *phydev)
@@ -2087,7 +2105,7 @@ static struct phy_driver marvell_drivers[] = {
2087 .features = PHY_GBIT_FEATURES, 2105 .features = PHY_GBIT_FEATURES,
2088 .flags = PHY_HAS_INTERRUPT, 2106 .flags = PHY_HAS_INTERRUPT,
2089 .probe = &m88e1121_probe, 2107 .probe = &m88e1121_probe,
2090 .config_init = &m88e1121_config_init, 2108 .config_init = &marvell_config_init,
2091 .config_aneg = &m88e1121_config_aneg, 2109 .config_aneg = &m88e1121_config_aneg,
2092 .read_status = &marvell_read_status, 2110 .read_status = &marvell_read_status,
2093 .ack_interrupt = &marvell_ack_interrupt, 2111 .ack_interrupt = &marvell_ack_interrupt,
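
The consolidation works because phy_write_paged() hides the page dance: it takes the MDIO bus lock, selects the requested page, writes the register and restores the previous page, so the per-family callers reduce to picking a default value. A minimal sketch, assuming hypothetical MY_LED_PAGE/MY_LED_REG/MY_LED_DEF constants:

	static void my_config_led(struct phy_device *phydev)
	{
		int err = phy_write_paged(phydev, MY_LED_PAGE, MY_LED_REG,
					  MY_LED_DEF);

		if (err < 0)
			dev_warn(&phydev->mdio.dev,
				 "failed to configure LEDs: %d\n", err);
	}
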
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 537297d2b4b4..6c9b24fe3148 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -514,7 +514,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
514 * negotiation may already be done and aneg interrupt may not be 514 * negotiation may already be done and aneg interrupt may not be
515 * generated. 515 * generated.
516 */ 516 */
517 if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) { 517 if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
518 err = phy_aneg_done(phydev); 518 err = phy_aneg_done(phydev);
519 if (err > 0) { 519 if (err > 0) {
520 trigger = true; 520 trigger = true;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bd0f339f69fd..b9f5f40a7ac1 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1724,11 +1724,8 @@ EXPORT_SYMBOL(genphy_loopback);
1724 1724
1725static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) 1725static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
1726{ 1726{
1727 /* The default values for phydev->supported are provided by the PHY 1727 phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
1728 * driver "features" member, we want to reset to sane defaults first 1728 PHY_10BT_FEATURES);
1729 * before supporting higher speeds.
1730 */
1731 phydev->supported &= PHY_DEFAULT_FEATURES;
1732 1729
1733 switch (max_speed) { 1730 switch (max_speed) {
1734 default: 1731 default:
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index d437f4f5ed52..740655261e5b 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -349,7 +349,6 @@ static int sfp_register_bus(struct sfp_bus *bus)
349 } 349 }
350 if (bus->started) 350 if (bus->started)
351 bus->socket_ops->start(bus->sfp); 351 bus->socket_ops->start(bus->sfp);
352 bus->netdev->sfp_bus = bus;
353 bus->registered = true; 352 bus->registered = true;
354 return 0; 353 return 0;
355} 354}
@@ -364,7 +363,6 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
364 if (bus->phydev && ops && ops->disconnect_phy) 363 if (bus->phydev && ops && ops->disconnect_phy)
365 ops->disconnect_phy(bus->upstream); 364 ops->disconnect_phy(bus->upstream);
366 } 365 }
367 bus->netdev->sfp_bus = NULL;
368 bus->registered = false; 366 bus->registered = false;
369} 367}
370 368
@@ -436,6 +434,14 @@ void sfp_upstream_stop(struct sfp_bus *bus)
436} 434}
437EXPORT_SYMBOL_GPL(sfp_upstream_stop); 435EXPORT_SYMBOL_GPL(sfp_upstream_stop);
438 436
437static void sfp_upstream_clear(struct sfp_bus *bus)
438{
439 bus->upstream_ops = NULL;
440 bus->upstream = NULL;
441 bus->netdev->sfp_bus = NULL;
442 bus->netdev = NULL;
443}
444
439/** 445/**
440 * sfp_register_upstream() - Register the neighbouring device 446 * sfp_register_upstream() - Register the neighbouring device
441 * @fwnode: firmware node for the SFP bus 447 * @fwnode: firmware node for the SFP bus
@@ -461,9 +467,13 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
461 bus->upstream_ops = ops; 467 bus->upstream_ops = ops;
462 bus->upstream = upstream; 468 bus->upstream = upstream;
463 bus->netdev = ndev; 469 bus->netdev = ndev;
470 ndev->sfp_bus = bus;
464 471
465 if (bus->sfp) 472 if (bus->sfp) {
466 ret = sfp_register_bus(bus); 473 ret = sfp_register_bus(bus);
474 if (ret)
475 sfp_upstream_clear(bus);
476 }
467 rtnl_unlock(); 477 rtnl_unlock();
468 } 478 }
469 479
@@ -488,8 +498,7 @@ void sfp_unregister_upstream(struct sfp_bus *bus)
488 rtnl_lock(); 498 rtnl_lock();
489 if (bus->sfp) 499 if (bus->sfp)
490 sfp_unregister_bus(bus); 500 sfp_unregister_bus(bus);
491 bus->upstream = NULL; 501 sfp_upstream_clear(bus);
492 bus->netdev = NULL;
493 rtnl_unlock(); 502 rtnl_unlock();
494 503
495 sfp_bus_put(bus); 504 sfp_bus_put(bus);
@@ -561,6 +570,13 @@ void sfp_module_remove(struct sfp_bus *bus)
561} 570}
562EXPORT_SYMBOL_GPL(sfp_module_remove); 571EXPORT_SYMBOL_GPL(sfp_module_remove);
563 572
573static void sfp_socket_clear(struct sfp_bus *bus)
574{
575 bus->sfp_dev = NULL;
576 bus->sfp = NULL;
577 bus->socket_ops = NULL;
578}
579
564struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp, 580struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
565 const struct sfp_socket_ops *ops) 581 const struct sfp_socket_ops *ops)
566{ 582{
@@ -573,8 +589,11 @@ struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
573 bus->sfp = sfp; 589 bus->sfp = sfp;
574 bus->socket_ops = ops; 590 bus->socket_ops = ops;
575 591
576 if (bus->netdev) 592 if (bus->netdev) {
577 ret = sfp_register_bus(bus); 593 ret = sfp_register_bus(bus);
594 if (ret)
595 sfp_socket_clear(bus);
596 }
578 rtnl_unlock(); 597 rtnl_unlock();
579 } 598 }
580 599
@@ -592,9 +611,7 @@ void sfp_unregister_socket(struct sfp_bus *bus)
592 rtnl_lock(); 611 rtnl_lock();
593 if (bus->netdev) 612 if (bus->netdev)
594 sfp_unregister_bus(bus); 613 sfp_unregister_bus(bus);
595 bus->sfp_dev = NULL; 614 sfp_socket_clear(bus);
596 bus->sfp = NULL;
597 bus->socket_ops = NULL;
598 rtnl_unlock(); 615 rtnl_unlock();
599 616
600 sfp_bus_put(bus); 617 sfp_bus_put(bus);
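
The sfp-bus rework pairs every registration with a single clear helper shared by the failure path and the unregister path, so a failed sfp_register_bus() can no longer leave ndev->sfp_bus or the socket pointers dangling. A generic sketch of that symmetry, with all names illustrative:

	struct my_ops;

	struct my_bus {
		struct device *dev;
		const struct my_ops *ops;
	};

	static int my_bus_attach(struct my_bus *bus);	/* stand-in for sfp_register_bus() */

	static void my_bus_clear(struct my_bus *bus)
	{
		bus->dev = NULL;
		bus->ops = NULL;
	}

	static int my_bus_register(struct my_bus *bus, struct device *dev,
				   const struct my_ops *ops)
	{
		int ret;

		bus->dev = dev;
		bus->ops = ops;

		ret = my_bus_attach(bus);
		if (ret)
			my_bus_clear(bus);	/* same helper unregister uses */
		return ret;
	}
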
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a192a017cc68..f5727baac84a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1688,7 +1688,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1688 case XDP_TX: 1688 case XDP_TX:
1689 get_page(alloc_frag->page); 1689 get_page(alloc_frag->page);
1690 alloc_frag->offset += buflen; 1690 alloc_frag->offset += buflen;
1691 if (tun_xdp_tx(tun->dev, &xdp)) 1691 if (tun_xdp_tx(tun->dev, &xdp) < 0)
1692 goto err_redirect; 1692 goto err_redirect;
1693 rcu_read_unlock(); 1693 rcu_read_unlock();
1694 local_bh_enable(); 1694 local_bh_enable();
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 3d4f7959dabb..b1b3d8f7e67d 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev)
642 priv->presvd_phy_advertise); 642 priv->presvd_phy_advertise);
643 643
644 /* Restore BMCR */ 644 /* Restore BMCR */
645 if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
646 priv->presvd_phy_bmcr |= BMCR_ANRESTART;
647
645 asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR, 648 asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
646 priv->presvd_phy_bmcr); 649 priv->presvd_phy_bmcr);
647 650
648 mii_nway_restart(&dev->mii);
649 priv->presvd_phy_advertise = 0; 651 priv->presvd_phy_advertise = 0;
650 priv->presvd_phy_bmcr = 0; 652 priv->presvd_phy_bmcr = 0;
651 } 653 }
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 8dff87ec6d99..ed10d49eb5e0 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -64,6 +64,7 @@
64#define DEFAULT_RX_CSUM_ENABLE (true) 64#define DEFAULT_RX_CSUM_ENABLE (true)
65#define DEFAULT_TSO_CSUM_ENABLE (true) 65#define DEFAULT_TSO_CSUM_ENABLE (true)
66#define DEFAULT_VLAN_FILTER_ENABLE (true) 66#define DEFAULT_VLAN_FILTER_ENABLE (true)
67#define DEFAULT_VLAN_RX_OFFLOAD (true)
67#define TX_OVERHEAD (8) 68#define TX_OVERHEAD (8)
68#define RXW_PADDING 2 69#define RXW_PADDING 2
69 70
@@ -2298,7 +2299,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2298 if ((ll_mtu % dev->maxpacket) == 0) 2299 if ((ll_mtu % dev->maxpacket) == 0)
2299 return -EDOM; 2300 return -EDOM;
2300 2301
2301 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN); 2302 ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
2302 2303
2303 netdev->mtu = new_mtu; 2304 netdev->mtu = new_mtu;
2304 2305
@@ -2364,6 +2365,11 @@ static int lan78xx_set_features(struct net_device *netdev,
2364 } 2365 }
2365 2366
2366 if (features & NETIF_F_HW_VLAN_CTAG_RX) 2367 if (features & NETIF_F_HW_VLAN_CTAG_RX)
2368 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
2369 else
2370 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
2371
2372 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2367 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_; 2373 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
2368 else 2374 else
2369 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_; 2375 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
@@ -2587,7 +2593,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
2587 buf |= FCT_TX_CTL_EN_; 2593 buf |= FCT_TX_CTL_EN_;
2588 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf); 2594 ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
2589 2595
2590 ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN); 2596 ret = lan78xx_set_rx_max_frame_length(dev,
2597 dev->net->mtu + VLAN_ETH_HLEN);
2591 2598
2592 ret = lan78xx_read_reg(dev, MAC_RX, &buf); 2599 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2593 buf |= MAC_RX_RXEN_; 2600 buf |= MAC_RX_RXEN_;
@@ -2975,6 +2982,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
2975 if (DEFAULT_TSO_CSUM_ENABLE) 2982 if (DEFAULT_TSO_CSUM_ENABLE)
2976 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG; 2983 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
2977 2984
2985 if (DEFAULT_VLAN_RX_OFFLOAD)
2986 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
2987
2988 if (DEFAULT_VLAN_FILTER_ENABLE)
2989 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2990
2978 dev->net->hw_features = dev->net->features; 2991 dev->net->hw_features = dev->net->features;
2979 2992
2980 ret = lan78xx_setup_irq_domain(dev); 2993 ret = lan78xx_setup_irq_domain(dev);
@@ -3039,8 +3052,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3039 struct sk_buff *skb, 3052 struct sk_buff *skb,
3040 u32 rx_cmd_a, u32 rx_cmd_b) 3053 u32 rx_cmd_a, u32 rx_cmd_b)
3041{ 3054{
3055 /* HW Checksum offload appears to be flawed if used when not stripping
3056 * VLAN headers. Drop back to S/W checksums under these conditions.
3057 */
3042 if (!(dev->net->features & NETIF_F_RXCSUM) || 3058 if (!(dev->net->features & NETIF_F_RXCSUM) ||
3043 unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) { 3059 unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3060 ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3061 !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3044 skb->ip_summed = CHECKSUM_NONE; 3062 skb->ip_summed = CHECKSUM_NONE;
3045 } else { 3063 } else {
3046 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_)); 3064 skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
@@ -3048,6 +3066,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3048 } 3066 }
3049} 3067}
3050 3068
3069static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3070 struct sk_buff *skb,
3071 u32 rx_cmd_a, u32 rx_cmd_b)
3072{
3073 if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3074 (rx_cmd_a & RX_CMD_A_FVTG_))
3075 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3076 (rx_cmd_b & 0xffff));
3077}
3078
3051static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb) 3079static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
3052{ 3080{
3053 int status; 3081 int status;
@@ -3112,6 +3140,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3112 if (skb->len == size) { 3140 if (skb->len == size) {
3113 lan78xx_rx_csum_offload(dev, skb, 3141 lan78xx_rx_csum_offload(dev, skb,
3114 rx_cmd_a, rx_cmd_b); 3142 rx_cmd_a, rx_cmd_b);
3143 lan78xx_rx_vlan_offload(dev, skb,
3144 rx_cmd_a, rx_cmd_b);
3115 3145
3116 skb_trim(skb, skb->len - 4); /* remove fcs */ 3146 skb_trim(skb, skb->len - 4); /* remove fcs */
3117 skb->truesize = size + sizeof(struct sk_buff); 3147 skb->truesize = size + sizeof(struct sk_buff);
@@ -3130,6 +3160,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
3130 skb_set_tail_pointer(skb2, size); 3160 skb_set_tail_pointer(skb2, size);
3131 3161
3132 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b); 3162 lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3163 lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
3133 3164
3134 skb_trim(skb2, skb2->len - 4); /* remove fcs */ 3165 skb_trim(skb2, skb2->len - 4); /* remove fcs */
3135 skb2->truesize = size + sizeof(struct sk_buff); 3166 skb2->truesize = size + sizeof(struct sk_buff);
@@ -3313,6 +3344,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
3313 pkt_cnt = 0; 3344 pkt_cnt = 0;
3314 count = 0; 3345 count = 0;
3315 length = 0; 3346 length = 0;
3347 spin_lock_irqsave(&tqp->lock, flags);
3316 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) { 3348 for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
3317 if (skb_is_gso(skb)) { 3349 if (skb_is_gso(skb)) {
3318 if (pkt_cnt) { 3350 if (pkt_cnt) {
@@ -3321,7 +3353,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
3321 } 3353 }
3322 count = 1; 3354 count = 1;
3323 length = skb->len - TX_OVERHEAD; 3355 length = skb->len - TX_OVERHEAD;
3324 skb2 = skb_dequeue(tqp); 3356 __skb_unlink(skb, tqp);
3357 spin_unlock_irqrestore(&tqp->lock, flags);
3325 goto gso_skb; 3358 goto gso_skb;
3326 } 3359 }
3327 3360
@@ -3330,6 +3363,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
3330 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32)); 3363 skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
3331 pkt_cnt++; 3364 pkt_cnt++;
3332 } 3365 }
3366 spin_unlock_irqrestore(&tqp->lock, flags);
3333 3367
3334 /* copy to a single skb */ 3368 /* copy to a single skb */
3335 skb = alloc_skb(skb_totallen, GFP_ATOMIC); 3369 skb = alloc_skb(skb_totallen, GFP_ATOMIC);
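
Two patterns in the lan78xx diff are worth calling out. First, once RFE_CTL_VLAN_STRIP_ makes the hardware remove the 802.1Q header, the TCI carried in the RX metadata must be handed back to the stack or the VLAN is silently lost; that is what __vlan_hwaccel_put_tag() does. Second, the tx_bh loop now walks the queue under tqp->lock and uses __skb_unlink() (the unlocked variant) instead of skb_dequeue(), since the lock is already held. A minimal sketch of the RX hand-off, where RX_DESC_VTAG and the tci argument are illustrative stand-ins for the RX_CMD_A_FVTG_/rx_cmd_b fields above:

	#include <linux/if_vlan.h>

	static void my_rx_vlan(struct net_device *ndev, struct sk_buff *skb,
			       u32 desc, u16 tci)
	{
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    (desc & RX_DESC_VTAG))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
	}
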
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8fac8e132c5b..cb0cc30c3d6a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1246,13 +1246,14 @@ static const struct usb_device_id products[] = {
1246 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ 1246 {QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
1247 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ 1247 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
1248 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ 1248 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
1249 {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e */ 1249 {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
1250 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ 1250 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
1251 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ 1251 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
1252 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ 1252 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
1253 {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ 1253 {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
1254 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */ 1254 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
1255 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ 1255 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
1256 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
1256 {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ 1257 {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
1257 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ 1258 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
1258 1259
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 86f7196f9d91..2a58607a6aea 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev)
3962#ifdef CONFIG_PM_SLEEP 3962#ifdef CONFIG_PM_SLEEP
3963 unregister_pm_notifier(&tp->pm_notifier); 3963 unregister_pm_notifier(&tp->pm_notifier);
3964#endif 3964#endif
3965 napi_disable(&tp->napi); 3965 if (!test_bit(RTL8152_UNPLUG, &tp->flags))
3966 napi_disable(&tp->napi);
3966 clear_bit(WORK_ENABLE, &tp->flags); 3967 clear_bit(WORK_ENABLE, &tp->flags);
3967 usb_kill_urb(tp->intr_urb); 3968 usb_kill_urb(tp->intr_urb);
3968 cancel_delayed_work_sync(&tp->schedule); 3969 cancel_delayed_work_sync(&tp->schedule);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 5f565bd574da..48ba80a8ca5c 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -681,7 +681,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
681 (netdev->flags & IFF_ALLMULTI)) { 681 (netdev->flags & IFF_ALLMULTI)) {
682 rx_creg &= 0xfffe; 682 rx_creg &= 0xfffe;
683 rx_creg |= 0x0002; 683 rx_creg |= 0x0002;
684 dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name); 684 dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name);
685 } else { 685 } else {
686 /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ 686 /* ~RX_MULTICAST, ~RX_PROMISCUOUS */
687 rx_creg &= 0x00fc; 687 rx_creg &= 0x00fc;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 7a6a1fe79309..05553d252446 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -82,6 +82,9 @@ static bool turbo_mode = true;
82module_param(turbo_mode, bool, 0644); 82module_param(turbo_mode, bool, 0644);
83MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); 83MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
84 84
85static int smsc75xx_link_ok_nopm(struct usbnet *dev);
86static int smsc75xx_phy_gig_workaround(struct usbnet *dev);
87
85static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index, 88static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
86 u32 *data, int in_pm) 89 u32 *data, int in_pm)
87{ 90{
@@ -852,6 +855,9 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
852 return -EIO; 855 return -EIO;
853 } 856 }
854 857
858 /* phy workaround for gig link */
859 smsc75xx_phy_gig_workaround(dev);
860
855 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, 861 smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
856 ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | 862 ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
857 ADVERTISE_PAUSE_ASYM); 863 ADVERTISE_PAUSE_ASYM);
@@ -987,6 +993,62 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
987 return -EIO; 993 return -EIO;
988} 994}
989 995
996static int smsc75xx_phy_gig_workaround(struct usbnet *dev)
997{
998 struct mii_if_info *mii = &dev->mii;
999 int ret = 0, timeout = 0;
1000 u32 buf, link_up = 0;
1001
1002	/* Put the PHY in gigabit loopback */
1003 smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040);
1004
1005	/* Wait for the link to come up */
1006 do {
1007 link_up = smsc75xx_link_ok_nopm(dev);
1008 usleep_range(10000, 20000);
1009 timeout++;
1010 } while ((!link_up) && (timeout < 1000));
1011
1012 if (timeout >= 1000) {
1013 netdev_warn(dev->net, "Timeout waiting for PHY link up\n");
1014 return -EIO;
1015 }
1016
1017 /* phy reset */
1018 ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
1019 if (ret < 0) {
1020 netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
1021 return ret;
1022 }
1023
1024 buf |= PMT_CTL_PHY_RST;
1025
1026 ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
1027 if (ret < 0) {
1028 netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret);
1029 return ret;
1030 }
1031
1032 timeout = 0;
1033 do {
1034 usleep_range(10000, 20000);
1035 ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
1036 if (ret < 0) {
1037 netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n",
1038 ret);
1039 return ret;
1040 }
1041 timeout++;
1042 } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
1043
1044 if (timeout >= 100) {
1045		netdev_warn(dev->net, "Timeout waiting for PHY reset\n");
1046 return -EIO;
1047 }
1048
1049 return 0;
1050}
1051
990static int smsc75xx_reset(struct usbnet *dev) 1052static int smsc75xx_reset(struct usbnet *dev)
991{ 1053{
992 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]); 1054 struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
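
Both waits in the workaround use the same bounded-poll idiom: sleep in a usleep_range() window, count attempts, and fail with -EIO once the budget is spent rather than spinning forever on dead hardware. A condensed sketch, with my_condition() standing in for the link/PMT_CTL register reads:

	static int my_poll_until(struct usbnet *dev)
	{
		int timeout = 0;

		do {
			usleep_range(10000, 20000);
			if (my_condition(dev))	/* illustrative predicate */
				return 0;
		} while (++timeout < 100);

		netdev_warn(dev->net, "timeout waiting for condition\n");
		return -EIO;
	}
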
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b6c9a2af3732..53085c63277b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644);
53/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ 53/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
54#define VIRTIO_XDP_HEADROOM 256 54#define VIRTIO_XDP_HEADROOM 256
55 55
56/* Separating two types of XDP xmit */
57#define VIRTIO_XDP_TX BIT(0)
58#define VIRTIO_XDP_REDIR BIT(1)
59
56/* RX packet size EWMA. The average packet size is used to determine the packet 60/* RX packet size EWMA. The average packet size is used to determine the packet
57 * buffer size when refilling RX rings. As the entire RX ring may be refilled 61 * buffer size when refilling RX rings. As the entire RX ring may be refilled
58 * at once, the weight is chosen so that the EWMA will be insensitive to short- 62 * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
582 struct receive_queue *rq, 586 struct receive_queue *rq,
583 void *buf, void *ctx, 587 void *buf, void *ctx,
584 unsigned int len, 588 unsigned int len,
585 bool *xdp_xmit) 589 unsigned int *xdp_xmit)
586{ 590{
587 struct sk_buff *skb; 591 struct sk_buff *skb;
588 struct bpf_prog *xdp_prog; 592 struct bpf_prog *xdp_prog;
@@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
654 trace_xdp_exception(vi->dev, xdp_prog, act); 658 trace_xdp_exception(vi->dev, xdp_prog, act);
655 goto err_xdp; 659 goto err_xdp;
656 } 660 }
657 *xdp_xmit = true; 661 *xdp_xmit |= VIRTIO_XDP_TX;
658 rcu_read_unlock(); 662 rcu_read_unlock();
659 goto xdp_xmit; 663 goto xdp_xmit;
660 case XDP_REDIRECT: 664 case XDP_REDIRECT:
661 err = xdp_do_redirect(dev, &xdp, xdp_prog); 665 err = xdp_do_redirect(dev, &xdp, xdp_prog);
662 if (err) 666 if (err)
663 goto err_xdp; 667 goto err_xdp;
664 *xdp_xmit = true; 668 *xdp_xmit |= VIRTIO_XDP_REDIR;
665 rcu_read_unlock(); 669 rcu_read_unlock();
666 goto xdp_xmit; 670 goto xdp_xmit;
667 default: 671 default:
@@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
723 void *buf, 727 void *buf,
724 void *ctx, 728 void *ctx,
725 unsigned int len, 729 unsigned int len,
726 bool *xdp_xmit) 730 unsigned int *xdp_xmit)
727{ 731{
728 struct virtio_net_hdr_mrg_rxbuf *hdr = buf; 732 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
729 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); 733 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
818 put_page(xdp_page); 822 put_page(xdp_page);
819 goto err_xdp; 823 goto err_xdp;
820 } 824 }
821 *xdp_xmit = true; 825 *xdp_xmit |= VIRTIO_XDP_TX;
822 if (unlikely(xdp_page != page)) 826 if (unlikely(xdp_page != page))
823 put_page(page); 827 put_page(page);
824 rcu_read_unlock(); 828 rcu_read_unlock();
@@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
830 put_page(xdp_page); 834 put_page(xdp_page);
831 goto err_xdp; 835 goto err_xdp;
832 } 836 }
833 *xdp_xmit = true; 837 *xdp_xmit |= VIRTIO_XDP_REDIR;
834 if (unlikely(xdp_page != page)) 838 if (unlikely(xdp_page != page))
835 put_page(page); 839 put_page(page);
836 rcu_read_unlock(); 840 rcu_read_unlock();
@@ -939,7 +943,8 @@ xdp_xmit:
939} 943}
940 944
941static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq, 945static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
942 void *buf, unsigned int len, void **ctx, bool *xdp_xmit) 946 void *buf, unsigned int len, void **ctx,
947 unsigned int *xdp_xmit)
943{ 948{
944 struct net_device *dev = vi->dev; 949 struct net_device *dev = vi->dev;
945 struct sk_buff *skb; 950 struct sk_buff *skb;
@@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work)
1232 } 1237 }
1233} 1238}
1234 1239
1235static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit) 1240static int virtnet_receive(struct receive_queue *rq, int budget,
1241 unsigned int *xdp_xmit)
1236{ 1242{
1237 struct virtnet_info *vi = rq->vq->vdev->priv; 1243 struct virtnet_info *vi = rq->vq->vdev->priv;
1238 unsigned int len, received = 0, bytes = 0; 1244 unsigned int len, received = 0, bytes = 0;
@@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
1321 struct virtnet_info *vi = rq->vq->vdev->priv; 1327 struct virtnet_info *vi = rq->vq->vdev->priv;
1322 struct send_queue *sq; 1328 struct send_queue *sq;
1323 unsigned int received, qp; 1329 unsigned int received, qp;
1324 bool xdp_xmit = false; 1330 unsigned int xdp_xmit = 0;
1325 1331
1326 virtnet_poll_cleantx(rq); 1332 virtnet_poll_cleantx(rq);
1327 1333
@@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
1331 if (received < budget) 1337 if (received < budget)
1332 virtqueue_napi_complete(napi, rq->vq, received); 1338 virtqueue_napi_complete(napi, rq->vq, received);
1333 1339
1334 if (xdp_xmit) { 1340 if (xdp_xmit & VIRTIO_XDP_REDIR)
1341 xdp_do_flush_map();
1342
1343 if (xdp_xmit & VIRTIO_XDP_TX) {
1335 qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + 1344 qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
1336 smp_processor_id(); 1345 smp_processor_id();
1337 sq = &vi->sq[qp]; 1346 sq = &vi->sq[qp];
1338 virtqueue_kick(sq->vq); 1347 virtqueue_kick(sq->vq);
1339 xdp_do_flush_map();
1340 } 1348 }
1341 1349
1342 return received; 1350 return received;
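
Replacing the bool with a flag word lets the RX path record which kind of XDP transmit happened, so the poll loop can flush redirect maps and kick the local XDP TX queue independently, each at most once per NAPI run. A sketch of the pattern (<linux/bits.h>), with my_queue/my_receive/my_kick_txq as illustrative stand-ins:

	#define MY_XDP_TX	BIT(0)
	#define MY_XDP_REDIR	BIT(1)

	struct my_queue;					/* illustrative */
	static int my_receive(struct my_queue *q, int budget,
			      unsigned int *xdp_xmit);		/* sets MY_XDP_* */
	static void my_kick_txq(struct my_queue *q);

	static int my_poll(struct my_queue *q, int budget)
	{
		unsigned int xdp_xmit = 0;
		int received = my_receive(q, budget, &xdp_xmit);

		if (xdp_xmit & MY_XDP_REDIR)
			xdp_do_flush_map();	/* once per poll, not per packet */
		if (xdp_xmit & MY_XDP_TX)
			my_kick_txq(q);		/* one doorbell for all XDP_TX */

		return received;
	}
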
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index aee0e60471f1..e857cb3335f6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
623 flush = 0; 623 flush = 0;
624 624
625out: 625out:
626 skb_gro_remcsum_cleanup(skb, &grc); 626 skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
627 skb->remcsum_offload = 0;
628 NAPI_GRO_CB(skb)->flush |= flush;
629 627
630 return pp; 628 return pp;
631} 629}
@@ -638,9 +636,62 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
638 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); 636 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
639} 637}
640 638
641/* Add new entry to forwarding table -- assumes lock held */ 639static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
640 const u8 *mac, __u16 state,
641 __be32 src_vni, __u8 ndm_flags)
642{
643 struct vxlan_fdb *f;
644
645 f = kmalloc(sizeof(*f), GFP_ATOMIC);
646 if (!f)
647 return NULL;
648 f->state = state;
649 f->flags = ndm_flags;
650 f->updated = f->used = jiffies;
651 f->vni = src_vni;
652 INIT_LIST_HEAD(&f->remotes);
653 memcpy(f->eth_addr, mac, ETH_ALEN);
654
655 return f;
656}
657
642static int vxlan_fdb_create(struct vxlan_dev *vxlan, 658static int vxlan_fdb_create(struct vxlan_dev *vxlan,
643 const u8 *mac, union vxlan_addr *ip, 659 const u8 *mac, union vxlan_addr *ip,
660 __u16 state, __be16 port, __be32 src_vni,
661 __be32 vni, __u32 ifindex, __u8 ndm_flags,
662 struct vxlan_fdb **fdb)
663{
664 struct vxlan_rdst *rd = NULL;
665 struct vxlan_fdb *f;
666 int rc;
667
668 if (vxlan->cfg.addrmax &&
669 vxlan->addrcnt >= vxlan->cfg.addrmax)
670 return -ENOSPC;
671
672 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
673 f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
674 if (!f)
675 return -ENOMEM;
676
677 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
678 if (rc < 0) {
679 kfree(f);
680 return rc;
681 }
682
683 ++vxlan->addrcnt;
684 hlist_add_head_rcu(&f->hlist,
685 vxlan_fdb_head(vxlan, mac, src_vni));
686
687 *fdb = f;
688
689 return 0;
690}
691
692/* Add new entry to forwarding table -- assumes lock held */
693static int vxlan_fdb_update(struct vxlan_dev *vxlan,
694 const u8 *mac, union vxlan_addr *ip,
644 __u16 state, __u16 flags, 695 __u16 state, __u16 flags,
645 __be16 port, __be32 src_vni, __be32 vni, 696 __be16 port, __be32 src_vni, __be32 vni,
646 __u32 ifindex, __u8 ndm_flags) 697 __u32 ifindex, __u8 ndm_flags)
@@ -689,37 +740,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
689 if (!(flags & NLM_F_CREATE)) 740 if (!(flags & NLM_F_CREATE))
690 return -ENOENT; 741 return -ENOENT;
691 742
692 if (vxlan->cfg.addrmax &&
693 vxlan->addrcnt >= vxlan->cfg.addrmax)
694 return -ENOSPC;
695
696 /* Disallow replace to add a multicast entry */ 743 /* Disallow replace to add a multicast entry */
697 if ((flags & NLM_F_REPLACE) && 744 if ((flags & NLM_F_REPLACE) &&
698 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) 745 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
699 return -EOPNOTSUPP; 746 return -EOPNOTSUPP;
700 747
701 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); 748 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
702 f = kmalloc(sizeof(*f), GFP_ATOMIC); 749 rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
703 if (!f) 750 vni, ifindex, ndm_flags, &f);
704 return -ENOMEM; 751 if (rc < 0)
705
706 notify = 1;
707 f->state = state;
708 f->flags = ndm_flags;
709 f->updated = f->used = jiffies;
710 f->vni = src_vni;
711 INIT_LIST_HEAD(&f->remotes);
712 memcpy(f->eth_addr, mac, ETH_ALEN);
713
714 rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
715 if (rc < 0) {
716 kfree(f);
717 return rc; 752 return rc;
718 } 753 notify = 1;
719
720 ++vxlan->addrcnt;
721 hlist_add_head_rcu(&f->hlist,
722 vxlan_fdb_head(vxlan, mac, src_vni));
723 } 754 }
724 755
725 if (notify) { 756 if (notify) {
@@ -743,13 +774,15 @@ static void vxlan_fdb_free(struct rcu_head *head)
743 kfree(f); 774 kfree(f);
744} 775}
745 776
746static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) 777static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
778 bool do_notify)
747{ 779{
748 netdev_dbg(vxlan->dev, 780 netdev_dbg(vxlan->dev,
749 "delete %pM\n", f->eth_addr); 781 "delete %pM\n", f->eth_addr);
750 782
751 --vxlan->addrcnt; 783 --vxlan->addrcnt;
752 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); 784 if (do_notify)
785 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
753 786
754 hlist_del_rcu(&f->hlist); 787 hlist_del_rcu(&f->hlist);
755 call_rcu(&f->rcu, vxlan_fdb_free); 788 call_rcu(&f->rcu, vxlan_fdb_free);
@@ -865,7 +898,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
865 return -EAFNOSUPPORT; 898 return -EAFNOSUPPORT;
866 899
867 spin_lock_bh(&vxlan->hash_lock); 900 spin_lock_bh(&vxlan->hash_lock);
868 err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, 901 err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
869 port, src_vni, vni, ifindex, ndm->ndm_flags); 902 port, src_vni, vni, ifindex, ndm->ndm_flags);
870 spin_unlock_bh(&vxlan->hash_lock); 903 spin_unlock_bh(&vxlan->hash_lock);
871 904
@@ -899,7 +932,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
899 goto out; 932 goto out;
900 } 933 }
901 934
902 vxlan_fdb_destroy(vxlan, f); 935 vxlan_fdb_destroy(vxlan, f, true);
903 936
904out: 937out:
905 return 0; 938 return 0;
@@ -1008,7 +1041,7 @@ static bool vxlan_snoop(struct net_device *dev,
1008 1041
1009 /* close off race between vxlan_flush and incoming packets */ 1042 /* close off race between vxlan_flush and incoming packets */
1010 if (netif_running(dev)) 1043 if (netif_running(dev))
1011 vxlan_fdb_create(vxlan, src_mac, src_ip, 1044 vxlan_fdb_update(vxlan, src_mac, src_ip,
1012 NUD_REACHABLE, 1045 NUD_REACHABLE,
1013 NLM_F_EXCL|NLM_F_CREATE, 1046 NLM_F_EXCL|NLM_F_CREATE,
1014 vxlan->cfg.dst_port, 1047 vxlan->cfg.dst_port,
@@ -2366,7 +2399,7 @@ static void vxlan_cleanup(struct timer_list *t)
2366 "garbage collect %pM\n", 2399 "garbage collect %pM\n",
2367 f->eth_addr); 2400 f->eth_addr);
2368 f->state = NUD_STALE; 2401 f->state = NUD_STALE;
2369 vxlan_fdb_destroy(vxlan, f); 2402 vxlan_fdb_destroy(vxlan, f, true);
2370 } else if (time_before(timeout, next_timer)) 2403 } else if (time_before(timeout, next_timer))
2371 next_timer = timeout; 2404 next_timer = timeout;
2372 } 2405 }
@@ -2417,7 +2450,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
2417 spin_lock_bh(&vxlan->hash_lock); 2450 spin_lock_bh(&vxlan->hash_lock);
2418 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); 2451 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
2419 if (f) 2452 if (f)
2420 vxlan_fdb_destroy(vxlan, f); 2453 vxlan_fdb_destroy(vxlan, f, true);
2421 spin_unlock_bh(&vxlan->hash_lock); 2454 spin_unlock_bh(&vxlan->hash_lock);
2422} 2455}
2423 2456
@@ -2471,7 +2504,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
2471 continue; 2504 continue;
2472 /* the all_zeros_mac entry is deleted at vxlan_uninit */ 2505 /* the all_zeros_mac entry is deleted at vxlan_uninit */
2473 if (!is_zero_ether_addr(f->eth_addr)) 2506 if (!is_zero_ether_addr(f->eth_addr))
2474 vxlan_fdb_destroy(vxlan, f); 2507 vxlan_fdb_destroy(vxlan, f, true);
2475 } 2508 }
2476 } 2509 }
2477 spin_unlock_bh(&vxlan->hash_lock); 2510 spin_unlock_bh(&vxlan->hash_lock);
@@ -3162,6 +3195,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
3162{ 3195{
3163 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 3196 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
3164 struct vxlan_dev *vxlan = netdev_priv(dev); 3197 struct vxlan_dev *vxlan = netdev_priv(dev);
3198 struct vxlan_fdb *f = NULL;
3165 int err; 3199 int err;
3166 3200
3167 err = vxlan_dev_configure(net, dev, conf, false, extack); 3201 err = vxlan_dev_configure(net, dev, conf, false, extack);
@@ -3175,24 +3209,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
3175 err = vxlan_fdb_create(vxlan, all_zeros_mac, 3209 err = vxlan_fdb_create(vxlan, all_zeros_mac,
3176 &vxlan->default_dst.remote_ip, 3210 &vxlan->default_dst.remote_ip,
3177 NUD_REACHABLE | NUD_PERMANENT, 3211 NUD_REACHABLE | NUD_PERMANENT,
3178 NLM_F_EXCL | NLM_F_CREATE,
3179 vxlan->cfg.dst_port, 3212 vxlan->cfg.dst_port,
3180 vxlan->default_dst.remote_vni, 3213 vxlan->default_dst.remote_vni,
3181 vxlan->default_dst.remote_vni, 3214 vxlan->default_dst.remote_vni,
3182 vxlan->default_dst.remote_ifindex, 3215 vxlan->default_dst.remote_ifindex,
3183 NTF_SELF); 3216 NTF_SELF, &f);
3184 if (err) 3217 if (err)
3185 return err; 3218 return err;
3186 } 3219 }
3187 3220
3188 err = register_netdevice(dev); 3221 err = register_netdevice(dev);
3222 if (err)
3223 goto errout;
3224
3225 err = rtnl_configure_link(dev, NULL);
3189 if (err) { 3226 if (err) {
3190 vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni); 3227 unregister_netdevice(dev);
3191 return err; 3228 goto errout;
3192 } 3229 }
3193 3230
3231 /* notify default fdb entry */
3232 if (f)
3233 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
3234
3194 list_add(&vxlan->next, &vn->vxlan_list); 3235 list_add(&vxlan->next, &vn->vxlan_list);
3195 return 0; 3236 return 0;
3237errout:
3238 if (f)
3239 vxlan_fdb_destroy(vxlan, f, false);
3240 return err;
3196} 3241}
3197 3242
3198static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], 3243static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
@@ -3427,6 +3472,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
3427 struct vxlan_rdst *dst = &vxlan->default_dst; 3472 struct vxlan_rdst *dst = &vxlan->default_dst;
3428 struct vxlan_rdst old_dst; 3473 struct vxlan_rdst old_dst;
3429 struct vxlan_config conf; 3474 struct vxlan_config conf;
3475 struct vxlan_fdb *f = NULL;
3430 int err; 3476 int err;
3431 3477
3432 err = vxlan_nl2conf(tb, data, 3478 err = vxlan_nl2conf(tb, data,
@@ -3455,16 +3501,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
3455 err = vxlan_fdb_create(vxlan, all_zeros_mac, 3501 err = vxlan_fdb_create(vxlan, all_zeros_mac,
3456 &dst->remote_ip, 3502 &dst->remote_ip,
3457 NUD_REACHABLE | NUD_PERMANENT, 3503 NUD_REACHABLE | NUD_PERMANENT,
3458 NLM_F_CREATE | NLM_F_APPEND,
3459 vxlan->cfg.dst_port, 3504 vxlan->cfg.dst_port,
3460 dst->remote_vni, 3505 dst->remote_vni,
3461 dst->remote_vni, 3506 dst->remote_vni,
3462 dst->remote_ifindex, 3507 dst->remote_ifindex,
3463 NTF_SELF); 3508 NTF_SELF, &f);
3464 if (err) { 3509 if (err) {
3465 spin_unlock_bh(&vxlan->hash_lock); 3510 spin_unlock_bh(&vxlan->hash_lock);
3466 return err; 3511 return err;
3467 } 3512 }
3513 vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
3468 } 3514 }
3469 spin_unlock_bh(&vxlan->hash_lock); 3515 spin_unlock_bh(&vxlan->hash_lock);
3470 } 3516 }
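
The vxlan split separates insertion from notification: __vxlan_dev_create() can now add the default fdb entry silently, finish register_netdevice() and rtnl_configure_link(), and only then emit RTM_NEWNEIGH; on failure the entry is destroyed with notification suppressed, so userspace never observes an fdb entry for a device that was never created. A condensed sketch of that ordering, with my_* names standing in for the vxlan helpers:

	static int my_dev_create(struct my_dev *dev)
	{
		struct my_entry *e = NULL;
		int err;

		err = my_entry_create(dev, &e);	/* insert, no netlink event yet */
		if (err)
			return err;

		err = my_register(dev);		/* e.g. register_netdevice() */
		if (err)
			goto errout;

		if (e)
			my_entry_notify(e);	/* advertise once setup is done */
		return 0;

	errout:
		if (e)
			my_entry_destroy(e, false);	/* false: no RTM_DELNEIGH */
		return err;
	}
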
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e9c2fb318c03..836e0a47b94a 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -6058,8 +6058,19 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
6058 ath10k_mac_max_vht_nss(vht_mcs_mask))); 6058 ath10k_mac_max_vht_nss(vht_mcs_mask)));
6059 6059
6060 if (changed & IEEE80211_RC_BW_CHANGED) { 6060 if (changed & IEEE80211_RC_BW_CHANGED) {
6061 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", 6061 enum wmi_phy_mode mode;
6062 sta->addr, bw); 6062
6063 mode = chan_to_phymode(&def);
6064 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
6065 sta->addr, bw, mode);
6066
6067 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6068 WMI_PEER_PHYMODE, mode);
6069 if (err) {
6070 ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
6071 sta->addr, mode, err);
6072 goto exit;
6073 }
6063 6074
6064 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6075 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6065 WMI_PEER_CHAN_WIDTH, bw); 6076 WMI_PEER_CHAN_WIDTH, bw);
@@ -6100,6 +6111,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
6100 sta->addr); 6111 sta->addr);
6101 } 6112 }
6102 6113
6114exit:
6103 mutex_unlock(&ar->conf_mutex); 6115 mutex_unlock(&ar->conf_mutex);
6104} 6116}
6105 6117
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index b48db54e9865..d68afb65402a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -6144,6 +6144,7 @@ enum wmi_peer_param {
6144 WMI_PEER_NSS = 0x5, 6144 WMI_PEER_NSS = 0x5,
6145 WMI_PEER_USE_4ADDR = 0x6, 6145 WMI_PEER_USE_4ADDR = 0x6,
6146 WMI_PEER_DEBUG = 0xa, 6146 WMI_PEER_DEBUG = 0xa,
6147 WMI_PEER_PHYMODE = 0xd,
6147 WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */ 6148 WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
6148}; 6149};
6149 6150
diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c
index 1279064a3b71..51a038022c8b 100644
--- a/drivers/net/wireless/ath/wcn36xx/testmode.c
+++ b/drivers/net/wireless/ath/wcn36xx/testmode.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * Copyright (c) 2018, The Linux Foundation. All rights reserved. 2 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
3 * 3 *
4 * Permission to use, copy, modify, and/or distribute this software for any 4 * Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index c99a191e8d69..a907d7b065fa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4296,6 +4296,13 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
4296 brcmf_dbg(TRACE, "Enter\n"); 4296 brcmf_dbg(TRACE, "Enter\n");
4297 4297
4298 if (bus) { 4298 if (bus) {
4299 /* Stop watchdog task */
4300 if (bus->watchdog_tsk) {
4301 send_sig(SIGTERM, bus->watchdog_tsk, 1);
4302 kthread_stop(bus->watchdog_tsk);
4303 bus->watchdog_tsk = NULL;
4304 }
4305
4299 /* De-register interrupt handler */ 4306 /* De-register interrupt handler */
4300 brcmf_sdiod_intr_unregister(bus->sdiodev); 4307 brcmf_sdiod_intr_unregister(bus->sdiodev);
4301 4308
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 6e3cf9817730..88f4c89f89ba 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -644,11 +644,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
644 MWIFIEX_FUNC_SHUTDOWN); 644 MWIFIEX_FUNC_SHUTDOWN);
645 } 645 }
646 646
647 if (adapter->workqueue)
648 flush_workqueue(adapter->workqueue);
649
650 mwifiex_usb_free(card);
651
652 mwifiex_dbg(adapter, FATAL, 647 mwifiex_dbg(adapter, FATAL,
653 "%s: removing card\n", __func__); 648 "%s: removing card\n", __func__);
654 mwifiex_remove_card(adapter); 649 mwifiex_remove_card(adapter);
@@ -1356,6 +1351,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
1356{ 1351{
1357 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card; 1352 struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
1358 1353
1354 mwifiex_usb_free(card);
1355
1359 mwifiex_usb_cleanup_tx_aggr(adapter); 1356 mwifiex_usb_cleanup_tx_aggr(adapter);
1360 1357
1361 card->adapter = NULL; 1358 card->adapter = NULL;
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index 9d2f9a776ef1..b804abd464ae 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -986,13 +986,15 @@ static void mt7601u_agc_tune(struct mt7601u_dev *dev)
986 */ 986 */
987 spin_lock_bh(&dev->con_mon_lock); 987 spin_lock_bh(&dev->con_mon_lock);
988 avg_rssi = ewma_rssi_read(&dev->avg_rssi); 988 avg_rssi = ewma_rssi_read(&dev->avg_rssi);
989 WARN_ON_ONCE(avg_rssi == 0); 989 spin_unlock_bh(&dev->con_mon_lock);
990 if (avg_rssi == 0)
991 return;
992
990 avg_rssi = -avg_rssi; 993 avg_rssi = -avg_rssi;
991 if (avg_rssi <= -70) 994 if (avg_rssi <= -70)
992 val -= 0x20; 995 val -= 0x20;
993 else if (avg_rssi <= -60) 996 else if (avg_rssi <= -60)
994 val -= 0x10; 997 val -= 0x10;
995 spin_unlock_bh(&dev->con_mon_lock);
996 998
997 if (val != mt7601u_bbp_rr(dev, 66)) 999 if (val != mt7601u_bbp_rr(dev, 66))
998 mt7601u_bbp_wr(dev, 66, val); 1000 mt7601u_bbp_wr(dev, 66, val);
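
The mt7601u fix is a lock-scope correction: snapshot the shared average under con_mon_lock, drop the lock, then decide, so the new early return for a zero average cannot leave a spinlock held. A minimal sketch of the shape (names illustrative):

	static void my_agc_tune(struct my_dev *dev)
	{
		int avg;

		spin_lock_bh(&dev->lock);
		avg = dev->avg_rssi;
		spin_unlock_bh(&dev->lock);	/* released before any return */

		if (avg == 0)
			return;

		/* act on the snapshot without holding the lock */
	}
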
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 220e2b710208..ae0ca8006849 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -654,8 +654,7 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
654 vif = qtnf_mac_get_base_vif(mac); 654 vif = qtnf_mac_get_base_vif(mac);
655 if (!vif) { 655 if (!vif) {
656 pr_err("MAC%u: primary VIF is not configured\n", mac->macid); 656 pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
657 ret = -EFAULT; 657 return -EFAULT;
658 goto out;
659 } 658 }
660 659
661 if (vif->wdev.iftype != NL80211_IFTYPE_STATION) { 660 if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 39c817eddd78..54c9f6ab0c8c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -484,18 +484,21 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
 
 }
 
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
 {
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 
 	del_timer_sync(&rtlpriv->works.watchdog_timer);
 
-	cancel_delayed_work(&rtlpriv->works.watchdog_wq);
-	cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
-	cancel_delayed_work(&rtlpriv->works.ps_work);
-	cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
-	cancel_delayed_work(&rtlpriv->works.fwevt_wq);
-	cancel_delayed_work(&rtlpriv->works.c2hcmd_wq);
+	cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq);
+	if (ips_wq)
+		cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+	else
+		cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
+	cancel_delayed_work_sync(&rtlpriv->works.ps_work);
+	cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq);
+	cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq);
+	cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq);
 }
 EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
 
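The new ips_wq flag encodes a basic workqueue rule: a work item must never wait synchronously for its own cancellation. rtl_ps_disable_nic() runs from ips_nic_off_wq itself (see the ps.c hunk below, which passes true), so only the asynchronous cancel is safe there. A minimal sketch of the deadlock this avoids (hypothetical work item, not the rtlwifi structures):

static void my_off_work_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_off_work, my_off_work_fn);

static void my_off_work_fn(struct work_struct *work)
{
	/*
	 * Running inside my_off_work: cancel_delayed_work_sync() on
	 * ourselves would wait for this very function to return,
	 * a self-deadlock. Only the asynchronous cancel is safe here.
	 */
	cancel_delayed_work(&my_off_work);
}

static void teardown_from_any_other_context(void)
{
	/* Outside the work item itself, waiting for completion is fine. */
	cancel_delayed_work_sync(&my_off_work);
}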
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index 912f205779c3..a7ae40eaa3cd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -121,7 +121,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
 void rtl_deinit_rfkill(struct ieee80211_hw *hw);
 
 void rtl_watch_dog_timer_callback(struct timer_list *t);
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq);
 
 bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
 int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index cfea57efa7f4..4bf7967590ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -130,7 +130,6 @@ found_alt:
 						  firmware->size);
 		rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
 	}
-	rtlpriv->rtlhal.fwsize = firmware->size;
 	release_firmware(firmware);
 }
 
@@ -196,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
 		/* reset sec info */
 		rtl_cam_reset_sec_info(hw);
 
-		rtl_deinit_deferred_work(hw);
+		rtl_deinit_deferred_work(hw, false);
 	}
 	rtlpriv->intf_ops->adapter_stop(hw);
 
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index ae13bcfb3bf0..5d1fda16fc8c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -2377,7 +2377,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
 		ieee80211_unregister_hw(hw);
 		rtlmac->mac80211_registered = 0;
 	} else {
-		rtl_deinit_deferred_work(hw);
+		rtl_deinit_deferred_work(hw, false);
 		rtlpriv->intf_ops->adapter_stop(hw);
 	}
 	rtlpriv->cfg->ops->disable_interrupt(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 71af24e2e051..479a4cfc245d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -71,7 +71,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 
 	/*<1> Stop all timer */
-	rtl_deinit_deferred_work(hw);
+	rtl_deinit_deferred_work(hw, true);
 
 	/*<2> Disable Interrupt */
 	rtlpriv->cfg->ops->disable_interrupt(hw);
@@ -292,7 +292,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
 	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
 	enum rf_pwrstate rtstate;
 
-	cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+	cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
 
 	mutex_lock(&rtlpriv->locks.ips_mutex);
 	if (ppsc->inactiveps) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index f9faffc498bc..2ac5004d7a40 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1132,7 +1132,7 @@ void rtl_usb_disconnect(struct usb_interface *intf)
 		ieee80211_unregister_hw(hw);
 		rtlmac->mac80211_registered = 0;
 	} else {
-		rtl_deinit_deferred_work(hw);
+		rtl_deinit_deferred_work(hw, false);
 		rtlpriv->intf_ops->adapter_stop(hw);
 	}
 	/*deinit rfkill */
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 2e96b34bc936..fb667bf469c7 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -278,6 +278,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 			return -EIO;
 		if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
 			return -EIO;
+		return 0;
 	}
 
 	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 46df030b2c3f..bf65501e6ed6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class;
 static void nvme_ns_remove(struct nvme_ns *ns);
 static int nvme_revalidate_disk(struct gendisk *disk);
 static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+					   unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+	/*
+	 * Revalidating a dead namespace sets capacity to 0. This will end
+	 * buffered writers dirtying pages that can't be synced.
+	 */
+	if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+		return;
+	revalidate_disk(ns->disk);
+	blk_set_queue_dying(ns->queue);
+	/* Forcibly unquiesce queues to avoid blocking dispatch */
+	blk_mq_unquiesce_queue(ns->queue);
+}
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
 {
@@ -1044,14 +1060,17 @@ EXPORT_SYMBOL_GPL(nvme_set_queue_count);
 
 static void nvme_enable_aen(struct nvme_ctrl *ctrl)
 {
-	u32 result;
+	u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
 	int status;
 
-	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
-			ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+	if (!supported_aens)
+		return;
+
+	status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
+			NULL, 0, &result);
 	if (status)
 		dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
-			 ctrl->oaes & NVME_AEN_SUPPORTED);
+			 supported_aens);
 }
 
 static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
@@ -1151,19 +1170,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 
 static void nvme_update_formats(struct nvme_ctrl *ctrl)
 {
-	struct nvme_ns *ns, *next;
-	LIST_HEAD(rm_list);
+	struct nvme_ns *ns;
 
-	down_write(&ctrl->namespaces_rwsem);
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		if (ns->disk && nvme_revalidate_disk(ns->disk)) {
-			list_move_tail(&ns->list, &rm_list);
-		}
-	}
-	up_write(&ctrl->namespaces_rwsem);
+	down_read(&ctrl->namespaces_rwsem);
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		if (ns->disk && nvme_revalidate_disk(ns->disk))
+			nvme_set_queue_dying(ns);
+	up_read(&ctrl->namespaces_rwsem);
 
-	list_for_each_entry_safe(ns, next, &rm_list, list)
-		nvme_ns_remove(ns);
+	nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1218,7 +1233,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
 	status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
 			(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
-			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
+			(void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
 			0, &cmd.result, timeout);
 	nvme_passthru_end(ctrl, effects);
 
@@ -3138,7 +3153,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 
 	down_write(&ctrl->namespaces_rwsem);
 	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
-		if (ns->head->ns_id > nsid)
+		if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
 			list_move_tail(&ns->list, &rm_list);
 	}
 	up_write(&ctrl->namespaces_rwsem);
@@ -3542,19 +3557,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 	if (ctrl->admin_q)
 		blk_mq_unquiesce_queue(ctrl->admin_q);
 
-	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		/*
-		 * Revalidating a dead namespace sets capacity to 0. This will
-		 * end buffered writers dirtying pages that can't be synced.
-		 */
-		if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-			continue;
-		revalidate_disk(ns->disk);
-		blk_set_queue_dying(ns->queue);
+	list_for_each_entry(ns, &ctrl->namespaces, list)
+		nvme_set_queue_dying(ns);
 
-		/* Forcibly unquiesce queues to avoid blocking dispatch */
-		blk_mq_unquiesce_queue(ns->queue);
-	}
 	up_read(&ctrl->namespaces_rwsem);
 }
 EXPORT_SYMBOL_GPL(nvme_kill_queues);
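Consolidating the dying-queue logic into nvme_set_queue_dying() works because test_and_set_bit() makes the teardown idempotent: the passthru path and nvme_kill_queues() can race, and exactly one of them does the work. A minimal sketch of that pattern (my_obj and MY_OBJ_DEAD are hypothetical names):

struct my_obj {
	unsigned long flags;
};

#define MY_OBJ_DEAD	0	/* hypothetical flag bit */

static void my_obj_kill(struct my_obj *obj)
{
	/*
	 * test_and_set_bit() is an atomic read-modify-write: the first
	 * caller sees the bit clear and proceeds; every later or
	 * concurrent caller sees it set and returns. The teardown
	 * below therefore runs exactly once.
	 */
	if (test_and_set_bit(MY_OBJ_DEAD, &obj->flags))
		return;

	/* one-time teardown, safely reachable from several call sites */
}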
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 903eb4545e26..f7efe5a58cc7 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -539,14 +539,18 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 /*
  * For something we're not in a state to send to the device the default action
  * is to busy it and retry it after the controller state is recovered. However,
- * anything marked for failfast or nvme multipath is immediately failed.
+ * if the controller is deleting or if anything is marked for failfast or
+ * nvme multipath it is immediately failed.
  *
  * Note: commands used to initialize the controller will be marked for failfast.
  * Note: nvme cli/ioctl commands are marked for failfast.
  */
-blk_status_t nvmf_fail_nonready_command(struct request *rq)
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+		struct request *rq)
 {
-	if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+	if (ctrl->state != NVME_CTRL_DELETING &&
+	    ctrl->state != NVME_CTRL_DEAD &&
+	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
 		return BLK_STS_RESOURCE;
 	nvme_req(rq)->status = NVME_SC_ABORT_REQ;
 	return BLK_STS_IOERR;
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index e1818a27aa2d..aa2fdb2a2e8f 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_fail_nonready_command(struct request *rq);
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+		struct request *rq);
 bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		bool queue_live);
 
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 41d45a1b5c62..9bac912173ba 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2272,7 +2272,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
 	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvmf_fail_nonready_command(rq);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
 	ret = nvme_setup_cmd(ns, rq, sqe);
 	if (ret)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ba943f211687..ddd441b1516a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2556,11 +2556,6 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	quirks |= check_vendor_combination_bug(pdev);
 
-	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
-			quirks);
-	if (result)
-		goto release_pools;
-
 	/*
 	 * Double check that our mempool alloc size will cover the biggest
 	 * command we support.
@@ -2578,6 +2573,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto release_pools;
 	}
 
+	result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
+			quirks);
+	if (result)
+		goto release_mempool;
+
 	dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
 
 	nvme_get_ctrl(&dev->ctrl);
@@ -2585,6 +2585,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	return 0;
 
+ release_mempool:
+	mempool_destroy(dev->iod_mempool);
  release_pools:
 	nvme_release_prp_pools(dev);
  unmap:
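The reordering keeps the probe's unwind labels consistent: nvme_init_ctrl() now runs after the mempool is allocated, so its error path must first destroy the mempool and then fall through to the older labels. The general shape of this idiom, with hypothetical resource names, is:

static int acquire_a(void);	/* hypothetical resources */
static int acquire_b(void);
static int acquire_c(void);
static void release_a(void);
static void release_b(void);

static int my_probe(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto err_release_a;

	err = acquire_c();
	if (err)
		goto err_release_b;

	return 0;

	/* unwind labels run in reverse order of acquisition and fall through */
err_release_b:
	release_b();
err_release_a:
	release_a();
	return err;
}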
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 518c5b09038c..66ec5985c9f3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1639,7 +1639,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	WARN_ON_ONCE(rq->tag < 0);
 
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvmf_fail_nonready_command(rq);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
 	dev = queue->device->dev;
 	ib_dma_sync_single_for_cpu(dev, sqe->dma,
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d3f3b3ec4d1a..ebea1373d1b7 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -282,6 +282,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 {
 	struct nvmet_ns *ns = to_nvmet_ns(item);
 	struct nvmet_subsys *subsys = ns->subsys;
+	size_t len;
 	int ret;
 
 	mutex_lock(&subsys->lock);
@@ -289,10 +290,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 	if (ns->enabled)
 		goto out_unlock;
 
-	kfree(ns->device_path);
+	ret = -EINVAL;
+	len = strcspn(page, "\n");
+	if (!len)
+		goto out_unlock;
 
+	kfree(ns->device_path);
 	ret = -ENOMEM;
-	ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
+	ns->device_path = kstrndup(page, len, GFP_KERNEL);
 	if (!ns->device_path)
 		goto out_unlock;
 
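The new check leans on strcspn() semantics: it returns the length of the initial segment of the string containing none of the reject characters, so an empty write, or one beginning with a newline, now fails with -EINVAL instead of storing an empty path. Illustrative values (a sketch, not code from the driver):

	strcspn("", "\n");               /* 0  -> rejected, -EINVAL      */
	strcspn("\n", "\n");             /* 0  -> rejected, -EINVAL      */
	strcspn("/dev/nvme0n1\n", "\n"); /* 12 -> "/dev/nvme0n1" is kept */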
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 74d4b785d2da..9838103f2d62 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -339,7 +339,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 		goto out_unlock;
 
 	ret = nvmet_bdev_ns_enable(ns);
-	if (ret)
+	if (ret == -ENOTBLK)
 		ret = nvmet_file_ns_enable(ns);
 	if (ret)
 		goto out_unlock;
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 408279cb6f2c..29b4b236afd8 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
 	struct work_struct		work;
 } __aligned(sizeof(unsigned long long));
 
+/* desired maximum for a single sequence - if sg list allows it */
 #define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
 
 enum nvmet_fcp_datadir {
 	NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
 	struct nvme_fc_cmd_iu		cmdiubuf;
 	struct nvme_fc_ersp_iu		rspiubuf;
 	dma_addr_t			rspdma;
+	struct scatterlist		*next_sg;
 	struct scatterlist		*data_sg;
 	int				data_sg_cnt;
 	u32				offset;
@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 	INIT_LIST_HEAD(&newrec->assoc_list);
 	kref_init(&newrec->ref);
 	ida_init(&newrec->assoc_cnt);
-	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
-					template->max_sgl_segments);
+	newrec->max_sg_cnt = template->max_sgl_segments;
 
 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
 	if (ret) {
@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 				((fod->io_dir == NVMET_FCP_WRITE) ?
 					DMA_FROM_DEVICE : DMA_TO_DEVICE));
 				/* note: write from initiator perspective */
+	fod->next_sg = fod->data_sg;
 
 	return 0;
 
@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 			struct nvmet_fc_fcp_iod *fod, u8 op)
 {
 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct scatterlist *sg = fod->next_sg;
 	unsigned long flags;
-	u32 tlen;
+	u32 remaininglen = fod->req.transfer_len - fod->offset;
+	u32 tlen = 0;
 	int ret;
 
 	fcpreq->op = op;
 	fcpreq->offset = fod->offset;
 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
 
-	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-			(fod->req.transfer_len - fod->offset));
+	/*
+	 * For the next sequence:
+	 *   break at an sg element boundary, and attempt to keep the
+	 *   sequence length capped at NVMET_FC_MAX_SEQ_LENGTH, but
+	 *   allow the sequence to be longer if a single sg element is
+	 *   larger than that amount. This is done to avoid creating
+	 *   a new sg list to use for the tgtport api.
+	 */
+	fcpreq->sg = sg;
+	fcpreq->sg_cnt = 0;
+	while (tlen < remaininglen &&
+	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+		fcpreq->sg_cnt++;
+		tlen += sg_dma_len(sg);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+		fcpreq->sg_cnt++;
+		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen)
+		fod->next_sg = sg;
+	else
+		fod->next_sg = NULL;
+
 	fcpreq->transfer_length = tlen;
 	fcpreq->transferred_length = 0;
 	fcpreq->fcp_error = 0;
 	fcpreq->rsplen = 0;
 
-	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
-	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
-
 	/*
 	 * If the last READDATA request: check if LLDD supports
 	 * combined xfr with response.
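A worked example of the new walk, assuming uniform 4 KiB DMA segments and a generous max_sg_cnt: because the comparison against NVMET_FC_MAX_SEQ_LENGTH (262144 bytes) is strict, the loop gathers 63 elements (63 x 4096 = 258048 bytes, i.e. 252 KiB) and stops just short of the 256 KiB cap, leaving the 64th element to start the next sequence. Conversely, if the very first element were a single 1 MiB mapping, the while loop would admit nothing, so the fallback branch takes that one element whole; the sequence then deliberately exceeds the cap rather than splitting the element into a new sg list.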
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d8d91f04bd7e..ae7586b8be07 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_status_t ret;
 
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
-		return nvmf_fail_nonready_command(req);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
 
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
 	if (ret)
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index b5b0cdc21d01..514d1dfc5630 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -936,6 +936,10 @@ struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
 		return cell;
 	}
 
+	/* NULL cell_id only allowed for device tree; invalid otherwise */
+	if (!cell_id)
+		return ERR_PTR(-EINVAL);
+
 	return nvmem_cell_get_from_list(cell_id);
 }
 EXPORT_SYMBOL_GPL(nvmem_cell_get);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 848f549164cd..466e3c8582f0 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -102,7 +102,7 @@ static u32 phandle_cache_mask;
  * - the phandle lookup overhead reduction provided by the cache
  *   will likely be less
  */
-static void of_populate_phandle_cache(void)
+void of_populate_phandle_cache(void)
 {
 	unsigned long flags;
 	u32 cache_entries;
@@ -134,8 +134,7 @@ out:
 	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 }
 
-#ifndef CONFIG_MODULES
-static int __init of_free_phandle_cache(void)
+int of_free_phandle_cache(void)
 {
 	unsigned long flags;
 
@@ -148,6 +147,7 @@ static int __init of_free_phandle_cache(void)
 
 	return 0;
 }
+#if !defined(CONFIG_MODULES)
 late_initcall_sync(of_free_phandle_cache);
 #endif
 
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 891d780c076a..216175d11d3d 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -79,6 +79,8 @@ int of_resolve_phandles(struct device_node *tree);
 #if defined(CONFIG_OF_OVERLAY)
 void of_overlay_mutex_lock(void);
 void of_overlay_mutex_unlock(void);
+int of_free_phandle_cache(void);
+void of_populate_phandle_cache(void);
 #else
 static inline void of_overlay_mutex_lock(void) {};
 static inline void of_overlay_mutex_unlock(void) {};
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 7baa53e5b1d7..eda57ef12fd0 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -804,6 +804,8 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
 		goto err_free_overlay_changeset;
 	}
 
+	of_populate_phandle_cache();
+
 	ret = __of_changeset_apply_notify(&ovcs->cset);
 	if (ret)
 		pr_err("overlay changeset entry notify error %d\n", ret);
@@ -1046,8 +1048,17 @@ int of_overlay_remove(int *ovcs_id)
 
 	list_del(&ovcs->ovcs_list);
 
+	/*
+	 * Disable the phandle cache. This avoids a race condition that
+	 * would otherwise arise from removing a cache entry while the
+	 * associated node is being deleted.
+	 */
+	of_free_phandle_cache();
+
 	ret_apply = 0;
 	ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
+
+	of_populate_phandle_cache();
+
 	if (ret) {
 		if (ret_apply)
 			devicetree_state_flags |= DTSF_REVERT_FAIL;
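The pairing is the whole point: the cache must stay empty for the entire window in which nodes can disappear, and it is rebuilt only once the tree is stable again. A sketch of the invariant (the two functions are the real ones from the hunks above; the bracketed step is illustrative):

	of_free_phandle_cache();	/* lookups fall back to a full tree walk */

	/* window in which device nodes may be deleted (changeset revert) */

	of_populate_phandle_cache();	/* rebuild from the now-stable tree */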
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 16f52c626b4b..91b0194240a5 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -58,7 +58,6 @@ config PCIE_DW_PLAT_HOST
 	depends on PCI && PCI_MSI_IRQ_DOMAIN
 	select PCIE_DW_HOST
 	select PCIE_DW_PLAT
-	default y
 	help
 	  Enables support for the PCIe controller in the Designware IP to
 	  work in host mode. There are two instances of PCIe controller in
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 781aa03aeede..29a05759a294 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -363,7 +363,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
 	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
 		switch (resource_type(win->res)) {
 		case IORESOURCE_IO:
-			ret = pci_remap_iospace(win->res, pp->io_base);
+			ret = devm_pci_remap_iospace(dev, win->res,
+						     pp->io_base);
 			if (ret) {
 				dev_warn(dev, "Error %d: failed to map resource %pR\n",
 					 ret, win->res);
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index d3172d5d3d35..0fae816fba39 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -849,7 +849,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
 					     0, 0xF8000000, 0,
 					     lower_32_bits(res->start),
 					     OB_PCIE_IO);
-			err = pci_remap_iospace(res, iobase);
+			err = devm_pci_remap_iospace(dev, res, iobase);
 			if (err) {
 				dev_warn(dev, "error %d: failed to map resource %pR\n",
 					 err, res);
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index a1ebe9ed441f..bf5ece5d9291 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -355,11 +355,13 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
 	irq = of_irq_get(intc, 0);
 	if (irq <= 0) {
 		dev_err(p->dev, "failed to get parent IRQ\n");
+		of_node_put(intc);
 		return irq ?: -EINVAL;
 	}
 
 	p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
 					     &faraday_pci_irqdomain_ops, p);
+	of_node_put(intc);
 	if (!p->irqdomain) {
 		dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
 		return -EINVAL;
@@ -501,7 +503,7 @@ static int faraday_pci_probe(struct platform_device *pdev)
 		dev_err(dev, "illegal IO mem size\n");
 		return -EINVAL;
 	}
-	ret = pci_remap_iospace(io, io_base);
+	ret = devm_pci_remap_iospace(dev, io, io_base);
 	if (ret) {
 		dev_warn(dev, "error %d: failed to map resource %pR\n",
 			 ret, io);
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 6cc5036ac83c..f6325f1a89e8 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1073,6 +1073,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	struct pci_bus *pbus;
 	struct pci_dev *pdev;
 	struct cpumask *dest;
+	unsigned long flags;
 	struct compose_comp_ctxt comp;
 	struct tran_int_desc *int_desc;
 	struct {
@@ -1164,14 +1165,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	 * the channel callback directly when channel->target_cpu is
 	 * the current CPU. When the higher level interrupt code
 	 * calls us with interrupt enabled, let's add the
-	 * local_bh_disable()/enable() to avoid race.
+	 * local_irq_save()/restore() to avoid race:
+	 * hv_pci_onchannelcallback() can also run in a tasklet.
 	 */
-	local_bh_disable();
+	local_irq_save(flags);
 
 	if (hbus->hdev->channel->target_cpu == smp_processor_id())
 		hv_pci_onchannelcallback(hbus);
 
-	local_bh_enable();
+	local_irq_restore(flags);
 
 	if (hpdev->state == hv_pcichild_ejecting) {
 		dev_err_once(&hbus->hdev->device,
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index 68b8bfbdb867..d219404bad92 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -537,7 +537,7 @@ static int v3_pci_setup_resource(struct v3_pci *v3,
 		v3->io_bus_addr = io->start - win->offset;
 		dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
 			io, &v3->io_bus_addr);
-		ret = pci_remap_iospace(io, io_base);
+		ret = devm_pci_remap_iospace(dev, io, io_base);
 		if (ret) {
 			dev_warn(dev,
 				 "error %d: failed to map resource %pR\n",
diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c
index 994f32061b32..f59ad2728c0b 100644
--- a/drivers/pci/controller/pci-versatile.c
+++ b/drivers/pci/controller/pci-versatile.c
@@ -82,7 +82,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
 
 		switch (resource_type(res)) {
 		case IORESOURCE_IO:
-			err = pci_remap_iospace(res, iobase);
+			err = devm_pci_remap_iospace(dev, res, iobase);
 			if (err) {
 				dev_warn(dev, "error %d: failed to map resource %pR\n",
 					 err, res);
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index d854d67e873c..ffda3e8b4742 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -423,7 +423,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
 	case IORESOURCE_IO:
 		xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
 					res->start - window->offset);
-		ret = pci_remap_iospace(res, io_base);
+		ret = devm_pci_remap_iospace(dev, res, io_base);
 		if (ret < 0)
 			return ret;
 		break;
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 0baabe30858f..861dda69f366 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1109,7 +1109,7 @@ static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
 	if (err < 0)
 		return err;
 
-	pci_remap_iospace(&pcie->pio, pcie->io.start);
+	devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
 
 	return 0;
 }
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index 874d75c9ee4a..c8febb009454 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -680,7 +680,11 @@ static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
 	if (err)
 		return err;
 
-	return phy_power_on(pcie->phy);
+	err = phy_power_on(pcie->phy);
+	if (err)
+		phy_exit(pcie->phy);
+
+	return err;
 }
 
 static int rcar_msi_alloc(struct rcar_msi *chip)
@@ -1165,7 +1169,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 	if (rcar_pcie_hw_init(pcie)) {
 		dev_info(dev, "PCIe link down\n");
 		err = -ENODEV;
-		goto err_clk_disable;
+		goto err_phy_shutdown;
 	}
 
 	data = rcar_pci_read_reg(pcie, MACSR);
@@ -1177,7 +1181,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
 			dev_err(dev,
 				"failed to enable MSI support: %d\n",
 				err);
-			goto err_clk_disable;
+			goto err_phy_shutdown;
 		}
 	}
 
@@ -1191,6 +1195,12 @@ err_msi_teardown:
 	if (IS_ENABLED(CONFIG_PCI_MSI))
 		rcar_pcie_teardown_msi(pcie);
 
+err_phy_shutdown:
+	if (pcie->phy) {
+		phy_power_off(pcie->phy);
+		phy_exit(pcie->phy);
+	}
+
 err_clk_disable:
 	clk_disable_unprepare(pcie->bus_clk);
 
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 6a4bbb5b3de0..fb32840ce8e6 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -559,7 +559,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
 							PCI_NUM_INTX,
 							&legacy_domain_ops,
 							pcie);
-
+	of_node_put(legacy_intc_node);
 	if (!pcie->legacy_irq_domain) {
 		dev_err(dev, "failed to create IRQ domain\n");
 		return -ENOMEM;
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index b110a3a814e3..7b1389d8e2a5 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -509,6 +509,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
 	port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
 						 &intx_domain_ops,
 						 port);
+	of_node_put(pcie_intc_node);
 	if (!port->leg_domain) {
 		dev_err(dev, "Failed to get a INTx IRQ domain\n");
 		return -ENODEV;
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 523a8cab3bfb..825fa24427a3 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -137,6 +137,20 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
 }
 EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
 
+static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
+{
+	struct config_group *group, *tmp;
+
+	if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+		return;
+
+	mutex_lock(&pci_epf_mutex);
+	list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
+		pci_ep_cfs_remove_epf_group(group);
+	list_del(&driver->epf_group);
+	mutex_unlock(&pci_epf_mutex);
+}
+
 /**
  * pci_epf_unregister_driver() - unregister the PCI EPF driver
  * @driver: the PCI EPF driver that has to be unregistered
@@ -145,17 +159,38 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
  */
 void pci_epf_unregister_driver(struct pci_epf_driver *driver)
 {
-	struct config_group *group;
-
-	mutex_lock(&pci_epf_mutex);
-	list_for_each_entry(group, &driver->epf_group, group_entry)
-		pci_ep_cfs_remove_epf_group(group);
-	list_del(&driver->epf_group);
-	mutex_unlock(&pci_epf_mutex);
+	pci_epf_remove_cfs(driver);
 	driver_unregister(&driver->driver);
 }
 EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
 
+static int pci_epf_add_cfs(struct pci_epf_driver *driver)
+{
+	struct config_group *group;
+	const struct pci_epf_device_id *id;
+
+	if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+		return 0;
+
+	INIT_LIST_HEAD(&driver->epf_group);
+
+	id = driver->id_table;
+	while (id->name[0]) {
+		group = pci_ep_cfs_add_epf_group(id->name);
+		if (IS_ERR(group)) {
+			pci_epf_remove_cfs(driver);
+			return PTR_ERR(group);
+		}
+
+		mutex_lock(&pci_epf_mutex);
+		list_add_tail(&group->group_entry, &driver->epf_group);
+		mutex_unlock(&pci_epf_mutex);
+		id++;
+	}
+
+	return 0;
+}
+
 /**
  * __pci_epf_register_driver() - register a new PCI EPF driver
  * @driver: structure representing PCI EPF driver
@@ -167,8 +202,6 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
 			      struct module *owner)
 {
 	int ret;
-	struct config_group *group;
-	const struct pci_epf_device_id *id;
 
 	if (!driver->ops)
 		return -EINVAL;
@@ -183,16 +216,7 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
 	if (ret)
 		return ret;
 
-	INIT_LIST_HEAD(&driver->epf_group);
-
-	id = driver->id_table;
-	while (id->name[0]) {
-		group = pci_ep_cfs_add_epf_group(id->name);
-		mutex_lock(&pci_epf_mutex);
-		list_add_tail(&group->group_entry, &driver->epf_group);
-		mutex_unlock(&pci_epf_mutex);
-		id++;
-	}
+	pci_epf_add_cfs(driver);
 
 	return 0;
 }
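Both the old and new loops rely on the id_table termination convention: a pci_epf_device_id array ends with an all-zero sentinel entry, so the while (id->name[0]) walk stops at the first empty name. A hypothetical driver table, for illustration only:

static const struct pci_epf_device_id my_epf_ids[] = {
	{ .name = "my_epf_function" },	/* hypothetical function name */
	{ },	/* sentinel: name[0] == '\0' terminates the walk */
};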
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index d0d73dbbd5ca..0f04ae648cf1 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -575,6 +575,22 @@ void pci_iov_release(struct pci_dev *dev)
 }
 
 /**
+ * pci_iov_remove - clean up SR-IOV state after PF driver is detached
+ * @dev: the PCI device
+ */
+void pci_iov_remove(struct pci_dev *dev)
+{
+	struct pci_sriov *iov = dev->sriov;
+
+	if (!dev->is_physfn)
+		return;
+
+	iov->driver_max_VFs = iov->total_VFs;
+	if (iov->num_VFs)
+		pci_warn(dev, "driver left SR-IOV enabled after remove\n");
+}
+
+/**
  * pci_iov_update_resource - update a VF BAR
  * @dev: the PCI device
  * @resno: the resource number
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index d088c9147f10..69a60d6ebd73 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -612,7 +612,7 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
 
 		switch (resource_type(res)) {
 		case IORESOURCE_IO:
-			err = pci_remap_iospace(res, iobase);
+			err = devm_pci_remap_iospace(dev, res, iobase);
 			if (err) {
 				dev_warn(dev, "error %d: failed to map resource %pR\n",
 					 err, res);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 65113b6eed14..89ee6a2b6eb8 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -629,6 +629,18 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
 {
 	struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
 
+	/*
+	 * In some cases (e.g. Samsung 305V4A) leaving a bridge in suspend over
+	 * system-wide suspend/resume confuses the platform firmware, so avoid
+	 * doing that, unless the bridge has a driver that should take care of
+	 * the PM handling. According to Section 16.1.6 of ACPI 6.2, endpoint
+	 * devices are expected to be in D3 before invoking the S3 entry path
+	 * from the firmware, so they should not be affected by this issue.
+	 */
+	if (pci_is_bridge(dev) && !dev->driver &&
+	    acpi_target_system_state() != ACPI_STATE_S0)
+		return true;
+
 	if (!adev || !acpi_device_power_manageable(adev))
 		return false;
 
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index c125d53033c6..6792292b5fc7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -445,6 +445,7 @@ static int pci_device_remove(struct device *dev)
 		}
 		pcibios_free_irq(pci_dev);
 		pci_dev->driver = NULL;
+		pci_iov_remove(pci_dev);
 	}
 
 	/* Undo the runtime PM settings in local_pci_probe() */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 22adaf35b136..aa1684d99b70 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3579,6 +3579,44 @@ void pci_unmap_iospace(struct resource *res)
 }
 EXPORT_SYMBOL(pci_unmap_iospace);
 
+static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
+{
+	struct resource **res = ptr;
+
+	pci_unmap_iospace(*res);
+}
+
+/**
+ * devm_pci_remap_iospace - Managed pci_remap_iospace()
+ * @dev: Generic device to remap IO address for
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
+ *
+ * Managed pci_remap_iospace(). Map is automatically unmapped on driver
+ * detach.
+ */
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+			   phys_addr_t phys_addr)
+{
+	const struct resource **ptr;
+	int error;
+
+	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	error = pci_remap_iospace(res, phys_addr);
+	if (error) {
+		devres_free(ptr);
+	} else {
+		*ptr = res;
+		devres_add(dev, ptr);
+	}
+
+	return error;
+}
+EXPORT_SYMBOL(devm_pci_remap_iospace);
+
 /**
  * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
  * @dev: Generic device to remap IO address for
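This is the standard devres shape: the devres node stores a pointer to the resource, and the release callback runs pci_unmap_iospace() automatically on driver detach, which is why the host-controller hunks above could drop their manual unmap handling. A minimal caller sketch (hypothetical probe, error handling trimmed):

static int my_host_probe(struct platform_device *pdev)
{
	struct resource *io_res;	/* an IORESOURCE_IO window, e.g. from DT */
	phys_addr_t io_phys;		/* CPU physical address backing it */

	/* ... fill io_res and io_phys from the bridge's ranges ... */

	/* mapped now; unmapped for us when the driver detaches */
	return devm_pci_remap_iospace(&pdev->dev, io_res, io_phys);
}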
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index c358e7a07f3f..882f1f9596df 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -311,6 +311,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
 #ifdef CONFIG_PCI_IOV
 int pci_iov_init(struct pci_dev *dev);
 void pci_iov_release(struct pci_dev *dev);
+void pci_iov_remove(struct pci_dev *dev);
 void pci_iov_update_resource(struct pci_dev *dev, int resno);
 resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
 void pci_restore_iov_state(struct pci_dev *dev);
@@ -325,6 +326,9 @@ static inline void pci_iov_release(struct pci_dev *dev)
 
 {
 }
+static inline void pci_iov_remove(struct pci_dev *dev)
+{
+}
 static inline void pci_restore_iov_state(struct pci_dev *dev)
 {
 }
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index f7ce0cb0b0b7..f02e334beb45 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -295,6 +295,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
 
 	parent = udev->subordinate;
 	pci_lock_rescan_remove();
+	pci_dev_get(dev);
 	list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
 					 bus_list) {
 		pci_dev_get(pdev);
@@ -328,6 +329,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
 		pci_info(dev, "Device recovery from fatal error failed\n");
 	}
 
+	pci_dev_put(dev);
 	pci_unlock_rescan_remove();
 }
 
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 1b7febc43da9..29d2c3b1913a 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -962,6 +962,10 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
 {
 	void __iomem *ctrl = params->ctrl_regs;
 
+	USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+	/* 1 millisecond - for USB clocks to settle down */
+	usleep_range(1000, 2000);
+
 	if (BRCM_ID(params->family_id) == 0x7366) {
 		/*
 		 * The PHY3_SOFT_RESETB bits default to the wrong state.
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 23705e1a0023..0075fb0bef8c 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -182,13 +182,13 @@ static void phy_mdm6600_status(struct work_struct *work)
 	ddata = container_of(work, struct phy_mdm6600, status_work.work);
 	dev = ddata->dev;
 
-	error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
+	error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
 					       ddata->status_gpios->desc,
 					       values);
 	if (error)
 		return;
 
-	for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++) {
+	for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
 		val |= values[i] << i;
 		dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
 			__func__, i, values[i], val);
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index 35c17653c694..87618a4e90e4 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -460,8 +460,8 @@ static int nsp_pinmux_enable(struct pinctrl_dev *pctrl_dev,
 	const struct nsp_pin_function *func;
 	const struct nsp_pin_group *grp;
 
-	if (grp_select > pinctrl->num_groups ||
-	    func_select > pinctrl->num_functions)
+	if (grp_select >= pinctrl->num_groups ||
+	    func_select >= pinctrl->num_functions)
 		return -EINVAL;
 
 	func = &pinctrl->functions[func_select];
@@ -577,6 +577,8 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
 		return PTR_ERR(pinctrl->base0);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res)
+		return -EINVAL;
 	pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
 					      resource_size(res));
 	if (!pinctrl->base1) {
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index e3f1ab2290fc..4c4740ffeb9c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -1424,7 +1424,7 @@ static struct pinctrl_desc mtk_desc = {
 
 static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
 {
-        struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
+        struct mtk_pinctrl *hw = gpiochip_get_data(chip);
         int value, err;
 
         err = mtk_hw_get_value(hw, gpio, PINCTRL_PIN_REG_DI, &value);
@@ -1436,7 +1436,7 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
 
 static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
 {
-        struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
+        struct mtk_pinctrl *hw = gpiochip_get_data(chip);
 
         mtk_hw_set_value(hw, gpio, PINCTRL_PIN_REG_DO, !!value);
 }
@@ -1508,11 +1508,20 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
         if (ret < 0)
                 return ret;
 
-        ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
-                                     chip->ngpio);
-        if (ret < 0) {
-                gpiochip_remove(chip);
-                return ret;
+        /* Just for backward compatible for these old pinctrl nodes without
+         * "gpio-ranges" property. Otherwise, called directly from a
+         * DeviceTree-supported pinctrl driver is DEPRECATED.
+         * Please see Section 2.1 of
+         * Documentation/devicetree/bindings/gpio/gpio.txt on how to
+         * bind pinctrl and gpio drivers via the "gpio-ranges" property.
+         */
+        if (!of_find_property(np, "gpio-ranges", NULL)) {
+                ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
+                                             chip->ngpio);
+                if (ret < 0) {
+                        gpiochip_remove(chip);
+                        return ret;
+                }
         }
 
         return 0;
@@ -1695,15 +1704,16 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
         mtk_desc.custom_conf_items = mtk_conf_items;
 #endif
 
-        hw->pctrl = devm_pinctrl_register(&pdev->dev, &mtk_desc, hw);
-        if (IS_ERR(hw->pctrl))
-                return PTR_ERR(hw->pctrl);
+        err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw,
+                                             &hw->pctrl);
+        if (err)
+                return err;
 
         /* Setup groups descriptions per SoC types */
         err = mtk_build_groups(hw);
         if (err) {
                 dev_err(&pdev->dev, "Failed to build groups\n");
-                return 0;
+                return err;
         }
 
         /* Setup functions descriptions per SoC types */
@@ -1713,17 +1723,25 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
                 return err;
         }
 
-        err = mtk_build_gpiochip(hw, pdev->dev.of_node);
-        if (err) {
-                dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+        /* For able to make pinctrl_claim_hogs, we must not enable pinctrl
+         * until all groups and functions are being added one.
+         */
+        err = pinctrl_enable(hw->pctrl);
+        if (err)
                 return err;
-        }
 
         err = mtk_build_eint(hw, pdev);
         if (err)
                 dev_warn(&pdev->dev,
                          "Failed to add EINT, but pinctrl still can work\n");
 
+        /* Build gpiochip should be after pinctrl_enable is done */
+        err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+        if (err) {
+                dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+                return err;
+        }
+
         platform_set_drvdata(pdev, hw);
 
         return 0;
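The dev_get_drvdata() to gpiochip_get_data() change above works because the chip is registered together with its private pointer. A minimal sketch of that pairing, with hypothetical names (my_pinctrl, the read logic) standing in for the driver's real ones:

#include <linux/gpio/driver.h>
#include <linux/io.h>

struct my_pinctrl {
        struct gpio_chip chip;
        void __iomem *base;
};

static int my_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
        /* Returns the pointer handed to devm_gpiochip_add_data() below,
         * independent of whatever drvdata the parent device carries. */
        struct my_pinctrl *hw = gpiochip_get_data(chip);

        return !!(readl(hw->base) & BIT(offset));
}

static int my_register(struct device *dev, struct my_pinctrl *hw)
{
        hw->chip.label = "my-pinctrl";
        hw->chip.parent = dev;
        hw->chip.ngpio = 32;
        hw->chip.get = my_gpio_get;

        /* The third argument is what gpiochip_get_data() later returns. */
        return devm_gpiochip_add_data(dev, &hw->chip, hw);
}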
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index a1d7156d0a43..6a1b6058b991 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -536,7 +536,7 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
                 ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
         } else {
                 ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
-                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, input);
+                ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
                 ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false);
         }
 
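The added negation matches the older register semantics: on the pre-JZ4770 layout a set bit in JZ4740_GPIO_DIR configures the pin as an output, the opposite of the JZ4770_GPIO_PAT1 polarity used in the branch above, so the boolean input must be inverted before it is written.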
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
index b02caf316711..eeb58b3bbc9a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
@@ -21,15 +21,13 @@
 #include "core.h"
 #include "sh_pfc.h"
 
-#define CFG_FLAGS SH_PFC_PIN_CFG_DRIVE_STRENGTH
-
 #define CPU_ALL_PORT(fn, sfx)						\
-	PORT_GP_CFG_22(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
-	PORT_GP_CFG_28(1, fn, sfx, CFG_FLAGS),				\
-	PORT_GP_CFG_17(2, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
-	PORT_GP_CFG_17(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
-	PORT_GP_CFG_6(4, fn, sfx, CFG_FLAGS),				\
-	PORT_GP_CFG_15(5, fn, sfx, CFG_FLAGS)
+	PORT_GP_CFG_22(0, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),		\
+	PORT_GP_28(1, fn, sfx),						\
+	PORT_GP_CFG_17(2, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),		\
+	PORT_GP_CFG_17(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE),		\
+	PORT_GP_6(4, fn, sfx),						\
+	PORT_GP_15(5, fn, sfx)
 /*
  * F_() : just information
  * FM() : macro for FN_xxx / xxx_MARK
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index f1fa8612db40..06978c14c83b 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2185,7 +2185,7 @@ static int __init dell_init(void)
                 dell_fill_request(&buffer, token->location, 0, 0, 0);
                 ret = dell_send_request(&buffer,
                                         CLASS_TOKEN_READ, SELECT_TOKEN_AC);
-                if (ret)
+                if (ret == 0)
                         max_intensity = buffer.output[3];
         }
 
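dell_send_request() returns 0 on success, so the old condition read buffer.output[3] only when the token read had failed; the corrected test takes the AC backlight maximum from the response only on success.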
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 547dbdac9d54..01b0e2bb3319 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
         case PTP_PF_PHYSYNC:
                 if (chan != 0)
                         return -EINVAL;
+                break;
         default:
                 return -EINVAL;
         }
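The added break repairs a switch fallthrough: a PTP_PF_PHYSYNC request with chan == 0 passed its own check and then fell into the default arm, so it was still rejected with -EINVAL. A reduced illustration (the real function also validates the other pin functions in arms omitted here):

#include <linux/errno.h>
#include <linux/ptp_clock.h>

static int validate_pinfunc(enum ptp_pin_function func, unsigned int chan)
{
        switch (func) {
        case PTP_PF_PHYSYNC:
                if (chan != 0)
                        return -EINVAL;
                break;  /* previously missing: control reached default */
        default:
                return -EINVAL;
        }
        return 0;
}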
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 6d4012dd6922..bac1eeb3d312 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -265,8 +265,10 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
                 return err;
 
         /* full-function RTCs won't have such missing fields */
-        if (rtc_valid_tm(&alarm->time) == 0)
+        if (rtc_valid_tm(&alarm->time) == 0) {
+                rtc_add_offset(rtc, &alarm->time);
                 return 0;
+        }
 
         /* get the "after" timestamp, to detect wrapped fields */
         err = rtc_read_time(rtc, &now);
@@ -409,7 +411,6 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
         if (err)
                 return err;
 
-        rtc_subtract_offset(rtc, &alarm->time);
         scheduled = rtc_tm_to_time64(&alarm->time);
 
         /* Make sure we're not setting alarms in the past */
@@ -426,6 +427,8 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
          * over right here, before we set the alarm.
          */
 
+        rtc_subtract_offset(rtc, &alarm->time);
+
         if (!rtc->ops)
                 err = -ENODEV;
         else if (!rtc->ops->set_alarm)
@@ -467,7 +470,6 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
 
         mutex_unlock(&rtc->ops_lock);
 
-        rtc_add_offset(rtc, &alarm->time);
         return err;
 }
 EXPORT_SYMBOL_GPL(rtc_set_alarm);
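Together, the interface.c hunks keep offset handling symmetric for RTCs whose hardware epoch differs from the Unix epoch: a value read back from hardware gets rtc->offset_secs added exactly once (now also on the early-return path), and an alarm headed for hardware gets it subtracted exactly once, after validation and without the caller's copy being re-adjusted afterwards. A sketch of the add direction, assuming the usual time64 conversion helpers (the real helper also respects the device's supported range):

#include <linux/rtc.h>

static void sketch_add_offset(struct rtc_device *rtc, struct rtc_time *tm)
{
        time64_t secs;

        if (!rtc->offset_secs)
                return;

        /* Shift the hardware reading up into the real epoch. */
        secs = rtc_tm_to_time64(tm);
        rtc_time64_to_tm(secs + rtc->offset_secs, tm);
}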
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 097a4d4e2aba..1925aaf09093 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -367,10 +367,8 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
         }
 
         retval = rtc_register_device(mrst_rtc.rtc);
-        if (retval) {
-                retval = PTR_ERR(mrst_rtc.rtc);
+        if (retval)
                 goto cleanup0;
-        }
 
         dev_dbg(dev, "initialised\n");
         return 0;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d3a38c421503..a9f60d0ee02e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -41,6 +41,15 @@
 
 #define DASD_DIAG_MOD "dasd_diag_mod"
 
+static unsigned int queue_depth = 32;
+static unsigned int nr_hw_queues = 4;
+
+module_param(queue_depth, uint, 0444);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
+
+module_param(nr_hw_queues, uint, 0444);
+MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
+
 /*
  * SECTION: exported variables of dasd.c
  */
@@ -3115,8 +3124,8 @@ static int dasd_alloc_queue(struct dasd_block *block)
 
         block->tag_set.ops = &dasd_mq_ops;
         block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
-        block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
-        block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
+        block->tag_set.nr_hw_queues = nr_hw_queues;
+        block->tag_set.queue_depth = queue_depth;
         block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
 
         rc = blk_mq_alloc_tag_set(&block->tag_set);
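Both parameters are mode 0444, so they are read-only at runtime and take effect for devices set up after module load; assuming the usual dasd_mod module name, they can be set with e.g. dasd_mod.queue_depth=64 dasd_mod.nr_hw_queues=8 on the kernel command line, and dasd_alloc_queue() above picks them up per block device.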
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 976b6bd4fb05..de6b96036aa4 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -228,14 +228,6 @@ struct dasd_ccw_req {
228#define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */ 228#define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */
229#define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */ 229#define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */
230 230
231/*
232 * There is no reliable way to determine the number of available CPUs on
233 * LPAR but there is no big performance difference between 1 and the
234 * maximum CPU number.
235 * 64 is a good trade off performance wise.
236 */
237#define DASD_NR_HW_QUEUES 64
238#define DASD_MAX_LCU_DEV 256
239#define DASD_REQ_PER_DEV 4 231#define DASD_REQ_PER_DEV 4
240 232
241/* Signature for error recovery functions. */ 233/* Signature for error recovery functions. */
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 2a5fec55bf60..a246a618f9a4 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -829,6 +829,17 @@ struct qeth_trap_id {
 /*some helper functions*/
 #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
 
+static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
+                                          unsigned int elements)
+{
+        unsigned int i;
+
+        for (i = 0; i < elements; i++)
+                memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
+        buf->element[14].sflags = 0;
+        buf->element[15].sflags = 0;
+}
+
 /**
  * qeth_get_elements_for_range() -	find number of SBALEs to cover range.
  * @start:				Start of the address range.
@@ -1029,7 +1040,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
                                                  __u16, __u16,
                                                  enum qeth_prot_versions);
 int qeth_set_features(struct net_device *, netdev_features_t);
-void qeth_recover_features(struct net_device *dev);
+void qeth_enable_hw_features(struct net_device *dev);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
 netdev_features_t qeth_features_check(struct sk_buff *skb,
                                       struct net_device *dev,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 8e1474f1ffac..d01ac29fd986 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
                 struct qeth_qdio_out_buffer *buf,
                 enum iucv_tx_notify notification);
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-                struct qeth_qdio_out_buffer *buf,
-                enum qeth_qdio_buffer_states newbufstate);
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 struct workqueue_struct *qeth_wq;
@@ -489,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
         struct qaob *aob;
         struct qeth_qdio_out_buffer *buffer;
         enum iucv_tx_notify notification;
+        unsigned int i;
 
         aob = (struct qaob *) phys_to_virt(phys_aob_addr);
         QETH_CARD_TEXT(card, 5, "haob");
@@ -513,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
                 qeth_notify_skbs(buffer->q, buffer, notification);
 
         buffer->aob = NULL;
-        qeth_clear_output_buffer(buffer->q, buffer,
-                                 QETH_QDIO_BUF_HANDLED_DELAYED);
+        /* Free dangling allocations. The attached skbs are handled by
+         * qeth_cleanup_handled_pending().
+         */
+        for (i = 0;
+             i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+             i++) {
+                if (aob->sba[i] && buffer->is_header[i])
+                        kmem_cache_free(qeth_core_header_cache,
+                                        (void *) aob->sba[i]);
+        }
+        atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
 
-        /* from here on: do not touch buffer anymore */
         qdio_release_aob(aob);
 }
 
@@ -3759,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
                                 QETH_CARD_TEXT(queue->card, 5, "aob");
                                 QETH_CARD_TEXT_(queue->card, 5, "%lx",
                                                 virt_to_phys(buffer->aob));
+
+                                /* prepare the queue slot for re-use: */
+                                qeth_scrub_qdio_buffer(buffer->buffer,
+                                                       QETH_MAX_BUFFER_ELEMENTS(card));
                                 if (qeth_init_qdio_out_buf(queue, bidx)) {
                                         QETH_CARD_TEXT(card, 2, "outofbuf");
                                         qeth_schedule_recovery(card);
@@ -4834,7 +4844,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
                 goto out;
         }
 
-        ccw_device_get_id(CARD_RDEV(card), &id);
+        ccw_device_get_id(CARD_DDEV(card), &id);
         request->resp_buf_len = sizeof(*response);
         request->resp_version = DIAG26C_VERSION2;
         request->op_code = DIAG26C_GET_MAC;
@@ -6459,28 +6469,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
 #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
                           NETIF_F_IPV6_CSUM)
 /**
- * qeth_recover_features() - Restore device features after recovery
- * @dev: the recovering net_device
- *
- * Caller must hold rtnl lock.
+ * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
+ * @dev: a net_device
  */
-void qeth_recover_features(struct net_device *dev)
+void qeth_enable_hw_features(struct net_device *dev)
 {
-        netdev_features_t features = dev->features;
         struct qeth_card *card = dev->ml_priv;
+        netdev_features_t features;
 
+        rtnl_lock();
+        features = dev->features;
         /* force-off any feature that needs an IPA sequence.
          * netdev_update_features() will restart them.
          */
         dev->features &= ~QETH_HW_FEATURES;
         netdev_update_features(dev);
-
-        if (features == dev->features)
-                return;
-        dev_warn(&card->gdev->dev,
-                 "Device recovery failed to restore all offload features\n");
+        if (features != dev->features)
+                dev_warn(&card->gdev->dev,
+                         "Device recovery failed to restore all offload features\n");
+        rtnl_unlock();
 }
-EXPORT_SYMBOL_GPL(qeth_recover_features);
+EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
 
 int qeth_set_features(struct net_device *dev, netdev_features_t features)
 {
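Note the locking change in qeth_enable_hw_features(): it now takes the rtnl lock itself where its predecessor documented "Caller must hold rtnl lock", which is why the l2 and l3 call sites in the following hunks move the call out of their own rtnl_lock()/rtnl_unlock() sections.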
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index a7cb37da6a21..2487f0aeb165 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -140,7 +140,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 
 static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 {
-        enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+        enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
                                         IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
         int rc;
 
@@ -157,7 +157,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 
 static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
 {
-        enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+        enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
                                         IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
         int rc;
 
@@ -501,27 +501,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
                 return -ERESTARTSYS;
         }
 
+        /* avoid racing against concurrent state change: */
+        if (!mutex_trylock(&card->conf_mutex))
+                return -EAGAIN;
+
         if (!qeth_card_hw_is_reachable(card)) {
                 ether_addr_copy(dev->dev_addr, addr->sa_data);
-                return 0;
+                goto out_unlock;
         }
 
         /* don't register the same address twice */
         if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
             (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
-                return 0;
+                goto out_unlock;
 
         /* add the new address, switch over, drop the old */
         rc = qeth_l2_send_setmac(card, addr->sa_data);
         if (rc)
-                return rc;
+                goto out_unlock;
         ether_addr_copy(old_addr, dev->dev_addr);
         ether_addr_copy(dev->dev_addr, addr->sa_data);
 
         if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
                 qeth_l2_remove_mac(card, old_addr);
         card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-        return 0;
+
+out_unlock:
+        mutex_unlock(&card->conf_mutex);
+        return rc;
 }
 
 static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -1112,6 +1119,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
         netif_carrier_off(card->dev);
 
         qeth_set_allowed_threads(card, 0xffffffff, 0);
+
+        qeth_enable_hw_features(card->dev);
         if (recover_flag == CARD_STATE_RECOVER) {
                 if (recovery_mode &&
                     card->info.type != QETH_CARD_TYPE_OSN) {
@@ -1123,9 +1132,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                 }
                 /* this also sets saved unicast addresses */
                 qeth_l2_set_rx_mode(card->dev);
-                rtnl_lock();
-                qeth_recover_features(card->dev);
-                rtnl_unlock();
         }
         /* let user_space know that device is online */
         kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e7fa479adf47..5905dc63e256 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2662,6 +2662,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                 netif_carrier_on(card->dev);
         else
                 netif_carrier_off(card->dev);
+
+        qeth_enable_hw_features(card->dev);
         if (recover_flag == CARD_STATE_RECOVER) {
                 rtnl_lock();
                 if (recovery_mode)
@@ -2669,7 +2671,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                 else
                         dev_open(card->dev);
                 qeth_l3_set_rx_mode(card->dev);
-                qeth_recover_features(card->dev);
                 rtnl_unlock();
         }
         qeth_trace_features(card);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a9831bd37a73..a57f3a7d4748 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1974,7 +1974,6 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
         u32 lun_count, nexus;
         u32 i, bus, target;
         u8 expose_flag, attribs;
-        u8 devtype;
 
         lun_count = aac_get_safw_phys_lun_count(dev);
 
@@ -1992,23 +1991,23 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
                         continue;
 
                 if (expose_flag != 0) {
-                        devtype = AAC_DEVTYPE_RAID_MEMBER;
-                        goto update_devtype;
+                        dev->hba_map[bus][target].devtype =
+                                AAC_DEVTYPE_RAID_MEMBER;
+                        continue;
                 }
 
                 if (nexus != 0 && (attribs & 8)) {
-                        devtype = AAC_DEVTYPE_NATIVE_RAW;
+                        dev->hba_map[bus][target].devtype =
+                                AAC_DEVTYPE_NATIVE_RAW;
                         dev->hba_map[bus][target].rmw_nexus =
                                         nexus;
                 } else
-                        devtype = AAC_DEVTYPE_ARC_RAW;
+                        dev->hba_map[bus][target].devtype =
+                                AAC_DEVTYPE_ARC_RAW;
 
                 dev->hba_map[bus][target].scan_counter = dev->scan_counter;
 
                 aac_set_safw_target_qd(dev, bus, target);
-
-update_devtype:
-                dev->hba_map[bus][target].devtype = devtype;
         }
 }
 
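Dropping the update_devtype label changes one visible ordering detail: devtype is now stored in hba_map before aac_set_safw_target_qd() runs rather than after it, so the queue-depth setup sees the fresh classification; RAID members still skip the scan-counter and queue-depth updates, now via continue instead of the goto.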
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 2a3977823812..a39be94d110c 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -107,12 +107,12 @@ cxlflash_assign_ops(struct dev_dependent_vals *ddv)
107{ 107{
108 const struct cxlflash_backend_ops *ops = NULL; 108 const struct cxlflash_backend_ops *ops = NULL;
109 109
110#ifdef CONFIG_OCXL 110#ifdef CONFIG_OCXL_BASE
111 if (ddv->flags & CXLFLASH_OCXL_DEV) 111 if (ddv->flags & CXLFLASH_OCXL_DEV)
112 ops = &cxlflash_ocxl_ops; 112 ops = &cxlflash_ocxl_ops;
113#endif 113#endif
114 114
115#ifdef CONFIG_CXL 115#ifdef CONFIG_CXL_BASE
116 if (!(ddv->flags & CXLFLASH_OCXL_DEV)) 116 if (!(ddv->flags & CXLFLASH_OCXL_DEV))
117 ops = &cxlflash_cxl_ops; 117 ops = &cxlflash_cxl_ops;
118#endif 118#endif
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 0a95b5f25380..497a68389461 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -134,15 +134,14 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name,
                 rc = PTR_ERR(file);
                 dev_err(dev, "%s: alloc_file failed rc=%d\n",
                         __func__, rc);
-                goto err5;
+                path_put(&path);
+                goto err3;
         }
 
         file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
         file->private_data = priv;
 out:
         return file;
-err5:
-        path_put(&path);
 err4:
         iput(inode);
 err3:
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 15c7f3b6f35e..58bb70b886d7 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3440,11 +3440,11 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
         struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
         u16 bmic_device_index = 0;
 
-        bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
-
-        encl_dev->sas_address =
+        encl_dev->eli =
                 hpsa_get_enclosure_logical_identifier(h, scsi3addr);
 
+        bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
+
         if (encl_dev->target == -1 || encl_dev->lun == -1) {
                 rc = IO_OK;
                 goto out;
@@ -9697,7 +9697,24 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy)
 static int
 hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
 {
-        *identifier = rphy->identify.sas_address;
+        struct Scsi_Host *shost = phy_to_shost(rphy);
+        struct ctlr_info *h;
+        struct hpsa_scsi_dev_t *sd;
+
+        if (!shost)
+                return -ENXIO;
+
+        h = shost_to_hba(shost);
+
+        if (!h)
+                return -ENXIO;
+
+        sd = hpsa_find_device_by_sas_rphy(h, rphy);
+        if (!sd)
+                return -ENXIO;
+
+        *identifier = sd->eli;
+
         return 0;
 }
 
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index fb9f5e7f8209..59e023696fff 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -68,6 +68,7 @@ struct hpsa_scsi_dev_t {
68#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0" 68#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
69 unsigned char device_id[16]; /* from inquiry pg. 0x83 */ 69 unsigned char device_id[16]; /* from inquiry pg. 0x83 */
70 u64 sas_address; 70 u64 sas_address;
71 u64 eli; /* from report diags. */
71 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */ 72 unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
72 unsigned char model[16]; /* bytes 16-31 of inquiry data */ 73 unsigned char model[16]; /* bytes 16-31 of inquiry data */
73 unsigned char rev; /* byte 2 of inquiry data */ 74 unsigned char rev; /* byte 2 of inquiry data */
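Together with the hpsa.c hunks above, this caches the enclosure logical identifier at discovery time: hpsa_get_enclosure_info() fills encl_dev->eli once, and hpsa_sas_get_enclosure_identifier() resolves the rphy back to the cached device instead of echoing rphy->identify.sas_address.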
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 90394cef0f41..0a5dd5595dd3 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3295,6 +3295,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
 
         init_completion(&qedf->flogi_compl);
 
+        status = qed_ops->common->update_drv_state(qedf->cdev, true);
+        if (status)
+                QEDF_ERR(&(qedf->dbg_ctx),
+                    "Failed to send drv state to MFW.\n");
+
         memset(&link_params, 0, sizeof(struct qed_link_params));
         link_params.link_up = true;
         status = qed_ops->common->set_link(qedf->cdev, &link_params);
@@ -3343,6 +3348,7 @@ static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 static void __qedf_remove(struct pci_dev *pdev, int mode)
 {
         struct qedf_ctx *qedf;
+        int rc;
 
         if (!pdev) {
                 QEDF_ERR(NULL, "pdev is NULL.\n");
@@ -3437,6 +3443,12 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
                 qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
                 pci_set_drvdata(pdev, NULL);
         }
+
+        rc = qed_ops->common->update_drv_state(qedf->cdev, false);
+        if (rc)
+                QEDF_ERR(&(qedf->dbg_ctx),
+                    "Failed to send drv state to MFW.\n");
+
         qed_ops->common->slowpath_stop(qedf->cdev);
         qed_ops->common->remove(qedf->cdev);
 
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cf274a79e77a..091ec1207bea 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2273,6 +2273,7 @@ kset_free:
 static void __qedi_remove(struct pci_dev *pdev, int mode)
 {
         struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+        int rval;
 
         if (qedi->tmf_thread) {
                 flush_workqueue(qedi->tmf_thread);
@@ -2302,6 +2303,10 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
         if (mode == QEDI_MODE_NORMAL)
                 qedi_free_iscsi_pf_param(qedi);
 
+        rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
+        if (rval)
+                QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n");
+
         if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
                 qedi_ops->common->slowpath_stop(qedi->cdev);
                 qedi_ops->common->remove(qedi->cdev);
@@ -2576,6 +2581,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
                 if (qedi_setup_boot_info(qedi))
                         QEDI_ERR(&qedi->dbg_ctx,
                                  "No iSCSI boot target configured\n");
+
+                rc = qedi_ops->common->update_drv_state(qedi->cdev, true);
+                if (rc)
+                        QEDI_ERR(&qedi->dbg_ctx,
+                                 "Failed to send drv state to MFW\n");
+
         }
 
         return 0;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 9442e18aef6f..0f94b1d62d3f 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -361,6 +361,8 @@ struct ct_arg {
         dma_addr_t	rsp_dma;
         u32		req_size;
         u32		rsp_size;
+        u32		req_allocated_size;
+        u32		rsp_allocated_size;
         void		*req;
         void		*rsp;
         port_id_t	id;
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4bc2b66b299f..2c35b0b2baa0 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -556,7 +556,7 @@ err2:
         /* please ignore kernel warning. otherwise, we have mem leak. */
         if (sp->u.iocb_cmd.u.ctarg.req) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.req,
                     sp->u.iocb_cmd.u.ctarg.req_dma);
                 sp->u.iocb_cmd.u.ctarg.req = NULL;
@@ -564,7 +564,7 @@ err2:
 
         if (sp->u.iocb_cmd.u.ctarg.rsp) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.rsp,
                     sp->u.iocb_cmd.u.ctarg.rsp_dma);
                 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -617,6 +617,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
         sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
             sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
             GFP_KERNEL);
+        sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
         if (!sp->u.iocb_cmd.u.ctarg.req) {
                 ql_log(ql_log_warn, vha, 0xd041,
                     "%s: Failed to allocate ct_sns request.\n",
@@ -627,6 +628,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
         sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
             sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
             GFP_KERNEL);
+        sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
         if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                 ql_log(ql_log_warn, vha, 0xd042,
                     "%s: Failed to allocate ct_sns request.\n",
@@ -712,6 +714,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
         sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
             sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
             GFP_KERNEL);
+        sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
         if (!sp->u.iocb_cmd.u.ctarg.req) {
                 ql_log(ql_log_warn, vha, 0xd041,
                     "%s: Failed to allocate ct_sns request.\n",
@@ -722,6 +725,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
         sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
             sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
             GFP_KERNEL);
+        sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
         if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                 ql_log(ql_log_warn, vha, 0xd042,
                     "%s: Failed to allocate ct_sns request.\n",
@@ -802,6 +806,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
         sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
             sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
             GFP_KERNEL);
+        sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
         if (!sp->u.iocb_cmd.u.ctarg.req) {
                 ql_log(ql_log_warn, vha, 0xd041,
                     "%s: Failed to allocate ct_sns request.\n",
@@ -812,6 +817,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
         sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
             sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
             GFP_KERNEL);
+        sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
         if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                 ql_log(ql_log_warn, vha, 0xd042,
                     "%s: Failed to allocate ct_sns request.\n",
@@ -909,6 +915,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
         sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
             sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
             GFP_KERNEL);
+        sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
         if (!sp->u.iocb_cmd.u.ctarg.req) {
                 ql_log(ql_log_warn, vha, 0xd041,
                     "%s: Failed to allocate ct_sns request.\n",
@@ -919,6 +926,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
         sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
             sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
             GFP_KERNEL);
+        sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
         if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                 ql_log(ql_log_warn, vha, 0xd042,
                     "%s: Failed to allocate ct_sns request.\n",
@@ -3388,14 +3396,14 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
 {
         if (sp->u.iocb_cmd.u.ctarg.req) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.req,
                     sp->u.iocb_cmd.u.ctarg.req_dma);
                 sp->u.iocb_cmd.u.ctarg.req = NULL;
         }
         if (sp->u.iocb_cmd.u.ctarg.rsp) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.rsp,
                     sp->u.iocb_cmd.u.ctarg.rsp_dma);
                 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -3596,14 +3604,14 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
         /* please ignore kernel warning. otherwise, we have mem leak. */
         if (sp->u.iocb_cmd.u.ctarg.req) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.req,
                     sp->u.iocb_cmd.u.ctarg.req_dma);
                 sp->u.iocb_cmd.u.ctarg.req = NULL;
         }
         if (sp->u.iocb_cmd.u.ctarg.rsp) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.rsp,
                     sp->u.iocb_cmd.u.ctarg.rsp_dma);
                 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -3654,6 +3662,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
         sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
             sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
             GFP_KERNEL);
+        sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
         if (!sp->u.iocb_cmd.u.ctarg.req) {
                 ql_log(ql_log_warn, vha, 0xd041,
                     "Failed to allocate ct_sns request.\n");
@@ -3663,6 +3672,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
         sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
             sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
             GFP_KERNEL);
+        sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
         if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                 ql_log(ql_log_warn, vha, 0xd042,
                     "Failed to allocate ct_sns request.\n");
@@ -4142,14 +4152,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
          */
         if (sp->u.iocb_cmd.u.ctarg.req) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.req,
                     sp->u.iocb_cmd.u.ctarg.req_dma);
                 sp->u.iocb_cmd.u.ctarg.req = NULL;
         }
         if (sp->u.iocb_cmd.u.ctarg.rsp) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.rsp,
                     sp->u.iocb_cmd.u.ctarg.rsp_dma);
                 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4179,14 +4189,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
         /* please ignore kernel warning. Otherwise, we have mem leak. */
         if (sp->u.iocb_cmd.u.ctarg.req) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.req,
                     sp->u.iocb_cmd.u.ctarg.req_dma);
                 sp->u.iocb_cmd.u.ctarg.req = NULL;
         }
         if (sp->u.iocb_cmd.u.ctarg.rsp) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.rsp,
                     sp->u.iocb_cmd.u.ctarg.rsp_dma);
                 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4281,14 +4291,14 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
 done_free_sp:
         if (sp->u.iocb_cmd.u.ctarg.req) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.req,
                     sp->u.iocb_cmd.u.ctarg.req_dma);
                 sp->u.iocb_cmd.u.ctarg.req = NULL;
         }
         if (sp->u.iocb_cmd.u.ctarg.rsp) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.rsp,
                     sp->u.iocb_cmd.u.ctarg.rsp_dma);
                 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4349,6 +4359,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
                 sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
                         &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
                         &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
+                sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
                 if (!sp->u.iocb_cmd.u.ctarg.req) {
                         ql_log(ql_log_warn, vha, 0xffff,
                             "Failed to allocate ct_sns request.\n");
@@ -4366,6 +4377,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
         sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
                 &vha->hw->pdev->dev, rspsz,
                 &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+        sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
         if (!sp->u.iocb_cmd.u.ctarg.rsp) {
                 ql_log(ql_log_warn, vha, 0xffff,
                     "Failed to allocate ct_sns request.\n");
@@ -4425,14 +4437,14 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
 done_free_sp:
         if (sp->u.iocb_cmd.u.ctarg.req) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.req_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.req,
                     sp->u.iocb_cmd.u.ctarg.req_dma);
                 sp->u.iocb_cmd.u.ctarg.req = NULL;
         }
         if (sp->u.iocb_cmd.u.ctarg.rsp) {
                 dma_free_coherent(&vha->hw->pdev->dev,
-                    sizeof(struct ct_sns_pkt),
+                    sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
                     sp->u.iocb_cmd.u.ctarg.rsp,
                     sp->u.iocb_cmd.u.ctarg.rsp_dma);
                 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
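A note on the pattern repeated throughout this file: each allocation now records its true size in req_allocated_size/rsp_allocated_size right after dma_alloc_coherent()/dma_zalloc_coherent(), and every dma_free_coherent() frees with the recorded size instead of hard-coding sizeof(struct ct_sns_pkt). This matters for paths such as qla24xx_async_gpnft(), where the response buffer is allocated with a larger rspsz.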
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 7b675243bd16..db0e3279e07a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -591,12 +591,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                                 conflict_fcport =
                                         qla2x00_find_fcport_by_wwpn(vha,
                                             e->port_name, 0);
-                                ql_dbg(ql_dbg_disc, vha, 0x20e6,
-                                    "%s %d %8phC post del sess\n",
-                                    __func__, __LINE__,
-                                    conflict_fcport->port_name);
-                                qlt_schedule_sess_for_deletion
-                                        (conflict_fcport);
+                                if (conflict_fcport) {
+                                        qlt_schedule_sess_for_deletion
+                                                (conflict_fcport);
+                                        ql_dbg(ql_dbg_disc, vha, 0x20e6,
+                                            "%s %d %8phC post del sess\n",
+                                            __func__, __LINE__,
+                                            conflict_fcport->port_name);
+                                }
                         }
 
                         /* FW already picked this loop id for another fcport */
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e881fce7477a..9f309e572be4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3180,6 +3180,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3180 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n", 3180 "req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
3181 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); 3181 req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
3182 3182
3183 ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
3184
3183 if (ha->isp_ops->initialize_adapter(base_vha)) { 3185 if (ha->isp_ops->initialize_adapter(base_vha)) {
3184 ql_log(ql_log_fatal, base_vha, 0x00d6, 3186 ql_log(ql_log_fatal, base_vha, 0x00d6,
3185 "Failed to initialize adapter - Adapter flags %x.\n", 3187 "Failed to initialize adapter - Adapter flags %x.\n",
@@ -3216,8 +3218,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
3216 host->can_queue, base_vha->req, 3218 host->can_queue, base_vha->req,
3217 base_vha->mgmt_svr_loop_id, host->sg_tablesize); 3219 base_vha->mgmt_svr_loop_id, host->sg_tablesize);
3218 3220
3219 ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
3220
3221 if (ha->mqenable) { 3221 if (ha->mqenable) {
3222 bool mq = false; 3222 bool mq = false;
3223 bool startit = false; 3223 bool startit = false;
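The workqueue allocation moves ahead of ha->isp_ops->initialize_adapter(), presumably so that ha->wq already exists for any work queued while the adapter initializes or fails; previously the queue was only created after initialization had succeeded.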
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 8932ae81a15a..2715cdaa669c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -296,6 +296,20 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
                 rtn = host->hostt->eh_timed_out(scmd);
 
         if (rtn == BLK_EH_DONE) {
+                /*
+                 * For blk-mq, we must set the request state to complete now
+                 * before sending the request to the scsi error handler. This
+                 * will prevent a use-after-free in the event the LLD manages
+                 * to complete the request before the error handler finishes
+                 * processing this timed out request.
+                 *
+                 * If the request was already completed, then the LLD beat the
+                 * time out handler from transferring the request to the scsi
+                 * error handler. In that case we can return immediately as no
+                 * further action is required.
+                 */
+                if (req->q->mq_ops && !blk_mq_mark_complete(req))
+                        return rtn;
                 if (scsi_abort_command(scmd) != SUCCESS) {
                         set_host_byte(scmd, DID_TIME_OUT);
                         scsi_eh_scmd_add(scmd);
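The new guard hinges on an atomic claim of the request, so only one of the timeout path and the LLD's completion path can transition it to complete. A sketch of the cmpxchg pattern behind a helper like blk_mq_mark_complete() (illustrative; the real definition lives in include/linux/blk-mq.h):

/* Returns true only for the caller that wins the IN_FLIGHT -> COMPLETE
 * transition; the loser must leave the request alone. */
static inline bool sketch_mark_complete(struct request *rq)
{
        return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
                        MQ_RQ_IN_FLIGHT;
}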
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index a14fef11776e..2bf3bf73886e 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -391,7 +391,8 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
  * Check that all zones of the device are equal. The last zone can however
  * be smaller. The zone size must also be a power of two number of LBAs.
  *
- * Returns the zone size in bytes upon success or an error code upon failure.
+ * Returns the zone size in number of blocks upon success or an error code
+ * upon failure.
  */
 static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
 {
@@ -401,7 +402,7 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
         unsigned char *rec;
         unsigned int buf_len;
         unsigned int list_length;
-        int ret;
+        s64 ret;
         u8 same;
 
         /* Get a buffer */
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 53ae52dbff84..cd2fdac000c9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -51,6 +51,7 @@ static int sg_version_num = 30536;	/* 2 digits for each component */
 #include <linux/atomic.h>
 #include <linux/ratelimit.h>
 #include <linux/uio.h>
+#include <linux/cred.h> /* for sg_check_file_access() */
 
 #include "scsi.h"
 #include <scsi/scsi_dbg.h>
@@ -209,6 +210,33 @@ static void sg_device_destroy(struct kref *kref);
         sdev_prefix_printk(prefix, (sdp)->device,		\
                            (sdp)->disk->disk_name, fmt, ##a)
 
+/*
+ * The SCSI interfaces that use read() and write() as an asynchronous variant of
+ * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways
+ * to trigger read() and write() calls from various contexts with elevated
+ * privileges. This can lead to kernel memory corruption (e.g. if these
+ * interfaces are called through splice()) and privilege escalation inside
+ * userspace (e.g. if a process with access to such a device passes a file
+ * descriptor to a SUID binary as stdin/stdout/stderr).
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static int sg_check_file_access(struct file *filp, const char *caller)
+{
+        if (filp->f_cred != current_real_cred()) {
+                pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+                        caller, task_tgid_vnr(current), current->comm);
+                return -EPERM;
+        }
+        if (uaccess_kernel()) {
+                pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
+                        caller, task_tgid_vnr(current), current->comm);
+                return -EACCES;
+        }
+        return 0;
+}
+
 static int sg_allow_access(struct file *filp, unsigned char *cmd)
 {
         struct sg_fd *sfp = filp->private_data;
@@ -393,6 +421,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
         struct sg_header *old_hdr = NULL;
         int retval = 0;
 
+        /*
+         * This could cause a response to be stranded. Close the associated
+         * file descriptor to free up any resources being held.
+         */
+        retval = sg_check_file_access(filp, __func__);
+        if (retval)
+                return retval;
+
         if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                 return -ENXIO;
         SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
@@ -580,9 +616,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
         struct sg_header old_hdr;
         sg_io_hdr_t *hp;
         unsigned char cmnd[SG_MAX_CDB_SIZE];
+        int retval;
 
-        if (unlikely(uaccess_kernel()))
-                return -EINVAL;
+        retval = sg_check_file_access(filp, __func__);
+        if (retval)
+                return retval;
 
         if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
                 return -ENXIO;
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 32f0748fd067..0097a939487f 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -27,9 +27,16 @@
27#define GPC_PGC_SW2ISO_SHIFT 0x8 27#define GPC_PGC_SW2ISO_SHIFT 0x8
28#define GPC_PGC_SW_SHIFT 0x0 28#define GPC_PGC_SW_SHIFT 0x0
29 29
30#define GPC_PGC_PCI_PDN 0x200
31#define GPC_PGC_PCI_SR 0x20c
32
30#define GPC_PGC_GPU_PDN 0x260 33#define GPC_PGC_GPU_PDN 0x260
31#define GPC_PGC_GPU_PUPSCR 0x264 34#define GPC_PGC_GPU_PUPSCR 0x264
32#define GPC_PGC_GPU_PDNSCR 0x268 35#define GPC_PGC_GPU_PDNSCR 0x268
36#define GPC_PGC_GPU_SR 0x26c
37
38#define GPC_PGC_DISP_PDN 0x240
39#define GPC_PGC_DISP_SR 0x24c
33 40
34#define GPU_VPU_PUP_REQ BIT(1) 41#define GPU_VPU_PUP_REQ BIT(1)
35#define GPU_VPU_PDN_REQ BIT(0) 42#define GPU_VPU_PDN_REQ BIT(0)
@@ -318,10 +325,24 @@ static const struct of_device_id imx_gpc_dt_ids[] = {
318 { } 325 { }
319}; 326};
320 327
328static const struct regmap_range yes_ranges[] = {
329 regmap_reg_range(GPC_CNTR, GPC_CNTR),
330 regmap_reg_range(GPC_PGC_PCI_PDN, GPC_PGC_PCI_SR),
331 regmap_reg_range(GPC_PGC_GPU_PDN, GPC_PGC_GPU_SR),
332 regmap_reg_range(GPC_PGC_DISP_PDN, GPC_PGC_DISP_SR),
333};
334
335static const struct regmap_access_table access_table = {
336 .yes_ranges = yes_ranges,
337 .n_yes_ranges = ARRAY_SIZE(yes_ranges),
338};
339
321static const struct regmap_config imx_gpc_regmap_config = { 340static const struct regmap_config imx_gpc_regmap_config = {
322 .reg_bits = 32, 341 .reg_bits = 32,
323 .val_bits = 32, 342 .val_bits = 32,
324 .reg_stride = 4, 343 .reg_stride = 4,
344 .rd_table = &access_table,
345 .wr_table = &access_table,
325 .max_register = 0x2ac, 346 .max_register = 0x2ac,
326}; 347};
327 348
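With rd_table and wr_table set, regmap rejects any access that falls outside yes_ranges, so only the GPC control register and the PCI/GPU/display power-gating blocks remain reachable through this map. A sketch of the resulting behaviour, assuming "map" is the handle created from imx_gpc_regmap_config above (the 0x2a0 offset is just an arbitrary register not in the table):

	unsigned int val;
	int err;

	err = regmap_read(map, GPC_PGC_GPU_PDN, &val); /* whitelisted: reads HW */
	err = regmap_read(map, 0x2a0, &val);           /* not in yes_ranges: -EIO */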
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 0ecffab52ec2..abdaf7cf8162 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -1842,15 +1842,15 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
1842 memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN); 1842 memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
1843 1843
1844 if (dev->flags & IFF_PROMISC) { 1844 if (dev->flags & IFF_PROMISC) {
1845 hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER, 1845 hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
1846 MCAST_FILTER_PROMISC); 1846 MCAST_FILTER_PROMISC);
1847 goto spin_unlock; 1847 goto spin_unlock;
1848 } 1848 }
1849 1849
1850 if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) || 1850 if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
1851 (dev->flags & IFF_ALLMULTI)) { 1851 (dev->flags & IFF_ALLMULTI)) {
1852 hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER, 1852 hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
1853 MCAST_FILTER_MCASTALL); 1853 MCAST_FILTER_MCASTALL);
1854 goto spin_unlock; 1854 goto spin_unlock;
1855 } 1855 }
1856 1856
@@ -1866,8 +1866,8 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
1866 ETH_ALEN * mc_count); 1866 ETH_ALEN * mc_count);
1867 } else { 1867 } else {
1868 priv->sme_i.sme_flag |= SME_MULTICAST; 1868 priv->sme_i.sme_flag |= SME_MULTICAST;
1869 hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER, 1869 hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
1870 MCAST_FILTER_MCAST); 1870 MCAST_FILTER_MCAST);
1871 } 1871 }
1872 1872
1873spin_unlock: 1873spin_unlock:
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index a3a83424a926..16478fe9e3f8 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -11,7 +11,6 @@
11 * (at your option) any later version. 11 * (at your option) any later version.
12 */ 12 */
13 13
14#include <asm/cacheflush.h>
15#include <linux/clk.h> 14#include <linux/clk.h>
16#include <linux/mm.h> 15#include <linux/mm.h>
17#include <linux/pagemap.h> 16#include <linux/pagemap.h>
@@ -24,6 +23,8 @@
24#include <media/v4l2-ioctl.h> 23#include <media/v4l2-ioctl.h>
25#include <media/v4l2-mc.h> 24#include <media/v4l2-mc.h>
26 25
26#include <asm/cacheflush.h>
27
27#include "iss_video.h" 28#include "iss_video.h"
28#include "iss.h" 29#include "iss.h"
29 30
diff --git a/drivers/staging/rtl8188eu/Kconfig b/drivers/staging/rtl8188eu/Kconfig
index 673fdce25530..ff7832798a77 100644
--- a/drivers/staging/rtl8188eu/Kconfig
+++ b/drivers/staging/rtl8188eu/Kconfig
@@ -7,7 +7,6 @@ config R8188EU
7 select LIB80211 7 select LIB80211
8 select LIB80211_CRYPT_WEP 8 select LIB80211_CRYPT_WEP
9 select LIB80211_CRYPT_CCMP 9 select LIB80211_CRYPT_CCMP
10 select LIB80211_CRYPT_TKIP
11 ---help--- 10 ---help---
12 This option adds the Realtek RTL8188EU USB device such as TP-Link TL-WN725N. 11 This option adds the Realtek RTL8188EU USB device such as TP-Link TL-WN725N.
13 If built as a module, it will be called r8188eu. 12 If built as a module, it will be called r8188eu.
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 05936a45eb93..c6857a5be12a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -23,7 +23,6 @@
23#include <mon.h> 23#include <mon.h>
24#include <wifi.h> 24#include <wifi.h>
25#include <linux/vmalloc.h> 25#include <linux/vmalloc.h>
26#include <net/lib80211.h>
27 26
28#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */ 27#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
29#define LLC_HEADER_SIZE 6 /* LLC Header Length */ 28#define LLC_HEADER_SIZE 6 /* LLC Header Length */
@@ -221,20 +220,31 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
221static int recvframe_chkmic(struct adapter *adapter, 220static int recvframe_chkmic(struct adapter *adapter,
222 struct recv_frame *precvframe) 221 struct recv_frame *precvframe)
223{ 222{
224 int res = _SUCCESS; 223 int i, res = _SUCCESS;
225 struct rx_pkt_attrib *prxattrib = &precvframe->attrib; 224 u32 datalen;
226 struct sta_info *stainfo = rtw_get_stainfo(&adapter->stapriv, prxattrib->ta); 225 u8 miccode[8];
226 u8 bmic_err = false, brpt_micerror = true;
227 u8 *pframe, *payload, *pframemic;
228 u8 *mickey;
229 struct sta_info *stainfo;
230 struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
231 struct security_priv *psecuritypriv = &adapter->securitypriv;
232
233 struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
234 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
235
236 stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
227 237
228 if (prxattrib->encrypt == _TKIP_) { 238 if (prxattrib->encrypt == _TKIP_) {
239 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
240 ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__));
241 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
242 ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
243 __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
244 prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
245
246 /* calculate mic code */
229 if (stainfo) { 247 if (stainfo) {
230 int key_idx;
231 const int iv_len = 8, icv_len = 4, key_length = 32;
232 struct sk_buff *skb = precvframe->pkt;
233 u8 key[32], iv[8], icv[4], *pframe = skb->data;
234 void *crypto_private = NULL;
235 struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
236 struct security_priv *psecuritypriv = &adapter->securitypriv;
237
238 if (IS_MCAST(prxattrib->ra)) { 248 if (IS_MCAST(prxattrib->ra)) {
239 if (!psecuritypriv) { 249 if (!psecuritypriv) {
240 res = _FAIL; 250 res = _FAIL;
@@ -243,58 +253,115 @@ static int recvframe_chkmic(struct adapter *adapter,
243 DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__); 253 DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__);
244 goto exit; 254 goto exit;
245 } 255 }
246 key_idx = prxattrib->key_index; 256 mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
247 memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16); 257
248 memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16); 258 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
259 ("\n %s: bcmc key\n", __func__));
249 } else { 260 } else {
250 key_idx = 0; 261 mickey = &stainfo->dot11tkiprxmickey.skey[0];
251 memcpy(key, stainfo->dot118021x_UncstKey.skey, 16); 262 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
252 memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16); 263 ("\n %s: unicast key\n", __func__));
253 } 264 }
254 265
255 if (!crypto_ops) { 266 /* icv_len included the mic code */
256 res = _FAIL; 267 datalen = precvframe->pkt->len-prxattrib->hdrlen -
257 goto exit_lib80211_tkip; 268 prxattrib->iv_len-prxattrib->icv_len-8;
258 } 269 pframe = precvframe->pkt->data;
270 payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
259 271
260 memcpy(iv, pframe + prxattrib->hdrlen, iv_len); 272 RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
261 memcpy(icv, pframe + skb->len - icv_len, icv_len); 273 rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
262 memmove(pframe + iv_len, pframe, prxattrib->hdrlen); 274 (unsigned char)prxattrib->priority); /* care the length of the data */
263 275
264 skb_pull(skb, iv_len); 276 pframemic = payload+datalen;
265 skb_trim(skb, skb->len - icv_len);
266 277
267 crypto_private = crypto_ops->init(key_idx); 278 bmic_err = false;
268 if (!crypto_private) { 279
269 res = _FAIL; 280 for (i = 0; i < 8; i++) {
270 goto exit_lib80211_tkip; 281 if (miccode[i] != *(pframemic+i)) {
271 } 282 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
272 if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) { 283 ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
273 res = _FAIL; 284 __func__, i, miccode[i], i, *(pframemic + i)));
274 goto exit_lib80211_tkip; 285 bmic_err = true;
275 } 286 }
276 if (crypto_ops->decrypt_msdu(skb, key_idx, prxattrib->hdrlen, crypto_private)) {
277 res = _FAIL;
278 goto exit_lib80211_tkip;
279 } 287 }
280 288
281 memmove(pframe, pframe + iv_len, prxattrib->hdrlen); 289 if (bmic_err) {
282 skb_push(skb, iv_len); 290 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
283 skb_put(skb, icv_len); 291 ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
292 *(pframemic-8), *(pframemic-7), *(pframemic-6),
293 *(pframemic-5), *(pframemic-4), *(pframemic-3),
294 *(pframemic-2), *(pframemic-1)));
295 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
296 ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
297 *(pframemic-16), *(pframemic-15), *(pframemic-14),
298 *(pframemic-13), *(pframemic-12), *(pframemic-11),
299 *(pframemic-10), *(pframemic-9)));
300 {
301 uint i;
284 302
285 memcpy(pframe + prxattrib->hdrlen, iv, iv_len); 303 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
 286 memcpy(pframe + skb->len - icv_len, icv, icv_len); 304 ("\n ======dump packet (len=%d)======\n",
305 precvframe->pkt->len));
306 for (i = 0; i < precvframe->pkt->len; i += 8) {
307 RT_TRACE(_module_rtl871x_recv_c_,
308 _drv_err_,
309 ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
310 *(precvframe->pkt->data+i),
311 *(precvframe->pkt->data+i+1),
312 *(precvframe->pkt->data+i+2),
313 *(precvframe->pkt->data+i+3),
314 *(precvframe->pkt->data+i+4),
315 *(precvframe->pkt->data+i+5),
316 *(precvframe->pkt->data+i+6),
317 *(precvframe->pkt->data+i+7)));
318 }
319 RT_TRACE(_module_rtl871x_recv_c_,
320 _drv_err_,
 321 ("\n ====== dump packet end [len=%d]======\n",
322 precvframe->pkt->len));
323 RT_TRACE(_module_rtl871x_recv_c_,
324 _drv_err_,
 325 ("\n hdrlen=%d,\n",
326 prxattrib->hdrlen));
327 }
287 328
288exit_lib80211_tkip: 329 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
289 if (crypto_ops && crypto_private) 330 ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
290 crypto_ops->deinit(crypto_private); 331 prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
332 prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
333
334 /* double check key_index for some timing issue , */
335 /* cannot compare with psecuritypriv->dot118021XGrpKeyid also cause timing issue */
336 if ((IS_MCAST(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index))
337 brpt_micerror = false;
338
339 if ((prxattrib->bdecrypted) && (brpt_micerror)) {
340 rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
341 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
342 DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
343 } else {
344 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
345 DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
346 }
347 res = _FAIL;
348 } else {
349 /* mic checked ok */
350 if ((!psecuritypriv->bcheck_grpkey) && (IS_MCAST(prxattrib->ra))) {
351 psecuritypriv->bcheck_grpkey = true;
352 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true"));
353 }
354 }
291 } else { 355 } else {
292 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, 356 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
293 ("%s: rtw_get_stainfo==NULL!!!\n", __func__)); 357 ("%s: rtw_get_stainfo==NULL!!!\n", __func__));
294 } 358 }
359
360 skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
295 } 361 }
296 362
297exit: 363exit:
364
298 return res; 365 return res;
299} 366}
300 367
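The restored MIC check compares the eight Michael MIC bytes one at a time, logging each mismatch as it goes. If this path is ever reworked again, a constant-time comparison would be preferable; a sketch of that alternative (an editorial suggestion, not what the driver does) using the kernel's crypto_memneq():

	#include <crypto/algapi.h>	/* crypto_memneq() */

	/* Constant-time check of the 8-byte Michael MIC. */
	static bool tkip_mic_ok(const u8 *calc, const u8 *recv)
	{
		return !crypto_memneq(calc, recv, 8);
	}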
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index bfe0b217e679..67a2490f055e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -650,71 +650,71 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
650 return res; 650 return res;
651} 651}
652 652
653/* The hlen doesn't include the IV */
653u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe) 654u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
654{ 655{ /* exclude ICV */
655 struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib; 656 u16 pnl;
656 u32 res = _SUCCESS; 657 u32 pnh;
658 u8 rc4key[16];
659 u8 ttkey[16];
660 u8 crc[4];
661 struct arc4context mycontext;
662 int length;
663
664 u8 *pframe, *payload, *iv, *prwskey;
665 union pn48 dot11txpn;
666 struct sta_info *stainfo;
667 struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
668 struct security_priv *psecuritypriv = &padapter->securitypriv;
669 u32 res = _SUCCESS;
670
671
672 pframe = (unsigned char *)((struct recv_frame *)precvframe)->pkt->data;
657 673
658 /* 4 start to decrypt recvframe */ 674 /* 4 start to decrypt recvframe */
659 if (prxattrib->encrypt == _TKIP_) { 675 if (prxattrib->encrypt == _TKIP_) {
660 struct sta_info *stainfo = rtw_get_stainfo(&padapter->stapriv, prxattrib->ta); 676 stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
661
662 if (stainfo) { 677 if (stainfo) {
663 int key_idx;
664 const int iv_len = 8, icv_len = 4, key_length = 32;
665 void *crypto_private = NULL;
666 struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
667 u8 key[32], iv[8], icv[4], *pframe = skb->data;
668 struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
669 struct security_priv *psecuritypriv = &padapter->securitypriv;
670
671 if (IS_MCAST(prxattrib->ra)) { 678 if (IS_MCAST(prxattrib->ra)) {
672 if (!psecuritypriv->binstallGrpkey) { 679 if (!psecuritypriv->binstallGrpkey) {
673 res = _FAIL; 680 res = _FAIL;
674 DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__); 681 DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
675 goto exit; 682 goto exit;
676 } 683 }
677 key_idx = prxattrib->key_index; 684 prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
678 memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
679 memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
680 } else { 685 } else {
681 key_idx = 0; 686 RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
682 memcpy(key, stainfo->dot118021x_UncstKey.skey, 16); 687 prwskey = &stainfo->dot118021x_UncstKey.skey[0];
683 memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
684 } 688 }
685 689
686 if (!crypto_ops) { 690 iv = pframe+prxattrib->hdrlen;
687 res = _FAIL; 691 payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
688 goto exit_lib80211_tkip; 692 length = ((struct recv_frame *)precvframe)->pkt->len-prxattrib->hdrlen-prxattrib->iv_len;
689 }
690 693
691 memcpy(iv, pframe + prxattrib->hdrlen, iv_len); 694 GET_TKIP_PN(iv, dot11txpn);
692 memcpy(icv, pframe + skb->len - icv_len, icv_len);
693 695
694 crypto_private = crypto_ops->init(key_idx); 696 pnl = (u16)(dot11txpn.val);
695 if (!crypto_private) { 697 pnh = (u32)(dot11txpn.val>>16);
696 res = _FAIL;
697 goto exit_lib80211_tkip;
698 }
699 if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
700 res = _FAIL;
701 goto exit_lib80211_tkip;
702 }
703 if (crypto_ops->decrypt_mpdu(skb, prxattrib->hdrlen, crypto_private)) {
704 res = _FAIL;
705 goto exit_lib80211_tkip;
706 }
707 698
708 memmove(pframe, pframe + iv_len, prxattrib->hdrlen); 699 phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
709 skb_push(skb, iv_len); 700 phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
710 skb_put(skb, icv_len);
711 701
712 memcpy(pframe + prxattrib->hdrlen, iv, iv_len); 702 /* 4 decrypt payload include icv */
713 memcpy(pframe + skb->len - icv_len, icv, icv_len);
714 703
715exit_lib80211_tkip: 704 arcfour_init(&mycontext, rc4key, 16);
716 if (crypto_ops && crypto_private) 705 arcfour_encrypt(&mycontext, payload, payload, length);
717 crypto_ops->deinit(crypto_private); 706
707 *((__le32 *)crc) = getcrc32(payload, length-4);
708
709 if (crc[3] != payload[length-1] ||
710 crc[2] != payload[length-2] ||
711 crc[1] != payload[length-3] ||
712 crc[0] != payload[length-4]) {
713 RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
 714 ("rtw_tkip_decrypt:icv error crc (%4ph)!=payload (%4ph)\n",
715 &crc, &payload[length-4]));
716 res = _FAIL;
717 }
718 } else { 718 } else {
719 RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n")); 719 RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n"));
720 res = _FAIL; 720 res = _FAIL;
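After the phase1/phase2 key mixing and the RC4 pass, the ICV at the tail of the decrypted payload is checked byte by byte against getcrc32() of the preceding data. A userspace sketch of the same trailing-CRC check, assuming zlib's crc32() matches the driver's getcrc32() convention (standard CRC-32, stored little-endian); link with -lz:

	#include <stddef.h>
	#include <string.h>
	#include <zlib.h>

	/* Payload carries a trailing little-endian CRC-32 (the ICV). */
	static int icv_ok(const unsigned char *payload, size_t len)
	{
		unsigned long got;
		unsigned char want[4];

		if (len < 4)
			return 0;
		got = crc32(0L, payload, len - 4);
		want[0] = got & 0xff;
		want[1] = (got >> 8) & 0xff;
		want[2] = (got >> 16) & 0xff;
		want[3] = (got >> 24) & 0xff;
		return memcmp(want, payload + len - 4, 4) == 0;
	}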
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 45c05527a57a..faf4b4158cfa 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -1051,7 +1051,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
1051 return _FAIL; 1051 return _FAIL;
1052 1052
1053 1053
1054 if (len > MAX_IE_SZ) 1054 if (len < 0 || len > MAX_IE_SZ)
1055 return _FAIL; 1055 return _FAIL;
1056 1056
1057 pbss_network->IELength = len; 1057 pbss_network->IELength = len;
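len is a signed int here, so the added len < 0 arm matters: a negative length that survived this check would be converted to an enormous value once it reached a size_t-taking copy. A small standalone demonstration of the pitfall (not driver code):

	#include <stdio.h>

	#define MAX_IE_SZ 768

	int main(void)
	{
		int len = -1;

		/* Without the len < 0 guard, only the upper bound is tested: */
		if (len > MAX_IE_SZ)
			puts("rejected");
		else
			printf("accepted; as size_t this becomes %zu\n",
			       (size_t)len);
		return 0;
	}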
diff --git a/drivers/staging/rtlwifi/rtl8822be/hw.c b/drivers/staging/rtlwifi/rtl8822be/hw.c
index 7947edb239a1..88ba5b2fea6a 100644
--- a/drivers/staging/rtlwifi/rtl8822be/hw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/hw.c
@@ -803,7 +803,7 @@ static void _rtl8822be_enable_aspm_back_door(struct ieee80211_hw *hw)
803 return; 803 return;
804 804
805 pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp); 805 pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp);
806 pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | BIT(7)); 806 pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | ASPM_L1_LATENCY << 3);
807 807
808 pci_read_config_byte(rtlpci->pdev, 0x719, &tmp); 808 pci_read_config_byte(rtlpci->pdev, 0x719, &tmp);
809 pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4)); 809 pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4));
diff --git a/drivers/staging/rtlwifi/wifi.h b/drivers/staging/rtlwifi/wifi.h
index 012fb618840b..a45f0eb69d3f 100644
--- a/drivers/staging/rtlwifi/wifi.h
+++ b/drivers/staging/rtlwifi/wifi.h
@@ -88,6 +88,7 @@
88#define RTL_USB_MAX_RX_COUNT 100 88#define RTL_USB_MAX_RX_COUNT 100
89#define QBSS_LOAD_SIZE 5 89#define QBSS_LOAD_SIZE 5
90#define MAX_WMMELE_LENGTH 64 90#define MAX_WMMELE_LENGTH 64
91#define ASPM_L1_LATENCY 7
91 92
92#define TOTAL_CAM_ENTRY 32 93#define TOTAL_CAM_ENTRY 32
93 94
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index a61bc41b82d7..947c79532e10 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -198,11 +198,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
198 int chars_sent = 0; 198 int chars_sent = 0;
199 char __user *cp; 199 char __user *cp;
200 char *init; 200 char *init;
201 size_t bytes_per_ch = unicode ? 3 : 1;
201 u16 ch; 202 u16 ch;
202 int empty; 203 int empty;
203 unsigned long flags; 204 unsigned long flags;
204 DEFINE_WAIT(wait); 205 DEFINE_WAIT(wait);
205 206
207 if (count < bytes_per_ch)
208 return -EINVAL;
209
206 spin_lock_irqsave(&speakup_info.spinlock, flags); 210 spin_lock_irqsave(&speakup_info.spinlock, flags);
207 while (1) { 211 while (1) {
208 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); 212 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
@@ -228,7 +232,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
228 init = get_initstring(); 232 init = get_initstring();
229 233
230 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ 234 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
231 while (chars_sent <= count - 3) { 235 while (chars_sent <= count - bytes_per_ch) {
232 if (speakup_info.flushing) { 236 if (speakup_info.flushing) {
233 speakup_info.flushing = 0; 237 speakup_info.flushing = 0;
234 ch = '\x18'; 238 ch = '\x18';
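bytes_per_ch is 3 in Unicode mode because a 16-bit code point needs at most three UTF-8 bytes, which is exactly what the read loop now reserves per character. A sketch of the encoding rule the reservation relies on (surrogate halves ignored; this is not the kernel's own helper):

	#include <stddef.h>
	#include <stdint.h>

	/* Encode a 16-bit code point as UTF-8; returns bytes written (1..3). */
	static size_t utf8_encode_u16(uint16_t ch, unsigned char buf[3])
	{
		if (ch < 0x80) {
			buf[0] = ch;
			return 1;
		}
		if (ch < 0x800) {
			buf[0] = 0xc0 | (ch >> 6);
			buf[1] = 0x80 | (ch & 0x3f);
			return 2;
		}
		buf[0] = 0xe0 | (ch >> 12);
		buf[1] = 0x80 | ((ch >> 6) & 0x3f);
		buf[2] = 0x80 | (ch & 0x3f);
		return 3;
	}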
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 01ac306131c1..10db5656fd5d 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -3727,11 +3727,16 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
3727 * Check for overflow of 8byte PRI READ_KEYS payload and 3727 * Check for overflow of 8byte PRI READ_KEYS payload and
3728 * next reservation key list descriptor. 3728 * next reservation key list descriptor.
3729 */ 3729 */
3730 if ((add_len + 8) > (cmd->data_length - 8)) 3730 if (off + 8 <= cmd->data_length) {
3731 break; 3731 put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
3732 3732 off += 8;
3733 put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); 3733 }
3734 off += 8; 3734 /*
3735 * SPC5r17: 6.16.2 READ KEYS service action
3736 * The ADDITIONAL LENGTH field indicates the number of bytes in
3737 * the Reservation key list. The contents of the ADDITIONAL
3738 * LENGTH field are not altered based on the allocation length
3739 */
3735 add_len += 8; 3740 add_len += 8;
3736 } 3741 }
3737 spin_unlock(&dev->t10_pr.registration_lock); 3742 spin_unlock(&dev->t10_pr.registration_lock);
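Worked example of the SPC-5 rule quoted in the new comment: with three registered keys and a 24-byte allocation (an 8-byte header plus room for two descriptors), the target writes two keys but still reports ADDITIONAL LENGTH = 24. A standalone sketch of the loop's accounting:

	#include <stdio.h>

	int main(void)
	{
		unsigned int off = 8, add_len = 0, data_length = 24;

		for (int i = 0; i < 3; i++) {	/* three registered keys */
			if (off + 8 <= data_length)
				off += 8;	/* descriptor actually copied */
			add_len += 8;		/* counted regardless of truncation */
		}
		printf("ADDITIONAL LENGTH = %u, bytes written = %u\n",
		       add_len, off);
		return 0;			/* prints 24 and 24: one key dropped */
	}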
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 6281266b8ec0..a923ebdeb73c 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -213,6 +213,10 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
213 goto err_free_acl; 213 goto err_free_acl;
214 } 214 }
215 ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl); 215 ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
216 if (!ret) {
217 /* Notify userspace about the change */
218 kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
219 }
216 mutex_unlock(&tb->lock); 220 mutex_unlock(&tb->lock);
217 221
218err_free_acl: 222err_free_acl:
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index e8f4ac9400ea..5d421d7e8904 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -215,7 +215,20 @@ static ssize_t name_show(struct device *dev,
215 struct device_attribute *attr, char *buf) 215 struct device_attribute *attr, char *buf)
216{ 216{
217 struct uio_device *idev = dev_get_drvdata(dev); 217 struct uio_device *idev = dev_get_drvdata(dev);
218 return sprintf(buf, "%s\n", idev->info->name); 218 int ret;
219
220 mutex_lock(&idev->info_lock);
221 if (!idev->info) {
222 ret = -EINVAL;
223 dev_err(dev, "the device has been unregistered\n");
224 goto out;
225 }
226
227 ret = sprintf(buf, "%s\n", idev->info->name);
228
229out:
230 mutex_unlock(&idev->info_lock);
231 return ret;
219} 232}
220static DEVICE_ATTR_RO(name); 233static DEVICE_ATTR_RO(name);
221 234
@@ -223,7 +236,20 @@ static ssize_t version_show(struct device *dev,
223 struct device_attribute *attr, char *buf) 236 struct device_attribute *attr, char *buf)
224{ 237{
225 struct uio_device *idev = dev_get_drvdata(dev); 238 struct uio_device *idev = dev_get_drvdata(dev);
226 return sprintf(buf, "%s\n", idev->info->version); 239 int ret;
240
241 mutex_lock(&idev->info_lock);
242 if (!idev->info) {
243 ret = -EINVAL;
244 dev_err(dev, "the device has been unregistered\n");
245 goto out;
246 }
247
248 ret = sprintf(buf, "%s\n", idev->info->version);
249
250out:
251 mutex_unlock(&idev->info_lock);
252 return ret;
227} 253}
228static DEVICE_ATTR_RO(version); 254static DEVICE_ATTR_RO(version);
229 255
@@ -415,11 +441,15 @@ EXPORT_SYMBOL_GPL(uio_event_notify);
415static irqreturn_t uio_interrupt(int irq, void *dev_id) 441static irqreturn_t uio_interrupt(int irq, void *dev_id)
416{ 442{
417 struct uio_device *idev = (struct uio_device *)dev_id; 443 struct uio_device *idev = (struct uio_device *)dev_id;
418 irqreturn_t ret = idev->info->handler(irq, idev->info); 444 irqreturn_t ret;
419 445
446 mutex_lock(&idev->info_lock);
447
448 ret = idev->info->handler(irq, idev->info);
420 if (ret == IRQ_HANDLED) 449 if (ret == IRQ_HANDLED)
421 uio_event_notify(idev->info); 450 uio_event_notify(idev->info);
422 451
452 mutex_unlock(&idev->info_lock);
423 return ret; 453 return ret;
424} 454}
425 455
@@ -433,7 +463,6 @@ static int uio_open(struct inode *inode, struct file *filep)
433 struct uio_device *idev; 463 struct uio_device *idev;
434 struct uio_listener *listener; 464 struct uio_listener *listener;
435 int ret = 0; 465 int ret = 0;
436 unsigned long flags;
437 466
438 mutex_lock(&minor_lock); 467 mutex_lock(&minor_lock);
439 idev = idr_find(&uio_idr, iminor(inode)); 468 idev = idr_find(&uio_idr, iminor(inode));
@@ -460,10 +489,16 @@ static int uio_open(struct inode *inode, struct file *filep)
460 listener->event_count = atomic_read(&idev->event); 489 listener->event_count = atomic_read(&idev->event);
461 filep->private_data = listener; 490 filep->private_data = listener;
462 491
463 spin_lock_irqsave(&idev->info_lock, flags); 492 mutex_lock(&idev->info_lock);
493 if (!idev->info) {
494 mutex_unlock(&idev->info_lock);
495 ret = -EINVAL;
496 goto err_alloc_listener;
497 }
498
464 if (idev->info && idev->info->open) 499 if (idev->info && idev->info->open)
465 ret = idev->info->open(idev->info, inode); 500 ret = idev->info->open(idev->info, inode);
466 spin_unlock_irqrestore(&idev->info_lock, flags); 501 mutex_unlock(&idev->info_lock);
467 if (ret) 502 if (ret)
468 goto err_infoopen; 503 goto err_infoopen;
469 504
@@ -495,12 +530,11 @@ static int uio_release(struct inode *inode, struct file *filep)
495 int ret = 0; 530 int ret = 0;
496 struct uio_listener *listener = filep->private_data; 531 struct uio_listener *listener = filep->private_data;
497 struct uio_device *idev = listener->dev; 532 struct uio_device *idev = listener->dev;
498 unsigned long flags;
499 533
500 spin_lock_irqsave(&idev->info_lock, flags); 534 mutex_lock(&idev->info_lock);
501 if (idev->info && idev->info->release) 535 if (idev->info && idev->info->release)
502 ret = idev->info->release(idev->info, inode); 536 ret = idev->info->release(idev->info, inode);
503 spin_unlock_irqrestore(&idev->info_lock, flags); 537 mutex_unlock(&idev->info_lock);
504 538
505 module_put(idev->owner); 539 module_put(idev->owner);
506 kfree(listener); 540 kfree(listener);
@@ -513,12 +547,11 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait)
513 struct uio_listener *listener = filep->private_data; 547 struct uio_listener *listener = filep->private_data;
514 struct uio_device *idev = listener->dev; 548 struct uio_device *idev = listener->dev;
515 __poll_t ret = 0; 549 __poll_t ret = 0;
516 unsigned long flags;
517 550
518 spin_lock_irqsave(&idev->info_lock, flags); 551 mutex_lock(&idev->info_lock);
519 if (!idev->info || !idev->info->irq) 552 if (!idev->info || !idev->info->irq)
520 ret = -EIO; 553 ret = -EIO;
521 spin_unlock_irqrestore(&idev->info_lock, flags); 554 mutex_unlock(&idev->info_lock);
522 555
523 if (ret) 556 if (ret)
524 return ret; 557 return ret;
@@ -537,12 +570,11 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
537 DECLARE_WAITQUEUE(wait, current); 570 DECLARE_WAITQUEUE(wait, current);
538 ssize_t retval = 0; 571 ssize_t retval = 0;
539 s32 event_count; 572 s32 event_count;
540 unsigned long flags;
541 573
542 spin_lock_irqsave(&idev->info_lock, flags); 574 mutex_lock(&idev->info_lock);
543 if (!idev->info || !idev->info->irq) 575 if (!idev->info || !idev->info->irq)
544 retval = -EIO; 576 retval = -EIO;
545 spin_unlock_irqrestore(&idev->info_lock, flags); 577 mutex_unlock(&idev->info_lock);
546 578
547 if (retval) 579 if (retval)
548 return retval; 580 return retval;
@@ -592,9 +624,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
592 struct uio_device *idev = listener->dev; 624 struct uio_device *idev = listener->dev;
593 ssize_t retval; 625 ssize_t retval;
594 s32 irq_on; 626 s32 irq_on;
595 unsigned long flags;
596 627
597 spin_lock_irqsave(&idev->info_lock, flags); 628 mutex_lock(&idev->info_lock);
629 if (!idev->info) {
630 retval = -EINVAL;
631 goto out;
632 }
633
598 if (!idev->info || !idev->info->irq) { 634 if (!idev->info || !idev->info->irq) {
599 retval = -EIO; 635 retval = -EIO;
600 goto out; 636 goto out;
@@ -618,7 +654,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
618 retval = idev->info->irqcontrol(idev->info, irq_on); 654 retval = idev->info->irqcontrol(idev->info, irq_on);
619 655
620out: 656out:
621 spin_unlock_irqrestore(&idev->info_lock, flags); 657 mutex_unlock(&idev->info_lock);
622 return retval ? retval : sizeof(s32); 658 return retval ? retval : sizeof(s32);
623} 659}
624 660
@@ -640,10 +676,20 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
640 struct page *page; 676 struct page *page;
641 unsigned long offset; 677 unsigned long offset;
642 void *addr; 678 void *addr;
 679 vm_fault_t ret = 0;
680 int mi;
643 681
644 int mi = uio_find_mem_index(vmf->vma); 682 mutex_lock(&idev->info_lock);
645 if (mi < 0) 683 if (!idev->info) {
646 return VM_FAULT_SIGBUS; 684 ret = VM_FAULT_SIGBUS;
685 goto out;
686 }
687
688 mi = uio_find_mem_index(vmf->vma);
689 if (mi < 0) {
690 ret = VM_FAULT_SIGBUS;
691 goto out;
692 }
647 693
648 /* 694 /*
649 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE 695 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
@@ -658,7 +704,11 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
658 page = vmalloc_to_page(addr); 704 page = vmalloc_to_page(addr);
659 get_page(page); 705 get_page(page);
660 vmf->page = page; 706 vmf->page = page;
661 return 0; 707
708out:
709 mutex_unlock(&idev->info_lock);
710
711 return ret;
662} 712}
663 713
664static const struct vm_operations_struct uio_logical_vm_ops = { 714static const struct vm_operations_struct uio_logical_vm_ops = {
@@ -683,6 +733,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
683 struct uio_device *idev = vma->vm_private_data; 733 struct uio_device *idev = vma->vm_private_data;
684 int mi = uio_find_mem_index(vma); 734 int mi = uio_find_mem_index(vma);
685 struct uio_mem *mem; 735 struct uio_mem *mem;
736
686 if (mi < 0) 737 if (mi < 0)
687 return -EINVAL; 738 return -EINVAL;
688 mem = idev->info->mem + mi; 739 mem = idev->info->mem + mi;
@@ -724,30 +775,46 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
724 775
725 vma->vm_private_data = idev; 776 vma->vm_private_data = idev;
726 777
778 mutex_lock(&idev->info_lock);
779 if (!idev->info) {
780 ret = -EINVAL;
781 goto out;
782 }
783
727 mi = uio_find_mem_index(vma); 784 mi = uio_find_mem_index(vma);
728 if (mi < 0) 785 if (mi < 0) {
729 return -EINVAL; 786 ret = -EINVAL;
787 goto out;
788 }
730 789
731 requested_pages = vma_pages(vma); 790 requested_pages = vma_pages(vma);
732 actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) 791 actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
733 + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; 792 + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
734 if (requested_pages > actual_pages) 793 if (requested_pages > actual_pages) {
735 return -EINVAL; 794 ret = -EINVAL;
795 goto out;
796 }
736 797
737 if (idev->info->mmap) { 798 if (idev->info->mmap) {
738 ret = idev->info->mmap(idev->info, vma); 799 ret = idev->info->mmap(idev->info, vma);
739 return ret; 800 goto out;
740 } 801 }
741 802
742 switch (idev->info->mem[mi].memtype) { 803 switch (idev->info->mem[mi].memtype) {
743 case UIO_MEM_PHYS: 804 case UIO_MEM_PHYS:
744 return uio_mmap_physical(vma); 805 ret = uio_mmap_physical(vma);
806 break;
745 case UIO_MEM_LOGICAL: 807 case UIO_MEM_LOGICAL:
746 case UIO_MEM_VIRTUAL: 808 case UIO_MEM_VIRTUAL:
747 return uio_mmap_logical(vma); 809 ret = uio_mmap_logical(vma);
810 break;
748 default: 811 default:
749 return -EINVAL; 812 ret = -EINVAL;
750 } 813 }
814
815out:
816 mutex_unlock(&idev->info_lock);
 817 return ret;
751} 818}
752 819
753static const struct file_operations uio_fops = { 820static const struct file_operations uio_fops = {
@@ -865,7 +932,7 @@ int __uio_register_device(struct module *owner,
865 932
866 idev->owner = owner; 933 idev->owner = owner;
867 idev->info = info; 934 idev->info = info;
868 spin_lock_init(&idev->info_lock); 935 mutex_init(&idev->info_lock);
869 init_waitqueue_head(&idev->wait); 936 init_waitqueue_head(&idev->wait);
870 atomic_set(&idev->event, 0); 937 atomic_set(&idev->event, 0);
871 938
@@ -902,8 +969,9 @@ int __uio_register_device(struct module *owner,
902 * FDs at the time of unregister and therefore may not be 969 * FDs at the time of unregister and therefore may not be
903 * freed until they are released. 970 * freed until they are released.
904 */ 971 */
905 ret = request_irq(info->irq, uio_interrupt, 972 ret = request_threaded_irq(info->irq, NULL, uio_interrupt,
906 info->irq_flags, info->name, idev); 973 info->irq_flags, info->name, idev);
974
907 if (ret) 975 if (ret)
908 goto err_request_irq; 976 goto err_request_irq;
909 } 977 }
@@ -928,7 +996,6 @@ EXPORT_SYMBOL_GPL(__uio_register_device);
928void uio_unregister_device(struct uio_info *info) 996void uio_unregister_device(struct uio_info *info)
929{ 997{
930 struct uio_device *idev; 998 struct uio_device *idev;
931 unsigned long flags;
932 999
933 if (!info || !info->uio_dev) 1000 if (!info || !info->uio_dev)
934 return; 1001 return;
@@ -937,14 +1004,14 @@ void uio_unregister_device(struct uio_info *info)
937 1004
938 uio_free_minor(idev); 1005 uio_free_minor(idev);
939 1006
1007 mutex_lock(&idev->info_lock);
940 uio_dev_del_attributes(idev); 1008 uio_dev_del_attributes(idev);
941 1009
942 if (info->irq && info->irq != UIO_IRQ_CUSTOM) 1010 if (info->irq && info->irq != UIO_IRQ_CUSTOM)
943 free_irq(info->irq, idev); 1011 free_irq(info->irq, idev);
944 1012
945 spin_lock_irqsave(&idev->info_lock, flags);
946 idev->info = NULL; 1013 idev->info = NULL;
947 spin_unlock_irqrestore(&idev->info_lock, flags); 1014 mutex_unlock(&idev->info_lock);
948 1015
949 device_unregister(&idev->dev); 1016 device_unregister(&idev->dev);
950 1017
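The conversion from a spinlock to a mutex lets the info handlers sleep while info_lock is held (hence request_threaded_irq), and the new NULL checks on idev->info are what make every file operation fail cleanly once uio_unregister_device() has run. For context, a minimal hypothetical UIO driver whose uio_info these checks protect; the names are illustrative and it needs a matching platform device to bind:

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/uio_driver.h>

	static struct uio_info demo_info = {
		.name = "uio-demo",		/* illustrative */
		.version = "0",
		.irq = UIO_IRQ_NONE,		/* no interrupt source */
	};

	static int demo_probe(struct platform_device *pdev)
	{
		/* idev->info points at demo_info until uio_unregister_device()
		 * sets it to NULL under info_lock. */
		return uio_register_device(&pdev->dev, &demo_info);
	}

	static int demo_remove(struct platform_device *pdev)
	{
		uio_unregister_device(&demo_info);
		return 0;
	}

	static struct platform_driver demo_driver = {
		.probe = demo_probe,
		.remove = demo_remove,
		.driver = { .name = "uio-demo" },
	};
	module_platform_driver(demo_driver);
	MODULE_LICENSE("GPL");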
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 785f0ed037f7..ee34e9046f7e 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -3,6 +3,7 @@ config USB_CHIPIDEA
3 depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA 3 depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
4 select EXTCON 4 select EXTCON
5 select RESET_CONTROLLER 5 select RESET_CONTROLLER
6 select USB_ULPI_BUS
6 help 7 help
7 Say Y here if your system has a dual role high speed USB 8 Say Y here if your system has a dual role high speed USB
8 controller based on ChipIdea silicon IP. It supports: 9 controller based on ChipIdea silicon IP. It supports:
@@ -38,12 +39,4 @@ config USB_CHIPIDEA_HOST
38 help 39 help
39 Say Y here to enable host controller functionality of the 40 Say Y here to enable host controller functionality of the
40 ChipIdea driver. 41 ChipIdea driver.
41
42config USB_CHIPIDEA_ULPI
43 bool "ChipIdea ULPI PHY support"
44 depends on USB_ULPI_BUS=y || USB_ULPI_BUS=USB_CHIPIDEA
45 help
46 Say Y here if you have a ULPI PHY attached to your ChipIdea
47 controller.
48
49endif 42endif
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index e3d5e728fa53..12df94f78f72 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -1,11 +1,10 @@
1# SPDX-License-Identifier: GPL-2.0 1# SPDX-License-Identifier: GPL-2.0
2obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o 2obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o
3 3
4ci_hdrc-y := core.o otg.o debug.o 4ci_hdrc-y := core.o otg.o debug.o ulpi.o
5ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o 5ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o
6ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o 6ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o
7ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o 7ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o
8ci_hdrc-$(CONFIG_USB_CHIPIDEA_ULPI) += ulpi.o
9 8
10# Glue/Bridge layers go here 9# Glue/Bridge layers go here
11 10
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 0bf244d50544..6a2cc5cd0281 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -240,10 +240,8 @@ struct ci_hdrc {
240 240
241 struct ci_hdrc_platform_data *platdata; 241 struct ci_hdrc_platform_data *platdata;
242 int vbus_active; 242 int vbus_active;
243#ifdef CONFIG_USB_CHIPIDEA_ULPI
244 struct ulpi *ulpi; 243 struct ulpi *ulpi;
245 struct ulpi_ops ulpi_ops; 244 struct ulpi_ops ulpi_ops;
246#endif
247 struct phy *phy; 245 struct phy *phy;
248 /* old usb_phy interface */ 246 /* old usb_phy interface */
249 struct usb_phy *usb_phy; 247 struct usb_phy *usb_phy;
@@ -426,15 +424,9 @@ static inline bool ci_otg_is_fsm_mode(struct ci_hdrc *ci)
426#endif 424#endif
427} 425}
428 426
429#if IS_ENABLED(CONFIG_USB_CHIPIDEA_ULPI)
430int ci_ulpi_init(struct ci_hdrc *ci); 427int ci_ulpi_init(struct ci_hdrc *ci);
431void ci_ulpi_exit(struct ci_hdrc *ci); 428void ci_ulpi_exit(struct ci_hdrc *ci);
432int ci_ulpi_resume(struct ci_hdrc *ci); 429int ci_ulpi_resume(struct ci_hdrc *ci);
433#else
434static inline int ci_ulpi_init(struct ci_hdrc *ci) { return 0; }
435static inline void ci_ulpi_exit(struct ci_hdrc *ci) { }
436static inline int ci_ulpi_resume(struct ci_hdrc *ci) { return 0; }
437#endif
438 430
439u32 hw_read_intr_enable(struct ci_hdrc *ci); 431u32 hw_read_intr_enable(struct ci_hdrc *ci);
440 432
diff --git a/drivers/usb/chipidea/ulpi.c b/drivers/usb/chipidea/ulpi.c
index 6da42dcd2888..dfec07e8ae1d 100644
--- a/drivers/usb/chipidea/ulpi.c
+++ b/drivers/usb/chipidea/ulpi.c
@@ -95,6 +95,9 @@ int ci_ulpi_resume(struct ci_hdrc *ci)
95{ 95{
96 int cnt = 100000; 96 int cnt = 100000;
97 97
98 if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
99 return 0;
100
98 while (cnt-- > 0) { 101 while (cnt-- > 0) {
99 if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE)) 102 if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE))
100 return 0; 103 return 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 998b32d0167e..75c4623ad779 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1831,6 +1831,9 @@ static const struct usb_device_id acm_ids[] = {
1831 { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */ 1831 { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
1832 .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */ 1832 .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
1833 }, 1833 },
1834 { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
1835 .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
1836 },
1834 1837
1835 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ 1838 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
1836 .driver_info = CLEAR_HALT_CONDITIONS, 1839 .driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index fcae521df29b..1fb266809966 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1142,10 +1142,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
1142 1142
1143 if (!udev || udev->state == USB_STATE_NOTATTACHED) { 1143 if (!udev || udev->state == USB_STATE_NOTATTACHED) {
1144 /* Tell hub_wq to disconnect the device or 1144 /* Tell hub_wq to disconnect the device or
1145 * check for a new connection 1145 * check for a new connection or over current condition.
1146 * Based on USB2.0 Spec Section 11.12.5,
1147 * C_PORT_OVER_CURRENT could be set while
1148 * PORT_OVER_CURRENT is not. So check for any of them.
1146 */ 1149 */
1147 if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || 1150 if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
1148 (portstatus & USB_PORT_STAT_OVERCURRENT)) 1151 (portstatus & USB_PORT_STAT_OVERCURRENT) ||
1152 (portchange & USB_PORT_STAT_C_OVERCURRENT))
1149 set_bit(port1, hub->change_bits); 1153 set_bit(port1, hub->change_bits);
1150 1154
1151 } else if (portstatus & USB_PORT_STAT_ENABLE) { 1155 } else if (portstatus & USB_PORT_STAT_ENABLE) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index c55def2f1320..097057d2eacf 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -378,6 +378,10 @@ static const struct usb_device_id usb_quirk_list[] = {
378 /* Corsair K70 RGB */ 378 /* Corsair K70 RGB */
379 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT }, 379 { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
380 380
381 /* Corsair Strafe */
382 { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
383 USB_QUIRK_DELAY_CTRL_MSG },
384
381 /* Corsair Strafe RGB */ 385 /* Corsair Strafe RGB */
382 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT | 386 { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
383 USB_QUIRK_DELAY_CTRL_MSG }, 387 USB_QUIRK_DELAY_CTRL_MSG },
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index a0f82cca2d9a..cefc99ae69b2 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3430,7 +3430,7 @@ static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
3430 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3430 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3431 hs_ep = hsotg->eps_in[idx]; 3431 hs_ep = hsotg->eps_in[idx];
3432 /* Proceed only unmasked ISOC EPs */ 3432 /* Proceed only unmasked ISOC EPs */
3433 if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) 3433 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3434 continue; 3434 continue;
3435 3435
3436 epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx)); 3436 epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
@@ -3476,7 +3476,7 @@ static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
3476 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3476 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3477 hs_ep = hsotg->eps_out[idx]; 3477 hs_ep = hsotg->eps_out[idx];
3478 /* Proceed only unmasked ISOC EPs */ 3478 /* Proceed only unmasked ISOC EPs */
3479 if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) 3479 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3480 continue; 3480 continue;
3481 3481
3482 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx)); 3482 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
@@ -3650,7 +3650,7 @@ irq_retry:
3650 for (idx = 1; idx < hsotg->num_of_eps; idx++) { 3650 for (idx = 1; idx < hsotg->num_of_eps; idx++) {
3651 hs_ep = hsotg->eps_out[idx]; 3651 hs_ep = hsotg->eps_out[idx];
3652 /* Proceed only unmasked ISOC EPs */ 3652 /* Proceed only unmasked ISOC EPs */
3653 if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk)) 3653 if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
3654 continue; 3654 continue;
3655 3655
3656 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx)); 3656 epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index b1104be3429c..6e2cdd7b93d4 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2665,34 +2665,35 @@ static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
2665 2665
2666#define DWC2_USB_DMA_ALIGN 4 2666#define DWC2_USB_DMA_ALIGN 4
2667 2667
2668struct dma_aligned_buffer {
2669 void *kmalloc_ptr;
2670 void *old_xfer_buffer;
2671 u8 data[0];
2672};
2673
2674static void dwc2_free_dma_aligned_buffer(struct urb *urb) 2668static void dwc2_free_dma_aligned_buffer(struct urb *urb)
2675{ 2669{
2676 struct dma_aligned_buffer *temp; 2670 void *stored_xfer_buffer;
2671 size_t length;
2677 2672
2678 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) 2673 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2679 return; 2674 return;
2680 2675
2681 temp = container_of(urb->transfer_buffer, 2676 /* Restore urb->transfer_buffer from the end of the allocated area */
2682 struct dma_aligned_buffer, data); 2677 memcpy(&stored_xfer_buffer, urb->transfer_buffer +
2678 urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
2683 2679
2684 if (usb_urb_dir_in(urb)) 2680 if (usb_urb_dir_in(urb)) {
2685 memcpy(temp->old_xfer_buffer, temp->data, 2681 if (usb_pipeisoc(urb->pipe))
2686 urb->transfer_buffer_length); 2682 length = urb->transfer_buffer_length;
2687 urb->transfer_buffer = temp->old_xfer_buffer; 2683 else
2688 kfree(temp->kmalloc_ptr); 2684 length = urb->actual_length;
2685
2686 memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
2687 }
2688 kfree(urb->transfer_buffer);
2689 urb->transfer_buffer = stored_xfer_buffer;
2689 2690
2690 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; 2691 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2691} 2692}
2692 2693
2693static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) 2694static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
2694{ 2695{
2695 struct dma_aligned_buffer *temp, *kmalloc_ptr; 2696 void *kmalloc_ptr;
2696 size_t kmalloc_size; 2697 size_t kmalloc_size;
2697 2698
2698 if (urb->num_sgs || urb->sg || 2699 if (urb->num_sgs || urb->sg ||
@@ -2700,22 +2701,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
2700 !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1))) 2701 !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
2701 return 0; 2702 return 0;
2702 2703
2703 /* Allocate a buffer with enough padding for alignment */ 2704 /*
2705 * Allocate a buffer with enough padding for original transfer_buffer
2706 * pointer. This allocation is guaranteed to be aligned properly for
2707 * DMA
2708 */
2704 kmalloc_size = urb->transfer_buffer_length + 2709 kmalloc_size = urb->transfer_buffer_length +
2705 sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1; 2710 sizeof(urb->transfer_buffer);
2706 2711
2707 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); 2712 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2708 if (!kmalloc_ptr) 2713 if (!kmalloc_ptr)
2709 return -ENOMEM; 2714 return -ENOMEM;
2710 2715
2711 /* Position our struct dma_aligned_buffer such that data is aligned */ 2716 /*
2712 temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1; 2717 * Position value of original urb->transfer_buffer pointer to the end
2713 temp->kmalloc_ptr = kmalloc_ptr; 2718 * of allocation for later referencing
2714 temp->old_xfer_buffer = urb->transfer_buffer; 2719 */
2720 memcpy(kmalloc_ptr + urb->transfer_buffer_length,
2721 &urb->transfer_buffer, sizeof(urb->transfer_buffer));
2722
2715 if (usb_urb_dir_out(urb)) 2723 if (usb_urb_dir_out(urb))
2716 memcpy(temp->data, urb->transfer_buffer, 2724 memcpy(kmalloc_ptr, urb->transfer_buffer,
2717 urb->transfer_buffer_length); 2725 urb->transfer_buffer_length);
2718 urb->transfer_buffer = temp->data; 2726 urb->transfer_buffer = kmalloc_ptr;
2719 2727
2720 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; 2728 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2721 2729
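Instead of a container struct placed before the data, the rewritten helpers append the caller's transfer_buffer pointer after the payload and rely on kmalloc()'s natural alignment (at least ARCH_KMALLOC_MINALIGN, comfortably above DWC2_USB_DMA_ALIGN). A userspace sketch of the same stash-the-pointer-at-the-tail trick (not driver code):

	#include <stdlib.h>
	#include <string.h>

	/* Bounce buffer that remembers the caller's pointer at its tail. */
	static void *bounce_alloc(void *orig, size_t len)
	{
		void *buf = malloc(len + sizeof(orig));

		if (!buf)
			return NULL;
		memcpy(buf, orig, len);				/* stage payload */
		memcpy((char *)buf + len, &orig, sizeof(orig));	/* stash pointer */
		return buf;
	}

	static void *bounce_free(void *buf, size_t len)
	{
		void *orig;

		memcpy(&orig, (char *)buf + len, sizeof(orig));	/* recover pointer */
		memcpy(orig, buf, len);				/* copy data back */
		free(buf);
		return orig;
	}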
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index ed7f05cf4906..8ce10caf3e19 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -1231,7 +1231,10 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1231 * avoid interrupt storms we'll wait before retrying if we've got 1231 * avoid interrupt storms we'll wait before retrying if we've got
1232 * several NAKs. If we didn't do this we'd retry directly from the 1232 * several NAKs. If we didn't do this we'd retry directly from the
1233 * interrupt handler and could end up quickly getting another 1233 * interrupt handler and could end up quickly getting another
1234 * interrupt (another NAK), which we'd retry. 1234 * interrupt (another NAK), which we'd retry. Note that we do not
1235 * delay retries for IN parts of control requests, as those are expected
1236 * to complete fairly quickly, and if we delay them we risk confusing
 1237 * the device and cause it to issue a STALL.
1235 * 1238 *
1236 * Note that in DMA mode software only gets involved to re-send NAKed 1239 * Note that in DMA mode software only gets involved to re-send NAKed
1237 * transfers for split transactions, so we only need to apply this 1240 * transfers for split transactions, so we only need to apply this
@@ -1244,7 +1247,9 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1244 qtd->error_count = 0; 1247 qtd->error_count = 0;
1245 qtd->complete_split = 0; 1248 qtd->complete_split = 0;
1246 qtd->num_naks++; 1249 qtd->num_naks++;
1247 qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY; 1250 qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
1251 !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
1252 chan->ep_is_in);
1248 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK); 1253 dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1249 goto handle_nak_done; 1254 goto handle_nak_done;
1250 } 1255 }
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index c77ff50a88a2..8efde178eef4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -973,15 +973,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
973 ret = dwc3_ep0_start_trans(dep); 973 ret = dwc3_ep0_start_trans(dep);
974 } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) && 974 } else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
975 req->request.length && req->request.zero) { 975 req->request.length && req->request.zero) {
976 u32 maxpacket;
977 976
978 ret = usb_gadget_map_request_by_dev(dwc->sysdev, 977 ret = usb_gadget_map_request_by_dev(dwc->sysdev,
979 &req->request, dep->number); 978 &req->request, dep->number);
980 if (ret) 979 if (ret)
981 return; 980 return;
982 981
983 maxpacket = dep->endpoint.maxpacket;
984
985 /* prepare normal TRB */ 982 /* prepare normal TRB */
986 dwc3_ep0_prepare_one_trb(dep, req->request.dma, 983 dwc3_ep0_prepare_one_trb(dep, req->request.dma,
987 req->request.length, 984 req->request.length,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index d2fa071c21b1..b8a15840b4ff 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1819,7 +1819,6 @@ unknown:
1819 if (cdev->use_os_string && cdev->os_desc_config && 1819 if (cdev->use_os_string && cdev->os_desc_config &&
1820 (ctrl->bRequestType & USB_TYPE_VENDOR) && 1820 (ctrl->bRequestType & USB_TYPE_VENDOR) &&
1821 ctrl->bRequest == cdev->b_vendor_code) { 1821 ctrl->bRequest == cdev->b_vendor_code) {
1822 struct usb_request *req;
1823 struct usb_configuration *os_desc_cfg; 1822 struct usb_configuration *os_desc_cfg;
1824 u8 *buf; 1823 u8 *buf;
1825 int interface; 1824 int interface;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 33e2030503fa..3ada83d81bda 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3263,7 +3263,7 @@ static int ffs_func_setup(struct usb_function *f,
3263 __ffs_event_add(ffs, FUNCTIONFS_SETUP); 3263 __ffs_event_add(ffs, FUNCTIONFS_SETUP);
3264 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); 3264 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
3265 3265
3266 return USB_GADGET_DELAYED_STATUS; 3266 return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
3267} 3267}
3268 3268
3269static bool ffs_func_req_match(struct usb_function *f, 3269static bool ffs_func_req_match(struct usb_function *f,
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index d2dc1f00180b..d582921f7257 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -438,14 +438,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
438}; 438};
439 439
440struct cntrl_cur_lay3 { 440struct cntrl_cur_lay3 {
441 __u32 dCUR; 441 __le32 dCUR;
442}; 442};
443 443
444struct cntrl_range_lay3 { 444struct cntrl_range_lay3 {
445 __u16 wNumSubRanges; 445 __le16 wNumSubRanges;
446 __u32 dMIN; 446 __le32 dMIN;
447 __u32 dMAX; 447 __le32 dMAX;
448 __u32 dRES; 448 __le32 dRES;
449} __packed; 449} __packed;
450 450
451static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts, 451static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
@@ -559,13 +559,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
559 agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc); 559 agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
560 if (!agdev->out_ep) { 560 if (!agdev->out_ep) {
561 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); 561 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
562 return ret; 562 return -ENODEV;
563 } 563 }
564 564
565 agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc); 565 agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
566 if (!agdev->in_ep) { 566 if (!agdev->in_ep) {
567 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__); 567 dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
568 return ret; 568 return -ENODEV;
569 } 569 }
570 570
571 agdev->in_ep_maxpsize = max_t(u16, 571 agdev->in_ep_maxpsize = max_t(u16,
@@ -703,9 +703,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
703 memset(&c, 0, sizeof(struct cntrl_cur_lay3)); 703 memset(&c, 0, sizeof(struct cntrl_cur_lay3));
704 704
705 if (entity_id == USB_IN_CLK_ID) 705 if (entity_id == USB_IN_CLK_ID)
706 c.dCUR = p_srate; 706 c.dCUR = cpu_to_le32(p_srate);
707 else if (entity_id == USB_OUT_CLK_ID) 707 else if (entity_id == USB_OUT_CLK_ID)
708 c.dCUR = c_srate; 708 c.dCUR = cpu_to_le32(c_srate);
709 709
710 value = min_t(unsigned, w_length, sizeof c); 710 value = min_t(unsigned, w_length, sizeof c);
711 memcpy(req->buf, &c, value); 711 memcpy(req->buf, &c, value);
@@ -742,15 +742,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
742 742
743 if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { 743 if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
744 if (entity_id == USB_IN_CLK_ID) 744 if (entity_id == USB_IN_CLK_ID)
745 r.dMIN = p_srate; 745 r.dMIN = cpu_to_le32(p_srate);
746 else if (entity_id == USB_OUT_CLK_ID) 746 else if (entity_id == USB_OUT_CLK_ID)
747 r.dMIN = c_srate; 747 r.dMIN = cpu_to_le32(c_srate);
748 else 748 else
749 return -EOPNOTSUPP; 749 return -EOPNOTSUPP;
750 750
751 r.dMAX = r.dMIN; 751 r.dMAX = r.dMIN;
752 r.dRES = 0; 752 r.dRES = 0;
753 r.wNumSubRanges = 1; 753 r.wNumSubRanges = cpu_to_le16(1);
754 754
755 value = min_t(unsigned, w_length, sizeof r); 755 value = min_t(unsigned, w_length, sizeof r);
756 memcpy(req->buf, &r, value); 756 memcpy(req->buf, &r, value);
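The descriptor fields become __le16/__le32 and every store goes through cpu_to_le16()/cpu_to_le32() because USB control payloads are little-endian on the wire; before this change a big-endian host would have sent byte-swapped sample rates. A userspace analogue using glibc's htole32() (struct and function names are illustrative):

	#include <endian.h>
	#include <stdint.h>
	#include <string.h>

	struct cntrl_cur_lay3 {
		uint32_t dCUR;	/* little-endian on the wire */
	} __attribute__((packed));

	/* Serialize a host-order sample rate into the USB request buffer. */
	static void fill_cur(uint8_t *req_buf, uint32_t srate_host_order)
	{
		struct cntrl_cur_lay3 c;

		c.dCUR = htole32(srate_host_order);	/* no-op on LE hosts */
		memcpy(req_buf, &c, sizeof(c));
	}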
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index a72295c953bb..fb5ed97572e5 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -32,9 +32,6 @@ struct uac_req {
 struct uac_rtd_params {
 	struct snd_uac_chip *uac; /* parent chip */
 	bool ep_enabled; /* if the ep is enabled */
-	/* Size of the ring buffer */
-	size_t dma_bytes;
-	unsigned char *dma_area;
 
 	struct snd_pcm_substream *ss;
 
@@ -43,8 +40,6 @@ struct uac_rtd_params {
 
 	void *rbuf;
 
-	size_t period_size;
-
 	unsigned max_psize;	/* MaxPacketSize of endpoint */
 	struct uac_req *ureq;
 
@@ -84,12 +79,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
 static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	unsigned pending;
-	unsigned long flags;
+	unsigned long flags, flags2;
 	unsigned int hw_ptr;
-	bool update_alsa = false;
 	int status = req->status;
 	struct uac_req *ur = req->context;
 	struct snd_pcm_substream *substream;
+	struct snd_pcm_runtime *runtime;
 	struct uac_rtd_params *prm = ur->pp;
 	struct snd_uac_chip *uac = prm->uac;
 
@@ -111,6 +106,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
 	if (!substream)
 		goto exit;
 
+	snd_pcm_stream_lock_irqsave(substream, flags2);
+
+	runtime = substream->runtime;
+	if (!runtime || !snd_pcm_running(substream)) {
+		snd_pcm_stream_unlock_irqrestore(substream, flags2);
+		goto exit;
+	}
+
 	spin_lock_irqsave(&prm->lock, flags);
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -137,43 +140,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
 		req->actual = req->length;
 	}
 
-	pending = prm->hw_ptr % prm->period_size;
-	pending += req->actual;
-	if (pending >= prm->period_size)
-		update_alsa = true;
-
 	hw_ptr = prm->hw_ptr;
-	prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
 
 	spin_unlock_irqrestore(&prm->lock, flags);
 
 	/* Pack USB load in ALSA ring buffer */
-	pending = prm->dma_bytes - hw_ptr;
+	pending = runtime->dma_bytes - hw_ptr;
 
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 		if (unlikely(pending < req->actual)) {
-			memcpy(req->buf, prm->dma_area + hw_ptr, pending);
-			memcpy(req->buf + pending, prm->dma_area,
+			memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
+			memcpy(req->buf + pending, runtime->dma_area,
 			       req->actual - pending);
 		} else {
-			memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
+			memcpy(req->buf, runtime->dma_area + hw_ptr,
+			       req->actual);
 		}
 	} else {
 		if (unlikely(pending < req->actual)) {
-			memcpy(prm->dma_area + hw_ptr, req->buf, pending);
-			memcpy(prm->dma_area, req->buf + pending,
+			memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
+			memcpy(runtime->dma_area, req->buf + pending,
 			       req->actual - pending);
 		} else {
-			memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
+			memcpy(runtime->dma_area + hw_ptr, req->buf,
+			       req->actual);
 		}
 	}
 
+	spin_lock_irqsave(&prm->lock, flags);
+	/* update hw_ptr after data is copied to memory */
+	prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
+	hw_ptr = prm->hw_ptr;
+	spin_unlock_irqrestore(&prm->lock, flags);
+	snd_pcm_stream_unlock_irqrestore(substream, flags2);
+
+	if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
+		snd_pcm_period_elapsed(substream);
+
 exit:
 	if (usb_ep_queue(ep, req, GFP_ATOMIC))
 		dev_err(uac->card->dev, "%d Error!\n", __LINE__);
-
-	if (update_alsa)
-		snd_pcm_period_elapsed(substream);
 }
 
 static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
@@ -236,40 +242,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
 static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
 			     struct snd_pcm_hw_params *hw_params)
 {
-	struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
-	struct uac_rtd_params *prm;
-	int err;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		prm = &uac->p_prm;
-	else
-		prm = &uac->c_prm;
-
-	err = snd_pcm_lib_malloc_pages(substream,
+	return snd_pcm_lib_malloc_pages(substream,
 					params_buffer_bytes(hw_params));
-	if (err >= 0) {
-		prm->dma_bytes = substream->runtime->dma_bytes;
-		prm->dma_area = substream->runtime->dma_area;
-		prm->period_size = params_period_bytes(hw_params);
-	}
-
-	return err;
 }
 
 static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
 {
-	struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
-	struct uac_rtd_params *prm;
-
-	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-		prm = &uac->p_prm;
-	else
-		prm = &uac->c_prm;
-
-	prm->dma_area = NULL;
-	prm->dma_bytes = 0;
-	prm->period_size = 0;
-
 	return snd_pcm_lib_free_pages(substream);
 }
 
@@ -595,15 +573,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
 	if (err < 0)
 		goto snd_fail;
 
-	strcpy(pcm->name, pcm_name);
+	strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
 	pcm->private_data = uac;
 	uac->pcm = pcm;
 
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
 	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
 
-	strcpy(card->driver, card_name);
-	strcpy(card->shortname, card_name);
+	strlcpy(card->driver, card_name, sizeof(card->driver));
+	strlcpy(card->shortname, card_name, sizeof(card->shortname));
 	sprintf(card->longname, "%s %i", card_name, card->dev->id);
 
 	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
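
The reworked completion handler above drops the driver's shadow copy of the ALSA buffer state (dma_area/dma_bytes/period_size) and works directly on the substream runtime under the stream lock, then signals a period from the freshly advanced hardware pointer. The boundary test is compact; a standalone sketch of the same arithmetic (plain C, and assuming req->actual never exceeds one period, which holds for USB audio packet sizes):

#include <stdbool.h>
#include <stddef.h>

/*
 * After advancing the ring-buffer pointer by 'actual' bytes, a period
 * boundary was crossed iff the offset into the current period is now
 * smaller than the number of bytes just consumed.
 */
static bool period_elapsed(size_t hw_ptr, size_t actual,
			   size_t buf_bytes, size_t period_bytes)
{
	size_t new_ptr = (hw_ptr + actual) % buf_bytes;

	return (new_ptr % period_bytes) < actual;
}

This mirrors the (hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual check in the hunk.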
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
index f0cdf89b8503..83ba8a2eb6af 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
+++ b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
@@ -2,6 +2,7 @@
 config USB_ASPEED_VHUB
 	tristate "Aspeed vHub UDC driver"
 	depends on ARCH_ASPEED || COMPILE_TEST
+	depends on USB_LIBCOMPOSITE
 	help
 	  USB peripheral controller for the Aspeed AST2500 family
 	  SoCs supporting the "vHub" functionality and USB2.0
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
index 20ffb03ff6ac..e2927fb083cf 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
@@ -108,6 +108,13 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
 	/* Check our state, cancel pending requests if needed */
 	if (ep->ep0.state != ep0_state_token) {
 		EPDBG(ep, "wrong state\n");
+		ast_vhub_nuke(ep, -EIO);
+
+		/*
+		 * Accept the packet regardless, this seems to happen
+		 * when stalling a SETUP packet that has an OUT data
+		 * phase.
+		 */
 		ast_vhub_nuke(ep, 0);
 		goto stall;
 	}
@@ -212,6 +219,8 @@ static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
 	if (chunk && req->req.buf)
 		memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
 
+	vhub_dma_workaround(ep->buf);
+
 	/* Remember chunk size and trigger send */
 	reg = VHUB_EP0_SET_TX_LEN(chunk);
 	writel(reg, ep->ep0.ctlstat);
@@ -224,7 +233,7 @@ static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
 	EPVDBG(ep, "rx prime\n");
 
 	/* Prime endpoint for receiving data */
-	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat + AST_VHUB_EP0_CTRL);
+	writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
 }
 
 static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 80c9feac5147..5939eb1e97f2 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -66,11 +66,16 @@ static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
 	if (!req->req.dma) {
 
 		/* For IN transfers, copy data over first */
-		if (ep->epn.is_in)
+		if (ep->epn.is_in) {
 			memcpy(ep->buf, req->req.buf + act, chunk);
+			vhub_dma_workaround(ep->buf);
+		}
 		writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
-	} else
+	} else {
+		if (ep->epn.is_in)
+			vhub_dma_workaround(req->req.buf);
 		writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
+	}
 
 	/* Start DMA */
 	req->active = true;
@@ -161,6 +166,7 @@ static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
 static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
 				   struct ast_vhub_req *req)
 {
+	struct ast_vhub_desc *desc = NULL;
 	unsigned int act = req->act_count;
 	unsigned int len = req->req.length;
 	unsigned int chunk;
@@ -177,7 +183,6 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
 
 	/* While we can create descriptors */
 	while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
-		struct ast_vhub_desc *desc;
 		unsigned int d_num;
 
 		/* Grab next free descriptor */
@@ -227,6 +232,9 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
 		req->act_count = act = act + chunk;
 	}
 
+	if (likely(desc))
+		vhub_dma_workaround(desc);
+
 	/* Tell HW about new descriptors */
 	writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
 	       ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
index 2b040257bc1f..4ed03d33a5a9 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -462,6 +462,39 @@ enum std_req_rc {
 #define DDBG(d, fmt, ...)	do { } while(0)
 #endif
 
+static inline void vhub_dma_workaround(void *addr)
+{
+	/*
+	 * This works around a confirmed HW issue with the Aspeed chip.
+	 *
+	 * The core uses a different bus to memory than the AHB going to
+	 * the USB device controller. Due to the latter having a higher
+	 * priority than the core for arbitration on that bus, it's
+	 * possible for an MMIO to the device, followed by a DMA by the
+	 * device from memory, to all be performed and serviced before
+	 * a previous store to memory gets completed.
+	 *
+	 * Thus the following scenario can happen:
+	 *
+	 *    - Driver writes to a DMA descriptor (Mbus)
+	 *    - Driver writes to the MMIO register to start the DMA (AHB)
+	 *    - The gadget sees the second write and sends a read of the
+	 *      descriptor to the memory controller (Mbus)
+	 *    - The gadget hits memory before the descriptor write,
+	 *      causing it to read an obsolete value.
+	 *
+	 * Thankfully the problem is limited to the USB gadget device; other
+	 * masters in the SoC all have a lower priority than the core, thus
+	 * ensuring that the store by the core arrives first.
+	 *
+	 * The workaround consists of using a dummy read of the memory before
+	 * doing the MMIO writes. This will ensure that the previous writes
+	 * have been "pushed out".
+	 */
+	mb();
+	(void)__raw_readl((void __iomem *)addr);
+}
+
 /* core.c */
 void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
 		   int status);
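
The long comment above documents a cross-bus ordering hazard: the doorbell MMIO can overtake an in-flight descriptor store, so the device reads stale descriptor memory, and the dummy read forces the store to complete first. A generic sketch of the kick sequence the driver now applies everywhere, with hypothetical names (my_desc, doorbell) standing in for the ast_vhub equivalents:

#include <linux/io.h>
#include <linux/types.h>

struct my_desc { u32 addr; u32 len; };	/* hypothetical DMA descriptor */

static void kick_dma(struct my_desc *desc, void __iomem *doorbell,
		     u32 addr, u32 len)
{
	desc->addr = addr;			 /* descriptor store (Mbus) */
	desc->len = len;
	mb();					 /* order the stores...     */
	(void)__raw_readl((void __iomem *)desc); /* ...and pull them to RAM */
	writel(1, doorbell);			 /* only then ring the MMIO */
}

The read targets the same address as the store, so by the time it returns, the memory controller has observed the descriptor write.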
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index a3ecce62662b..11e25a3f4f1f 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -832,11 +832,11 @@ static void init_controller(struct r8a66597 *r8a66597)
 
 	r8a66597_bset(r8a66597, XCKE, SYSCFG0);
 
-	msleep(3);
+	mdelay(3);
 
 	r8a66597_bset(r8a66597, PLLC, SYSCFG0);
 
-	msleep(1);
+	mdelay(1);
 
 	r8a66597_bset(r8a66597, SCKE, SYSCFG0);
 
@@ -1190,7 +1190,7 @@ __acquires(r8a66597->lock)
 	r8a66597->ep0_req->length = 2;
 	/* AV: what happens if we get called again before that gets through? */
 	spin_unlock(&r8a66597->lock);
-	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
+	r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
 	spin_lock(&r8a66597->lock);
 }
 
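
Both r8a66597 fixes are about execution context: these paths can run in atomic context (spinlock held, or interrupt-driven), where msleep() and GFP_KERNEL may sleep and are therefore forbidden, while mdelay() busy-waits and GFP_ATOMIC never sleeps. A minimal illustration of the rule, using a hypothetical lock:

#include <linux/delay.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(hw_lock);

static void bring_up_clock_atomic(void)
{
	unsigned long flags;

	spin_lock_irqsave(&hw_lock, flags);
	/*
	 * msleep(3) here could schedule while holding the lock (a bug);
	 * mdelay(3) spins and is legal in atomic context.
	 */
	mdelay(3);
	spin_unlock_irqrestore(&hw_lock, flags);
}

The cost is burned CPU time, which is why mdelay() is reserved for short, unavoidable waits like these.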
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 1fbfd89d0a0f..387f124a8334 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -508,16 +508,18 @@ static int xhci_do_dbc_start(struct xhci_hcd *xhci)
 	return 0;
 }
 
-static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
+static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
 {
 	struct xhci_dbc *dbc = xhci->dbc;
 
 	if (dbc->state == DS_DISABLED)
-		return;
+		return -1;
 
 	writel(0, &dbc->regs->control);
 	xhci_dbc_mem_cleanup(xhci);
 	dbc->state = DS_DISABLED;
+
+	return 0;
 }
 
 static int xhci_dbc_start(struct xhci_hcd *xhci)
@@ -544,6 +546,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci)
 
 static void xhci_dbc_stop(struct xhci_hcd *xhci)
 {
+	int ret;
 	unsigned long flags;
 	struct xhci_dbc *dbc = xhci->dbc;
 	struct dbc_port *port = &dbc->port;
@@ -556,10 +559,11 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
 	xhci_dbc_tty_unregister_device(xhci);
 
 	spin_lock_irqsave(&dbc->lock, flags);
-	xhci_do_dbc_stop(xhci);
+	ret = xhci_do_dbc_stop(xhci);
 	spin_unlock_irqrestore(&dbc->lock, flags);
 
-	pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+	if (!ret)
+		pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
 }
 
 static void
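
The DbC change is a reference-count balance: the start path takes a runtime-PM reference, so the stop path must drop one only if something was actually started. Returning a status from the inner stop makes that explicit. The shape of the pattern, with a hypothetical controller struct:

#include <linux/device.h>
#include <linux/pm_runtime.h>

struct my_ctrl {			/* hypothetical controller state */
	struct device *dev;
	bool running;
};

static int my_stop_hw(struct my_ctrl *c)
{
	if (!c->running)
		return -1;		/* never started: no reference held */
	c->running = false;
	return 0;
}

static void my_stop(struct my_ctrl *c)
{
	/* Drop the runtime-PM usage count only if start really took it. */
	if (!my_stop_hw(c))
		pm_runtime_put_sync(c->dev);
}

Without the check, stopping an already-disabled controller underflows the usage count and the device may runtime-suspend while still in use.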
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 8a62eee9eee1..ef350c33dc4a 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -595,7 +595,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
 	if (!ep->stream_info)
 		return NULL;
 
-	if (stream_id > ep->stream_info->num_streams)
+	if (stream_id >= ep->stream_info->num_streams)
 		return NULL;
 	return ep->stream_info->stream_rings[stream_id];
 }
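
A classic off-by-one: stream_rings[] holds num_streams entries, so valid indices run from 0 through num_streams - 1, and the guard must reject equality too. The same check in isolation:

#include <stddef.h>

/* Valid indices into an array of n entries are 0 .. n-1. */
static void *lookup_ring(void **rings, size_t n, size_t id)
{
	if (id >= n)	/* '>' alone would let id == n read one past the end */
		return NULL;
	return rings[id];
}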
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2f4850f25e82..68e6132aa8b2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3051,6 +3051,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
 	if (!list_empty(&ep->ring->td_list)) {
 		dev_err(&udev->dev, "EP not empty, refuse reset\n");
 		spin_unlock_irqrestore(&xhci->lock, flags);
+		xhci_free_command(xhci, cfg_cmd);
 		goto cleanup;
 	}
 	xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 8abb6cbbd98a..3be40eaa1ac9 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -396,8 +396,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
 			  loff_t *ppos)
 {
 	struct usb_yurex *dev;
-	int retval = 0;
-	int bytes_read = 0;
+	int len = 0;
 	char in_buffer[20];
 	unsigned long flags;
 
@@ -405,26 +404,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
 
 	mutex_lock(&dev->io_mutex);
 	if (!dev->interface) {	/* already disconnected */
-		retval = -ENODEV;
-		goto exit;
+		mutex_unlock(&dev->io_mutex);
+		return -ENODEV;
 	}
 
 	spin_lock_irqsave(&dev->lock, flags);
-	bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
+	len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
 	spin_unlock_irqrestore(&dev->lock, flags);
-
-	if (*ppos < bytes_read) {
-		if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
-			retval = -EFAULT;
-		else {
-			retval = bytes_read - *ppos;
-			*ppos += bytes_read;
-		}
-	}
-
-exit:
 	mutex_unlock(&dev->io_mutex);
-	return retval;
+
+	return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
 }
 
 static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
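
simple_read_from_buffer() folds the *ppos clamping, the copy_to_user() call, and the short-read/EOF cases into one helper, which is exactly the bookkeeping the hand-rolled loop above handled incompletely (it never bounded the copy by count, for instance). A minimal read() method in the same style, assuming a small fixed-size kernel buffer:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t my_read(struct file *file, char __user *buf,
		       size_t count, loff_t *ppos)
{
	char tmp[20];
	int len;

	len = scnprintf(tmp, sizeof(tmp), "%lld\n", 42LL); /* sample datum */

	/* One call handles offset validation, the user copy and EOF. */
	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}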
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 900875f326d7..f7c96d209eda 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -861,6 +861,7 @@ int usb_otg_start(struct platform_device *pdev)
 	if (pdata->init && pdata->init(pdev) != 0)
 		return -EINVAL;
 
+#ifdef CONFIG_PPC32
 	if (pdata->big_endian_mmio) {
 		_fsl_readl = _fsl_readl_be;
 		_fsl_writel = _fsl_writel_be;
@@ -868,6 +869,7 @@ int usb_otg_start(struct platform_device *pdev)
 		_fsl_readl = _fsl_readl_le;
 		_fsl_writel = _fsl_writel_le;
 	}
+#endif
 
 	/* request irq */
 	p_otg->irq = platform_get_irq(pdev, 0);
@@ -958,7 +960,7 @@ int usb_otg_start(struct platform_device *pdev)
 /*
  * state file in sysfs
  */
-static int show_fsl_usb2_otg_state(struct device *dev,
+static ssize_t show_fsl_usb2_otg_state(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct otg_fsm *fsm = &fsl_otg_dev->fsm;
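
The prototype fix matters because device_attribute::show is declared to return ssize_t; an int-returning handler is a type mismatch that modern compilers reject when the function pointer is stored. The conforming shape of a sysfs show routine (hypothetical attribute, using the kernel's *_show naming convention):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	/* sysfs hands in a PAGE_SIZE buffer; scnprintf() cannot overrun it. */
	return scnprintf(buf, PAGE_SIZE, "%s\n", "idle");
}
static DEVICE_ATTR_RO(state);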
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index bdd7a5ad3bf1..3bb1fff02bed 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -128,7 +128,7 @@ static int ch341_control_in(struct usb_device *dev,
 	r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
 			    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
 			    value, index, buf, bufsize, DEFAULT_TIMEOUT);
-	if (r < bufsize) {
+	if (r < (int)bufsize) {
 		if (r >= 0) {
 			dev_err(&dev->dev,
 				"short control message received (%d < %u)\n",
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index ee0cc1d90b51..626a29d9aa58 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -149,6 +149,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x10C4, 0x8977) },	/* CEL MeshWorks DevKit Device */
 	{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
 	{ USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
+	{ USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
 	{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
 	{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
 	{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 5169624d8b11..38d43c4b7ce5 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -369,8 +369,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
 			     3, /* get pins */
 			     USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
 			     0, 0, data, 1, 2000);
-	if (rc >= 0)
+	if (rc == 1)
 		*value = *data;
+	else if (rc >= 0)
+		rc = -EIO;
 
 	kfree(data);
 	return rc;
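
The keyspan_pda change encodes a rule worth generalizing: a non-negative return from usb_control_msg() only means the transfer completed, not that it moved the number of bytes asked for, so a 1-byte read must check rc == 1 before trusting the buffer. A hedged sketch of the same post-condition check:

#include <linux/errno.h>

/* Validate a control-transfer result against the expected byte count. */
static int check_xfer(int rc, int want)
{
	if (rc == want)
		return 0;	/* full transfer: buffer contents valid */
	if (rc >= 0)
		return -EIO;	/* short transfer: data incomplete */
	return rc;		/* negative errno from the USB core */
}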
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index fdceb46d9fc6..b580b4c7fa48 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -468,6 +468,9 @@ static void mos7840_control_callback(struct urb *urb)
 	}
 
 	dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
+	if (urb->actual_length < 1)
+		goto out;
+
 	dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
 		mos7840_port->MsrLsr, mos7840_port->port_num);
 	data = urb->transfer_buffer;
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index d961f1ec0e08..d1d20252bad8 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -725,6 +725,9 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
 
 	tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
 
+	port->supply_voltage = mv;
+	port->current_limit = max_ma;
+
 	if (port->tcpc->set_current_limit)
 		ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
 
@@ -2137,7 +2140,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
 	 * PPS APDO. Again skip the first sink PDO as this will
 	 * always be 5V 3A.
 	 */
-	for (j = i; j < port->nr_snk_pdo; j++) {
+	for (j = 1; j < port->nr_snk_pdo; j++) {
 		pdo = port->snk_pdo[j];
 
 		switch (pdo_type(pdo)) {
@@ -2595,8 +2598,6 @@ static void tcpm_reset_port(struct tcpm_port *port)
 	tcpm_set_attached_state(port, false);
 	port->try_src_count = 0;
 	port->try_snk_count = 0;
-	port->supply_voltage = 0;
-	port->current_limit = 0;
 	port->usb_type = POWER_SUPPLY_USB_TYPE_C;
 
 	power_supply_changed(port->psy);
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 24ee2605b9f0..42dc1d3d71cf 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -28,5 +28,13 @@ config VFIO_PCI_INTX
 	def_bool y if !S390
 
 config VFIO_PCI_IGD
-	depends on VFIO_PCI
-	def_bool y if X86
+	bool "VFIO PCI extensions for Intel graphics (GVT-d)"
+	depends on VFIO_PCI && X86
+	default y
+	help
+	  Support for Intel IGD specific extensions to enable direct
+	  assignment to virtual machines. This includes exposing an IGD
+	  specific firmware table and read-only copies of the host bridge
+	  and LPC bridge config space.
+
+	  To enable Intel IGD assignment through vfio-pci, say Y.
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index b423a309a6e0..125b58eff936 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -28,6 +28,7 @@
 #include <linux/uaccess.h>
 #include <linux/vfio.h>
 #include <linux/vgaarb.h>
+#include <linux/nospec.h>
 
 #include "vfio_pci_private.h"
 
@@ -727,6 +728,9 @@ static long vfio_pci_ioctl(void *device_data,
 		if (info.index >=
 		    VFIO_PCI_NUM_REGIONS + vdev->num_regions)
 			return -EINVAL;
+		info.index = array_index_nospec(info.index,
+						VFIO_PCI_NUM_REGIONS +
+						vdev->num_regions);
 
 		i = info.index - VFIO_PCI_NUM_REGIONS;
 
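
The nospec addition closes a Spectre-v1 window: the bounds check alone does not stop the CPU from speculatively executing the array access before the branch resolves. array_index_nospec() clamps the index with branch-free arithmetic so even the speculative path stays in range. The general pattern, over a hypothetical table:

#include <linux/errno.h>
#include <linux/nospec.h>

static int region_flags(const int *table, unsigned int nr, unsigned int idx)
{
	if (idx >= nr)
		return -EINVAL;

	/* Clamp idx to [0, nr) without a branch the CPU could mispredict. */
	idx = array_index_nospec(idx, nr);
	return table[idx];
}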
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 759a5bdd40e1..7cd63b0c1a46 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -457,17 +457,17 @@ static void tce_iommu_unuse_page(struct tce_container *container,
 }
 
 static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
-		unsigned long tce, unsigned long size,
+		unsigned long tce, unsigned long shift,
 		unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
 {
 	long ret = 0;
 	struct mm_iommu_table_group_mem_t *mem;
 
-	mem = mm_iommu_lookup(container->mm, tce, size);
+	mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
 	if (!mem)
 		return -EINVAL;
 
-	ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
+	ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
 	if (ret)
 		return -EINVAL;
 
@@ -487,7 +487,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
 	if (!pua)
 		return;
 
-	ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
+	ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift,
 			&hpa, &mem);
 	if (ret)
 		pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -611,7 +611,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
 				entry + i);
 
 		ret = tce_iommu_prereg_ua_to_hpa(container,
-				tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
+				tce, tbl->it_page_shift, &hpa, &mem);
 		if (ret)
 			break;
 
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2c75b33db4ac..3e5b17710a4f 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -343,18 +343,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 	struct page *page[1];
 	struct vm_area_struct *vma;
 	struct vm_area_struct *vmas[1];
+	unsigned int flags = 0;
 	int ret;
 
+	if (prot & IOMMU_WRITE)
+		flags |= FOLL_WRITE;
+
+	down_read(&mm->mmap_sem);
 	if (mm == current->mm) {
-		ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
-					      page, vmas);
+		ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
 	} else {
-		unsigned int flags = 0;
-
-		if (prot & IOMMU_WRITE)
-			flags |= FOLL_WRITE;
-
-		down_read(&mm->mmap_sem);
 		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
 					    vmas, NULL);
 		/*
@@ -368,8 +366,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 			ret = -EOPNOTSUPP;
 			put_page(page[0]);
 		}
-		up_read(&mm->mmap_sem);
 	}
+	up_read(&mm->mmap_sem);
 
 	if (ret == 1) {
 		*pfn = page_to_pfn(page[0]);